I've been following Sentdex's deep learning tutorials on YouTube and ran into an error when trying to load an image and run it through the model. The error says the input is incompatible with the model's input signature, but I've been struggling to figure out how to change it. Any help would be much appreciated! Here is the code that loads the model and the test image:
```python
import cv2
import tensorflow as tf

CATEGORIES = ["Dog", "Cat"]


def prepare(filepath):
    IMG_SIZE = 50
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)


model = tf.keras.models.load_model("PyCharmProject\\64x3-CNN.model")

prediction = model.predict([prepare('PyCharmProject\dog.jpg')])
print(prediction)
```
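For what it's worth, once predict() works I was planning to map the output back to CATEGORIES the way the tutorial does; I haven't got that far because of the error, and the indexing below is just my understanding of the video:

```python
# The model ends in a single sigmoid unit, so prediction should be a (1, 1) array;
# cast the value to int and use it as an index into CATEGORIES.
print(CATEGORIES[int(prediction[0][0])])
```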
And the error I get:

```
ValueError Traceback (most recent call last)
<ipython-input-1-241c64aef27c> in <module>
12 model = tf.keras.models.load_model("PyCharmProject\\64x3-CNN.model")
13
---> 14 prediction = model.predict([prepare('PyCharmProject\dog.jpg')])
15 print(prediction)
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
912 max_queue_size=max_queue_size,
913 workers=workers,
--> 914 use_multiprocessing=use_multiprocessing)
915
916 def reset_metrics(self):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, **kwargs)
444 return self._model_iteration(
445 model, ModeKeys.PREDICT, x=x, batch_size=batch_size, verbose=verbose,
--> 446 steps=steps, callbacks=callbacks, **kwargs)
447
448
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, **kwargs)
426 mode=mode,
427 training_context=training_context,
--> 428 total_epochs=1)
429 cbks.make_logs(model, epoch_logs, result, mode)
430
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
120 step=step, mode=mode, size=current_batch_size) as batch_logs:
121 try:
--> 122 batch_outs = execution_function(iterator)
123 except (StopIteration, errors.OutOfRangeError):
124 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in execution_function(input_fn)
82 # `numpy` translates Tensors to values in Eager mode.
83 return nest.map_structure(_non_none_constant_value,
---> 84 distributed_function(input_fn))
85
86 return execution_function
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
447 # This is the first call of __call__, so we have to initialize.
448 initializer_map = object_identity.ObjectIdentityDictionary()
--> 449 self._initialize(args, kwds, add_initializers_to=initializer_map)
450 if self._created_variables:
451 try:
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
390 self._concrete_stateful_fn = (
391 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 392 *args, **kwds))
393
394 def invalid_creator_scope(*unused_args, **unused_kwds):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1837 if self.input_signature:
1838 args, kwargs = None, None
-> 1839 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1840 return graph_function
1841
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in _maybe_define_function(self, args, kwargs)
2137 graph_function = self._function_cache.primary.get(cache_key, None)
2138 if graph_function is None:
-> 2139 graph_function = self._create_graph_function(args, kwargs)
2140 self._function_cache.primary[cache_key] = graph_function
2141 return graph_function, args, kwargs
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2028 arg_names=arg_names,
2029 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2030 capture_by_value=self._capture_by_value),
2031 self._function_attributes,
2032 # Tell the ConcreteFunction to clean up its graph once it goes out of
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\def_function.py in wrapped_fn(*args, **kwds)
333 # __wrapped__ allows AutoGraph to swap in a converted function. We give
334 # the function a weak reference to itself to avoid a reference cycle.
--> 335 return weak_wrapped_fn().__wrapped__(*args, **kwds)
336 weak_wrapped_fn = weakref.ref(wrapped_fn)
337
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in distributed_function(input_iterator)
69 strategy = distribution_strategy_context.get_strategy()
70 outputs = strategy.experimental_run_v2(
---> 71 per_replica_function, args=(model, x, y, sample_weights))
72 # Out of PerReplica outputs reduce or pick values to return.
73 all_outputs = dist_utils.unwrap_output_dict(
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\distribute\distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
762 fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
763 convert_by_default=False)
--> 764 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
765
766 def reduce(self, reduce_op, value, axis):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\distribute\distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
1803 kwargs = {}
1804 with self._container_strategy().scope():
-> 1805 return self._call_for_each_replica(fn, args, kwargs)
1806
1807 def _call_for_each_replica(self, fn, args, kwargs):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\distribute\distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
2148 self._container_strategy(),
2149 replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2150 return fn(*args, **kwargs)
2151
2152 def _reduce_to(self, reduce_op, value, destinations):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\autograph\impl\api.py in wrapper(*args, **kwargs)
290 def wrapper(*args, **kwargs):
291 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292 return func(*args, **kwargs)
293
294 if inspect.isfunction(func) or inspect.ismethod(func):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in _predict_on_batch(***failed resolving arguments***)
158 def _predict_on_batch(model, x, y=None, sample_weights=None):
159 del y, sample_weights
--> 160 return predict_on_batch(model, x)
161
162 func = _predict_on_batch
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in predict_on_batch(model, x)
366
367 with backend.eager_learning_phase_scope(0):
--> 368 return model(inputs) # pylint: disable=not-callable
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
848 outputs = base_layer_utils.mark_as_return(outputs, acd)
849 else:
--> 850 outputs = call_fn(cast_inputs, *args, **kwargs)
851
852 except errors.OperatorNotAllowedInGraphError as e:
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\sequential.py in call(self, inputs, training, mask)
253 if not self.built:
254 self._init_graph_network(self.inputs, self.outputs, name=self.name)
--> 255 return super(Sequential, self).call(inputs, training=training, mask=mask)
256
257 outputs = inputs # handle the corner case where self.layers is empty
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\network.py in call(self, inputs, training, mask)
695 ' implement a `call` method.')
696
--> 697 return self._run_internal_graph(inputs, training=training, mask=mask)
698
699 def compute_output_shape(self, input_shape):
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\network.py in _run_internal_graph(self, inputs, training, mask)
840
841 # Compute outputs.
--> 842 output_tensors = layer(computed_tensors, **kwargs)
843
844 # Update tensor_dict.
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
848 outputs = base_layer_utils.mark_as_return(outputs, acd)
849 else:
--> 850 outputs = call_fn(cast_inputs, *args, **kwargs)
851
852 except errors.OperatorNotAllowedInGraphError as e:
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\keras\saving\saved_model\utils.py in return_outputs_and_add_losses(*args, **kwargs)
55 inputs = args[inputs_arg_index]
56 args = args[inputs_arg_index + 1:]
---> 57 outputs, losses = fn(inputs, *args, **kwargs)
58 layer.add_loss(losses, inputs)
59 return outputs
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
439 # In this case we have not created variables on the first call. So we can
440 # run the first trace but we should fail if variables are created.
--> 441 results = self._stateful_fn(*args, **kwds)
442 if self._created_variables:
443 raise ValueError("Creating variables on a non-first call to a function"
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in __call__(self, *args, **kwargs)
1811 def __call__(self, *args, **kwargs):
1812 """Calls a graph function specialized to the inputs."""
-> 1813 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
1814 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
1815
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in _maybe_define_function(self, args, kwargs)
2094 if self.input_signature is None or args is not None or kwargs is not None:
2095 args, kwargs = self._function_spec.canonicalize_function_inputs(
-> 2096 *args, **kwargs)
2097
2098 cache_key = self._cache_key(args, kwargs)
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in canonicalize_function_inputs(self, *args, **kwargs)
1640 inputs,
1641 self._input_signature,
-> 1642 self._flat_input_signature)
1643 return inputs, {}
1644
~\LT1Kqob5UDEML61gCyjnAcfMXgkdP3wGcge-packages\tensorflow_core\python\eager\function.py in _convert_inputs_to_signature(inputs, input_signature, flat_input_signature)
1706 flatten_inputs)):
1707 raise ValueError("Python inputs incompatible with input_signature:\n%s" %
-> 1708 format_error_message(inputs, input_signature))
1709
1710 if need_packing:
ValueError: Python inputs incompatible with input_signature:
inputs: (
Tensor("IteratorGetNext:0", shape=(None, 50, 50, 1), dtype=uint8))
input_signature: (
TensorSpec(shape=(None, None, None, 1), dtype=tf.float32, name=None))
```
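Reading the last few lines, it looks like my input arrives as a uint8 tensor while the saved model's signature expects tf.float32. A quick check I could run to confirm that (reusing prepare() and the test image from above; the commented values are just what I expect based on the error message):

```python
arr = prepare('PyCharmProject\\dog.jpg')
print(arr.dtype)  # presumably uint8, matching the "inputs" tensor in the error
print(arr.shape)  # presumably (1, 50, 50, 1), batched by the reshape(-1, ...)
```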
In case it's relevant, here is the code I used to build the model as well:

```python
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import pickle
import time

X = np.asarray(pickle.load(open("X.pickle", "rb")))
y = np.asarray(pickle.load(open("y.pickle", "rb")))

X = X/255.0

dense_layers = [1]
layer_sizes = [64]
conv_layers = [3]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            tensorboard = TensorBoard(log_dir='logs\{}'.format(NAME))

            model = Sequential()
            model.add(Conv2D(layer_size, (3,3), input_shape = X.shape[1:]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size = (2, 2)))

            for l in range(conv_layer-1):
                model.add(Conv2D(layer_size, (3,3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size = (2, 2)))

            model.add(Flatten())

            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))

            model.add(Dense(1))
            model.add(Activation("sigmoid"))

            model.compile(loss = "binary_crossentropy",
                          optimizer = "adam",
                          metrics = ['accuracy'])

            model.fit(X, y, batch_size=13, epochs=1, validation_split=0.1, steps_per_epoch=1727, callbacks=[tensorboard])

model.save('64x3-CNN.model')
```
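Since the training script scales X by 255.0 (which, as far as I understand, also turns it into a float array) while prepare() feeds the raw uint8 pixels straight in, I suspect that mismatch is the problem. This is the kind of change to prepare() I have in mind, though I'm not sure it's the right fix:

```python
import cv2


def prepare(filepath):
    IMG_SIZE = 50
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    # My guess: cast to float32 and scale the same way as the training data (X = X/255.0)
    # so the dtype matches the tf.float32 in the saved model's input signature.
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1).astype("float32") / 255.0
```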