Hello, I modified this code for my own dataset, which has 124 classes and 60*60 grayscale images. The previous dataset was knifey, with 3 classes ['forky', 'knifey', 'spoony'] and images of size 200*200*3. It gives me the error below:
InvalidArgumentError (see above for traceback): logits and labels must have the same first
dimension, got logits shape [128,124] and labels shape [32]
[[Node: SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits =
SparseSoftmaxCrossEntropyWithLogits[T=DT_FLOAT, Tlabels=DT_INT64,
_device="/job:localhost/replica:0/task:0/device:CPU:0"](layer_fc_2/BiasAdd,
IteratorGetNext:1)]]
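Note that 128 is exactly 4 x 32 (the batch size coming out of the iterator), which points at the tf.reshape in model_fn: with -1 as the first dimension, any mismatch between img_size * img_size * num_channels and the real image size gets folded into the batch dimension instead of raising an error. A minimal sketch of the effect (the concrete values, img_size = 30 against real 60*60*1 images, are illustrative assumptions, not taken from my code):

import numpy as np

batch = np.zeros((32, 60 * 60))       # 32 flattened 60x60 grayscale images
bad = batch.reshape(-1, 30, 30, 1)    # wrong geometry: 30*30*1 per "image"
print(bad.shape)                      # (128, 30, 30, 1) - batch inflated 4x
good = batch.reshape(-1, 60, 60, 1)   # matches the real geometry
print(good.shape)                     # (32, 60, 60, 1)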
Here is my model_fn:
import tensorflow as tf


def model_fn(features, labels, mode, params):
    # Args:
    #
    # features: This is the x-arg from the input_fn.
    # labels:   This is the y-arg from the input_fn.
    # mode:     Either TRAIN, EVAL, or PREDICT
    # params:   User-defined hyper-parameters, e.g. learning-rate.

    # Reference to the tensor named "image" in the input-function.
    x = features["image"]

    # The convolutional layers expect 4-rank tensors
    # but x is a 2-rank tensor, so reshape it.
    # (img_size, num_channels and num_classes are globals set elsewhere.)
    net = tf.reshape(x, [-1, img_size, img_size, num_channels])
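    # NOTE: because the first dimension is -1, this reshape never fails;
    # if img_size * img_size * num_channels is smaller than the true size
    # of one image, the surplus elements are silently folded into the
    # batch dimension (e.g. 32 images can come out as 128 smaller ones).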
    # First convolutional layer.
    net = tf.layers.conv2d(inputs=net, name='layer_conv1',
                           filters=32, kernel_size=3,
                           padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=1)

    # Second convolutional layer.
    net = tf.layers.conv2d(inputs=net, name='layer_conv2',
                           filters=32, kernel_size=3,
                           padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=1)

    # Flatten to a 2-rank tensor.
    net = tf.contrib.layers.flatten(net)
    # Eventually this should be replaced with:
    # net = tf.layers.flatten(net)

    # First fully-connected / dense layer.
    # This uses the ReLU activation function.
    net = tf.layers.dense(inputs=net, name='layer_fc1',
                          units=128, activation=tf.nn.relu)

    # Second fully-connected / dense layer.
    # This is the last layer so it does not use an activation function.
    net = tf.layers.dense(inputs=net, name='layer_fc_2',
                          units=num_classes)

    # Logits output of the neural network.
    logits = net
    # (Debugging only: this opens a second tf.Session inside model_fn,
    # which does not share the Estimator's own graph/session state.)
    with tf.Session() as sess:
        print(labels.eval())
        # print(logits.eval())
    # Softmax output of the neural network.
    y_pred = tf.nn.softmax(logits=logits)

    # Classification output of the neural network.
    y_pred_cls = tf.argmax(y_pred, axis=1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        # If the estimator is supposed to be in prediction-mode
        # then use the predicted class-number that is output by
        # the neural network. Optimization etc. is not needed.
        spec = tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=y_pred_cls)
    else:
        # Otherwise the estimator is supposed to be in either
        # training or evaluation-mode. Note that the loss-function
        # is also required in Evaluation mode.

        # Define the loss-function to be optimized, by first
        # calculating the cross-entropy between the output of
        # the neural network and the true labels for the input data.
        # This gives the cross-entropy for each image in the batch.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
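        # sparse_softmax_cross_entropy_with_logits expects logits of shape
        # [batch_size, num_classes] and labels of shape [batch_size], with
        # matching batch sizes; the error above means the batch dimension
        # (32 from the iterator) no longer matches the logits (128) here.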
        # Reduce the cross-entropy batch-tensor to a single number
        # which can be used in optimization of the neural network.
        loss = tf.reduce_mean(cross_entropy)

        # Define the optimizer for improving the neural network.
        optimizer = tf.train.AdamOptimizer(learning_rate=params["learning_rate"])

        # Get the TensorFlow op for doing a single optimization step.
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())

        # Define the evaluation metrics,
        # in this case the classification accuracy.
        metrics = {
            "accuracy": tf.metrics.accuracy(labels, y_pred_cls)
        }

        # Wrap all of this in an EstimatorSpec.
        spec = tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=metrics)

    return spec
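For reference, here is a small guard that would make this kind of mismatch fail loudly instead of silently. It is only a sketch, under the assumption that the flattened image size is statically known; check_reshape is a hypothetical helper, not part of the tutorial:

import tensorflow as tf

def check_reshape(x, img_size, num_channels):
    # x is the "image" tensor from the input_fn, shape [batch, flat_size].
    flat_size = int(x.shape[1])  # assumes the static shape is known
    expected = img_size * img_size * num_channels
    assert flat_size == expected, (
        "each image has %d elements, but img_size*img_size*num_channels"
        " = %d" % (flat_size, expected))
    # Safe now: -1 can only resolve to the original batch size.
    return tf.reshape(x, [-1, img_size, img_size, num_channels])

Inside model_fn it would replace the plain reshape: net = check_reshape(x, img_size, num_channels).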