Apologies if this is a naive question; this is my first attempt at using TensorFlow. I'm working through the Udacity deep learning course and am using it after having tried the notMNIST dataset with plain numpy. Here is the code I've written, but it only gets me 87-88% accuracy. Please suggest what mistakes I should look out for and what might be wrong in the code:
import tensorflow as tf

# image_size, num_labels, the train/valid/test arrays and the accuracy()
# helper are defined in earlier cells of the notebook.
batch_size = 256
node_dimensions = 1024

graph = tf.Graph()
with graph.as_default():
  # Input data: a placeholder for the training minibatch; the validation and
  # test sets are baked in as constants so they can be evaluated with .eval()
  # without a feed_dict.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)
# Variables.
  # Layer 1: input -> hidden (1024 units).
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, node_dimensions]))
b1 = tf.Variable(tf.zeros([node_dimensions]))
  # Layer 2: hidden -> hidden.
  w2 = tf.Variable(
      tf.random_uniform([node_dimensions, node_dimensions], -0.01, 0.01))
b2 = tf.Variable(tf.zeros([node_dimensions]))
  # Layer 3: hidden -> output logits.
w3 = tf.Variable(
tf.truncated_normal([node_dimensions, num_labels]))
b3 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# Layer 1.
y1 = tf.nn.softmax(tf.matmul(tf_train_dataset, w1) + b1)
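  # Note: softmax is used here as the hidden activation on layer 1, while
  # layer 2 below uses ReLU. A ReLU version of this layer (shown only for
  # comparison) would be:
  #   y1 = tf.nn.relu(tf.matmul(tf_train_dataset, w1) + b1)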
# Layer 2.
y2 = tf.nn.relu(tf.matmul(y1, w2) + b2)
# Layer 3.
logits = tf.matmul(y2, w3) + b3
  # softmax_cross_entropy_with_logits applies the softmax internally, so the
  # raw logits are passed in here.
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels,
                                              logits=logits))
  # Validation computation (same weights, full validation set).
# Layer 1.
y1 = tf.nn.softmax(tf.matmul(tf_valid_dataset, w1) + b1)
# Layer 2.
y2 = tf.nn.relu(tf.matmul(y1, w2) + b2)
# Layer 3.
logits_valid = tf.matmul(y2, w3) + b3
# Test computation.
# Layer 1.
y1 = tf.nn.softmax(tf.matmul(tf_test_dataset, w1) + b1)
# Layer 2.
y2 = tf.nn.relu(tf.matmul(y1, w2) + b2)
# Layer 3.
logits_test = tf.matmul(y2, w3) + b3
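  # (The same three layers are rebuilt by hand for train/valid/test; see the
  # shared forward() sketch after this code for one way to avoid the
  # duplication.)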
  # Optimizer: plain SGD with a fixed learning rate of 0.5.
  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
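  # A decaying learning rate is a variant I am considering instead of the
  # fixed 0.5 (hypothetical alternative, not what runs above):
  #   global_step = tf.Variable(0, trainable=False)
  #   learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.9)
  #   optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
  #       loss, global_step=global_step)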
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(logits_valid)
test_prediction = tf.nn.softmax(logits_test)
num_steps = 10001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch; the modulo makes the offset wrap around and
    # cycle through the training data.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
    if step % 500 == 0:
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(),
test_labels))
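
In case it matters, accuracy() comes from an earlier cell of the notebook; I believe it is the standard helper from the course, roughly:

import numpy as np

def accuracy(predictions, labels):
  # Percentage of samples whose arg-max prediction matches the arg-max of
  # the one-hot label.
  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
          / predictions.shape[0])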
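One thing I was unsure about: the three layers are rebuilt separately for training, validation, and test, and layer 1 uses softmax as its hidden activation while layer 2 uses ReLU. For reference, this is the restructuring I have in mind, a sketch assuming ReLU on both hidden layers with softmax only at the output; it would sit inside the same with graph.as_default(): block. Would this be the right direction?

def forward(x):
  # Shared three-layer forward pass: ReLU on both hidden layers, raw
  # logits at the output (softmax is applied by the loss and the
  # prediction ops).
  h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
  h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
  return tf.matmul(h2, w3) + b3

logits = forward(tf_train_dataset)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels,
                                            logits=logits))
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(forward(tf_valid_dataset))
test_prediction = tf.nn.softmax(forward(tf_test_dataset))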