I am using TensorFlow to run a convolutional neural network on the MNIST dataset, but I am getting the following error:
tensorflow.python.framework.errors.InvalidArgumentError: You must feed a value for placeholder tensor 'x' with dtype float
     [[Node: x = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
The placeholder x is defined as:

x = tf.placeholder(tf.float32, [None, 784], name='x') # mnist data image of shape 28*28=784
I thought I was correctly feeding a value for x through feed_dict, but it says I haven't fed the placeholder x.
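For reference, this is my understanding of how placeholders and feed_dict are supposed to interact. It is a minimal standalone sketch with made-up names (p, doubled), not taken from my actual code:

import tensorflow as tf

# placeholder with an unspecified batch dimension, like my x
p = tf.placeholder(tf.float32, [None, 784], name='p')
doubled = p * 2.0  # any op that depends on the placeholder

with tf.Session() as sess:
    # every sess.run / .eval() of an op that depends on p
    # needs a feed_dict entry supplying a value for p
    result = sess.run(doubled, feed_dict={p: [[1.0] * 784]})
    print(result.shape)  # (1, 784)

Based on that, I assumed passing feed_dict={x: batch_xs, y: batch_ys} to sess.run in my training loop would be enough to satisfy the placeholder.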
Also, is there any other logical flaw in my code?
Any help would be greatly appreciated. Thanks. My full code is below:
import tensorflow as tf
import numpy
from tensorflow.examples.tutorials.mnist import input_data
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784], name='x') # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10], name='y')  # 0-9 digits recognition => 10 classes
# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name="weights")
b = tf.Variable(tf.zeros([10]), name="bias")
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# Initializing the variables
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for i in range(1000):
        print i
        batch_xs, batch_ys = mnist.train.next_batch(50)
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_conv), reduction_indices=[1]))
        sess.run(
            [cross_entropy, y_conv],
            feed_dict={x: batch_xs, y: batch_ys})
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
        print correct_prediction.eval()
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))