I'm trying to build a softmax regression model for CIFAR classification. At first, when I tried passing the images and labels in through the feed dict, I got an error saying that a feed dict does not accept Tensors. I then converted them to numpy arrays using .eval(), but the program hangs on the .eval() line and never continues. How can I pass this data into feed_dict?
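(For context: as far as I can tell, the first error is because feed_dict values have to be concrete numpy arrays or Python values, not symbolic tensors. A tiny sketch of the difference, with made-up placeholder names just for illustration:)

import numpy as np
import tensorflow as tf

a = tf.placeholder(tf.float32, shape=[None, 2])
doubled = a * 2.0

with tf.Session() as sess:
    # Works: feed_dict gets a concrete numpy array
    print(sess.run(doubled, feed_dict={a: np.ones((3, 2), dtype=np.float32)}))
    # Fails: feeding a tf.Tensor raises
    # "TypeError: The value of a feed cannot be a tf.Tensor object."
    # sess.run(doubled, feed_dict={a: tf.ones([3, 2])})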
CIFARIMAGELOADING.PY
import tensorflow as tf
import os
import tensorflow.models.image.cifar10 as cf
IMAGE_SIZE = 24
BATCH_SIZE = 128
def loadimagesandlabels(size):
    # Load the images from the CIFAR data directory
    FLAGS = tf.app.flags.FLAGS
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = cf.cifar10_input.read_cifar10(filename_queue)

    # Reshape and crop the image
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    cropped_image = tf.random_crop(reshaped_image, [height, width, 3])

    # Generate a batch of images and labels by building up a queue of examples
    print('Filling queue with CIFAR images')
    num_preprocess_threads = 16
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(BATCH_SIZE * min_fraction_of_examples_in_queue)
    images, label_batch = tf.train.batch([cropped_image, read_input.label],
                                         batch_size=BATCH_SIZE,
                                         num_threads=num_preprocess_threads,
                                         capacity=min_queue_examples + 3 * BATCH_SIZE)

    print(images)
    print(label_batch)
    return images, tf.reshape(label_batch, [BATCH_SIZE])
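(My understanding is that tf.train.batch returns tensors that are filled by background queue-runner threads, so evaluating them blocks until those threads have been started. A minimal sketch of how I think the queue is supposed to be driven, based on the TensorFlow queue-runner docs; the variable names here are mine:)

images, labels = loadimagesandlabels(size=BATCH_SIZE)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Without queue runners nothing ever fills the batching queue,
# so evaluating `images` (or calling .eval() on it) blocks forever.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

image_batch, label_batch = sess.run([images, labels])  # concrete numpy arrays

coord.request_stop()
coord.join(threads)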
CIFAR.PY
import tensorflow as tf
import CIFARImageLoading

NUM_CLASSES = 10
BATCH_SIZE = CIFARImageLoading.BATCH_SIZE

#Set up placeholder vectors for image and labels (1728 = 24 * 24 * 3 values per cropped image)
x = tf.placeholder(tf.float32, shape = [None, 1728])
y_ = tf.placeholder(tf.float32, shape = [None,10])
W = tf.Variable(tf.zeros([1728,10]))
b = tf.Variable(tf.zeros([10]))
#Implement regression model. Multiply input images x by weight matrix W, add the bias b
#Compute the softmax probabilities that are assigned to each class
y = tf.nn.softmax(tf.matmul(x,W) + b)
#Define cross entropy
#tf.reduce_sum sums across all classes and tf.reduce_mean takes the average over these sums
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y), reduction_indices = [1]))
#Train the model
#Each training iteration we load 128 training examples. We then run the train_step operation
#using feed_dict to replace the placeholder tensors x and y_ with the training examples
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
#Open up a Session
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    images, labels = CIFARImageLoading.loadimagesandlabels(size=BATCH_SIZE)
    unrolled_images = tf.reshape(images, (1728, BATCH_SIZE))
    #convert labels to their one_hot representations
    # should produce [[1,0,0,...],[0,1,0,...],...]
    one_hot_labels = tf.one_hot(indices=labels, depth=NUM_CLASSES, on_value=1.0, off_value=0.0, axis=-1)
    print(unrolled_images)
    print(one_hot_labels)
    images_numpy, labels_numpy = unrolled_images.eval(session=sess), one_hot_labels.eval(session=sess)
    sess.run(train_step, feed_dict={x: images_numpy, y_: labels_numpy})
#Evaluate the model
#tf.equal returns a tensor of booleans; we want to cast these to floats and then take their mean
#to get percent correctness (accuracy)
print("evaluating")
test_images, test_labels = CIFARImageLoading.loadimagesandlabels(TEST_SIZE)
test_images_unrolled = tf.reshape(test_images, (1728, TEST_SIZE))
test_images_one_hot = tf.one_hot(indices= test_labels, depth=NUM_CLASSES, on_value=1.0, off_value= 0.0, axis=-1)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(session=sess, feed_dict={x: test_images_unrolled.eval(session=sess), y_: test_images_one_hot.eval(session=sess)}))
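Is something like the following closer to what I should be doing? (Just a sketch of my current understanding, reusing sess, x, y_ and train_step from above: the input pipeline and reshaping ops are built once outside the loop, queue runners are started, and each step fetches concrete numpy arrays that get fed into the placeholders. I have also flipped the reshape to (BATCH_SIZE, 1728) so it matches the [None, 1728] placeholder, but I'm not sure that part is right.)

# Build the input pipeline and the reshaping ops once, outside the training loop
images, labels = CIFARImageLoading.loadimagesandlabels(size=BATCH_SIZE)
unrolled_images = tf.reshape(images, (BATCH_SIZE, 1728))
one_hot_labels = tf.one_hot(indices=labels, depth=NUM_CLASSES, on_value=1.0, off_value=0.0, axis=-1)

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

for i in range(1000):
    # sess.run returns numpy arrays, which is what feed_dict accepts
    images_numpy, labels_numpy = sess.run([unrolled_images, one_hot_labels])
    sess.run(train_step, feed_dict={x: images_numpy, y_: labels_numpy})

coord.request_stop()
coord.join(threads)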