
I have 1000 images of size 32x32x3 stored in a dummy.tfrecord file. I want to iterate over the dataset twice (2 epochs), so I specify tf.train.string_input_producer(['dummy.tfrecord'], num_epochs=2). With a batch size of 100, I expect tf.train.shuffle_batch to run for 2 * 10 = 20 iterations, since it takes 10 batches of 100 to exhaust the 1000 images.
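To spell out the arithmetic behind that expectation (a plain-Python sketch; num_imgs, batch_size and num_epochs are just illustrative names, not variables from the script below):

num_imgs = 1000       # images in dummy.tfrecord
batch_size = 100
num_epochs = 2

batches_per_epoch = num_imgs // batch_size             # 10
expected_iterations = num_epochs * batches_per_epoch   # 2 * 10 = 20
print(expected_iterations)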

I followed this answer, and it does indeed produce 20 iterations as expected. However, at the end, I get the error:

RandomShuffleQueue '_1_shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 100, current size 0)

This makes sense, since there are 0 images left in the queue.

How can I close the queue and exit cleanly? That is, there should be no error.

Here is the complete script:

import numpy as np
import tensorflow as tf

NUM_IMGS = 1000
tfrecord_file = 'dummy.tfrecord'

def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames,
            num_epochs=2)
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized,
                        features={
                            'label': tf.FixedLenFeature([], tf.string),
                            'image': tf.FixedLenFeature([], tf.string),
                        }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    image = tf.reshape(image, shape=(32, 32, 3))

    label = tf.cast(tfrecord_features['label'], tf.string)

    #provide batches
    images, labels = tf.train.shuffle_batch([image, label],
            batch_size=100,
            num_threads=4,
            capacity=50,
            min_after_dequeue=1)

    return images, labels 

imgs, lbls = read_from_tfrecord([tfrecord_file])
# num_epochs adds a local epoch counter, so local variables must be initialized too
init_op = tf.group(tf.global_variables_initializer(),
        tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    while not coord.should_stop():
        labels, images = sess.run([lbls, imgs])
        print(images.shape) #PRINTED 20 TIMES BUT FAILED AT THE 21ST 
    coord.request_stop()
    coord.join(threads)

In case anyone wants to reproduce this, here is the script that generates the dummy.tfrecord file:

import numpy as np
import tensorflow as tf

NUM_IMGS = 1000

def generate_image_binary():
    images = np.random.randint(0, 255, size=(NUM_IMGS, 32, 32, 3),
            dtype=np.uint8)
    labels = np.random.randint(0, 2, size=(NUM_IMGS, 1))
    return labels, images

def write_to_tfrecord(labels, images, tfrecord_file):
    writer = tf.python_io.TFRecordWriter(tfrecord_file)

    for i in range(NUM_IMGS):
        example = tf.train.Example(features=tf.train.Features(feature={
                    'label':
                    tf.train.Feature(bytes_list=tf.train.BytesList(value=[labels[i].tobytes()])),
                    'image':
                    tf.train.Feature(bytes_list=tf.train.BytesList(value=[images[i].tobytes()]))
                    }))
        writer.write(example.SerializeToString())
    writer.close()

tfrecord_file = 'dummy.tfrecord'
labels, images = generate_image_binary()
write_to_tfrecord(labels, images, tfrecord_file)
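As a quick sanity check that the file really contains 1000 records, the serialized examples can be counted (a small sketch using tf.python_io.tf_record_iterator from the TF 1.x API):

count = sum(1 for _ in tf.python_io.tf_record_iterator('dummy.tfrecord'))
print(count)  # expected: 1000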

1 Answer


The Coordinator can catch and handle exceptions, such as the tf.errors.OutOfRangeError that is raised to report that a queue has been closed. You can change your code to handle this exception:

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop():
            labels, images = sess.run([lbls, imgs])
            print(images.shape)  # printed 20 times; the 21st run raises OutOfRangeError, handled below
    except Exception as e:
        # When done (or on any error), ask the threads to stop.
        coord.request_stop(e)
    finally:
        coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
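If you prefer to swallow only the expected end-of-input signal and let any other error propagate, a narrower variant (a sketch against the same TF 1.x queue-runner API, reusing imgs, lbls and init_op from the question) catches tf.errors.OutOfRangeError explicitly:

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop():
            labels, images = sess.run([lbls, imgs])
            print(images.shape)
    except tf.errors.OutOfRangeError:
        # Raised once the producers have run for num_epochs and the queue is drained.
        print('Done: input exhausted after 2 epochs.')
    finally:
        coord.request_stop()
        coord.join(threads)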