
This question has been asked several times before, but I can't seem to adapt the previous solutions to my code, so I would appreciate any advice on how to solve it. I tried using pdb and setting a trace point just before the problem occurs, but that didn't give me much information. I am adapting this tutorial to my problem: https://www.oreilly.com/ideas/visualizing-convolutional-neural-networks

Data shapes:

x_train.shape: (1161, 68, 68, 1)
x_test.shape: (216, 68, 68, 1)
y_test.shape: (216,)
y_train.shape: (1161,)

Where the error occurs:

#Train the Model
steps = int(x_train.shape[0]/batchSize)
for i in range(numEpochs):
    print(i)
    accHist = []
    accHist2 = []
    #x_train, y_train = imf.shuffle(x_train, y_train)
    for j in range(steps):
        print(j)
        #Calculate our current step
        step = i * steps + j
        #Feed forward batch of train images into graph and log accuracy
        acc = sess.run([accuracy], feed_dict={X: x_train[(j*batchSize):((j+1)*batchSize),:,:,:], Y_: np.array(y_train[(j*batchSize):((j+1)*batchSize)]).reshape(1,30), keepRate1: 1, keepRate2: 1})
        print(accHist)
        accHist.append(acc)

        #Backpropagate using the Adam optimizer to update weights and biases.
        sess.run(train_step, feed_dict={X: x_train[(j*batchSize):((j+1)*batchSize),:,:,:], Y_: np.array(y_train[(j*batchSize):((j+1)*batchSize)]).reshape(1,30), keepRate1: 0.2, keepRate2: 0.5})
        print("success")

    print('Epoch number {} Training Accuracy: {}'.format(i+1, np.mean(accHist)))

    #Feed forward all test images into graph and log accuracy
    for k in range(int(x_test.shape[0]/batchSize)):
        acc = sess.run(accuracy, feed_dict={X: x_test[(k*batchSize):((k+1)*batchSize),:,:,:], Y_: np.array(y_test[(k*batchSize):((k+1)*batchSize)]).reshape(1,30), keepRate1: 1, keepRate2: 1})
        accHist2.append(acc)
    print("Test Set Accuracy: {}".format(np.mean(accHist2)))

I get the following error message:

InvalidArgumentError: logits and labels must be same size: logits_size=[30,30] labels_size=[1,30]
     [[Node: cross_entropy_7/SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](cross_entropy_7/Reshape, cross_entropy_7/Reshape_1)]]

Following the tutorial, I believe the logits are set up here:

#FULLY CONNECTED 3 & SOFTMAX OUTPUT
with tf.name_scope('softmax') as scope:
    fc2w = tf.Variable(tf.truncated_normal([512, classes], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_2')
    fc2b = tf.Variable(tf.constant(1.0, shape=[classes], dtype=tf.float32),
                       trainable=True, name='biases3_2')
    Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)
    Y = tf.nn.softmax(Ylogits)

print(Ylogits.shape) gives me (?, 30) here. classes is set to 30, so that seems to make sense.

This seems to be the part that isn't working, so I printed the shapes:

with tf.name_scope('cross_entropy'):
    print(Ylogits.shape)
    print(Y.shape)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    loss = tf.reduce_mean(cross_entropy)

This gives me:

(?, 30)
(?, 30)

Yet this still doesn't seem to work when the backpropagation line above is executed. Can anyone help?


In response to the comments (this is basically the tutorial code from the link mentioned above):

Placeholders:

classes = 30
X = tf.placeholder(tf.float32, name="X-placeholder", shape=(None, 68, 68, 1))
Y_ = tf.placeholder(tf.float32, [None, classes], name="Y_-placeholder")
keepRate1 = tf.placeholder(tf.float32, name="keepRate1-placeholder")
keepRate2 = tf.placeholder(tf.float32, name="keepRate2-placeholder")

Model:

# CONVOLUTION 1 - 1
with tf.name_scope('conv1_1'):
    filter1_1 = tf.Variable(tf.truncated_normal([3, 3, 1, 32], dtype=tf.float32,
                            stddev=1e-1), name='weights1_1')
    stride = [1,1,1,1]
    conv = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32),
                         trainable=True, name='biases1_1')
    out = tf.nn.bias_add(conv, biases)
    conv1_1 = tf.nn.relu(out)

# CONVOLUTION 1 - 2
with tf.name_scope('conv1_2'):
    filter1_2 = tf.Variable(tf.truncated_normal([3, 3, 32, 32], dtype=tf.float32,
                                                stddev=1e-1), name='weights1_2')
    conv = tf.nn.conv2d(conv1_1, filter1_2, [1,1,1,1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32),
                         trainable=True, name='biases1_2')
    out = tf.nn.bias_add(conv, biases)
    conv1_2 = tf.nn.relu(out)

# POOL 1
with tf.name_scope('pool1'):
    pool1_1 = tf.nn.max_pool(conv1_2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='pool1_1')
    pool1_1_drop = tf.nn.dropout(pool1_1, keepRate1)

# CONVOLUTION 2 - 1
with tf.name_scope('conv2_1'):
    filter2_1 = tf.Variable(tf.truncated_normal([3, 3, 32, 64], dtype=tf.float32,
                                                stddev=1e-1), name='weights2_1')
    conv = tf.nn.conv2d(pool1_1_drop, filter2_1, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases2_1')
    out = tf.nn.bias_add(conv, biases)
    conv2_1 = tf.nn.relu(out)

# CONVOLUTION 2 - 2
with tf.name_scope('conv2_2'):
    filter2_2 = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
                                                stddev=1e-1), name='weights2_2')
    conv = tf.nn.conv2d(conv2_1, filter2_2, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases2_2')
    out = tf.nn.bias_add(conv, biases)
    conv2_2 = tf.nn.relu(out)

# POOL 2
with tf.name_scope('pool2'):
    pool2_1 = tf.nn.max_pool(conv2_2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='pool2_1')
    pool2_1_drop = tf.nn.dropout(pool2_1, keepRate1)

#FULLY CONNECTED 1
with tf.name_scope('fc1') as scope:
    shape = int(np.prod(pool2_1_drop.get_shape()[1:]))
    fc1w = tf.Variable(tf.truncated_normal([shape, 512], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_1')
    fc1b = tf.Variable(tf.constant(1.0, shape=[512], dtype=tf.float32),
                       trainable=True, name='biases3_1')
    pool2_flat = tf.reshape(pool2_1_drop, [-1, shape])
    out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)
    fc1 = tf.nn.relu(out)
    fc1_drop = tf.nn.dropout(fc1, keepRate2)

#FULLY CONNECTED 3 & SOFTMAX OUTPUT
with tf.name_scope('softmax') as scope:
    fc2w = tf.Variable(tf.truncated_normal([512, classes], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_2')
    fc2b = tf.Variable(tf.constant(1.0, shape=[classes], dtype=tf.float32),
                       trainable=True, name='biases3_2')
    Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)
    Y = tf.nn.softmax(Ylogits)

numEpochs = 400
batchSize = 30
alpha = 1e-5

with tf.name_scope('cross_entropy'):
    print(Ylogits.shape)
    print(Y.shape)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    loss = tf.reduce_mean(cross_entropy)

with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(learning_rate=alpha).minimize(loss)

#Create Session and initialize variables
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

1 Answer


The tensor shape (?, 30) means the batch size is not fixed, so you can feed batches of any size into the graph. The downside is that this is exactly the kind of problem you can run into, and you have to keep track of the tensor shapes yourself.
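As a minimal sketch of what that dynamic batch dimension means (assuming plain TensorFlow 1.x, as in the question), a (None, 30) placeholder prints as (?, 30) and accepts any batch size, so shape mismatches like yours only surface at sess.run time:

import numpy as np
import tensorflow as tf

# The batch dimension is left as None, so it prints as '?' and is
# only pinned down by whatever array you feed at run time.
p = tf.placeholder(tf.float32, [None, 30])
print(p.shape)  # (?, 30)

s = tf.reduce_sum(p)
with tf.Session() as sess:
    print(sess.run(s, feed_dict={p: np.ones((5, 30))}))   # batch of 5 works
    print(sess.run(s, feed_dict={p: np.ones((30, 30))}))  # so does a batch of 30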

The problem you need to fix: you have 30 images in a batch but only 1 label for the whole batch. You cannot compute a loss for 30 images from a single label, so you either need to reduce the number of images to 1 or increase the label batch size to 30. It is also possible that you are incorrectly reshaping a tensor somewhere.
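You can see the mismatch from the shapes in the question itself: y_train has shape (1161,), so a batch of labels is 30 integers, and reshaping it to (1, 30) hands the graph one row of 30 numbers (a single label vector) while the logits for that batch have shape (30, 30). A quick sketch, assuming y_train holds integer class indices as its shape suggests:

import numpy as np

batchSize, classes = 30, 30
y_batch = np.random.randint(0, classes, size=batchSize)  # stand-in for one batch of y_train

# What the question's feed_dict builds: ONE row of 30 numbers.
print(y_batch.reshape(1, 30).shape)    # (1, 30)  -> labels_size=[1,30]

# What softmax_cross_entropy_with_logits expects: one one-hot row per image.
print(np.eye(classes)[y_batch].shape)  # (30, 30) -> matches logits_size=[30,30]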

I would look at where you read your data and then batch it, since that is most likely where the problem is, or at the places where you reshape it.
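In this code the slicing itself looks fine; it is the reshape inside feed_dict that collapses the label batch. A hedged sketch of a corrected feed (assuming y_train contains integer class indices in [0, classes)), one-hot encoding the labels so Y_ receives the (batchSize, classes) array its (None, 30) placeholder expects:

# Sketch only: one-hot encode the integer label batch before feeding it.
y_batch = np.eye(classes)[y_train[(j*batchSize):((j+1)*batchSize)].astype(int)]

sess.run(train_step, feed_dict={
    X: x_train[(j*batchSize):((j+1)*batchSize), :, :, :],
    Y_: y_batch,       # shape (30, 30): one one-hot row per image
    keepRate1: 0.2,
    keepRate2: 0.5,
})

The same change would apply to the accuracy runs on the training and test batches.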

Posting your whole code would make it easier to help.

Answered 2017-12-14T18:13:44.147