1

我正在使用二进制分类进行我的第一个神经网络,但是当我尝试使用以下方法评估模型时出现错误:

correct = tf.nn.in_top_k(logits,y,1)

在哪里

  • logits 张量是预测值:形状 [batch_size = 52, num_classes = 1],类型为 float32
  • y 张量是目标值:形状 [batch_size = 52],类型为 int32

我收到了这个错误:

targets[1] is out of range
     [[{{node in_top_k/InTopKV2}}]]

经过一段时间的调试,我了解到我的张量 y 的值必须 <= 到 num_classes,因此张量 y 的第一个值等于 1 被认为超出范围,即使参数 num_classes = 1 也是如此。

我怎样才能让张量 y 的值允许等于 num_classes,而不是必须严格小于它?还是有其他方法?

在我看来,num_classes 应该等于 1,因为它是二进制分类,因此需要 1 个神经元输出。


编辑 这是我的完整代码

import tensorflow as tf
n_inputs = 28 
n_hidden1 = 15
n_hidden2 = 5
n_outputs = 1
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") 
y = tf.placeholder(tf.int32, shape=(None), name="y")   #None => any
def neuron_layer(X, n_neurons, name, activation=None):
    with tf.name_scope(name):
        n_inputs = int(X.shape[1])
        stddev = 2 / np.sqrt(n_inputs) 
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) #matrice n_inputs x n_neurons values proche de 0    
        W = tf.Variable(init,name="kernel")  #weights random
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        tf.cast(Z,tf.int32)
        if activation is not None:
            return activation(Z)
        else:
            return Z
def to_one_hot(y):
    n_classes = y.max() + 1
    m = len(y)
    Y_one_hot = np.zeros((m, n_classes))
    Y_one_hot[np.arange(m), y] = 1
    return Y_one_hot
hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
                           activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
                           activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
xentropy = tf.keras.backend.binary_crossentropy(tf.to_float(y),logits) 
loss = tf.reduce_mean(xentropy)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits,y,1)
labels_max = tf.reduce_max(y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
def shuffle_batch(X, y, batch_size):  #Homogeneisation et decoupage en paquets(n_batches)
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch


with tf.Session() as sess:
    init.run()
    X_temp,Y_temp = X_batch,y_batch
    feed_dict={X: X_batch, y: y_batch}
    print("feed",feed_dict)
    print("\n y_batch :",y_batch,y_batch.dtype)
    print("\n X_batch :",X_batch,X_batch.dtype,X_batch.shape)

    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, Y_train, batch_size):
            y_batch=y_batch.astype(np.int32)
            X_batch=X_batch.astype(np.float32)
            sess.run(training_op,feed_dict={X: X_batch, y: y_batch})
        #acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        #acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        #print(epoch, "Batch accuracy:", acc_batch, "Val accuracy:", acc_val)
    save_path = saver.save(sess, "./my_model_final.ckpt")
    #some tests
    print("y eval :",y.eval(feed_dict={X:X_temp,y:Y_temp}).shape)

    y_one_hot=to_one_hot(y.eval(feed_dict={X:X_temp,y:Y_temp}))
    print("y_one_hot :",y_one_hot.shape)

    print("logits eval : ",logits.eval(feed_dict={X:X_temp,y:Y_temp}))
    #print(correct.eval(feed_dict={X:X_temp,y:Y_temp}))
    print(labels_max.eval(feed_dict={X:X_temp,y:Y_temp}))
4

1 回答 1

1

根据此处的文档,tf.nn.in_top_k(predictions, targets, k)有参数:

  • predictions: float32 类型的张量。batch_size x classes张量。
  • targets: 张量。必须是以下类型之一:int32、int64。类 id的batch_size向量。
  • k: 一个整数。要查看计算精度的顶级元素的数量。

当您执行二进制分类时,即有两个类别,因此在您的情况下 logits 张量的形状应该是 (52, 2),而 y 的形状应该是 (52,)。这里,logits 基本上是 one-hot 编码的张量。这就是您遇到错误的原因。

考虑下面的例子:

示例 1

res = tf.nn.in_top_k([[0,1], [1,0], [0,1], [1, 0], [0, 1]], [0, 1, 1, 1, 1], 1)

这里 logits 的形状是 (5, 2),而 y 的形状是 (5,)。如果你执行 tf.reduce_max(y),你会得到 1,小于类别的数量,因此没有问题。

这将正常工作并输出[False False True False True]

示例 2

res = tf.nn.in_top_k([[0,1], [1,0], [0,1], [1, 0], [0, 1]], [0, 2, 1, 1, 1], 1)

如果你执行 tf.reduce_max(y),你会得到 2,等于类别的数量。这将引发错误:InvalidArgumentError: targets[1] is out of range

编辑:在上面的代码中,进行以下修改:

  • 将 n_outputs = 1 更改为 n_outputs = 2
  • 将 sess.run(training_op,feed_dict={X: X_batch, y: y_batch}) 更改为 _, cost, acc = sess.run([training_op, loss, accuracy], feed_dict={X: X_batch, y: to_one_hot(y_batch)})
  • 将 correct = tf.nn.in_top_k(logits, y, 1) 更改为 correct = tf.nn.in_top_k(logits, tf.argmax(y, 1), 1)

代码(使用随机数据):

n_inputs = 28 
n_hidden1 = 15
n_hidden2 = 5
n_outputs = 2

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") 
y = tf.placeholder(tf.int32, shape=(None, 2), name="y")   #None => any

def neuron_layer(X, n_neurons, name, activation=None):
    with tf.name_scope(name):
        n_inputs = int(X.shape[1])
        stddev = 2 / np.sqrt(n_inputs) 
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) #matrice n_inputs x n_neurons values proche de 0    
        W = tf.Variable(init,name="kernel")  #weights random
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        tf.cast(Z,tf.int32)
        if activation is not None:
            return activation(Z)
        else:
            return Z

def to_one_hot(y):
    n_classes = y.max() + 1
    m = len(y)
    Y_one_hot = np.zeros((m, n_classes))
    Y_one_hot[np.arange(m), y] = 1
    return Y_one_hot

hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
                           activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
                           activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
xentropy = tf.keras.backend.binary_crossentropy(tf.to_float(y),logits) 
loss = tf.reduce_mean(xentropy)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits,tf.argmax(y, 1),1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 1

X_train = np.random.rand(100, 28)
X_train = X_train.astype(np.float32)

Y_train = np.random.randint(low = 0, high = 2, size = 100, dtype=np.int32) 

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        _, cost, corr, acc = sess.run([training_op, loss, correct, accuracy], feed_dict={X: X_train, y: to_one_hot(Y_train)})
        print(corr)
        print('Loss: {} Accuracy: {}'.format(cost, acc))
于 2019-07-22T17:50:29.890 回答