with tf.variable_scope("rnn_seq2seq"):
    w = tf.get_variable("proj_w", [num_units, seq_width])
    w_t = tf.transpose(w)
    b = tf.get_variable("proj_b", [seq_width])
    output_projection = (w, b)
    output, state = rnn_seq2seq(enc_inputs, dec_inputs, cell,
                                output_projection=output_projection,
                                feed_previous=False)
    weights = [tf.ones([batch_size * dec_steps])]
    loss = []
    for i in xrange(dec_steps - 1):
        logits = tf.nn.xw_plus_b(output[i], output_projection[0], output_projection[1])
If I introduce one-hot encoding on the logits at this point, the program raises an error later on, even though both tensors have the same shape (my guess at why is sketched after the printouts below). If I comment the line out, the program runs without any error.
        prev = logits
        # hard one-hot: 1.0 at each row's maximum, 0.0 everywhere else
        logits = tf.to_float(tf.equal(prev, tf.reduce_max(prev, reduction_indices=[1], keep_dims=True)))
        print prev
        print logits
Tensor("rnn_seq2seq/xw_plus_b:0", shape=TensorShape([Dimension(800), Dimension(14)]), dtype=float32)
Tensor("rnn_seq2seq/ToFloat:0", shape=TensorShape([Dimension(800), Dimension(14)]), dtype=float32)
The rest of the code:
        crossent = tf.nn.softmax_cross_entropy_with_logits(
            logits, dec_inputs[i + 1],
            name="SequenceLoss/CrossEntropy{0}".format(i))
        loss.append(crossent)

    cost = tf.reduce_sum(tf.add_n(loss))
    final_state = state[-1]
    tvars = tf.trainable_variables()
    grads, norm = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    lr = tf.Variable(0.0, name="learningRate")
    optimizer = tf.train.GradientDescentOptimizer(lr)
    train_op = optimizer.apply_gradients(zip(grads, tvars))
---> 23 grads, norm = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)

ValueError: List argument 'values' to 'Pack' Op with length 0 shorter than minimum length 1.
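If the gradients really are all None, tf.clip_by_global_norm would be left with an empty list of values to pack, which would explain the Pack error above. One workaround I am considering (a straight-through trick, my own sketch, not something taken from the error message or the docs) is to keep the one-hot values in the forward pass while letting gradients flow through the soft logits via tf.stop_gradient:

        prev = logits
        one_hot = tf.to_float(tf.equal(prev, tf.reduce_max(prev, reduction_indices=[1], keep_dims=True)))
        # forward value equals one_hot, but the gradient w.r.t. prev is the identity
        logits = prev + tf.stop_gradient(one_hot - prev)

Is that the right diagnosis, and is there a cleaner way to feed a hard one-hot into the loss while keeping the model trainable?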