
Thanks in advance for any help. I am using TensorFlow 1.15, and after running the code below the optimizer raises an error saying it cannot find any variables to optimize.

All of the data preprocessing is finished before this code runs. Now I am training the model: first I define placeholders that receive their values at run time, then I apply some operations to those placeholders to compute the positive energy and the negative energy, and from these I compute the KBC loss. When I pass the KBC loss to the optimizer, I get the error "ValueError: No variables to optimize."

Why do I get this error before I have even fed values into the placeholders?
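
To rule out the feed_dict, I put together a minimal sketch (hypothetical code, not my actual model), and it raises the same error while the graph is still being built, before any session exists:

    import tensorflow as tf  # TensorFlow 1.15

    # A loss built purely from placeholders: there is no tf.Variable in this graph.
    x = tf.placeholder(tf.float32, shape=[None, 100], name="x")
    loss = tf.reduce_sum(tf.abs(x), axis=1, keepdims=True)

    # minimize() is a graph-construction call: it immediately invokes
    # compute_gradients(), whose var_list defaults to tf.trainable_variables(),
    # and that list is empty here.
    train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
    # -> ValueError: No variables to optimize.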

Please help me figure out what is going wrong here.

Thanks


    with tf.name_scope('input'):
        # Relation embeddings for the positive and the corrupted (negative) triple
        r_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="r_input")
        r_neg_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="r_neg_input")

        # Head entity text embeddings
        h_pos_txt_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="h_pos_txt_input")
        h_neg_txt_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="h_neg_txt_input")

        # Tail entity text embeddings
        t_pos_txt_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="t_pos_txt_input")
        t_neg_txt_input = tf.placeholder(dtype=tf.float32, shape=[None, 100], name="t_neg_txt_input")

    with tf.name_scope('cosine'):

        # L1 energy for the positive and the negative (corrupted) triples
        pos_energy = tf.reduce_sum(tf.abs(h_pos_txt_input + r_input - t_pos_txt_input),
                                   axis=1, keepdims=True, name="pos_energy")
        negative_energy = tf.reduce_sum(tf.abs(h_neg_txt_input + r_neg_input - t_neg_txt_input),
                                        axis=1, keepdims=True, name="negative_energy")

        # Margin-based ranking loss (margin defined in 2nd cell)
        kbc_loss = tf.maximum(pos_energy - negative_energy + margin, 0)
        tf.summary.histogram("loss", kbc_loss)

    optimizer = tf.train.AdamOptimizer(initial_learning_rate).minimize(kbc_loss)  # initial_learning_rate defined in 2nd cell

After this I get the following error:

    ValueError                                Traceback (most recent call last)
    <ipython-input-43-64eddec80472> in <module>
    ----> 1 optimizer = tf.train.AdamOptimizer(initial_learning_rate).minimize(kbc_loss) #initial LR defined in 2nd cell

    ~\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow_core\python\training\optimizer.py in minimize(self, loss, global_step, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, name, grad_loss)
        401         aggregation_method=aggregation_method,
        402         colocate_gradients_with_ops=colocate_gradients_with_ops,
    --> 403         grad_loss=grad_loss)
        404 
        405     vars_with_grad = [v for g, v in grads_and_vars if g is not None]

    ~\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow_core\python\training\optimizer.py in compute_gradients(self, loss, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, grad_loss)
        504     processors = [_get_processor(v) for v in var_list]
        505     if not var_list:
    --> 506       raise ValueError("No variables to optimize.")
        507     var_refs = [p.target() for p in processors]
        508     grads = gradients.gradients(

    ValueError: No variables to optimize.
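
If I read the traceback correctly, compute_gradients() received an empty var_list, which would mean the graph contains no trainable variables at all. This one-liner, run right before the minimize() call, can confirm it:

    # Diagnostic only: expected to print [] when the graph holds only placeholders.
    print(tf.trainable_variables())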

The rest of the code is below:

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver()
    log_file = open(log_file, "w")  # log file path defined in 2nd cell
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:

        sess.run(tf.global_variables_initializer())

        # Load pre-trained weights if available
        if os.path.isfile(best_valid_model_meta_file):  # best_valid_model_meta_file defined in 2nd cell
            print("restore the weights", checkpoint_best_valid_dir)  # checkpoint_best_valid_dir defined in 2nd cell
            saver = tf.train.import_meta_graph(best_valid_model_meta_file)
            saver.restore(sess, tf.train.latest_checkpoint(checkpoint_best_valid_dir))
        else:
            print("no weights to load :(")

        writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

        initial_valid_loss = 100

        for epoch in range(training_epochs):  # training_epochs defined in 2nd cell
            gc.collect()
            np.random.shuffle(training_data)
            training_loss = 0.
            total_batch = int(len(training_data) / batch_size)  # batch_size defined in 2nd cell

            for i in range(total_batch):
                start = i * batch_size
                end = (i + 1) * batch_size

                h_data_txt, r_data, t_data_txt, \
                t_neg_data_txt, h_neg_data_txt, r_neg_data = get_batch_with_neg_heads_and_neg_tails_relation_multimodal(
                    training_data, triples_set, entity_list, start,
                    end, entity_embeddings_txt, relation_embeddings, r_head, head_r, r_tail, tail_r)

                _, loss, summary = sess.run(
                    [optimizer, kbc_loss, summary_op],
                    feed_dict={r_input: r_data,
                               r_neg_input: r_neg_data,
                               h_pos_txt_input: h_data_txt,
                               t_pos_txt_input: t_data_txt,
                               t_neg_txt_input: t_neg_data_txt,
                               h_neg_txt_input: h_neg_data_txt})

                batch_loss = np.sum(loss) / batch_size
                training_loss += batch_loss
                writer.add_summary(summary, epoch * total_batch + i)

            training_loss = training_loss / total_batch

            val_loss = sess.run([kbc_loss],
                                feed_dict={r_input: r_data_valid,
                                           r_neg_input: r_neg_data_valid,
                                           h_pos_txt_input: h_data_valid_txt,
                                           t_pos_txt_input: t_data_valid_txt,
                                           t_neg_txt_input: t_neg_data_valid_txt,
                                           h_neg_txt_input: h_neg_data_valid_txt})

            val_score = np.sum(val_loss) / len(valid_data)

            print("Epoch:", (epoch + 1), "loss=", str(round(training_loss, 4)), "val_loss", str(round(val_score, 4)))

            if val_score < initial_valid_loss:
                saver.save(sess, model_weights_best_valid_file)  # model_weights_best_valid_file defined in 2nd cell
                log_file.write("save model best validation loss: " + str(initial_valid_loss) + "==>" + str(val_score) + "\n")
                print("save model valid loss: ", str(initial_valid_loss), "==>", str(val_score))
                initial_valid_loss = val_score

            saver.save(sess, model_current_weights_file)  # model_current_weights_file defined in 2nd cell

            log_file.write("Epoch:\t" + str(epoch + 1) + "\tloss:\t" + str(round(training_loss, 5)) + "\tval_loss:\t" + str(
                round(val_score, 5)) + "\n")
            log_file.flush()
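
For context on what I suspect: my embeddings (entity_embeddings_txt, relation_embeddings) are precomputed in NumPy and fed in through placeholders, so the loss depends only on placeholders and there is no tf.Variable for Adam to update. A rough sketch of the direction I am considering, with hypothetical names (num_entities, num_relations) and integer-id inputs instead of precomputed vectors:

    # Hypothetical sketch: keep the embeddings inside the graph as trainable
    # variables and feed integer ids instead of embedding vectors.
    ent_emb = tf.get_variable("ent_emb", shape=[num_entities, 100])  # trainable by default
    rel_emb = tf.get_variable("rel_emb", shape=[num_relations, 100])

    h_ids = tf.placeholder(tf.int32, shape=[None], name="h_ids")
    r_ids = tf.placeholder(tf.int32, shape=[None], name="r_ids")
    t_ids = tf.placeholder(tf.int32, shape=[None], name="t_ids")

    h = tf.nn.embedding_lookup(ent_emb, h_ids)
    r = tf.nn.embedding_lookup(rel_emb, r_ids)
    t = tf.nn.embedding_lookup(ent_emb, t_ids)

    pos_energy = tf.reduce_sum(tf.abs(h + r - t), axis=1, keepdims=True)
    # With trainable variables in the graph, tf.trainable_variables() is
    # non-empty and minimize() no longer raises the ValueError.
    train_op = tf.train.AdamOptimizer(initial_learning_rate).minimize(pos_energy)

Is this the right way to make the model actually trainable, or is there something else I am missing?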