Running TF 1.x code under a TF 2.x environment is not as easy as you might think. While fixing a neural network I found online, I hit several errors and had to revisit how some of the variables are used. Converting the code to TF 2.x is the better idea (easier, and more adaptable).
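If you do want to keep 1.x-style code running on a 2.x install, the usual starting point is the compat shim. A minimal sketch, nothing more; your own graph and variable code still needs auditing:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()   # restores graph mode, Sessions and placeholders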
TF 2.X
import tensorflow as tf

while not done:
    next_obs, reward, done, info = env.step(action)
    env.render()
    # Convert the raw observation array from the environment to a PIL image and back to a float array.
    img = tf.keras.preprocessing.image.array_to_img(
        next_obs,
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    # predict() expects a batch dimension, hence the added leading axis.
    predictions = model_self_1.predict(img_array[None, ...])  ### Prediction
### Training: history_highscores = model_highscores.fit(batched_features, epochs=1, validation_data=dataset.shuffle(10))  # epochs=500  # , callbacks=[cp_callback, tb_callback]
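For the predict()/fit() calls above to work, a compiled tf.keras model has to exist first. Purely as a hypothetical sketch: the layer sizes and the 210x160x3 input shape are assumptions, only the 6-way softmax head mirrors the TF 1.x version below.

import tensorflow as tf

# Hypothetical model; input shape and conv stack are assumptions.
model_self_1 = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(210, 160, 3)),            # assumed raw frame size
    tf.keras.layers.Conv2D(32, 8, strides=4, activation="relu"),
    tf.keras.layers.Conv2D(64, 4, strides=2, activation="relu"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(6, activation="softmax"),          # 6 discrete actions
])
model_self_1.compile(optimizer="adam",
                     loss="sparse_categorical_crossentropy",
                     metrics=["accuracy"])

model_highscores used in the commented-out fit() line can be built the same way; the checkpoint and TensorBoard callbacks attach at fit() time.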
TF 1.X
tf.compat.v1.disable_eager_execution()  # Session/Saver require graph mode under TF 2.x

with tf.compat.v1.Session() as sess:
    saver = tf.compat.v1.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(savedir + '\\invader_001'))
    # One training step on the restored graph.
    train_loss, _ = sess.run([loss, training_op],
                             feed_dict={X: o_obs, y: y_batch, X_action: o_act})
    # Attempt to wrap the graph outputs in a Keras model for prediction.
    for layer in mainQ_outputs:
        model.add(layer)
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(6, activation=tf.nn.softmax))
    predictions = model.predict(obs)  ### Prediction
    ### Training: summ = sess.run(summaries, feed_dict={X: o_obs, y: y_batch, X_action: o_act})
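That Session code only runs under TF 2.x once eager execution is off and the graph pieces referenced in the feed_dict (X, y, X_action, loss, training_op) are rebuilt with the compat.v1 API. A rough sketch under those assumptions; the 88x80x1 input shape and the simple Q-value loss are stand-ins for whatever your original 1.x graph defined:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# Hypothetical placeholders matching the feed_dict keys above; shapes are assumptions.
X = tf.compat.v1.placeholder(tf.float32, shape=(None, 88, 80, 1), name="X")
y = tf.compat.v1.placeholder(tf.float32, shape=(None, 1), name="y")
X_action = tf.compat.v1.placeholder(tf.int32, shape=(None,), name="X_action")

# Example loss/training ops so sess.run([loss, training_op], ...) has something to run.
logits = tf.compat.v1.layers.dense(tf.compat.v1.layers.flatten(X), 6)
Q_action = tf.reduce_sum(logits * tf.one_hot(X_action, 6), axis=1, keepdims=True)
loss = tf.reduce_mean(tf.square(y - Q_action))
training_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(loss)

This is the part that makes staying on the 1.x path painful: every placeholder, loss and optimizer op has to be recreated by hand, which is why converting the whole thing to TF 2.x Keras code is the easier route.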
