I wrote a single-layer RNN using an LSTM in TensorFlow (version 1.5) with Python (version 3.6). I want to add 3 hidden layers to this RNN (i.e. one input layer, one output layer, and three hidden layers). I have read about cell state, stacking, unstacking, and so on, but I am still confused about how to put these pieces together and upgrade my code. Below is my code for the single-layer RNN, followed by a sketch of what I think the stacked version might look like. Could you please help me upgrade the code? (Note: I am very new to TensorFlow and Python :))

```python
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import sklearn.metrics as sm
inputs = 12      # number of input columns
num_hidden = 800 # number of units in the recurrent layer
outputs = 1      # number of output columns
num_epochs = 100
batch_size = 80
learning_rate = 0.00001
# Training data
input1 = []
output1 = []
with open('train1_leading.csv', 'r') as csv_f:  # the with block closes the file automatically
    data = csv.reader(csv_f)
    for row in data:
        input1.append(row[0:inputs])  # first `inputs` columns are features
        output1.append(row[inputs])   # last column is the target
# Convert the string fields read from the CSV to floats
input11 = [[float(input1[i][j]) for j in range(inputs)] for i in range(len(input1))]
output1 = [float(x) for x in output1]
input2 = np.array(input11)
output2 = np.array(output1)
# Trim to a whole number of batches and reshape to (num_batches, batch_size, features)
x_data = input2[:(len(input2) - (len(input2) % batch_size))]
x_batches = x_data.reshape(-1, batch_size, inputs)
y_data = output2[:(len(output2) - (len(output2) % batch_size))]
y_batches = y_data.reshape(-1, batch_size, outputs)
# Testing data
inputt = []
outputt = []
with open('valid1_leading.csv', 'r') as csv_f:
    data = csv.reader(csv_f)
    for row in data:
        inputt.append(row[0:inputs])
        outputt.append(row[inputs])
inputtt = [[float(inputt[i][j]) for j in range(inputs)] for i in range(len(inputt))]
outputt = [float(x) for x in outputt]
inputt1 = np.array(inputtt)
outputt1 = np.array(outputt)  # renamed from output1 so the training labels are not overwritten
# Use the same rows for X_test and Y_test so inputs and targets stay aligned
X_test = inputt1[:batch_size].reshape(-1, batch_size, inputs)
Y_test = outputt1[:batch_size].reshape(-1, batch_size, outputs)
# Configure RNN
tf.reset_default_graph()  # reset the default graph
X = tf.placeholder(tf.float32, [None, batch_size, inputs])   # input placeholder
Y = tf.placeholder(tf.float32, [None, batch_size, outputs])  # target placeholder
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=num_hidden, activation=tf.nn.softsign)  # the single recurrent layer
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
stacked_rnn_output = tf.reshape(rnn_output, [-1, num_hidden])       # flatten to 2-D for the dense layer
weight = tf.Variable(tf.random_normal([num_hidden, outputs]))
bias = tf.Variable(tf.random_normal([outputs]))
stacked_outputs = tf.matmul(stacked_rnn_output, weight) + bias      # linear output layer
outputRNN = tf.reshape(stacked_outputs, [-1, batch_size, outputs])  # predictions
loss = tf.losses.mean_squared_error(Y, outputRNN)  # cost function (labels first, then predictions)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all the variables
    for ep in range(num_epochs):
        sess.run(training_op, feed_dict={X: x_batches, Y: y_batches})
        mse = loss.eval(feed_dict={X: x_batches, Y: y_batches})
        print(ep, "\tMSE:", mse)
    y_pred = sess.run(outputRNN, feed_dict={X: X_test})
plt.title("Forecast vs Actual", fontsize=14)
plt.plot(pd.Series(np.ravel(Y_test)), "b", markersize=10, label="Actual")
plt.plot(pd.Series(np.ravel(y_pred)), "r", markersize=10, label="Forecast")
plt.legend(loc="upper left")
plt.xlabel("Time Periods")
plt.show()
tt = sm.mean_squared_error(np.ravel(Y_test), np.ravel(y_pred))
print('MSE of Test data', tt)
```
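
Based on what I have read so far, here is a minimal sketch of what I think the stacked version might look like, using `tf.contrib.rnn.MultiRNNCell`. The `num_layers` constant and the `make_cell` helper are names I made up for illustration, not part of my code above. Is this the right direction?

```python
import tensorflow as tf

inputs = 12        # same dimensions as in the listing above
num_hidden = 800
batch_size = 80
num_layers = 3     # illustrative: number of hidden layers to stack

X = tf.placeholder(tf.float32, [None, batch_size, inputs])

# Each layer needs its own cell object; reusing a single cell
# instance across all layers does not work in TF 1.x.
def make_cell():
    # for a true LSTM stack, return tf.contrib.rnn.BasicLSTMCell(num_hidden) instead
    return tf.contrib.rnn.BasicRNNCell(num_units=num_hidden,
                                       activation=tf.nn.softsign)

stacked_cell = tf.contrib.rnn.MultiRNNCell(
    [make_cell() for _ in range(num_layers)])

# dynamic_rnn accepts any RNNCell, so the rest of the graph
# (reshape, dense output layer, loss, optimizer) stays the same;
# `states` is now a tuple with one entry per layer.
rnn_output, states = tf.nn.dynamic_rnn(stacked_cell, X, dtype=tf.float32)
```

If this is correct, then only the cell construction changes compared with my listing; the placeholders, the dense output layer, the loss, and the training loop would stay exactly as they are.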