I'm new to Python and neural networks. I've been trying to fix an exception raised by the code below, but I haven't been able to track down the cause. How can I correct it?
Note that the code was downloaded from GitHub. It first simulates the Lorenz system to produce nn_input and nn_output, and then trains a neural network on those inputs and outputs.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from matplotlib import rcParams
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D
from keras import optimizers
from keras.layers import Activation
from keras.utils.generic_utils import get_custom_objects
from keras import backend as K
## Simulate the Lorenz System
dt = 0.01
T = 8
t = np.arange(0,T+dt,dt)
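# standard chaotic-regime Lorenz parameters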
beta = 8/3
sigma = 10
rho = 28
nn_input = np.zeros((100*(len(t)-1),3))
nn_output = np.zeros_like(nn_input)
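# nn_input / nn_output will hold one-step-ahead training pairs: each row of
# nn_output is the Lorenz state one time step dt after the corresponding row
# of nn_input, collected from 100 trajectories of len(t)-1 steps each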
fig,ax = plt.subplots(1,1,subplot_kw={'projection': '3d'})
def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho):
    x, y, z = x_y_z
    return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
np.random.seed(123)
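# 100 random initial conditions drawn uniformly from [-15, 15]^3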
x0 = -15 + 30 * np.random.random((100, 3))
x_t = np.asarray([integrate.odeint(lorenz_deriv, x0_j, t)
                  for x0_j in x0])
for j in range(100):
    nn_input[j*(len(t)-1):(j+1)*(len(t)-1),:] = x_t[j,:-1,:]
    nn_output[j*(len(t)-1):(j+1)*(len(t)-1),:] = x_t[j,1:,:]
    x, y, z = x_t[j,:,:].T
    ax.plot(x, y, z, linewidth=1)
    ax.scatter(x0[j,0], x0[j,1], x0[j,2], color='r')
    plt.plot(x, t)
ax.view_init(18, -113)
plt.show()
## Neural Net
# Define activation functions
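# (logsig, radbas and purelin mirror the names of MATLAB's transfer functions)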
def logsig(x):
    return K.variable(np.divide(1,(1+np.exp(-K.eval(x)))))
def radbas(x):
    return K.variable(np.exp(-np.power(K.eval(x),2)))
def purelin(x):
    return x
#create model
model = Sequential()
#add model layers
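# (three fully connected layers mapping the 3-D state to a 3-D next-state prediction)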
model.add(Dense(3, activation=logsig))
model.add(Dense(3, activation=radbas))
model.add(Dense(3, activation=purelin))
sgd_optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.01, momentum=0.9)
#sgd_optimizer = optimizers.SGD(momentum=0.9)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy')
#model.summary()
model.fit(nn_input, nn_output, epochs=30)
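For comparison, here is a minimal sketch of the network part written with symbolic tf.keras operations only, assuming TensorFlow 2.x and that nn_input / nn_output are the arrays built above. Two things in the original are common sources of exceptions: the custom activations call K.eval() on a symbolic tensor, which is not allowed while Keras is building the model, and 'categorical_crossentropy' is a classification loss while the targets here are continuous next-step states, for which a regression loss such as 'mse' is the usual choice. Mixing `from keras ...` with `tensorflow.keras` imports (including the unused `keras.utils.generic_utils` import) can also fail depending on the installed versions. This is only a sketch, not a drop-in reproduction of the GitHub code:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# same activation shapes as the original, but written as symbolic element-wise ops
def logsig(x):
    return tf.math.sigmoid(x)               # 1 / (1 + exp(-x))

def radbas(x):
    return tf.math.exp(-tf.math.square(x))  # exp(-x^2)

def purelin(x):
    return x                                 # linear / identity activation

# same 3-3-3 architecture as the original model
model = Sequential([
    tf.keras.Input(shape=(3,)),
    Dense(3, activation=logsig),
    Dense(3, activation=radbas),
    Dense(3, activation=purelin),
])

sgd_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=sgd_optimizer, loss='mse')  # regression loss for continuous targets
model.fit(nn_input, nn_output, epochs=30)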