I am trying to implement a neural network in Python with numpy. The problem is that when I train the network, the error gets stuck around 0.5 and it cannot learn any further. I have tried learning rates of 0.001 and 1. I think I am doing something wrong during backpropagation, but I have not been able to figure out what.

P.S. I ran into a lot of overflow problems, which is why I started using np.clip().

Here is my backpropagation code:
# z2 is softmax output
def calculateBackpropagation(self, z1, z2, y):
    delta3 = z2
    delta3[range(self.numOfSamples), y] -= 1
    dW2 = (np.transpose(z1)).dot(delta3)
    db2 = np.sum(delta3, axis=0, keepdims=True)
    delta2 = delta3.dot(np.transpose(self.W2)) * ActivationFunction.DRELU(z1)
    dW1 = np.dot(np.transpose(self.train_data), delta2)
    db1 = np.sum(delta2, axis=0)
    self.W1 += -self.alpha * dW1
    self.b1 += -self.alpha * db1
    self.W2 += -self.alpha * dW2
    self.b2 += -self.alpha * db2
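
# (delta3 above ends up as z2 - one_hot(y), which is the usual gradient of the
#  cross-entropy loss with respect to the pre-softmax scores p2.)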

# RELU can be approximated with soft max function
# so the derivative of this function is g(x) = log(1+exp(x))
# Source: https://imiloainf.wordpress.com/2013/11/06/rectifier-nonlinearities/
@staticmethod
def DRELU(x):
    x = np.clip( x, -500, 500 )
    return np.log(1 + np.exp(x))
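
# (Side note on the comment above: log(1 + exp(x)) is the softplus function.
#  Softplus is itself a smooth approximation of ReLU, and the derivative of
#  softplus is the logistic sigmoid 1 / (1 + exp(-x)); the derivative of plain
#  ReLU is 0 for x < 0 and 1 for x > 0.)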

def softmax(self, x):
    """Compute softmax values for each set of scores in x."""
    x = np.clip( x, -500, 500 )
    e = np.exp(x)
    return e / np.sum(e, axis=1, keepdims=True)
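
By the way, since softmax does not change when the same constant is subtracted from every score in a row, I think the overflow could also be avoided without np.clip() by shifting each row by its max before exponentiating. A rough, untested sketch of what I mean (the function name is just for illustration):

import numpy as np

def softmax_stable(x):
    # softmax(x) == softmax(x - c) for any per-row constant c, so shifting by the
    # row max makes the largest exponent 0 and exp() cannot overflow
    shifted = x - np.max(x, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)

And here is the rest of the code: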
def train(self):
    X = self.train_data
    Y = self.train_labels
    (row, col) = np.shape(self.train_data)
    for i in xrange(self.ephocs):
        [p1, z1, p2, z2] = self.feedForward(X)
        probs = z2
        self.backPropagate(X, Y, z1, probs)
        self.learning_rate = self.learning_rate * (self.learning_rate / (self.learning_rate + (self.learning_rate * self.rate_decay)))
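        # (The line above algebraically reduces to
        #  learning_rate = learning_rate / (1 + rate_decay),
        #  i.e. the same decay factor is applied every epoch.)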

def feedForward(self, X):
    p1 = X.dot(self.W1) + self.b1
    z1 = self.neuron(p1)
    p2 = z1.dot(self.W2) + self.b2
    # z2 = self.neuron(p2)
    z2 = self.softmax(p2)
    return [p1, z1, p2, z2]

def predict(self, X):
    [p1, z1, p2, z2] = self.feedForward(X)
    return np.argmax(z2, axis=1)

# Calculates the cross-entropy loss
# P.S. In some cases the true distribution is unknown, so the cross-entropy cannot
# be calculated directly; hence, I use the cross-entropy estimation formula from Wikipedia:
# https://en.wikipedia.org/wiki/Cross_entropy
def calculateLoss(self, x):
    [p1, z1, p2, z2] = self.feedForward(x)
    softmax_probs = self.softmax(p2)
    # Calculates the estimated loss based on the wiki formula
    return np.sum(-np.log(softmax_probs[range(self.numOfSamples), self.train_labels]))
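
# (For reference, the Wikipedia estimate is H(T, q) ≈ -(1/N) * sum_i log q(x_i);
#  the sum returned above omits the 1/N averaging factor, so it is N times that
#  estimate.)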

def neuron(self, p):
    return ActivationFunction.RELU(p)

def CreateRandomW(self, row, col):
    return np.random.uniform(low=-1.0, high=1.0, size=(row, col))

def normalizeData(self, rawpoints, high=255.0, low=0.0):
    return (rawpoints/128.0) - 1

@staticmethod
def RELU(x):
    # x = np.clip( x, -1, 1 )
    x = np.clip( x, -500, 500 )
    return np.maximum(0.001, x)
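
# (Note: np.maximum(0.001, x) floors every output at 0.001 rather than at 0;
#  plain ReLU is np.maximum(0, x), and a leaky ReLU would be np.maximum(0.001 * x, x).)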