我最初基于在线教程构建了一个仅 numpy 的神经网络,并且已经意识到我应该有某种偏置神经元。但是,我真的一直在努力弄清楚如何在我的代码中实现它,并且非常感谢一些指导。
import numpy as np
class NN():
    """Fully connected feed-forward neural network using only numpy.

    Weight matrices live in self.L (a dict keyed 1..self.layers) and the
    matching bias vectors in self.B.  With bias=False (the default) every
    bias is a zero vector, so the forward pass is numerically identical
    to the original bias-free implementation.
    """

    def __init__(self, layers, type, bias=False):
        """
        layers: a list of layer sizes, e.g.
            2 input neurons
            1 hidden layer of 3 neurons
            2 output neurons
            will look like [2, 3, 2]
        type: initialisation type, "random" or "uniform" distribution.
            Both draw weights in [-1, 1); "random" keeps the original
            centre-and-scale construction from a [0, 1) sample.
        bias: when True, each layer also gets a bias vector initialised
            from the same distribution as its weights; when False
            (default) biases are zero, preserving the original behaviour.

        Raises:
            ValueError: if type is not "random" or "uniform".
        """
        self.p = 0.1  # sigmoid "temperature" (steepness divisor)
        self.layers = len(layers) - 1           # number of weight layers
        self.inputSize = layers[0]
        self.outputSize = layers[self.layers]
        self.layerSizes = layers[:-1]           # input + hidden sizes (fan-in of each matrix)
        self.inputs = np.zeros(self.inputSize, dtype=float)
        self.outputs = np.zeros(self.outputSize, dtype=float)
        self.use_bias = bool(bias)
        self.L = {}  # weight matrices, keyed 1..self.layers
        self.B = {}  # bias vectors, keyed 1..self.layers

        if type not in ("random", "uniform"):
            # Fail loudly instead of printing and leaving an unusable object.
            raise ValueError("unknown initialization type")

        for i in range(1, self.layers + 1):
            fan_in = self.layerSizes[i - 1]
            # The last matrix maps into the output layer.
            fan_out = self.layerSizes[i] if i < self.layers else self.outputSize
            if type == "random":
                # NOTE: np.random.ranf and np.float were removed from modern
                # numpy; random_sample + builtin float are the supported
                # equivalents.  Resulting values land in [-1, 1).
                self.L[i] = (np.random.random_sample((fan_in, fan_out)).astype(float) - 0.5) * 2
                b = (np.random.random_sample(fan_out).astype(float) - 0.5) * 2
            else:  # "uniform"
                self.L[i] = np.random.uniform(-1, 1, (fan_in, fan_out))
                b = np.random.uniform(-1, 1, fan_out)
            # Zero bias keeps the forward pass identical to the bias-free net.
            self.B[i] = b if self.use_bias else np.zeros(fan_out, dtype=float)

    def updateS(self):
        """Forward propagation with sigmoid activations.

        Reads self.inputs and stores the final activation in self.outputs,
        rescaled from (0, 1) to (-1, 1).  Hidden-layer activations are kept
        in self.temp, matching the original attribute layout.  The loop
        shape also covers the no-hidden-layer case ([in, out]) without a
        special branch.
        """
        activation = self.inputs
        for i in range(1, self.layers + 1):
            # B[i] is all zeros unless the net was built with bias=True.
            self.z = np.dot(activation, self.L[i]) + self.B[i]
            if i == self.layers:  # output layer: rescale sigmoid to (-1, 1)
                self.outputs = (self.sigmoid(self.z) - 0.5) * 2
            else:                 # hidden layer: feed activation forward
                self.temp = self.sigmoid(self.z)
                activation = self.temp

    def sigmoid(self, s):
        """Logistic activation 1 / (1 + exp(-s / p)); small p sharpens the curve."""
        return 1 / (1 + np.exp(-s / self.p))