Here is my implementation of the backpropagation (BP) algorithm. I tested it, and after training the outputs are not correct. So where am I making a mistake?
double OpenNNL::_changeWeightsByBP(double * trainingInputs, double * trainingOutputs, double speed, double sample_weight)
{
    double * localGradients = new double[_neuronsCount];
    double * outputs = new double[_neuronsCount];
    double * derivatives = new double[_neuronsCount];

    // forward pass: every neuron's output and activation derivative
    calculateNeuronsOutputsAndDerivatives(trainingInputs, outputs, derivatives);

    // output layer: local gradient is the raw error (target - actual)
    for(int j = 0; j < _neuronsPerLayerCount[_layersCount-1]; j++)
    {
        localGradients[indexByLayerAndNeuron(_layersCount-1, j)] =
            trainingOutputs[j] - outputs[indexByLayerAndNeuron(_layersCount-1, j)];
    }

    // hidden layers: accumulate the weighted gradients from the next layer
    if(_layersCount > 1)
    {
        for(int i = _layersCount-2; i >= 0; i--)
        {
            for(int j = 0; j < _neuronsPerLayerCount[i]; j++)
            {
                localGradients[indexByLayerAndNeuron(i, j)] = 0;

                for(int k = 0; k < _neuronsPerLayerCount[i+1]; k++)
                {
                    localGradients[indexByLayerAndNeuron(i, j)] +=
                        _neuronsInputsWeights[indexByLayerNeuronAndInput(i+1, k, j)]
                        * localGradients[indexByLayerAndNeuron(i+1, k)];
                }
            }
        }
    }

    // first layer: update weights using the training inputs
    for(int j = 0; j < _neuronsPerLayerCount[0]; j++)
    {
        for(int k = 0; k < _inputsCount; k++)
        {
            _neuronsInputsWeights[indexByLayerNeuronAndInput(0, j, k)] +=
                speed * localGradients[indexByLayerAndNeuron(0, j)]
                * derivatives[indexByLayerAndNeuron(0, j)] * trainingInputs[k];
        }
    }

    // remaining layers: update weights; here I multiply by the current neuron's own output
    for(int i = 1; i < _layersCount; i++)
    {
        for(int j = 0; j < _neuronsPerLayerCount[i]; j++)
        {
            for(int k = 0; k < _neuronsPerLayerCount[i-1]; k++)
            {
                _neuronsInputsWeights[indexByLayerNeuronAndInput(i, j, k)] +=
                    speed * localGradients[indexByLayerAndNeuron(i, j)]
                    * derivatives[indexByLayerAndNeuron(i, j)]
                    * outputs[indexByLayerAndNeuron(i, j)];
            }
        }
    }

    delete[] localGradients;
    delete[] outputs;
    delete[] derivatives;
}
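For context, this is roughly how I drive the training. The loop below is only a simplified sketch; `trainBP`, `sampleInputs`, `sampleTargets` and `samplesCount` are placeholder names rather than my actual ones:

void OpenNNL::trainBP(double ** sampleInputs, double ** sampleTargets,
                      int samplesCount, double speed)
{
    // one epoch: apply the BP update once per training sample
    for(int s = 0; s < samplesCount; s++)
    {
        // every sample currently gets the same weight of 1.0
        _changeWeightsByBP(sampleInputs[s], sampleTargets[s], speed, 1.0);
    }
}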
Also, how should I compute the error of the network's output so that I can stop the training process?
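Is a mean squared error check after each epoch the usual way to do this? Below is only a sketch I put together from the same helpers used above; `_sampleError` is a made-up name, and I would average this value over all samples and stop once it drops below a chosen threshold:

double OpenNNL::_sampleError(double * trainingInputs, double * trainingOutputs)
{
    double * outputs = new double[_neuronsCount];
    double * derivatives = new double[_neuronsCount];

    // reuse the existing forward pass to get the network outputs
    calculateNeuronsOutputsAndDerivatives(trainingInputs, outputs, derivatives);

    // squared error over the output layer only
    double error = 0.0;
    int outputNeurons = _neuronsPerLayerCount[_layersCount-1];
    for(int j = 0; j < outputNeurons; j++)
    {
        double diff = trainingOutputs[j]
                      - outputs[indexByLayerAndNeuron(_layersCount-1, j)];
        error += diff * diff;
    }

    delete[] outputs;
    delete[] derivatives;
    return error / outputNeurons;
}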
And how should I update the neurons' biases?
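My guess is that a bias behaves like a weight attached to a constant input of 1.0, so it would get the same update rule as the other weights, placed inside _changeWeightsByBP right after the weight-update loops. This is only a fragment, and `_neuronsBiases` is a made-up member name, not necessarily how the biases are stored in my code:

// bias update sketch: same delta rule, but with a constant input of 1.0
for(int i = 0; i < _layersCount; i++)
{
    for(int j = 0; j < _neuronsPerLayerCount[i]; j++)
    {
        _neuronsBiases[indexByLayerAndNeuron(i, j)] +=
            speed * localGradients[indexByLayerAndNeuron(i, j)]
            * derivatives[indexByLayerAndNeuron(i, j)];
    }
}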
If you need it, my full code is here: https://github.com/NicholasShatokhin/OpenNNL.