public double Train(ref double[] input, ref double[] desired, double trainingRate, double momentum)
{
    // Parameter validation
    if (input.Length != inputSize)
        throw new ArgumentException("Invalid input parameter", "input");
    if (desired.Length != layerSize[layerCount - 1])
        throw new ArgumentException("Invalid input parameter", "desired");

    // Local variables
    double error = 0.0, sum = 0.0, weightDelta = 0.0, biasDelta = 0.0;
    double[] output = new double[layerSize[layerCount - 1]];

    // Run the network forward to get the current outputs
    Run(ref input, out output);

    // Back-propagate the error, starting from the output layer
    for (int l = layerCount - 1; l >= 0; l--)
    {
        if (l == layerCount - 1) // Output layer
        {
            for (int k = 0; k < layerSize[l]; k++)
            {
                // delta = (output - target) * f'(net input); error accumulates the squared difference
                delta[l][k] = output[k] - desired[k];
                error += Math.Pow(delta[l][k], 2);
                delta[l][k] *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][k]);
            }
        }
        else // Hidden layer
        {
            for (int i = 0; i < layerSize[l]; i++)
            {
                // Propagate the deltas of the next layer back through its weights
                sum = 0.0;
                for (int j = 0; j < layerSize[l + 1]; j++)
                    sum += weight[l + 1][i][j] * delta[l + 1][j];

                sum *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][i]);
                delta[l][i] = sum;
            }
        }
    }

    // Update the weights (gradient step plus a momentum term from the previous update)
    for (int l = 0; l < layerCount; l++)
        for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
            for (int j = 0; j < layerSize[l]; j++)
            {
                weightDelta = trainingRate * delta[l][j] * (l == 0 ? input[i] : layerOutput[l - 1][i]);
                weight[l][i][j] -= weightDelta + momentum * previousWeightDelta[l][i][j];
                previousWeightDelta[l][i][j] = weightDelta;
            }

    // Update the biases the same way
    for (int l = 0; l < layerCount; l++)
        for (int i = 0; i < layerSize[l]; i++)
        {
            biasDelta = trainingRate * delta[l][i];
            bias[l][i] -= biasDelta + momentum * previousBiasDelta[l][i];
            previousBiasDelta[l][i] = biasDelta;
        }

    return error;
}
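The Train method relies on a TransferFunctions helper that this section does not show. As a point of reference, a minimal sketch of what Evaluate and EvaluateDerivative could look like for a sigmoid transfer function follows; the TransferFunction enum values and the switch-based dispatch here are assumptions, not the original class.

using System;

public enum TransferFunction
{
    None,
    Sigmoid
}

public static class TransferFunctions
{
    // Transfer function evaluated at the pre-activation input x.
    public static double Evaluate(TransferFunction tFunc, double x)
    {
        switch (tFunc)
        {
            case TransferFunction.Sigmoid:
                return 1.0 / (1.0 + Math.Exp(-x));
            default:
                return 0.0;
        }
    }

    // Derivative of the transfer function at x; for the sigmoid,
    // f'(x) = f(x) * (1 - f(x)).
    public static double EvaluateDerivative(TransferFunction tFunc, double x)
    {
        switch (tFunc)
        {
            case TransferFunction.Sigmoid:
                double s = 1.0 / (1.0 + Math.Exp(-x));
                return s * (1.0 - s);
            default:
                return 0.0;
        }
    }
}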
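A call site would typically present each training pair in a loop and stop once the summed squared error returned by Train drops below a threshold. The sketch below assumes a BackPropagationNetwork class wrapping the method above; the class name, constructor signature, the XOR data, and the learning-rate/momentum values are illustrative assumptions, not from the source.

// Hypothetical driver: only the Train(ref input, ref desired, rate, momentum)
// signature comes from the method above; the constructor is assumed.
BackPropagationNetwork network = new BackPropagationNetwork(new int[] { 2, 2, 1 });

double[][] inputs =
{
    new double[] { 0.0, 0.0 },
    new double[] { 0.0, 1.0 },
    new double[] { 1.0, 0.0 },
    new double[] { 1.0, 1.0 }
};
double[][] targets =
{
    new double[] { 0.0 },
    new double[] { 1.0 },
    new double[] { 1.0 },
    new double[] { 0.0 }
};

double error;
int epoch = 0;
do
{
    // Accumulate the squared error over one pass through the training set
    error = 0.0;
    for (int i = 0; i < inputs.Length; i++)
        error += network.Train(ref inputs[i], ref targets[i], 0.15, 0.10);
    epoch++;
} while (error > 0.001 && epoch < 10000);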