public void Run(ref double[] input, out double[] output)
{
    // Make sure we have enough data
    if (input.Length != inputSize)
    {
        throw new ArgumentException("input data not correct dimensions");
    }

    // Dimension the output to the size of the last layer
    output = new double[layerSize[layerCount - 1]];

    /* Run Network */
    for (int l = 0; l < layerCount; l++)
    {
        for (int j = 0; j < layerSize[l]; j++)
        {
            double sum = 0.0;
            for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
            {
                // Weighted sum of the previous layer's outputs (raw input for layer 0)
                sum += weight[l][i][j] * (l == 0 ? input[i] : layerOutput[l - 1][i]);
            }
            sum += biases[l][j];
            layerInput[l][j] = sum;
            layerOutput[l][j] = TransferFunctions.Evaluate(transferFunction[l], sum);
        }
    }

    // Copy the last layer's output to the output array
    for (int i = 0; i < layerSize[layerCount - 1]; i++)
    {
        output[i] = layerOutput[layerCount - 1][i];
    }
}
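A minimal driver for the Run method above, assuming it lives on a class such as BackPropagationNetwork; the class name, constructor signature, and TransferFunction values here are illustrative assumptions, not taken from the snippet:

// Hypothetical usage sketch; only Run's signature comes from the snippet above.
double[] input = { 0.25, 0.75 };
double[] output;

var net = new BackPropagationNetwork(                 // assumed constructor
    new[] { 2, 3, 1 },                                // 2 inputs, 3 hidden neurons, 1 output
    new[] { TransferFunction.None, TransferFunction.Sigmoid, TransferFunction.Sigmoid });

net.Run(ref input, out output);                       // forward pass only; weights are unchanged
Console.WriteLine($"network output: {output[0]}");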
public void Run(ref double[] input, out double[] output)
{
    // Check that we have enough data
    //if (input.Length != inputSize)
    //    throw new ArgumentException("Input data is not of the correct dimension.");

    // Size the output to the number of neurons in the output layer
    output = new double[layerSize[layerCount - 1]];

    /* Run the network */
    for (int l = 0; l < layerCount; l++)
    {
        for (int j = 0; j < layerSize[l]; j++)
        {
            double sum = 0.0;
            for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
            {
                // Multiply the inputs by the weights and sum to get the neuron's net input
                sum += weight[l][i][j] * (l == 0 ? input[i] : layerOutput[l - 1][i]);
            }
            sum += bias[l][j];      // add the bias (threshold) to the sum
            layerInput[l][j] = sum; // store the net input so the next step can use it
            // Pass the sum through the activation function to get the neuron's output
            layerOutput[l][j] = TransferFunctions.Evaluate(transferFunction[l], sum);
        }
    }

    // The output of the last (output) layer becomes the network's output
    for (int i = 0; i < layerSize[layerCount - 1]; i++)
    {
        output[i] = layerOutput[layerCount - 1][i];
    }
}
public void Run(ref double[] input, out double[] output)
{
    // Check that the input matches the number of neurons in the input layer
    if (input.Length != inputSize)
    {
        throw new ArgumentException("Input data isn't of the correct dimension");
    }

    // Allocate the network's output
    output = new double[layerSize[layerCount - 1]];

    // Normalization of the input values (note: max is computed but never applied)
    double max = input.Max();

    // Run the network
    for (int l = 0; l < layerCount; l++)
    {
        for (int j = 0; j < layerSize[l]; j++)
        {
            double sum = 0.0;
            for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
            {
                sum += weight[l][i][j] * (l == 0 ? input[i] : layerOtput[l - 1][i]);
            }
            sum += bias[l][j];
            layerInput[l][j] = sum;
            /*if (l == layerCount - 1)
                layerOtput[l][j] = sum;
            else*/
            layerOtput[l][j] = TransferFunctions.Evaluate(transferFunction[l], sum);
        }
    }

    // Copy the network output into the output array
    for (int i = 0; i < layerSize[layerCount - 1]; i++)
    {
        output[i] = layerOtput[layerCount - 1][i];
    }
}
public double Train(ref double[] input, ref double[] desired, double TrainingRate, double Momentum)
{
    // Parameter validation
    if (input.Length != inputSize)
    {
        throw new ArgumentException("Invalid input parameter");
    }
    if (desired.Length != layerSize[layerCount - 1])
    {
        throw new ArgumentException("Invalid input parameter");
    }

    // Local variables
    double error = 0.0, sum = 0.0, weightDelta = 0.0, biasDelta = 0.0;
    double[] output = new double[layerSize[layerCount - 1]];

    // Run the network
    Run(ref input, out output);

    // Back-propagate the error
    for (int l = layerCount - 1; l >= 0; l--)
    {
        if (l == layerCount - 1) // Output layer
        {
            for (int k = 0; k < layerSize[l]; k++)
            {
                delta[l][k] = output[k] - desired[k];
                error += Math.Pow(delta[l][k], 2);
                delta[l][k] *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][k]);
            }
        }
        else // Hidden layer
        {
            for (int i = 0; i < layerSize[l]; i++)
            {
                sum = 0.0;
                for (int j = 0; j < layerSize[l + 1]; j++)
                {
                    sum += weight[l + 1][i][j] * delta[l + 1][j];
                }
                sum *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][i]);
                delta[l][i] = sum;
            }
        }
    }

    // Update the weights and biases
    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
        {
            for (int j = 0; j < layerSize[l]; j++)
            {
                // Momentum term reuses the delta stored on the previous call
                weightDelta = TrainingRate * delta[l][j] * (l == 0 ? input[i] : layerOutput[l - 1][i]);
                weight[l][i][j] -= weightDelta + Momentum * previousWeight[l][i][j];
                previousWeight[l][i][j] = weightDelta;
            }
        }
    }

    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < layerSize[l]; i++)
        {
            biasDelta = TrainingRate * delta[l][i];
            biases[l][i] -= biasDelta + Momentum * previousDelta[l][i];
            previousDelta[l][i] = biasDelta;
        }
    }

    return error;
}
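Train returns the summed squared error for one pattern, so a caller typically loops until the error drops below a tolerance. A sketch under the same assumed BackPropagationNetwork class; the learning rate, momentum, and threshold are illustrative choices, not from the snippet:

// Illustrative online-training loop; network construction and the data are assumed.
double[] trainInput   = { 0.0, 1.0 };
double[] trainDesired = { 1.0 };

double error = double.MaxValue;
for (int epoch = 0; epoch < 100000 && error > 1e-4; epoch++)
{
    error = net.Train(ref trainInput, ref trainDesired, 0.15, 0.10);
}
Console.WriteLine($"final squared error: {error}");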
// Training function
public double Train(ref double[] input, ref double[] desired, double TrainingRate, double Momentum)
{
    // Validate the input parameters
    if (input.Length != inputSize)
    {
        throw new ArgumentException("Invalid input parameter", "input");
    }
    if (desired.Length != layerSize[layerCount - 1])
    {
        throw new ArgumentException("Invalid input parameter", "desired");
    }

    // Local variables
    double error = 0.0, sum = 0.0, weigtdelta = 0.0, biasDelta = 0.0;
    double[] output = new double[layerSize[layerCount - 1]];

    // Run the network
    Run(ref input, out output);

    // Propagate the error backwards
    for (int l = layerCount - 1; l >= 0; l--)
    {
        // Output layer
        if (l == layerCount - 1)
        {
            for (int k = 0; k < layerSize[l]; k++)
            {
                delta[l][k] = output[k] - desired[k];
                error += Math.Pow(delta[l][k], 2);
                delta[l][k] *= TransferFunctions.DerivativeEvaluate(transferFunction[l], layerInput[l][k]);
            }
        }
        // Hidden layer
        else
        {
            for (int i = 0; i < layerSize[l]; i++)
            {
                sum = 0.0;
                for (int j = 0; j < layerSize[l + 1]; j++)
                {
                    sum += weight[l + 1][i][j] * delta[l + 1][j];
                }
                sum *= TransferFunctions.DerivativeEvaluate(transferFunction[l], layerInput[l][i]);
                delta[l][i] = sum;
            }
        }
    }

    // Update the weights and biases
    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
        {
            for (int j = 0; j < layerSize[l]; j++)
            {
                // Here the momentum term is folded into the stored delta
                weigtdelta = TrainingRate * delta[l][j] * (l == 0 ? input[i] : layerOtput[l - 1][i])
                             + Momentum * previousWeightDelta[l][i][j];
                weight[l][i][j] -= weigtdelta;
                previousWeightDelta[l][i][j] = weigtdelta;
            }
        }
    }

    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < layerSize[l]; i++)
        {
            biasDelta = TrainingRate * delta[l][i] + Momentum * previosBiasDelta[l][i];
            bias[l][i] -= biasDelta;
            previosBiasDelta[l][i] = biasDelta;
        }
    }

    return error;
}
// Training function with per-weight adaptive learning rates
public double Train(ref double[] input, ref double[] desired, double kCoefficient, double bCoefficient, double eCoefficient, double Momentum)
{
    // Validate the input parameters
    if (input.Length != inputSize)
    {
        throw new ArgumentException("Invalid input parameter", "input");
    }
    if (desired.Length != layerSize[layerCount - 1])
    {
        throw new ArgumentException("Invalid input parameter", "desired");
    }

    // Local variables
    double error = 0.0, sum = 0.0, weigtdelta = 0.0, biasDelta = 0.0, phiDelta = 0.0, phiDeltaSecond = 0.0;
    double[] output = new double[layerSize[layerCount - 1]];

    // Run the network
    Run(ref input, out output);

    // Propagate the error backwards
    for (int l = layerCount - 1; l >= 0; l--)
    {
        // Output layer
        if (l == layerCount - 1)
        {
            for (int k = 0; k < layerSize[l]; k++)
            {
                delta[l][k] = output[k] - desired[k];
                error += Math.Pow(delta[l][k], 2);
                delta[l][k] *= TransferFunctions.DerivativeEvaluate(transferFunction[l], layerInput[l][k]);
            }
        }
        // Hidden layer
        else
        {
            for (int i = 0; i < layerSize[l]; i++)
            {
                sum = 0.0;
                for (int j = 0; j < layerSize[l + 1]; j++)
                {
                    sum += weight[l + 1][i][j] * delta[l + 1][j];
                }
                sum *= TransferFunctions.DerivativeEvaluate(transferFunction[l], layerInput[l][i]);
                delta[l][i] = sum;
            }
        }
    }

    // Update the weights and biases
    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
        {
            for (int j = 0; j < layerSize[l]; j++)
            {
                // Compute phi (the raw gradient component) for the current weight
                phiDelta = delta[l][j] * (l == 0 ? input[i] : layerOtput[l - 1][i]);
                // Compute phiSecond, the exponential moving average of phi, for the current weight
                phiDeltaSecond = (1 - eCoefficient) * previousPhiDelta[l][i][j]
                                 + eCoefficient * previousPhiDeltaSecond[l][i][j];
                // Update this weight's individual learning rate
                learningRates[l][i][j] += CalculateLearningRateDelta(previousPhiDeltaSecond[l][i][j], phiDelta, kCoefficient, bCoefficient, learningRates[l][i][j]);
                weigtdelta = learningRates[l][i][j] * phiDelta + Momentum * previousWeightDelta[l][i][j];
                weight[l][i][j] -= weigtdelta;
                previousWeightDelta[l][i][j] = weigtdelta;
                previousPhiDelta[l][i][j] = phiDelta;
                previousPhiDeltaSecond[l][i][j] = phiDeltaSecond;
            }
        }
    }

    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < layerSize[l]; i++)
        {
            // Biases keep the fixed initial learning rate rather than an adaptive one
            biasDelta = initialLearningRate * delta[l][i] + Momentum * previosBiasDelta[l][i];
            bias[l][i] -= biasDelta;
            previosBiasDelta[l][i] = biasDelta;
        }
    }

    return error;
}
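The CalculateLearningRateDelta helper is referenced above but not shown. The phi/phiSecond bookkeeping matches the delta-bar-delta scheme (Jacobs, 1988), under which the helper would look roughly like the sketch below; this body is a reconstruction under that assumption, not the original implementation:

// Hypothetical reconstruction of the missing helper, assuming the classic
// delta-bar-delta rule: increase the rate additively by k while the current
// gradient agrees in sign with its running average, and decrease it
// multiplicatively by b when the sign flips (which suggests overshooting).
private static double CalculateLearningRateDelta(
    double phiBar, double phi, double k, double b, double currentRate)
{
    if (phiBar * phi > 0.0)
        return k;                  // consistent descent direction: speed up
    if (phiBar * phi < 0.0)
        return -b * currentRate;   // oscillation detected: slow down
    return 0.0;                    // no signal: leave the rate unchanged
}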
// Training the network via backpropagation
public double Train(ref double[] input, ref double[] desired, double TrainingRate, double Momentum)
{
    // Local variables
    double error = 0.0, sum = 0.0, weightDelta = 0.0, biasDelta = 0.0;
    double[] output = new double[layerSize[layerCount - 1]];

    // Run the network
    Run(ref input, out output);

    // Back-propagate the error
    for (int l = layerCount - 1; l >= 0; l--)
    {
        // Output layer
        if (l == layerCount - 1)
        {
            for (int k = 0; k < layerSize[l]; k++)
            {
                delta[l][k] = output[k] - desired[k];
                error += Math.Pow(delta[l][k], 2);
                delta[l][k] *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][k]);
            }
        }
        else // Hidden layer
        {
            for (int i = 0; i < layerSize[l]; i++)
            {
                sum = 0.0;
                for (int j = 0; j < layerSize[l + 1]; j++)
                {
                    sum += weight[l + 1][i][j] * delta[l + 1][j];
                }
                sum *= TransferFunctions.EvaluateDerivative(transferFunction[l], layerInput[l][i]);
                delta[l][i] = sum;
            }
        }
    }

    // Update the weights and bias values
    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < (l == 0 ? inputSize : layerSize[l - 1]); i++)
        {
            for (int j = 0; j < layerSize[l]; j++)
            {
                weightDelta = TrainingRate * delta[l][j] * (l == 0 ? input[i] : layerOutput[l - 1][i])
                              + Momentum * previousWeightDelta[l][i][j];
                weight[l][i][j] -= weightDelta;
                previousWeightDelta[l][i][j] = weightDelta;
            }
        }
    }

    for (int l = 0; l < layerCount; l++)
    {
        for (int i = 0; i < layerSize[l]; i++)
        {
            biasDelta = TrainingRate * delta[l][i];
            bias[l][i] -= biasDelta + Momentum * previousBiasDelta[l][i];
            previousBiasDelta[l][i] = biasDelta;
        }
    }

    return error;
}