/// <summary>
/// Process one training set element: forward pass, error accumulation,
/// output-layer delta calculation, optional L1/L2 regularization, and
/// backpropagation of the gradient.
/// </summary>
/// <param name="errorCalc">The error calculation to use.</param>
/// <param name="input">The network input.</param>
/// <param name="ideal">The ideal values.</param>
public void Process(IErrorCalculation errorCalc, double[] input, double[] ideal)
{
    // Forward pass: compute the network's output into _actual.
    _network.Compute(input, _actual);
    errorCalc.UpdateError(_actual, ideal, 1.0);

    // Calculate error for the output layer.
    var outputLayerIndex = _network.Layers.Count - 1;
    var outputActivation = _network.Layers[outputLayerIndex].Activation;
    errorFunction.CalculateError(
        outputActivation, _layerSums, _layerOutput,
        ideal, _actual, _layerDelta, 0, 1.0);

    // Apply regularization, if requested.
    // BUG FIX: the guard previously tested _owner.L1 on both sides of the ||,
    // so an L2-only configuration never applied its penalty; the second
    // operand must test _owner.L2.
    if (_owner.L1 > AIFH.DefaultPrecision || _owner.L2 > AIFH.DefaultPrecision)
    {
        var lp = new double[2];
        CalculateRegularizationPenalty(lp);

        // The combined penalty is loop-invariant; compute it once.
        var penalty = lp[0] * _owner.L1 + lp[1] * _owner.L2;
        for (var i = 0; i < _actual.Length; i++)
        {
            _layerDelta[i] += penalty;
        }
    }

    // Propagate backwards (chain rule from calculus).
    for (var i = _network.Layers.Count - 1; i > 0; i--)
    {
        var layer = _network.Layers[i];
        layer.ComputeGradient(this);
    }
}
/// <summary>
/// Construct the supervised trainer for a deep belief network.
/// </summary>
/// <param name="theNetwork">The network to train.</param>
/// <param name="theTrainingInput">The input (x) to train.</param>
/// <param name="theTrainingIdeal">The expected output (y, or labels) to train.</param>
/// <param name="theLearningRate">The learning rate.</param>
public SupervisedTrainDBN(DeepBeliefNetwork theNetwork, double[][] theTrainingInput,
    double[][] theTrainingIdeal, double theLearningRate)
{
    // Capture the training configuration; MSE is the default error metric.
    _network = theNetwork;
    _trainingInput = theTrainingInput;
    _trainingIdeal = theTrainingIdeal;
    _learningRate = theLearningRate;
    ErrorCalc = new ErrorCalculationMSE();
}
/// <summary>
/// Compute the error over all stored actual/ideal row pairs using the
/// supplied error-calculation strategy.
/// </summary>
/// <param name="calc">The error calculation.</param>
/// <returns>The error.</returns>
public double CalculateError(IErrorCalculation calc)
{
    calc.Clear();
    var rowCount = Actual.Length;
    for (var idx = 0; idx < rowCount; idx++)
    {
        calc.UpdateError(Actual[idx], Ideal[idx], 1.0);
    }
    return calc.Calculate();
}
/// <summary>
/// Accumulate every actual/ideal row into the given error calculation and
/// return the resulting error value.
/// </summary>
/// <param name="calc">The error calculation.</param>
/// <returns>The error.</returns>
public double CalculateError(IErrorCalculation calc)
{
    calc.Clear();
    var row = 0;
    while (row < Actual.Length)
    {
        calc.UpdateError(Actual[row], Ideal[row], 1.0);
        row++;
    }
    return calc.Calculate();
}
/// <summary>
/// Calculate the regression error of a model over a dataset.
/// </summary>
/// <param name="dataset">The dataset.</param>
/// <param name="model">The model to evaluate.</param>
/// <param name="calc">The error calculation.</param>
/// <returns>The error.</returns>
public static double CalculateRegressionError(IList<BasicData> dataset,
    IRegressionAlgorithm model, IErrorCalculation calc)
{
    calc.Clear();
    for (var i = 0; i < dataset.Count; i++)
    {
        var row = dataset[i];
        // Compare the model's prediction for this row against its ideal.
        calc.UpdateError(model.ComputeRegression(row.Input), row.Ideal, 1.0);
    }
    return calc.Calculate();
}
/// <summary>
/// Train and stop when the validation set does not improve anymore.
/// </summary>
/// <param name="train">The trainer to use.</param>
/// <param name="model">The model that is trained.</param>
/// <param name="validationData">The validation data.</param>
/// <param name="tolerate">Number of iterations to tolerate no improvement to the validation error.</param>
/// <param name="errorCalc">The error calculation method.</param>
public void PerformIterationsEarlyStop(ILearningMethod train, IRegressionAlgorithm model,
    IList<BasicData> validationData, int tolerate, IErrorCalculation errorCalc)
{
    var bestError = double.PositiveInfinity;
    var badIterations = 0;
    var iterationNumber = 0;

    while (true)
    {
        iterationNumber++;
        train.Iteration();
        var validationError =
            DataUtil.CalculateRegressionError(validationData, model, errorCalc);

        // Track the best validation error seen so far; reset the
        // no-improvement counter on any improvement.
        if (validationError < bestError)
        {
            bestError = validationError;
            badIterations = 0;
        }
        else
        {
            badIterations++;
        }

        // Decide whether this is the final iteration: trainer finished,
        // validation stopped improving past tolerance, or training diverged.
        var stop = false;
        if (train.Done)
        {
            stop = true;
        }
        else if (validationError > bestError && badIterations > tolerate)
        {
            stop = true;
        }
        else if (double.IsNaN(train.LastError))
        {
            Console.WriteLine("Training failed.");
            stop = true;
        }

        Console.WriteLine("Iteration #" + iterationNumber
            + ", Iteration Score=" + train.LastError
            + ", Validation Score=" + validationError
            + ", " + train.Status);

        if (stop)
        {
            break;
        }
    }

    train.FinishTraining();
    Console.WriteLine("Final score: " + train.LastError);
}
/// <inheritdoc/>
public double CalculateScore(IMLMethod algo)
{
    var errorCalc = ErrorCalc.Create();
    var regression = (IRegressionAlgorithm)algo;

    // Evaluate: accumulate the model's error over the whole training set.
    errorCalc.Clear();
    foreach (var pair in _trainingData)
    {
        errorCalc.UpdateError(regression.ComputeRegression(pair.Input), pair.Ideal, 1.0);
    }
    return errorCalc.Calculate();
}
/// <summary>
/// Exercise an error-calculation implementation two ways — whole-row updates
/// and element-by-element updates — and assert that both paths agree.
/// </summary>
/// <param name="calc">The error calculation under test.</param>
/// <param name="actual">The actual output rows.</param>
/// <param name="ideal">The ideal output rows.</param>
/// <returns>The computed error (from the element-wise pass).</returns>
public static double CalculateError(IErrorCalculation calc, double[][] actual, double[][] ideal)
{
    // Pass 1: update with whole 1d arrays (the most common usage).
    // An empty calculation must report positive infinity.
    calc.Clear();
    Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);
    for (var row = 0; row < actual.Length; row++)
    {
        calc.UpdateError(actual[row], ideal[row], 1.0);
    }
    Assert.AreEqual(20, calc.SetSize);
    var error1 = calc.Calculate();

    // Pass 2: update one element at a time; less common, but the error
    // calculation should produce the same result as pass 1.
    calc.Clear();
    Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);
    for (var row = 0; row < actual.Length; row++)
    {
        var actualRow = actual[row];
        var idealRow = ideal[row];
        for (var col = 0; col < actualRow.Length; col++)
        {
            calc.UpdateError(actualRow[col], idealRow[col]);
        }
    }
    Assert.AreEqual(20, calc.SetSize);
    var error2 = calc.Calculate();

    // The two accumulation styles must always agree.
    Assert.AreEqual(error1, error2, 0.0001);
    return error2;
}
/// <summary>
/// Run an error calculation through both its bulk (array) and scalar
/// (element) update paths and verify the two produce the same error.
/// </summary>
/// <param name="calc">The error calculation under test.</param>
/// <param name="actual">The actual output rows.</param>
/// <param name="ideal">The ideal output rows.</param>
/// <returns>The computed error (from the scalar pass).</returns>
public static double CalculateError(IErrorCalculation calc, double[][] actual, double[][] ideal)
{
    // Bulk path: feed entire rows at once. A cleared calculation must
    // report positive infinity before any updates.
    calc.Clear();
    Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);
    var r = 0;
    while (r < actual.Length)
    {
        calc.UpdateError(actual[r], ideal[r], 1.0);
        r++;
    }
    Assert.AreEqual(20, calc.SetSize);
    var bulkError = calc.Calculate();

    // Scalar path: feed individual elements; must match the bulk path.
    calc.Clear();
    Assert.AreEqual(double.PositiveInfinity, calc.Calculate(), 0.0001);
    r = 0;
    while (r < actual.Length)
    {
        var actualRow = actual[r];
        var idealRow = ideal[r];
        var c = 0;
        while (c < actualRow.Length)
        {
            calc.UpdateError(actualRow[c], idealRow[c]);
            c++;
        }
        r++;
    }
    Assert.AreEqual(20, calc.SetSize);
    var scalarError = calc.Calculate();

    // Both update styles must always yield the same error.
    Assert.AreEqual(bulkError, scalarError, 0.0001);
    return scalarError;
}
/// <inheritdoc />
public double CalculateScore(IMLMethod algo)
{
    var errorCalc = ErrorCalc.Create();
    var regression = (IRegressionAlgorithm)algo;

    // Genomes that exceed the length cap score as infinitely bad.
    if (((IGenome)regression).Count > _maxLength)
    {
        return double.PositiveInfinity;
    }

    // Evaluate: accumulate the model's error over the training data.
    errorCalc.Clear();
    foreach (var pair in _trainingData)
    {
        var output = regression.ComputeRegression(pair.Input);
        errorCalc.UpdateError(output, pair.Ideal, 1.0);
    }
    return errorCalc.Calculate();
}
/// <summary>
/// Construct the scoring function over the given training data.
/// </summary>
/// <param name="theTrainingData">The training data.</param>
public ScoreRegressionData(IList<BasicData> theTrainingData)
{
    // MSE is the default error metric for this score function.
    ErrorCalc = new ErrorCalculationMSE();
    _trainingData = theTrainingData;
}
/// <summary>
/// Train and stop when the validation set does not improve anymore.
/// </summary>
/// <param name="train">The trainer to use.</param>
/// <param name="model">The model that is trained.</param>
/// <param name="validationData">The validation data.</param>
/// <param name="tolerate">Number of iterations to tolerate no improvement to the validation error.</param>
/// <param name="errorCalc">The error calculation method.</param>
public void PerformIterationsEarlyStop(ILearningMethod train, IRegressionAlgorithm model,
    IList<BasicData> validationData, int tolerate, IErrorCalculation errorCalc)
{
    var iteration = 0;
    var stop = false;
    var lowestValidationError = double.PositiveInfinity;
    var stagnantIterations = 0;

    do
    {
        iteration++;
        train.Iteration();
        var validationError =
            DataUtil.CalculateRegressionError(validationData, model, errorCalc);

        // An improvement resets the stagnation counter; otherwise count it.
        if (validationError < lowestValidationError)
        {
            stagnantIterations = 0;
            lowestValidationError = validationError;
        }
        else
        {
            stagnantIterations++;
        }

        // Stop when the trainer reports done, the validation error has been
        // stagnant past the tolerance, or the training error went NaN.
        if (train.Done)
        {
            stop = true;
        }
        else if (validationError > lowestValidationError && stagnantIterations > tolerate)
        {
            stop = true;
        }
        else if (double.IsNaN(train.LastError))
        {
            Console.WriteLine("Training failed.");
            stop = true;
        }

        Console.WriteLine("Iteration #" + iteration
            + ", Iteration Score=" + train.LastError
            + ", Validation Score=" + validationError
            + ", " + train.Status);
    } while (!stop);

    train.FinishTraining();
    Console.WriteLine("Final score: " + train.LastError);
}
/// <summary>
/// Process one training set element: run the forward pass, accumulate the
/// error, compute the output-layer deltas, apply any requested L1/L2
/// regularization, and backpropagate the gradient.
/// </summary>
/// <param name="errorCalc">The error calculation to use.</param>
/// <param name="input">The network input.</param>
/// <param name="ideal">The ideal values.</param>
public void Process(IErrorCalculation errorCalc, double[] input, double[] ideal)
{
    // Forward pass: compute the network's output into _actual.
    _network.Compute(input, _actual);
    errorCalc.UpdateError(_actual, ideal, 1.0);

    // Calculate error for the output layer.
    var outputLayerIndex = _network.Layers.Count - 1;
    var outputActivation = _network.Layers[outputLayerIndex].Activation;
    errorFunction.CalculateError(
        outputActivation, _layerSums, _layerOutput,
        ideal, _actual, _layerDelta, 0, 1.0);

    // Apply regularization, if requested.
    // BUG FIX: both sides of the || previously tested _owner.L1, so the
    // penalty was skipped whenever only L2 regularization was configured;
    // the second operand must test _owner.L2.
    if (_owner.L1 > AIFH.DefaultPrecision || _owner.L2 > AIFH.DefaultPrecision)
    {
        var lp = new double[2];
        CalculateRegularizationPenalty(lp);

        // The penalty does not depend on i; compute it once outside the loop.
        var penalty = lp[0] * _owner.L1 + lp[1] * _owner.L2;
        for (var i = 0; i < _actual.Length; i++)
        {
            _layerDelta[i] += penalty;
        }
    }

    // Propagate backwards (chain rule from calculus).
    for (var i = _network.Layers.Count - 1; i > 0; i--)
    {
        var layer = _network.Layers[i];
        layer.ComputeGradient(this);
    }
}
/// <summary>
/// Construct the score function for the specified training data.
/// </summary>
/// <param name="theTrainingData">The training data.</param>
public ScoreRegressionData(IList<BasicData> theTrainingData)
{
    // Scores are computed as mean squared error by default.
    ErrorCalc = new ErrorCalculationMSE();
    _trainingData = theTrainingData;
}