Calculate the error of a neural network. Encog currently supports three error-calculation modes; see ErrorCalculationMode for more information.
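The examples below all follow the same pattern: feed (actual, ideal) pairs into ErrorCalculation.UpdateError and read the aggregate error back with Calculate. A minimal stand-alone sketch of that pattern follows; the static Mode property and the ErrorCalculationMode.RMS member are assumptions about the Encog API and may differ between versions.

        // Choose how the aggregate error is reported (assumed API; see note above).
        ErrorCalculation.Mode = ErrorCalculationMode.RMS;

        // Accumulate (actual, ideal) pairs, then compute the overall error.
        var errorCalc = new ErrorCalculation();
        errorCalc.UpdateError(new double[] { 0.9, 0.1 }, new double[] { 1.0, 0.0 });
        errorCalc.UpdateError(new double[] { 0.2, 0.7 }, new double[] { 0.0, 1.0 });
        double overallError = errorCalc.Calculate();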
        /// <summary>
        /// Construct a gradient worker.
        /// </summary>
        ///
        /// <param name="network">The network to train.</param>
        /// <param name="owner">The owner that is doing the training.</param>
        /// <param name="training">The training data.</param>
        /// <param name="low">The low index to use in the training data.</param>
        /// <param name="high">The high index to use in the training data.</param>
        public GradientWorkerCPU(FlatNetwork network,
                TrainFlatNetworkProp owner,
                IEngineIndexableSet training, int low, int high)
        {
            this.errorCalculation = new ErrorCalculation();
            this.network = network;
            this.training = training;
            this.low = low;
            this.high = high;
            this.owner = owner;

            this.stopwatch = new Stopwatch();

            this.layerDelta = new double[network.LayerOutput.Length];
            this.gradients = new double[network.Weights.Length];
            this.actual = new double[network.OutputCount];

            this.weights = network.Weights;
            this.layerIndex = network.LayerIndex;
            this.layerCounts = network.LayerCounts;
            this.weightIndex = network.WeightIndex;
            this.layerOutput = network.LayerOutput;
            this.layerFeedCounts = network.LayerFeedCounts;

            this.pair = BasicEngineData.CreatePair(network.InputCount,
                    network.OutputCount);
        }
Example #2
        /// <summary>
        /// Calculate the error for this neural network. The error is calculated
        /// using root mean square (RMS).
        /// </summary>
        ///
        /// <param name="data">The training set.</param>
        /// <returns>The error percentage.</returns>
        public double CalculateError(IEngineIndexableSet data)
        {
            ErrorCalculation errorCalculation = new ErrorCalculation();

            double[] actual = new double[this.outputCount];
            IEngineData pair = BasicEngineData.CreatePair(data.InputSize,
                    data.IdealSize);

            for (int i = 0; i < data.Count; i++)
            {
                data.GetRecord(i, pair);
                Compute(pair.InputArray, actual);
                errorCalculation.UpdateError(actual, pair.IdealArray);
            }
            return errorCalculation.Calculate();
        }
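With the RMS mode described in the summary above, Calculate() corresponds to the square root of the mean squared difference between the ideal and actual output values. A hand-rolled equivalent for a single output vector, for illustration only (plain C#, not Encog API):

        // Squared differences between ideal and actual outputs.
        double[] ideal = { 1.0, 0.0 };
        double[] actualOutput = { 0.8, 0.1 };
        double sumSquared = 0;
        for (int i = 0; i < ideal.Length; i++)
        {
            double diff = ideal[i] - actualOutput[i];
            sumSquared += diff * diff;
        }
        // Root of the mean squared error: sqrt(0.05 / 2) ~= 0.158
        double rms = Math.Sqrt(sumSquared / ideal.Length);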
Example #3
        /// <summary>
        /// Perform one training iteration.
        /// </summary>
        public override void Iteration()
        {

            if (this.mustInit)
                InitWeight();

            ErrorCalculation error = new ErrorCalculation();

            foreach (INeuralDataPair pair in this.training)
            {
                INeuralData output = this.parts.InstarSynapse.Compute(
                        pair.Input);
                int j = this.parts.Winner(output);
                for (int i = 0; i < this.parts.OutstarLayer.NeuronCount; i++)
                {
                    double delta = this.learningRate
                            * (pair.Ideal[i]
                                    - this.parts.OutstarSynapse.WeightMatrix[j, i]);
                    this.parts.OutstarSynapse.WeightMatrix.Add(j, i, delta);
                }

                error.UpdateError(output.Data, pair.Ideal.Data);
            }

            this.Error = error.Calculate();
        }
Example #4
        /// <summary>
        /// Evaluate the error for the specified model.
        /// </summary>
        /// <param name="param">The params for the SVN.</param>
        /// <param name="prob">The problem to evaluate.</param>
        /// <param name="target">The output values from the SVN.</param>
        /// <returns>The calculated error.</returns>
        private double Evaluate(svm_parameter param, svm_problem prob,
                double[] target)
        {
            int total_correct = 0;

            ErrorCalculation error = new ErrorCalculation();

            if (param.svm_type == svm_parameter.EPSILON_SVR
                    || param.svm_type == svm_parameter.NU_SVR)
            {
                for (int i = 0; i < prob.l; i++)
                {
                    double ideal = prob.y[i];
                    double actual = target[i];
                    error.UpdateError(actual, ideal);
                }
                return error.Calculate();
            }
            else
            {
                for (int i = 0; i < prob.l; i++)
                    if (target[i] == prob.y[i])
                        ++total_correct;

                return 100.0 * total_correct / prob.l;
            }
        }
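Note that the two branches return different quantities: for the regression SVM types (EPSILON_SVR, NU_SVR) the result comes from ErrorCalculation, while for the classification types it is an accuracy percentage rather than an error. A small self-contained sketch of the classification branch, using placeholder arrays rather than the libsvm structures:

        // Percentage of predictions that exactly match the labels.
        double[] predicted = { 1, -1, 1, 1 };
        double[] labels = { 1, -1, -1, 1 };
        int totalCorrect = 0;
        for (int i = 0; i < predicted.Length; i++)
            if (predicted[i] == labels[i])
                ++totalCorrect;
        double accuracyPercent = 100.0 * totalCorrect / predicted.Length; // 75.0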
Example #5
        /// <summary>
        /// Perform a training iteration.
        /// </summary>
        public override void Iteration()
        {

            ErrorCalculation errorCalculation = new ErrorCalculation();

            ILayer inputLayer = network.GetLayer(BasicNetwork.TAG_INPUT);
            ILayer outputLayer = network.GetLayer(BasicNetwork.TAG_OUTPUT);

            foreach (INeuralDataPair pair in this.training)
            {
                // calculate the error
                INeuralData output = this.network.Compute(pair.Input);

                for (int currentAdaline = 0; currentAdaline < output.Count; currentAdaline++)
                {
                    double diff = pair.Ideal[currentAdaline]
                            - output[currentAdaline];

                    // weights
                    for (int i = 0; i < inputLayer.NeuronCount; i++)
                    {
                        double input = pair.Input[i];
                        synapse.WeightMatrix.Add(i, currentAdaline,
                                learningRate * diff * input);
                    }

                    // bias
                    double t = outputLayer.BiasWeights[currentAdaline];
                    t += learningRate * diff;
                    outputLayer.BiasWeights[currentAdaline] = t;
                }

                errorCalculation.UpdateError(output.Data, pair.Ideal.Data);
            }

            // set the global error
            this.Error = errorCalculation.Calculate();
        }
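The ADALINE update above is the classic delta rule: each weight changes by learningRate * (ideal - actual) * input, and each bias by learningRate * (ideal - actual). A single worked update with hypothetical numbers, independent of the Encog types:

        double learningRate = 0.1;
        double ideal = 1.0;
        double actual = 0.3;   // network output for this neuron
        double input = 0.5;    // activation of the connected input neuron

        // Delta-rule updates applied in the iteration above.
        double weightDelta = learningRate * (ideal - actual) * input; // 0.035
        double biasDelta = learningRate * (ideal - actual);           // 0.07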