        //returns the percentage of learning examples for which every output lies within pfThreshold of the desired value
        public double ComputeSuccessPercentage(double pfThreshold)
        {
            double        fReturn;
            bool          bGood;
            int           lGood;
            int           lGoodExamples;
            sNeuralOutput oOutput;

            lGood   = lGoodExamples = 0;
            fReturn = 0;
            oOutput = new sNeuralOutput();

            //loop through the examples
            for (int lIndex = 0; lIndex < oLearningData.Count; lIndex++)
            {
                ComputeOutput(oLearningData[lIndex].oInput, oOutput);

                if (oOutput.fOutputs.Count == oLearningData[lIndex].oOutput.fOutputs.Count)
                {
                    lGoodExamples++;
                    bGood = true;

                    for (int lOutputIndex = 0; bGood && lOutputIndex < oLearningData[lIndex].oOutput.fOutputs.Count; lOutputIndex++)
                    {
                        if (System.Math.Abs(oLearningData[lIndex].oOutput.fOutputs[lOutputIndex] - oOutput.fOutputs[lOutputIndex]) > pfThreshold)
                        {
                            bGood = false;
                        }
                    }

                    if (bGood)
                    {
                        lGood++;
                    }
                }
            }

            if (lGoodExamples > 0)
            {
                //multiply by 100.0 so the division is floating-point and the percentage keeps its fractional part
                fReturn = (lGood * 100.0) / lGoodExamples;
            }

            return(fReturn);
        }
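
        //A usage sketch (not part of the original API): reports the success
        //percentage at a few thresholds so the sensitivity of the score to
        //pfThreshold is easy to inspect; assumes oLearningData is already filled
        public void ReportSuccessPercentages()
        {
            foreach (double fThreshold in new double[] { 0.1, 0.2, 0.3 })
            {
                System.Console.WriteLine("threshold {0:F2}: {1:F1}% of examples within tolerance",
                                         fThreshold, ComputeSuccessPercentage(fThreshold));
            }
        }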
        //performs one forward pass: loads oInput into the input nodes and fills oOutput with the network's computed outputs
        public void ComputeOutput(sNeuralInput oInput, sNeuralOutput oOutput)
        {
            //clear the output structure
            oOutput.fOutputs.Clear();

            //init network
            InitialiseNetwork();

            //fill input nodes
            for (int lIndex = 0; lIndex < oInput.fInputs.Count && lIndex < InputNodes.Count; lIndex++)
            {
                InputNodes[lIndex].SetValue(oInput.fInputs[lIndex]);
            }

            //compute output nodes
            foreach (Node oNode in OutputNodes)
            {
                oOutput.fOutputs.Add(ComputeNode(oNode.Id));
            }
        }
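
        //A convenience overload (a sketch, not in the original source): wraps plain
        //doubles in the sNeuralInput/sNeuralOutput structures so a forward pass can
        //be run from an array; assumes fInputs and fOutputs are List<double> members
        //initialised by the structures' constructors
        public List<double> ComputeOutput(double[] pfInputs)
        {
            sNeuralInput  oInput  = new sNeuralInput();
            sNeuralOutput oOutput = new sNeuralOutput();

            oInput.fInputs.AddRange(pfInputs);
            ComputeOutput(oInput, oOutput);

            return(oOutput.fOutputs);
        }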
        //initialise an empty learning example (an input/output pair)
        public LearningData()
        {
            oInput  = new sNeuralInput();
            oOutput = new sNeuralOutput();
        }
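
        //A convenience factory (a sketch, not in the original source): builds one
        //learning example from plain arrays; assumes fInputs and fOutputs are
        //List<double> members, as the constructor above initialises them
        public static LearningData Create(double[] pfInputs, double[] pfOutputs)
        {
            LearningData oExample = new LearningData();

            oExample.oInput.fInputs.AddRange(pfInputs);
            oExample.oOutput.fOutputs.AddRange(pfOutputs);

            return(oExample);
        }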
        //trains the network with piRepetitions passes of randomised back-propagation over the learning data
        //and returns the resulting success percentage at a 0.2 threshold
        public double TrainingExecuteRepetition(int piRepetitions)
        {
            int           lRepetition;
            int           lLayer;
            sNeuralOutput oOutput;
            int           lExample;

            oOutput = new sNeuralOutput();

            //start learning by back-propagation
            lRepetition = 0;

            double fGoodError  = 0;
            int    lGood       = 0;
            double fFaultError = 0;
            int    lFault      = 0;

            if (oLearningData.Count > 0)
            {
                do
                {
                    fGoodError  = 0;
                    lGood       = 0;
                    fFaultError = 0;
                    lFault      = 0;

                    for (int lIndex = 0; lIndex < oLearningData.Count; lIndex++)
                    {
                        //get a random example from the learning vector
                        //(Random.Next's upper bound is exclusive, so passing Count keeps the last example selectable)
                        lExample = _r.Next(oLearningData.Count);

                        //compute the value suggested by the network
                        ComputeOutput(oLearningData[lExample].oInput, oOutput);

                        //make sure the outputs (desired and computed) are the same size
                        if (oOutput.fOutputs.Count != oLearningData[lExample].oOutput.fOutputs.Count)
                        {
                            throw new ApplicationException("Desired and computed output differ in size. Cannot compare.");
                        }

                        //accumulate the error separately for outputs that should be high (desired > 0.5)
                        //and outputs that should be low, so the two error rates can be compared
                        for (int lIndex2 = 0; lIndex2 < oOutput.fOutputs.Count; lIndex2++)
                        {
                            if (oLearningData[lExample].oOutput.fOutputs[lIndex2] > 0.5)
                            {
                                fGoodError += System.Math.Abs(oOutput.fOutputs[lIndex2] - 1);
                                lGood++;
                            }
                            else
                            {
                                fFaultError += System.Math.Abs(oOutput.fOutputs[lIndex2]);
                                lFault++;
                            }
                        }

                        //determine the number of the output layer in the network
                        lLayer = OutputNodes[0].lLayer;

                        //set cumulative error to 0 in all nodes
                        for (int lIndex2 = 0; lIndex2 < Nodes.Count; lIndex2++)
                        {
                            Nodes[lIndex2].SetCumulativeErrorDelta(0);
                        }

                        //do the back propagation
                        //start with output layer first
                        for (int lIndex2 = 0; lIndex2 < OutputNodes.Count; lIndex2++)
                        {
                            LearnUpdateWeights(OutputNodes[lIndex2].Id, oLearningData[lExample].oOutput.fOutputs[lIndex2], oOutput.fOutputs[lIndex2], 0.50);
                        }

                        //followed by the other nodes
                        lLayer--;

                        do
                        {
                            for (int lIndex2 = 0; lIndex2 < Nodes.Count; lIndex2++)
                            {
                                if (Nodes[lIndex2].lLayer == lLayer)
                                {
                                    //the desired and computed outputs don't matter for hidden layers, therefore they are passed as 0
                                    LearnUpdateWeights(Nodes[lIndex2].Id, 0, 0, 0.50);
                                }
                            }

                            lLayer--;
                        } while (lLayer > 0);
                    }

                    lRepetition++;

                    //average the accumulated errors per output class, guarding against empty classes
                    if (lFault > 0)
                    {
                        fFaultError = fFaultError / lFault;
                    }

                    if (lGood > 0)
                    {
                        fGoodError = fGoodError / lGood;
                    }
                } while (lRepetition < piRepetitions);
            }

            return(ComputeSuccessPercentage(0.2));
        }
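
        //An end-to-end training sketch (hypothetical, not part of the original API):
        //runs back-propagation in short bursts and stops once the success percentage
        //reaches the requested target or the burst budget is exhausted
        public double TrainUntil(double pfTargetPercentage, int piMaxBursts)
        {
            double fSuccess = 0;

            for (int lBurst = 0; lBurst < piMaxBursts; lBurst++)
            {
                //each burst performs 50 repetitions over the learning data
                fSuccess = TrainingExecuteRepetition(50);

                if (fSuccess >= pfTargetPercentage)
                {
                    break;
                }
            }

            return(fSuccess);
        }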