Example #1
 public void Train(TrainingData[] td, int iteration)
 {
     // Each iteration runs one back-propagation pass (on a random training sample) and keeps the updated network
     for (int i = 0; i < iteration; i++)
     {
         NeuronLayerGroups = _BackPropagation.BackPropagate(NeuronLayerGroups, td);
     }
 }
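A minimal usage sketch for Train. It assumes TrainingData exposes settable Input and Target arrays (those member names appear in the back-propagation examples below, but the object-initializer syntax here is an assumption), and it uses the Seer constructor shown in Example #7:

 // Hypothetical usage sketch, not from the source
 Seer seer = new Seer(inputCount: 3, outputCount: 2, numLayers: 3, mtBP: true);

 TrainingData[] samples =
 {
     new TrainingData { Input = new float[] { 0f, 1f, 0f }, Target = new float[] { 1f, 0f } },
     new TrainingData { Input = new float[] { 1f, 0f, 1f }, Target = new float[] { 0f, 1f } }
 };

 seer.Train(samples, iteration: 1000);   // 1000 passes, each back-propagating one random sample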
Example #2
        public NeuronLayerGroup BackPropagate(NeuronLayerGroup neuronLayerGroup, TrainingData[] trainingData)
        {
            nlgBuffer = neuronLayerGroup;
            int tDr = Rng.GetRngMinMax(0, trainingData.Length);     // Pick a random training sample for this pass

            OutputLayerBP(neuronLayerGroup, trainingData[tDr]);     // Output layer first, then propagate the error backwards
            HiddenLayerBP(neuronLayerGroup);
            return nlgBuffer;
        }
Example #3
 public TestSeer()
 {
     // Fully connected test network: 3 inputs -> 5 -> 5 -> 5 -> 3 outputs, Logistic activation on the output layer
     NeuronLayer[] nL = new NeuronLayer[4];
     nL[0] = new NeuronLayer(3, 5);
     nL[1] = new NeuronLayer(5, 5);
     nL[2] = new NeuronLayer(5, 5);
     nL[3] = new NeuronLayer(5, 3, new Logistic());
     nlg = new NeuronLayerGroup(nL);
 }
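A hedged evaluation sketch, based only on members visible elsewhere in these examples (NeuronLayerGroup.Predict and Neuron.Prediction). Whether nlg is publicly accessible on TestSeer, and whether Predict also returns the outputs, is not shown in the source, so the outputs are read from the last layer's neurons here:

 // Hypothetical evaluation sketch, not from the source
 TestSeer test = new TestSeer();
 test.nlg.Predict(new float[] { 0.2f, 0.7f, 0.1f });     // Forward pass with 3 inputs (assumes nlg is accessible)

 Neuron.Neuron[] outputs = test.nlg.NeuronLayers[test.nlg.NeuronLayers.Length - 1].Neurons;
 for (int n = 0; n < outputs.Length; n++)
 {
     Console.WriteLine(outputs[n].Prediction);            // One of the 3 output values
 }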
Example #4
        public override void HiddenLayerBP(NeuronLayerGroup nlg)
        {
            for (int nl = nlg.NeuronLayers.Length - 2; nl >= 0; nl--)    // Start from the second-to-last layer and walk backwards
            {
                int             nLength   = nlg.NeuronLayers[nl].Neurons.Length;
                Neuron.Neuron[] _cNeurons = nlg.NeuronLayers[nl].Neurons;   // Current layer (read)
                Neuron.Neuron[] _bNeurons = nlg.NeuronLayers[nl].Neurons;   // Buffer target (note: references the same array as _cNeurons)

                // Calculation of the cost (error term) for the hidden layer
                Parallel.For(0, nLength, new ParallelOptions {
                    MaxDegreeOfParallelism = Rng.Cores
                }, n => {
                    float sumBuffer           = 0;
                    Neuron.Neuron[] _oNeurons = nlg.NeuronLayers[nl + 1].Neurons;

                    for (int en = 0; en < _oNeurons.Length; en++)                    // Next layer's error loop
                    {
                        sumBuffer += _oNeurons[en].Weights[n] * _oNeurons[en].Error; // Error of the next layer * connecting weight
                    }
                    _cNeurons[n].Error = sumBuffer;                                  // Store the error on the current layer's neuron
                });

                // WEIGHT UPDATE
                Parallel.For(0, nLength, new ParallelOptions {
                    MaxDegreeOfParallelism = Rng.Cores
                }, n => {
                    float nLearningRate = _cNeurons[n].LearningRate;
                    float d_Cost        = _cNeurons[n].Error * _cNeurons[n].AxonPrime();  // Error * activation derivative

                    _cNeurons[n].Bias += nLearningRate * d_Cost;

                    for (int w = 0; w < _cNeurons[n].Weights.Length; w++)   // Weight loop; d_Cost is constant across weights
                    {
                        _bNeurons[n].Weights[w] += nLearningRate * d_Cost * _cNeurons[n].Dendrites[w];
                    }
                });

                // COPY TO BUFFER
                for (int n = 0; n < _cNeurons.Length; n++)
                {
                    nlgBuffer.NeuronLayers[nl].Neurons[n].Bias    = _bNeurons[n].Bias;
                    nlgBuffer.NeuronLayers[nl].Neurons[n].Weights = _bNeurons[n].Weights;
                }
            }
        }
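In effect, for each hidden-layer neuron i the block above computes error_i = Σ_k w_{k,i} · error_k over the neurons k of the next layer, then applies bias_i += lr · error_i · f'(net_i) and w_{i,j} += lr · error_i · f'(net_i) · input_j, where f' is the activation derivative returned by AxonPrime() and input_j is Dendrites[j].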
Example #5
        public override void OutputLayerBP(NeuronLayerGroup nlg, TrainingData tD)
        {
            int nLength = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons.Length;

            Neuron.Neuron[] _cNeurons = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons;    // Since only the output layer is touched here, cache its neurons
            Neuron.Neuron[] _bNeurons = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons;    // Buffer target (note: references the same array as _cNeurons)

            nlg.Predict(tD.Input);  // Forward propagation through the network to generate the output value(s)

            // Calculation of the cost (error term) for each output
            Parallel.For(0, nLength, new ParallelOptions {
                MaxDegreeOfParallelism = Rng.Cores
            }, n => {
                _cNeurons[n].Error = tD.Target[n] - _cNeurons[n].Prediction;
            });

            // WEIGHT UPDATE FOR THE OUTPUT LAYER
            Parallel.For(0, nLength, new ParallelOptions {
                MaxDegreeOfParallelism = Rng.Cores
            }, n => {
                float lr     = _cNeurons[n].LearningRate;
                float d_Cost = _cNeurons[n].Error * _cNeurons[n].AxonPrime();  // Error * activation derivative

                _bNeurons[n].Bias += lr * d_Cost;

                for (int w = 0; w < _cNeurons[n].Weights.Length; w++)   // Weight loop; d_Cost is constant across weights
                {
                    _bNeurons[n].Weights[w] += lr * d_Cost * _cNeurons[n].Dendrites[w];
                }
            });
            // COPY TO BUFFER
            for (int n = 0; n < _cNeurons.Length; n++)
            {
                nlgBuffer.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons[n].Bias    = _bNeurons[n].Bias;
                nlgBuffer.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons[n].Weights = _bNeurons[n].Weights;
            }
        }
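In effect, each output neuron's error term is error_n = target_n − prediction_n, and the updates follow the delta rule: bias_n += lr · error_n · f'(net_n) and w_{n,j} += lr · error_n · f'(net_n) · input_j, with f' given by AxonPrime() and input_j by Dendrites[j]. The single-threaded variant in the next example performs exactly the same arithmetic with plain loops.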
Example #6
        public override void OutputLayerBP(NeuronLayerGroup nlg, TrainingData tD)
        {
            int nLength = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons.Length;

            Neuron.Neuron[] _cNeurons = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons;    // Since only the output layer is touched here, cache its neurons
            Neuron.Neuron[] _bNeurons = nlg.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons;    // Buffer target (note: references the same array as _cNeurons)

            nlg.Predict(tD.Input);  // Forward propagation through the network to generate the output value(s)

            // Calculation of the cost (error term) for each output
            for (int n = 0; n < nLength; n++)
            {
                // The sign here had to be reversed (target - prediction); the layer back propagation may use the opposite convention
                _cNeurons[n].Error = tD.Target[n] - _cNeurons[n].Prediction;
            }

            // WEIGHT UPDATE FOR THE OUTPUT LAYER
            for (int n = 0; n < _cNeurons.Length; n++)
            {
                float lr     = _cNeurons[n].LearningRate;
                float d_Cost = _cNeurons[n].Error * _cNeurons[n].AxonPrime();  // Error * activation derivative

                _bNeurons[n].Bias += lr * d_Cost;

                for (int w = 0; w < _cNeurons[n].Weights.Length; w++)   // Weight loop; d_Cost is constant across weights
                {
                    _bNeurons[n].Weights[w] += lr * d_Cost * _cNeurons[n].Dendrites[w];
                }
            }
            // COPY TO BUFFER
            for (int n = 0; n < _cNeurons.Length; n++)
            {
                nlgBuffer.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons[n].Bias    = _bNeurons[n].Bias;
                nlgBuffer.NeuronLayers[nlg.NeuronLayers.Length - 1].Neurons[n].Weights = _bNeurons[n].Weights;
            }
        }
Example #7
        //readonly Genetics _GA;	// might rename this to "reproductive organ" or something similar

        /// <summary>
        /// Creates a Seer with a fully connected network. To build a custom topology, use Neuron, NeuronLayer and NeuronLayerGroup directly.
        /// </summary>
        /// <param name="inputCount">Number of network inputs</param>
        /// <param name="outputCount">Number of network outputs</param>
        /// <param name="numLayers">Number of neuron layers in the network</param>
        /// <param name="mtBP">Use multi threading for back propagation?</param>
        /// <param name="mtGA">Use multi threading for the genetic algorithm? (currently unused)</param>
        public Seer(int inputCount, int outputCount, int numLayers = 1, bool mtBP = false, bool mtGA = false)
        {
            _BackPropagation = SetThreadingMode(mtBP);
            //_GA = SetGAThreadingMode(mtGA);

            NeuronLayer[] nL = new NeuronLayer[numLayers];
            // If single layer
            if (numLayers == 1)
            {
                nL[0]             = new NeuronLayer(inputCount, outputCount);
                NeuronLayerGroups = new NeuronLayerGroup(nL);
                return;
            }
            // If multilayer
            int hn = (int)Math.Ceiling(outputCount * 1.1d); // 1.1d to make the hidden layers slightly wider than the output layer

            nL[0] = new NeuronLayer(inputCount, hn);        // Add the first layer
            for (int nli = 1; nli < numLayers - 1; nli++)   // Fill the middle layers, skipping the last
            {
                nL[nli] = new NeuronLayer(hn, hn);
            }
            nL[numLayers - 1] = new NeuronLayer(hn, outputCount); // Add the last layer
            NeuronLayerGroups = new NeuronLayerGroup(nL);         // Add the layers to the group
        }
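As a worked example of the sizing logic: new Seer(4, 2, numLayers: 3) gives hn = (int)Math.Ceiling(2 * 1.1) = 3, so the layers come out as NeuronLayer(4, 3), NeuronLayer(3, 3), NeuronLayer(3, 2), i.e. 4 inputs → 3 → 3 → 2 outputs. A minimal construction sketch using only the constructor shown above:

        // 4 inputs, 2 outputs, 3 layers, multithreaded back propagation
        Seer seer = new Seer(inputCount: 4, outputCount: 2, numLayers: 3, mtBP: true);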
Example #8
 /// <summary>
 /// Create a Seer based on a template
 /// </summary>
 /// <param name="tseer">Seer Template</param>
 public Seer(TSeer tseer, bool mtBP = false, bool mtGA = false)
 {
     _BackPropagation = SetThreadingMode(mtBP);
     //_GA = SetGAThreadingMode(mtGA);
     NeuronLayerGroups = new NeuronLayerGroup(tseer.NeuronLayerGroup);
 }
Example #9
 /// <summary>
 /// This constructor should only be used for mutations
 /// </summary>
 /// <param name="neuronLayerGroup"></param>
 public Seer(NeuronLayerGroup neuronLayerGroup, bool mtBP = false, bool mtGA = false)
 {
     _BackPropagation = SetThreadingMode(mtBP);
     //_GA = SetGAThreadingMode(mtGA);
     NeuronLayerGroups = neuronLayerGroup;
 }
Example #10
 public abstract void HiddenLayerBP(NeuronLayerGroup nlg);
Example #11
 /// Back propagation is kept polymorphic so a multithreaded implementation can override it
 public abstract void OutputLayerBP(NeuronLayerGroup nlg, TrainingData tD);
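A minimal sketch of how SetThreadingMode (called in the Seer constructors above) might pick between the Parallel.For implementation in Example #5 and the plain-loop one in Example #6. The base and derived class names here are hypothetical; only the abstract members and the SetThreadingMode call appear in the source:

 // Hypothetical sketch; class names are assumptions, not from the source
 private BackPropagationBase SetThreadingMode(bool mtBP)
 {
     return mtBP ? (BackPropagationBase)new MultiThreadedBackPropagation()
                 : new SingleThreadedBackPropagation();
 }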
Example #12
 public TestSeer(NeuronLayerGroup nL)
 {
     nlg = nL;
 }