Example No. 1
 /// <summary>
 /// Connects this neuron to a downstream neuron. The same Weight instance
 /// is stored on both ends of the connection, so an update to its value is
 /// visible from either side; a zero-initialized entry is also added to
 /// WeightChange for the new connection.
 /// </summary>
 public void Attach(Neuron neuron, Weight weight)
 {
     Downstream.Add(neuron);
     DownstreamWeights.Add(weight);
     neuron.Upstream.Add(this);
     neuron.UpstreamWeights.Add(weight);
     WeightChange.Add(new Weight(0));
 }
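
A minimal usage sketch, hedged: it assumes Neuron has a parameterless constructor and that Weight wraps a double (as new Weight(0) above suggests); neither is shown in the example itself.

 // Sketch only; the constructors used here are assumptions.
 Neuron input  = new Neuron();
 Neuron hidden = new Neuron();

 // Wire input -> hidden. Both neurons store the same Weight instance,
 // so a weight update is visible from either end of the connection.
 input.Attach(hidden, new Weight(0.5));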
Example No. 2
 /// <summary>
 /// Creates a synapse between the given input and output neurons and
 /// assigns it a unique Id.
 /// </summary>
 public Synapse(Neuron inputNeuron, Neuron outputNeuron)
 {
     this.InputNeuron  = inputNeuron;
     this.OutputNeuron = outputNeuron;
     Id = Guid.NewGuid();
 }
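
A hedged usage sketch (again assuming a parameterless Neuron constructor, which this example does not show):

 Neuron a = new Neuron();
 Neuron b = new Neuron();

 // Each synapse gets its own Guid, so two synapses between the same
 // pair of neurons remain distinguishable.
 Synapse synapse = new Synapse(a, b);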
Example No. 3
 // Note: `ref` has no effect here; Neuron is a reference type and the
 // parameter is never reassigned, so a plain parameter would behave the same.
 public void setNeuron(int index, ref Neuron neuron)
 {
     neurons[index] = neuron;
 }
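
A hypothetical call site for illustration; the layer variable and the neurons array behind it are assumptions:

 Neuron n = new Neuron();
 layer.setNeuron(0, ref n); // the array slot and n now reference the same object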
Example No. 4
        public List <double> Learn_old(LearnSample learnSample, double learningRate, out String errorMessage, long epochNo)
        {
            #region Validate input data
            errorMessage = null;

            // Append the bias input (constant 1) if the sample does not already include it.
            if (!learnSample.InputDataContainsBias)
            {
                learnSample.InputData.Add(1);
                learnSample.InputDataContainsBias = true;
            }

            if (learnSample.InputData.Count != Topology.Layers.First().Neurons.Count)
            {
                errorMessage = "Input data count in the learn sample differs from the neuron count on the first layer!";
                return null;
            }

            if (learnSample.OutputData.Length != Topology.Layers.Last().Neurons.Count)
            {
                errorMessage = "Output data count in the learn sample differs from the neuron count on the last layer!";
                return null;
            }
            #endregion

            #region Propagate values forward to make sure we work on up-to-date data
            PropagateValuesForward(learnSample.InputData);
            #endregion

            #region Main learning
            int    layerNo;
            double error;

            Layer lastLayer = Topology.Layers.Last();

            switch (MethodOfLearning)
            {
            case LearningMethod.LINEAR:
                double neuronExpectedValue = learnSample.OutputData.ElementAt(0);

                for (layerNo = Topology.Layers.Count - 1; layerNo >= 1; layerNo--)     // layerNo >= 1 ---> are you sure >=
                {
                    Layer layer = Topology.Layers.ElementAt(layerNo);

                    for (int neuronNo = 0; neuronNo < layer.Neurons.Count; neuronNo++)
                    {
                        Neuron neuron = layer.Neurons.ElementAt(neuronNo);

                        if (!neuron.IsBias)
                        {
                            // Delta rule with a sigmoid derivative:
                            //   error = (target - output) * output * (1 - output)
                            error = (neuronExpectedValue - neuron.GetOutputValue()) * neuron.GetOutputValue() * (1 - neuron.GetOutputValue());

                            for (int inputNo = 0; inputNo < neuron.Inputs.Count; inputNo++)
                            {
                                double correction = error * neuron.Inputs[inputNo].Value * learningRate;

                                neuron.Inputs[inputNo].Weight += correction;
                            }
                        }
                    }
                }
                break;

            case LearningMethod.NOT_LINEAR:
                // 1. Compute gradients for every non-bias neuron, walking layers [n..1].

                // 1A. Gradients for the output layer:
                for (int outputNeuronNo = 0; outputNeuronNo < lastLayer.Neurons.Count; outputNeuronNo++)
                {
                    double outputValue = lastLayer.Neurons[outputNeuronNo].GetOutputValue();
                    error = learnSample.OutputData[outputNeuronNo] - outputValue;

                    double derivative = Calculations.Derivative(
                        outputValue,
                        lastLayer.LayerActivationFunction);

                    lastLayer.Neurons[outputNeuronNo].Gradient = error * derivative;
                }
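                // In symbols, for each output neuron k:
                //   gradient_k = (target_k - output_k) * f'(output_k)
                // (the standard output-layer delta of backpropagation).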

                // 1B. Gradients for the earlier layers, computed backwards from the
                //     input connections of each already-processed neuron:
                layerNo = Topology.Layers.Count - 1;

                while (layerNo > 0)
                {
                    // For each neuron in the current layer (layerNo), update the gradients
                    // of the non-bias neurons it connects to in the previous layer.
                    foreach (Neuron neuron in Topology.Layers[layerNo].Neurons)
                    {
                        foreach (Input input in neuron.Inputs)
                        {
                            Neuron connectedNeuron = Topology.Layers[layerNo - 1].Neurons[input.ConnectedWithPreviousLayerNeuronNo];

                            if (!connectedNeuron.IsBias)
                            {
                                double derivative = Calculations.Derivative(
                                    connectedNeuron.GetOutputValue(),
                                    Topology.Layers[layerNo - 1].LayerActivationFunction);

                                // NOTE: `=` overwrites any contribution from other downstream
                                // neurons; textbook backpropagation sums Gradient * Weight over
                                // all of them before applying the derivative.
                                connectedNeuron.Gradient = neuron.Gradient * input.Weight * derivative;
                            }
                        }
                    }

                    layerNo--;
                }

                // 2. Update the weights:
                layerNo = Topology.Layers.Count - 1;

                while (layerNo > 0)
                {
                    // For each neuron in the current layer (layerNo), update the weights of its inputs.
                    foreach (Neuron neuron in Topology.Layers[layerNo].Neurons)
                    {
                        foreach (Input input in neuron.Inputs)
                        {
                            Neuron connectedNeuron = Topology.Layers[layerNo - 1].Neurons[input.ConnectedWithPreviousLayerNeuronNo];
                            input.Weight += learningRate * neuron.Gradient * connectedNeuron.GetOutputValue();
                        }
                    }

                    layerNo--;
                }
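                // In symbols: w_ij += learningRate * gradient_j * output_i
                // (plain gradient descent, with no momentum term).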

                // 3. Compute the current network error:
                NetworkError    = Calculations.RMSError(learnSample.OutputData, Topology.NetworkOutputValues());
                NetworkErrorSet = true;
                break;
            }

            PropagateValuesForward();
            #endregion

            #region Saving epoch in history
            if (epochNo % CollectSparseHistoryEvery == 0)
            {
                List <double> inputValues = new List <double>();
                foreach (Neuron neuron in Topology.Layers.First().Neurons)
                {
                    inputValues.Add(neuron.Inputs[0].Value);
                }

                List <double> outputValues = new List <double>();
                foreach (Neuron neuron in Topology.Layers.Last().Neurons)
                {
                    outputValues.Add(neuron.GetOutputValue());
                }

                Epoch epoch = new Epoch()
                {
                    InputValues          = inputValues,
                    OutputValues         = outputValues,
                    ExpectedOutputValues = learnSample.OutputData.ToList(),
                    LearningRate         = learningRate,
                    EpochNo = epochNo
                };

                EpochHistory.Add(epoch);
                RmsErrorHistory.Add(new RmsErrorHistoryElement()
                {
                    RmsError = NetworkError,
                    EpochNo  = epochNo
                });
            }
            #endregion

            #region Preparing data to return
            List <double> result = new List <double>();

            for (int neuronNo = 0; neuronNo < lastLayer.Neurons.Count; neuronNo++)
            {
                result.Add(lastLayer.Neurons.ElementAt(neuronNo).GetOutputValue());
            }
            #endregion

            return result;

            #region examples from other applications

            /*
             * // from Internet example ;)
             * _layers.Last().Neurons.ForEach(neuron =>
             * {
             *  neuron.Inputs.ForEach(connection =>
             *  {
             *      var output = neuron.CalculateOutput();
             *      var netInput = connection.GetOutput();
             *
             *      var expectedOutput = _expectedResult[row][_layers.Last().Neurons.IndexOf(neuron)];
             *
             *      var nodeDelta = (expectedOutput - output) * output * (1 - output);
             *      var delta = -1 * netInput * nodeDelta;
             *
             *      connection.UpdateWeight(_learningRate, delta);
             *
             *      neuron.PreviousPartialDerivate = nodeDelta;
             *  });
             * });*/

            /*
             * // From the book's example
             * double neuronExpectedValue = learnSample.OutputData.ElementAt(0);
             *
             * for (int layerNo = Topology.Layers.Count - 1; layerNo >= 1; layerNo--) // layerNo >= 1 ---> are you sure >=
             * {
             *  Layer layer = Topology.Layers.ElementAt(layerNo);
             *
             *  for (int neuronNo = 0; neuronNo < layer.Neurons.Count; neuronNo++)
             *  {
             *      Neuron neuron = layer.Neurons.ElementAt(neuronNo);
             *
             *      if (!neuron.IsBias)
             *      {
             *          //double error = neuronExpectedValue - neuron.OutputValue;
             *          double error = (neuronExpectedValue - neuron.OutputValue) * neuron.OutputValue * (1 - neuron.OutputValue);
             *
             *          for (int inputNo = 0; inputNo < neuron.Inputs.Count; inputNo++)
             *          {
             *              double correction = error * neuron.Inputs[inputNo].Value * learningRate;
             *
             *              neuron.Inputs[inputNo].Weight += correction;
             *          }
             *      }
             *  }
             * }*/
            #endregion
        }
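
A hedged sketch of driving Learn_old over many epochs. The hypothetical TrainAll helper is assumed to live in the same class; only Learn_old's signature comes from the example above.

        // Sketch only; everything except Learn_old's signature is assumed.
        private void TrainAll(List <LearnSample> samples, double learningRate)
        {
            for (long epochNo = 0; epochNo < 10000; epochNo++)
            {
                foreach (LearnSample sample in samples)
                {
                    List <double> output = Learn_old(sample, learningRate, out String errorMessage, epochNo);

                    if (output == null)
                    {
                        Console.WriteLine(errorMessage); // validation failed: sample does not match the topology
                        return;
                    }
                }
            }
        }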
        private void recur_FeedForward(Neuron neuron)
        {
            if (neuron.Fed)
                return;

            foreach (Neuron n in neuron.Upstream)
                recur_FeedForward(n);

            // Safe to call since all upstream values have been updated.
            // Note: only Hidden and Output units' values are adjusted;
            // Constant and Input units are left untouched.
            neuron.UpdateOutputValue();
            neuron.Fed = true;
        }
        private void recur_BackPropagation(Neuron neuron)
        {
            if (neuron.Fed)
                return;

            foreach (Neuron n in neuron.Downstream)
                recur_BackPropagation(n);

            // Safe to call since all downstream error terms have been updated.
            neuron.UpdateErrorTerm();
            neuron.Fed = true;
        }
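
Both recursive walks reuse the Fed flag as a visited marker, so it presumably has to be cleared between the forward and backward passes. A hedged driver sketch; the flat Neurons collection and the helper's placement in the same class are assumptions:

        // Sketch only; Neurons (a flat list of every unit) is assumed.
        private void FeedForwardThenBackPropagate()
        {
            foreach (Neuron n in Neurons) n.Fed = false;        // reset visited markers
            foreach (Neuron n in Neurons) recur_FeedForward(n); // forward pass

            foreach (Neuron n in Neurons) n.Fed = false;        // reset again for the backward pass
            foreach (Neuron n in Neurons) recur_BackPropagation(n);
        }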