Example #1
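Hidden-layer backpropagation: each perceptron's error derivative is accumulated from the forward layer's derivatives and pre-update weights, scaled by the activation derivative, and the weights and bias are then moved by the batch-averaged gradient.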
        public virtual void BackPropagate(Layer prevLayer, Layer forwardLayer, int completedBatches)
        {
            if (forwardLayer == null)
            {
                if (OutputLayer)
                {
                    BackPropagateOutputLayer(prevLayer, completedBatches);
                }
            }
            else
            {
                for (uint perceptron = 0; perceptron < Perceptrons.Length; perceptron++)
                {
                    Perceptron currPerceptron = Perceptrons[perceptron];

                    for (uint batch = 0; batch < completedBatches; batch++)
                    {
                        currPerceptron.BackPropigationDerivatives[batch] = 0;
                        for (uint connectingPerceptron = 0; connectingPerceptron < forwardLayer.Perceptrons.Length; connectingPerceptron++)
                        {
                            currPerceptron.BackPropigationDerivatives[batch] +=
                                forwardLayer.Perceptrons[connectingPerceptron].BackPropigationDerivatives[batch] *
                                forwardLayer.Perceptrons[connectingPerceptron].OldWeights[perceptron];
                        }

                        // TODO: add an evaluate overload that computes the derivative directly from the output stored during the original evaluation
                        currPerceptron.BackPropigationDerivatives[batch] *=
                            ActivationFunction.evaluateDerivative(Perceptrons[perceptron].Outputs[batch], true);
                    }

                    float SummationBackPropDerivative = 0; //currPerceptron.BackPropigationDerivatives * currLayer.ActivationFunction.FunctionLearningRate;
                    int   weight = 0;
                    for (; weight < currPerceptron.Weights.Length - 1; weight++)
                    {
                        currPerceptron.OldWeights[weight] = currPerceptron.Weights[weight];

                        SummationBackPropDerivative = 0;
                        for (uint batch = 0; batch < completedBatches; batch++)
                        {
                            SummationBackPropDerivative += currPerceptron.BackPropigationDerivatives[batch] * prevLayer.Perceptrons[weight].Outputs[batch];
                        }

                        currPerceptron.Weights[weight] -= ActivationFunction.FunctionLearningRate * SummationBackPropDerivative / completedBatches;
                    }

                    // adjust the bias weight: biases have no connecting perceptron
                    // in the previous layer, so the raw derivatives are summed directly
                    currPerceptron.OldWeights[weight] = currPerceptron.Weights[weight];
                    SummationBackPropDerivative       = 0;
                    for (uint batch = 0; batch < completedBatches; batch++)
                    {
                        SummationBackPropDerivative += currPerceptron.BackPropigationDerivatives[batch];
                    }
                    currPerceptron.Weights[weight] -= ActivationFunction.FunctionLearningRate * SummationBackPropDerivative / completedBatches;
                }
            }
        }
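The hidden-layer delta above is the standard chain rule, delta_j = f'(o_j) * sum_k(delta_k * w_kj), computed against the forward layer's OldWeights so that updates already applied to that layer do not contaminate the gradient; each weight then moves by the learning rate times the batch-averaged product of delta_j and the previous layer's output.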
Example #2
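The forward pass for a fully connected layer: each perceptron's linear sum starts from the bias (stored as the last weight), accumulates the weighted outputs of the previous layer, and is passed through the activation function.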
        public virtual void ForwardPropagate(Layer prevLayer, int completedBatches)
        {
            for (uint p = 0; p < Perceptrons.Length; p++)
            {
                Perceptron perceptron = Perceptrons[p];
                // start the linear sum with the bias, which is stored as the last weight
                perceptron.LinearSums[completedBatches] = perceptron.Weights[perceptron.Weights.Length - 1];

                for (uint w = 0; w < perceptron.Weights.Length - 1; w++)
                {
                    perceptron.LinearSums[completedBatches] += perceptron.Weights[w] * prevLayer.Perceptrons[w].Outputs[completedBatches];
                }

                perceptron.Outputs[completedBatches] = ActivationFunction.evaluate(perceptron.LinearSums[completedBatches], false);
            }
        }
Example #3
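A convolutional-layer override of BackPropagate: when there is no previous layer, the error is pulled back from the first fully connected layer through the stored max-pool indices, and each feature map's neighborhood weights and bias are updated against the raw input segment.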
        public override void BackPropagate(Layer prevLayer, Layer forwardLayer, int completedBatches)
        {
            if (prevLayer == null)
            {
                int currentOutputIndex = 0;
                completedBatches--; // index of the most recent completed batch
                for (int feature = 0; feature < Props.NumberFeatures; feature++)
                {
                    for (int w = 0; w < FeatureMaps[feature].Weights.Length; w++)
                    {
                        FeatureMaps[feature].OldWeights[w] = FeatureMaps[feature].Weights[w];
                    }
                    for (int poolRow = 0; poolRow < Props.PoolRows; poolRow++)
                    {
                        for (int poolCol = 0; poolCol < Props.PoolCols; poolCol++, currentOutputIndex++)
                        {
                            //calculate the back propagation summation for max pool index (poolRow, poolCol)
                            float backPropSummation = 0;
                            for (int p = 0; p < forwardLayer.Perceptrons.Length; p++)
                            {
                                Perceptron perceptron = forwardLayer.Perceptrons[p];
                                backPropSummation += perceptron.BackPropigationDerivatives[completedBatches] * perceptron.OldWeights[currentOutputIndex];
                            }

                            // update the weights for this pool element, back-tracking
                            // through the feature map into the input segment
                            int yStart = poolRow * Props.PoolSamplingRows + MaxPools[feature].IndexPairs[poolRow][poolCol].row;
                            int yEnd   = yStart + Props.NeighborhoodRows;
                            int xStart = poolCol * Props.PoolSamplingCols + MaxPools[feature].IndexPairs[poolRow][poolCol].col;
                            int xEnd   = xStart + Props.NeighborhoodCols;

                            int weightIndex = 0;
                            for (int y = yStart; y < yEnd; y++)
                            {
                                for (int x = xStart; x < xEnd; x++, weightIndex++)
                                {
                                    FeatureMaps[feature].Weights[weightIndex] -=
                                        ActivationFunction.FunctionLearningRate * backPropSummation * InputSegment[y][x];
                                }
                            }

                            // update the bias term, stored after the neighborhood weights
                            FeatureMaps[feature].Weights[weightIndex] -=
                                ActivationFunction.FunctionLearningRate * backPropSummation;
                        }
                    }
                }
            }
        }
Example #4
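Resizes the layer's perceptron array in place, keeping the existing perceptrons and allocating fresh ones for any added slots.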
        public void UpdateNumberPerceptrons(uint numPerceptrons)
        {
            if (numPerceptrons != 0 && numPerceptrons != Perceptrons.Length)
            {
                Perceptron[] PerceptronPlaceHolder = Perceptrons;
                Perceptrons = new Perceptron[numPerceptrons];

                for (uint i = 0; i < PerceptronPlaceHolder.Length && i < numPerceptrons; i++)
                {
                    Perceptrons[i] = PerceptronPlaceHolder[i];
                }

                for (uint i = (uint)PerceptronPlaceHolder.Length; i < numPerceptrons; i++)
                {
                    Perceptrons[i] = new Perceptron(InputCount, Batches, OutputLayer);
                }
            }
        }
Example #5
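The layer constructor: it stores the activation function and layer metadata, clamps the batch count to at least one, and allocates the perceptrons.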
        public Layer(uint numPerceptrons, uint numInputs, ActivationFunctions activationFunction, uint batches = 1, bool outputLayer = false)
        {
            ActivationFunction = activationFunction;
            Perceptrons        = new Perceptron[numPerceptrons];
            InputCount         = numInputs;
            OutputLayer        = outputLayer;

            if (batches == 0)
            {
                batches = 1;
            }
            TotalBatches = batches;

            for (uint i = 0; i < Perceptrons.Length; i++)
            {
                Perceptrons[i] = new Perceptron(InputCount, batches, OutputLayer);
            }
        }
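For context, here is a minimal sketch of how these pieces fit together, assuming an ActivationFunctions instance named activation and an input Layer named inputLayer; both names are placeholders for illustration and do not appear in the excerpts.

        // Hypothetical wiring of the Layer API shown above; activation and
        // inputLayer are placeholders, not part of the excerpted code.
        Layer hidden = new Layer(4, 2, activation, batches: 2);
        Layer output = new Layer(1, 4, activation, batches: 2, outputLayer: true);

        // Forward pass: the last argument indexes the batch being evaluated.
        for (int batch = 0; batch < 2; batch++)
        {
            hidden.ForwardPropagate(inputLayer, batch);
            output.ForwardPropagate(hidden, batch);
        }

        // Backward pass: the output layer goes first (forwardLayer == null),
        // then the hidden layer reads the output layer's OldWeights.
        // (Assumes each output perceptron's ExpectedOutputs were filled beforehand.)
        output.BackPropagate(hidden, null, 2);
        hidden.BackPropagate(inputLayer, output, 2);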
Example #6
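The output-layer backward pass: each perceptron's derivative is the difference between its actual and expected output times the activation derivative; weights and bias are then updated exactly as in the hidden-layer case.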
        protected virtual void BackPropagateOutputLayer(Layer prevLayer, int completedBatches)
        {
            for (uint outputPerceptron = 0; outputPerceptron < Perceptrons.Length; outputPerceptron++)
            {
                Perceptron currPerceptron = Perceptrons[outputPerceptron];

                for (uint batch = 0; batch < completedBatches; batch++)
                {
                    // the batch average and the learning rate could alternatively be applied here
                    currPerceptron.BackPropigationDerivatives[batch] = (currPerceptron.Outputs[batch] - currPerceptron.ExpectedOutputs[batch]) *
                                                                       ActivationFunction.evaluateDerivative(currPerceptron.Outputs[batch], true);
                }

                float SummationBackPropDerivative = 0;
                int   weight = 0;
                for (; weight < currPerceptron.Weights.Length - 1; weight++)
                {
                    currPerceptron.OldWeights[weight] = currPerceptron.Weights[weight];

                    SummationBackPropDerivative = 0;
                    for (uint batch = 0; batch < completedBatches; batch++)
                    {
                        SummationBackPropDerivative += currPerceptron.BackPropigationDerivatives[batch] * prevLayer.Perceptrons[weight].Outputs[batch];
                    }

                    currPerceptron.Weights[weight] -= ActivationFunction.FunctionLearningRate * SummationBackPropDerivative / completedBatches;
                }

                currPerceptron.OldWeights[weight] = currPerceptron.Weights[weight];

                // adjust the bias weight: biases have no connecting perceptron
                // in the previous layer, so the raw derivatives are summed directly
                SummationBackPropDerivative = 0;
                for (uint batch = 0; batch < completedBatches; batch++)
                {
                    SummationBackPropDerivative += currPerceptron.BackPropigationDerivatives[batch];
                }
                currPerceptron.Weights[weight] -= ActivationFunction.FunctionLearningRate * SummationBackPropDerivative / completedBatches;
            }
        }
Example #7
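A console driver exercising the simpler models: linear and multiple linear regression, single perceptrons trained as AND and OR gates, and a small multilayer perceptron.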
        static void Main(string[] args)
        {
            
            #region Linear Regression

            Console.WriteLine("Linear Regression");
            double[] input = new double[] { -2, -1, 1, 4 };
            double[] output = new double[] { -3, -1, 2, 3 };
            LinearRegression linearRegression = new LinearRegression();
            linearRegression.Training(input, output);
            Console.WriteLine("Result: " + Math.Round(linearRegression.Run(0.5d),2));
            Console.WriteLine("Coefficient Determination: " + Math.Round(linearRegression.CoefficientDetermination,2));
            Console.WriteLine("--------------------------");

            #endregion

            #region Multiple Linear Regression

            double[,] inputTrain = { { 2d, 3d }, { 2.5d, 2d }, { 1.8d, 4d } };
            double[] outputTrain = { 5d, 6d, 4d };
            MultipleLinearRegression mlr = new MultipleLinearRegression(inputTrain.GetLength(1), 0.5d);
            mlr.Training(inputTrain, outputTrain);
            Console.WriteLine("Multiple Linear Regression");
            Console.WriteLine("Result: " + Math.Round(mlr.Run(new[] { 2.6d, 2.1d }), 2));
            Console.WriteLine("--------------------------");

            #endregion

            #region Perceptron

            Console.WriteLine("Perceptron");

            #region AND Gate
            double[,] inputAnd = new double[,] { { 1, 0 }, { 1, 1 }, { 0, 1 }, { 0, 0 } };
            int[] outputAnd = new int[] { 0, 1, 0, 0 };

            Perceptron p1 = new Perceptron();
            p1.Training(inputAnd, outputAnd);
            
            Console.WriteLine("AND Gate");
            Console.WriteLine("Iteration of training: " + p1.Iteration);
            Console.WriteLine("Test 1: " + p1.Run(new double[,] { { 1, 0 } }));
            Console.WriteLine("Test 2: " + p1.Run(new double[,] { { 1, 1 } }));
            #endregion

            #region OR Gate
            double[,] inputOr = new double[,] { { 1, 0 }, { 1, 1 }, { 0, 1 }, { 0, 0 } };
            int[] outputOr = new int[] { 1, 1, 1, 0 };

            Perceptron p2 = new Perceptron();
            p2.Training(inputOr, outputOr);
            Console.WriteLine("OR Gate");
            Console.WriteLine("Iteration of training: " + p2.Iteration);
            Console.WriteLine("Test 1: " + p2.Run(new double[,] { { 0, 1 } }));
            Console.WriteLine("Test 2: " + p2.Run(new double[,] { { 0, 0 } }));
            #endregion
            
            Console.WriteLine("--------------------------");

            #endregion

            #region Multilayer Perceptron
            Console.WriteLine("Multilayer Perceptron");
            MultilayerPerceptron mlp = new MultilayerPerceptron(2, 5, 1);
            mlp.Training(new double[,] { { 1, 1 }, { 1, 0 }, { 0, 0 }, { 0, 1 } }, new double[] { 1, 1, 0, 1 });
            Console.WriteLine("OR Gate: " + Math.Round(mlp.Run(new double[] { 0, 1 }).FirstOrDefault(), 1));
            Console.WriteLine("--------------------------");
            #endregion

            Console.ReadKey();
        }