Code example #1
 public void Calculate(double[,] input, bool output)
 {
     Calculate(Maths.Convert(input), output);
 }
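
This overload just reshapes the 2D input and delegates to the 1D version. Maths.Convert itself is not shown in these examples; a minimal sketch of what the two directions presumably look like (the bodies below are assumptions, only the name comes from the calls above):

 //Hypothetical flatten: 2D matrix to row-major 1D array (the real Maths.Convert may differ)
 public static double[] Convert(double[,] input)
 {
     int h = input.GetLength(0), w = input.GetLength(1);
     var output = new double[h * w];
     for (int i = 0; i < h; i++)
         for (int j = 0; j < w; j++)
             output[i * w + j] = input[i, j];
     return output;
 }
 //Hypothetical reshape back to 2D: assumes a square image (e.g. 28x28 for MNIST)
 public static double[,] Convert(double[] input)
 {
     int side = (int)Math.Sqrt(input.Length);
     var output = new double[side, side];
     for (int i = 0; i < side; i++)
         for (int j = 0; j < side; j++)
             output[i, j] = input[i * side + j];
     return output;
 }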
Code example #2
 /// <summary>
 /// Backpropagation of error and calculation of gradients
 /// </summary>
 /// <param name="input">Previous layer's values</param>
 /// <param name="isoutput">Whether the layer is the output layer</param>
 public void Backprop(double[] input, iLayer outputlayer, bool isoutput, int correct)
 {
     //Calculate error
     if (isoutput)
     {
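         //Squared error against a one-hot target: dL/dValues[i] = 2 * (Values[i] - target)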
         Errors = new double[Length];
         for (int i = 0; i < Length; i++)
         {
             Errors[i] = 2d * (Values[i] - (i == correct ? 1d : 0d));
         }
     }
     else
     {
         if (outputlayer is FullyConnectedLayer)
         {
             var FCLOutput = outputlayer as FullyConnectedLayer;
             Errors = new double[Length];
             for (int k = 0; k < FCLOutput.Length; k++)
             {
                 for (int j = 0; j < Length; j++)
                 {
                     Errors[j] += FCLOutput.Weights[k, j] * Maths.TanhDerriv(outputlayer.ZVals[k]) * FCLOutput.Errors[k];
                 }
             }
         }
         if (outputlayer is ConvolutionLayer)
         {
             var CLOutput = outputlayer as ConvolutionLayer;
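             //Propagate errors backward: a full (zero-padded) convolution of the
             //next layer's kernel with its error map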
             Errors = Maths.Convert(CLOutput.FullConvolve(CLOutput.Weights, Maths.Convert(CLOutput.Errors)));
         }
         if (outputlayer is PoolingLayer)
         {
             var PLOutput = outputlayer as PoolingLayer;
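             //Mask presumably marks which inputs max pooling kept (nonzero) versus dropped (0);
             //errors are routed back only to the kept positions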
             int iterator = 0;
             Errors = new double[Length];
             for (int i = 0; i < Length; i++)
             {
                 if (PLOutput.Mask[i] == 0)
                 {
                     continue;
                 }
                 Errors[i] = PLOutput.Errors[iterator];
                 iterator++;
             }
         }
     }
     //Calculate gradients
     for (int i = 0; i < Length; i++)
     {
         for (int ii = 0; ii < InputLength; ii++)
         {
             //Weight gradients
             WeightGradient[i, ii] = -1 * input[ii] * Maths.TanhDerriv(ZVals[i]) * Errors[i];
             if (NN.UseMomentum)
             {
                 if (NN.UseNesterov)
                 {
                     //Nesterov momentum formula
                     WeightGradient[i, ii] = ((1 + NN.Momentum) * (NN.LearningRate * WeightGradient[i, ii]))
                                             + (NN.Momentum * NN.Momentum * WMomentum[i, ii]);
                 }
                 else
                 {
                     //Standard momentum formula
                     WeightGradient[i, ii] = (WMomentum[i, ii] * NN.Momentum) + (NN.LearningRate * WeightGradient[i, ii]);
                 }
                 //Momentum is the previous iteration's gradient
                 WMomentum[i, ii] = WeightGradient[i, ii];
             }
         }
         if (isoutput)
         {
             continue;
         }
         //Bias gradients
         BiasGradient[i] = -1 * Maths.TanhDerriv(ZVals[i]) * Errors[i];
         if (NN.UseMomentum)
         {
             if (NN.UseNesterov)
             {
                 BiasGradient[i] = ((1 + NN.Momentum) * (NN.LearningRate * BiasGradient[i]))
                                   + (NN.Momentum * NN.Momentum * BMomentum[i]);
             }
             else
             {
                 BiasGradient[i] = (BMomentum[i] * NN.Momentum) + (NN.LearningRate * BiasGradient[i]);
             }
             //Momentum is the previous iteration's gradient
             BMomentum[i] = BiasGradient[i];
         }
     }
 }
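
Note how both momentum branches fold the learning rate into the value stored back into WMomentum/BMomentum, so the "momentum" buffers hold the previous iteration's already-scaled update rather than a raw velocity. A standalone sketch of the two update rules on a single scalar weight (names and constants here are illustrative, not from the project):

 double momentum = 0.9, learningRate = 0.01;
 double wMomentum = 0; //previous iteration's adjusted gradient

 double Step(double rawGradient, bool useNesterov)
 {
     double g = useNesterov
         //Nesterov-style update: (1 + m) * lr * g + m^2 * v
         ? ((1 + momentum) * (learningRate * rawGradient)) + (momentum * momentum * wMomentum)
         //Standard momentum: m * v + lr * g
         : (wMomentum * momentum) + (learningRate * rawGradient);
     wMomentum = g; //store this iteration's adjusted gradient for the next step
     return g;
 }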
Code example #3
File: PoolingLayer.cs Project: qdm097/MNIST-CNN
 public void Calculate(double[] input, bool useless)
 {
     Calculate(Maths.Convert(input), useless);
 }
Code example #4
 /// <summary>
 /// Calculates the product of the weight matrix and the input vector.
 /// They should be size [x, y] and [y], respectively, where x is the output size and y is the latent space's size
 /// </summary>
 /// <param name="input">The input matrix</param>
 /// <param name="isoutput">Whether to use hyperbolic tangent on the output</param>
 public void Calculate(double[] input, bool isoutput)
 {
     Calculate(Maths.Convert(input), isoutput);
 }
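
The overload above only reshapes its input; the summary describes a weight-matrix-times-input-vector product. A hypothetical standalone version of that product, with the tanh squash behind a flag (the name Forward and the applyTanh parameter are assumptions; how the project maps isoutput onto the squash is not visible here):

 //Hypothetical sketch: z = W * input, optionally squashed by tanh
 public static double[] Forward(double[,] weights, double[] input, bool applyTanh)
 {
     int outLen = weights.GetLength(0), inLen = weights.GetLength(1);
     var z = new double[outLen];
     for (int i = 0; i < outLen; i++)
     {
         for (int j = 0; j < inLen; j++)
             z[i] += weights[i, j] * input[j];
         if (applyTanh) z[i] = Math.Tanh(z[i]);
     }
     return z;
 }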
Code example #5
 public void Backprop(double[] input, iLayer outputlayer, bool uselessbool, int uselessint)
 {
     //Calc errors
     double[,] Input = Maths.Convert(input);
     if (outputlayer is FullyConnectedLayer)
     {
         //Errors with respect to the output of the convolution
         //dl/do
         Errors = new double[outputlayer.InputLength];
         for (int k = 0; k < outputlayer.Length; k++)
         {
             for (int j = 0; j < outputlayer.InputLength; j++)
             {
                 Errors[j] += outputlayer.Weights[k, j] * Maths.TanhDerriv(outputlayer.ZVals[k]) * outputlayer.Errors[k];
             }
         }
     }
     if (outputlayer is ConvolutionLayer)
     {
         var CLOutput = outputlayer as ConvolutionLayer;
         //Flipped?
         Errors = Maths.Convert(CLOutput.FullConvolve(CLOutput.Weights, Maths.Convert(CLOutput.Errors)));
     }
     if (outputlayer is PoolingLayer)
     {
         var PLOutput = outputlayer as PoolingLayer;
         int iterator = 0;
         Errors = new double[ZVals.Length];
         for (int i = 0; i < ZVals.Length; i++)
         {
             if (PLOutput.Mask[i] == 0)
             {
                 continue;
             }
             Errors[i] = PLOutput.Errors[iterator];
             iterator++;
         }
     }
     //Calc gradients (errors with respect to the filter)
     Gradients = Convolve(Maths.Convert(Maths.Scale(-1, Errors)), Input);
     if (NN.UseMomentum)
     {
         for (int i = 0; i < KernelSize; i++)
         {
             for (int ii = 0; ii < KernelSize; ii++)
             {
                 if (NN.UseNesterov)
                 {
                     //Nesterov momentum formula
                     Gradients[i, ii] = ((1 + NN.Momentum) * (NN.LearningRate * Gradients[i, ii]))
                                        + (NN.Momentum * NN.Momentum * WMomentum[i, ii]);
                 }
                 else
                 {
                     //Standard momentum formula
                     Gradients[i, ii] = (WMomentum[i, ii] * NN.Momentum) + (NN.LearningRate * Gradients[i, ii]);
                 }
                 //Momentum is the previous iteration's gradient
                 WMomentum[i, ii] = Gradients[i, ii];
             }
         }
     }
 }
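
The gradient line treats the negated error map as a convolution kernel slid over the layer input, which yields a KernelSize x KernelSize result (input size minus error-map size plus one). A minimal sketch of a stride-1 "valid" convolution with those semantics (an assumption about the project's Convolve, which is not shown here):

 //Hypothetical stride-1 "valid" convolution: output shrinks by (kernel size - 1)
 public static double[,] Convolve(double[,] kernel, double[,] input)
 {
     int k = kernel.GetLength(0);
     int outH = input.GetLength(0) - k + 1;
     int outW = input.GetLength(1) - k + 1;
     var output = new double[outH, outW];
     for (int i = 0; i < outH; i++)
         for (int j = 0; j < outW; j++)
             for (int a = 0; a < k; a++)
                 for (int b = 0; b < k; b++)
                     output[i, j] += kernel[a, b] * input[i + a, j + b];
     return output;
 }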
Code example #6
File: Form1.cs Project: qdm097/MNIST-CNN
        void Learn()
        {
            Thread thread = new Thread(() =>
            {
                int imageiterator = 0;
                while (Run)
                {
                    List<double[]> Images = new List<double[]>();
                    //[0] = mean of image; [1] = stddev of image
                    List<double[]> Stats = new List<double[]>();
                    List<int> Labels = new List<int>();
                    if (!Testing)
                    {
                        double mean   = 0;
                        double stddev = 0;
                        //Batch generation
                        for (int i = 0; i < BatchSize; i++)
                        {
                            //Find image and label
                            Images.Add(IO.ReadNextImage()); Labels.Add(IO.ReadNextLabel());
                            //Generate stats for image
                            double samplemean = Maths.CalcMean(Images[i]);
                            Stats.Add(new double[] { samplemean, Maths.CalcStdDev(Images[i], samplemean) });
                            mean += Stats[i][0]; stddev += Stats[i][1];
                        }
                        //Adjust stats for batchsize
                        mean   /= BatchSize;
                        stddev /= BatchSize;
                        for (int i = 0; i < BatchSize; i++)
                        {
                            //Batch normalization
                            if (BatchNrmlCB.Checked)
                            {
                                nn.Run(Maths.Normalize(Images[i], mean, stddev), Labels[i], false);
                            }
                            else
                            {
                                nn.Run(Maths.Normalize(Images[i]), Labels[i], false);
                            }
                        }
                        nn.Run(BatchSize);
                    }
                    else
                    {
                        if (testiterator >= 10000)
                        {
                            Run = false; MessageBox.Show("Full epoch completed");
                            //Stop before reading past the end of the test set
                            break;
                        }
                        //Find image and label (the lists are fresh each pass, so the new entry is at index 0;
                        //indexing by testiterator would overrun the single-element lists after the first test pass)
                        Images.Add(IO.ReadNextImage()); Labels.Add(IO.ReadNextLabel());
                        double mean = Maths.CalcMean(Images[0]);
                        nn.Run(Maths.Normalize(Images[0], mean, Maths.CalcStdDev(Images[0], mean)), Labels[0], true); testiterator++;
                    }
                    image = Images[Images.Count - 1];
                    Invoke((Action) delegate {
                        AvgGradTxt.Text    = Math.Round(nn.AvgGradient, 15).ToString();
                        AvgCorrectTxt.Text = Math.Round(nn.PercCorrect, 15).ToString();
                        ErrorTxt.Text      = Math.Round(nn.Error, 15).ToString();
                        if (imageiterator >= imagespeed)
                        {
                            imageiterator     = 0;
                            pictureBox1.Image = FromTwoDimIntArrayGray(ResizeImg(Maths.Convert(image)));
                            GuessTxt.Text     = nn.Guess.ToString();
                        }
                        imageiterator++;
                    });
                }
                IO.Write(nn);
            });

            thread.IsBackground = true;
            thread.Start();
        }
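
Both branches standardize each image before nn.Run: with the batch's pooled mean/stddev when the BatchNrmlCB checkbox is ticked, otherwise per image (the single-argument Maths.Normalize overload presumably uses per-image statistics or a fixed scale). The three-argument Maths.Normalize is not shown; a sketch assuming plain standardization:

 //Hypothetical standardization matching the calls above; the real
 //Maths.Normalize may differ (e.g. in how it guards a zero stddev)
 public static double[] Normalize(double[] image, double mean, double stddev)
 {
     //Avoid dividing by zero on blank images
     if (stddev == 0) { stddev = 1; }
     var result = new double[image.Length];
     for (int i = 0; i < image.Length; i++)
         result[i] = (image[i] - mean) / stddev;
     return result;
 }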