Example #1
        private void UpdateMiniBatch(List<Tuple<double[,], double[,]>> miniBatch, double eta)
        {
            // Accumulators for the bias and weight gradients, summed over the mini-batch.
            List<double[,]> nablaB = new List<double[,]>();
            List<double[,]> nablaW = new List<double[,]>();

            for (int i = 0; i < numberLayers - 1; i++)
            {
                nablaB.Add(new double[biases[i].GetLength(0), biases[i].GetLength(1)]);
                nablaW.Add(new double[weights[i].GetLength(0), weights[i].GetLength(1)]);
            }

            // Each tuple is one (input, expected output) training pair; sum the
            // per-pair gradients produced by backpropagation.
            foreach (Tuple<double[,], double[,]> batch in miniBatch)
            {
                Tuple<List<double[,]>, List<double[,]>> deltaNablas = BackPropogation(batch);
                for (int j = 0; j < numberLayers - 1; j++)
                {
                    nablaB[j] = NeuralMath.AddMatrix(nablaB[j], deltaNablas.Item1[j]);
                    nablaW[j] = NeuralMath.AddMatrix(nablaW[j], deltaNablas.Item2[j]);
                }
            }

            // Gradient-descent step with the gradient averaged over the batch:
            // w -> w - (eta / m) * sum(nablaW), and likewise for the biases.
            for (int i = 0; i < numberLayers - 1; i++)
            {
                biases[i] = NeuralMath.SubtractMatrix(biases[i], NeuralMath.ScaleMatrix(nablaB[i], eta / miniBatch.Count));
                weights[i] = NeuralMath.SubtractMatrix(weights[i], NeuralMath.ScaleMatrix(nablaW[i], eta / miniBatch.Count));
            }
        }
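UpdateMiniBatch only applies a single averaged step; the stochastic-gradient-descent loop that would drive it is not part of this excerpt. A minimal driver sketch, assuming the same field names plus a hypothetical StochasticGradientDescent method, with System and System.Linq imported:

        // Hypothetical driver (not from the excerpt): shuffle the data, slice it
        // into mini-batches, and let UpdateMiniBatch take one step per slice.
        private void StochasticGradientDescent(
            List<Tuple<double[,], double[,]>> trainingData,
            int epochs, int miniBatchSize, double eta)
        {
            Random rng = new Random();
            for (int epoch = 0; epoch < epochs; epoch++)
            {
                // Shuffle by sorting on a random key (simple and adequate here).
                List<Tuple<double[,], double[,]>> shuffled =
                    trainingData.OrderBy(_ => rng.Next()).ToList();

                for (int k = 0; k < shuffled.Count; k += miniBatchSize)
                {
                    int size = Math.Min(miniBatchSize, shuffled.Count - k);
                    UpdateMiniBatch(shuffled.GetRange(k, size), eta);
                }
            }
        }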
Example #2
        private double[,] FeedForward(double[,] a)
        {
            double[,] output = a;

            // Propagate the activation through each layer: a' = sigmoid(W * a + b).
            for (int layer = 0; layer < numberLayers - 1; layer++)
            {
                double[,] wa = NeuralMath.DotMatrix(weights[layer], output);
                output = NeuralMath.Sigmoid(NeuralMath.AddMatrix(wa, biases[layer]));
            }

            return output;
        }
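FeedForward leans on NeuralMath.Sigmoid being an element-wise logistic sigmoid over a matrix; the NeuralMath class itself is not in this excerpt. A minimal sketch consistent with the usage above (an assumption, not the library's actual code):

        // Sketch (assumed from usage): element-wise logistic sigmoid
        // 1 / (1 + e^-z) applied to every entry of the matrix.
        public static double[,] Sigmoid(double[,] z)
        {
            int rows = z.GetLength(0), cols = z.GetLength(1);
            double[,] result = new double[rows, cols];
            for (int r = 0; r < rows; r++)
                for (int c = 0; c < cols; c++)
                    result[r, c] = 1.0 / (1.0 + Math.Exp(-z[r, c]));
            return result;
        }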
Example #3
        private Tuple<List<double[,]>, List<double[,]>> BackPropogation(Tuple<double[,], double[,]> batch)
        {
            // Per-layer gradient holders, shaped like the biases and weights.
            List<double[,]> nablaB = new List<double[,]>();
            List<double[,]> nablaW = new List<double[,]>();

            for (int i = 0; i < numberLayers - 1; i++)
            {
                nablaB.Add(new double[biases[i].GetLength(0), biases[i].GetLength(1)]);
                nablaW.Add(new double[weights[i].GetLength(0), weights[i].GetLength(1)]);
            }

            // Feed forward, recording every weighted input z and activation.
            double[,] activation = batch.Item1;
            List<double[,]> activations = new List<double[,]> { batch.Item1 };
            List<double[,]> zs = new List<double[,]>();

            for (int i = 0; i < numberLayers - 1; i++)
            {
                double[,] z = NeuralMath.AddMatrix(NeuralMath.DotMatrix(weights[i], activation), biases[i]);
                zs.Add(z);
                activation = NeuralMath.Sigmoid(z);
                activations.Add(activation);
            }

            // Backward pass. Output-layer error: delta = (a - y) (.) sigmoid'(z),
            // where (.) is the element-wise product.
            double[,] delta = NeuralMath.MultiplyMatrix(
                NeuralMath.CostDerivative(activations[activations.Count - 1], batch.Item2),
                NeuralMath.SigmoidPrime(zs[zs.Count - 1]));
            nablaB[nablaB.Count - 1] = delta;
            nablaW[nablaW.Count - 1] = NeuralMath.DotMatrix(delta, NeuralMath.Transpose(activations[activations.Count - 2]));

            // Propagate the error backwards; l counts layers from the output,
            // so index (Count - l) addresses the l-th layer from the back.
            for (int l = 2; l < numberLayers; l++)
            {
                double[,] z = zs[zs.Count - l];
                double[,] sp = NeuralMath.SigmoidPrime(z);
                delta = NeuralMath.MultiplyMatrix(
                    NeuralMath.DotMatrix(NeuralMath.Transpose(weights[weights.Count - l + 1]), delta), sp);
                nablaB[nablaB.Count - l] = delta;
                nablaW[nablaW.Count - l] = NeuralMath.DotMatrix(delta, NeuralMath.Transpose(activations[activations.Count - l - 1]));
            }

            return Tuple.Create(nablaB, nablaW);
        }
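BackPropogation additionally assumes NeuralMath.SigmoidPrime, NeuralMath.CostDerivative, and NeuralMath.MultiplyMatrix, where MultiplyMatrix must be the element-wise (Hadamard) product, as opposed to DotMatrix's standard matrix product. None of these appear in the excerpt; the sketches below follow the standard quadratic-cost backpropagation this code mirrors and are assumptions, not the library's actual implementations:

        // Sketch (assumed): derivative of the sigmoid,
        // sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)), element-wise.
        public static double[,] SigmoidPrime(double[,] z)
        {
            double[,] s = Sigmoid(z);
            int rows = z.GetLength(0), cols = z.GetLength(1);
            double[,] result = new double[rows, cols];
            for (int r = 0; r < rows; r++)
                for (int c = 0; c < cols; c++)
                    result[r, c] = s[r, c] * (1.0 - s[r, c]);
            return result;
        }

        // Sketch (assumed): quadratic-cost derivative dC/da = (output - expected),
        // element-wise, consistent with how the output-layer delta is formed above.
        public static double[,] CostDerivative(double[,] output, double[,] expected)
        {
            int rows = output.GetLength(0), cols = output.GetLength(1);
            double[,] result = new double[rows, cols];
            for (int r = 0; r < rows; r++)
                for (int c = 0; c < cols; c++)
                    result[r, c] = output[r, c] - expected[r, c];
            return result;
        }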