/// <summary>
/// Propagates an input activation through every layer of the network and
/// returns the activation produced by the final layer.
/// </summary>
/// <param name="a">Input activation matrix fed to the first layer.</param>
/// <returns>The output activation matrix after all layers have been applied.</returns>
private double[,] FeedForward(double[,] a)
{
    double[,] activation = a;

    // Each step applies the layer's affine transform (W·a + b) followed by
    // the sigmoid nonlinearity; numberLayers - 1 weight/bias pairs exist.
    for (int layer = 0; layer < numberLayers - 1; layer++)
    {
        double[,] weighted = NeuralMath.DotMatrix(weights[layer], activation);
        double[,] z = NeuralMath.AddMatrix(weighted, biases[layer]);
        activation = NeuralMath.Sigmoid(z);
    }

    return activation;
}
/// <summary>
/// Runs one forward pass followed by a backward pass for a single
/// (input, expected-output) pair and returns the gradient of the cost with
/// respect to every bias and weight matrix in the network.
/// </summary>
/// <param name="batch">Item1 is the input activation; Item2 is the expected output.</param>
/// <returns>
/// A tuple of (bias gradients, weight gradients), one matrix per layer,
/// shaped identically to <c>biases</c> and <c>weights</c> respectively.
/// </returns>
private Tuple<List<double[,]>, List<double[,]>> BackPropogation(Tuple<double[,], double[,]> batch)
{
    // Zero-initialised gradient accumulators, one matrix per layer.
    var gradB = new List<double[,]>();
    var gradW = new List<double[,]>();
    for (int layer = 0; layer < numberLayers - 1; layer++)
    {
        gradB.Add(new double[biases[layer].GetLength(0), biases[layer].GetLength(1)]);
        gradW.Add(new double[weights[layer].GetLength(0), weights[layer].GetLength(1)]);
    }

    // Forward pass: record every weighted input z = W·a + b and every
    // activation, layer by layer; both are needed for the backward pass.
    double[,] act = batch.Item1;
    var acts = new List<double[,]> { batch.Item1 };
    var weightedInputs = new List<double[,]>();
    for (int layer = 0; layer < numberLayers - 1; layer++)
    {
        double[,] z = NeuralMath.AddMatrix(NeuralMath.DotMatrix(weights[layer], act), biases[layer]);
        weightedInputs.Add(z);
        act = NeuralMath.Sigmoid(z);
        acts.Add(act);
    }

    // Output-layer error: cost derivative, elementwise-multiplied by
    // sigmoid'(z) at the last layer.
    double[,] delta = NeuralMath.MultiplyMatrix(
        NeuralMath.CostDerivative(acts[acts.Count - 1], batch.Item2),
        NeuralMath.SigmoidPrime(weightedInputs[weightedInputs.Count - 1]));
    gradB[gradB.Count - 1] = delta;
    gradW[gradW.Count - 1] = NeuralMath.DotMatrix(delta, NeuralMath.Transpose(acts[acts.Count - 2]));

    // Propagate the error backwards; l counts layers from the output side,
    // so Count - l indexes the l-th layer from the end (the C# equivalent of
    // Python's negative indexing in the reference algorithm).
    for (int l = 2; l < numberLayers; l++)
    {
        double[,] z = weightedInputs[weightedInputs.Count - l];
        double[,] sp = NeuralMath.SigmoidPrime(z);
        delta = NeuralMath.MultiplyMatrix(
            NeuralMath.DotMatrix(NeuralMath.Transpose(weights[weights.Count - l + 1]), delta),
            sp);
        gradB[gradB.Count - l] = delta;
        gradW[gradW.Count - l] = NeuralMath.DotMatrix(delta, NeuralMath.Transpose(acts[acts.Count - l - 1]));
    }

    return Tuple.Create(gradB, gradW);
}