/// <summary>
/// Applies the accumulated gradients to the weights and biases as a batch
/// </summary>
/// <remarks>
/// The batch size (number of trials run per cycle), the clipping threshold (the max/min value a weight or bias may take),
/// and the RMS decay rate are read from the NN configuration rather than passed as parameters
/// </remarks>
public override void Descend()
{
    //Calculate updates from the accumulated gradients
    WUpdates = new double[Length, InputLength];
    BUpdates = new double[Length];

    for (int i = 0; i < Length; i++)
    {
        for (int ii = 0; ii < InputLength; ii++)
        {
            //Normal gradient descent update
            WUpdates[i, ii] = WeightGradient[i, ii] * (2d / NN.BatchSize);
            //Root mean square propagation
            if (NN.UseRMSProp)
            {
                WRMSGrad[i, ii] = (WRMSGrad[i, ii] * NN.RMSDecay) + ((1 - NN.RMSDecay) * (WUpdates[i, ii] * WUpdates[i, ii]));
                WUpdates[i, ii] = (WUpdates[i, ii] / (Math.Sqrt(WRMSGrad[i, ii]) /* + NN.Infinitesimal*/));
            }
            WUpdates[i, ii] *= NN.LearningRate;
        }
        //Normal gradient descent update
        BUpdates[i] = BiasGradient[i] * (2d / NN.BatchSize);
        //Root mean square propagation
        if (NN.UseRMSProp)
        {
            BRMSGrad[i] = (BRMSGrad[i] * NN.RMSDecay) + ((1 - NN.RMSDecay) * (BUpdates[i] * BUpdates[i]));
            BUpdates[i] = (BUpdates[i] / (Math.Sqrt(BRMSGrad[i]) /* + NN.Infinitesimal*/));
        }
        BUpdates[i] *= NN.LearningRate;
    }

    //Gradient normalization
    if (NN.NormGradients)
    {
        WUpdates = Maths.Scale(NN.LearningRate, Maths.Normalize(WUpdates));
        BUpdates = Maths.Scale(NN.LearningRate, Maths.Normalize(BUpdates));
    }

    //Apply updates
    for (int i = 0; i < Length; i++)
    {
        for (int ii = 0; ii < InputLength; ii++)
        {
            //Update weight and running average
            Weights[i, ii] -= WUpdates[i, ii];
            AvgGradient -= WUpdates[i, ii];
            //Weight clipping
            if (NN.UseClipping)
            {
                if (Weights[i, ii] > NN.ClipParameter) { Weights[i, ii] = NN.ClipParameter; }
                if (Weights[i, ii] < -NN.ClipParameter) { Weights[i, ii] = -NN.ClipParameter; }
            }
        }
        Biases[i] -= BUpdates[i];
        //Bias clipping
        if (NN.UseClipping)
        {
            if (Biases[i] > NN.ClipParameter) { Biases[i] = NN.ClipParameter; }
            if (Biases[i] < -NN.ClipParameter) { Biases[i] = -NN.ClipParameter; }
        }
    }

    //Reset gradients for the next batch
    WeightGradient = new double[Length, InputLength];
    BiasGradient = new double[Length];
}
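
// --- Sketch only: not part of the layer ---
// A single RMSProp-style step for one parameter, mirroring the arithmetic Descend() applies
// when NN.UseRMSProp is enabled:
//   rms <- decay * rms + (1 - decay) * g^2,   w <- w - lr * g / sqrt(rms)
// Here `gradient` is the per-parameter gradient already averaged over the batch. The method
// name and tuple return are hypothetical; the epsilon term is commented out in Descend()
// above, so it is omitted here as well.
static (double weight, double rms) RmsPropStep(double weight, double gradient, double rms, double decay, double learningRate)
{
    rms = decay * rms + (1 - decay) * gradient * gradient;           //Decayed running mean of squared gradients
    return (weight - learningRate * gradient / Math.Sqrt(rms), rms); //Normalized, scaled step
}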
/// <summary>
/// Convolves the kernel with each (padded) input in the batch and applies the layer's activation.
/// Uses a standard convolution when DownOrUp is true, otherwise a full convolution
/// </summary>
/// <param name="inputs">The input matrices, one flattened matrix per sample in the batch</param>
/// <param name="isoutput">Whether this is the output layer (not used here; the activation is chosen by ActivationFunction)</param>
public override void Calculate(List<double[]> inputs, bool isoutput)
{
    ZVals = new List<double[]>();
    for (int b = 0; b < NN.BatchSize; b++)
    {
        ZVals.Add(Maths.Convert(DownOrUp
            ? Convolve(Weights, Pad(Maths.Convert(inputs[b])))
            : FullConvolve(Weights, Pad(Maths.Convert(inputs[b])))));
    }

    //If normalizing, do so, but only if it won't return an all-zero matrix
    if (NN.NormOutputs && ZVals[0].Length > 1)
    {
        ZVals = Maths.Normalize(ZVals);
    }

    //Use the specified type of activation function
    if (ActivationFunction == 0) { Values = Maths.Tanh(ZVals); return; }
    if (ActivationFunction == 1) { Values = Maths.ReLu(ZVals); return; }
    Values = ZVals;
}
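
// --- Sketch only: a hypothetical forward pass over a whole network ---
// Shows how Calculate is presumably chained layer by layer for a batch; the `layers` list,
// its ordering, and the use of Values as the hand-off between layers are assumptions about
// the surrounding code. Only the Calculate(List<double[]>, bool) signature comes from the
// method above.
static List<double[]> ForwardPassSketch(List<Layer> layers, List<double[]> batchInputs)
{
    List<double[]> activations = batchInputs;
    for (int l = 0; l < layers.Count; l++)
    {
        layers[l].Calculate(activations, l == layers.Count - 1);   //Last layer is treated as the output layer
        activations = layers[l].Values;                            //Feed this layer's activations to the next layer
    }
    return activations;                                            //The network's output for the batch
}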
/// <summary>
/// Computes the error signal of the layer, and the gradients if applicable
/// </summary>
/// <param name="inputs">Previous layer's values</param>
/// <param name="outputlayer">The next layer in the network, or null if this layer is the output layer</param>
/// <param name="loss">The loss of the layer</param>
/// <param name="calcgradients">Whether or not to calculate gradients in the layer</param>
public void Backprop(List<double[]> inputs, Layer outputlayer, double loss, bool calcgradients)
{
    //Reset errors
    Errors = new List<double[]>();

    //Calculate errors
    if (outputlayer is null)
    {
        //Output layer: error is the derivative of the squared error with respect to the output
        for (int j = 0; j < inputs.Count; j++)
        {
            Errors.Add(new double[Length]);
            for (int i = 0; i < Length; i++)
            {
                //(i == loss ? 1d : 0d)
                Errors[j][i] = 2d * (Values[j][i] - loss);
            }
        }
    }
    else
    {
        for (int i = 0; i < inputs.Count; i++)
        {
            Errors.Add(new double[outputlayer.InputLength]);
        }

        if (outputlayer is SumLayer)
        {
            //Errors with respect to the output of the convolution (dl/do)
            for (int i = 0; i < outputlayer.ZVals.Count; i++)
            {
                for (int k = 0; k < outputlayer.Length; k++)
                {
                    for (int j = 0; j < outputlayer.InputLength; j++)
                    {
                        Errors[i][j] += outputlayer.Errors[i][k];
                    }
                }
            }
        }

        //Apply the derivative of the output layer's activation (tanh or ReLU), if applicable, to its z-values
        var outputZVals = outputlayer.ZVals;
        if (outputlayer.ActivationFunction == 0) { outputZVals = Maths.TanhDerriv(outputlayer.ZVals); }
        if (outputlayer.ActivationFunction == 1) { outputZVals = Maths.ReLuDerriv(outputlayer.ZVals); }

        if (outputlayer is FullyConnectedLayer)
        {
            var FCLOutput = outputlayer as FullyConnectedLayer;
            for (int i = 0; i < outputlayer.ZVals.Count; i++)
            {
                for (int k = 0; k < FCLOutput.Length; k++)
                {
                    for (int j = 0; j < FCLOutput.InputLength; j++)
                    {
                        Errors[i][j] += FCLOutput.Weights[k, j] * outputZVals[i][k] * FCLOutput.Errors[i][k];
                    }
                }
            }
        }

        if (outputlayer is ConvolutionLayer)
        {
            var CLOutput = outputlayer as ConvolutionLayer;
            for (int i = 0; i < outputlayer.ZVals.Count; i++)
            {
                if (CLOutput.DownOrUp)
                {
                    Errors[i] = Maths.Convert(CLOutput.UnPad(CLOutput.FullConvolve(CLOutput.Weights, Maths.Convert(CLOutput.Errors[i]))));
                }
                else
                {
                    Errors[i] = Maths.Convert(CLOutput.UnPad(CLOutput.Convolve(CLOutput.Weights, Maths.Convert(CLOutput.Errors[i]))));
                }
            }
            //Errors = Maths.Convert(CLOutput.UnPad(CLOutput.FullConvolve(CLOutput.Weights, Maths.Convert(CLOutput.Errors))));
        }

        if (outputlayer is PoolingLayer)
        {
            var PLOutput = outputlayer as PoolingLayer;
            for (int b = 0; b < NN.BatchSize; b++)
            {
                if (PLOutput.DownOrUp)
                {
                    //Route each error back to the input that the pool kept
                    int iterator = 0;
                    var wets = Maths.Convert(PLOutput.Weights);
                    for (int i = 0; i < Length; i++)
                    {
                        if (wets[i] == 0) { continue; }
                        Errors[b][i] = PLOutput.Errors[b][iterator];
                        iterator++;
                    }
                }
                else
                {
                    //Sum the errors over each pooling window
                    double[,] outputerrors = Maths.Convert(PLOutput.Errors[b]);
                    int oel = outputerrors.GetLength(0);
                    int oew = outputerrors.GetLength(1);
                    double[,] errors = new double[oel / PLOutput.PoolSize, oew / PLOutput.PoolSize];
                    for (int i = 0; i < oel; i++)
                    {
                        for (int ii = 0; ii < oew; ii++)
                        {
                            errors[i / PLOutput.PoolSize, ii / PLOutput.PoolSize] += outputerrors[i, ii];
                        }
                    }
                    Errors[b] = Maths.Convert(errors);
                }
            }
        }
    }

    //Normalize errors (if applicable)
    if (NN.NormErrors && Errors[0].Length > 1)
    {
        Errors = Maths.Normalize(Errors);
    }

    //Calculate gradients (pooling and sum layers have no weights to update)
    if (calcgradients)
    {
        if (this is FullyConnectedLayer) { (this as FullyConnectedLayer).CalcGradients(inputs, outputlayer); }
        if (this is ConvolutionLayer) { (this as ConvolutionLayer).CalcGradients(inputs, outputlayer); }
        if (this is PoolingLayer) { return; }
        if (this is SumLayer) { return; }
    }
}
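
// --- Sketch only: a hypothetical backward pass and update step ---
// Layers are processed from the output layer inwards so each layer can read its successor's
// Errors, then Descend applies the accumulated gradients. The `layers` list, `batchInputs`,
// `loss`, and the way inputs are threaded between layers are assumptions; only the Backprop
// and Descend signatures come from the code above.
static void BackwardPassSketch(List<Layer> layers, List<double[]> batchInputs, double loss)
{
    for (int l = layers.Count - 1; l >= 0; l--)
    {
        List<double[]> layerInputs = l == 0 ? batchInputs : layers[l - 1].Values;  //Previous layer's values
        Layer next = l == layers.Count - 1 ? null : layers[l + 1];                 //null marks the output layer
        layers[l].Backprop(layerInputs, next, loss, true);                         //true -> also calculate gradients
    }
    foreach (Layer layer in layers)
    {
        layer.Descend();                                                           //Apply the batch update
    }
}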