Example #1
File: Layer.cs Project: kpwelsh/NeuralNet
        internal override Vector<double> PropogateError(Vector<double> outputError, double errorWeight, Vector<double> inputCacheOverride = null, Vector<double> additionalError = null)
        {
            Vector<double> inputError;

            // Layer error: the output error scaled elementwise by the activation
            // derivative evaluated at the pre-activation values.
            if (inputCacheOverride == null)
            {
                inputError = outputError.PointwiseMultiply(ActFunc.Derivative(InputCache * Weights));
            }
            else
            {
                inputError = outputError.PointwiseMultiply(ActFunc.Derivative(inputCacheOverride * Weights));
            }

            // Fold in any additional error term supplied by the caller.
            if (additionalError != null)
            {
                inputError -= additionalError;
            }

            // Accumulate negated gradients in the error caches for the later weight update.
            BiasErrorCache   -= inputError * errorWeight;
            WeightErrorCache -= errorWeight * InputCache.OuterProduct(inputError);
            if (RegMode == RegularizationMode.L2)
            {
                // L2 regularization contributes a weight-decay term to the gradient.
                WeightErrorCache -= errorWeight * NormalizationWeight * Weights;
            }
            // Error propagated back to the previous layer through the weights.
            return Weights * inputError;
        }
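
For reference, the following is a minimal, self-contained sketch of the same dense-layer backpropagation step using Math.NET Numerics. The sigmoid activation, layer sizes, and numeric values are illustrative assumptions and do not come from kpwelsh/NeuralNet.

using System;
using MathNet.Numerics.LinearAlgebra;

class DenseBackpropSketch
{
    // Assumed activation derivative; the project's ActFunc is not shown here.
    static Vector<double> SigmoidDerivative(Vector<double> v) =>
        v.Map(x => { var s = 1.0 / (1.0 + Math.Exp(-x)); return s * (1.0 - s); });

    static void Main()
    {
        var M = Matrix<double>.Build;
        var V = Vector<double>.Build;

        // A 2-input, 3-output layer; the forward pass is y = f(x * W).
        Matrix<double> weights = M.DenseOfArray(new double[,] { { 0.1, -0.2,  0.3 },
                                                                { 0.4,  0.5, -0.6 } });
        Vector<double> input       = V.DenseOfArray(new[] { 1.0, 2.0 });
        Vector<double> outputError = V.DenseOfArray(new[] { 0.05, -0.10, 0.20 });

        // delta = outputError ∘ f'(x * W), as in PropogateError above.
        Vector<double> delta = outputError.PointwiseMultiply(SigmoidDerivative(input * weights));

        // The weight gradient is the outer product of the input and delta;
        // the error handed to the previous layer is W * delta.
        Matrix<double> weightGradient = input.OuterProduct(delta);
        Vector<double> previousError  = weights * delta;

        Console.WriteLine(weightGradient);
        Console.WriteLine(previousError);
    }
}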
Example #2
        internal override Vector<double> PropogateError(Vector<double> outputError, double errorWeight, Vector<double> inputCacheOverride = null, Vector<double> additionalError = null)
        {
            if (additionalError != null)
            {
                throw new NNException("Additional error is not supported for Convolutional Layers");
            }
            int nNodes      = OutputHeight * OutputWidth;
            int nInputNodes = InputHeight * InputWidth;

            // Error at the pre-activation output nodes.
            Vector<double> directInputError = outputError.PointwiseMultiply(ActFunc.Derivative(DirectInputCache));
            Vector<double> inputError       = new DenseVector(InputDimension);

            for (var channel = 0; channel < InputDepth; channel++)
            {
                for (var f = 0; f < NFilters; f++)
                {
                    int outputLayer = channel * NFilters + f;
                    for (var i = 0; i < OutputHeight; i++)
                    {
                        for (var j = 0; j < OutputWidth; j++)
                        {
                            double nodeError = directInputError[outputLayer * nNodes + i * OutputWidth + j];
                            BiasErrorCache[f] -= errorWeight * nodeError / nNodes; // Average the update over all instances of the same filter.

                            // Build the matrix of weight errors by multiplying the node error
                            // by the corresponding window of the input cache.
                            Matrix<double> weightError = new DenseMatrix(Weights[f].RowCount, Weights[f].ColumnCount);
                            for (var m = 0; m < weightError.RowCount; m++)
                            {
                                for (var n = 0; n < weightError.ColumnCount; n++)
                                {
                                    int inputIndex = channel * nInputNodes + (m + i * Stride) * InputWidth + n + j * Stride;
                                    weightError[m, n] = nodeError * InputCache[inputIndex];
                                    // Accumulate rather than assign: overlapping receptive fields and
                                    // multiple filters all contribute error to the same input node.
                                    inputError[inputIndex] += Weights[f][m, n] * nodeError;
                                }
                            }

                            WeightErrorCache[f] -= errorWeight * weightError / nNodes; // Average the update over all instances of the same filter.
                        }
                    }
                }
            }

            return inputError;
        }
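
The flattened index arithmetic in the inner loops is the easiest part to misread. Below is a small standalone sketch that prints which flattened input index each (output node, kernel offset) pair touches, mirroring the expression channel * nInputNodes + (m + i * Stride) * InputWidth + n + j * Stride for a single channel. The 4x4 input, 2x2 filter, and stride of 2 are assumed example values, not taken from the project.

using System;

class ConvIndexSketch
{
    static void Main()
    {
        // Assumed example geometry: 4x4 input, 2x2 filter, stride 2, one channel.
        const int InputWidth = 4, InputHeight = 4;
        const int FilterSize = 2, Stride = 2;
        const int OutputWidth  = (InputWidth  - FilterSize) / Stride + 1;
        const int OutputHeight = (InputHeight - FilterSize) / Stride + 1;

        for (var i = 0; i < OutputHeight; i++)
        {
            for (var j = 0; j < OutputWidth; j++)
            {
                Console.WriteLine($"output node ({i},{j}):");
                for (var m = 0; m < FilterSize; m++)
                {
                    for (var n = 0; n < FilterSize; n++)
                    {
                        // Same flattened-index expression as PropogateError, for channel 0.
                        int inputIndex = (m + i * Stride) * InputWidth + n + j * Stride;
                        Console.WriteLine($"  kernel ({m},{n}) -> input index {inputIndex}");
                    }
                }
            }
        }
    }
}

With a stride equal to the filter size, as here, the windows tile the input and each input index is printed exactly once. With a stride smaller than the filter, the windows overlap and several output nodes touch the same index, which is why inputError[inputIndex] in the method above accumulates with += instead of assigning.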