Ejemplo n.º 1
0
        /// <summary>
        /// Computes the forward convolution of one filter over one sample,
        /// writing each output activation (plus the filter's bias) into
        /// <c>ForwardOutput</c> at (sampleIndex, row, col, filterIndex).
        /// </summary>
        /// <param name="input">Input tensor, indexed as (sample, row, col, channel) — layout inferred from GetRawOffset usage; confirm against TensorOld.</param>
        /// <param name="sampleIndex">Index of the sample in the batch.</param>
        /// <param name="filterIndex">Index of the filter whose weights and bias are applied.</param>
        private void Forward(TensorOld input, int sampleIndex, int filterIndex)
        {
            // Raw offset of the first weight of this filter in Filters' backing array.
            var filterOff = Filters.GetRawOffset(filterIndex, 0, 0, 0);

            Parallel.For(0, outRows, row =>
            {
                var startRow = row * RowStride;
                Parallel.For(0, outColumns, col =>
                {
                    var startCol = col * ColumnStride;
                    var sum      = 0d;

                    for (int i = 0; i < FilterRows; i++)
                    {
                        // BUG FIX: the original computed filterOff but never used it,
                        // so every filter read the weights of filter 0. The row offset
                        // must be based at this filter's raw offset.
                        var filterStart = filterOff + i * filterRowLength;
                        var inputOff    = input.GetRawOffset(sampleIndex, startRow + i, startCol, 0);
                        // filterRowLength presumably spans one filter row across all
                        // channels, matching the contiguous input row slice — TODO confirm.
                        for (int j = 0; j < filterRowLength; j++)
                        {
                            sum += input.values[inputOff + j] * Filters.values[filterStart + j];
                        }
                    }
                    ForwardOutput.SetValueFast(sum + Bias.values[filterIndex], sampleIndex, row, col, filterIndex);
                });
            });
        }
Ejemplo n.º 2
0
        /// <summary>
        /// Copies every row of <paramref name="input"/> into the interior of the
        /// pre-allocated <c>PaddingInput</c> buffer, shifted by
        /// (RowPadding, ColumnPadding); the surrounding border is left as-is
        /// (presumably zero-initialized elsewhere — confirm against PaddingInput setup).
        /// </summary>
        /// <param name="input">Input tensor whose shape[2] x shape[3] rows are copied per sample and channel.</param>
        private void SetPaddingInput(TensorOld input)
        {
            var src = input.GetRawValues();
            var dst = PaddingInput.GetRawValues();

            // Hoisted loop invariants: row count and contiguous row length.
            var rowCount  = input.shape[2];
            var rowLength = input.shape[3];

            for (int sample = 0; sample < samples; sample++)
            {
                for (int channel = 0; channel < channels; channel++)
                {
                    for (int row = 0; row < rowCount; row++)
                    {
                        // One contiguous row at a time: source row -> padded destination row.
                        var srcOffset = input.GetRawOffset(sample, channel, row, 0);
                        var dstOffset = PaddingInput.GetRawOffset(sample, channel, row + RowPadding, ColumnPadding);
                        Array.Copy(src, srcOffset, dst, dstOffset, rowLength);
                    }
                }
            }
        }
        // This method creates no extra temporary objects; the trade-off is that
        // the Derivative is no longer stored.
        //private void ErrorBP(Tensor output, Tensor error, Tensor result, int sampleIndex)
        //{
        //    for (int i = 0; i < categoryNumber; i++)
        //    {
        //        var der = 0d;
        //        for (int j = 0; j < categoryNumber; j++)
        //        {
        //            if (i == j)
        //                der += output[sampleIndex, i] * (1 - output[sampleIndex, j]) * error[sampleIndex, j];
        //            else
        //                der += -output[sampleIndex, i] * output[sampleIndex, j] * error[sampleIndex, j];
        //        }
        //        result[sampleIndex, i] = der;
        //    }
        //}

        /// <summary>
        /// Back-propagates the incoming error through this layer: for each sample,
        /// multiplies the error vector by the per-sample Jacobian stored in
        /// <c>Derivative</c> and writes the result into <c>BackwardOutput</c>.
        /// Assumes Derivative holds a categoryNumber x categoryNumber matrix per
        /// sample — TODO confirm against where Derivative is filled.
        /// </summary>
        /// <param name="error">Error tensor from the next layer, indexed (sample, category).</param>
        private void ErrorBP(TensorOld error)
        {
            var derData   = Derivative.GetRawValues();
            var errorData = error.GetRawValues();
            var outData   = BackwardOutput.GetRawValues();

            Parallel.For(0, sampleNumber, sampleIndex =>
            {
                var errorStart = error.GetRawOffset(sampleIndex, 0);
                // The two nested loops are not a strict matrix multiplication; the
                // derivative should be error * jacobian. Because the Jacobian matrix
                // is symmetric, taking the inner product of each Jacobian row with
                // the error vector is equivalent and easier to write as a loop.
                Parallel.For(0, categoryNumber, i =>
                {
                    var derStart = Derivative.GetRawOffset(sampleIndex, i, 0);
                    var sum      = 0d;
                    for (int j = 0; j < categoryNumber; j++)
                    {
                        sum += derData[derStart + j] * errorData[errorStart + j];
                    }
                    // NOTE(review): errorStart is reused to index outData — this
                    // assumes BackwardOutput shares error's raw layout; verify.
                    outData[errorStart + i] = sum;
                });
            });
        }