Example #1
        public static long[] OutputSize(long[] inputSizes, bool ceilMode, ConvolutionDesc2d cd)
        {
            var dimw = 3;
            var dimh = 2;

            var iwidth  = inputSizes[dimw];
            var iheight = inputSizes[dimh];

            long oheight, owidth;

            if (ceilMode)
            {
                // ReSharper disable once ArrangeRedundantParentheses
                oheight = (long)(Math.Ceiling((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                // ReSharper disable once ArrangeRedundantParentheses
                owidth = (long)(Math.Ceiling((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }
            else
            {
                // ReSharper disable once ArrangeRedundantParentheses
                oheight = (long)(Math.Floor((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                // ReSharper disable once ArrangeRedundantParentheses
                owidth = (long)(Math.Floor((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }

            return(new long[] { inputSizes[0], inputSizes[1], oheight, owidth });
        }
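A minimal standalone sketch of the same formula (all sizes here are hypothetical, not taken from the library) shows how the two rounding modes diverge once the pooling window no longer tiles the input evenly:

        // Pooled output size: floor/ceil((in - k + 2*pad) / stride) + 1, as in OutputSize above.
        public static void OutputSizeDemo()
        {
            long iheight = 8, kH = 3, padH = 0, dH = 2;

            // floor mode: floor((8 - 3 + 0) / 2) + 1 = 3
            var floorH = (long)Math.Floor((float)(iheight - kH + 2 * padH) / dH) + 1;

            // ceil mode: ceil((8 - 3 + 0) / 2) + 1 = 4
            var ceilH = (long)Math.Ceiling((float)(iheight - kH + 2 * padH) / dH) + 1;

            Console.WriteLine($"floor: {floorH}, ceil: {ceilH}");   // prints "floor: 3, ceil: 4"
        }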
Example #2
        public static long[] OutputSize(long[] inputSizes, long[] weightSizes, ConvolutionDesc2d cd)
        {
            //int dimf = 1;
            int dimw = 3;
            int dimh = 2;

            long n            = inputSizes[0];
            long inputWidth   = inputSizes[dimw];
            long inputHeight  = inputSizes[dimh];
            long nOutputPlane = weightSizes[0];

            long outputWidth  = (inputWidth + 2 * cd.padW - cd.kW) / cd.dW + 1;
            long outputHeight = (inputHeight + 2 * cd.padH - cd.kH) / cd.dH + 1;

            return(new long[] { n, nOutputPlane, outputHeight, outputWidth });
        }
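For instance, with a hypothetical 1x3x224x224 input, a 3x3 kernel, stride 1 and padding 1, the formula gives (224 + 2*1 - 3)/1 + 1 = 224 in both dimensions, i.e. a "same"-sized convolution. The integer division here truncates, so this helper matches the floor-mode branch of Examples #1 and #3.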
Example #3
        public static long[] OutputSize(long[] inputSizes, bool ceilMode, ConvolutionDesc2d cd)
        {
            int dimw = 3;
            int dimh = 2;

            var iwidth  = inputSizes[dimw];
            var iheight = inputSizes[dimh];

            long oheight, owidth;

            if (ceilMode)
            {
                oheight = (long)(Math.Ceiling((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                owidth  = (long)(Math.Ceiling((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }
            else
            {
                oheight = (long)(Math.Floor((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                owidth  = (long)(Math.Floor((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }

            return(new long[] { inputSizes[0], inputSizes[1], oheight, owidth });
        }
Example #4
        public static void Conv2Forward(Tensor input, Tensor output, Tensor weight, Tensor bias, Tensor finput, ConvolutionDesc2d cd)
        {
            int dimf = 1;
            int dimw = 3;
            int dimh = 2;

            long n            = input.Sizes[0];
            long nInputPlane  = input.Sizes[dimf];
            long inputWidth   = input.Sizes[dimw];
            long inputHeight  = input.Sizes[dimh];
            long nOutputPlane = weight.Sizes[0];

            long outputWidth  = (inputWidth + 2 * cd.padW - cd.kW) / cd.dW + 1;
            long outputHeight = (inputHeight + 2 * cd.padH - cd.kH) / cd.dH + 1;

            if (bias != null && (bias.Sizes[0] != nOutputPlane))
            {
                throw new InvalidOperationException("bias has incorrect size. Expected 1D tensor of size " + nOutputPlane);
            }

            if (outputWidth < 1 || outputHeight < 1)
            {
                throw new InvalidOperationException(string.Format(
                                                        "Output size too small; calculated output size = ({0}x{1}x{2}", nOutputPlane, outputHeight, outputWidth));
            }

            if (nInputPlane * cd.kW * cd.kH != weight.Sizes[1])
            {
                throw new InvalidOperationException(
                          string.Format("Input has incorrect number of channels. Got {0}, expected {1}", nInputPlane, weight.Sizes[1] / ((float)(cd.kW * cd.kH))));
            }

            if (input.DimensionCount != 4)
            {
                throw new InvalidOperationException("4D input expected (NCHW order)");
            }

            if (finput.Sizes[0] != n || finput.Sizes[1] != cd.kW * cd.kH * nInputPlane || finput.Sizes[2] != outputHeight * outputWidth)
            {
                throw new InvalidOperationException("finput is incorrect size");
            }

            if (output.Sizes[0] != n || output.Sizes[1] != nOutputPlane || output.Sizes[2] != outputHeight || output.Sizes[3] != outputWidth)
            {
                throw new InvalidOperationException("output is incorrect size");
            }

            for (int i = 0; i < n; ++i)
            {
                using Tensor input_i  = input.Select(0, i);
                using Tensor output_i = output.Select(0, i);
                using Tensor finput_i = finput.Select(0, i);
                Conv2ForwardFrame(input_i, output_i, weight, bias, finput_i,
                                  cd.kW, cd.kH, cd.dW, cd.dH, cd.padW, cd.padH,
                                  nInputPlane, inputWidth, inputHeight,
                                  nOutputPlane, outputWidth, outputHeight);
            }
        }
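The size checks above mirror the helpers shown earlier: output is expected to be n x nOutputPlane x outputHeight x outputWidth (Example #2) and finput, the unfolded im2col buffer, n x (kW*kH*nInputPlane) x (outputHeight*outputWidth) (Example #5), so presumably both tensors are allocated from those helpers before Conv2Forward is called. The loop then runs one Conv2ForwardFrame per batch element.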
Example #5
 public static long[] FInputSize(long[] inputSizes, long[] outputSizes, ConvolutionDesc2d cd)
 {
     return(new long[] { inputSizes[0], cd.kW * cd.kH * inputSizes[1], outputSizes[2] * outputSizes[3] });
 }
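This is the shape of the unfolded (im2col) buffer used by the forward and backward passes: one column of kW*kH*C input values per output location, for each of the N batch elements. With hypothetical sizes N = 1, C = 3, a 3x3 kernel and an 8x8 output map, it returns { 1, 27, 64 }.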
Example #6
        private static void Conv2BackwardFilterFrame(Tensor gradOutput, Tensor gradWeight, Tensor gradBias, Tensor finput, ConvolutionDesc2d cd)
        {
            if (gradOutput is null)
            {
                throw new ArgumentNullException(nameof(gradOutput));
            }

            if (gradWeight is null)
            {
                throw new ArgumentNullException(nameof(gradWeight));
            }

            if (gradBias is null)
            {
                throw new ArgumentNullException(nameof(gradBias));
            }

            if (finput is null)
            {
                throw new ArgumentNullException(nameof(finput));
            }

            if (cd is null)
            {
                throw new ArgumentNullException(nameof(cd));
            }

            using Tensor gradOutput2d = gradOutput.View(gradOutput.Sizes[0], gradOutput.Sizes[1] * gradOutput.Sizes[2]);
            using Tensor finputT      = finput.Transpose();
            Ops.Addmm(gradWeight, 1, gradWeight, 1, gradOutput2d, finputT);
            Ops.Sum(gradBias, gradOutput2d, 1);
        }
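Read as matrix algebra, this frame accumulates gradWeight += gradOutput2d * finput^T (Addmm with both coefficients set to 1) and reduces gradOutput2d over its second dimension into gradBias, which is the usual GEMM formulation of the filter-gradient pass for an im2col convolution. A standalone sketch with plain arrays (hypothetical sizes, not the library's Tensor type) spells out the same computation:

        // gradOutput2d: nOutputPlane x (outH*outW), finput: (kW*kH*C) x (outH*outW),
        // gradWeight:   nOutputPlane x (kW*kH*C),   gradBias: nOutputPlane.
        public static void FilterGradDemo()
        {
            const int outPlanes = 2, unfolded = 4, locations = 3;

            var gradOutput2d = new float[outPlanes, locations];
            var finput       = new float[unfolded, locations];
            var gradWeight   = new float[outPlanes, unfolded];
            var gradBias     = new float[outPlanes];

            for (int o = 0; o < outPlanes; o++)
            {
                // gradWeight += gradOutput2d * finput^T
                for (int u = 0; u < unfolded; u++)
                {
                    for (int l = 0; l < locations; l++)
                    {
                        gradWeight[o, u] += gradOutput2d[o, l] * finput[u, l];
                    }
                }

                // gradBias = row-sum of gradOutput2d
                for (int l = 0; l < locations; l++)
                {
                    gradBias[o] += gradOutput2d[o, l];
                }
            }
        }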
Example #7
        public static void Conv2BackwardFilter(Tensor input, Tensor gradOutput, Tensor gradWeight, Tensor gradBias, Tensor finput, Tensor fgradInput, ConvolutionDesc2d cd)
        {
            long nOutputPlane = gradWeight.Sizes[0];
            long n            = input.Sizes[0];

            if (gradOutput.Sizes[1] != nOutputPlane)
            {
                throw new InvalidOperationException("Number of output features must equal nOutputPlane");
            }

            if (cd.kW <= 0 && cd.kH <= 0)
            {
                throw new InvalidOperationException("Kernel size should be greater than zero");
            }

            if (cd.dW <= 0 && cd.dH <= 0)
            {
                throw new InvalidOperationException("stride should be greater than zero");
            }

            for (int i = 0; i < n; ++i)
            {
                using Tensor gradOutput_i = gradOutput.Select(0, i);
                using Tensor finput_i     = finput.Select(0, i);
                Conv2BackwardFilterFrame(gradOutput_i, gradWeight, gradBias, finput_i, cd);
            }
        }
Example #8
        private static void Conv2BackwardInputFrame(Tensor gradOutput, Tensor gradInput, Tensor weight, Tensor fgradInput, ConvolutionDesc2d cd)
        {
            using (Tensor gradOutput2d = gradOutput.View(gradOutput.Sizes[0], gradOutput.Sizes[1] * gradOutput.Sizes[2]))
            {
                Ops.Addmm(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
            }

            Ops.Fill(gradInput, 0);

            using (NativeWrapper.BuildTensorRefPtr(fgradInput, out IntPtr fgradInputPtr))
                using (NativeWrapper.BuildTensorRefPtr(gradInput, out IntPtr gradInputPtr))
                {
                    CpuOpsNative.TS_Unfolded_Acc(fgradInputPtr, gradInputPtr, cd.kW, cd.kH, cd.dW, cd.dH, cd.padW, cd.padH,
                                                 (int)gradInput.Sizes[0], (int)gradInput.Sizes[2], (int)gradInput.Sizes[1],
                                                 (int)gradOutput.Sizes[2], (int)gradOutput.Sizes[1]);
                }
        }
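This is the mirror image of the filter-gradient frame: fgradInput is overwritten (the beta coefficient is 0) with the product of the weight matrix passed in (Example #12 supplies it already transposed) and gradOutput2d, and TS_Unfolded_Acc then scatters the unfolded columns back into gradInput in col2im fashion, which is why gradInput is zero-filled first.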
Example #9
        public static void SpatialMaxPoolingForward(Tensor input, Tensor output, Tensor indices, ConvolutionDesc2d cd, bool ceilMode)
        {
            if (input.DimensionCount != 4)
            {
                throw new ArgumentException("input must be a 4D tensor");
            }

            var dimw = 3;
            var dimh = 2;
            var dimc = 1;

            if (input.Sizes[dimw] < cd.kW - cd.padW || input.Sizes[dimh] < cd.kH - cd.padH)
            {
                throw new InvalidOperationException("input image is smaller than kernel size");
            }

            if (cd.padW > cd.kW / 2 || cd.padH > cd.kH / 2)
            {
                throw new InvalidOperationException("pad should be smaller than half of the kernel size");
            }

            var nbatch  = input.Sizes[0];
            var nslices = input.Sizes[dimc];
            var iheight = input.Sizes[dimh];
            var iwidth  = input.Sizes[dimw];

            long owidth;
            long oheight;

            if (ceilMode)
            {
                // ReSharper disable once ArrangeRedundantParentheses
                oheight = (long)(Math.Ceiling((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                // ReSharper disable once ArrangeRedundantParentheses
                owidth = (long)(Math.Ceiling((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }
            else
            {
                // ReSharper disable once ArrangeRedundantParentheses
                oheight = (long)(Math.Floor((float)(iheight - cd.kH + 2 * cd.padH) / cd.dH)) + 1;
                // ReSharper disable once ArrangeRedundantParentheses
                owidth = (long)(Math.Floor((float)(iwidth - cd.kW + 2 * cd.padW) / cd.dW)) + 1;
            }

            if (cd.padW != 0 || cd.padH != 0)
            {
                // ensure that the last pooling starts inside the image
                if ((oheight - 1) * cd.dH >= iheight + cd.padH)
                {
                    --oheight;
                }

                if ((owidth - 1) * cd.dW >= iwidth + cd.padW)
                {
                    --owidth;
                }
            }

            using var inputContig = Ops.AsContiguous(input);
            for (var i = 0; i < nbatch; ++i)
            {
                using var input_i   = inputContig.Select(0, i);
                using var output_i  = output.Select(0, i);
                using var indices_i = indices.Select(0, i);
                using (NativeWrapper.BuildTensorRefPtr(input_i, out var input_iPtr))
                {
                    using (NativeWrapper.BuildTensorRefPtr(output_i, out var output_iPtr))
                    {
                        using (NativeWrapper.BuildTensorRefPtr(indices_i, out var indices_iPtr))
                        {
                            CpuOpsNative.TS_SpatialMaxPooling_updateOutput_frame(input_iPtr, output_iPtr, indices_iPtr,
                                                                                 nslices, iwidth, iheight,
                                                                                 owidth, oheight,
                                                                                 cd.kW, cd.kH, cd.dW, cd.dH, cd.padW, cd.padH);
                        }
                    }
                }
            }
        }
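The extra decrement when padding is non-zero ensures the last pooling window starts inside the padded input: without it, the ceil-mode formula can produce a final window whose start offset (oheight - 1) * dH falls at or past iheight + padH (likewise for the width), i.e. a window that covers only padding.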
Example #10
        public static void SpatialMaxPoolingBackward(Tensor input, Tensor gradOutput, Tensor gradInput, Tensor indices, ConvolutionDesc2d cd, bool ceilMode)
        {
            var dimw = 3;
            var dimh = 2;
            var dimc = 1;

            var nbatch  = input.Sizes[0];
            var nslices = input.Sizes[dimc];
            var iheight = input.Sizes[dimh];
            var iwidth  = input.Sizes[dimw];
            var owidth  = gradOutput.Sizes[dimw];
            var oheight = gradOutput.Sizes[dimh];

            Ops.Fill(gradInput, 0);

            using var gradOutputContig = Ops.AsContiguous(gradOutput);
            for (var i = 0; i < nbatch; ++i)
            {
                using var gradInput_i  = gradInput.Select(0, i);
                using var gradOutput_i = gradOutputContig.Select(0, i);
                using var indices_i    = indices.Select(0, i);
                using (NativeWrapper.BuildTensorRefPtr(gradInput_i, out var gradInput_iPtr))
                {
                    using (NativeWrapper.BuildTensorRefPtr(gradOutput_i, out var gradOutput_iPtr))
                    {
                        using (NativeWrapper.BuildTensorRefPtr(indices_i, out var indices_iPtr))
                        {
                            CpuOpsNative.TS_SpatialMaxPooling_updateGradInput_frame(gradInput_iPtr, gradOutput_iPtr, indices_iPtr,
                                                                                    nslices, iwidth, iheight,
                                                                                    owidth, oheight,
                                                                                    cd.dW, cd.dH);
                        }
                    }
                }
            }
        }
Example #11
 private static void Conv2BackwardFilterFrame(Tensor gradOutput, Tensor gradWeight, Tensor gradBias, Tensor finput, ConvolutionDesc2d cd)
 {
     using (var gradOutput2d = gradOutput.View(gradOutput.Sizes[0], gradOutput.Sizes[1] * gradOutput.Sizes[2]))
         using (var finputT = finput.Transpose())
         {
             Ops.Addmm(gradWeight, 1, gradWeight, 1, gradOutput2d, finputT);
             Ops.Sum(gradBias, gradOutput2d, 1);
         }
 }
Example #12
        public static void Conv2BackwardInput(Tensor input, Tensor gradOutput, Tensor gradInput, Tensor weight, Tensor finput, Tensor fgradInput, ConvolutionDesc2d cd)
        {
            var nOutputPlane = weight.Sizes[0];

            if (gradOutput.Sizes[1] != nOutputPlane)
            {
                throw new InvalidOperationException("Number of output features must equal nOutputPlane");
            }

            if (cd.kW <= 0 && cd.kH <= 0)
            {
                throw new InvalidOperationException("Kernel size should be greater than zero");
            }

            if (cd.dW <= 0 && cd.dH <= 0)
            {
                throw new InvalidOperationException("stride should be greater than zero");
            }

            using (var weightT = weight.Transpose())
            {
                var n = input.Sizes[0];

                for (int i = 0; i < n; ++i)
                {
                    using (var gradInput_i = gradInput.Select(0, i))
                        using (var gradOutput_i = gradOutput.Select(0, i))
                            using (var fgradInput_i = fgradInput.Select(0, i))
                            {
                                Conv2BackwardInputFrame(gradOutput_i, gradInput_i, weightT, fgradInput_i, cd);
                            }
                }
            }
        }
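Transposing the weight matrix once, outside the per-sample loop, avoids recomputing the transpose for every batch element; each frame then only performs the GEMM and the col2im accumulation shown in Example #8.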
Example #13
        /// <summary>
        /// Computes the gradient of the convolution filter (backward pass with respect to the weights).
        /// </summary>
        /// <param name="input">The input tensor.</param>
        /// <param name="gradOutput">The gradient of the output.</param>
        /// <param name="gradWeight">The gradient of the weights.</param>
        /// <param name="gradBias">The gradient of the bias.</param>
        /// <param name="finput">The unfolded (im2col) input buffer.</param>
        /// <param name="fgradInput">The unfolded input-gradient buffer.</param>
        /// <param name="cd">The convolution descriptor.</param>
        /// <exception cref="InvalidOperationException">
        /// Number of output features must equal nOutputPlane
        /// or
        /// Kernel size should be greater than zero
        /// or
        /// stride should be greater than zero
        /// </exception>
        public static void Conv2BackwardFilter(NDArray input, NDArray gradOutput, NDArray gradWeight, NDArray gradBias, NDArray finput, NDArray fgradInput, ConvolutionDesc2d cd)
        {
            var nOutputPlane = gradWeight.Shape[0];
            var n            = input.Shape[0];

            if (gradOutput.Shape[1] != nOutputPlane)
            {
                throw new InvalidOperationException("Number of output features must equal nOutputPlane");
            }

            if (cd.kW <= 0 && cd.kH <= 0)
            {
                throw new InvalidOperationException("Kernel size should be greater than zero");
            }

            if (cd.dW <= 0 && cd.dH <= 0)
            {
                throw new InvalidOperationException("stride should be greater than zero");
            }

            for (int i = 0; i < n; ++i)
            {
                using (var gradOutput_i = gradOutput.Select(0, i))
                    using (var finput_i = finput.Select(0, i))
                    {
                        Conv2BackwardFilterFrame(gradOutput_i, gradWeight, gradBias, finput_i, cd);
                    }
            }
        }
Example #14
        public static void Conv2Forward(Tensor input, Tensor output, Tensor weight, Tensor bias, Tensor finput, ConvolutionDesc2d cd)
        {
            var dimf = 1;
            var dimw = 3;
            var dimh = 2;

            var n            = input.Sizes[0];
            var nInputPlane  = input.Sizes[dimf];
            var inputWidth   = input.Sizes[dimw];
            var inputHeight  = input.Sizes[dimh];
            var nOutputPlane = weight.Sizes[0];

            var outputWidth  = (inputWidth + 2 * cd.padW - cd.kW) / cd.dW + 1;
            var outputHeight = (inputHeight + 2 * cd.padH - cd.kH) / cd.dH + 1;

            // ReSharper disable once ArrangeRedundantParentheses
            if (bias != null && (bias.Sizes[0] != nOutputPlane))
            {
                throw new InvalidOperationException("bias has incorrect size. Expected 1D tensor of size " + nOutputPlane);
            }

            if (outputWidth < 1 || outputHeight < 1)
            {
                throw new InvalidOperationException($"Output size too small; calculated output size = ({nOutputPlane}x{outputHeight}x{outputWidth}");
            }

            if (nInputPlane * cd.kW * cd.kH != weight.Sizes[1])
            {
                throw new InvalidOperationException($"Input has incorrect number of channels. Got {nInputPlane}, expected {weight.Sizes[1] / (float)(cd.kW * cd.kH)}");
            }

            if (input.DimensionCount != 4)
            {
                throw new InvalidOperationException("4D input expected (NCHW order)");
            }

            if (finput.Sizes[0] != n || finput.Sizes[1] != cd.kW * cd.kH * nInputPlane || finput.Sizes[2] != outputHeight * outputWidth)
            {
                throw new InvalidOperationException("finput is incorrect size");
            }

            if (output.Sizes[0] != n || output.Sizes[1] != nOutputPlane || output.Sizes[2] != outputHeight || output.Sizes[3] != outputWidth)
            {
                throw new InvalidOperationException("output is incorrect size");
            }

            for (var i = 0; i < n; ++i)
            {
                using var input_i  = input.Select(0, i);
                using var output_i = output.Select(0, i);
                using var finput_i = finput.Select(0, i);
                Conv2ForwardFrame(input_i, output_i, weight, bias, finput_i,
                                  cd.kW, cd.kH, cd.dW, cd.dH, cd.padW, cd.padH,
                                  nInputPlane, inputWidth, inputHeight,
                                  nOutputPlane, outputWidth, outputHeight);
            }
        }