Example #1
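 // CPU backward pass: applies BackwardActivate to each output gradient and accumulates the result into x.Grad.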
 public static void NeedPreviousBackwardCpu(this ICompressibleActivation compressibleActivation, NdArray y, NdArray x)
 {
     for (int i = 0; i < x.Grad.Length; i++)
     {
         x.Grad[i] += compressibleActivation.BackwardActivate(y.Grad[i], y.Data[i]);
     }
 }
Example #2
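        // Convolution layer constructor taking per-axis kernel size, stride, and padding; a bias is allocated unless noBias is set.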
        public Convolution2D(int inputChannels, int outputChannels, int[] kernelSize, int[] stride = null, int[] pad = null, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation <T> activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            if (pad == null)
            {
                pad = new[] { 0, 0 }; // default: no padding
            }

            if (stride == null)
            {
                stride = new[] { 1, 1 }; // default: unit stride
            }

            this.Weight = new NdArray <T>(outputChannels, inputChannels, kernelSize[1], kernelSize[0]);
            if (!noBias)
            {
                this.Bias = new NdArray <T>(outputChannels);
            }

            this.StrideX = stride[0];
            this.StrideY = stride[1];
            this.PadX    = pad[0];
            this.PadY    = pad[1];

            this.Parameters = new NdArray <T> [noBias ? 1 : 2];

            this.Activation = activation;

            this.Initialize(initialW, initialb);
            InitFunc(new StreamingContext());
        }
Example #3
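        // GPU forward pass: uploads x.Data to an OpenCL buffer, runs the activation's forward kernel over every element, and reads the result back into y.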
        public static NdArray <Real> NeedPreviousForwardGpu(this ICompressibleActivation <Real> compressibleActivation, NdArray <Real> x)
        {
            Real[] y = new Real[x.Data.Length];

            using (ComputeBuffer <Real> gpuX = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, x.Data))
                using (ComputeBuffer <Real> gpuY = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.WriteOnly | ComputeMemoryFlags.AllocateHostPointer, y.Length))
                {
                    compressibleActivation.ForwardKernel.SetMemoryArgument(0, gpuX);
                    compressibleActivation.ForwardKernel.SetMemoryArgument(1, gpuY);

                    OpenCL.CommandQueue.Execute
                    (
                        compressibleActivation.ForwardKernel,
                        null,
                        new long[] { x.Data.Length },
                        null,
                        null
                    );

                    OpenCL.CommandQueue.Finish();
                    OpenCL.CommandQueue.ReadFromBuffer(gpuY, ref y, true, null);
                }

            return(NdArray.Convert(y, x.Shape, x.BatchCount, compressibleActivation));
        }
Example #4
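        // Enables parallel (OpenCL) execution when available and compiles the forward/backward kernels from the activation's kernel source.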
        public static bool SetParallel <T>(this ICompressibleActivation <T> compressibleActivation, bool enable, KeyValuePair <string, string>[] activationParameters = null) where T : unmanaged, IComparable <T>
        {
            compressibleActivation.IsParallel = enable & OpenCL.Enable;

            if (compressibleActivation.IsParallel)
            {
                string kernelNameBase = compressibleActivation.FunctionName.Replace(" ", "");
                compressibleActivation.ActivateKernelString = OpenCL.GetKernelSource(Resources.Activation).Replace("/*kernelNameBase*/", kernelNameBase);
                compressibleActivation.ForwardKernelName    = kernelNameBase + "Forward";
                compressibleActivation.BackwardKernelName   = kernelNameBase + "Backward";

                string kernelSource = compressibleActivation.KernelSource;

                if (activationParameters != null)
                {
                    foreach (var parameter in activationParameters)
                    {
                        kernelSource = kernelSource.Replace(parameter.Key, parameter.Value);
                    }
                }

                kernelSource += compressibleActivation.ActivateKernelString;

                ComputeProgram program = OpenCL.CreateProgram <T>(kernelSource);
                compressibleActivation.ForwardKernel  = program.CreateKernel(compressibleActivation.ForwardKernelName);
                compressibleActivation.BackwardKernel = program.CreateKernel(compressibleActivation.BackwardKernelName);
            }

            return(compressibleActivation.IsParallel);
        }
Example #5
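        // Fully connected layer: the weight is (outputCount x inputCount) and falls back to He-normal initialization when initialW is null.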
        public Linear(int inputCount, int outputCount, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation <T> activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            this.Weight      = new NdArray <T>(outputCount, inputCount);
            this.Weight.Name = this.Name + " Weight";

            this.Parameters = new NdArray <T> [noBias ? 1 : 2];

            this.Activation = activation;

            if (initialW == null)
            {
                Initializer.InitHeNorm(this.Weight);
            }
            else
            {
                this.Weight.Data = initialW.FlattenEx <T>();
            }

            this.Parameters[0] = this.Weight;

            if (!noBias)
            {
                this.Bias      = new NdArray <T>(outputCount);
                this.Bias.Name = this.Name + " Bias";

                if (initialb != null)
                {
                    this.Bias.Data = initialb.FlattenEx <T>();
                }

                this.Parameters[1] = this.Bias;
            }

            InitFunc(new StreamingContext());
        }
Example #6
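        // GPU backward pass: runs the backward kernel on y.Grad and y.Data, then accumulates the host-side result into x.Grad.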
        public static void NeedPreviousBackwardGpu(this ICompressibleActivation <Real> compressibleActivation, NdArray <Real> y, NdArray <Real> x)
        {
            Real[] gx = new Real[y.Grad.Length];

            using (ComputeBuffer <Real> gpugY = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, y.Grad))
                using (ComputeBuffer <Real> gpuY = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, y.Data))
                    using (ComputeBuffer <Real> gpugX = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.WriteOnly | ComputeMemoryFlags.AllocateHostPointer, gx.Length))
                    {
                        compressibleActivation.BackwardKernel.SetMemoryArgument(0, gpugY);
                        compressibleActivation.BackwardKernel.SetMemoryArgument(1, gpuY);
                        compressibleActivation.BackwardKernel.SetMemoryArgument(2, gpugX);

                        OpenCL.CommandQueue.Execute
                        (
                            compressibleActivation.BackwardKernel,
                            null,
                            new long[] { y.Grad.Length },
                            null,
                            null
                        );

                        OpenCL.CommandQueue.Finish();
                        OpenCL.CommandQueue.ReadFromBuffer(gpugX, ref gx, true, null);
                    }

            for (int i = 0; i < x.Grad.Length; i++)
            {
                x.Grad[i] += gx[i];
            }
        }
Example #7
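        // Transposed convolution constructor; subSample becomes the stride and trim becomes the padding.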
        public Deconvolution2D(int inputChannels, int outputChannels, int[] kernelSize, int[] subSample = null, int[] trim = null, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            if (subSample == null)
            {
                subSample = new[] { 1, 1 }; // default: unit stride
            }

            if (trim == null)
            {
                trim = new[] { 0, 0 }; // default: no trimming
            }

            this.KernelWidth  = kernelSize[0];
            this.KernelHeight = kernelSize[1];
            this.PadX         = trim[0];
            this.PadY         = trim[1];
            this.NoBias       = noBias;

            this.StrideX = subSample[0];
            this.StrideY = subSample[1];

            this.Parameters = new NdArray[noBias ? 1 : 2];

            this.OutputCount = outputChannels;
            this.InputCount  = inputChannels;

            this.Activation = activation;

            this.Initialize(initialW, initialb);
        }
Example #8
        public Linear(CPU.Linear <T> linear) : base(linear.Name, linear.InputNames, linear.OutputNames)
        {
            this.Weight = linear.Weight;
            this.Bias   = linear.Bias;

            this.Parameters = linear.Parameters;

            this.Activation = (ICompressibleActivation <T>)CLConverter.Convert(linear.Activation);

            this.SetParallel(true);
        }
Example #9
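        // Applies BackwardActivate element-wise to y.Grad, producing the activated gradient used by the backward implementations.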
        public static Real[] GetActivatedgy(this ICompressibleActivation <Real> compressibleActivation, NdArray <Real> y, NdArray <Real> x)
        {
            Real[] activatedgy = new Real[y.Grad.Length];

            for (int i = 0; i < activatedgy.Length; i++)
            {
                activatedgy[i] = compressibleActivation.BackwardActivate(y.Grad[i], y.Data[i], x.Data[i]);
            }

            return(activatedgy);
        }
Example #10
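        // CPU forward pass: applies ForwardActivate element-wise to x.Data.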
        public static NdArray NeedPreviousForwardCpu(this ICompressibleActivation compressibleActivation, NdArray x)
        {
            Real[] y = new Real[x.Data.Length];

            for (int i = 0; i < y.Length; i++)
            {
                y[i] = compressibleActivation.ForwardActivate(x.Data[i]);
            }

            return(NdArray.Convert(y, x.Shape, x.BatchCount, compressibleActivation));
        }
Example #11
        public static string GetActivateSource(this ICompressibleActivation compressibleActivation)
        {
            string activationSource = compressibleActivation.KernelSource;

            if (compressibleActivation.ActivationParameters != null)
            {
                foreach (var activationParameter in compressibleActivation.ActivationParameters)
                {
                    activationSource = activationSource.Replace(activationParameter.Key, activationParameter.Value);
                }
            }

            return(activationSource);
        }
Example #12
        public Convolution2D(Linear <T> linear) : base(linear.Name, linear.InputNames, linear.OutputNames)
        {
            this.StrideX = 1;
            this.StrideY = 1;
            this.PadX    = 0;
            this.PadY    = 0;

            this.Parameters = linear.Parameters;

            this.Weight = linear.Weight;
            this.Weight.Reshape(this.Weight.Shape[0], this.Weight.Shape[1], 1, 1);
            this.Bias       = linear.Bias;
            this.Activation = linear.Activation;
            InitFunc(new StreamingContext());
        }
Example #13
        // Conversion constructor: wraps an existing CPU.Convolution2D<T> for parallel (OpenCL) execution
        public Convolution2D(CPU.Convolution2D <T> conv2d) : base(conv2d.Name, conv2d.InputNames, conv2d.OutputNames)
        {
            this.StrideX = conv2d.StrideX;
            this.StrideY = conv2d.StrideY;
            this.PadX    = conv2d.PadX;
            this.PadY    = conv2d.PadY;

            this.Weight = conv2d.Weight;
            this.Bias   = conv2d.Bias;

            this.Parameters = conv2d.Parameters;

            this.Activation = (ICompressibleActivation <T>)CLConverter.Convert(conv2d.Activation);

            this.SetParallel(true);
            this.InitFunc(new StreamingContext());
        }
Example #14
        public Convolution2D(Linear linear) : base(linear.Name, linear.InputNames, linear.OutputNames)
        {
            this.KernelWidth  = 1;
            this.KernelHeight = 1;
            this.StrideX      = 1;
            this.StrideY      = 1;
            this.PadX         = 0;
            this.PadY         = 0;

            this.Parameters = linear.Parameters;

            this.Weight = linear.Weight;
            this.Weight.Reshape(OutputCount, InputCount, this.KernelHeight, this.KernelWidth);
            this.Bias       = linear.Bias;
            this.NoBias     = linear.NoBias;
            this.Activation = linear.Activation;
        }
Example #15
        public Deconvolution2D(CPU.Deconvolution2D <T> deconv2D) : base(deconv2D.Name, deconv2D.InputNames, deconv2D.OutputNames)
        {
            this.PadX    = deconv2D.PadX;
            this.PadY    = deconv2D.PadY;
            this.StrideX = deconv2D.StrideX;
            this.StrideY = deconv2D.StrideY;

            this.Weight = deconv2D.Weight;
            this.Bias   = deconv2D.Bias;

            this.Parameters = deconv2D.Parameters;

            this.Activation = (ICompressibleActivation <T>)CLConverter.Convert(deconv2D.Activation);

            this.SetParallel(true);
            this.InitFunc(new StreamingContext());
        }
Example #16
        public Deconvolution2D(int inputChannels, int outputChannels, int kernelSize, int stride = 1, int pad = 0, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            this.KernelWidth  = kernelSize;
            this.KernelHeight = kernelSize;
            this.PadX         = pad;
            this.PadY         = pad;
            this.StrideX      = stride;
            this.StrideY      = stride;
            this.NoBias       = noBias;

            this.Parameters = new NdArray[noBias ? 1 : 2];

            this.OutputCount = outputChannels;
            this.InputCount  = inputChannels;

            this.Activation = activation;

            this.Initialize(initialW, initialb);
        }
Example #17
        public Convolution2D(int inputChannels, int outputChannels, int kernelSize, int stride = 1, int pad = 0, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation <T> activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            this.StrideX = stride;
            this.StrideY = stride;
            this.PadX    = pad;
            this.PadY    = pad;

            this.Parameters = new NdArray <T> [noBias ? 1 : 2];

            this.Weight = new NdArray <T>(outputChannels, inputChannels, kernelSize, kernelSize);
            if (!noBias)
            {
                this.Bias = new NdArray <T>(outputChannels);
            }

            this.Activation = activation;

            this.Initialize(initialW, initialb);
            InitFunc(new StreamingContext());
        }
Example #18
        public Linear(int inputCount, int outputCount, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(name, inputNames, outputNames)
        {
            this.OutputCount = outputCount;
            this.InputCount  = inputCount;

            this.Weight      = new NdArray(outputCount, inputCount);
            this.Weight.Name = this.Name + " Weight";

            this.NoBias = noBias;

            this.Parameters = new NdArray[noBias ? 1 : 2];

            this.Activation = activation;

            if (initialW == null)
            {
                Initializer.InitWeight(this.Weight);
            }
            else
            {
                this.Weight.Data = Real.ToRealArray(initialW);
            }

            this.Parameters[0] = this.Weight;

            if (!noBias)
            {
                this.Bias      = new NdArray(outputCount);
                this.Bias.Name = this.Name + " Bias";

                if (initialb != null)
                {
                    this.Bias.Data = Real.ToRealArray(initialb);
                }

                this.Parameters[1] = this.Bias;
            }
        }
Example #19
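        // GPU backward pass for Deconvolution2D: the first kernel accumulates the weight gradient, the second computes the input gradient, which is then added to x.Grad on the host.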
        public static void SingleOutputBackward(NdArray <Real> y, NdArray <Real> x, NdArray <Real> weight, NdArray <Real> bias, int strideX, int strideY, int padX, int padY, ComputeKernel backwardgWKernel, ComputeKernel backwardgXKernel, ICompressibleActivation <Real> activation)
        {
            int inputCount   = weight.Shape[0];
            int outputCount  = weight.Shape[1];
            int kernelHeight = weight.Shape[2];
            int kernelWidth  = weight.Shape[3];

            Real[] gx          = new Real[x.Data.Length];
            Real[] activatedgy = activation != null ? activation.GetActivatedgy(y, x) : y.Grad;

            if (bias != null)
            {
                Deconvolution2DFunc.CalcBiasGrad(activatedgy, bias.Grad, y.Shape, y.BatchCount);
            }

            // gy (the activated output gradient) is shared by both kernel launches below
            using (ComputeBuffer <Real> gpugY = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, activatedgy))
            {
                using (ComputeBuffer <Real> gpugW = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.UseHostPointer, weight.Grad))
                    using (ComputeBuffer <Real> gpuX = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, x.Data))
                    {
                        backwardgWKernel.SetMemoryArgument(0, gpugY);
                        backwardgWKernel.SetMemoryArgument(1, gpuX);
                        backwardgWKernel.SetMemoryArgument(2, gpugW);
                        backwardgWKernel.SetValueArgument(3, y.BatchCount);
                        backwardgWKernel.SetValueArgument(4, outputCount);
                        backwardgWKernel.SetValueArgument(5, y.Length);
                        backwardgWKernel.SetValueArgument(6, y.Shape[1]);
                        backwardgWKernel.SetValueArgument(7, y.Shape[2]);
                        backwardgWKernel.SetValueArgument(8, x.Shape[1]);
                        backwardgWKernel.SetValueArgument(9, x.Shape[2]);
                        backwardgWKernel.SetValueArgument(10, x.Length);
                        backwardgWKernel.SetValueArgument(11, strideX);
                        backwardgWKernel.SetValueArgument(12, strideY);
                        backwardgWKernel.SetValueArgument(13, padX);
                        backwardgWKernel.SetValueArgument(14, padY);
                        backwardgWKernel.SetValueArgument(15, kernelHeight);
                        backwardgWKernel.SetValueArgument(16, kernelWidth);

                        OpenCL.CommandQueue.Execute
                        (
                            backwardgWKernel,
                            null,
                            new long[] { inputCount * outputCount, kernelHeight, kernelWidth },
                            null,
                            null
                        );

                        OpenCL.CommandQueue.Finish();
                        OpenCL.CommandQueue.ReadFromBuffer(gpugW, ref weight.Grad, true, null);
                    }

                using (ComputeBuffer <Real> gpugX = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.WriteOnly | ComputeMemoryFlags.AllocateHostPointer, gx.Length))
                    using (ComputeBuffer <Real> gpuW = new ComputeBuffer <Real>(OpenCL.Context, ComputeMemoryFlags.ReadOnly | ComputeMemoryFlags.UseHostPointer, weight.Data))
                    {
                        backwardgXKernel.SetMemoryArgument(0, gpugY);
                        backwardgXKernel.SetMemoryArgument(1, gpuW);
                        backwardgXKernel.SetMemoryArgument(2, gpugX);
                        backwardgXKernel.SetValueArgument(3, outputCount);
                        backwardgXKernel.SetValueArgument(4, inputCount);
                        backwardgXKernel.SetValueArgument(5, y.Length);
                        backwardgXKernel.SetValueArgument(6, y.Shape[1]);
                        backwardgXKernel.SetValueArgument(7, y.Shape[2]);
                        backwardgXKernel.SetValueArgument(8, x.Shape[1]);
                        backwardgXKernel.SetValueArgument(9, x.Shape[2]);
                        backwardgXKernel.SetValueArgument(10, x.Length);
                        backwardgXKernel.SetValueArgument(11, strideX);
                        backwardgXKernel.SetValueArgument(12, strideY);
                        backwardgXKernel.SetValueArgument(13, padX);
                        backwardgXKernel.SetValueArgument(14, padY);
                        backwardgXKernel.SetValueArgument(15, kernelHeight);
                        backwardgXKernel.SetValueArgument(16, kernelWidth);

                        OpenCL.CommandQueue.Execute
                        (
                            backwardgXKernel,
                            null,
                            new long[] { x.BatchCount * x.Shape[0], x.Shape[1], x.Shape[2] },
                            null,
                            null
                        );

                        OpenCL.CommandQueue.Finish();
                        OpenCL.CommandQueue.ReadFromBuffer(gpugX, ref gx, true, null);
                    }
            }

            for (int i = 0; i < x.Grad.Length; i++)
            {
                x.Grad[i] += gx[i];
            }
        }
Example #20
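        // CPU backward pass for Linear: accumulates both the weight gradient and the input gradient from the activated output gradient.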
        public static void SingleOutputBackward(NdArray <Real> y, NdArray <Real> x, NdArray <Real> weight, NdArray <Real> bias, ICompressibleActivation <Real> activation)
        {
            int outputCount = weight.Shape[0];
            int inputCount  = weight.Shape[1];

            Real[] activatedgy = activation != null ? activation.GetActivatedgy(y, x) : y.Grad;

            if (bias != null)
            {
                CalcBiasGrad(activatedgy, y.BatchCount, outputCount, bias.Grad);
            }

            for (int batchCount = 0; batchCount < y.BatchCount; batchCount++)
            {
                for (int i = 0; i < outputCount; i++)
                {
                    Real gyData = activatedgy[i + batchCount * outputCount];

                    for (int j = 0; j < inputCount; j++)
                    {
                        weight.Grad[i * inputCount + j]     += x.Data[batchCount * inputCount + j] * gyData;
                        x.Grad[batchCount * inputCount + j] += weight.Data[i * inputCount + j] * gyData;
                    }
                }
            }
        }
Example #21
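        // CPU forward pass for Linear: y = Wx (+ bias), with the optional activation applied element-wise afterwards.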
        public static NdArray <Real> SingleInputForward(NdArray <Real> x, NdArray <Real> weight, NdArray <Real> bias, ICompressibleActivation <Real> activation, IFunction <Real> linear)
        {
            int outputCount = weight.Shape[0];
            int inputCount  = weight.Shape[1];

            Real[] y = bias == null ? new Real[outputCount * x.BatchCount] : GetBiasedValue(x.BatchCount, outputCount, bias.Data);

            for (int batchCount = 0; batchCount < x.BatchCount; batchCount++)
            {
                for (int i = 0; i < outputCount; i++)
                {
                    for (int j = 0; j < inputCount; j++)
                    {
                        y[batchCount * outputCount + i] += x.Data[batchCount * inputCount + j] * weight.Data[i * inputCount + j];
                    }
                }
            }

            if (activation != null)
            {
                for (int i = 0; i < y.Length; i++)
                {
                    y[i] = activation.ForwardActivate(y[i]);
                }
            }

            return(NdArray.Convert(y, new[] { outputCount }, x.BatchCount, linear));
        }
Example #22
 public Linear(int inputCount, int outputCount, bool noBias = false, Array initialW = null, T[] initialb = null, Action <NdArray <T> > weightInitializer = null, ICompressibleActivation <T> activation = null, string name = "Linear", string[] inputNames = null, string[] outputNames = null, bool gpuEnable = false) : base(inputCount, outputCount, noBias, initialW, initialb, weightInitializer, activation, name, inputNames, outputNames)
 {
     this.SetParallel(gpuEnable);
 }
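For reference, the constructor above might be wired up roughly as follows. This is a minimal sketch: it assumes a ReLU<float> type implementing ICompressibleActivation<float> exists in the same library (the exact activation type name may differ) and only exercises the signature shown in Example #22.
 // Hypothetical usage: a fully connected layer with a fused activation.
 // Passing gpuEnable: true calls SetParallel(true), which only takes effect when OpenCL.Enable is true.
 var fc = new Linear<float>(inputCount: 784, outputCount: 10, activation: new ReLU<float>(), gpuEnable: true);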
Example #23
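        // CPU forward pass for Convolution2D; bias addition and activation are applied in a final pass over the output.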
        public static NdArray <Real> SingleInputForward(NdArray <Real> x, NdArray <Real> weight, NdArray <Real> bias, int strideX, int strideY, int padX, int padY, ICompressibleActivation <Real> activation, IFunction <Real> conv2d)
        {
            int outputCount  = weight.Shape[0];
            int inputCount   = weight.Shape[1];
            int kernelHeight = weight.Shape[2];
            int kernelWidth  = weight.Shape[3];

            int outputHeight = (int)Math.Floor((x.Shape[1] - kernelHeight + padY * 2.0f) / strideY) + 1;
            int outputWidth  = (int)Math.Floor((x.Shape[2] - kernelWidth + padX * 2.0f) / strideX) + 1;

            Real[] y = new Real[x.BatchCount * outputCount * outputHeight * outputWidth];

            for (int batchCounter = 0; batchCounter < x.BatchCount; batchCounter++)
            {
                int yBatchOffset = batchCounter * outputCount * outputHeight * outputWidth;
                int xBatchOffset = batchCounter * x.Length;

                for (int och = 0; och < outputCount; och++)
                {
                    int kOchOffset = och * inputCount * kernelHeight * kernelWidth;

                    int yChOffset = yBatchOffset + och * outputHeight * outputWidth;

                    for (int oy = 0; oy < outputHeight * strideY; oy += strideY)
                    {
                        int iyStart = oy - padY < 0 ? 0 : oy - padY;
                        int iyLimit = kernelHeight + oy - padY < x.Shape[1] ? kernelHeight + oy - padY : x.Shape[1];

                        for (int ox = 0; ox < outputWidth * strideX; ox += strideX)
                        {
                            int ixStart = ox - padX < 0 ? 0 : ox - padX;
                            int ixLimit = kernelWidth + ox - padX < x.Shape[2] ? kernelWidth + ox - padX : x.Shape[2];

                            int yIndex = yChOffset + oy / strideY * outputWidth + ox / strideX;

                            for (int ich = 0; ich < inputCount; ich++)
                            {
                                int kIchOffset = kOchOffset + ich * kernelHeight * kernelWidth;

                                int xChOffset = xBatchOffset + ich * x.Shape[1] * x.Shape[2];

                                for (int iy = iyStart; iy < iyLimit; iy++)
                                {
                                    for (int ix = ixStart; ix < ixLimit; ix++)
                                    {
                                        int wIndex = kIchOffset + (iy - oy + padY) * kernelWidth + ix - ox + padX;
                                        int xIndex = xChOffset + iy * x.Shape[2] + ix;

                                        y[yIndex] += x.Data[xIndex] * weight.Data[wIndex];
                                    }
                                }
                            }
                        }
                    }
                }
            }

            if (activation != null && bias != null)
            {
                for (int batchCounter = 0; batchCounter < x.BatchCount; batchCounter++)
                {
                    int resultIndex = batchCounter * outputCount * outputHeight * outputWidth;

                    for (int och = 0; och < outputCount; och++)
                    {
                        for (int location = 0; location < outputHeight * outputWidth; location++)
                        {
                            y[resultIndex] += bias.Data[och];
                            y[resultIndex]  = activation.ForwardActivate(y[resultIndex]);

                            resultIndex++;
                        }
                    }
                }
            }
            else if (bias != null)
            {
                for (int batchCounter = 0; batchCounter < x.BatchCount; batchCounter++)
                {
                    int resultIndex = batchCounter * outputCount * outputHeight * outputWidth;

                    for (int och = 0; och < outputCount; och++)
                    {
                        for (int location = 0; location < outputHeight * outputWidth; location++)
                        {
                            y[resultIndex] += bias.Data[och];
                            resultIndex++;
                        }
                    }
                }
            }
            else if (activation != null)
            {
                for (int i = 0; i < y.Length; i++)
                {
                    y[i] = activation.ForwardActivate(y[i]);
                }
            }

            return(NdArray.Convert(y, new[] { outputCount, outputHeight, outputWidth }, x.BatchCount, conv2d));
        }
Example #24
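        // CPU backward pass for Convolution2D: accumulates the weight gradient and the input gradient from the activated output gradient.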
        public static void SingleOutputBackward(NdArray <Real> y, NdArray <Real> x, NdArray <Real> weight, NdArray <Real> bias, int strideX, int strideY, int padX, int padY, ICompressibleActivation <Real> activation)
        {
            //int outputCount = weight.Shape[0];
            int inputCount   = weight.Shape[1];
            int kernelHeight = weight.Shape[2];
            int kernelWidth  = weight.Shape[3];

            Real[] activatedgy = activation != null ? activation.GetActivatedgy(y, x) : y.Grad;

            if (bias != null)
            {
                CalcBiasGrad(activatedgy, y.Shape, y.BatchCount, bias.Grad);
            }

            for (int batchCounter = 0; batchCounter < y.BatchCount; batchCounter++)
            {
                int yBatchOffset = batchCounter * y.Length;
                int xBatchOffset = batchCounter * x.Length;

                for (int och = 0; och < y.Shape[0]; och++)
                {
                    int wOchOffset = och * inputCount * kernelHeight * kernelWidth;

                    int yChOffset = och * y.Shape[1] * y.Shape[2];

                    for (int oy = 0; oy < y.Shape[1] * strideY; oy += strideY)
                    {
                        int iyStart = oy - padY < 0 ? 0 : oy - padY;
                        int iyLimit = kernelHeight + oy - padY < x.Shape[1] ? kernelHeight + oy - padY : x.Shape[1];

                        for (int ox = 0; ox < y.Shape[2] * strideX; ox += strideX)
                        {
                            int ixStart = ox - padX < 0 ? 0 : ox - padX;
                            int ixLimit = kernelWidth + ox - padX < x.Shape[2] ? kernelWidth + ox - padX : x.Shape[2];

                            int gyIndex = yBatchOffset + yChOffset + oy / strideY * y.Shape[2] + ox / strideX;

                            for (int ich = 0; ich < x.Shape[0]; ich++)
                            {
                                int wIchOffset = wOchOffset + ich * kernelHeight * kernelWidth;

                                int xChOffset = xBatchOffset + ich * x.Shape[1] * x.Shape[2];

                                for (int iy = iyStart; iy < iyLimit; iy++)
                                {
                                    for (int ix = ixStart; ix < ixLimit; ix++)
                                    {
                                        int wIndex = wIchOffset + (iy - oy + padY) * kernelWidth + ix - ox + padX;
                                        int xIndex = xChOffset + iy * x.Shape[2] + ix;

                                        weight.Grad[wIndex] += x.Data[xIndex] * activatedgy[gyIndex];
                                        x.Grad[xIndex]      += weight.Data[wIndex] * activatedgy[gyIndex];
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
Example #25
 public Deconvolution2D(int inputChannels, int outputChannels, int[] kSize, int[] subSample = null, int[] trim = null, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation activation = null, string name = "Deconvolution2D", string[] inputNames = null, string[] outputNames = null, bool gpuEnable = false) : base(inputChannels, outputChannels, kSize, subSample, trim, noBias, initialW, initialb, activation, name, inputNames, outputNames)
 {
     this.SetParallel(gpuEnable);
 }
Example #26
 public Deconvolution2D(int inputChannels, int outputChannels, int kernelSize, int stride = 1, int pad = 0, bool noBias = false, Array initialW = null, Array initialb = null, ICompressibleActivation activation = null, string name = "Deconvolution2D", string[] inputNames = null, string[] outputNames = null, bool gpuEnable = false) : base(inputChannels, outputChannels, kernelSize, stride, pad, noBias, initialW, initialb, activation, name, inputNames, outputNames)
 {
     this.SetParallel(gpuEnable);
 }
Example #27
 public MaskedLinear(int inputCount, int outputCount, bool noBias = false, Array initialW = null, Array initialb = null, Action <NdArray <T> > weightInitializer = null, ICompressibleActivation <T> activation = null, string name = FUNCTION_NAME, string[] inputNames = null, string[] outputNames = null) : base(inputCount, outputCount, noBias, initialW, initialb, weightInitializer, activation, name, inputNames, outputNames)
 {
     this.Mask = new NdArray <T>(outputCount, inputCount);
     this.Mask.InitGrad(); // Mask is a non-parameter that is never updated, so its gradient is initialized manually here
 }
Example #28
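        // Backward pass for MaskedLinear: runs the ordinary backward on weight * mask, then folds the masked gradient back into weight.Grad.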
        public static void SingleOutputBackward(NdArray <Real> y, NdArray <Real> x, NdArray <Real> mask, NdArray <Real> weight, NdArray <Real> bias, ComputeKernel backwardgWKernel, ComputeKernel backwardgXKernel, ICompressibleActivation <Real> activation)
        {
            NdArray <Real> maskedWeight = weight * mask;

            maskedWeight.InitGrad(); // maskedWeight is a non-parameter the optimizer never updates, so its gradient must be initialized here

            SingleOutputBackward(y, x, maskedWeight, bias, backwardgWKernel, backwardgXKernel, activation);

            for (int i = 0; i < weight.Data.Length; i++)
            {
                mask.Grad[i]    = maskedWeight.Grad[i];                // stash the gradient of the unmasked weight in the mask's gradient
                weight.Grad[i] += mask.Data[i] * maskedWeight.Grad[i]; // apply the masked gradient to the weight
            }
        }
Example #29
 public MaskedLinear(int inputCount, int outputCount, bool noBias = false, Array initialW = null, T[] initialb = null, Action <NdArray <T> > weightInitializer = null, ICompressibleActivation <T> activation = null, string name = "Linear", string[] inputNames = null, string[] outputNames = null, bool gpuEnable = false) : base(inputCount, outputCount, noBias, initialW, initialb, weightInitializer, activation, name, inputNames, outputNames, gpuEnable)
 {
     this.Mask = new NdArray <T>(outputCount, inputCount);
     this.Mask.InitGrad(); // Mask is a non-parameter that is never updated, so its gradient is initialized manually here
     this.InitMaskedFunc(new StreamingContext());
 }
Example #30
 public static NdArray <Real> SingleInputForward(NdArray <Real> x, NdArray <Real> mask, NdArray <Real> weight, NdArray <Real> bias, ICompressibleActivation <Real> activation, IFunction <Real> linear)
 {
     return(SingleInputForward(x, weight * mask, bias, activation, linear));
 }