Example #1
        public static Function ConvolutionTranspose2D(Variable input, int outFeatureMapCount, int kernelWidth, int kernelHeight, DeviceDescriptor device,
                                                      int stride = 1, bool pad = false, bool bias = true, string name = "")
        {
            int numInputChannels = input.Shape[input.Shape.Rank - 1];
            //kernel weights parameter
            var convParams = new Parameter(new int[] { kernelWidth, kernelHeight, numInputChannels, outFeatureMapCount },
                                           DataType.Float, CNTKLib.GlorotUniformInitializer(1, -1, 2), device, name + WeightSuffix);

            //transposed convolution (deconvolution)
            var convFunction = CNTKLib.ConvolutionTranspose(convParams, input,
                                                            new int[] { stride, stride, numInputChannels },
                                                            BoolVector.Repeat(true, 3),
                                                            new BoolVector(new bool[] { pad, pad, false }));

            //bias
            if (bias)
            {
                var b = new Parameter(new int[] { 1, 1, outFeatureMapCount }, DataType.Float, CNTKLib.ConstantInitializer(0), device, name + BiasSuffix);
                convFunction = b + convFunction;
            }
            //name
            if (!string.IsNullOrEmpty(name))
            {
                convFunction.SetName(name);
            }
            return convFunction;
        }
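
A minimal usage sketch for this helper (the shapes, the "features"/"deconv1" names and DeviceDescriptor.CPUDevice are illustrative assumptions, not part of the original source):

        var device = DeviceDescriptor.CPUDevice;
        // a 14x14 feature map with 64 channels, in CNTK's (width, height, channels) layout
        Variable features = Variable.InputVariable(new int[] { 14, 14, 64 }, DataType.Float, "features");
        // stride 2 with padding roughly doubles the spatial resolution
        Function upsampled = ConvolutionTranspose2D(features, 32, 3, 3, device, stride: 2, pad: true, bias: true, name: "deconv1");
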
Example #2
        public QNetworkConvSimple(int inputWidth, int inputHeight, int inputDepth, int actionSize,
                                  int[] filterSizes, int[] filterDepths, int[] strides, bool[] pooling,
                                  int densehiddenLayers, int densehiddenSize, bool denseUseBias, DeviceDescriptor device, float denseInitialWeightScale = 0.01f)
        {
            Device         = device;
            StateSize      = inputWidth * inputHeight * inputDepth;
            ActionSize     = actionSize;
            InputDimension = new int[3] {
                inputWidth, inputHeight, inputDepth
            };


            //create actor network part
            InputState = CNTKLib.InputVariable(InputDimension, DataType.Float);

            Debug.Assert(filterSizes.Length == strides.Length && filterDepths.Length == filterSizes.Length, "Lengths of filterSizes, strides and filterDepths must all be the same");

            var lastLayer = InputState;

            for (int i = 0; i < filterSizes.Length; ++i)
            {
                //conv layers. Use SELU activation and SELU-compatible initialization
                lastLayer = Layers.Convolution2D(lastLayer, filterDepths[i],
                                                 filterSizes[i], filterSizes[i], device, strides[i], true, true, "QConv_" + i, Mathf.Sqrt((1.0f / (filterSizes[i] * filterSizes[i]))));
                lastLayer = new SELUDef().BuildNew(lastLayer, device, "");
                //pooling
                if (pooling[i])
                {
                    lastLayer = CNTKLib.Pooling(lastLayer, PoolingType.Max, new int[] { 2, 2 }, new int[] { 2, 2 }, BoolVector.Repeat(true, 2), false, true, "QPool_" + i);
                }
            }

            lastLayer = CNTKLib.Flatten(lastLayer, new Axis(3), "Flatten");

            //dense layers
            var inputA  = new InputLayerCNTKVar(lastLayer);
            var outputA = new OutputLayerDense(actionSize, null, OutputLayerDense.LossFunction.None);

            outputA.HasBias            = false;
            outputA.InitialWeightScale = denseInitialWeightScale;
            SequentialNetworkDense qNetwork = new SequentialNetworkDense(inputA,
                LayerDefineHelper.DenseLayers(densehiddenLayers, densehiddenSize, denseUseBias, NormalizationMethod.None, 0, denseInitialWeightScale, new ReluDef()),
                outputA, device);

            OutputQs = outputA.GetOutputVariable();
        }
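
        // Usage sketch (illustrative hyper-parameters, not from the original source;
        // assumes QNetworkConvSimple and CNTK's DeviceDescriptor are in scope):
        //   var qNet = new QNetworkConvSimple(
        //       inputWidth: 84, inputHeight: 84, inputDepth: 4, actionSize: 6,
        //       filterSizes: new[] { 8, 4, 3 }, filterDepths: new[] { 32, 64, 64 },
        //       strides: new[] { 4, 2, 1 }, pooling: new[] { false, false, false },
        //       densehiddenLayers: 2, densehiddenSize: 256, denseUseBias: true,
        //       device: DeviceDescriptor.CPUDevice);
        //   qNet.OutputQs then holds one Q-value per action for the given input state.
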
        /// <summary>
        /// Create the VGG19 convolutional encoders for the UST model.
        /// </summary>
        /// <param name="imageDimension">Width and height of the input image.</param>
        /// <returns>A combined Function whose outputs are the relu1_1, relu2_1, relu3_1, relu4_1 and relu5_1 activations.</returns>
        protected Function CreateEncoders(Vector2Int imageDimension)
        {
            Function       encoderLayers;
            VariableVector outputs = new VariableVector();
            //input variable: width x height x 3 (RGB)
            Variable prev = Variable.InputVariable(new int[] { imageDimension.x, imageDimension.y, 3 }, DataType.Float, "input");

            //vgg preprocessing: 1x1 convolution on the RGB channels
            prev = Layers.Convolution2D(prev, 3, 1, 1, device, 1, false, true, "conv0_preprocessing");

            //----conv1----
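            // Each VGG block below applies 1-pixel reflection padding followed by an
            // unpadded 3x3 convolution, so the spatial size is preserved within a block;
            // 2x2 max pooling between blocks halves the resolution, and the relu*_1
            // activations are collected as multi-scale encoder outputs.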
            //conv1_1
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 64, 3, 3, device, 1, false, true, "conv1_1");
            prev = CNTKLib.ReLU(prev, "relu1_1");
            outputs.Add(prev);   //expose relu1_1 as an encoder output

            //conv1_2
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 64, 3, 3, device, 1, false, true, "conv1_2");
            prev = CNTKLib.ReLU(prev, "relu1_2");
            //maxpooling 1
            prev = CNTKLib.Pooling(prev, PoolingType.Max, new int[] { 2, 2 }, new int[] { 2, 2 }, BoolVector.Repeat(true, 2), false, true, "pool1");

            //----conv2----
            //conv2_1
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 128, 3, 3, device, 1, false, true, "conv2_1");
            prev = CNTKLib.ReLU(prev, "relu2_1");
            outputs.Add(prev);   //expose relu2_1 as an encoder output
            //conv2_2
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 128, 3, 3, device, 1, false, true, "conv2_2");
            prev = CNTKLib.ReLU(prev, "relu2_2");
            //maxpooling 2
            prev = CNTKLib.Pooling(prev, PoolingType.Max, new int[] { 2, 2 }, new int[] { 2, 2 }, BoolVector.Repeat(true, 2), false, true, "pool2");

            //----conv3----
            //conv3_1
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 256, 3, 3, device, 1, false, true, "conv3_1");
            prev = CNTKLib.ReLU(prev, "relu3_1");
            outputs.Add(prev);   //expose relu3_1 as an encoder output
            //conv3_2
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 256, 3, 3, device, 1, false, true, "conv3_2");
            prev = CNTKLib.ReLU(prev, "relu3_2");
            //conv3_3
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 256, 3, 3, device, 1, false, true, "conv3_3");
            prev = CNTKLib.ReLU(prev, "relu3_3");
            //conv3_4
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 256, 3, 3, device, 1, false, true, "conv3_4");
            prev = CNTKLib.ReLU(prev, "relu3_4");
            //maxpooling 3
            prev = CNTKLib.Pooling(prev, PoolingType.Max, new int[] { 2, 2 }, new int[] { 2, 2 }, BoolVector.Repeat(true, 2), false, true, "pool3");

            //----conv4----
            //conv4_1
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 512, 3, 3, device, 1, false, true, "conv4_1");
            prev = CNTKLib.ReLU(prev, "relu4_1");
            outputs.Add(prev);   //expose relu4_1 as an encoder output
            //conv4_2
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 512, 3, 3, device, 1, false, true, "conv4_2");
            prev = CNTKLib.ReLU(prev, "relu4_2");
            //conv4_3
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 512, 3, 3, device, 1, false, true, "conv4_3");
            prev = CNTKLib.ReLU(prev, "relu4_3");
            //conv4_4
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 512, 3, 3, device, 1, false, true, "conv4_4");
            prev = CNTKLib.ReLU(prev, "relu4_4");
            //maxpooling 4
            prev = CNTKLib.Pooling(prev, PoolingType.Max, new int[] { 2, 2 }, new int[] { 2, 2 }, BoolVector.Repeat(true, 2), false, true, "pool4");

            //----conv5----
            //conv5_1
            prev = CNTKLib.Pad(prev, PaddingMode.REFLECTPAD, new SizeTVector(new uint[] { 1, 1, 0 }), new SizeTVector(new uint[] { 1, 1, 0 }));
            prev = Layers.Convolution2D(prev, 512, 3, 3, device, 1, false, true, "conv5_1");
            prev = CNTKLib.ReLU(prev, "relu5_1");
            outputs.Add(prev);   //expose relu5_1 as an encoder output

            encoderLayers = CNTKLib.Combine(outputs);
            return encoderLayers;
        }
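
        // Usage sketch (illustrative; CreateEncoders is protected, so this would run inside
        // the declaring class or a subclass, and the 256x256 size is an assumption):
        //   Function encoders = CreateEncoders(new Vector2Int(256, 256));
        //   foreach (Variable o in encoders.Outputs)   // relu1_1 ... relu5_1, in the order added
        //   {
        //       Debug.Log(o.Name + ": " + string.Join(",", o.Shape.Dimensions));
        //   }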