////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Gets the kernel padding from a pooling parameter. </summary>
        ///
        /// <param name="param">    The pooling parameter. </param>
        ///
        /// <returns>   The kernel padding as a Size (width, height). </returns>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        static Size GetKernelPad(PoolingParameter param)
        {
            if (param.PadH > 0)
            {
                return new Size((int)param.PadW, (int)param.PadH);
            }

            return new Size((int)param.Pad, (int)param.Pad);
        }
        ////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Gets the kernel size from a pooling parameter. </summary>
        ///
        /// <param name="param">    The pooling parameter. </param>
        ///
        /// <returns>   The kernel size as a Size (width, height). </returns>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        static Size GetKernelSize(PoolingParameter param)
        {
            if (param.KernelH > 0)
            {
                return new Size((int)param.KernelW, (int)param.KernelH);
            }

            return new Size((int)param.KernelSize, (int)param.KernelSize);
        }
        ////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Gets the kernel stride from a pooling parameter. </summary>
        ///
        /// <param name="param">    The pooling parameter. </param>
        ///
        /// <returns>   The kernel stride as a Size (width, height). </returns>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        static Size GetKernelStride(PoolingParameter param)
        {
            if (param.StrideH > 0)
            {
                return new Size((int)param.StrideW, (int)param.StrideH);
            }

            return new Size((int)param.Stride, (int)param.Stride);
        }
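The three helpers above share one rule: when an explicit height value (PadH, KernelH, StrideH) is set, the width/height pair is used; otherwise the single square value is duplicated for both dimensions. A minimal standalone sketch of that rule, using a hypothetical Resolve helper rather than the library's PoolingParameter type:

        // Illustrative only: per-dimension values win over the square value,
        // mirroring GetKernelPad/GetKernelSize/GetKernelStride above.
        static (int Width, int Height) Resolve(uint square, uint w, uint h)
        {
            return h > 0 ? ((int)w, (int)h) : ((int)square, (int)square);
        }

        // Example: square = 3, w = 2, h = 5  ->  (2, 5)
        //          square = 3, w = 0, h = 0  ->  (3, 3)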
Example 4
        static int[] GetKernelPad(PoolingParameter param)
        {
            if (param.PadH > 0)
            {
                return new[] { (int)param.PadW, (int)param.PadH };
            }

            return new[] { (int)param.Pad, (int)param.Pad };
        }
Example 5
        static int[] GetKernelStride(PoolingParameter param)
        {
            if (param.StrideH > 0)
            {
                return new[] { (int)param.StrideW, (int)param.StrideH };
            }

            return new[] { (int)param.Stride, (int)param.Stride };
        }
Example 6
        static int[] GetKernelSize(PoolingParameter param)
        {
            if (param.KernelH > 0)
            {
                return new[] { (int)param.KernelW, (int)param.KernelH };
            }

            return new[] { (int)param.KernelSize, (int)param.KernelSize };
        }
        /// <summary>
        /// Parses the parameter from a RawProto.
        /// </summary>
        /// <param name="rp">Specifies the RawProto to parse.</param>
        /// <returns>A new instance of the parameter is returned.</returns>
        public static new UnPoolingParameter FromProto(RawProto rp)
        {
            UnPoolingParameter p = new UnPoolingParameter();

            ((PoolingParameter)p).Copy(PoolingParameter.FromProto(rp));

            p.m_rgUnpool = rp.FindArray<uint>("unpool_size");
            p.m_nUnPoolH = (uint?)rp.FindValue("unpool_h", typeof(uint));
            p.m_nUnPoolW = (uint?)rp.FindValue("unpool_w", typeof(uint));

            return p;
        }
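FromProto copies the base pooling fields and then reads the unpooling-specific fields, where either a repeated unpool_size or an explicit unpool_h/unpool_w pair may be present. A hedged, standalone sketch of one plausible way those optional fields could resolve to a height/width pair, mirroring the kernel/pad/stride pattern above; the library's actual precedence may differ and the helper name is hypothetical:

        // Illustrative only: explicit h/w override the repeated size field when both are set.
        static (int H, int W) ResolveUnpool(uint[] unpoolSize, uint? unpoolH, uint? unpoolW)
        {
            if (unpoolH.HasValue && unpoolW.HasValue)
                return ((int)unpoolH.Value, (int)unpoolW.Value);

            if (unpoolSize != null && unpoolSize.Length > 0)
                return ((int)unpoolSize[0], (int)unpoolSize[0]);

            return (0, 0); // nothing specified; callers decide the default elsewhere
        }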
        ////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Sets up a pooling function from a pooling parameter. </summary>
        ///
        /// <param name="param">        The pooling parameter. </param>
        /// <param name="name">         The layer name. </param>
        /// <param name="inputNames">   The names of the inputs. </param>
        /// <param name="outputNames">  The names of the outputs. </param>
        ///
        /// <returns>   A pooling Function, or null when the pooling method is not supported. </returns>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        static Function SetupPooling(PoolingParameter param, string name, string[] inputNames, string[] outputNames)
        {
            Size ksize  = GetKernelSize(param);
            Size stride = GetKernelStride(param);
            Size pad    = GetKernelPad(param);

            switch (param.Pool)
            {
            case PoolingParameter.PoolMethod.Max:
                return new MaxPooling(ksize, stride, pad, name: name, inputNames: inputNames, outputNames: outputNames);

            case PoolingParameter.PoolMethod.Ave:
                return new AveragePooling(ksize, stride, pad, name, inputNames, outputNames);
            }

            return null;
        }
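SetupPooling falls through to null for any pooling method it does not handle. An illustrative caller-side guard, not part of the original converter, that turns the null fallthrough into an explicit error (assumes the file already imports System):

        // Hypothetical wrapper around the SetupPooling helper defined above.
        static Function RequirePooling(PoolingParameter param, string name, string[] inputNames, string[] outputNames)
        {
            Function f = SetupPooling(param, name, inputNames, outputNames);

            if (f == null)
            {
                throw new NotSupportedException("Only Max and Ave pooling are converted here.");
            }

            return f;
        }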
Example 9
        static Function<T> SetupPooling<T>(PoolingParameter param, string name, string[] inputNames, string[] outputNames) where T : unmanaged, IComparable<T>
        {
            int[] ksize  = GetKernelSize(param);
            int[] stride = GetKernelStride(param);
            int[] pad    = GetKernelPad(param);

            switch (param.Pool)
            {
            case PoolingParameter.PoolMethod.Max:
                return new MaxPooling2D<T>(ksize, stride, pad, name: name, inputNames: inputNames, outputNames: outputNames);

            case PoolingParameter.PoolMethod.Ave:
                return new AveragePooling2D<T>(ksize, stride, pad, name, inputNames, outputNames);
            }

            return null;
        }
Example 10
        /// <summary>
        /// Set up the layer for use with both Engine.CAFFE and Engine.CUDNN modes.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            PoolingParameter p = m_param.pooling_param;

            if (p.global_pooling)
            {
                m_log.CHECK(!(p.kernel_size.Count > 0 || p.kernel_h.HasValue || p.kernel_w.HasValue), "With global pooling = true, Filter size cannot be specified.");
            }
            else
            {
                m_log.CHECK(!(p.kernel_size.Count > 0) != !(p.kernel_h.HasValue && p.kernel_w.HasValue), "Filter size is kernel_size OR kernel_h and kernel_w; not both.");
                m_log.CHECK(p.kernel_size.Count > 0 || (p.kernel_h.HasValue && p.kernel_w.HasValue), "For non-square filters, both kernel_h and kernel_w are required.");
            }

            m_log.CHECK(((p.pad.Count == 0) && p.pad_h.HasValue && p.pad_w.HasValue) || (!p.pad_h.HasValue && !p.pad_w.HasValue), "Pad is pad or pad_h and pad_w are required.");
            m_log.CHECK(((p.stride.Count == 0) && p.stride_h.HasValue && p.stride_w.HasValue) || (!p.stride_h.HasValue && !p.stride_w.HasValue), "Stride is stride or stride_h and stride_w are required.");
            m_bGlobalPooling = p.global_pooling;


            //---- Kernel Size ----

            if (m_bGlobalPooling)
            {
                m_nKernelH = colBottom[0].height;
                m_nKernelW = colBottom[0].width;
            }
            else
            {
                if (p.kernel_size.Count > 0)
                {
                    m_nKernelH = (int)p.kernel_size[0];
                    m_nKernelW = (int)p.kernel_size[0];
                }
                else
                {
                    m_nKernelH = (int)p.kernel_h.Value;
                    m_nKernelW = (int)p.kernel_w.Value;
                }
            }

            m_log.CHECK_GT(m_nKernelH, 0, "Filter dimensions cannot be zero.");
            m_log.CHECK_GT(m_nKernelW, 0, "Filter dimensions cannot be zero.");


            //---- Pad ----

            if (p.pad.Count > 0)
            {
                m_nPadH = (int)p.pad[0];
                m_nPadW = (int)p.pad[0];
            }
            else
            {
                m_nPadH = (p.pad_h.HasValue) ? (int)p.pad_h.Value : 0;
                m_nPadW = (p.pad_w.HasValue) ? (int)p.pad_w.Value : 0;
            }


            //---- Stride ----

            if (p.stride.Count > 0)
            {
                m_nStrideH = (int)p.stride[0];
                m_nStrideW = (int)p.stride[0];
            }
            else
            {
                m_nStrideH = (p.stride_h.HasValue) ? (int)p.stride_h.Value : 1;
                m_nStrideW = (p.stride_w.HasValue) ? (int)p.stride_w.Value : 1;
            }

            if (m_bGlobalPooling)
            {
                m_log.CHECK(m_nPadH == 0 && m_nPadW == 0 && m_nStrideH == 1 && m_nStrideW == 1, "With global pooling = true, only pad = 0 and stride = 1 allowed.");
            }

            if (m_nPadH != 0 || m_nPadW != 0)
            {
                m_log.CHECK(m_param.pooling_param.pool == PoolingParameter.PoolingMethod.AVE ||
                            m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX, "Padding implemented for AVE and MAX pooling only.");
                m_log.CHECK_LT(m_nPadH, m_nKernelH, "The pad_h must be less than kernel_h.");
                m_log.CHECK_LT(m_nPadW, m_nKernelW, "The pad_w must be less than kernel_w.");
            }
        }
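The net effect of the blocks above is a small set of defaults: the kernel comes from kernel_size or from kernel_h/kernel_w (or from the bottom height and width under global pooling), pad defaults to 0 and stride defaults to 1 when unset. A standalone sketch of those defaults with hypothetical names, stripped of the CHECK plumbing:

        // Illustrative summary of the defaulting rules in LayerSetUp (hypothetical helper).
        static (int KernelH, int KernelW, int PadH, int PadW, int StrideH, int StrideW) ResolvePoolingGeometry(
            int bottomH, int bottomW, bool globalPooling,
            uint? kernelSize, uint? kernelH, uint? kernelW,
            uint? pad, uint? padH, uint? padW,
            uint? stride, uint? strideH, uint? strideW)
        {
            // Global pooling uses the full bottom spatial extent as the kernel.
            int kh = globalPooling ? bottomH : (int)(kernelSize ?? kernelH.Value);
            int kw = globalPooling ? bottomW : (int)(kernelSize ?? kernelW.Value);

            int ph = (int)(pad ?? padH ?? 0);        // pad defaults to 0
            int pw = (int)(pad ?? padW ?? 0);

            int sh = (int)(stride ?? strideH ?? 1);  // stride defaults to 1
            int sw = (int)(stride ?? strideW ?? 1);

            return (kh, kw, ph, pw, sh, sw);
        }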
Example 11
        /// <summary>
        /// Set up the layer for use with both Engine.CAFFE and Engine.CUDNN modes.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            PoolingParameter p = m_param.pooling_param;

            if (p.global_pooling)
            {
                if (p.kernel_size.Count > 0 || p.kernel_h.HasValue || p.kernel_w.HasValue)
                {
                    m_log.WriteLine("WARNING: With global pooling = true, Filter size cannot be specified, the bottom hxw = '" + colBottom[0].height.ToString() + "x" + colBottom[0].width.ToString() + "' will be used instead for the kernel size.");
                }
            }
            else
            {
                m_log.CHECK(!(p.kernel_size.Count > 0) != !(p.kernel_h.HasValue && p.kernel_w.HasValue), "Filter size is kernel_size OR kernel_h and kernel_w; not both.");
                m_log.CHECK(p.kernel_size.Count > 0 || (p.kernel_h.HasValue && p.kernel_w.HasValue), "For non-square filters, both kernel_h and kernel_w are required.");
            }

            m_log.CHECK(((p.pad.Count == 0) && p.pad_h.HasValue && p.pad_w.HasValue) || (!p.pad_h.HasValue && !p.pad_w.HasValue), "Pad is pad or pad_h and pad_w are required.");
            m_log.CHECK(((p.stride.Count == 0) && p.stride_h.HasValue && p.stride_w.HasValue) || (!p.stride_h.HasValue && !p.stride_w.HasValue), "Stride is stride or stride_h and stride_w are required.");
            m_bGlobalPooling = p.global_pooling;


            //---- Kernel Size ----

            if (m_bGlobalPooling)
            {
                m_nKernelH = colBottom[0].height;
                m_nKernelW = colBottom[0].width;
            }
            else
            {
                if (p.kernel_size.Count > 0)
                {
                    m_nKernelH = (int)p.kernel_size[0];
                    m_nKernelW = (int)p.kernel_size[0];
                }
                else
                {
                    m_nKernelH = (int)p.kernel_h.Value;
                    m_nKernelW = (int)p.kernel_w.Value;
                }
            }

            m_log.CHECK_GT(m_nKernelH, 0, "Filter dimensions cannot be zero.");
            m_log.CHECK_GT(m_nKernelW, 0, "Filter dimensions cannot be zero.");


            //---- Pad ----

            if (p.pad.Count > 0)
            {
                m_nPadH = (int)p.pad[0];
                m_nPadW = (int)p.pad[0];
            }
            else
            {
                m_nPadH = (p.pad_h.HasValue) ? (int)p.pad_h.Value : 0;
                m_nPadW = (p.pad_w.HasValue) ? (int)p.pad_w.Value : 0;
            }


            //---- Stride ----

            if (p.stride.Count > 0)
            {
                m_nStrideH = (int)p.stride[0];
                m_nStrideW = (int)p.stride[0];
            }
            else
            {
                m_nStrideH = (p.stride_h.HasValue) ? (int)p.stride_h.Value : 1;
                m_nStrideW = (p.stride_w.HasValue) ? (int)p.stride_w.Value : 1;
            }

            if (m_bGlobalPooling)
            {
                m_log.CHECK(m_nPadH == 0 && m_nPadW == 0 && m_nStrideH == 1 && m_nStrideW == 1, "With global pooling = true, only pad = 0 and stride = 1 allowed.");
            }

            if (m_nPadH != 0 || m_nPadW != 0)
            {
                m_log.CHECK(m_param.pooling_param.pool == PoolingParameter.PoolingMethod.AVE ||
                            m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX, "Padding implemented for AVE and MAX pooling only.");
                m_log.CHECK_LT(m_nPadH, m_nKernelH, "The pad_h must be less than kernel_h.");
                m_log.CHECK_LT(m_nPadW, m_nKernelW, "The pad_w must be less than kernel_w.");
            }

            if (!m_param.pooling_param.useCudnn())
            {
                return;
            }


            //---------------------------------------------
            //  cuDNN-specific pooling.
            //
            //  Note only MAX and AVE pooling are supported.
            //---------------------------------------------

            // Set up the convert-to-half flag used by the Layer just before calling forward and backward.
            m_bUseHalfSize = m_param.use_halfsize;

            if (m_param.pooling_param.pool == PoolingParameter.PoolingMethod.MAX)
            {
                m_method = PoolingMethod.MAX;
            }
            else
            {
                m_method = PoolingMethod.AVE;
            }

            m_hCudnn       = m_cuda.CreateCuDNN();
            m_hBottomDesc  = m_cuda.CreateTensorDesc();
            m_hTopDesc     = m_cuda.CreateTensorDesc();
            m_hPoolingDesc = m_cuda.CreatePoolingDesc();
            m_cuda.SetPoolingDesc(m_hPoolingDesc, m_method, m_nKernelH, m_nKernelW, m_nPadH, m_nPadW, m_nStrideH, m_nStrideW);
        }
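The cuDNN branch above collapses every method other than MAX into AVE. A hedged alternative mapping, written as a standalone helper rather than the library's code, that would surface unhandled methods instead of silently averaging (it reuses the PoolingParameter.PoolingMethod and PoolingMethod enums referenced above):

        // Illustrative only: reject anything other than MAX and AVE rather than treating it as AVE.
        static PoolingMethod ToCudnnPoolingMethod(PoolingParameter.PoolingMethod pool)
        {
            switch (pool)
            {
            case PoolingParameter.PoolingMethod.MAX:
                return PoolingMethod.MAX;

            case PoolingParameter.PoolingMethod.AVE:
                return PoolingMethod.AVE;

            default:
                throw new NotSupportedException("This cuDNN path only supports MAX and AVE pooling.");
            }
        }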