Example 1
0
        ///<summary>
        ///Convolves this image with the specified <paramref name="kernel"/>, using a GPU linear filter with REFLECT101 border handling.
        ///</summary>
        ///<param name="kernel">The convolution kernel; its <c>Center</c> is used as the filter anchor</param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        ///<returns>A newly allocated image of the same size holding the convolution result</returns>
        public GpuImage <TColor, TDepth> Convolution(ConvolutionKernelF kernel, Stream stream)
        {
            // Allocate the destination up front; the filter writes into it in place.
            GpuImage <TColor, TDepth> convolved = new GpuImage <TColor, TDepth>(Size);

            using (GpuLinearFilter <TColor, TDepth> filter = new GpuLinearFilter <TColor, TDepth>(kernel, kernel.Center, CvEnum.BORDER_TYPE.REFLECT101, new MCvScalar()))
                filter.Apply(this, convolved, stream);

            return convolved;
        }
Example 2
0
 /// <summary>
 /// Performs object detection, scanning the image with a detection window of increasing size.
 /// </summary>
 /// <param name="image">The GpuImage to search in</param>
 /// <param name="hitThreshold">The threshold for the distance between features and classifying plane.</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding">Mock parameter to keep CPU interface compatibility. Must be (0,0).</param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="groupThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public Rectangle[] DetectMultiScale(
     GpuImage <Gray, Byte> image,
     double hitThreshold,
     Size winStride,
     Size padding,
     double scale,
     int groupThreshold)
 {
     // The native call fills the shared _rectSeq sequence; snapshot it into an array before returning.
     gpuHOGDescriptorDetectMultiScale(_ptr, image, _rectSeq, hitThreshold, winStride, padding, scale, groupThreshold);
     return _rectSeq.ToArray();
 }
Example 3
0
        ///<summary>
        ///Decompose this image into an array of single-channel (gray scale) images,
        ///one per color channel of the original image.
        ///</summary>
        ///<returns>
        ///An array of gray scale images, where element i holds channel i
        ///of the original image
        ///</returns>
        public new GpuImage <Gray, TDepth>[] Split()
        {
            int channelCount = NumberOfChannels;
            Size imageSize = Size;

            // Pre-allocate one destination image per channel, then let SplitInto fill them.
            GpuImage <Gray, TDepth>[] channels = new GpuImage <Gray, TDepth> [channelCount];
            for (int channel = 0; channel < channelCount; channel++)
                channels[channel] = new GpuImage <Gray, TDepth>(imageSize);

            SplitInto(channels);
            return channels;
        }
 /// <summary>
 /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles.
 /// </summary>
 /// <param name="image">The image where search will take place</param>
 /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%. Use 1.2 for default.</param>
 /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. Use 4 for default.</param>
 /// <param name="minSize">Minimum window size. By default, it is set to the size of samples the classifier has been trained on (~20x20 for face detection). Use Size.Empty for default</param>
 /// <returns>An array of regions for the detected objects</returns>
 public Rectangle[] DetectMultiScale <TColor>(GpuImage <TColor, Byte> image, double scaleFactor, int minNeighbors, Size minSize) where TColor : struct, IColor
 {
     try
     {
         // Detections are written into a sequence backed by the shared storage _stor.
         Seq <Rectangle> detections = new Seq <Rectangle>(_stor);
         int found = gpuCascadeClassifierDetectMultiScale(_ptr, image, _buffer, scaleFactor, minNeighbors, minSize, detections);
         return found == 0 ? new Rectangle[0] : detections.ToArray();
     }
     finally
     {
         // Always release the storage so repeated calls do not accumulate memory.
         _stor.Clear();
     }
 }
 /// <summary>
 /// Compute the dense optical flow between two frames.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis)</param>
 /// <param name="v">Flow vertical component (along y axis)</param>
 public void Dense(GpuImage <Gray, byte> frame0, GpuImage <Gray, byte> frame1, GpuImage <Gray, float> u, GpuImage <Gray, float> v)
 {
     // Synchronous call: the last argument (the stream handle) is IntPtr.Zero.
     GpuInvoke.gpuPryLKOpticalFlowDense(_ptr, frame0, frame1, u, v, IntPtr.Zero);
 }
 /// <summary>
 /// Compute the optical flow between two frames.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis)</param>
 /// <param name="v">Flow vertical component (along y axis)</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Compute(GpuImage <Gray, float> frame0, GpuImage <Gray, float> frame1, GpuImage <Gray, Byte> u, GpuImage <Gray, Byte> v, Stream stream)
 {
     // NOTE(review): u/v are declared as Byte images here although flow components are
     // conventionally single-precision float — confirm against the native
     // gpuBroxOpticalFlowCompute signature before relying on the output depth.
     GpuInvoke.gpuBroxOpticalFlowCompute(_ptr, frame0, frame1, u, v, stream);
 }
Example 7
0
 /// <summary>
 /// Performs object detection with an increasing detection window, using default parameters:
 /// hitThreshold 0, winStride 8x8, padding 0x0, scale 1.05, groupThreshold 2.
 /// </summary>
 /// <param name="image">The GpuImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public Rectangle[] DetectMultiScale(GpuImage <Gray, Byte> image)
 {
     // Delegate to the full overload with the default detection parameters.
     return DetectMultiScale(image, 0, new Size(8, 8), new Size(0, 0), 1.05, 2);
 }
Example 8
0
 /// <summary>
 /// Compute the keypoints and descriptors for the given image.
 /// </summary>
 /// <param name="image">The image where the keypoints and descriptors will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The resulting keypoints</param>
 /// <param name="descriptors">The resulting descriptors</param>
 public void ComputeRaw(GpuImage <Gray, Byte> image, GpuImage <Gray, byte> mask, out GpuMat <float> keyPoints, out GpuMat <Byte> descriptors)
 {
     // The caller owns the two output matrices; the native call fills both.
     descriptors = new GpuMat <byte>();
     keyPoints   = new GpuMat <float>();
     GpuInvoke.gpuORBDetectorCompute(_ptr, image, mask, keyPoints, descriptors);
 }
Example 9
0
 /// <summary>
 /// Create a GpuImage from the specific region of <paramref name="image"/>. The data is shared between the two GpuImage
 /// (no copy is made; this view aliases the source's memory).
 /// </summary>
 /// <param name="image">The GpuImage where the region is extracted from</param>
 /// <param name="rowRange">The row range. Use MCvSlice.WholeSeq for all rows.</param>
 /// <param name="colRange">The column range. Use MCvSlice.WholeSeq for all columns.</param>
 public GpuImage(GpuImage <TColor, TDepth> image, MCvSlice rowRange, MCvSlice colRange)
     : this(GpuInvoke.GpuMatGetRegion(image, rowRange, colRange))
 {
 }
Example 10
0
        /// <summary>
        /// Convert the source image to the current image, if the size are different, the current image will be a resized version of the srcImage.
        /// </summary>
        /// <typeparam name="TSrcColor">The color type of the source image</typeparam>
        /// <typeparam name="TSrcDepth">The color depth of the source image</typeparam>
        /// <param name="srcImage">The sourceImage</param>
        public void ConvertFrom <TSrcColor, TSrcDepth>(GpuImage <TSrcColor, TSrcDepth> srcImage)
            where TSrcColor : struct, IColor
            where TSrcDepth : new()
        {
            if (!Size.Equals(srcImage.Size))
            { //if the size of the source image do not match the size of the current image
                // Resize first, then recurse once with matching sizes; the temporary is disposed on the way out.
                using (GpuImage <TSrcColor, TSrcDepth> tmp = srcImage.Resize(Size, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, null))
                {
                    ConvertFrom(tmp);
                    return;
                }
            }

            if (typeof(TColor) == typeof(TSrcColor))
            {
                #region same color
                if (typeof(TDepth) == typeof(TSrcDepth)) //same depth
                {
                    // Same color and depth: a plain device-to-device copy suffices.
                    GpuInvoke.Copy(srcImage.Ptr, Ptr, IntPtr.Zero);
                }
                else //different depth
                {
                    if (typeof(TDepth) == typeof(Byte) && typeof(TSrcDepth) != typeof(Byte))
                    {
                        // Converting a wider depth down to Byte: find the global min/max over all
                        // channels so the values can be rescaled into the 0..255 range if needed.
                        double[] minVal, maxVal;
                        Point[]  minLoc, maxLoc;
                        srcImage.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
                        double min = minVal[0];
                        double max = maxVal[0];
                        for (int i = 1; i < minVal.Length; i++)
                        {
                            min = Math.Min(min, minVal[i]);
                            max = Math.Max(max, maxVal[i]);
                        }
                        // Identity transform by default; only rescale when values fall outside 0..255.
                        double scale = 1.0, shift = 0.0;
                        if (max > 255.0 || min < 0)
                        {
                            // Degenerate constant image (max == min) maps to scale 0 / shift min,
                            // so ConvertTo produces that constant value rather than dividing by zero.
                            scale = (max == min) ? 0.0 : 255.0 / (max - min);
                            shift = (scale == 0) ? min : -min * scale;
                        }

                        GpuInvoke.ConvertTo(srcImage.Ptr, Ptr, scale, shift);
                    }
                    else
                    {
                        // Other depth conversions: straight cast, no rescaling.
                        GpuInvoke.ConvertTo(srcImage.Ptr, Ptr, 1.0, 0.0);
                    }
                }
                #endregion
            }
            else
            {
                #region different color
                if (typeof(TDepth) == typeof(TSrcDepth))
                { //same depth
                    ConvertColor(srcImage.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size, null);
                }
                else
                {                                                                                     //different depth
                    // Convert depth first (keeping the source color space), then convert color.
                    using (GpuImage <TSrcColor, TDepth> tmp = srcImage.Convert <TSrcColor, TDepth>()) //convert depth
                        ConvertColor(tmp.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size, null);
                }
                #endregion
            }
        }
Example 11
0
 /// <summary>
 /// Apply the disparity bilateral filter to the given disparity map.
 /// </summary>
 /// <param name="disparity">The input disparity map</param>
 /// <param name="image">The image the disparity map belongs to</param>
 /// <param name="dst">The output disparity map, should have the same size as the input disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(GpuImage <Gray, Byte> disparity, GpuImage <Gray, Byte> image, GpuImage <Gray, byte> dst, Stream stream)
 {
     // Thin wrapper over the native filter; all work happens on the GPU.
     GpuDisparityBilateralFilterApply(_ptr, disparity, image, dst, stream);
 }
Example 12
0
 /// <summary>
 /// Transform the image through the lookup table.
 /// </summary>
 /// <typeparam name="TColor">The type of color, should be either 3 channel or 1 channel</typeparam>
 /// <param name="image">The image to be transformed</param>
 /// <param name="dst">The transformation result</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Transform <TColor>(GpuImage <TColor, byte> image, GpuImage <TColor, byte> dst, Stream stream)
     where TColor : struct, IColor
 {
     // Delegates directly to the native LUT transform.
     GpuInvoke.gpuLookUpTableTransform(_ptr, image, dst, stream);
 }
Example 13
0
 /// <summary>
 /// Computes the disparity map for the input rectified stereo pair using block matching.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(GpuImage <Gray, Byte> left, GpuImage <Gray, Byte> right, GpuImage <Gray, Byte> disparity, Stream stream)
 {
     // Thin wrapper over the native stereo block-matching routine.
     GpuStereoBMFindStereoCorrespondence(_ptr, left, right, disparity, stream);
 }
Example 14
0
 /// <summary>
 /// Computes the disparity map for the input rectified stereo pair using constant-space belief propagation.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void FindStereoCorrespondence(GpuImage <Gray, Byte> left, GpuImage <Gray, Byte> right, GpuImage <Gray, Byte> disparity, Stream stream)
 {
     // Thin wrapper over the native constant-space BP stereo matcher.
     GpuInvoke.GpuStereoConstantSpaceBPFindStereoCorrespondence(_ptr, left, right, disparity, stream);
 }
 /// <summary>
 /// This function is similar to cvCalcBackProjectPatch. It slides through image, compares overlapped patches of size wxh with templ using the specified method and stores the comparison results to result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be W-w+1xH-h+1.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Match(GpuImage <TColor, TDepth> image, GpuImage <TColor, TDepth> templ, GpuImage <Gray, float> result, Stream stream)
 {
     // Lazily create the native matcher on first use; later calls reuse the same handle.
     if (_ptr == IntPtr.Zero)
         _ptr = GpuInvoke.gpuTemplateMatchingCreate(image.Type, _method, ref _blockSize);

     GpuInvoke.gpuTemplateMatchingMatch(_ptr, image, templ, result, stream);
 }
 /// <summary>
 /// Apply this filter to <paramref name="image"/>, writing the result into <paramref name="dst"/>.
 /// </summary>
 /// <param name="image">The source image</param>
 /// <param name="dst">The destination image; presumably must have the same size as <paramref name="image"/> — TODO confirm against the native gpuFilterApply contract</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(GpuImage <TColor, TDepth> image, GpuImage <TColor, TDepth> dst, Stream stream)
 {
     GpuInvoke.gpuFilterApply(_ptr, image, dst, stream);
 }