This class wraps the function calls to the opencv_gpu module.
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         GpuInvoke.gpuFilterRelease(ref _ptr);
     }
 }
 /// <summary>
 /// Create a GPU cascade classifier using the specific file
 /// </summary>
 /// <param name="fileName">The file to create the classifier from</param>
 public GpuCascadeClassifier(String fileName)
 {
     Debug.Assert(File.Exists(fileName), String.Format("The Cascade file {0} does not exist.", fileName));
     _ptr    = GpuInvoke.gpuCascadeClassifierCreate(fileName);
     _buffer = new GpuMat <int>(1, 100, 4);
     _stor   = new MemStorage();
 }
 /// <summary>
 /// Calculate an optical flow for a sparse feature set.
 /// </summary>
 /// <param name="frame0">First 8-bit input image (supports both grayscale and color images).</param>
 /// <param name="frame1">Second input image of the same size and the same type as <paramref name="frame0"/></param>
 /// <param name="points0">
 /// Vector of 2D points for which the flow needs to be found. It must be a one-row
 /// matrix with 2 channels.
 /// </param>
 /// <param name="points1">
 /// Output vector of 2D points (with single-precision two channel floating-point coordinates)
 /// containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">
 /// Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
 /// flow for the corresponding features has been found. Otherwise, it is set to 0.
 /// </param>
 /// <param name="err">
 /// Output vector (CV_32FC1 type) that contains the difference between patches around
 /// the original and moved points, or the minimum eigenvalue if getMinEigenVals is set. It can be
 /// null, if not needed.
 /// </param>
 public void Sparse(GpuImage <Gray, byte> frame0, GpuImage <Gray, byte> frame1, GpuMat <float> points0, out GpuMat <float> points1, out GpuMat <Byte> status, out GpuMat <float> err)
 {
     points1 = new GpuMat <float>();
     status  = new GpuMat <byte>();
     err     = new GpuMat <float>();
     GpuInvoke.gpuPryLKOpticalFlowSparse(_ptr, frame0, frame1, points0, points1, status, err);
 }
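A minimal usage sketch of the Sparse call above; the optical-flow wrapper type name (GpuPyrLKOpticalFlow), the GpuImage upload constructor that takes an Image<Gray, byte>, and the origin of the seed points are assumptions, not part of the snippet.

 // Sketch only: GpuPyrLKOpticalFlow and the GpuImage(Image<Gray, byte>) upload
 // constructor are assumed names; only Sparse itself is shown above.
 static void TrackSparse(GpuPyrLKOpticalFlow flow,
                         Image<Gray, byte> prevFrame,
                         Image<Gray, byte> currFrame,
                         GpuMat<float> seedPoints) // one row, two channels
 {
     using (GpuImage<Gray, byte> prev = new GpuImage<Gray, byte>(prevFrame))
     using (GpuImage<Gray, byte> curr = new GpuImage<Gray, byte>(currFrame))
     {
         GpuMat<float> newPoints;
         GpuMat<byte> status;
         GpuMat<float> err;
         flow.Sparse(prev, curr, seedPoints, out newPoints, out status, out err);

         // The out parameters are freshly allocated GPU buffers owned by the caller.
         newPoints.Dispose();
         status.Dispose();
         err.Dispose();
     }
 }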
Example #4
        /// <summary>
        /// Returns the min / max location and values for the image
        /// </summary>
        /// <param name="maxLocations">The maximum locations for each channel </param>
        /// <param name="maxValues">The maximum values for each channel</param>
        /// <param name="minLocations">The minimum locations for each channel</param>
        /// <param name="minValues">The minimum values for each channel</param>
        public void MinMax(out double[] minValues, out double[] maxValues, out Point[] minLocations, out Point[] maxLocations)
        {
            minValues    = new double[NumberOfChannels];
            maxValues    = new double[NumberOfChannels];
            minLocations = new Point[NumberOfChannels];
            maxLocations = new Point[NumberOfChannels];

            if (NumberOfChannels == 1)
            {
                GpuInvoke.MinMaxLoc(Ptr, ref minValues[0], ref maxValues[0], ref minLocations[0], ref maxLocations[0], IntPtr.Zero);
            }
            else
            {
                GpuMat <TDepth>[] channels = Split(null);
                try
                {
                    for (int i = 0; i < NumberOfChannels; i++)
                    {
                        GpuInvoke.MinMaxLoc(channels[i].Ptr, ref minValues[i], ref maxValues[i], ref minLocations[i], ref maxLocations[i], IntPtr.Zero);
                    }
                }
                finally
                {
                    foreach (GpuMat <TDepth> mat in channels)
                    {
                        mat.Dispose();
                    }
                }
            }
        }
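A minimal usage sketch of MinMax; gpuImage is assumed to be an existing GpuImage or GpuMat instance, and only the MinMax call itself comes from the snippet above.

 // Sketch: read back the per-channel extrema of an existing GpuImage (gpuImage).
 double[] minValues, maxValues;
 Point[] minLocations, maxLocations;
 gpuImage.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);
 for (int i = 0; i < minValues.Length; i++)
 {
     Console.WriteLine("Channel {0}: min {1} at {2}, max {3} at {4}",
                       i, minValues[i], minLocations[i], maxValues[i], maxLocations[i]);
 }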
Example #5
        /// <summary>
        /// Makes multi-channel array out of several single-channel arrays
        /// </summary>
        ///<param name="gpuMats">
        ///An array of single channel GpuMat where each item
        ///in the array represents a single channel of the GpuMat
        ///</param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        public void MergeFrom(GpuMat <TDepth>[] gpuMats, Stream stream)
        {
             Debug.Assert(NumberOfChannels == gpuMats.Length, "Number of channels does not agree with the length of gpuMats");
            //If single channel, perform a copy
            if (NumberOfChannels == 1)
            {
                if (stream == null)
                {
                    GpuInvoke.Copy(gpuMats[0].Ptr, _ptr, IntPtr.Zero);
                }
                else
                {
                    stream.Copy <TDepth>(gpuMats[0], this);
                }
                return;
            }

            //handle multiple channels
            Size size = Size;

            IntPtr[] ptrs = new IntPtr[gpuMats.Length];
            for (int i = 0; i < gpuMats.Length; i++)
            {
                Debug.Assert(gpuMats[i].Size == size, "Size mismatch");
                ptrs[i] = gpuMats[i].Ptr;
            }
            GCHandle handle = GCHandle.Alloc(ptrs, GCHandleType.Pinned);

            GpuInvoke.Merge(handle.AddrOfPinnedObject(), _ptr, stream);
            handle.Free();
        }
 private static void ConvertColor(IntPtr src, IntPtr dest, Type srcColor, Type destColor, Size size, Stream stream)
 {
     try
     {
          // if the direct conversion exists, apply the conversion
         GpuInvoke.CvtColor(src, dest, CvToolbox.GetColorCvtCode(srcColor, destColor), stream);
     }
     catch
     {
         try
         {
              //if a direct conversion doesn't exist, apply a two-step conversion
             using (GpuImage <Bgr, TDepth> tmp = new GpuImage <Bgr, TDepth>(size))
             {
                 GpuInvoke.CvtColor(src, tmp.Ptr, CvToolbox.GetColorCvtCode(srcColor, typeof(Bgr)), stream);
                 GpuInvoke.CvtColor(tmp.Ptr, dest, CvToolbox.GetColorCvtCode(typeof(Bgr), destColor), stream);
             }
         }
         catch
         {
             throw new NotSupportedException(String.Format(
                                                 "Convertion from Image<{0}, {1}> to Image<{2}, {3}> is not supported by OpenCV",
                                                 srcColor.ToString(),
                                                 typeof(TDepth).ToString(),
                                                 destColor.ToString(),
                                                 typeof(TDepth).ToString()));
         }
     }
 }
Example #7
        /// <summary>
        /// Create a clone of this GpuImage
        /// </summary>
        /// <returns>A clone of this GpuImage</returns>
        public GpuImage <TColor, TDepth> Clone()
        {
            GpuImage <TColor, TDepth> result = new GpuImage <TColor, TDepth>(Size);

            GpuInvoke.Copy(_ptr, result, IntPtr.Zero);
            return(result);
        }
Example #8
 private static void ConvertColor(IntPtr src, IntPtr dest, Type srcColor, Type destColor, Size size, Stream stream)
 {
     try
     {
          // if the direct conversion exists, apply the conversion
         GpuInvoke.CvtColor(src, dest, CvToolbox.GetColorCvtCode(srcColor, destColor), stream);
      }
      catch
      {
         try
         {
              //if a direct conversion doesn't exist, apply a two-step conversion
             //in this case, needs to wait for the completion of the stream because a temporary local image buffer is used
             //we don't want the tmp image to be released before the operation is completed.
             using (GpuImage <Bgr, TDepth> tmp = new GpuImage <Bgr, TDepth>(size))
             {
                 GpuInvoke.CvtColor(src, tmp.Ptr, CvToolbox.GetColorCvtCode(srcColor, typeof(Bgr)), stream);
                 GpuInvoke.CvtColor(tmp.Ptr, dest, CvToolbox.GetColorCvtCode(typeof(Bgr), destColor), stream);
                 stream.WaitForCompletion();
             }
          }
          catch
          {
             throw new NotSupportedException(String.Format(
                                                 "Convertion from Image<{0}, {1}> to Image<{2}, {3}> is not supported by OpenCV",
                                                 srcColor.ToString(),
                                                 typeof(TDepth).ToString(),
                                                 destColor.ToString(),
                                                 typeof(TDepth).ToString()));
         }
     }
 }
Example #9
        ///<summary>
        ///Split the current Image into an array of grayscale images where each element
        ///in the array represents a single color channel of the original image
        ///</summary>
        ///<param name="gpuMats">
        ///An array of single channel GpuMat where each item
        ///in the array represents a single channel of the original GpuMat
        ///</param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        public void SplitInto(GpuMat <TDepth>[] gpuMats, Stream stream)
        {
             Debug.Assert(NumberOfChannels == gpuMats.Length, "Number of channels does not agree with the length of gpuMats");

            if (NumberOfChannels == 1)
            {
                //If single channel, return a copy
                if (stream == null)
                {
                    GpuInvoke.Copy(_ptr, gpuMats[0], IntPtr.Zero);
                }
                else
                {
                    stream.Copy <TDepth>(this, gpuMats[0]);
                }
            }
            else
            {
                //handle multiple channels
                Size     size = Size;
                IntPtr[] ptrs = new IntPtr[gpuMats.Length];
                for (int i = 0; i < ptrs.Length; i++)
                {
                    Debug.Assert(gpuMats[i].Size == size, "Size mismatch");
                    ptrs[i] = gpuMats[i].Ptr;
                }
                GCHandle handle = GCHandle.Alloc(ptrs, GCHandleType.Pinned);
                GpuInvoke.Split(_ptr, handle.AddrOfPinnedObject(), stream);
                handle.Free();
            }
        }
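A minimal round-trip sketch that combines SplitInto with the MergeFrom method shown earlier; the matrix size and channel count are arbitrary assumptions, and null is passed for the stream to run synchronously.

 // Sketch: split a 3-channel GpuMat into its planes and merge them back.
 using (GpuMat<byte> color = new GpuMat<byte>(480, 640, 3))
 using (GpuMat<byte> merged = new GpuMat<byte>(480, 640, 3))
 {
     GpuMat<byte>[] planes = new GpuMat<byte>[]
     {
         new GpuMat<byte>(480, 640, 1),
         new GpuMat<byte>(480, 640, 1),
         new GpuMat<byte>(480, 640, 1)
     };
     try
     {
         color.SplitInto(planes, null);   // null = synchronous call
         merged.MergeFrom(planes, null);
     }
     finally
     {
         foreach (GpuMat<byte> plane in planes)
         {
             plane.Dispose();
         }
     }
 }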
 /// <summary>
 /// Release the buffer
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         GpuInvoke.gpuTemplateMatchingRelease(ref _ptr);
     }
 }
Example #11
        ///<summary>
        ///Performs a convolution using the specific <paramref name="kernel"/>
        ///</summary>
        ///<param name="kernel">The convolution kernel</param>
        ///<param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        ///<returns>The result of the convolution</returns>
        public GpuImage <TColor, Single> Convolution(ConvolutionKernelF kernel, Stream stream)
        {
            GpuImage <TColor, Single> result = new GpuImage <TColor, float>(Size);

            GpuInvoke.Filter2D(_ptr, result, kernel, kernel.Center, CvEnum.BORDER_TYPE.REFLECT101, stream);
            return(result);
        }
Example #12
        /// <summary>
        /// Find the good features to track
        /// </summary>
        /// <param name="image">The image to detect the good features from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>The detected corners</returns>
        public GpuMat <float> Detect(GpuImage <Gray, byte> image, GpuImage <Gray, byte> mask)
        {
            GpuMat <float> corners = new GPU.GpuMat <float>();

            GpuInvoke.gpuGoodFeaturesToTrackDetectorDetect(_ptr, image, corners, mask);
            return(corners);
        }
 /// <summary>
 /// Release all the unmanaged memory associate with this Canny edge detector.
 /// </summary>
 protected override void DisposeObject()
 {
     if (_ptr != IntPtr.Zero)
     {
         GpuInvoke.gpuCannyEdgeDetectorRelease(ref _ptr);
     }
 }
        /// <summary>
        /// Create a GPUBruteForce Matcher using the specific distance type
        /// </summary>
        /// <param name="distanceType">The distance type</param>
        public GpuBruteForceMatcher(DistanceType distanceType)
        {
            if (distanceType == DistanceType.Hamming)
            {
                if (typeof(T) != typeof(byte))
                {
                    throw new ArgumentException("Hamming distance type requires model descriptor to be Matrix<Byte>");
                }
            }

            if (typeof(T) != typeof(byte) && typeof(T) != typeof(float))
            {
                throw new NotImplementedException(String.Format("Data type of {0} is not supported", typeof(T).ToString()));
            }

            switch (distanceType)
            {
            case (DistanceType.Hamming):
                _distanceType = GpuMatcherDistanceType.HammingDist;
                break;

            case (DistanceType.L1):
                _distanceType = GpuMatcherDistanceType.L1Dist;
                break;

            case (DistanceType.L2):
                _distanceType = GpuMatcherDistanceType.L2Dist;
                break;

            default:
                throw new NotImplementedException(String.Format("Distance type of {0} is not implemented in GPU.", distanceType.ToString()));
            }
            _ptr = GpuInvoke.gpuBruteForceMatcherCreate(_distanceType);
        }
Example #15
        /// <summary>
        /// Resize the GpuImage. The calling GpuMat must be a GpuMat&lt;Byte&gt;. If a stream is specified, it has to have either 1 or 4 channels.
        /// </summary>
        /// <param name="size">The new size</param>
        /// <param name="interpolationType">The interpolation type</param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        /// <returns>A GpuImage of the new size</returns>
        public GpuImage <TColor, TDepth> Resize(Size size, CvEnum.INTER interpolationType, Stream stream)
        {
            GpuImage <TColor, TDepth> result = new GpuImage <TColor, TDepth>(size);

            GpuInvoke.Resize(_ptr, result, interpolationType, stream);
            return(result);
        }
        ///<summary>
        ///Performs a convolution using the specific <paramref name="kernel"/>
        ///</summary>
        ///<param name="kernel">The convolution kernel</param>
        ///<returns>The result of the convolution</returns>
        public GpuImage <TColor, Single> Convolution(ConvolutionKernelF kernel)
        {
            GpuImage <TColor, Single> result = new GpuImage <TColor, float>(Size);

            GpuInvoke.Filter2D(_ptr, result, kernel, kernel.Center);
            return(result);
        }
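A minimal sketch applying a 3x3 sharpening kernel with the Convolution method above; gpuImage is assumed to be an existing GpuImage<Bgr, byte>, and the ConvolutionKernelF(float[,]) constructor is assumed to mirror the CPU-side Emgu API.

 // Sketch: 3x3 sharpening convolution on the GPU.
 float[,] sharpen = new float[,]
 {
     {  0, -1,  0 },
     { -1,  5, -1 },
     {  0, -1,  0 }
 };
 using (ConvolutionKernelF kernel = new ConvolutionKernelF(sharpen))
 using (GpuImage<Bgr, float> result = gpuImage.Convolution(kernel))
 {
     // result holds the filtered image in GPU memory.
 }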
Example #17
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from. The order of the keypoints might be changed unless the GPU_SURF detector is UP-RIGHT.</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public GpuMat <float> ComputeDescriptorsRaw(GpuImage <Gray, Byte> image, GpuImage <Gray, byte> mask, GpuMat <float> keyPoints)
        {
            GpuMat <float> descriptors = new GpuMat <float>(keyPoints.Size.Height, DescriptorSize, 1);

            GpuInvoke.gpuSURFDetectorCompute(_ptr, image, mask, keyPoints, descriptors, true);
            return(descriptors);
        }
Example #18
        /// <summary>
        /// Detect keypoints in the GpuImage
        /// </summary>
        /// <param name="img">The image where keypoints will be detected from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>
        /// The keypoints GpuMat that will have 1 row.
        /// keypoints.at&lt;float[6]&gt;(1, i) contains the i'th keypoint
        /// in the format (x, y, size, response, angle, octave)
        /// </returns>
        public GpuMat <float> DetectKeyPointsRaw(GpuImage <Gray, Byte> img, GpuImage <Gray, Byte> mask)
        {
            GpuMat <float> result = new GpuMat <float>();

            GpuInvoke.gpuSURFDetectorDetectKeyPoints(_ptr, img, mask, result);
            return(result);
        }
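A minimal sketch chaining DetectKeyPointsRaw with the ComputeDescriptorsRaw method shown above; the constructor arguments are simply the documented defaults, gpuGray is assumed to be an existing GpuImage<Gray, byte>, and null is passed for the optional mask.

 // Sketch: GPU SURF keypoint detection followed by descriptor extraction.
 using (GpuSURFDetector surf = new GpuSURFDetector(100, 4, 4, true, 0.01f, false))
 using (GpuMat<float> keyPoints = surf.DetectKeyPointsRaw(gpuGray, null))
 using (GpuMat<float> descriptors = surf.ComputeDescriptorsRaw(gpuGray, null, keyPoints))
 {
     // keyPoints and descriptors stay in GPU memory, e.g. for GpuBruteForceMatcher.
 }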
Example #19
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Update(GpuImage <TColor, Byte> frame, float learningRate, Stream stream)
 {
     if (_forgroundMask == null)
     {
         _forgroundMask = new GpuImage <Gray, byte>(frame.Size);
     }
     GpuInvoke.gpuMog2Compute(_ptr, frame, learningRate, _forgroundMask, stream);
 }
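A minimal per-frame sketch of the Update call; the background-subtractor wrapper type name (GpuMOG2) and the property exposing the computed mask (ForegroundMask) are hypothetical, only Update itself comes from the snippet above.

 // Sketch only: GpuMOG2 and ForegroundMask are hypothetical names.
 static void ProcessFrame(GpuMOG2 subtractor, GpuImage<Bgr, byte> gpuFrame)
 {
     // -1.0f selects the default learning rate; null runs synchronously.
     subtractor.Update(gpuFrame, -1.0f, null);
     GpuImage<Gray, byte> mask = subtractor.ForegroundMask;   // hypothetical accessor
     // mask now holds the foreground segmentation for this frame.
 }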
Example #20
 /// <summary>
 /// Returns coefficients of the classifier trained for people detection (for size 64x128).
 /// </summary>
 /// <returns>The people detector of 64x128 resolution.</returns>
 public static float[] GetPeopleDetector64x128()
 {
     using (VectorOfFloat f = new VectorOfFloat())
     {
         GpuInvoke.gpuHOGDescriptorGetPeopleDetector64x128(f);
         return(f.ToArray());
     }
 }
Example #21
 /// <summary>
 /// Set the SVM detector
 /// </summary>
 /// <param name="detector">The SVM detector</param>
 public void SetSVMDetector(float[] detector)
 {
     using (VectorOfFloat vec = new VectorOfFloat())
     {
         vec.Push(detector);
         GpuInvoke.gpuHOGSetSVMDetector(_ptr, vec);
     }
 }
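A minimal sketch wiring GetPeopleDetector64x128 and SetSVMDetector together; the wrapper type name GpuHOGDescriptor and its parameterless constructor are assumptions.

 // Sketch: configure a GPU HOG descriptor with the built-in people detector.
 using (GpuHOGDescriptor hog = new GpuHOGDescriptor())   // assumed type / constructor
 {
     float[] peopleDetector = GpuHOGDescriptor.GetPeopleDetector64x128();
     hog.SetSVMDetector(peopleDetector);
     // hog is now ready to detect pedestrians with a 64x128 window.
 }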
Example #22
        /// <summary>
        /// Convert this GpuMat to different depth
        /// </summary>
        /// <typeparam name="TOtherDepth">The depth type to convert to</typeparam>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        /// <returns>GpuMat of different depth</returns>
        public GpuMat <TOtherDepth> Convert <TOtherDepth>(Stream stream)
            where TOtherDepth : new()
        {
            GpuMat <TOtherDepth> res = new GpuMat <TOtherDepth>(Size, NumberOfChannels);

            GpuInvoke.ConvertTo(Ptr, res.Ptr, 1.0, 0.0, stream);
            return(res);
        }
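A minimal sketch of the depth conversion; byteMat is assumed to be an existing GpuMat<byte>, and passing null for the stream runs the conversion synchronously, as the parameter documentation above states.

 // Sketch: convert an 8-bit GpuMat to 32-bit floating point on the GPU.
 using (GpuMat<float> floatMat = byteMat.Convert<float>(null))
 {
     // floatMat has the same size and channel count as byteMat.
 }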
 /// <summary>
 ///  This function is similar to cvCalcBackProjectPatch. It slides through image, compares overlapped patches of size wxh with templ using the specified method and stores the comparison results in result
 /// </summary>
 /// <param name="image">Image where the search is running. It should be 8-bit or 32-bit floating-point</param>
 /// <param name="templ">Searched template; must be not greater than the source image and the same data type as the image</param>
 /// <param name="result">A map of comparison results; single-channel 32-bit floating-point. If image is WxH and templ is wxh then result must be (W-w+1)x(H-h+1).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Match(GpuImage <TColor, TDepth> image, GpuImage <TColor, TDepth> templ, GpuImage <Gray, float> result, Stream stream)
 {
     if (_ptr == IntPtr.Zero)
     {
         _ptr = GpuInvoke.gpuTemplateMatchingCreate(image.Type, _method, ref _blockSize);
     }
     GpuInvoke.gpuTemplateMatchingMatch(_ptr, image, templ, result, stream);
 }
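A minimal sketch of the Match call combined with MinMax (shown earlier) to read back the best score; the matcher variable and the Gray/byte image pair are assumptions, while the result size follows the (W-w+1)x(H-h+1) rule from the documentation above.

 // Sketch: matcher is assumed to be an instance of the template matching wrapper above,
 // image and templ existing GpuImage<Gray, byte> instances.
 Size resultSize = new Size(image.Size.Width - templ.Size.Width + 1,
                            image.Size.Height - templ.Size.Height + 1);
 using (GpuImage<Gray, float> result = new GpuImage<Gray, float>(resultSize))
 {
     matcher.Match(image, templ, result, null);   // null = synchronous call

     double[] minVal, maxVal;
     Point[] minLoc, maxLoc;
     result.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
     // Depending on the matching method, the best match is at maxLoc[0] or minLoc[0].
 }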
        /*
         * /// <summary>
         * /// Add the model descriptors
         * /// </summary>
         * /// <param name="modelDescriptors">The model discriptors</param>
         * public void Add(Matrix<Byte> modelDescriptors)
         * {
         * if (!(_distanceType == DistanceType.HammingDist))
         *    throw new ArgumentException("Hamming distance type requires model descriptor to be Matrix<Byte>");
         * gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
         * }
         *
         * /// <summary>
         * /// Add the model descriptors
         * /// </summary>
         * /// <param name="modelDescriptors">The model discriptors</param>
         * public void Add(Matrix<float> modelDescriptors)
         * {
         * if (!(_distanceType == DistanceType.L2 || _distanceType == DistanceType.L1))
         *    throw new ArgumentException("L1 / L2 distance type requires model descriptor to be Matrix<float>");
         * gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
         * }*/

        /// <summary>
        /// Find the k nearest neighbour using the brute force matcher.
        /// </summary>
        /// <param name="queryDescriptors">The query descriptors</param>
        /// <param name="modelDescriptors">The model descriptors</param>
        /// <param name="modelIdx">The model index. An n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Cols</param>
        /// <param name="distance">The matrix where the distance values are stored. An n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Size.Height</param>
        /// <param name="k">The number of nearest neighbours to be searched</param>
        /// <param name="mask">The mask</param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        public void KnnMatchSingle(GpuMat <T> queryDescriptors, GpuMat <T> modelDescriptors, GpuMat <int> modelIdx, GpuMat <float> distance, int k, GpuMat <Byte> mask, Stream stream)
        {
            if (k == 2 && !(modelIdx.IsContinuous && distance.IsContinuous))
            {
                throw new ArgumentException("For k == 2, the allocated index matrix and distance matrix must be continuous");
            }
            GpuInvoke.gpuBruteForceMatcherKnnMatchSingle(_ptr, queryDescriptors, modelDescriptors, modelIdx, distance, k, mask, stream);
        }
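A minimal sketch of a 2-nearest-neighbour match with float (e.g. SURF) descriptors; the generic type name GpuBruteForceMatcher<float> follows the constructor shown earlier, the descriptor matrices are assumed to already exist on the GPU, and the result matrices are allocated continuous because k == 2 requires it.

 // Sketch: queryDescriptors / modelDescriptors are existing GpuMat<float> matrices
 // with one descriptor per row.
 using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
 using (GpuMat<int> trainIdx = new GpuMat<int>(queryDescriptors.Size.Height, 2, 1, true))
 using (GpuMat<float> distance = new GpuMat<float>(queryDescriptors.Size.Height, 2, 1, true))
 {
     matcher.KnnMatchSingle(queryDescriptors, modelDescriptors, trainIdx, distance, 2, null, null);
     // Download trainIdx / distance and apply a ratio test to keep the good matches.
 }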
Example #25
 /// <summary>
 /// Query the information of the gpu device with the specific id.
 /// </summary>
 /// <param name="deviceId">The device id</param>
 public GpuDevice(int deviceId)
 {
     ID   = deviceId;
     Name = GpuInvoke.GetDeviceName(deviceId);
     GpuInvoke.GetComputeCapability(deviceId, ref CudaComputeCapability.Major, ref CudaComputeCapability.Minor);
     NumberOfSMs            = GpuInvoke.GetNumberOfSMs(deviceId);
     HasNativeDoubleSupport = GpuInvoke.HasNativeDoubleSupport(deviceId);
     HasAtomicSupport       = GpuInvoke.HasAtomicsSupport(deviceId);
 }
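A minimal sketch that enumerates every CUDA device with the constructor above; the device-count query GpuInvoke.GetCudaEnabledDeviceCount is assumed to exist alongside the other GpuInvoke calls.

 // Sketch: print basic information for every CUDA-capable device.
 int deviceCount = GpuInvoke.GetCudaEnabledDeviceCount();   // assumed query
 for (int i = 0; i < deviceCount; i++)
 {
     GpuDevice device = new GpuDevice(i);
     Console.WriteLine("Device {0}: {1}, compute capability {2}.{3}, {4} SMs",
                       device.ID, device.Name,
                       device.CudaComputeCapability.Major,
                       device.CudaComputeCapability.Minor,
                       device.NumberOfSMs);
 }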
Example #26
 /// <summary>
 /// Create a GPU SURF detector
 /// </summary>
 /// <param name="hessianThreshold">The interest operator threshold. Use 100 for default</param>
 /// <param name="nOctaves">The number of octaves to process. Use 4 for default</param>
 /// <param name="nIntervals">The number of intervals in each octave. Use 4 for default</param>
 /// <param name="extended">True, if generate 128-len descriptors, false - 64-len descriptors. Use true for default.</param>
 /// <param name="featuresRatio">Max features = featuresRatio * img.size().area(). Use 0.01 for default</param>
 /// <param name="upright">Use false for default. If set to true, the orientation is not computed for the keypoints</param>
 public GpuSURFDetector(
     float hessianThreshold,
     int nOctaves,
     int nIntervals,
     bool extended,
     float featuresRatio,
     bool upright)
 {
     _ptr = GpuInvoke.gpuSURFDetectorCreate(hessianThreshold, nOctaves, nIntervals, extended, featuresRatio, upright);
 }
 /// <summary>
 /// Create a GPU Farneback optical flow solver
 /// </summary>
 /// <param name="numLevels">The number of pyramid levels, including the initial image</param>
 /// <param name="pyrScale">The image scale (&lt;1) used to build the pyramids for each image</param>
 /// <param name="fastPyramids">If true, use the fast pyramid construction</param>
 /// <param name="winSize">The averaging window size</param>
 /// <param name="numIters">The number of iterations performed at each pyramid level</param>
 /// <param name="polyN">The size of the pixel neighborhood used to find the polynomial expansion in each pixel</param>
 /// <param name="polySigma">The standard deviation of the Gaussian used to smooth the derivatives for the polynomial expansion</param>
 /// <param name="flags">The operation flags</param>
 public GpuFarnebackOpticalFlow(
     int numLevels,
     double pyrScale,
     bool fastPyramids,
     int winSize,
     int numIters,
     int polyN,
     double polySigma,
     int flags)
 {
     _ptr = GpuInvoke.gpuFarnebackOpticalFlowCreate(numLevels, pyrScale, fastPyramids, winSize, numIters, polyN, polySigma, flags);
 }
Example #28
        /// <summary>
        /// Create a GpuMat of the specified size
        /// </summary>
        /// <param name="rows">The number of rows (height)</param>
        /// <param name="cols">The number of columns (width)</param>
        /// <param name="channels">The number of channels</param>
        /// <param name="continuous">Indicates if the data should be continuous</param>
        public GpuMat(int rows, int cols, int channels, bool continuous)
        {
            int matType = CvInvoke.CV_MAKETYPE((int)CvToolbox.GetMatrixDepth(typeof(TDepth)), channels);

            if (continuous)
            {
                _ptr = GpuInvoke.GpuMatCreateContinuous(rows, cols, matType);
            }
            else
            {
                _ptr = GpuInvoke.GpuMatCreate(rows, cols, matType);
            }
        }
Example #29
 private Rectangle[] DetectMultiScale(
     IntPtr image,
     double hitThreshold,
     Size winStride,
     Size padding,
     double scale,
     int groupThreshold)
 {
     using (MemStorage storage = new MemStorage())
     {
         Seq <Rectangle> rectSeq = new Seq <Rectangle>(storage);
         GpuInvoke.gpuHOGDescriptorDetectMultiScale(_ptr, image, rectSeq, hitThreshold, winStride, padding, scale, groupThreshold);
         return(rectSeq.ToArray());
     }
 }
 /// <summary>
 /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles.
 /// </summary>
 /// <param name="image">The image where search will take place</param>
 /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%. Use 1.2 for default.</param>
  /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not do any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. Use 4 for default.</param>
 /// <param name="minSize">Minimum window size. By default, it is set to the size of samples the classifier has been trained on (~20x20 for face detection). Use Size.Empty for default</param>
 /// <returns>An array of regions for the detected objects</returns>
 public Rectangle[] DetectMultiScale <TColor>(GpuImage <TColor, Byte> image, double scaleFactor, int minNeighbors, Size minSize) where TColor : struct, IColor
 {
     try
     {
         Seq <Rectangle> regions = new Seq <Rectangle>(_stor);
         int             count   = GpuInvoke.gpuCascadeClassifierDetectMultiScale(_ptr, image, _buffer, scaleFactor, minNeighbors, minSize, regions);
         if (count == 0)
         {
             return(new Rectangle[0]);
         }
         Rectangle[] result = regions.ToArray();
         return(result);
     }
     finally
     {
         _stor.Clear();
     }
 }
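A minimal end-to-end sketch combining the GpuCascadeClassifier constructor from the top of this section with DetectMultiScale; the input file names and the GpuImage upload constructor are assumptions.

 // Sketch: GPU face detection on a single frame loaded from disk.
 using (Image<Bgr, byte> frame = new Image<Bgr, byte>("frame.jpg"))          // assumed input
 using (Image<Gray, byte> gray = frame.Convert<Gray, byte>())
 using (GpuCascadeClassifier cascade = new GpuCascadeClassifier("haarcascade_frontalface_default.xml"))
 using (GpuImage<Gray, byte> gpuGray = new GpuImage<Gray, byte>(gray))        // assumed upload constructor
 {
     Rectangle[] faces = cascade.DetectMultiScale(gpuGray, 1.2, 4, Size.Empty);
     foreach (Rectangle face in faces)
     {
         frame.Draw(face, new Bgr(0, 0, 255), 2);
     }
 }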