/// <summary>
 /// Load the coefficients of a linear SVM classifier into the native HOG descriptor.
 /// </summary>
 /// <param name="detector">The SVM detector coefficients</param>
 public void SetSVMDetector(float[] detector)
 {
     VectorOfFloat coefficients = new VectorOfFloat();
     try
     {
         coefficients.Push(detector);
         OclInvoke.oclHOGSetSVMDetector(_ptr, coefficients);
     }
     finally
     {
         coefficients.Dispose();
     }
 }
 /// <summary>
 /// Returns coefficients of the classifier trained for people detection (for size 64x128).
 /// </summary>
 /// <returns>The people detector of 64x128 resolution.</returns>
 public static float[] GetPeopleDetector64x128()
 {
     using (VectorOfFloat coefficients = new VectorOfFloat())
     {
         // The native call fills the vector; copy it out before the vector is disposed.
         OclInvoke.oclHOGDescriptorGetPeopleDetector64x128(coefficients);
         float[] result = coefficients.ToArray();
         return result;
     }
 }
        /*
         * /// <summary>
         * /// Add the model descriptors
         * /// </summary>
         * /// <param name="modelDescriptors">The model descriptors</param>
         * public void Add(Matrix<Byte> modelDescriptors)
         * {
         * if (!(_distanceType == DistanceType.HammingDist))
         *    throw new ArgumentException("Hamming distance type requires model descriptor to be Matrix<Byte>");
         * gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
         * }
         *
         * /// <summary>
         * /// Add the model descriptors
         * /// </summary>
         * /// <param name="modelDescriptors">The model descriptors</param>
         * public void Add(Matrix<float> modelDescriptors)
         * {
         * if (!(_distanceType == DistanceType.L2 || _distanceType == DistanceType.L1))
         *    throw new ArgumentException("L1 / L2 distance type requires model descriptor to be Matrix<float>");
         * gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
         * }*/

        /// <summary>
        /// Find the k nearest neighbour using the brute force matcher.
        /// </summary>
        /// <param name="queryDescriptors">The query descriptors</param>
        /// <param name="modelDescriptors">The model descriptors</param>
        /// <param name="modelIdx">The model index. A n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Cols</param>
        /// <param name="distance">The matrix where the distance values are stored. A n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Size.Height</param>
        /// <param name="k">The number of nearest neighbours to be searched</param>
        /// <param name="mask">The mask</param>
        public void KnnMatchSingle(OclMat <T> queryDescriptors, OclMat <T> modelDescriptors, OclMat <int> modelIdx, OclMat <float> distance, int k, OclMat <Byte> mask)
        {
            // NOTE(review): this continuity pre-check for k == 2 was disabled; presumably the
            // native implementation now handles non-continuous matrices — confirm before removing.
            /*
             * if (k == 2 && !(modelIdx.IsContinuous && distance.IsContinuous))
             * {
             * throw new ArgumentException("For k == 2, the allocated index matrix and distance matrix must be continuous");
             * }*/
            OclInvoke.oclBruteForceMatcherKnnMatchSingle(_ptr, queryDescriptors, modelDescriptors, modelIdx, distance, k, mask);
        }
 /// <summary>
 /// Create an OpenCL SURF detector
 /// </summary>
 /// <param name="hessianThreshold">The interest operator threshold. Use 100 for default</param>
 /// <param name="nOctaves">The number of octaves to process. Use 4 for default</param>
 /// <param name="nIntervals">The number of intervals in each octave. Use 4 for default</param>
 /// <param name="extended">True, if generate 128-len descriptors, false - 64-len descriptors. Use true for default.</param>
 /// <param name="featuresRatio">Max features = featuresRatio * img.size().area(). Use 0.01 for default</param>
 /// <param name="upright">Use false for default. If set to true, the orientation is not computed for the keypoints</param>
 public OclSURFDetector(
     float hessianThreshold,
     int nOctaves,
     int nIntervals,
     bool extended,
     float featuresRatio,
     bool upright)
 {
     // All parameters are forwarded unchanged to the native SURF detector factory.
     _ptr = OclInvoke.oclSURFDetectorCreate(hessianThreshold, nOctaves, nIntervals, extended, featuresRatio, upright);
 }
 /// <summary>
 /// Run the native multi-scale HOG detection on the given image and collect the resulting rectangles.
 /// </summary>
 /// <param name="image">Pointer to the native image to scan</param>
 /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane</param>
 /// <param name="winStride">Window stride; presumably must be a multiple of block stride — confirm</param>
 /// <param name="padding">Padding applied around the image</param>
 /// <param name="scale">Coefficient of the detection window increase</param>
 /// <param name="groupThreshold">Threshold for grouping overlapping detections</param>
 /// <returns>The rectangles of the detected objects</returns>
 private Rectangle[] DetectMultiScale(
     IntPtr image,
     double hitThreshold,
     Size winStride,
     Size padding,
     double scale,
     int groupThreshold)
 {
     using (MemStorage storage = new MemStorage())
     {
         // The sequence lives in the temporary storage; copy the results out before disposal.
         Seq<Rectangle> detections = new Seq<Rectangle>(storage);
         OclInvoke.oclHOGDescriptorDetectMultiScale(_ptr, image, detections, hitThreshold, winStride, padding, scale, groupThreshold);
         Rectangle[] result = detections.ToArray();
         return result;
     }
 }
예제 #6
0
        /// <summary>
        /// Create a OpenCL cascade classifier using the specific file
        /// </summary>
        /// <param name="fileName">The file to create the classifier from</param>
        /// <exception cref="FileNotFoundException">Thrown (non-NETFX_CORE builds only) when <paramref name="fileName"/> does not exist</exception>
        /// <exception cref="NullReferenceException">Thrown when the native classifier cannot be created from the file</exception>
        public OclCascadeClassifier(String fileName)
            : base()
        {
#if !NETFX_CORE
            // Pre-check file existence for a clearer error message; NETFX_CORE builds skip this
            // (presumably FileInfo is unavailable there — confirm) and rely on the native call failing.
            FileInfo file = new FileInfo(fileName);
            if (!file.Exists)
            {
                throw new FileNotFoundException("File not found", file.FullName);
            }
#endif

            _ptr = OclInvoke.oclCascadeClassifierCreate(fileName);

            // The native factory returns a null pointer on failure rather than throwing.
            if (_ptr == IntPtr.Zero)
            {
                throw new NullReferenceException(String.Format("Fail to create OpenCL HaarCascade object: {0}", fileName));
            }
        }
 /// <summary>
 /// Create a new HOGDescriptor using the specific parameters
 /// </summary>
 /// <param name="winSize">Detection window size. Must be aligned to block size and block stride. Must match the size of the training image. Use (64, 128) for default.</param>
 /// <param name="blockSize">Block size in cells. Use (16, 16) for default.</param>
 /// <param name="blockStride">Block stride. Must be a multiple of cell size. Use (8,8) for default.</param>
 /// <param name="cellSize">Cell size. Use (8, 8) for default.</param>
 /// <param name="nbins">Number of bins. Use 9 bins per cell for default.</param>
 /// <param name="winSigma">Gaussian smoothing window parameter. Use -1 for default.</param>
 /// <param name="L2HysThreshold">L2-Hys normalization method shrinkage. Use 0.2 for default.</param>
 /// <param name="gammaCorrection">Do gamma correction preprocessing or not. Use true for default.</param>
 /// <param name="nLevels">Maximum number of detection window increases. Use 64 for default</param>
 public OclHOGDescriptor(
     Size winSize,
     Size blockSize,
     Size blockStride,
     Size cellSize,
     int nbins,
     double winSigma,
     double L2HysThreshold,
     bool gammaCorrection,
     int nLevels)
 {
     // Size structs are passed by ref to match the native signature; they are not modified here.
     _ptr = OclInvoke.oclHOGDescriptorCreate(
         ref winSize,
         ref blockSize,
         ref blockStride,
         ref cellSize,
         nbins,
         winSigma,
         L2HysThreshold,
         gammaCorrection,
         nLevels);
 }
 /// <summary>
 /// Obtain the keypoints array from OclMat
 /// </summary>
 /// <param name="src">The keypoints obtained from DetectKeyPointsRaw</param>
 /// <param name="dst">The vector of keypoints</param>
 public void DownloadKeypoints(OclMat <float> src, VectorOfKeyPoint dst)
 {
     // Presumably copies the device-side keypoint matrix into the managed vector — confirm.
     OclInvoke.oclSURFDownloadKeypoints(_ptr, src, dst);
 }
예제 #9
0
 /// <summary>
 /// Release the unmanaged memory associated with this OclMat
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.OclMatRelease(ref _ptr);
 }
예제 #10
0
 /// <summary>
 /// Create an empty OclMat
 /// </summary>
 public OclMat()
     : this(OclInvoke.OclMatCreateDefault()) // delegate to the IntPtr constructor with a default native matrix
 {
 }
 /// <summary>
 /// Computes disparity map for the input rectified stereo pair.
 /// </summary>
 /// <param name="left">The left single-channel, 8-bit image</param>
 /// <param name="right">The right image of the same size and the same type</param>
 /// <param name="disparity">The disparity map (output, written by the native call)</param>
 public void FindStereoCorrespondence(OclImage <Gray, Byte> left, OclImage <Gray, Byte> right, OclImage <Gray, Byte> disparity)
 {
     OclInvoke.oclStereoConstantSpaceBPFindStereoCorrespondence(_ptr, left, right, disparity);
 }
예제 #12
0
 /// <summary>
 /// Release the standard vector
 /// </summary>
 protected override void DisposeObject()
 {
     // NOTE(review): unlike the other Release calls in this file, _ptr is passed by value here,
     // so the field is not zeroed by the native call — confirm this is intended.
     OclInvoke.VectorOfOclInfoRelease(_ptr);
 }
예제 #13
0
 /// <summary>
 /// Create an standard vector of OclInfo of the specific size
 /// </summary>
 /// <param name="size">The size of the vector</param>
 public VectorOfOclInfo(int size)
 {
     _ptr = OclInvoke.VectorOfOclInfoCreateSize(size);
 }
예제 #14
0
 /// <summary>
 /// Release all the unmanaged memory associated with this optical flow solver.
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclOpticalFlowDualTVL1Release(ref _ptr);
 }
예제 #15
0
 /// <summary>
 /// Create the Dual TV L1 optical flow solver
 /// </summary>
 public OclOpticalFlowDual_TVL1()
 {
     // The native factory uses its built-in default parameters; no configuration is exposed here.
     _ptr = OclInvoke.oclOpticalFlowDualTVL1Create();
 }
예제 #16
0
        /// <summary>
        /// Convert the source image to the current image, if the size are different, the current image will be a resized version of the srcImage.
        /// </summary>
        /// <typeparam name="TSrcColor">The color type of the source image</typeparam>
        /// <typeparam name="TSrcDepth">The color depth of the source image</typeparam>
        /// <param name="srcImage">The sourceImage</param>
        public void ConvertFrom <TSrcColor, TSrcDepth>(OclImage <TSrcColor, TSrcDepth> srcImage)
            where TSrcColor : struct, IColor
            where TSrcDepth : new()
        {
            if (!Size.Equals(srcImage.Size))
            { //if the size of the source image do not match the size of the current image
                // Resize the source to this image's size, then recurse exactly once with matching sizes;
                // the resized temporary is disposed on exit.
                using (OclImage <TSrcColor, TSrcDepth> tmp = srcImage.Resize(Size, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR))
                {
                    ConvertFrom(tmp);
                    return;
                }
            }

            if (typeof(TColor) == typeof(TSrcColor))
            {
                #region same color
                if (typeof(TDepth) == typeof(TSrcDepth)) //same depth
                {
                    // Same color and same depth: a direct copy is sufficient.
                    OclInvoke.Copy(srcImage.Ptr, Ptr, IntPtr.Zero);
                }
                else //different depth
                {
                    if (typeof(TDepth) == typeof(Byte) && typeof(TSrcDepth) != typeof(Byte))
                    {
                        // Converting down to Byte: find the global min/max across all channels so the
                        // data can be linearly rescaled into the byte range if it would otherwise clip.
                        double[] minVal, maxVal;
                        Point[]  minLoc, maxLoc;
                        srcImage.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
                        double min = minVal[0];
                        double max = maxVal[0];
                        for (int i = 1; i < minVal.Length; i++)
                        {
                            min = Math.Min(min, minVal[i]);
                            max = Math.Max(max, maxVal[i]);
                        }
                        // Identity transform unless the source range falls outside [0, 255]; then map
                        // [min, max] linearly onto [0, 255] (a degenerate range gets scale 0, shift min).
                        double scale = 1.0, shift = 0.0;
                        if (max > 255.0 || min < 0)
                        {
                            scale = (max == min) ? 0.0 : 255.0 / (max - min);
                            shift = (scale == 0) ? min : -min * scale;
                        }

                        OclInvoke.ConvertTo(srcImage.Ptr, Ptr, scale, shift);
                    }
                    else
                    {
                        // Any other depth change: plain element-wise conversion, no rescaling.
                        OclInvoke.ConvertTo(srcImage.Ptr, Ptr, 1.0, 0.0);
                    }
                }
                #endregion
            }
            else
            {
                #region different color
                if (typeof(TDepth) == typeof(TSrcDepth))
                { //same depth
                    ConvertColor(srcImage.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size);
                }
                else
                {                                                                                     //different depth
                    using (OclImage <TSrcColor, TDepth> tmp = srcImage.Convert <TSrcColor, TDepth>()) //convert depth
                        ConvertColor(tmp.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size);
                }
                #endregion
            }
        }
 /// <summary>
 /// Create a new HOGDescriptor with the default parameters
 /// </summary>
 public OclHOGDescriptor()
 {
     _ptr = OclInvoke.oclHOGDescriptorCreateDefault();
 }
 /// <summary>
 /// Release the unmanaged memory associated with this HOGDescriptor
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclHOGDescriptorRelease(ref _ptr);
 }
 /// <summary>
 /// Release all the unmanaged memory associated with this matcher
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclBruteForceMatcherRelease(ref _ptr);
 }
예제 #20
0
 /// <summary>
 /// Create a OclMatchTemplateBuf
 /// </summary>
 public OclMatchTemplateBuf()
 {
     _ptr = OclInvoke.oclMatchTemplateBufCreate();
 }
예제 #21
0
 /// <summary>
 /// Compute the dense optical flow.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis); output, written by the native call</param>
 /// <param name="v">Flow vertical component (along y axis); output, written by the native call</param>
 public void Dense(OclImage <Gray, byte> frame0, OclImage <Gray, byte> frame1, OclImage <Gray, float> u, OclImage <Gray, float> v)
 {
     OclInvoke.oclOpticalFlowDualTVL1Compute(_ptr, frame0, frame1, u, v);
 }
예제 #22
0
 /// <summary>
 /// Release the buffer
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclMatchTemplateBufRelease(ref _ptr);
 }
예제 #23
0
 /// <summary>
 /// Create an empty standard vector of OclInfo
 /// </summary>
 public VectorOfOclInfo()
 {
     _ptr = OclInvoke.VectorOfOclInfoCreate();
 }
예제 #24
0
 /// <summary>
 /// Create a stereoBM
 /// </summary>
 /// <param name="preset">Preset type</param>
 /// <param name="numberOfDisparities">The number of disparities. Must be multiple of 8. Use 64 for default </param>
 /// <param name="winSize">The SAD window size. Use 19 for default</param>
 public OclStereoBM(PresetType preset, int numberOfDisparities, int winSize)
 {
     _ptr = OclInvoke.oclStereoBMCreate(preset, numberOfDisparities, winSize);
 }
예제 #25
0
 /// <summary>
 /// Clear the vector (removes all elements from the underlying native vector)
 /// </summary>
 public void Clear()
 {
     OclInvoke.VectorOfOclInfoClear(_ptr);
 }
예제 #26
0
 /// <summary>
 /// Release the stereo state and all the memory associate with it
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclStereoBMRelease(ref _ptr);
 }
 /// <summary>
 /// Create a stereo matcher using A Constant-Space Belief Propagation Algorithm for Stereo Matching
 /// </summary>
 /// <param name="ndisp">The number of disparities. Use 128 as default</param>
 /// <param name="iters">The number of BP iterations on each level. Use 8 as default.</param>
 /// <param name="levels">The number of levels. Use 4 as default</param>
 /// <param name="nrPlane">The number of active disparity on the first level. Use 4 as default.</param>
 public OclStereoConstantSpaceBP(int ndisp, int iters, int levels, int nrPlane)
 {
     _ptr = OclInvoke.oclStereoConstantSpaceBPCreate(ndisp, iters, levels, nrPlane);
 }
 /// <summary>
 /// Obtain an OclMat from the keypoints array
 /// </summary>
 /// <param name="src">The keypoints array</param>
 /// <param name="dst">An OclMat that represent the keypoints</param>
 public void UploadKeypoints(VectorOfKeyPoint src, OclMat <float> dst)
 {
     // Presumably copies the managed keypoints into a device-side matrix — confirm.
     OclInvoke.oclSURFUploadKeypoints(_ptr, src, dst);
 }
 /// <summary>
 /// Release the unmanaged memory associated with this stereo matcher
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclStereoConstantSpaceBPRelease(ref _ptr);
 }
 /// <summary>
 /// Release the unmanaged resource associate to the Detector
 /// </summary>
 protected override void DisposeObject()
 {
     // _ptr is passed by ref so the native release can reset it (presumably to IntPtr.Zero — confirm).
     OclInvoke.oclSURFDetectorRelease(ref _ptr);
 }