Example #1
        /// <summary>
        /// Recognize text using the tesseract-ocr API.
        /// Takes an image as input and returns the recognized text in the output_text parameter.
        /// Optionally also provides the Rects for the individual text elements found (e.g. words),
        /// and the list of those text elements with their confidence values.
        /// </summary>
        /// <param name="image">Input image CV_8UC1 or CV_8UC3</param>
        /// <param name="outputText">Output text of the tesseract-ocr.</param>
        /// <param name="componentRects">If provided, the method will output a list of Rects for the individual
        /// text elements found (e.g. words or text lines).</param>
        /// <param name="componentTexts">If provided, the method will output a list of text strings for the
        /// recognition of individual text elements found (e.g. words or text lines).</param>
        /// <param name="componentConfidences">If provided, the method will output a list of confidence values
        /// for the recognition of individual text elements found (e.g. words or text lines).</param>
        /// <param name="componentLevel">OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXT_LINE.</param>
        public override void Run(
            Mat image,
            out string outputText,
            out Rect[] componentRects,
            out string?[] componentTexts,
            out float[] componentConfidences,
            ComponentLevels componentLevel = ComponentLevels.Word)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            using var outputTextString           = new StdString();
            using var componentRectsVector       = new VectorOfRect();
            using var componentTextsVector       = new VectorOfString();
            using var componentConfidencesVector = new VectorOfFloat();
            NativeMethods.HandleException(
                NativeMethods.text_OCRTesseract_run1(
                    ptr,
                    image.CvPtr,
                    outputTextString.CvPtr,
                    componentRectsVector.CvPtr,
                    componentTextsVector.CvPtr,
                    componentConfidencesVector.CvPtr,
                    (int)componentLevel));
            outputText           = outputTextString.ToString();
            componentRects       = componentRectsVector.ToArray();
            componentTexts       = componentTextsVector.ToArray();
            componentConfidences = componentConfidencesVector.ToArray();

            GC.KeepAlive(this);
            GC.KeepAlive(image);
        }
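A minimal usage sketch for the wrapper above, assuming OpenCvSharp's OCRTesseract.Create factory; the tessdata directory, language, and image path are assumptions:

        // Create the OCR engine, load an image, and run recognition.
        using var ocr = OCRTesseract.Create("./tessdata", "eng");
        using var img = Cv2.ImRead("sign.png", ImreadModes.Color);
        ocr.Run(img, out var text, out var rects, out var texts, out var confidences);
        Console.WriteLine(text);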
Example #2
        private static double ComputeAngleBetweenCameraNormAndPlaneNorm(VectorOfPoint3D32F trackedFeatures3D, Matrix<double> normal, VectorOfFloat raux, VectorOfFloat taux)
        {
            var tvec = taux.ToArray().Select(i => (double)i).ToArray();

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(raux, rotationMat);
            var rotationMatrix = new Matrix<double>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // Presumably negates/transposes the rotation so that the product below
            // yields the camera center in world coordinates (C = -R^T * t).
            Utils.Negotiate(ref rotationMatrix);

            var cameraPosition      = rotationMatrix * new Matrix<double>(tvec);
            var cameraPositionPoint = new MCvPoint3D32f((float)cameraPosition[0, 0], (float)cameraPosition[1, 0], (float)cameraPosition[2, 0]);

            var cameraVector = trackedFeatures3D[0] - cameraPositionPoint;

            Func<double, double> radianToDegree = angle => angle * (180.0 / Math.PI);

            // Note: Math.Acos only yields a valid angle if both vectors are unit length.
            double dotProduct  = new double[] { cameraVector.X, cameraVector.Y, cameraVector.Z }.Dot(new[] { normal[0, 0], normal[0, 1], normal[0, 2] });
            double acos        = Math.Acos(dotProduct);
            double angleResult = radianToDegree(acos);

            Console.WriteLine($"Normal: [{normal.Data[0, 0]}, {normal.Data[0, 1]}, {normal.Data[0, 2]}]");
            Console.WriteLine($"Angle: {angleResult}");
            Console.WriteLine($"Dot product: {dotProduct}");

            return angleResult;
        }
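The Acos call above is only well-defined when both vectors have unit length; a minimal, self-contained sketch of the normalized computation (helper name hypothetical):

        // Hypothetical helper: angle in degrees between two 3-vectors,
        // normalizing and clamping so Math.Acos stays in its valid domain.
        static double AngleDegrees(double[] a, double[] b)
        {
            double dot = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
            double na  = Math.Sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
            double nb  = Math.Sqrt(b[0] * b[0] + b[1] * b[1] + b[2] * b[2]);
            double cos = Math.Max(-1.0, Math.Min(1.0, dot / (na * nb)));
            return Math.Acos(cos) * (180.0 / Math.PI);
        }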
Example #3
        /// <summary>
        /// Detect scene text from the given image
        /// </summary>
        /// <param name="image">The image</param>
        /// <returns>The detected scene text.</returns>
        public DetectedObject[] Detect(IInputArray image)
        {
            using (VectorOfVectorOfPoint vvp = new VectorOfVectorOfPoint())
                using (VectorOfFloat confidents = new VectorOfFloat())
                {
                    _textDetector.Detect(image, vvp, confidents);

                    Point[][]             detectionResults = vvp.ToArrayOfArray();
                    float[]               confidentResult  = confidents.ToArray();
                    List <DetectedObject> results          = new List <DetectedObject>();
                    for (int i = 0; i < detectionResults.Length; i++)
                    {
                        DetectedObject st             = new DetectedObject();
                        PointF[]       detectedPointF =
                            Array.ConvertAll(detectionResults[i], p => new PointF((float)p.X, (float)p.Y));
                        st.Region    = CvInvoke.BoundingRectangle(detectionResults[i]);
                        st.Confident = confidentResult[i];

                        using (Mat textSubMat = new Mat())
                        {
                            FourPointsTransform(image, detectedPointF, new Size(100, 32), textSubMat);
                            String text = _ocr.Recognize(textSubMat);
                            st.Label = text;
                        }

                        results.Add(st);
                    }

                    return(results.ToArray());
                }
        }
Example #4
        /// <summary>
        /// Chamfer matching: finds occurrences of an edge template in an edge image.
        /// </summary>
        /// <param name="img">Edge image in which the template is searched for.</param>
        /// <param name="templ">Edge template to match.</param>
        /// <param name="results">Output contours (point lists) of the matches found.</param>
        /// <param name="cost">Output matching cost for each result.</param>
        /// <param name="templScale">Scale applied to the template.</param>
        /// <param name="maxMatches">Maximum number of matches to return.</param>
        /// <param name="minMatchDistance">Minimum distance between two matches.</param>
        /// <param name="padX">Horizontal sliding-window step.</param>
        /// <param name="padY">Vertical sliding-window step.</param>
        /// <param name="scales">Number of scales to search over.</param>
        /// <param name="minScale">Minimum template scale.</param>
        /// <param name="maxScale">Maximum template scale.</param>
        /// <param name="orientationWeight">Weight of the orientation term in the matching cost.</param>
        /// <param name="truncate">Truncation value for the distance transform.</param>
        /// <returns>The index of the best match, or a negative value if no match was found.</returns>
        public static int ChamferMatching(
            Mat img, Mat templ,
            out Point[][] results, out float[] cost,
            double templScale = 1, int maxMatches = 20,
            double minMatchDistance = 1.0, int padX = 3,
            int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
            double orientationWeight = 0.5, double truncate = 20)
        {
            if (img == null)
                throw new ArgumentNullException(nameof(img));
            if (templ == null)
                throw new ArgumentNullException(nameof(templ));
            img.ThrowIfDisposed();
            templ.ThrowIfDisposed();
            
            using (var resultsVec = new VectorOfVectorPoint())
            using (var costVec = new VectorOfFloat())
            {
                int ret = NativeMethods.contrib_chamerMatching(
                    img.CvPtr, templ.CvPtr, resultsVec.CvPtr, costVec.CvPtr, 
                    templScale, maxMatches, minMatchDistance,
                    padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
                GC.KeepAlive(img);
                GC.KeepAlive(templ);

                results = resultsVec.ToArray();
                cost = costVec.ToArray();

                return ret;
            }
        }
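A usage sketch under the assumption that this method is exposed as a static on Cv2 (as in older OpenCvSharp builds); the file names are hypothetical. Chamfer matching expects edge maps, so both inputs are run through Canny first:

        using (var img = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
        using (var templ = Cv2.ImRead("logo.png", ImreadModes.Grayscale))
        using (var imgEdges = new Mat())
        using (var templEdges = new Mat())
        {
            Cv2.Canny(img, imgEdges, 50, 150);
            Cv2.Canny(templ, templEdges, 50, 150);
            int best = Cv2.ChamferMatching(imgEdges, templEdges, out Point[][] results, out float[] costs);
            if (best >= 0)
                Console.WriteLine($"Best match has {results[best].Length} points, cost {costs[best]}");
        }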
Example #5
File: Yolo.cs Project: jetyen/emgucv
        /// <summary>
        /// Detect objects using Yolo model
        /// </summary>
        /// <param name="image">The input image</param>
        /// <param name="confThreshold">The confident threshold. Only detection with confident larger than this will be returned.</param>
        /// <param name="nmsThreshold">If positive, will perform non-maximum suppression using the threshold value. If less than or equals to 0, will not perform Non-maximum suppression.</param>
        /// <returns>The detected objects</returns>
        public DetectedObject[] Detect(Mat image, double confThreshold = 0.5, double nmsThreshold = 0.5)
        {
            if (_yoloDetectionModel == null)
            {
                throw new InvalidOperationException("Please initialize the model first");
            }

            using (VectorOfInt classIds = new VectorOfInt())
                using (VectorOfFloat confidents = new VectorOfFloat())
                    using (VectorOfRect regions = new VectorOfRect())
                    {
                        _yoloDetectionModel.Detect(image, classIds, confidents, regions, (float)confThreshold, (float)nmsThreshold);
                        var classIdArr   = classIds.ToArray();
                        var confidentArr = confidents.ToArray();
                        var regionArr    = regions.ToArray();
                        List <DetectedObject> nmsResults = new List <DetectedObject>();
                        for (int i = 0; i < classIdArr.Length; i++)
                        {
                            DetectedObject o = new DetectedObject();
                            o.ClassId   = classIdArr[i];
                            o.Confident = confidentArr[i];
                            o.Region    = regionArr[i];
                            o.Label     = _labels[o.ClassId];
                            nmsResults.Add(o);
                        }
                        return(nmsResults.ToArray());
                    }
        }
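A hypothetical usage sketch for the Detect method above; the detector instance, image path, and threshold values are assumptions:

        using (Mat frame = CvInvoke.Imread("street.jpg"))
        {
            DetectedObject[] objects = detector.Detect(frame, confThreshold: 0.5, nmsThreshold: 0.4);
            foreach (DetectedObject o in objects)
            {
                // Draw each region and print its label with the confidence value.
                CvInvoke.Rectangle(frame, o.Region, new MCvScalar(0, 0, 255), 2);
                Console.WriteLine($"{o.Label}: {o.Confident:0.00}");
            }
        }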
Example #6
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0)
            {
                return(new ImageFeature[0]);
            }
            using (VectorOfFloat descVec = new VectorOfFloat())
                using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
                {
                    kpts.Push(keyPoints);
                    CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

                    int     n     = keyPoints.Length;
                    float[] descs = descVec.ToArray();

                    ImageFeature[] features         = new ImageFeature[n];
                    int            sizeOfdescriptor = DescriptorSize;
                    for (int i = 0; i < n; i++)
                    {
                        features[i].KeyPoint = keyPoints[i];
                        float[] d = new float[sizeOfdescriptor];
                        Array.Copy(descs, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                        features[i].Descriptor = d;
                    }
                    return(features);
                }
        }
Example #7
 /// <summary>
 /// Returns coefficients of the classifier trained for people detection (for size 64x128).
 /// </summary>
 /// <returns>The people detector of 64x128 resolution.</returns>
 public static float[] GetPeopleDetector64x128()
 {
     using (VectorOfFloat f = new VectorOfFloat())
     {
         GpuInvoke.gpuHOGDescriptorGetPeopleDetector64x128(f);
         return(f.ToArray());
     }
 }
Example #8
 /// <summary>
 /// Returns the list of the rectangles' objectness values.
 /// </summary>
 /// <returns>The list of the rectangles' objectness values.</returns>
 public float[] GetObjectnessValues()
 {
     using (VectorOfFloat vector = new VectorOfFloat())
     {
         SaliencyInvoke.cveObjectnessBINGGetObjectnessValues(_ptr, vector);
         return(vector.ToArray());
     }
 }
Example #9
 /// <summary>
 /// Returns coefficients of the classifier trained for people detection (for size 48x96). Only compatible with a HOG detector with the same window size.
 /// </summary>
 /// <returns>The people detector of 48x96 resolution</returns>
 public static float[] GetPeopleDetector48x96()
 {
     using (VectorOfFloat f = new VectorOfFloat())
     {
         OclInvoke.oclHOGDescriptorGetPeopleDetector48x96(f);
         return(f.ToArray());
     }
 }
Example #10
 /// <summary>
 /// Computes the self-similarity descriptors of the image at the given locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride</param>
 /// <param name="locations">Locations for the computation</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(Mat image, Size winStride, Point[] locations)
 {
     using (VectorOfFloat vof = new VectorOfFloat())
         using (VectorOfPoint vp = new VectorOfPoint(locations))
         {
             CvSelfSimDescriptorCompute(_ptr, image, vof, ref winStride, vp);
             return(vof.ToArray());
         }
 }
Example #11
 /// <summary>
 /// Computes the self-similarity descriptors of the image at the given locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride</param>
 /// <param name="locations">Locations for the computation</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(Image<Gray, Byte> image, Size winStride, Point[] locations)
 {
    using (VectorOfFloat vof = new VectorOfFloat())
    {
       // Pin the locations array so the native call can read it directly;
       // release the handle even if the native call throws.
       GCHandle handle = GCHandle.Alloc(locations, GCHandleType.Pinned);
       try
       {
          CvSelfSimDescriptorCompute(_ptr, image, vof, ref winStride, handle.AddrOfPinnedObject(), locations.Length);
       }
       finally
       {
          handle.Free();
       }
       return vof.ToArray();
    }
 }
Example #12
 /// <summary>
 /// Computes the self-similarity descriptors of the image at the given locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride</param>
 /// <param name="locations">Locations for the computation</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(Image<Gray, Byte> image, Size winStride, Point[] locations)
 {
     using (VectorOfFloat vof = new VectorOfFloat())
     {
         // Pin the locations array for the native call and always release the handle.
         GCHandle handle = GCHandle.Alloc(locations, GCHandleType.Pinned);
         try
         {
             CvInvoke.CvSelfSimDescriptorCompute(_ptr, image, vof, ref winStride, handle.AddrOfPinnedObject(), locations.Length);
         }
         finally
         {
             handle.Free();
         }
         return vof.ToArray();
     }
 }
Example #13
 /// <summary>
 /// Find bounding boxes of text words given an input image.
 /// </summary>
 /// <param name="inputImage">An image expected to be a CV_U8C3 of any size</param>
 /// <returns>The text regions found.</returns>
 public TextRegion[] Detect(IInputArray inputImage)
 {
     using (InputArray iaImage = inputImage.GetInputArray())
         using (VectorOfRect vr = new VectorOfRect())
             using (VectorOfFloat vf = new VectorOfFloat())
             {
                 TextInvoke.cveTextDetectorCNNDetect(_ptr, iaImage, vr, vf);
                 Rectangle[]  bboxes     = vr.ToArray();
                 float[]      confidents = vf.ToArray();
                 TextRegion[] regions    = new TextRegion[bboxes.Length];
                 for (int i = 0; i < regions.Length; i++)
                 {
                     TextRegion tr = new TextRegion();
                     tr.BBox      = bboxes[i];
                     tr.Confident = confidents[i];
                     regions[i]   = tr;
                 }
                 return(regions);
             }
 }
Example #14
        /// <summary>
        /// Detect image features from the given image
        /// </summary>
        /// <param name="image">The image to detect features from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>The image features detected from the given image</returns>
        public ImageFeature[] DetectFeatures(Image <Gray, Byte> image, Image <Gray, byte> mask)
        {
            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
                using (VectorOfFloat descVec = new VectorOfFloat())
                {
                    CvSIFTDetectorDetectFeature(_ptr, image, mask, pts, descVec);
                    MKeyPoint[] kpts             = pts.ToArray();
                    float[]     desc             = descVec.ToArray();
                    int         n                = kpts.Length;
                    int         sizeOfdescriptor = DescriptorSize;

                    ImageFeature[] features = new ImageFeature[n];
                    for (int i = 0; i < n; i++)
                    {
                        features[i].KeyPoint = kpts[i];
                        float[] d = new float[sizeOfdescriptor];
                        Array.Copy(desc, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                        features[i].Descriptor = d;
                    }
                    return(features);
                }
        }
Example #15
        /// <summary>
        /// Given the input frame, create input blob, run net and return result detections.
        /// </summary>
        /// <param name="model">The Dnn DetectionModel</param>
        /// <param name="frame">The input image.</param>
        /// <param name="confThreshold">A threshold used to filter boxes by confidences.</param>
        /// <param name="nmsThreshold">A threshold used in non maximum suppression. The default value 0 means we will not perform non-maximum supression.</param>
        /// <param name="labels">Optional labels mapping, if provided, it will use classId as lookup index to get the Label. If null, the Label field of the DetectedObject will be null.</param>
        /// <returns>The array of detected objects</returns>
        public static DetectedObject[] Detect(
            this Emgu.CV.Dnn.DetectionModel model,
            IInputArray frame,
            float confThreshold = 0.5f,
            float nmsThreshold  = 0.5f,
            String[] labels     = null)
        {
            using (VectorOfInt classIds = new VectorOfInt())
                using (VectorOfFloat confidents = new VectorOfFloat())
                    using (VectorOfRect regions = new VectorOfRect())
                    {
                        model.Detect(
                            frame,
                            classIds,
                            confidents,
                            regions,
                            (float)confThreshold,
                            (float)nmsThreshold);
                        var classIdArr   = classIds.ToArray();
                        var confidentArr = confidents.ToArray();
                        var regionArr    = regions.ToArray();
                        List <DetectedObject> results = new List <DetectedObject>();
                        for (int i = 0; i < classIdArr.Length; i++)
                        {
                            DetectedObject o = new DetectedObject();
                            o.ClassId   = classIdArr[i];
                            o.Confident = confidentArr[i];
                            o.Region    = regionArr[i];
                            if (labels != null)
                            {
                                o.Label = labels[o.ClassId];
                            }
                            results.Add(o);
                        }

                        return(results.ToArray());
                    }
        }
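A sketch of calling this extension method, assuming Emgu CV's Dnn.DetectionModel with hypothetical YOLO weight/config paths and the usual input preprocessing setters on the underlying Model:

        using (var model = new Emgu.CV.Dnn.DetectionModel("yolov4.weights", "yolov4.cfg"))
        using (Mat frame = CvInvoke.Imread("street.jpg"))
        {
            // YOLO-style preprocessing: 416x416 input, scaled to [0,1], BGR -> RGB.
            model.SetInputSize(new Size(416, 416));
            model.SetInputScale(1.0 / 255);
            model.SetInputSwapRB(true);
            // labels: null here, so the returned objects carry only ClassId and Confident.
            DetectedObject[] objects = model.Detect(frame, 0.5f, 0.4f, labels: null);
        }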
Example #16
        /// <summary>
        /// Apply cascade to an input frame and return the vector of Detection objects.
        /// </summary>
        /// <param name="image">A frame on which detector will be applied.</param>
        /// <param name="rois">A vector of regions of interest. Only the objects that fall into one of the regions will be returned.</param>
        /// <returns>An output array of Detections.</returns>
        public Detection[] Detect(IInputArray image, Rectangle[] rois = null)
        {
            using (VectorOfRect roiRects = new VectorOfRect())
                using (VectorOfRect regions = new VectorOfRect())
                    using (VectorOfFloat confidents = new VectorOfFloat())
                    {
                        IntPtr roisPtr;
                        if (rois == null || rois.Length == 0)
                        {
                            roisPtr = IntPtr.Zero;
                        }
                        else
                        {
                            roiRects.Push(rois);
                            roisPtr = roiRects.Ptr;
                        }
                        using (InputArray iaImage = image.GetInputArray())
                            SoftCascadeInvoke.cveSoftCascadeDetectorDetect(_ptr, iaImage, roisPtr, regions, confidents);

                        if (regions.Size == 0)
                        {
                            return(new Detection[0]);
                        }
                        else
                        {
                            Rectangle[] regionArr    = regions.ToArray();
                            float[]     confidentArr = confidents.ToArray();
                            Detection[] results      = new Detection[regionArr.Length];
                            for (int i = 0; i < results.Length; i++)
                            {
                                results[i] = new Detection(regionArr[i], confidentArr[i]);
                            }
                            return(results);
                        }
                    }
        }
Example #17
 /// <summary>
 /// Computes the HOG descriptor vector of the given image.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride. Use Size.Empty for default</param>
 /// <param name="padding">Padding. Use Size.Empty for default</param>
 /// <param name="locations">Locations for the computation. Can be null if not needed</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(IInputArray image, Size winStride = new Size(), Size padding = new Size(),
    Point[] locations = null)
 {
    using (VectorOfFloat desc = new VectorOfFloat())
    using (InputArray iaImage = image.GetInputArray())
    {
       if (locations == null)
       {
          CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, IntPtr.Zero);
       }
       else
       {
          using (VectorOfPoint vp = new VectorOfPoint(locations))
          {
             CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, vp);
          }
       }
       return desc.ToArray();
    }
 }
Example #18
       /// <summary>
       /// Fits a line to a 2D point set.
       /// </summary>
       /// <param name="points">Input vector of 2D points.</param>
       /// <param name="direction">A normalized vector collinear to the line.</param>
       /// <param name="pointOnLine">A point on the line.</param>
       /// <param name="distType">The distance used for fitting.</param>
       /// <param name="param">Numerical parameter (C) for some types of distances; if 0, an optimal value is chosen.</param>
       /// <param name="reps">Sufficient accuracy for the radius (distance between the coordinate origin and the line); 0.01 is a good default.</param>
       /// <param name="aeps">Sufficient accuracy for the angle; 0.01 is a good default.</param>
      public static void FitLine(
          PointF[] points,
          out PointF direction,
          out PointF pointOnLine,
          CvEnum.DistType distType,
          double param,
          double reps,
          double aeps)
      {
         using (VectorOfPointF pv = new VectorOfPointF(points))
         using (VectorOfFloat line = new VectorOfFloat())
         using (InputArray iaPv = pv.GetInputArray())
         using (OutputArray oaLine = line.GetOutputArray())
         {
            cveFitLine(iaPv, oaLine, distType, param, reps, aeps);
            float[] values = line.ToArray();
            direction = new PointF(values[0], values[1]);
             pointOnLine = new PointF(values[2], values[3]);
          }
      }
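A usage sketch with a handful of points near the line y = x; assumes this overload is reachable from the caller (e.g., as CvInvoke.FitLine):

          PointF[] pts =
          {
             new PointF(0, 0.1f), new PointF(1, 0.9f),
             new PointF(2, 2.2f), new PointF(3, 2.8f)
          };
          FitLine(pts, out PointF direction, out PointF pointOnLine, CvEnum.DistType.L2, 0, 0.01, 0.01);
          Console.WriteLine($"direction = {direction}, point on line = {pointOnLine}");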
Example #19
 /// <summary>
 /// Return the default people detector
 /// </summary>
 /// <returns>The default people detector</returns>
 public static float[] GetDefaultPeopleDetector()
 {
    using (Util.VectorOfFloat desc = new VectorOfFloat())
    {
       CvInvoke.cveHOGDescriptorPeopleDetectorCreate(desc);
       return desc.ToArray();
    }
 }
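The returned coefficients are typically fed to HOGDescriptor.SetSVMDetector before running detection; a minimal sketch (the image path is an assumption):

    HOGDescriptor hog = new HOGDescriptor();
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    using (Mat pedestrians = CvInvoke.Imread("pedestrians.png"))
    {
       MCvObjectDetection[] found = hog.DetectMultiScale(pedestrians);
       foreach (MCvObjectDetection d in found)
          CvInvoke.Rectangle(pedestrians, d.Rect, new MCvScalar(0, 255, 0), 2);
    }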
Example #20
        /// <summary>
        /// Computes the HOG descriptor vector of the given image.
        /// </summary>
        /// <param name="img">The source image.</param>
        /// <param name="winStride">Window stride. Must be a multiple of the block stride.</param>
        /// <param name="padding">Padding.</param>
        /// <param name="locations">Locations for the computation. Can be null if not needed.</param>
        /// <returns>The descriptor vector</returns>
        public virtual float[] Compute(Mat img, Size? winStride = null, Size? padding = null, Point[] locations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException(nameof(img));

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfFloat())
            {
                int length = (locations != null) ? locations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_compute(ptr, img.CvPtr, flVec.CvPtr, winStride0, padding0, locations, length);
                return flVec.ToArray();
            }
        }
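A quick sketch of calling the method above with the defaults; the image path is an assumption. For the default 64x128 detection window and a 64x128 input, the descriptor has 3780 elements:

        using (var hog = new HOGDescriptor())
        using (var img = Cv2.ImRead("person_64x128.png", ImreadModes.Grayscale))
        {
            float[] descriptor = hog.Compute(img);
            Console.WriteLine(descriptor.Length); // 3780 for a single 64x128 window
        }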
Example #21
File: SURF.cs Project: MJunak/opencvsharp
        /// <summary>
        /// Detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img">The input image</param>
        /// <param name="mask">The optional operation mask</param>
        /// <param name="keypoints">The detected keypoints</param>
        /// <param name="descriptors">The computed SURF descriptors</param>
        /// <param name="useProvidedKeypoints">If true, the provided keypoints are used instead of running detection</param>
        public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, out float[] descriptors,
            bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException(nameof(img));
            img.ThrowIfDisposed();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            using (VectorOfFloat descriptorsVec = new VectorOfFloat())
            {
                NativeMethods.nonfree_SURF_run2_vector(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr,
                    descriptorsVec.CvPtr, useProvidedKeypoints ? 1 : 0);

                keypoints = keypointsVec.ToArray();
                descriptors = descriptorsVec.ToArray();
            }
        }
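A hypothetical usage sketch, assuming the legacy OpenCvSharp nonfree SURF class this method belongs to; the constructor argument and image path are assumptions:

        using (var img = Cv2.ImRead("building.png", ImreadModes.Grayscale))
        {
            var surf = new SURF(400); // Hessian threshold (assumed constructor)
            surf.Run(img, null, out KeyPoint[] keypoints, out float[] descriptors);
            Console.WriteLine($"{keypoints.Length} keypoints, {descriptors.Length} descriptor values");
        }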
Example #22
 /// <summary>
 /// Computes the HOG descriptor vector of the given image.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride. Use Size.Empty for default</param>
 /// <param name="padding">Padding. Use Size.Empty for default</param>
 /// <param name="locations">Locations for the computation. Can be null if not needed</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(Image<Bgr, Byte> image, Size winStride, Size padding, Point[] locations)
 {
     using (VectorOfFloat desc = new VectorOfFloat())
     {
         if (locations == null)
         {
             CvInvoke.CvHOGDescriptorCompute(_ptr, image, desc, winStride, padding, IntPtr.Zero);
         }
         else
         {
             using (MemStorage stor = new MemStorage())
             {
                 Seq<Point> locationSeq = new Seq<Point>(stor);
                 // Push the requested locations into the sequence; the original code
                 // passed an empty sequence, so the locations never reached the native call.
                 locationSeq.PushMulti(locations, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
                 CvInvoke.CvHOGDescriptorCompute(_ptr, image, desc, winStride, padding, locationSeq);
             }
         }
         return desc.ToArray();
     }
 }
Example #23
        /// <summary>
        /// Computes sparse optical flow using the iterative Lucas-Kanade method with pyramids.
        /// </summary>
        /// <param name="prevImg">First 8-bit input image or pyramid.</param>
        /// <param name="nextImg">Second input image or pyramid of the same size and type as prevImg.</param>
        /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
        /// <param name="nextPts">Output vector of the calculated new positions of the input features in the second image.</param>
        /// <param name="status">Output status vector; each element is set to 1 if the flow for the corresponding feature was found, 0 otherwise.</param>
        /// <param name="err">Output vector of errors, one per feature.</param>
        /// <param name="winSize">Size of the search window at each pyramid level.</param>
        /// <param name="maxLevel">0-based maximal pyramid level number.</param>
        /// <param name="criteria">Criteria specifying the termination of the iterative search algorithm.</param>
        /// <param name="flags">Operation flags.</param>
        /// <param name="minEigThreshold">Features for which the minimum eigenvalue of the 2x2 normal matrix of the optical flow equations is smaller than this are filtered out.</param>
        public static void CalcOpticalFlowPyrLK(
            InputArray prevImg, InputArray nextImg,
            Point2f[] prevPts, ref Point2f[] nextPts,
            out byte[] status, out float[] err,
            Size? winSize = null,
            int maxLevel = 3,
            TermCriteria? criteria = null,
            OpticalFlowFlags flags = OpticalFlowFlags.None,
            double minEigThreshold = 1e-4)
        {
            if (prevImg == null)
                throw new ArgumentNullException(nameof(prevImg));
            if (nextImg == null)
                throw new ArgumentNullException(nameof(nextImg));
            if (prevPts == null)
                throw new ArgumentNullException(nameof(prevPts));
            if (nextPts == null)
                throw new ArgumentNullException(nameof(nextPts));
            prevImg.ThrowIfDisposed();
            nextImg.ThrowIfDisposed();

            Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21));
            TermCriteria criteria0 = criteria.GetValueOrDefault(
                TermCriteria.Both(30, 0.01));

            using (var nextPtsVec = new VectorOfPoint2f())
            using (var statusVec = new VectorOfByte())
            using (var errVec = new VectorOfFloat())
            {
                NativeMethods.video_calcOpticalFlowPyrLK_vector(
                    prevImg.CvPtr, nextImg.CvPtr, prevPts, prevPts.Length,
                    nextPtsVec.CvPtr, statusVec.CvPtr, errVec.CvPtr, 
                    winSize0, maxLevel, criteria0, (int)flags, minEigThreshold);
                nextPts = nextPtsVec.ToArray();
                status = statusVec.ToArray();
                err = errVec.ToArray();
            }
        }
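A sketch of tracking corners between two frames with the method above (called unqualified as in the declaring class; externally it would be Cv2.CalcOpticalFlowPyrLK). The Mats prevGray, nextGray, and frame are assumed to exist:

        // Pick strong corners in the first frame, then follow them into the second.
        Point2f[] prevPts = Cv2.GoodFeaturesToTrack(prevGray, 200, 0.01, 10, null, 3, false, 0.04);
        Point2f[] nextPts = null;
        CalcOpticalFlowPyrLK(prevGray, nextGray, prevPts, ref nextPts, out byte[] status, out float[] err);
        for (int i = 0; i < prevPts.Length; i++)
        {
            if (status[i] == 1) // flow was found for this feature
            {
                Cv2.Line(frame,
                    new Point((int)prevPts[i].X, (int)prevPts[i].Y),
                    new Point((int)nextPts[i].X, (int)nextPts[i].Y),
                    Scalar.Red);
            }
        }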
Example #24
 public void Test_VectorOfFloat()
 {
    // Dispose the native vector when done; the original test leaked it.
    using (VectorOfFloat vf = new VectorOfFloat())
    {
       float[] values = new float[20];
       for (int i = 0; i < values.Length; i++)
          values[i] = i;
       vf.Push(values);
       float[] valuesCopy = vf.ToArray();
       for (int i = 0; i < values.Length; i++)
          EmguAssert.AreEqual(values[i], valuesCopy[i]);
    }
 }