Code Example #1
        private void UpdatePosition(VectorOfDouble tValues, VectorOfDouble rValues)
        {
            if (this.InvokeRequired)
            {
                this.Invoke(new Action(() =>
                {
                    textBoxX.Text = tValues[0].ToString();
                    textBoxY.Text = tValues[1].ToString();
                    textBoxZ.Text = tValues[2].ToString();

                    textBoxU.Text = rValues[0].ToString();
                    textBoxV.Text = rValues[1].ToString();
                    textBoxW.Text = rValues[2].ToString();
                }));
                return;
            }
            else
            {
                textBoxX.Text = tValues[0].ToString();
                textBoxY.Text = tValues[1].ToString();
                textBoxZ.Text = tValues[2].ToString();

                textBoxU.Text = rValues[0].ToString();
                textBoxV.Text = rValues[1].ToString();
                textBoxW.Text = rValues[2].ToString();
            }
        }
Code Example #2
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The image to search in</param>
 /// <param name="hitThreshold">
 /// Threshold for the distance between features and SVM classifying plane.
 /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
 /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.
 /// </param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding"></param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="finalThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping. Should be an integer if not using meanshift grouping. Use 2.0 for default</param>
 /// <param name="useMeanshiftGrouping">If true, it will use meanshift grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(
     IInputArray image,
     double hitThreshold       = 0,
     Size winStride            = new Size(),
     Size padding              = new Size(),
     double scale              = 1.05,
     double finalThreshold     = 2.0,
     bool useMeanshiftGrouping = false)
 {
     using (Util.VectorOfRect vr = new VectorOfRect())
         using (Util.VectorOfDouble vd = new VectorOfDouble())
             using (InputArray iaImage = image.GetInputArray())
             {
                 CvHOGDescriptorDetectMultiScale(_ptr, iaImage, vr, vd, hitThreshold, ref winStride, ref padding, scale, finalThreshold, useMeanshiftGrouping);
                 Rectangle[]          location = vr.ToArray();
                 double[]             weight   = vd.ToArray();
                 MCvObjectDetection[] result   = new MCvObjectDetection[location.Length];
                 for (int i = 0; i < result.Length; i++)
                 {
                     MCvObjectDetection od = new MCvObjectDetection();
                     od.Rect   = location[i];
                     od.Score  = (float)weight[i];
                     result[i] = od;
                 }
                 return(result);
             }
 }
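A hedged usage sketch for the method above, assuming the Emgu CV HOGDescriptor API with its built-in people detector; the image file name is made up:

 // Sketch only: run the default people detector on one image and draw each hit.
 using (Mat img = CvInvoke.Imread("pedestrians.jpg"))        // file name is an assumption
 using (HOGDescriptor hog = new HOGDescriptor())
 {
     hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
     MCvObjectDetection[] detections = hog.DetectMultiScale(img);
     foreach (MCvObjectDetection detection in detections)
         CvInvoke.Rectangle(img, detection.Rect, new MCvScalar(0, 0, 255), 2);
 }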
Code Example #3
 /// <summary>
 /// Performs object detection with a multi-scale window.
 /// </summary>
 /// <param name="image">Source image.</param>
 /// <param name="objects">Detected objects boundaries.</param>
 /// <param name="confident">Optional output array for confidences.</param>
 public void DetectMultiScale(IInputArray image, VectorOfRect objects, VectorOfDouble confident = null)
 {
     using (InputArray iaImage = image.GetInputArray())
     {
         CudaInvoke.cudaHOGDetectMultiScale(_ptr, iaImage, objects, confident);
     }
 }
Code Example #4
File: MCvMoments.cs  Project: swiggins/emgucv
 /// <summary>
 /// Get the HuMoments
 /// </summary>
 /// <returns>The Hu moment computed from this moment</returns>
 public double[] GetHuMoment()
 {
     using (VectorOfDouble vd = new VectorOfDouble())
     {
         CvInvoke.HuMoments(this, vd);
         return(vd.ToArray());
     }
 }
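For context, a hedged sketch of calling GetHuMoment, assuming the MCvMoments-returning overload of CvInvoke.Moments used by this fork; the rectangle contour is made up:

 // Sketch only: moments of a simple contour, then the 7 Hu invariants.
 Point[] pts = { new Point(0, 0), new Point(100, 0), new Point(100, 50), new Point(0, 50) };
 using (VectorOfPoint contour = new VectorOfPoint(pts))
 {
     MCvMoments moments = CvInvoke.Moments(contour);   // newer Emgu releases return Moments instead
     double[] hu = moments.GetHuMoment();              // hu[0]..hu[6], rotation/scale invariant
 }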
Code Example #5
 /// <summary>
 /// Gets the per-block mean values computed by the block mean hash.
 /// </summary>
 /// <returns>The block mean values.</returns>
 public double[] GetMean()
 {
     ThrowIfDisposed();
     using var meanVec = new VectorOfDouble();
     NativeMethods.HandleException(
         NativeMethods.img_hash_BlockMeanHash_getMean(ptr, meanVec.CvPtr));
     GC.KeepAlive(this);
     return(meanVec.ToArray());
 }
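A hedged usage sketch, assuming the OpenCvSharp BlockMeanHash API; the file name is made up:

 // Sketch only: compute a block mean hash, then read the per-block mean values.
 using var hash = BlockMeanHash.Create();     // OpenCvSharp.ImgHash
 using var img = Cv2.ImRead("photo.jpg");     // file name is an assumption
 using var hashValue = new Mat();
 hash.Compute(img, hashValue);                // fills the hash and the internal mean vector
 double[] blockMeans = hash.GetMean();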
Code Example #6
 bool allZeroes(VectorOfDouble check)
 {
     // Use the absolute value so small negative entries also count as zero.
     foreach (double d in check)
     {
         if (Math.Abs(d) > 1E-10)
         {
             return(false);
         }
     }
     return(true);
 }
Code Example #7
        public Rectangle[] FindBodyHOG(Mat image, out double[] confidence)
        {
            // If CUDA is unavailable, fall back to the CPU implementation
            if (!Global.canRunCuda)
            {
                confidence = null;
                return(FindBodyHOG_WithoutGpu(image));
            }
            if (des == null)
            {
                InitalizeBodyTracker();
            }



            List <Rectangle> regions  = new List <Rectangle>();
            VectorOfRect     rects    = new VectorOfRect();
            VectorOfDouble   confColl = new VectorOfDouble();

            confidence = new double[0];
            try
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());

                des.GroupThreshold = 1;
                des.HitThreshold   = 0;
                des.NumLevels      = 15;
                des.ScaleFactor    = 1.05;


                using (GpuMat cudaBgr = new GpuMat(image))
                    using (GpuMat cudaBgra = new GpuMat())
                    {
                        CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                        //des.DetectMultiScale(cudaBgra, rects, confColl);
                        des.DetectMultiScale(cudaBgra, rects, null);
                    }


                //confidence = confColl.ToArray();

                for (int i = 0; i < rects.ToArray().Length; i++)
                {
                    //if (confidence[i] > 0.5)
                    regions.Add(rects[i]);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }

            return(regions.ToArray());//rects.ToArray();
        }
Code Example #8
        /// <summary>
        /// Creates camera calibration info from the pinhole intrinsics and distortion coefficients.
        /// </summary>
        /// <param name="fx">Focal length along x, in pixels.</param>
        /// <param name="fy">Focal length along y, in pixels.</param>
        /// <param name="cx">Principal point x coordinate, in pixels.</param>
        /// <param name="cy">Principal point y coordinate, in pixels.</param>
        /// <param name="distorsionCoeff">The lens distortion coefficients.</param>
        public CameraCalibrationInfo(double fx, double fy, double cx, double cy, double[] distorsionCoeff)
        {
            _intrinsic = new Matrix <double>(3, 3);
            _intrinsic.SetIdentity();
            _distortion = new VectorOfDouble(distorsionCoeff);

            Fx = fx;
            Fy = fy;
            Cx = cx;
            Cy = cy;
        }
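A minimal sketch of constructing the calibration info above; every numeric value is a hypothetical placeholder:

        var calibration = new CameraCalibrationInfo(
            fx: 615.0, fy: 615.0,          // focal lengths in pixels (made-up values)
            cx: 320.0, cy: 240.0,          // principal point (made-up values)
            distorsionCoeff: new double[] { 0.1, -0.25, 0.001, 0.0005, 0.0 });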
Code Example #9
        /// <summary>
        /// Returns the overall time for inference and timings (in ticks) for layers.
        /// Indexes in the returned vector correspond to layer ids. Some layers can be fused with others;
        /// in that case a zero tick count is returned for those skipped layers.
        /// </summary>
        /// <param name="timings">Vector of tick timings for all layers.</param>
        /// <returns>Overall ticks for model inference.</returns>
        public long GetPerfProfile(out double[] timings)
        {
            ThrowIfDisposed();

            using var timingsVec = new VectorOfDouble();
            NativeMethods.HandleException(
                NativeMethods.dnn_Net_getPerfProfile(ptr, timingsVec.CvPtr, out var ret));
            GC.KeepAlive(this);

            timings = timingsVec.ToArray();
            return(ret);
        }
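A hedged sketch of using the profile data, assuming the OpenCvSharp Dnn API; the model path and input image are made up:

        // Sketch only: run one forward pass, then convert the returned tick count to milliseconds.
        using var net = CvDnn.ReadNetFromOnnx("model.onnx");          // model path is an assumption
        using var image = Cv2.ImRead("input.jpg");                    // file name is an assumption
        using var blob = CvDnn.BlobFromImage(image, 1.0 / 255.0);
        net.SetInput(blob);
        using var output = net.Forward();                             // timings are collected during inference
        long totalTicks = net.GetPerfProfile(out double[] layerTicks);
        double totalMs = totalTicks * 1000.0 / Cv2.GetTickFrequency();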
Code Example #10
File: Net.cs  Project: vnvizitiu/emgucv
 /// <summary>
 /// Returns the overall time for inference and timings (in ticks) for layers. Indexes in the returned vector correspond to layer ids. Some layers can be fused with others, in which case a zero tick count is returned for those skipped layers.
 /// </summary>
 /// <param name="timings">Vector for tick timings for all layers.</param>
 /// <returns>Overall ticks for model inference.</returns>
 public Int64 GetPerfProfile(VectorOfDouble timings = null)
 {
     if (timings != null)
     {
         return(DnnInvoke.cveDnnNetGetPerfProfile(_ptr, timings));
     }
     else
     {
         using (VectorOfDouble vd = new VectorOfDouble())
             return(DnnInvoke.cveDnnNetGetPerfProfile(_ptr, vd));
     }
 }
Code Example #11
        /// <summary>
        /// Add a plot of the 1D histogram.
        /// </summary>
        /// <param name="name">The name of the histogram</param>
        /// <param name="color">The drawing color</param>
        /// <param name="histogram">The 1D histogram to be drawn</param>
        /// <param name="binSize">The size of the bin</param>
        /// <param name="ranges">The ranges</param>
        /// <returns>The image of the histogram</returns>
        public Mat GenerateHistogram(String name, Color color, Mat histogram, int binSize, float[] ranges)
        {
            //Debug.Assert(histogram.Dimension == 1, Properties.StringTable.Only1DHistogramSupported);

            #region draw the histogram
            RangeF range = new RangeF(ranges[0], ranges[1]);

            float    step  = (range.Max - range.Min) / binSize;
            float    start = range.Min;
            double[] bin   = new double[binSize];
            for (int binIndex = 0; binIndex < binSize; binIndex++)
            {
                bin[binIndex] = start;
                start        += step;
            }

            double[] binVal = new double[histogram.Size.Height];
            GCHandle handle = GCHandle.Alloc(binVal, GCHandleType.Pinned);

            using (Matrix <double> m = new Matrix <double>(binVal.Length, 1, handle.AddrOfPinnedObject(),
                                                           sizeof(double)))
            {
                histogram.ConvertTo(m, DepthType.Cv64F);
            }
            handle.Free();

            using (VectorOfDouble x = new VectorOfDouble(bin))
                using (VectorOfDouble y = new VectorOfDouble(binVal))
                {
                    using (Emgu.CV.Plot.Plot2d plot = new Plot2d(x, y))
                    {
                        plot.SetShowText(false);
                        plot.SetPlotBackgroundColor(new MCvScalar(255, 255, 255));
                        plot.SetPlotLineColor(new MCvScalar(0, 0, 0));
                        plot.SetPlotGridColor(new MCvScalar(220, 220, 220));
                        plot.SetGridLinesNumber(255);
                        plot.SetPlotSize(512, 200);
                        plot.SetMinX(0);
                        plot.SetMaxX(256);
                        plot.SetMinY(-1);
                        plot.SetMaxY(binVal.Max());
                        plot.SetInvertOrientation(true);

                        Mat render = new Mat();
                        plot.Render(render);
                        CvInvoke.PutText(render, name, new Point(20, 30), FontFace.HersheyComplex, 0.8,
                                         new MCvScalar(0, 0, 255));
                        return(render);
                    }
                }
            #endregion
        }
Code Example #12
        public static double[] KukaSorted(VectorOfDouble jointAngleCollection)
        {
            double[] firstJointAngleCollectionSorted = new double[7];

            //VectorOfDouble firstJointAngleCollectionSorted = new VectorOfDouble(jointAngleCollection.Count);
            firstJointAngleCollectionSorted[0] = jointAngleCollection.ElementAt(0); //A1
            firstJointAngleCollectionSorted[1] = jointAngleCollection.ElementAt(1); //A2
            firstJointAngleCollectionSorted[2] = jointAngleCollection.ElementAt(3); //A3
            firstJointAngleCollectionSorted[3] = jointAngleCollection.ElementAt(4); //A4
            firstJointAngleCollectionSorted[4] = jointAngleCollection.ElementAt(5); //A5
            firstJointAngleCollectionSorted[5] = jointAngleCollection.ElementAt(6); //A6
            firstJointAngleCollectionSorted[6] = jointAngleCollection.ElementAt(2); //A7

            return(firstJointAngleCollectionSorted);
        }
Code Example #13
        private void UpdatePosition(VectorOfDouble tValues, VectorOfDouble rValues)
        {
            if (this.InvokeRequired)
            {
                this.Invoke((Action) delegate { UpdatePosition(tValues, rValues); });
                return;
            }

            textBoxX.Text = tValues[0].ToString();
            textBoxY.Text = tValues[1].ToString();
            textBoxZ.Text = tValues[2].ToString();

            textBoxU.Text = rValues[0].ToString();
            textBoxV.Text = rValues[1].ToString();
            textBoxW.Text = rValues[2].ToString();
        }
Code Example #14
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The CudaImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(IInputArray image)
 {
     using (Util.VectorOfRect vr = new VectorOfRect())
         using (Util.VectorOfDouble vd = new VectorOfDouble())
         {
             DetectMultiScale(image, vr, vd);
             Rectangle[]          location = vr.ToArray();
             double[]             weight   = vd.ToArray();
             MCvObjectDetection[] result   = new MCvObjectDetection[location.Length];
             for (int i = 0; i < result.Length; i++)
             {
                 MCvObjectDetection od = new MCvObjectDetection();
                 od.Rect   = location[i];
                 od.Score  = (float)weight[i];
                 result[i] = od;
             }
             return(result);
         }
 }
Code Example #15
        private void _cameraCapture_ImageGrabbed(object sender, EventArgs e)
        {
            Mat capturedImage = new Mat();

            _cameraCapture.Retrieve(capturedImage, 0);

            _webCamView.Image = capturedImage;

            Mat filteredImage = new Mat();

            IInputArray lowerArray =
                new VectorOfDouble(new double[] { _targetHsv.Hue.LowerLimit(_hsvAccuracy.Hue),
                                                  _targetHsv.Satuation.LowerLimit(_hsvAccuracy.Satuation) / 100.0,
                                                  _targetHsv.Value.LowerLimit(_hsvAccuracy.Value) / 100.0 });
            IInputArray upperArray =
                new VectorOfDouble(new double[] { _targetHsv.Hue.UpperLimit(_hsvAccuracy.Hue),
                                                  _targetHsv.Satuation.UpperLimit(_hsvAccuracy.Satuation) / 100.0,
                                                  _targetHsv.Value.UpperLimit(_hsvAccuracy.Value) / 100.0 });

            CvInvoke.InRange(capturedImage, lowerArray, upperArray, filteredImage);
            filteredImageBox.Image = filteredImage;
        }
Code Example #16
 /// <summary>
 /// Groups the object candidate rectangles.
 /// </summary>
 /// <param name="rectList">Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
 /// <param name="foundWeights">found weights</param>
 /// <param name="foundScales">found scales</param>
 /// <param name="detectThreshold">detect threshold, use 0 for default</param>
 /// <param name="winDetSize">win det size, use (64, 128) for default</param>
 public static void GroupRectanglesMeanshift(VectorOfRect rectList, VectorOfDouble foundWeights, VectorOfDouble foundScales, double detectThreshold, Size winDetSize)
 {
     cveGroupRectanglesMeanshift(rectList, foundWeights, foundScales, detectThreshold, ref winDetSize);
 }
Code Example #17
 /// <summary>
 /// Groups the object candidate rectangles.
 /// </summary>
 /// <param name="rectList">Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
 /// <param name="groupThreshold">Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.</param>
 /// <param name="eps">Relative difference between sides of the rectangles to merge them into a group.</param>
 /// <param name="weights">weights</param>
 /// <param name="levelWeights">level weights</param>
 public static void GroupRectangles(VectorOfRect rectList, int groupThreshold, double eps, VectorOfInt weights, VectorOfDouble levelWeights)
 {
     cveGroupRectangles3(rectList, groupThreshold, eps, weights, levelWeights);
 }
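A hedged usage sketch for the overload above, assuming these wrappers are exposed on CvInvoke as in Emgu CV; rawRects stands in for the raw hits of some detector:

 // Sketch only: collapse overlapping candidate rectangles into grouped detections.
 Rectangle[] rawRects = /* raw detections from a cascade or HOG detector (assumption) */ new Rectangle[0];
 using (VectorOfRect candidates = new VectorOfRect(rawRects))
 using (VectorOfInt weights = new VectorOfInt(new int[rawRects.Length]))
 using (VectorOfDouble levelWeights = new VectorOfDouble(new double[rawRects.Length]))
 {
     CvInvoke.GroupRectangles(candidates, 1, 0.2, weights, levelWeights);
     Rectangle[] grouped = candidates.ToArray();   // retained, merged rectangles
 }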
Code Example #18
 /// <summary>
 /// Groups the object candidate rectangles.
 /// </summary>
 /// <param name="rectList">Input/output vector of rectangles. Output vector includes retained and grouped rectangles.</param>
 /// <param name="rejectLevels">reject levels</param>
 /// <param name="levelWeights">level weights</param>
 /// <param name="groupThreshold">Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.</param>
 /// <param name="eps">Relative difference between sides of the rectangles to merge them into a group.</param>
 public static void GroupRectangles(VectorOfRect rectList, VectorOfInt rejectLevels, VectorOfDouble levelWeights, int groupThreshold, double eps = 0.2)
 {
     cveGroupRectangles4(rectList, rejectLevels, levelWeights, groupThreshold, eps);
 }
Code Example #19
        /// <summary>
        /// Finds lines in the input image.
        /// This is the output of the default parameters of the algorithm on the above shown image.
        /// </summary>
        /// <param name="image">A grayscale (CV_8UC1) input image. </param>
        /// <param name="lines">A vector of Vec4i or Vec4f elements specifying the beginning and ending point of a line. 
        /// Where Vec4i/Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient.</param>
        /// <param name="width">Vector of widths of the regions, where the lines are found. E.g. Width of line.</param>
        /// <param name="prec">Vector of precisions with which the lines are found.</param>
        /// <param name="nfa">Vector containing number of false alarms in the line region, 
        /// with precision of 10%. The bigger the value, logarithmically better the detection.</param>
        public virtual void Detect(InputArray image, out Vec4f[] lines,
            out double[] width, out double[] prec, out double[] nfa)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfDisposed();

            using (var linesVec = new VectorOfVec4f())
            using (var widthVec = new VectorOfDouble())
            using (var precVec = new VectorOfDouble())
            using (var nfaVec = new VectorOfDouble())
            {
                NativeMethods.imgproc_LineSegmentDetector_detect_vector(ptr, image.CvPtr,
                    linesVec.CvPtr, widthVec.CvPtr, precVec.CvPtr, nfaVec.CvPtr);

                lines = linesVec.ToArray();
                width = widthVec.ToArray();
                prec = precVec.ToArray();
                nfa = nfaVec.ToArray();
            }

            GC.KeepAlive(image);
        }
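A hedged usage sketch, assuming the OpenCvSharp LineSegmentDetector factory method; the file name is made up:

        // Sketch only: detect line segments in a grayscale image.
        using var gray = Cv2.ImRead("building.jpg", ImreadModes.Grayscale);   // file name is an assumption
        using var lsd = LineSegmentDetector.Create();
        lsd.Detect(gray, out Vec4f[] lines, out double[] width, out double[] prec, out double[] nfa);
        // Each lines[i] is (x1, y1, x2, y2); width/prec/nfa are the per-line statistics described above.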
Code Example #20
      /*
      /// <summary>
      /// Performs object detection with increasing detection window.
      /// </summary>
      /// <param name="image">The CudaImage to search in</param>
      /// <returns>The regions where positives are found</returns>
      public MCvObjectDetection[] DetectMultiScale(IInputArray image)
      {
         using (Util.VectorOfRect vr = new VectorOfRect())
         using (Util.VectorOfDouble vd = new VectorOfDouble())
         {
            DetectMultiScale(image, vr, vd);
            Rectangle[] location = vr.ToArray();
            double[] weight = vd.ToArray();
            MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
            for (int i = 0; i < result.Length; i++)
            {
               MCvObjectDetection od = new MCvObjectDetection();
               od.Rect = location[i];
               od.Score = (float)weight[i];
               result[i] = od;
            }
            return result;
         }
      }*/

      public void DetectMultiScale(IInputArray image, VectorOfRect objects, VectorOfDouble confident = null)
      {
         using (InputArray iaImage = image.GetInputArray())
         {
            CudaInvoke.cudaHOGDetectMultiScale(_ptr, iaImage, objects, confident);
         }
      }
Code Example #21
File: MCvMoments.cs  Project: neutmute/emgucv
 /// <summary>
 /// Get the HuMoments 
 /// </summary>
 /// <returns>The Hu moment computed from this moment</returns>
 public double[] GetHuMoment()
 {
    using (VectorOfDouble vd = new VectorOfDouble())
    {
       CvInvoke.HuMoments(this, vd);
       return vd.ToArray();
    }
   
 }
Code Example #22
        /// <summary>
        /// Trigger and receive a concrete motionPlan from startFrame to goalFrame.
        /// Obstacles must be added to the motionPlanner instance of the robot beforehand.
        /// </summary>
        /// <param name="robot">The robot for which a motion should be planned.</param>
        /// <param name="motionPlan">The preconfigured motionPlan which should be used.</param>
        /// <param name="startFrame">The frame where the motion should start.</param>
        /// <param name="goalFrame">The frame where the motion should end.</param>
        /// <returns>The planned sequence of joint configurations, or null if planning failed.</returns>
        public VectorOfDoubleVector planMotion(IRobot robot, MotionPlan motionPlan, String startFrame, String goalFrame)
        {
            MotionPlanRobotDescription description = motionPlan.getMotionPlanRobotDescription();

            // we need the current position of the robot to enhance the result of the inverse kinematics
            VectorOfDouble currentPositionJointAngles = new VectorOfDouble(robot.RobotController.Joints.Count);

            if (robot.RobotController.Joints.Count == 7)
            {
                currentPositionJointAngles.Add(robot.RobotController.Joints[0].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[1].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[6].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[2].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[3].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[4].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[5].Value);
            }
            else
            {
                currentPositionJointAngles.Add(robot.RobotController.Joints[0].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[1].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[2].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[3].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[4].Value);
                currentPositionJointAngles.Add(robot.RobotController.Joints[5].Value);
            }



            //TODO: Make those frames global?
            IFeature startNode = robot.Component.FindFeature(startFrame);

            if (startNode == null)
            {
                IoC.Get <IMessageService>().AppendMessage("Start Frame \"" + startFrame + "\" was not found.", MessageLevel.Error);
                return(null);
            }
            IFeature goalNode = robot.Component.FindFeature(goalFrame);

            if (goalNode == null)
            {
                IoC.Get <IMessageService>().AppendMessage("Goal Frame \"" + goalFrame + "\" was not found.", MessageLevel.Error);
                return(null);
            }


            Matrix         startPosition    = robot.Component.RootNode.GetFeatureTransformationInWorld(startNode);
            Matrix         goalPosition     = robot.Component.RootNode.GetFeatureTransformationInWorld(goalNode);
            Vector3        startRotation    = startPosition.GetWPR();
            Vector3        goalRotation     = goalPosition.GetWPR();
            Vector3        robotPosition    = robot.Component.TransformationInWorld.GetP();
            VectorOfDouble startJointAngles = currentPositionJointAngles;
            //IoC.Get<IMessageService>().AppendMessage("Goal Frame \"" + goalFrame + "\": [" + String.Format("{0:0.000}", (goalPosition.GetP().X / 1000)) + ", " + String.Format("{0:0.000}", (goalPosition.GetP().Y / 1000)) + ", " + String.Format("{0:0.000}", (goalPosition.GetP().Z / 1000)) + "] ["+ String.Format("{0:0.000}", goalRotation.X)+" "+ String.Format("{0:0.000}", goalRotation.Y)+" "+ String.Format("{0:0.000}", goalRotation.Z) + "]", MessageLevel.Warning);

            VectorOfDouble goalJointAngles = description.getIK(goalPosition.GetP().X / 1000,
                                                               goalPosition.GetP().Y / 1000,
                                                               goalPosition.GetP().Z / 1000,
                                                               goalRotation.X, goalRotation.Y, goalRotation.Z, startJointAngles, 0.5, "Distance");

            motionPlan.setStartPosition(startJointAngles);
            motionPlan.setGoalPosition(goalJointAngles);

            String startOut = "[", goalOut = "[";

            for (int i = 0; i < startJointAngles.Count; i++)
            {
                startOut += String.Format("{0:0.000}", startJointAngles[i]) + " ";
            }
            for (int i = 0; i < goalJointAngles.Count; i++)
            {
                goalOut += String.Format("{0:0.000}", goalJointAngles[i]) + " ";
            }
            startOut += "]";
            goalOut  += "]";

            motionPlan.setSolveTime(10.0);
            motionPlan.setStateValidityCheckingResolution(0.01);
            //motionPlan.setReportFirstExactSolution(true);
            motionPlan.setPlannerByString("RRTConnect");

            if (motionPlan.plan() > 0)
            {
                //IoC.Get<IMessageService>().AppendMessage("Found motion from " + startFrame + "@" + startOut + " to " + goalFrame + "@" + goalOut + ": ", MessageLevel.Warning);
                VectorOfDoubleVector plan = motionPlan.getLastResult();
                //motionPlan.showSetupInInspector();
                foreach (VectorOfDouble jointConfiguration in plan)
                {
                    String motionBuf = "[", sep = "";
                    foreach (double jointAngle in jointConfiguration)
                    {
                        motionBuf += sep + String.Format("{0:0.000}", jointAngle);
                        sep        = ",";
                    }

                    //IoC.Get<IMessageService>().AppendMessage(motionBuf + "]", MessageLevel.Warning);
                }

                //IoC.Get<IMessageService>().AppendMessage("Found motion END", MessageLevel.Warning);
                return(plan);
            }
            else
            {
                //motionPlan.showSetupInInspector();
                IoC.Get <IMessageService>().AppendMessage("Failed to find motion from " + startOut + " to " + goalOut + ": " + motionPlan.getLastPlanningError(), MessageLevel.Warning);
            }


            return(null);
        }
Code Example #23
        //double lastTime = 0.0;
        public void RegularTick(object sender, EventArgs e)
        {
            //double deltaTime = app.Simulation.Elapsed - lastTime;
            //lastTime = app.Simulation.Elapsed;

            foreach (ILaserScanner laserScanner in laser_scanners)
            {
                laserScanner.Scan();
            }

            double appElapsed = app.Simulation.Elapsed;

            try
            {
                robotListLock.EnterReadLock();
                foreach (IRobot robot in robotList.Keys)
                {
                    if (!robot.IsValid)
                    {
                        continue;
                    }
                    UpdateVisualizationDistance(robot);
                    double deltaTime = appElapsed - robotList[robot].LastTimeElapsed;
                    if (human != null)
                    {
                        MotionInterpolationInstance.CalculateCurrentRobotSpeed(robot, ref robotList, TICK_INTERVAL, human.TransformationInWorld.GetP()); //robotList[robot].closestHumanWorldPosition
                    }
                    RobotParameters param = robotList[robot];
                    if (param.motionPlan == null)
                    {
                        continue;
                    }
                    MotionInterpolator mp = param.motionPlan.getMotionInterpolator();
                    if (param.motionPlan != null && !param.motionPlan.getMotionInterpolator().motionDone())
                    {
                        VectorOfDouble result = param.motionPlan.getMotionInterpolator().interpolate_tick(deltaTime);

                        robot.RobotController.InvalidateKinChains();

                        if (robot.RobotController.Joints.Count == 7)
                        {
                            robot.RobotController.SetJointValues(MotionInterpolation.KukaSorted(result));
                        }
                        else
                        {
                            double[] firstJointAngleCollectionSorted = new double[result.Count];
                            firstJointAngleCollectionSorted[0] = result.ElementAt(0); //A1
                            firstJointAngleCollectionSorted[1] = result.ElementAt(1); //A2
                            firstJointAngleCollectionSorted[2] = result.ElementAt(2); //A3
                            firstJointAngleCollectionSorted[3] = result.ElementAt(3); //A4
                            firstJointAngleCollectionSorted[4] = result.ElementAt(4); //A5
                            firstJointAngleCollectionSorted[5] = result.ElementAt(5); //A6
                            robot.RobotController.SetJointValues(firstJointAngleCollectionSorted);
                        }
                    }
                    else
                    {
                        //ms.AppendMessage("!!MovementFinished!!", MessageLevel.Warning);
                        // After this command, the video recording breaks...

                        // set movement done!
                        IBehavior movementFinished = (IBehavior)robot.Component.FindBehavior("MovementFinished");
                        if (movementFinished is IStringSignal)
                        {
                            IStringSignal movementFinishedStringSignal = (IStringSignal)movementFinished;

                            if (!String.Equals(movementFinishedStringSignal.Value, robotList[robot].payloadOnFinishMovement))
                            {
                                movementFinishedStringSignal.Value = robotList[robot].payloadOnFinishMovement;
                            }
                        }
                        else
                        {
                            ms.AppendMessage("\"MovementFinished\" behavior was null or not of type IStringSignal. Abort!", MessageLevel.Warning);
                        }
                    }
                    robotList[robot].LastTimeElapsed = appElapsed;
                }
            }
            finally
            {
                robotListLock.ExitReadLock();
            }
        }
Code Example #24
File: beeagv_aruco.cs  Project: yytx0802/CheatingCode
        private void ProcessFrame()
        {
            while (_capture != null && _capture.Ptr != IntPtr.Zero)
            {
                _capture.Retrieve(_frame, 0);
                _frame.CopyTo(_frameCopy);

                using (VectorOfInt ids = new VectorOfInt())
                    using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
                        using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
                        {
                            //DetectorParameters p = DetectorParameters.GetDefault();
                            ArucoInvoke.DetectMarkers(_frameCopy, ArucoDictionary, corners, ids, _detectorParameters, rejected);

                            if (ids.Size > 0)
                            {
                                //ArucoInvoke.RefineDetectedMarkers(_frameCopy, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, _detectorParameters);
                                //cameraButton.Text = "Calibrate camera";
                                ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));
                                if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                                {
                                    ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                                    for (int i = 0; i < ids.Size; i++)
                                    {
                                        using (Mat rvecMat = rvecs.Row(i))
                                            using (Mat tvecMat = tvecs.Row(i))
                                                using (VectorOfDouble rvec = new VectorOfDouble())
                                                    using (VectorOfDouble tvec = new VectorOfDouble())
                                                    {
                                                        double[] values = new double[3];
                                                        rvecMat.CopyTo(values);
                                                        rvec.Push(values);
                                                        tvecMat.CopyTo(values);
                                                        tvec.Push(values);
                                                        ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                                                             markersLength * 0.5f);
                                                    }
                                    }
                                }
                                float counterX = 0, counterY = 0;
                                int   count = corners.Size;
                                for (int i = 0; i < count; ++i)
                                {
                                    using (VectorOfPointF corner = corners[i])
                                    {
                                        PointF[] cor = corner.ToArray();
                                        for (int j = 0; j < cor.Length; j++)
                                        {
                                            //Console.WriteLine(cor[j].X);
                                            counterX += cor[j].X;
                                            counterY += cor[j].Y;
                                        }
                                        markersX = counterX / 4;
                                        markersY = counterY / 4;
                                    }
                                }
                            }
                        }
                CvInvoke.Undistort(_frameCopy, _output, _cameraMatrix, _distCoeffs);
                CvInvoke.Imshow("out", _output);
                CvInvoke.WaitKey(10);
                //Console.WriteLine("markersX is " + markersX);
                // Console.WriteLine("markersY is " + markersY);
            }
            //else
            //{
            Console.WriteLine("VideoCapture was not created");
            //}
        }
Code Example #25
File: Form1.cs  Project: neutmute/emgucv
      private void ProcessFrame(object sender, EventArgs arg)
      {
         if (_capture != null && _capture.Ptr != IntPtr.Zero)
         {
            _capture.Retrieve(_frame, 0);

            //cameraImageBox.Image = _frame;

            using (VectorOfInt ids = new VectorOfInt())
            using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
            using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
            {
               DetectorParameters p = DetectorParameters.GetDefault();
               ArucoInvoke.DetectMarkers(_frame, ArucoDictionary, corners, ids, p, rejected);
               ArucoInvoke.RefineDetectedMarkers(_frame, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, p);
               _frame.CopyTo(_frameCopy);
               if (ids.Size > 0)
               {
                  //cameraButton.Text = "Calibrate camera";
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = true;
                  });
                  ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));

                  if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                  {
                     ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                     for (int i = 0; i < ids.Size; i++)
                     {
                        using (Mat rvecMat = rvecs.Row(i))
                        using (Mat tvecMat = tvecs.Row(i))
                        using (VectorOfDouble rvec = new VectorOfDouble())
                        using (VectorOfDouble tvec = new VectorOfDouble())
                        {
                           double[] values = new double[3];
                           rvecMat.CopyTo(values);
                           rvec.Push(values);
                           tvecMat.CopyTo(values);
                           tvec.Push(values);

                           
                              ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                 markersLength*0.5f);
                           
                        }
                     }
                  }

                  if (_useThisFrame)
                  {
                     _allCorners.Push(corners);
                     _allIds.Push(ids);
                     _markerCounterPerFrame.Push(new int[] { corners.Size });
                     _imageSize = _frame.Size;
                     UpdateMessage(String.Format("Using {0} points", _markerCounterPerFrame.ToArray().Sum()));
                     _useThisFrame = false;
                  }
               }
               else
               {
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = false;
                  });

                  //cameraButton.Text = "Stop Capture";
               }
               cameraImageBox.Image = _frameCopy;
            }
         }
      }
Code Example #26
        static void Main(string[] args)
        {
            NetworkTable.SetClientMode();
            NetworkTable.SetTeam(4488);
            NetworkTable.SetIPAddress("10.44.88.2");
#if KANGAROO
            NetworkTable.SetNetworkIdentity("Kangaroo");
#else
            NetworkTable.SetNetworkIdentity("CameraTracking");
#endif
            //Switch between Kangaroo and Desktop.
            //On kangaroo, use different table and don't display image
            visionTable = NetworkTable.GetTable("SmartDashboard");

            //ImageGrabber imageGrabber = new ImageGrabber(visionTable);

            Mat HsvIn = new Mat(), HsvOut = new Mat(), output = new Mat(), Temp = new Mat();
            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();

            //VectorOfVectorOfPoint filteredContours = new VectorOfVectorOfPoint();

            //MCvScalar low = new MCvScalar(63, 44, 193);
            //MCvScalar high = new MCvScalar(97, 255, 255);

            double[] defaultLow            = new double[] { 50, 44, 193 };
            double[] defaultHigh           = new double[] { 90, 255, 255 };

            VectorOfDouble arrayLow  = new VectorOfDouble(3);
            VectorOfDouble arrayHigh = new VectorOfDouble(3);

            Point TopMidPoint    = new Point((int)(ImageWidth / 2), 0);
            Point BottomMidPoint = new Point((int)(ImageWidth / 2), (int)ImageHeight);

            Point LeftMidPoint  = new Point(0, (int)(ImageHeight / 2));
            Point RightMidPoint = new Point((int)ImageWidth, (int)(ImageHeight / 2));

            Stopwatch sw = new Stopwatch();

            CameraWatcher cameraChecker = new CameraWatcher();
            //UsbManager2 cameraChecker = new UsbManager2();
            //cameraChecker.startWatcher();

            int count = 0;

            //visionTable.PutNumberArray("HSVLow", defaultLow);
            //visionTable.PutNumberArray("HSVHigh", defaultHigh);

            //visionTable.PutNumber("ShooterOffsetDegreesX", ShooterOffsetDegreesX);
            //visionTable.PutNumber("ShooterOffsetDegreesY", ShooterOffsetDegreesY);

            Thread timer = new Thread(() =>
            {
                while (true)
                {
                    // update kangaroo battery info
                    visionTable.PutNumber("KangarooBattery",
                                          System.Windows.Forms.SystemInformation.PowerStatus.BatteryLifePercent);

                    // check camera status
                    int cameraState = cameraChecker.CheckState;
                    // camera states:
                    // 0 = Camera is found and working
                    // 1 = Camera is not found, waiting for reconnect to reinitialize
                    // 2 = Camera was found again, re-init was kicked off
                    visionTable.PutNumber("CameraState", cameraState);
                    if (cameraState == 0)
                    {
                        // Camera is connected and fine
                        //Console.WriteLine("Camera alive");
                    }
                    else if (cameraState == 1)
                    {
                        // Camera is disconnected or having problems
                        //Console.WriteLine("Camera dead, waiting for reconnect");
                    }
                    else if (cameraState == 2)
                    {
                        // Camera reconnected
                        //Console.WriteLine("Camera found again, reinitializing");
                        Process.Start("C:/Users/Shockwave/Desktop/NewKangaroo/cameraRestart.exe");     // Launch external exe to kill process, set up camera, and restart
                    }

                    Thread.Sleep(5000);
                }
            });
            timer.Start();
            GC.KeepAlive(timer);
            int imageCount = 0;

            ImageBuffer im  = new ImageBuffer();
            Capture     cap = new Capture(0); //Change me to 1 to use external camera
            cap.FlipVertical = true;

            cap.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameWidth, 1280);
            cap.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameHeight, 720);

            ImageSaver saver = new ImageSaver();
            //int saveCount = 0;
            int  rdi        = 1;
            int  kernalSize = 6 * rdi + 1;
            Size ksize      = new Size(kernalSize, kernalSize);

            while (true)
            {
                count++;
                sw.Restart();
                //ImageBuffer image = imageGrabber.Image();
                cap.Grab();
                im.GyroAngle = visionTable.GetNumber("Gyro", 0.0);
                cap.Retrieve(im.Image);

                ImageBuffer image = im.Clone();

#if KANGAROO
                visionTable.PutNumber("KangarooHeartBeat", count);
#endif
                if (image == null || image.IsEmpty)
                {
                    image?.Dispose();
                    Thread.Yield();
                    continue;
                }

                /*
                 * // Image saver for debugging
                 * if (visionTable.GetBoolean("LightsOn", false))
                 * {
                 *  saveCount++;
                 *  if (saveCount >= 6)
                 *  {
                 *      saver.AddToQueue(image.Image);
                 *      saveCount = 0;
                 *  }
                 * }*/

                double[] ntLow  = visionTable.GetNumberArray("HSVLow", defaultLow);
                double[] ntHigh = visionTable.GetNumberArray("HSVHigh", defaultHigh);

                if (ntLow.Length != 3)
                {
                    ntLow = defaultLow;
                }
                if (ntHigh.Length != 3)
                {
                    ntHigh = defaultHigh;
                }

                arrayLow.Clear();
                arrayLow.Push(ntLow);
                arrayHigh.Clear();
                arrayHigh.Push(ntHigh);

                Mat BlurTemp = new Mat();
                CvInvoke.GaussianBlur(image.Image, BlurTemp, ksize, rdi);
                Mat oldImage = image.Image;
                image.Image = BlurTemp;
                oldImage.Dispose();

                //HSV Filter
                CvInvoke.CvtColor(image.Image, HsvIn, Emgu.CV.CvEnum.ColorConversion.Bgr2Hsv);
                CvInvoke.InRange(HsvIn, arrayLow, arrayHigh, HsvOut);

                HsvOut.ConvertTo(Temp, DepthType.Cv8U);
                //Contours
                CvInvoke.FindContours(Temp, contours, null, RetrType.List, ChainApproxMethod.ChainApproxTc89Kcos);
                //CvInvoke.DrawContours(output, contours, -1, new MCvScalar(0, 0, 0));

                VectorOfVectorOfPoint convexHulls = new VectorOfVectorOfPoint(contours.Size);

                for (int i = 0; i < contours.Size; i++)
                {
                    CvInvoke.ConvexHull(contours[i], convexHulls[i]);
                }

                Rectangle? largestRectangle  = null;
                double    currentLargestArea = 0.0;

                //Filter contours
                for (int i = 0; i < convexHulls.Size; i++)
                {
                    VectorOfPoint contour = convexHulls[i];
                    VectorOfPoint polygon = new VectorOfPoint(convexHulls.Size);
                    CvInvoke.ApproxPolyDP(contour, polygon, 10, true);

                    //VectorOfVectorOfPoint cont = new VectorOfVectorOfPoint(1);
                    //cont.Push(polygon);

                    //CvInvoke.DrawContours(image.Image, cont,-1, Green, 2);

                    // Filter if shape has more than 4 corners after contour is applied
                    if (polygon.Size != 4)
                    {
                        polygon.Dispose();
                        continue;
                    }

                    // Filter if not convex
                    if (!CvInvoke.IsContourConvex(polygon))
                    {
                        polygon.Dispose();
                        continue;
                    }

                    ///////////////////////////////////////////////////////////////////////
                    // Filter if there isn't a nearly horizontal line
                    ///////////////////////////////////////////////////////////////////////
                    //int numVertical = 0;
                    int numHorizontal = 0;
                    for (int j = 0; j < 4; j++)
                    {
                        double dx    = polygon[j].X - polygon[(j + 1) % 4].X;
                        double dy    = polygon[j].Y - polygon[(j + 1) % 4].Y;
                        double slope = double.MaxValue;

                        if (dx != 0)
                        {
                            slope = Math.Abs(dy / dx);
                        }

                        double nearlyHorizontalSlope = Math.Tan(ToRadians(20));
                        //double rad = ToRadians(60);
                        //double nearlyVerticalSlope = Math.Tan(rad);

                        //if (slope > nearlyVerticalSlope) numVertical++;
                        if (slope < nearlyHorizontalSlope)
                        {
                            numHorizontal++;
                        }
                    }

                    if (numHorizontal < 1)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////
                    //CvInvoke.PutText(image.Image, "Number of horizontal (>=1): " + (numHorizontal).ToString(), TextPoint4, FontFace.HersheyPlain, 2, Green);

                    ///////////////////////////////////////////////////////////////////////
                    // Filter if polygon is above a set limit. This should remove overhead lights and windows
                    ///////////////////////////////////////////////////////////////////////
                    Rectangle bounds = CvInvoke.BoundingRectangle(polygon);
                    CvInvoke.PutText(image.Image, "Vertical (>=300): " + (bounds.Location.Y).ToString(), TextPoint, FontFace.HersheyPlain, 2, Green);
                    int topY = 300;
                    if (bounds.Location.Y < topY)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////

                    CvInvoke.PutText(image.Image, "Image Height (45-115) and Width (65-225): " + bounds.Height.ToString() + " , " + bounds.Width, TextPoint2, FontFace.HersheyPlain, 2, Green);

                    ///////////////////////////////////////////////////////////////////////
                    // Filter by minimum and maximum height
                    ///////////////////////////////////////////////////////////////////////
                    if (bounds.Height < 45 || bounds.Height > 115)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////

                    ///////////////////////////////////////////////////////////////////////
                    // Filter by minimum and maximum width
                    ///////////////////////////////////////////////////////////////////////
                    if (bounds.Width < 65 || bounds.Width > 225)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////

                    ///////////////////////////////////////////////////////////////////////
                    // Filter by height to width ratio
                    ///////////////////////////////////////////////////////////////////////
                    double ratio = (double)bounds.Height / bounds.Width;
                    CvInvoke.PutText(image.Image, "Ratio: " + ratio.ToString(), TextPoint3, FontFace.HersheyPlain, 2, Green);
                    if (ratio > 1.0 || ratio < .3)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////

                    ///////////////////////////////////////////////////////////////////////
                    // Filter by area to vertical position ratio
                    ///////////////////////////////////////////////////////////////////////
                    double area          = CvInvoke.ContourArea(contour);
                    double areaVertRatio = area / (1280 - bounds.Location.Y);
                    CvInvoke.PutText(image.Image, "Area/Vert Ratio (8-19): " + areaVertRatio.ToString(), TextPoint4, FontFace.HersheyPlain, 2, Green);

                    if (areaVertRatio < 8 || areaVertRatio > 19)
                    {
                        polygon.Dispose();
                        continue;
                    }
                    ///////////////////////////////////////////////////////////////////////

                    //CvInvoke.PutText(image.Image, "Area: " + area.ToString(), TextPoint2, FontFace.HersheyPlain, 2, Green);

                    CvInvoke.Rectangle(image.Image, bounds, Blue, 2);

                    if (area > currentLargestArea)
                    {
                        largestRectangle = bounds;
                    }

                    //filteredContours.Push(contour);

                    polygon.Dispose();
                }
                visionTable.PutBoolean("TargetFound", largestRectangle != null);
                //CvInvoke.PutText(image.Image, "Target found: " + (largestRectangle != null).ToString(), TextPoint5, FontFace.HersheyPlain, 2, Green);


                if (largestRectangle != null)
                {
                    ProcessData(largestRectangle.Value, image);
                    CvInvoke.Rectangle(image.Image, largestRectangle.Value, Red, 5);
                }

                //ToDo, Draw Crosshairs
                //CvInvoke.Line(image.Image, TopMidPoint, BottomMidPoint, Blue, 3);
                //CvInvoke.Line(image.Image, LeftMidPoint, RightMidPoint, Blue, 3);

                //int fps = (int)(1.0 / sw.Elapsed.TotalSeconds);
                //CvInvoke.PutText(image.Image, fps.ToString(), TextPoint, FontFace.HersheyPlain, 2, Green);

                imageCount++;

                // Uncomment below to see the HSV window
                //CvInvoke.Imshow("HSV", HsvOut);
                // Uncomment below to see the main image window
                CvInvoke.Imshow("MainWindow", image.Image);
                image.Dispose();



                //report to NetworkTables

                //Cleanup

                for (int i = 0; i < contours.Size; i++)
                {
                    contours[i].Dispose();
                }
                contours.Clear();

                for (int i = 0; i < convexHulls.Size; i++)
                {
                    convexHulls[i].Dispose();
                }
                convexHulls.Dispose();

                /*
                 * for (int i = 0; i < filteredContours.Size; i++)
                 * {
                 *  filteredContours[i].Dispose();
                 * }
                 * filteredContours.Clear();
                 */

                CvInvoke.WaitKey(1);
            }
            ;
        }
Code Example #27
        void rgbReader_FrameArrived(object sender, ColorFrameArrivedEventArgs e)
        {
            using (var frame = e.FrameReference.AcquireFrame())
            {
                if (frame != null)
                {
                    var width  = frame.FrameDescription.Width;
                    var height = frame.FrameDescription.Height;
                    var bitmap = frame.ToBitmap();
                    var image  = bitmap.ToOpenCVImage <Bgr, byte>().Mat;

                    //do something here with the IImage

                    int frameSkip = 1;
                    //every 10 frames

                    if (++frameCount == frameSkip)
                    {
                        frameCount = 0;
                        using (VectorOfInt ids = new VectorOfInt())
                            using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
                                using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
                                {
                                    ArucoInvoke.DetectMarkers(image, ArucoDictionary, corners, ids, _detectorParameters, rejected);

                                    if (ids.Size > 0)
                                    {
                                        ArucoInvoke.RefineDetectedMarkers(image, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, _detectorParameters);

                                        ArucoInvoke.DrawDetectedMarkers(image, corners, ids, new MCvScalar(0, 255, 0));

                                        if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                                        {
                                            ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                                            for (int i = 0; i < ids.Size; i++)
                                            {
                                                using (Mat rvecmat = rvecs.Row(i))
                                                    using (Mat tvecmat = tvecs.Row(i))
                                                        using (VectorOfDouble rvec = new VectorOfDouble())
                                                            using (VectorOfDouble tvec = new VectorOfDouble())
                                                            {
                                                                double[] values = new double[3];
                                                                rvecmat.CopyTo(values);
                                                                rvec.Push(values);
                                                                tvecmat.CopyTo(values);
                                                                tvec.Push(values);

                                                                ArucoInvoke.DrawAxis(image, _cameraMatrix, _distCoeffs, rvec, tvec,
                                                                                     markersLength * 0.5f);
                                                            }
                                            }
                                        }

                                        if (calibrate)
                                        {
                                            _allCorners.Push(corners);
                                            _allIds.Push(ids);
                                            _markerCounterPerFrame.Push(new int[] { corners.Size });
                                            _imageSize  = image.Size;
                                            calibrated += 1;

                                            if (calibrated >= calibrationFreq)
                                            {
                                                calibrate = false;
                                            }
                                        }

                                        int totalPoints = _markerCounterPerFrame.ToArray().Sum();
                                        if (calibrated >= calibrationFreq && totalPoints > 0)
                                        {
                                            ArucoInvoke.CalibrateCameraAruco(_allCorners, _allIds, _markerCounterPerFrame, ArucoBoard, _imageSize,
                                                                             _cameraMatrix, _distCoeffs, null, null, CalibType.Default, new MCvTermCriteria(30, double.Epsilon));

                                            _allCorners.Clear();
                                            _allIds.Clear();
                                            _markerCounterPerFrame.Clear();
                                            _imageSize = System.Drawing.Size.Empty;
                                            calibrated = 0;
                                            Console.WriteLine("Calibrated");
                                        }
                                    }
                                }
                    }
                    //end doing something

                    this.background = image.Bitmap.XNATextureFromBitmap(background);
                    bitmap.Dispose();
                    image.Dispose();
                }
            }
        }
Code Example #28
        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. 
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor = 1.1,
            int minNeighbors = 3,
            HaarDetectionType flags = HaarDetectionType.Zero,
            Size? minSize = null,
            Size? maxSize = null,
            bool outputRejectLevels = false)
        {
            if (disposed)
                throw new ObjectDisposedException("CascadeClassifier");
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
            using (var rejectLevelsVec = new VectorOfInt32())
            using (var levelWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale(
                    ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0);

                rejectLevels = rejectLevelsVec.ToArray();
                levelWeights = levelWeightsVec.ToArray();
                return objectsVec.ToArray();
            }
        }
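A minimal usage sketch for the overload above (the cascade file name and image path are placeholders, not taken from the original source):

        var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
        Mat img = Cv2.ImRead("people.png");
        Rect[] faces = cascade.DetectMultiScale(
            img, out int[] rejectLevels, out double[] levelWeights,
            scaleFactor: 1.1, minNeighbors: 3, outputRejectLevels: true);
        // With outputRejectLevels = true, the three arrays line up index by index.
        for (int i = 0; i < faces.Length; i++)
            Console.WriteLine($"{faces[i]}: level={rejectLevels[i]}, weight={levelWeights[i]:F2}");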
Code Example #29
File: AutoTestVarious.cs  Project: Delaley/emgucv
      public void TestProjectPoints()
      {
         Mat cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
         CvInvoke.SetIdentity(cameraMatrix, new MCvScalar(1));

         Mat distortionCoeff = new Mat(8, 1, DepthType.Cv64F, 1);
         distortionCoeff.SetTo(new MCvScalar());

         VectorOfDouble rotationVector = new VectorOfDouble(new double[]{0,0,0});
         VectorOfDouble translationVector = new VectorOfDouble(new double[] { 0, 0, 0 });
                 
         MCvPoint3D32f point = new MCvPoint3D32f(12, 32, 9);

         PointF[] points = CvInvoke.ProjectPoints(
            new MCvPoint3D32f[] { point }, 
            rotationVector, 
            translationVector, 
            cameraMatrix, 
            distortionCoeff);
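         // With an identity camera matrix, zero distortion and zero rotation/translation,
         // the pinhole projection of (12, 32, 9) is (X/Z, Y/Z) = (12/9, 32/9) ≈ (1.33, 3.56).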
      }
Code Example #30
File: WBDetector.cs  Project: zanker99/emgucv
 /// <summary>
 /// Detect objects on image using WaldBoost detector.
 /// </summary>
 /// <param name="image">Input image for detection</param>
 /// <param name="bboxes">Bounding boxes coordinates output vector</param>
 /// <param name="confidences">Confidence values for bounding boxes output vector</param>
 public void Detect(Mat image, VectorOfRect bboxes, VectorOfDouble confidences)
 {
     XObjdetectInvoke.cveWBDetectorDetect(_ptr, image, bboxes, confidences);
 }
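A minimal sketch of consuming the two output vectors (here "detector" is an already trained WBDetector and "frame" an input Mat; both are placeholders, not from the original file):

 using (VectorOfRect boxes = new VectorOfRect())
 using (VectorOfDouble confidences = new VectorOfDouble())
 {
     detector.Detect(frame, boxes, confidences);
     // The two vectors pair up: confidences[i] belongs to boxes[i].
     Rectangle[] r = boxes.ToArray();
     double[] c = confidences.ToArray();
     for (int i = 0; i < r.Length; i++)
         Console.WriteLine("{0} confidence={1:F3}", r[i], c[i]);
 }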
Code Example #31
File: HOGDescriptor.cs  Project: kaorun55/opencvsharp
        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="foundWeights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold. 
        /// When detected, some objects can be covered by many rectangles. 0 means not to perform grouping.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, out double[] foundWeights,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            using (var foundWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, foundWeightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, scale, groupThreshold);
                foundWeights = foundWeightsVec.ToArray();
                return flVec.ToArray();
            }
        }
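A minimal usage sketch (assuming the built-in people detector, HOGDescriptor.GetDefaultPeopleDetector(), and a placeholder image path; neither comes from the original file):

        var hog = new HOGDescriptor();
        hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
        Mat img = Cv2.ImRead("street.png");
        // weights[i] is the SVM confidence for people[i].
        Rect[] people = hog.DetectMultiScale(img, out double[] weights, winStride: new Size(8, 8));
        for (int i = 0; i < people.Length; i++)
            Console.WriteLine($"{people[i]}: weight={weights[i]:F2}");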
Code Example #32
        /// <summary>
        /// Performs images matching.
        /// </summary>
        /// <param name="features">Features of the source images</param>
        /// <param name="mask">Mask indicating which image pairs must be matched</param>
        /// <returns>Found pairwise matches</returns>
        public virtual MatchesInfo[] Apply(
            IEnumerable<ImageFeatures> features, Mat? mask = null)
        {
            if (features == null)
            {
                throw new ArgumentNullException(nameof(features));
            }
            ThrowIfDisposed();

            var featuresArray = features.CastOrToArray();

            if (featuresArray.Length == 0)
            {
                throw new ArgumentException("Empty features array", nameof(features));
            }

            var keypointVecs   = new VectorOfKeyPoint?[featuresArray.Length];
            var wImageFeatures = new WImageFeatures[featuresArray.Length];

            try
            {
                for (int i = 0; i < featuresArray.Length; i++)
                {
                    if (featuresArray[i].Descriptors == null)
                    {
                        throw new ArgumentException("features contain null descriptor mat", nameof(features));
                    }
                    featuresArray[i].Descriptors.ThrowIfDisposed();

                    keypointVecs[i]   = new VectorOfKeyPoint();
                    wImageFeatures[i] = new WImageFeatures
                    {
                        ImgIdx      = featuresArray[i].ImgIdx,
                        ImgSize     = featuresArray[i].ImgSize,
                        Keypoints   = keypointVecs[i]!.CvPtr,
                        Descriptors = featuresArray[i].Descriptors.CvPtr,
                    };
                }

                using var srcImgIndexVecs = new VectorOfInt32();
                using var dstImgIndexVecs = new VectorOfInt32();
                using var matchesVec      = new VectorOfVectorDMatch();
                using var inlinersMaskVec = new VectorOfVectorByte();
                using var numInliersVecs  = new VectorOfInt32();
                using var hVecs           = new VectorOfMat();
                using var confidenceVecs  = new VectorOfDouble();
                NativeMethods.HandleException(
                    NativeMethods.stitching_FeaturesMatcher_apply2(
                        ptr,
                        wImageFeatures, wImageFeatures.Length,
                        mask?.CvPtr ?? IntPtr.Zero,
                        srcImgIndexVecs.CvPtr,
                        dstImgIndexVecs.CvPtr,
                        matchesVec.CvPtr,
                        inlinersMaskVec.CvPtr,
                        numInliersVecs.CvPtr,
                        hVecs.CvPtr,
                        confidenceVecs.CvPtr
                        ));

                var srcImgIndices = srcImgIndexVecs.ToArray();
                var dstImgIndices = dstImgIndexVecs.ToArray();
                var matches       = matchesVec.ToArray();
                var inlinersMasks = inlinersMaskVec.ToArray();
                var numInliers    = numInliersVecs.ToArray();
                var hs            = hVecs.ToArray();
                var confidences   = confidenceVecs.ToArray();
                var result        = new MatchesInfo[srcImgIndices.Length];
                for (int i = 0; i < srcImgIndices.Length; i++)
                {
                    result[i] = new MatchesInfo(
                        srcImgIndices[i],
                        dstImgIndices[i],
                        matches[i],
                        inlinersMasks[i],
                        numInliers[i],
                        hs[i],
                        confidences[i]);
                }
                return(result);
            }
            finally
            {
                foreach (var vec in keypointVecs)
                {
                    vec?.Dispose();
                }
                GC.KeepAlive(this);
            }
        }
Code Example #33
File: HOGDescriptor.cs  Project: kaorun55/opencvsharp
        /// <summary>
        /// Evaluates the specified ROI and returns a confidence value for each location over multiple scales.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="foundLocations">Detected object boundaries (output).</param>
        /// <param name="locations">Per-scale detection ROIs, each with its scale, locations and confidences (output).</param>
        /// <param name="hitThreshold">Threshold for the distance between features and the SVM classifying plane.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold when grouping; 0 means no grouping.</param>
        public void DetectMultiScaleROI(
            Mat img,
            out Rect[] foundLocations,
            out DetectionROI[] locations,
            double hitThreshold = 0,
            int groupThreshold = 0)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (var flVec = new VectorOfRect())
            using (var scalesVec = new VectorOfDouble())
            using (var locationsVec = new VectorOfVectorPoint())
            using (var confidencesVec = new VectorOfVectorDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScaleROI(
                    ptr, img.CvPtr, flVec.CvPtr, 
                    scalesVec.CvPtr, locationsVec.CvPtr, confidencesVec.CvPtr,
                    hitThreshold, groupThreshold);
                foundLocations = flVec.ToArray();

                double[] s = scalesVec.ToArray();
                Point[][] l = locationsVec.ToArray();
                double[][] c = confidencesVec.ToArray();

                if(s.Length != l.Length || l.Length != c.Length)
                    throw new OpenCvSharpException("Invalid result data 'locations'");
                locations = new DetectionROI[s.Length];
                for (int i = 0; i < s.Length; i++)
                {
                    locations[i] = new DetectionROI
                    {
                        Scale = s[i],
                        Locations = l[i],
                        Confidences = c[i]
                    };
                }
            }
        }
Code Example #34
File: CudaHOG.cs  Project: neutmute/emgucv
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The CudaImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(IInputArray image)
 {
    using (Util.VectorOfRect vr = new VectorOfRect())
    using (Util.VectorOfDouble vd = new VectorOfDouble())
    {
       DetectMultiScale(image, vr, vd);
       Rectangle[] location = vr.ToArray();
       double[] weight = vd.ToArray();
       MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
       for (int i = 0; i < result.Length; i++)
       {
          MCvObjectDetection od = new MCvObjectDetection();
          od.Rect = location[i];
          od.Score = (float)weight[i];
          result[i] = od;
       }
       return result;
    }
 }
Code Example #35
        String getPositionHashKey(String framePrefix, Vector3 position, Vector3 rotation, VectorOfDouble currentPositionJointAngles)
        {
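            // rD(...) is presumably a rounding/formatting helper (not shown in this snippet),
            // used so the key stays stable against tiny floating-point differences.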
            String pos   = rD(position.X) + "," + rD(position.Y) + "," + rD(position.Z);
            String rot   = rD(rotation.X) + "," + rD(rotation.Y) + "," + rD(rotation.Z);
            String joint = "";

            foreach (double d in currentPositionJointAngles)
            {
                joint += rD(d) + ",";
            }
            return(framePrefix + "@" + pos + "," + rot + "," + joint);
        }
Code Example #36
File: HOGDescriptor.cs  Project: kaorun55/opencvsharp
        /// <summary>
        /// Performs object detection without a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="weights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane. 
        /// Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient). 
        /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="searchLocations"></param>
        /// <returns>Left-top corner points of detected objects boundaries.</returns>
        public virtual Point[] Detect(Mat img, out double[] weights, 
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, Point[] searchLocations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var weightsVec = new VectorOfDouble())
            {
                int slLength = (searchLocations != null) ? searchLocations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_detect(ptr, img.CvPtr, flVec.CvPtr, weightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, searchLocations, slLength);
                weights = weightsVec.ToArray();
                return flVec.ToArray();
            }
        }
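A follow-up sketch: unlike DetectMultiScale, Detect scans at a single scale and returns only the top-left corners of hits, so each point pairs with the descriptor's detection window (hog and img are hypothetical, as above; WinSize is assumed to expose that window size):

        Point[] hits = hog.Detect(img, out double[] w);
        var rects = new Rect[hits.Length];
        for (int i = 0; i < hits.Length; i++)
            rects[i] = new Rect(hits[i].X, hits[i].Y, hog.WinSize.Width, hog.WinSize.Height);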
Code Example #37
File: CvInvokeCore.cs  Project: Delaley/emgucv
      /// <summary>
      /// The function cvAvgSdv calculates the average value and standard deviation of array elements, independently for each channel
      /// </summary>
      /// <remarks>If the array is IplImage and COI is set, the function processes the selected channel only and stores the average and standard deviation to the first components of output scalars (M0 and S0).</remarks>
      /// <param name="arr">The array</param>
      /// <param name="mean">Pointer to the mean value</param>
      /// <param name="stdDev">Pointer to the standard deviation</param>
      /// <param name="mask">The optional operation mask</param>
      public static void MeanStdDev(IInputArray arr, ref MCvScalar mean, ref MCvScalar stdDev, IInputArray mask = null)
      {
         using (VectorOfDouble meanVec = new VectorOfDouble(4))
         using (VectorOfDouble stdDevVec = new VectorOfDouble(4))
         {
            MeanStdDev(arr, meanVec, stdDevVec, mask);
            double[] meanVal = meanVec.ToArray();
            double[] stdVal = stdDevVec.ToArray();
            mean.V0 = meanVal[0];
            stdDev.V0 = stdVal[0];

            if (meanVal.Length > 1)
            {
               mean.V1 = meanVal[1];
               stdDev.V1 = stdVal[1];
            }

            if (meanVal.Length > 2)
            {
               mean.V2 = meanVal[2];
               stdDev.V2 = stdVal[2];
            }

            if (meanVal.Length > 3)
            {
               mean.V3 = meanVal[3];
               stdDev.V3 = stdVal[3];
            }
         }
      }
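A minimal usage sketch for the overload above ("img" is an already loaded BGR image; the variable is a placeholder, not from the original file):

         MCvScalar mean = new MCvScalar();
         MCvScalar stdDev = new MCvScalar();
         CvInvoke.MeanStdDev(img, ref mean, ref stdDev);
         // V0..V3 hold the per-channel results, e.g. the blue channel of a BGR image:
         Console.WriteLine("Blue channel: mean={0:F1}, stddev={1:F1}", mean.V0, stdDev.V0);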
Code Example #38
File: HOGDescriptor.cs  Project: kaorun55/opencvsharp
        /// <summary>
        /// Evaluates the specified ROI locations and returns a confidence value for each of them.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="locations">Locations (top-left window corners) to evaluate.</param>
        /// <param name="foundLocations">Locations where a detection was found (output).</param>
        /// <param name="confidences">Confidence value for each evaluated location (output).</param>
        /// <param name="hitThreshold">Threshold for the distance between features and the SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Padding around the source image.</param>
        public void DetectROI(
            Mat img, Point[] locations, out Point[] foundLocations, out double[] confidences,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            if (locations == null)
                throw new ArgumentNullException("locations");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var cVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectROI(ptr, img.CvPtr, locations, locations.Length,
                    flVec.CvPtr, cVec.CvPtr, hitThreshold, winStride0, padding0);
                foundLocations = flVec.ToArray();
                confidences = cVec.ToArray();
            }
        }
Code Example #39
        private void VideoSource_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
        {
            if (pictureBoxVideo.Image != null)
            {
                pictureBoxVideo.Image.Dispose();
            }
            if (eventArgs.Frame != null)
            {
                Image<Bgr, Byte> frameimage = eventArgs.Frame.ToImage<Bgr, Byte>().Clone();
                _frameCopy = frameimage.Mat;

                using (VectorOfInt ids = new VectorOfInt())
                    using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
                        using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
                        {
                            //DetectorParameters p = DetectorParameters.GetDefault();
                            ArucoInvoke.DetectMarkers(_frameCopy, ArucoDictionary, corners, ids, _detectorParameters, rejected);

                            if (ids.Size > 0)
                            {
                                ArucoInvoke.RefineDetectedMarkers(_frameCopy, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, _detectorParameters);
                                //cameraButton.Text = "Calibrate camera";
                                this.Invoke((Action) delegate
                                {
                                    useThisFrameButton.Enabled = true;
                                });
                                ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));

                                if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                                {
                                    ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                                    for (int i = 0; i < ids.Size; i++)
                                    {
                                        using (Mat rvecMat = rvecs.Row(i))
                                            using (Mat tvecMat = tvecs.Row(i))
                                                using (VectorOfDouble rvec = new VectorOfDouble())
                                                    using (VectorOfDouble tvec = new VectorOfDouble())
                                                    {
                                                        double[] values = new double[3];
                                                        rvecMat.CopyTo(values);
                                                        rvec.Push(values);
                                                        tvecMat.CopyTo(values);
                                                        tvec.Push(values);
                                                        if (ids.Size == 1)
                                                        {
                                                            UpdatePosition(tvec, rvec);
                                                        }

                                                        ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                                                             markersLength * 0.5f);
                                                    }
                                    }
                                }

                                if (_useThisFrame)
                                {
                                    _allCorners.Push(corners);
                                    _allIds.Push(ids);
                                    _markerCounterPerFrame.Push(new int[] { corners.Size });
                                    _imageSize = _frameCopy.Size;
                                    UpdateMessage(String.Format("Using {0} points", _markerCounterPerFrame.ToArray().Sum()));
                                    _useThisFrame = false;
                                    CalibrateCamera();
                                }
                            }
                            else
                            {
                                this.Invoke((Action) delegate
                                {
                                    useThisFrameButton.Enabled = false;
                                });

                                //cameraButton.Text = "Stop Capture";
                            }
                            pictureBoxVideo.Image = _frameCopy.ToBitmap();
                        }
            }
        }
Code Example #40
File: HOGDescriptor.cs  Project: kaorun55/opencvsharp
        /// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList">Grouped rectangles (output).</param>
        /// <param name="weights">Weights of the grouped rectangles (output).</param>
        /// <param name="groupThreshold">Minimum possible number of rectangles minus 1; a group must contain more rectangles than this to be kept.</param>
        /// <param name="eps">Relative difference between rectangle sides required to merge them into a group.</param>
        public void GroupRectangles(out Rect[] rectList, out double[] weights, int groupThreshold, double eps)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");

            using (var rectListVec = new VectorOfRect())
            using (var weightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_groupRectangles(
                    ptr, rectListVec.CvPtr, weightsVec.CvPtr, groupThreshold, eps);
                rectList = rectListVec.ToArray();
                weights = weightsVec.ToArray();
            }
        }
Code Example #41
File: Form2.cs  Project: ElenXYZ/Emgu
        private void btnCamera_Click(object sender, EventArgs e)
        {
            capture = new Capture();
            while (CvInvoke.WaitKey(30) != 'q')
            {
                src = capture.QueryFrame();
                if (src != null && !src.IsEmpty)
                {
                    CvInvoke.CvtColor(src, gray, ColorConversion.Bgr2Gray);
                    CvInvoke.Blur(gray, bw, new Size(3, 3), new Point(-1, -1));
                    // Edge-detect the blurred image
                    CvInvoke.Canny(bw, bw, 80, 240, 3);
                    CvInvoke.Imshow("bw", bw);

                    // Find contours

                    CvInvoke.FindContours(bw.Clone(), contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
                    src.CopyTo(dst);
                    for (int i = 0; i < contours.Size; i++)
                    {
                        // Approximate contour with accuracy proportional
                        // to the contour perimeter

                        CvInvoke.ApproxPolyDP(contours[i], approx, CvInvoke.ArcLength(contours[i], true) * 0.02, true);

                        // Skip small or non-convex objects
                        if (Math.Abs(CvInvoke.ContourArea(contours[i])) < 100 || !CvInvoke.IsContourConvex(approx))
                        {
                            continue;
                        }

                        if (approx.Size == 3)
                        {
                            setLabel(dst, "TRI", contours[i]); // Triangles
                        }
                        else if (approx.Size >= 4 && approx.Size <= 6)
                        {
                            // Number of vertices of polygonal curve
                            int vtc = approx.Size;

                            // Get the cosines of all corners
                            VectorOfDouble cos = new VectorOfDouble();
                            List<double> angles = new List<double>();
                            for (int j = 2; j < vtc + 1; j++)
                            {
                                angles.Add(angle(approx[j % vtc], approx[j - 2], approx[j - 1]));
                            }

                            // Sort ascending before pushing, so that cos[0] and cos[Size - 1]
                            // hold the lowest and the highest cosine
                            angles.Sort();
                            cos.Push(angles.ToArray());

                            double mincos = cos[0];
                            double maxcos = cos[cos.Size - 1];

                            // Use the cosines obtained above and the number of vertices
                            // to determine the shape of the contour
                            // (cos 90° = 0 for a square, cos 108° ≈ -0.31 for a pentagon, cos 120° = -0.5 for a hexagon)
                            if (vtc == 4 && mincos >= -0.1 && maxcos <= 0.3)
                            {
                                setLabel(dst, "RECT", contours[i]);
                            }
                            else if (vtc == 5 && mincos >= -0.34 && maxcos <= -0.27)
                            {
                                setLabel(dst, "PENTA", contours[i]);
                            }
                            else if (vtc == 6 && mincos >= -0.55 && maxcos <= -0.45)
                            {
                                setLabel(dst, "HEXA", contours[i]);
                            }
                        }
                        else
                        {
                            // Detect and label circles
                            double area = CvInvoke.ContourArea(contours[i]);
                            //cv::Rect r = cv::boundingRect(contours[i]);
                            Rectangle r      = CvInvoke.BoundingRectangle(contours[i]);
                            int       radius = r.Width / 2;

                            if (Math.Abs(1 - ((double)r.Width / r.Height)) <= 0.2 &&
                                Math.Abs(1 - (area / (Math.PI * (radius * radius)))) <= 0.2)
                            {
                                setLabel(dst, "CIR", contours[i]);
                            }
                        }
                    }
                    CvInvoke.Imshow("src", src);
                    CvInvoke.Imshow("dst", dst);
                }
                else
                {
                    break;
                }
            }
        }
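The angle(...) helper used above is not part of this snippet; a plausible implementation, consistent with how it is called (the third argument is the shared vertex), returns the cosine of the corner angle via the normalized dot product:

        static double angle(Point pt1, Point pt2, Point pt0)
        {
            // Vectors from the vertex pt0 to its two neighbours
            double dx1 = pt1.X - pt0.X;
            double dy1 = pt1.Y - pt0.Y;
            double dx2 = pt2.X - pt0.X;
            double dy2 = pt2.Y - pt0.Y;
            // Cosine of the angle between the two vectors (small epsilon avoids division by zero)
            return (dx1 * dx2 + dy1 * dy2) /
                   Math.Sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
        }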
Code Example #42
File: HOGDescriptor.cs  Project: neutmute/emgucv
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The image to search in</param>
 /// <param name="hitThreshold">
 /// Threshold for the distance between features and SVM classifying plane.
 /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
 /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.
 ///</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding"></param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="finalThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping. Should be an integer if not using meanshift grouping. Use 2.0 for default</param>
 /// <param name="useMeanshiftGrouping">If true, it will use meanshift grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(
    IInputArray image,
    double hitThreshold = 0,
    Size winStride = new Size(),
    Size padding = new Size(),
    double scale = 1.05,
    double finalThreshold = 2.0,
    bool useMeanshiftGrouping = false)
 {
    using (Util.VectorOfRect vr = new VectorOfRect())
    using (Util.VectorOfDouble vd = new VectorOfDouble())
    using (InputArray iaImage = image.GetInputArray())
    {
       CvInvoke.cveHOGDescriptorDetectMultiScale(_ptr, iaImage, vr, vd, hitThreshold, ref winStride, ref padding, scale,
          finalThreshold, useMeanshiftGrouping);
       Rectangle[] location = vr.ToArray();
       double[] weight = vd.ToArray();
       MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
       for (int i = 0; i < result.Length; i++)
       {
          MCvObjectDetection od = new MCvObjectDetection();
          od.Rect = location[i];
          od.Score = (float)weight[i];
          result[i] = od;
       }
       return result;
    }
 }
Code Example #43
        private void ProcessFrame(object sender, EventArgs arg)
        {
            if (_capture != null && _capture.Ptr != IntPtr.Zero)
            {
                _capture.Retrieve(_frame, 0);
                _frame.CopyTo(_frameCopy);

                //cameraImageBox.Image = _frame;
                using (VectorOfInt ids = new VectorOfInt())
                    using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
                        using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
                        {
                            //DetectorParameters p = DetectorParameters.GetDefault();
                            ArucoInvoke.DetectMarkers(_frameCopy, ArucoDictionary, corners, ids, _detectorParameters, rejected);

                            if (ids.Size > 0)
                            {
                                ArucoInvoke.RefineDetectedMarkers(_frameCopy, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, _detectorParameters);
                                //cameraButton.Text = "Calibrate camera";
                                this.Invoke((Action) delegate
                                {
                                    useThisFrameButton.Enabled = true;
                                });
                                ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));

                                if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                                {
                                    ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                                    for (int i = 0; i < ids.Size; i++)
                                    {
                                        using (Mat rvecMat = rvecs.Row(i))
                                            using (Mat tvecMat = tvecs.Row(i))
                                                using (VectorOfDouble rvec = new VectorOfDouble())
                                                    using (VectorOfDouble tvec = new VectorOfDouble())
                                                    {
                                                        double[] values = new double[3];
                                                        rvecMat.CopyTo(values);
                                                        rvec.Push(values);
                                                        tvecMat.CopyTo(values);
                                                        tvec.Push(values);


                                                        ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                                                             markersLength * 0.5f);
                                                    }
                                    }
                                }

                                if (_useThisFrame)
                                {
                                    _allCorners.Push(corners);
                                    _allIds.Push(ids);
                                    _markerCounterPerFrame.Push(new int[] { corners.Size });
                                    _imageSize = _frame.Size;
                                    UpdateMessage(String.Format("Using {0} points", _markerCounterPerFrame.ToArray().Sum()));
                                    _useThisFrame = false;
                                }
                            }
                            else
                            {
                                this.Invoke((Action) delegate
                                {
                                    useThisFrameButton.Enabled = false;
                                });

                                //cameraButton.Text = "Stop Capture";
                            }
                            cameraImageBox.Image = _frameCopy.Clone();
                        }
            }
            else
            {
                UpdateMessage("VideoCapture was not created");
            }
        }