Example #1
        public void Run()
        {
            using (var img1 = new Mat(FilePath.Image.SurfBox))
            using (var img2 = new Mat(FilePath.Image.SurfBoxinscene))
            using (var descriptors1 = new Mat())
            using (var descriptors2 = new Mat())
            using (var matcher = new BFMatcher(NormTypes.L2SQR))
            using (var kaze = KAZE.Create())
            {
                KeyPoint[] keypoints1, keypoints2;
                kaze.DetectAndCompute(img1, null, out keypoints1, descriptors1);
                kaze.DetectAndCompute(img2, null, out keypoints2, descriptors2);

                DMatch[][] matches = matcher.KnnMatch(descriptors1, descriptors2, 2);
                using (Mat mask = new Mat(matches.Length, 1, MatType.CV_8U))
                {
                    mask.SetTo(new Scalar(255));
                    // Filter matches with Lowe's ratio test, then vote on consistent scale/orientation
                    VoteForUniqueness(matches, mask);
                    int nonZero = VoteForSizeAndOrientation(keypoints2, keypoints1, matches, mask, 1.5f, 20);

                    List<Point2f> obj = new List<Point2f>();
                    List<Point2f> scene = new List<Point2f>();
                    List<DMatch> goodMatchesList = new List<DMatch>();
                    // Iterate through the mask, pulling out only the non-zero entries: those are the good matches
                    MatIndexer<byte> maskIndexer = mask.GetGenericIndexer<byte>();
                    for (int i = 0; i < mask.Rows; i++)
                    {
                        if (maskIndexer[i] > 0)
                        {
                            obj.Add(keypoints1[matches[i][0].QueryIdx].Pt);
                            scene.Add(keypoints2[matches[i][0].TrainIdx].Pt);
                            goodMatchesList.Add(matches[i][0]);
                        }
                    }

                    List<Point2d> objPts = obj.ConvertAll(Point2fToPoint2d);
                    List<Point2d> scenePts = scene.ConvertAll(Point2fToPoint2d);
                    if (nonZero >= 4)
                    {
                        Mat homography = Cv2.FindHomography(objPts, scenePts, HomographyMethods.Ransac, 1.5, mask);
                        nonZero = Cv2.CountNonZero(mask);   // RANSAC inlier count

                        if (homography != null && !homography.Empty())
                        {
                            Point2f[] objCorners = { new Point2f(0, 0),
                                      new Point2f(img1.Cols, 0),
                                      new Point2f(img1.Cols, img1.Rows),
                                      new Point2f(0, img1.Rows) };

                            Point2d[] sceneCorners = MyPerspectiveTransform3(objCorners, homography);

                            //This is a good concat horizontal
                            using (Mat img3 = new Mat(Math.Max(img1.Height, img2.Height), img2.Width + img1.Width, MatType.CV_8UC3))
                            using (Mat left = new Mat(img3, new Rect(0, 0, img1.Width, img1.Height)))
                            using (Mat right = new Mat(img3, new Rect(img1.Width, 0, img2.Width, img2.Height)))
                            {
                                img1.CopyTo(left);
                                img2.CopyTo(right);

                                // After FindHomography the mask has one row per point pair in objPts,
                                // so its length matches goodMatchesList and can serve as matchesMask.
                                byte[] maskBytes = new byte[mask.Rows * mask.Cols];
                                mask.GetArray(0, 0, maskBytes);
                                Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, goodMatchesList, img3, Scalar.All(-1), Scalar.All(-1), maskBytes, DrawMatchesFlags.NotDrawSinglePoints);

                                List<List<Point>> listOfListOfPoint2D = new List<List<Point>>();
                                List<Point> listOfPoint2D = new List<Point>();
                                listOfPoint2D.Add(new Point(sceneCorners[0].X + img1.Cols, sceneCorners[0].Y));
                                listOfPoint2D.Add(new Point(sceneCorners[1].X + img1.Cols, sceneCorners[1].Y));
                                listOfPoint2D.Add(new Point(sceneCorners[2].X + img1.Cols, sceneCorners[2].Y));
                                listOfPoint2D.Add(new Point(sceneCorners[3].X + img1.Cols, sceneCorners[3].Y));
                                listOfListOfPoint2D.Add(listOfPoint2D);
                                img3.Polylines(listOfListOfPoint2D, true, Scalar.LimeGreen, 2);

                                //This works too
                                //Cv2.Line(img3, scene_corners[0] + new Point2d(img1.Cols, 0), scene_corners[1] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                                //Cv2.Line(img3, scene_corners[1] + new Point2d(img1.Cols, 0), scene_corners[2] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                                //Cv2.Line(img3, scene_corners[2] + new Point2d(img1.Cols, 0), scene_corners[3] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                                //Cv2.Line(img3, scene_corners[3] + new Point2d(img1.Cols, 0), scene_corners[0] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);

                                img3.SaveImage("Kaze_Output.png");
                                Window.ShowImages(img3);
                            }
                        }
                    }
                }
            }
        }
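The Run method above calls two helpers that are not shown in this snippet, Point2fToPoint2d and MyPerspectiveTransform3. A minimal sketch of what they might look like, assuming the Cv2.PerspectiveTransform overload that takes a Point2f[] and the homography estimated above (the implementations are illustrative, not the original ones):

        static Point2d Point2fToPoint2d(Point2f p)
        {
            // Widen a single-precision point to double precision.
            return new Point2d(p.X, p.Y);
        }

        static Point2d[] MyPerspectiveTransform3(Point2f[] src, Mat homography)
        {
            // Map the object corners into the scene with the homography,
            // then convert the result to Point2d for drawing.
            Point2f[] dst = Cv2.PerspectiveTransform(src, homography);
            return Array.ConvertAll(dst, p => new Point2d(p.X, p.Y));
        }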
        /// <summary>
        /// Filter an image with the specified label values.
        /// </summary>
        /// <param name="src">Source image.</param>
        /// <param name="dst">Destination image.</param>
        /// <param name="labelValues">Label values.</param>
        /// <returns>Filtered image.</returns>
        public Mat FilterByLabels(Mat src, Mat dst, IEnumerable<int> labelValues)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }
            if (labelValues == null)
            {
                throw new ArgumentNullException(nameof(labelValues));
            }
            var labelArray = labelValues.ToArray();

            if (labelArray.Length == 0)
            {
                throw new ArgumentException("empty labelValues");
            }

            foreach (var labelValue in labelArray)
            {
                if (labelValue < 0 || labelValue >= LabelCount)
                {
                    throw new ArgumentException("0 <= x < LabelCount");
                }
            }

            // Prepare a mask Mat and use it to cut out (AND) the matching regions
            using (var mask = GetLabelMask(labelArray[0]))
            {
                for (var i = 1; i < labelArray.Length; i++)
                {
                    using (var maskI = GetLabelMask(labelArray[i]))
                    {
                        Cv2.BitwiseOr(mask, maskI, mask);
                    }
                }
                src.CopyTo(dst, mask);
                return dst;
            }
        }
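For context, a hypothetical usage sketch, assuming this method is a member of OpenCvSharp's ConnectedComponents result (e.g. as returned by Cv2.ConnectedComponentsEx); the file name and label values are purely illustrative:

        using (var src = Cv2.ImRead("blobs.png", ImreadModes.GrayScale))
        using (var bin = src.Threshold(128, 255, ThresholdTypes.Binary))
        using (var dst = new Mat())
        {
            ConnectedComponents cc = Cv2.ConnectedComponentsEx(bin);
            // Keep only the pixels belonging to labels 1 and 2; everything else stays black.
            cc.FilterByLabels(src, dst, new[] { 1, 2 });
            Cv2.ImShow("filtered", dst);
            Cv2.WaitKey();
        }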
    public void generate_view(ref sl.Objects objects, sl.Pose current_camera_pose, ref OpenCvSharp.Mat tracking_view, bool tracking_enabled)
    {
        // Transform object positions into the WORLD reference frame
        for (int i = 0; i < objects.numObject; i++)
        {
            Vector3 pos     = objects.objectData[i].position;
            Vector3 new_pos = Vector3.Transform(pos, current_camera_pose.rotation) + current_camera_pose.translation;
            // Write the transformed position back into the array element
            // (if ObjectData is a value type, assigning through a local copy would be lost)
            objects.objectData[i].position = new_pos;
        }

        // Initialize visualization
        if (!has_background_ready)
        {
            generateBackground();
        }

        background.CopyTo(tracking_view);
        // Scale
        drawScale(ref tracking_view);

        if (tracking_enabled)
        {
            // First add new points, and remove the ones that are too old
            ulong current_timestamp = objects.timestamp;
            addToTracklets(ref objects);
            detectUnchangedTrack(current_timestamp);
            pruneOldPoints(current_timestamp);

            // Draw all tracklets
            drawTracklets(ref tracking_view, current_camera_pose);
        }
        else
        {
            drawPosition(ref objects, ref tracking_view, current_camera_pose);
        }
    }
        /// <summary>
        /// Filter an image with the specified label values.
        /// </summary>
        /// <param name="src">Source image.</param>
        /// <param name="dst">Destination image.</param>
        /// <param name="labelValues">Label values.</param>
        /// <returns>Filtered image.</returns>
        public Mat FilterByLabels(Mat src, Mat dst, IEnumerable<int> labelValues)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            if (dst == null)
                throw new ArgumentNullException("dst");
            if (labelValues == null)
                throw new ArgumentNullException("labelValues");
            int[] labelArray = EnumerableEx.ToArray(labelValues);
            if (labelArray.Length == 0)
                throw new ArgumentException("empty labelValues");

            foreach (int labelValue in labelArray)
            {
                if (labelValue < 0 || labelValue >= LabelCount)
                    throw new ArgumentException("0 <= x < LabelCount");
            }

            // Prepare a mask Mat and use it to cut out (AND) the matching regions
            using (Mat mask = GetLabelMask(labelArray[0]))
            {
                for (int i = 1; i < labelArray.Length; i++)
                {
                    using (var maskI = GetLabelMask(labelArray[i]))
                    {
                        Cv2.BitwiseOr(mask, maskI, mask);
                    }
                }
                src.CopyTo(dst, mask);
                return dst;
            }
        }
Example #5
    // Render loop
    private void NativeWindow_Render(object sender, NativeWindowEventArgs e)
    {
        OpenGL.CoreUI.NativeWindow nativeWindow = (OpenGL.CoreUI.NativeWindow)sender;
        Gl.Viewport(0, 0, (int)nativeWindow.Width, (int)nativeWindow.Height);
        Gl.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);

        ERROR_CODE err = ERROR_CODE.FAILURE;

        if (viewer.isAvailable() && zedCamera.Grab(ref runtimeParameters) == ERROR_CODE.SUCCESS)
        {
            foreach (var it in obj_runtime_parameters.objectClassFilter)
            {
                obj_runtime_parameters.objectConfidenceThreshold[it] = detection_confidence;
            }

            // Retrieve Objects
            err = zedCamera.RetrieveObjects(ref objects, ref obj_runtime_parameters);

            if (err == ERROR_CODE.SUCCESS && objects.isNew != 0)
            {
                // Retrieve point cloud, camera poses and the left image
                zedCamera.RetrieveMeasure(pointCloud, MEASURE.XYZRGBA, MEM.CPU, pcRes);
                zedCamera.GetPosition(ref camWorldPose, REFERENCE_FRAME.WORLD);
                zedCamera.GetPosition(ref camCameraPose, REFERENCE_FRAME.CAMERA);
                zedCamera.RetrieveImage(imageLeft, VIEW.LEFT, MEM.CPU, displayRes);

                bool update_render_view   = true;
                bool update_3d_view       = true;
                bool update_tracking_view = true;
                int  nbBatches            = 0;

                if (USE_BATCHING)
                {
                    List<ObjectsBatch> objectsBatch = new List<ObjectsBatch>();
                    zedCamera.UpdateObjectsBatch(out nbBatches);
                    for (int i = 0; i < nbBatches; i++)
                    {
                        ObjectsBatch obj_batch = new ObjectsBatch();
                        zedCamera.GetObjectsBatch(i, ref obj_batch);
                        objectsBatch.Add(obj_batch);
                    }
                    batchHandler.push(camCameraPose, camWorldPose, imageLeft, pointCloud, ref objectsBatch);
                    batchHandler.pop(ref camCameraPose, ref camWorldPose, ref imageLeft, ref pointCloud, ref objects);
                    update_render_view = BatchSystemHandler.WITH_IMAGE_RETENTION ? Convert.ToBoolean(objects.isNew) : true;
                    update_3d_view     = BatchSystemHandler.WITH_IMAGE_RETENTION ? Convert.ToBoolean(objects.isNew) : true;
                }
                if (update_render_view)
                {
                    imageRenderLeft.CopyTo(imageLeftOcv);
                    TrackingViewer.render_2D(ref imageLeftOcv, imgScale, ref objects, true, isTrackingON);
                }
                if (update_3d_view)
                {
                    //Update GL View
                    viewer.update(pointCloud, objects, camWorldPose);
                    viewer.render();
                }
                if (update_tracking_view)
                {
                    trackViewGenerator.generate_view(ref objects, camCameraPose, ref imageTrackOcv, Convert.ToBoolean(objects.isTracked));
                }
            }

            if (isPlayback && zedCamera.GetSVOPosition() == zedCamera.GetSVONumberOfFrames())
            {
                return;
            }

            Cv2.ImShow(window_name, globalImage);
        }
    }
Example #6
 private static void VoteForUniqueness(DMatch[][] matches, Mat mask, float uniquenessThreshold = 0.80f)
 {
     byte[] maskData = new byte[matches.Length];
     GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned);
     using (Mat m = new Mat(matches.Length, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
     {
         mask.CopyTo(m);
         for (int i = 0; i < matches.Length; i++)
         {
             //Lowe's ratio test, also known as NNDR (Nearest Neighbor Distance Ratio)
             if ((matches[i][0].Distance / matches[i][1].Distance) <= uniquenessThreshold)
                 maskData[i] = 255;
             else
                 maskData[i] = 0;
         }
         m.CopyTo(mask);
     }
     maskHandle.Free();
 }
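In other words this is Lowe's ratio test: a match is kept only when its best distance is clearly smaller than the second-best one. For example, distances of 0.30 and 0.50 give a ratio of 0.60 ≤ 0.80 and the match survives, while 0.45 and 0.50 give 0.90 and the match is masked out.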
Example #7
        static int VoteForSizeAndOrientation(KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints, DMatch[][] matches, Mat mask, float scaleIncrement, int rotationBins)
        {
            int idx = 0;
            int nonZeroCount = 0;
            byte[] maskMat = new byte[mask.Rows];
            GCHandle maskHandle = GCHandle.Alloc(maskMat, GCHandleType.Pinned);
            using (Mat m = new Mat(mask.Rows, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
            {
                mask.CopyTo(m);
                List<float> logScale = new List<float>();
                List<float> rotations = new List<float>();
                double s, maxS, minS, r;
                maxS = -1.0e10f; minS = 1.0e10f;

                //if you get an exception here, it's because you're passing in the model and observed keypoints backwards.  Just switch the order.
                for (int i = 0; i < maskMat.Length; i++)
                {
                    if (maskMat[i] > 0)
                    {
                        KeyPoint observedKeyPoint = observedKeyPoints[i];
                        KeyPoint modelKeyPoint = modelKeyPoints[matches[i][0].TrainIdx];
                        s = Math.Log10(observedKeyPoint.Size / modelKeyPoint.Size);
                        logScale.Add((float)s);
                        maxS = s > maxS ? s : maxS;
                        minS = s < minS ? s : minS;

                        r = observedKeyPoint.Angle - modelKeyPoint.Angle;
                        r = r < 0.0f ? r + 360.0f : r;
                        rotations.Add((float)r);
                    }
                }

                int scaleBinSize = (int)Math.Ceiling((maxS - minS) / Math.Log10(scaleIncrement));
                if (scaleBinSize < 2)
                    scaleBinSize = 2;
                // Upper edge of the scale range = minS + (number of bins) * (bin width in log10 scale)
                float[] scaleRanges = { (float)minS, (float)(minS + scaleBinSize * Math.Log10(scaleIncrement)) };

                using (MatOfFloat scalesMat = new MatOfFloat(rows: logScale.Count, cols: 1, data: logScale.ToArray()))
                using (MatOfFloat rotationsMat = new MatOfFloat(rows: rotations.Count, cols: 1, data: rotations.ToArray()))
                using (MatOfFloat flagsMat = new MatOfFloat(logScale.Count, 1))
                using (Mat hist = new Mat())
                {
                    flagsMat.SetTo(new Scalar(0.0f));

                    int[] histSize = { scaleBinSize, rotationBins };
                    float[] rotationRanges = { 0.0f, 360.0f };
                    int[] channels = { 0, 1 };
                    Rangef[] ranges = { new Rangef(scaleRanges[0], scaleRanges[1]), new Rangef(rotationRanges[0], rotationRanges[1]) };
                    double minVal, maxVal;

                    Mat[] arrs = { scalesMat, rotationsMat };
                    Cv2.CalcHist(arrs, channels, null, hist, 2, histSize, ranges);
                    Cv2.MinMaxLoc(hist, out minVal, out maxVal);

                    Cv2.Threshold(hist, hist, maxVal * 0.5, 0, ThresholdTypes.Tozero);
                    Cv2.CalcBackProject(arrs, channels, hist, flagsMat, ranges);

                    MatIndexer<float> flagsMatIndexer = flagsMat.GetIndexer();

                    for (int i = 0; i < maskMat.Length; i++)
                    {
                        if (maskMat[i] > 0)
                        {
                            if (flagsMatIndexer[idx++] != 0.0f)
                            {
                                nonZeroCount++;
                            }
                            else
                                maskMat[i] = 0;
                        }
                    }
                    m.CopyTo(mask);
                }
            }
            maskHandle.Free();

            return nonZeroCount;
        }
Example #8
        static int MatDecompose(cv.Mat m, out cv.Mat lum, out int[] perm)
        {
            // Crout's LU decomposition for matrix determinant and inverse
            // stores combined lower & upper in lum[][]
            // stores row permutations into perm[]
            // returns +1 or -1 according to even or odd number of row permutations
            // lower gets dummy 1.0s on diagonal (0.0s above)
            // upper gets lum values on diagonal (0.0s below)

            int toggle = +1; // even (+1) or odd (-1) row permutations
            int n      = m.Rows;

            // make a copy of m[][] into result lum[][]
            lum = m.Clone();

            // make perm[]
            perm = new int[n];
            for (int i = 0; i < n; ++i)
            {
                perm[i] = i;
            }

            for (int j = 0; j < n - 1; ++j) // process by column. note n-1
            {
                double max = Math.Abs(lum.At<double>(j, j));
                int    piv = j;

                for (int i = j + 1; i < n; ++i) // find pivot index
                {
                    double xij = Math.Abs(lum.At<double>(i, j));
                    if (xij > max)
                    {
                        max = xij;
                        piv = i;
                    }
                } // i

                if (piv != j)
                {
                    cv.Mat tmp = lum.Row(piv).Clone(); // swap rows j, piv
                    lum.Row(j).CopyTo(lum.Row(piv));
                    tmp.CopyTo(lum.Row(j));

                    int t = perm[piv]; // swap perm elements
                    perm[piv] = perm[j];
                    perm[j]   = t;

                    toggle = -toggle;
                }

                double xjj = lum.At<double>(j, j);
                if (xjj != 0.0)
                {
                    for (int i = j + 1; i < n; ++i)
                    {
                        double xij = lum.At<double>(i, j) / xjj;
                        lum.Set<double>(i, j, xij);
                        for (int k = j + 1; k < n; ++k)
                        {
                            lum.Set<double>(i, k, lum.At<double>(i, k) - xij * lum.At<double>(j, k));
                        }
                    }
                }
            }

            return toggle;  // for determinant
        }
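As the comments note, the return value is the sign of the row permutation. A minimal sketch (assuming a square CV_64F matrix and the MatDecompose above) of how that sign combines with the diagonal of lum to give the determinant:

        static double MatDeterminant(cv.Mat m)
        {
            // det(M) = (permutation sign) * product of the upper factor's diagonal,
            // which MatDecompose stores on the diagonal of lum.
            int toggle = MatDecompose(m, out cv.Mat lum, out int[] perm);
            double det = toggle;
            for (int i = 0; i < lum.Rows; ++i)
            {
                det *= lum.At<double>(i, i);
            }
            return det;
        }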
Example #9
        public static Mat Extract(Mat srcMat, ColorConversionCodes code,
        int ch1Lower, int ch1Upper,
        int ch2Lower, int ch2Upper,
        int ch3Lower, int ch3Upper)
        {
            using (var maskMat = ExtractMask(srcMat,
                code,
                ch1Lower, ch1Upper,
                ch2Lower, ch2Upper,
                ch3Lower, ch3Upper
                ))
            {
                // Copy only the pixels selected by the mask into a zero-filled destination,
                // rather than reusing the mask Mat as its own copy target.
                var dstMat = new Mat(srcMat.Size(), srcMat.Type(), Scalar.All(0));
                srcMat.CopyTo(dstMat, maskMat);
                return dstMat;
            }
        }
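A hypothetical usage sketch, assuming ExtractMask converts srcMat with the given ColorConversionCodes and thresholds each channel into a binary mask; the file name and HSV ranges are illustrative only:

        using (var src = Cv2.ImRead("fruits.jpg"))
        using (var reds = Extract(src, ColorConversionCodes.BGR2HSV,
            ch1Lower: 0, ch1Upper: 10,       // hue
            ch2Lower: 100, ch2Upper: 255,    // saturation
            ch3Lower: 50, ch3Upper: 255))    // value
        {
            Cv2.ImShow("extracted", reds);
            Cv2.WaitKey();
        }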
Example #10
        public void Run()
        {
            Mat img = Cv2.ImRead(FilePath.Image.Lenna, ImreadModes.GrayScale);

            // expand input image to optimal size
            Mat padded = new Mat(); 
            int m = Cv2.GetOptimalDFTSize(img.Rows);
            int n = Cv2.GetOptimalDFTSize(img.Cols); // on the border add zero values
            Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderTypes.Constant, Scalar.All(0));
            
            // Add to the expanded another plane with zeros
            Mat paddedF32 = new Mat();
            padded.ConvertTo(paddedF32, MatType.CV_32F);
            Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
            Mat complex = new Mat();
            Cv2.Merge(planes, complex);         

            // this way the result may fit in the source matrix
            Mat dft = new Mat();
            Cv2.Dft(complex, dft);            

            // compute the magnitude and switch to logarithmic scale
            // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
            Mat[] dftPlanes;
            Cv2.Split(dft, out dftPlanes);  // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))

            // magnitude = sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
            Mat magnitude = new Mat();
            Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);

            magnitude += Scalar.All(1);  // switch to logarithmic scale
            Cv2.Log(magnitude, magnitude);

            // crop the spectrum, if it has an odd number of rows or columns
            Mat spectrum = magnitude[
                new Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];

            // rearrange the quadrants of Fourier image  so that the origin is at the image center
            int cx = spectrum.Cols / 2;
            int cy = spectrum.Rows / 2;

            Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
            Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy));  // Top-Right
            Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy));  // Bottom-Left
            Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right

            // swap quadrants (Top-Left with Bottom-Right)
            Mat tmp = new Mat();                           
            q0.CopyTo(tmp);
            q3.CopyTo(q0);
            tmp.CopyTo(q3);

            // swap quadrant (Top-Right with Bottom-Left)
            q1.CopyTo(tmp);                    
            q2.CopyTo(q1);
            tmp.CopyTo(q2);

            // Transform the matrix with float values into a viewable image form (floats between 0 and 1)
            Cv2.Normalize(spectrum, spectrum, 0, 1, NormTypes.MinMax);
                                     
            // Show the result
            Cv2.ImShow("Input Image"       , img);
            Cv2.ImShow("Spectrum Magnitude", spectrum);

            // calculating the idft
            Mat inverseTransform = new Mat();
            Cv2.Dft(dft, inverseTransform, DftFlags.Inverse | DftFlags.RealOutput);
            Cv2.Normalize(inverseTransform, inverseTransform, 0, 1, NormTypes.MinMax);
            Cv2.ImShow("Reconstructed by Inverse DFT", inverseTransform);
            Cv2.WaitKey();
        }
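Since the input was padded to the optimal DFT size, the reconstruction can also be cropped back to the original dimensions before comparing it with the source; a minimal sketch, reusing the variable names from the method above:

            // Crop the padded borders off the inverse-DFT result so it lines up with img.
            Mat reconstructed = inverseTransform[new Rect(0, 0, img.Cols, img.Rows)];
            Cv2.ImShow("Reconstructed (cropped)", reconstructed);
            Cv2.WaitKey();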