Example #1
      /*
      /// <summary>
      /// Create a LevMarqSparse solver
      /// </summary>
      public LevMarqSparse()
      {
         _ptr = CvInvoke.CvCreateLevMarqSparse();
      }*/

      /// <summary>
      /// Useful function to do simple bundle adjustment tasks
      /// </summary>
      /// <param name="points">Positions of points in global coordinate system (input and output), values will be modified by bundle adjustment</param>
      /// <param name="imagePoints">Projections of 3d points for every camera</param>
      /// <param name="visibility">Visibility of 3d points for every camera</param>
      /// <param name="cameraMatrix">Intrinsic matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="R">rotation matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="T">translation vector of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="distCoeffcients">distortion coefficients of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="termCrit">Termination criteria, a reasonable value will be (30, 1.0e-12) </param>
      public static void BundleAdjust(
         MCvPoint3D64f[] points, MCvPoint2D64f[][] imagePoints, int[][] visibility,
         Matrix<double>[] cameraMatrix, Matrix<double>[] R, Matrix<double>[] T, Matrix<double>[] distCoefficients, MCvTermCriteria termCrit)
      {
         using (Matrix<double> imagePointsMat = CvToolbox.GetMatrixFromPoints(imagePoints))
         using (Matrix<int> visibilityMat = CvToolbox.GetMatrixFromArrays(visibility))
         using (VectorOfMat cameraMatVec = new VectorOfMat())
         using (VectorOfMat rMatVec = new VectorOfMat())
         using (VectorOfMat tMatVec = new VectorOfMat())
         using (VectorOfMat distorMatVec = new VectorOfMat())
         {
            cameraMatVec.Push(cameraMatrix);
            rMatVec.Push(R);
            tMatVec.Push(T);
            distorMatVec.Push(distCoefficients);


            GCHandle handlePoints = GCHandle.Alloc(points, GCHandleType.Pinned);

            CvInvoke.CvLevMarqSparseAdjustBundle(
               cameraMatrix.Length,
               points.Length, handlePoints.AddrOfPinnedObject(),
               imagePointsMat, visibilityMat, cameraMatVec, rMatVec, tMatVec, distorMatVec, ref termCrit);

            handlePoints.Free();

         }
      }
Example #2
   // Use this for initialization
   void Start()
   {  
		String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4"};
		Mat[] imgs = new Mat[textureNames.Length];
		Mat tmp = new Mat ();
		for (int i = 0; i < textureNames.Length; i++) {
			Texture2D tex = Resources.Load<Texture2D>(textureNames[i]);
			imgs [i] = new Mat ();
			TextureConvert.Texture2dToOutputArray(tex, tmp);
			CvInvoke.Flip(tmp, tmp, FlipType.Vertical);
			CvInvoke.CvtColor (tmp, imgs [i], ColorConversion.Bgra2Bgr);
			if (imgs [i].IsEmpty)
				Debug.Log ("Image " + i + " is empty");
			else
				Debug.Log ("Image " + i + " is " + imgs[i].NumberOfChannels + " channels "  + imgs [i].Width + "x" + imgs [i].Height);
		}
		Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher (false);
		Mat result = new Mat ();
		using (VectorOfMat vms = new VectorOfMat (imgs))
			stitcher.Stitch (vms, result);
		//CvInvoke.Flip(result, result, FlipType.Vertical);

		Texture2D texture = TextureConvert.InputArrayToTexture2D(result, FlipType.Vertical);

		this.GetComponent<GUITexture>().texture = texture;
		Size s = result.Size;
		this.GetComponent<GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);

   }
Example #3
 /// <summary>
 /// Returns a training set of descriptors.
 /// </summary>
 /// <returns>A training set of descriptors.</returns>
 public Mat[] GetDescriptors()
 {
     using (var descriptors = new VectorOfMat())
     {
         NativeMethods.features2d_BOWTrainer_getDescriptors(ptr, descriptors.CvPtr);
         return descriptors.ToArray();
     }
 }
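A minimal usage sketch for the method above, assuming bowTrainer is an OpenCvSharp BOWKMeansTrainer that has already had descriptor Mats added via Add():

 Mat[] training = bowTrainer.GetDescriptors();
 Console.WriteLine("{0} descriptor sets collected", training.Length);
 foreach (Mat m in training)
     m.Dispose(); // the returned Mats are owned by the caller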
Example #4
        /// <summary>
        /// Loads a multi-page image from a file. 
        /// </summary>
        /// <param name="filename">Name of file to be loaded.</param>
        /// <param name="mats">A vector of Mat objects holding each page, if more than one.</param>
        /// <param name="flags">Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.</param>
        /// <returns></returns>
        public static bool ImReadMulti(string filename, out Mat[] mats, ImreadModes flags = ImreadModes.AnyColor)
        {
            if (filename == null) 
                throw new ArgumentNullException("filename");

            using (var matsVec = new VectorOfMat())
            {
                int ret = NativeMethods.imgcodecs_imreadmulti(filename, matsVec.CvPtr, (int) flags);
                mats = matsVec.ToArray();
                return ret != 0;
            }
        }
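A short usage sketch with a placeholder file path (in OpenCvSharp this wrapper is exposed on the Cv2 class):

            Mat[] pages;
            if (ImReadMulti("document.tif", out pages))
            {
                foreach (Mat page in pages)
                {
                    Console.WriteLine("page: {0}x{1}", page.Width, page.Height);
                    page.Dispose();
                }
            }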
Example #5
      private void selectImagesButton_Click(object sender, EventArgs e)
      {
         OpenFileDialog dlg = new OpenFileDialog();
         dlg.CheckFileExists = true;
         dlg.Multiselect = true;

         if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
         {
            sourceImageDataGridView.Rows.Clear();

            Image<Bgr, byte>[] sourceImages = new Image<Bgr, byte>[dlg.FileNames.Length];
            
            for (int i = 0; i < sourceImages.Length; i++)
            {
               sourceImages[i] = new Image<Bgr, byte>(dlg.FileNames[i]);

               using (Image<Bgr, byte> thumbnail = sourceImages[i].Resize(200, 200, Emgu.CV.CvEnum.Inter.Cubic, true))
               {
                  DataGridViewRow row = sourceImageDataGridView.Rows[sourceImageDataGridView.Rows.Add()];
                  row.Cells["FileNameColumn"].Value = dlg.FileNames[i];
                  row.Cells["ThumbnailColumn"].Value = thumbnail.ToBitmap();
                  row.Height = 200;
               }
            }
            try
            {
               using (Stitcher stitcher = new Stitcher(true))
               {
                  using (VectorOfMat vm = new VectorOfMat())
                  {
                     Mat result = new Mat();
                     vm.Push(sourceImages);
                     Stitcher.Status stitchStatus = stitcher.Stitch(vm, result);
                     if (stitchStatus == Stitcher.Status.Ok)
                        resultImageBox.Image = result;
                     else
                     {
                        MessageBox.Show(this, String.Format("Stiching Error: {0}", stitchStatus));
                        resultImageBox.Image = null;
                     }
                  }
               }
            }
            finally
            {
               foreach (Image<Bgr, Byte> img in sourceImages)
               {
                  img.Dispose();
               }
            }
         }
      }
Example #6
        /// <summary>
        /// Reads the native output (a vector of Mat) back into the managed list.
        /// </summary>
        public override void AssignResult()
        {
            if (!IsReady())
                throw new NotSupportedException();

            // retrieve the result as Mat
            using (var vectorOfMat = new VectorOfMat())
            {
                NativeMethods.core_OutputArray_getVectorOfMat(ptr, vectorOfMat.CvPtr);
                list.Clear();
                list.AddRange(vectorOfMat.ToArray());
            }
        }
Example #7
        public async Task<Stream> StitchImages(List<string> imageUrls)
        {
            if (imageUrls == null || !imageUrls.Any())
            {
                return null;
            }

            var httpClient = new HttpClient();
            var imageStreams = new List<Stream>();

            foreach (var imageUrl in imageUrls)
            {
                var imageStream = await httpClient.GetStreamAsync(imageUrl);
                imageStreams.Add(imageStream);
            }

            var imageBitmaps = new List<Bitmap>();
            foreach (var imageStream in imageStreams)
            {
                var imageBitmap = new Bitmap(imageStream);
                imageBitmaps.Add(imageBitmap);
            }

            var emguImages = new List<Image<Bgr, byte>>();
            foreach (var imageBitmap in imageBitmaps)
            {
                var image = new Image<Bgr, byte>(imageBitmap);
                emguImages.Add(image);
            }

            var arr = new VectorOfMat();
            foreach (var emguImage in emguImages)
            {
                arr.Push(emguImage.Mat);
            }

            var stitchedImage = new Mat();

            using (var stitcher = new Stitcher(false))
            {
                stitcher.Stitch(arr, stitchedImage);
            }

            var resultMemStream = new MemoryStream();

            stitchedImage.Bitmap.Save(resultMemStream, ImageFormat.Jpeg);
            resultMemStream.Position = 0;

            return resultMemStream;
        }
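A hypothetical caller for the method above, written inside an async method; the URLs are placeholders:

            var urls = new List<string>
            {
                "https://example.com/left.jpg",
                "https://example.com/right.jpg"
            };
            using (Stream pano = await StitchImages(urls))
            using (FileStream file = File.Create("pano.jpg"))
            {
                pano.CopyTo(file);
            }

Note that the method returns null for an empty list, so a production caller should check before copying.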
Example #8
      private void selectImagesButton_Click(object sender, EventArgs e)
      {
         OpenFileDialog dlg = new OpenFileDialog();
         dlg.CheckFileExists = true;
         dlg.Multiselect = true;

         if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
         {
            sourceImageDataGridView.Rows.Clear();

            Image<Bgr, Byte>[] sourceImages = new Image<Bgr, byte>[dlg.FileNames.Length];
            
            for (int i = 0; i < sourceImages.Length; i++)
            {
               sourceImages[i] = new Image<Bgr, byte>(dlg.FileNames[i]);

               using (Image<Bgr, byte> thumbnail = sourceImages[i].Resize(200, 200, Emgu.CV.CvEnum.Inter.Cubic, true))
               {
                  DataGridViewRow row = sourceImageDataGridView.Rows[sourceImageDataGridView.Rows.Add()];
                  row.Cells["FileNameColumn"].Value = dlg.FileNames[i];
                  row.Cells["ThumbnailColumn"].Value = thumbnail.ToBitmap();
                  row.Height = 200;
               }
            }
            try
            {
               //using (Stitcher stitcher = new Stitcher(true))
               //CUDA bruteforce matcher seems to cause issue in this release, not using CUDA for matching for this reason
               using (Stitcher stitcher = new Stitcher(false))
               {
                  using (VectorOfMat vm = new VectorOfMat())
                  {
                     Mat result = new Mat();
                     vm.Push(sourceImages);
                     stitcher.Stitch(vm, result);
                     resultImageBox.Image = result;
                  }
               }
            }
            finally
            {
               foreach (Image<Bgr, Byte> img in sourceImages)
               {
                  img.Dispose();
               }
            }
         }
      }
Example #9
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">cCalibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CalibType calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
                {
                    double reprojectionError = -1;
                    using (VectorOfMat rotationVectors = new VectorOfMat())
                        using (VectorOfMat translationVectors = new VectorOfMat())
                        {
                            Mat cameraMat   = new Mat();
                            Mat distorCoeff = new Mat();
                            reprojectionError = CvInvoke.CalibrateCamera(
                                vvObjPts,
                                vvImgPts,
                                imageSize,
                                intrinsicParam.IntrinsicMatrix,
                                intrinsicParam.DistortionCoeffs,
                                rotationVectors,
                                translationVectors,
                                calibrationType,
                                termCriteria);

                            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                using (Mat matR = rotationVectors[i])
                                    matR.CopyTo(p.RotationVector);
                                using (Mat matT = translationVectors[i])
                                    matT.CopyTo(p.TranslationVector);
                                extrinsicParams[i] = p;
                            }
                        }
                    return(reprojectionError);
                }
        }
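A minimal sketch of calling this legacy Emgu overload; objPts (MCvPoint3D32f[][]) and imgPts (PointF[][]) are assumed to hold chessboard corners gathered from several views:

            IntrinsicCameraParameters intrinsics = new IntrinsicCameraParameters();
            ExtrinsicCameraParameters[] extrinsics;
            double reprojError = CalibrateCamera(
                objPts,
                imgPts,
                new Size(640, 480),
                intrinsics,
                CvEnum.CalibType.Default,
                new MCvTermCriteria(30, 1e-10),
                out extrinsics);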
Example #10
        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            Image <Hsv, Byte> img  = new Image <Hsv, byte>(_bitmap);
            VectorOfMat       mats = new VectorOfMat();

            CvInvoke.Split(img, mats);
            //imageH.Image = mats[0];
            //imageS.Image = mats[1];
            //imageB.Image = mats[2];


            UMat uimage1  = new UMat();
            UMat pyrDown1 = new UMat();

            CvInvoke.PyrDown(mats[0], pyrDown1);
            CvInvoke.PyrUp(pyrDown1, uimage1);
            UMat cannyEdges1 = new UMat();

            CvInvoke.Canny(uimage1, cannyEdges1, 80, 80 * 0.6);
            imageHCanny.Image = cannyEdges1;

            UMat uimage2  = new UMat();
            UMat pyrDown2 = new UMat();

            CvInvoke.PyrDown(mats[1], pyrDown2);
            CvInvoke.PyrUp(pyrDown2, uimage2);
            UMat cannyEdges2 = new UMat();

            CvInvoke.Canny(uimage2, cannyEdges2, 80, 80 * 0.6);
            imageSCanny.Image = cannyEdges2;

            UMat uimage3  = new UMat();
            UMat pyrDown3 = new UMat();

            CvInvoke.PyrDown(mats[2], pyrDown3);
            CvInvoke.PyrUp(pyrDown3, uimage3);
            UMat cannyEdges3 = new UMat();

            CvInvoke.Canny(uimage3, cannyEdges3, 80, 80 * 0.6);
            imageBCanny.Image = cannyEdges3;


            imageH.Image = uimage1;
            imageS.Image = uimage2;
            imageB.Image = uimage3;
        }
Example #11
        /// <summary>
        /// Creates a 4-dimensional blob from a series of images. Optionally resizes and crops the images from the center, subtracts mean values, scales values by a scale factor, and swaps the blue and red channels.
        /// </summary>
        /// <param name="images">Input images (all with 1- or 3-channels).</param>
        /// <param name="scaleFactor">Multiplier for images values.</param>
        /// <param name="size">Spatial size for output image</param>
        /// <param name="mean">Scalar with mean values which are subtracted from channels. Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.</param>
        /// <param name="swapRB">Flag which indicates that swap first and last channels in 3-channel image is necessary.</param>
        /// <param name="crop">Flag which indicates whether image will be cropped after resize or not</param>
        /// <param name="ddepth">Depth of output blob. Choose CV_32F or CV_8U.</param>
        /// <returns>A 4-dimensional Mat in NCHW order. Each input image is resized so that one side matches the corresponding dimension in size and the other is equal or larger; then, when crop is set, a crop from the center is performed.</returns>
        public static Mat BlobFromImages(
            Mat[] images,
            double scaleFactor      = 1.0,
            Size size               = new Size(),
            MCvScalar mean          = new MCvScalar(),
            bool swapRB             = false,
            bool crop               = false,
            CvEnum.DepthType ddepth = CvEnum.DepthType.Cv32F)
        {
            Mat blob = new Mat();

            using (VectorOfMat vm = new VectorOfMat(images))
            {
                BlobFromImages(vm, blob, scaleFactor, size, mean, swapRB, crop, ddepth);
            }
            return(blob);
        }
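A usage sketch under assumed names (img1, img2 and net are not defined above, and the mean values are illustrative):

            Mat blob = BlobFromImages(
                new Mat[] { img1, img2 },
                1.0 / 255,
                new Size(224, 224),
                new MCvScalar(104, 117, 123),
                true,   // swapRB: the inputs are BGR, the network expects RGB
                false); // no center crop
            net.SetInput(blob); // net is an assumed Emgu.CV.Dnn.Net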
Example #12
        /// <summary>
        /// Backproject the histogram into a matrix
        /// </summary>
        /// <param name="srcs">Source matrices, all are of the same size and type</param>
        /// <returns>Destination back projection matrix of the same type as the source matrices</returns>
        /// <typeparam name="TDepth">The type of depth of the matrix</typeparam>
        public Matrix <TDepth> BackProject <TDepth>(Matrix <TDepth>[] srcs) where TDepth : new()
        {
            Debug.Assert(srcs.Length == _binSizes.Length, "Incompatible Dimension");
            using (VectorOfMat vm = new VectorOfMat())
            {
                vm.Push(srcs);

                int[] channels = new int[srcs.Length];
                for (int i = 0; i < channels.Length; i++)
                {
                    channels[i] = i;
                }
                Matrix <TDepth> res = new Matrix <TDepth>(srcs[0].Size);
                CvInvoke.CalcBackProject(vm, channels, this, res.Mat, GetRangeAsFloatVec(), 1.0);

                return(res);
            }
        }
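A minimal sketch, assuming this method sits on Emgu's DenseHistogram and hueMatrix is a single-channel matrix matching the data the histogram was computed from:

            Matrix<byte> backProjection = hist.BackProject(new Matrix<byte>[] { hueMatrix });
            // bright values in backProjection mark pixels whose value is frequent in the histogram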
Example #13
      /// <summary>
      /// The function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.
      /// </summary>
      /// <param name="filename">Name of file to be loaded.</param>
      /// <param name="flags">Read flags</param>
      /// <returns>Null if the reading fails, otherwise, an array of Mat from the file</returns>
      public static Mat[] Imreadmulti(String filename, CvEnum.ImreadModes flags = ImreadModes.AnyColor)
      {
         using (VectorOfMat vm = new VectorOfMat())
            using (CvString strFilename = new CvString(filename))
         {
            if (!cveImreadmulti(strFilename, vm, flags))
               return null;
            Mat[] result = new Mat[vm.Size];

            for (int i = 0; i < result.Length; i++)
            {
               Mat m = new Mat();
               CvInvoke.Swap(m, vm[i]);
               result[i] = m;
            }
            return result;
         }
      }
Example #14
        private void ConvertBlackToTransparent(Bitmap image)
        {
            //Mat src = new Mat(new Size(image.Width, image.Height),
            //                    Emgu.CV.CvEnum.DepthType.Cv8U, 4);

            var src     = new Image <Rgb, byte>(image);
            var graySrc = src.Convert <Gray, byte>();

            // var alpha = graySrc.ThresholdBinary(new Gray(100), new Gray(255));

            var srcMat   = src.Mat;
            var finalMat = new Mat(image.Height, image.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 4);
            var tmp      = new Mat(image.Height, image.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
            var alpha    = new Mat(image.Height, image.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 1);

            CvInvoke.CvtColor(srcMat, tmp, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
            imgBox.Image = tmp.ToImage <Rgba, byte>();

            CvInvoke.Threshold(tmp, alpha, 100, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
            imgBox.Image = alpha.ToImage <Rgba, byte>();


            var rgb = new VectorOfMat(3);

            CvInvoke.Split(srcMat, rgb);

            Mat[] rgba = { rgb[0], rgb[1], rgb[2], alpha };

            VectorOfMat rgbaMat = new VectorOfMat(rgba);

            CvInvoke.Merge(rgbaMat, finalMat);

            imgBox.Image = finalMat.ToImage <Rgba, byte>();

            //Mat m = new Mat(image.Height, image.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 4);

            //m.PushBack(src[0].Mat);
            //m.PushBack(src[1].Mat);
            //m.PushBack(src[2].Mat);
            //m.PushBack(alpha.Mat);

            //imgBox.Image = m.ToImage<Rgba, byte>();
        }
Example #15
        private void button1_Click(object sender, EventArgs e)
        {
            Image <Hsv, byte> hsvImage = src.Convert <Hsv, byte>();

            imageBox1.Image = hsvImage;

            VectorOfMat channels = new VectorOfMat();                      // create a VectorOfMat to hold the split channels

            CvInvoke.Split(hsvImage, channels);                            // split the channels
            InputOutputArray mix_channel = channels.GetInputOutputArray(); // get the underlying array

            Mat H_channel = mix_channel.GetMat(0);                         // get the first channel
            Mat S_channel = mix_channel.GetMat(1);                         // get the second channel
            Mat V_channel = mix_channel.GetMat(2);                         // get the third channel

            imageBox2.Image = H_channel;                                   // display the first channel
            imageBox3.Image = S_channel;                                   // display the second channel
            imageBox4.Image = V_channel;                                   // display the third channel
        }
Example #16
        public static Mat CreateBGRLut(Mat img, CvLUT[] LUTS)
        {
            Mat         dst         = new Mat(img.Size, DepthType.Cv8U, img.NumberOfChannels);
            VectorOfMat bgrChannels = new VectorOfMat();

            CvInvoke.Split(img, bgrChannels);

            Mat[] mats = new Mat[bgrChannels.Size];
            for (int n = 0; n < bgrChannels.Size; n++)
            {
                mats[n] = LUTS[n].CreateImg(bgrChannels[n]);
            }

            VectorOfMat lutsChannels = new VectorOfMat(mats);

            CvInvoke.Merge(lutsChannels, dst);

            return(dst);
        }
Example #17
        /// <summary>
        /// Process the input image and render into the output image
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">
        /// The output image, can be the same as <paramref name="imageIn"/>, in which case we will render directly into the input image.
        /// Note that if no bar codes are detected, <paramref name="imageOut"/> will remain unchanged.
        /// If bar codes are detected, we will draw the code and (rectangle) regions on top of the existing pixels of <paramref name="imageOut"/>.
        /// If the <paramref name="imageOut"/> is not the same object as <paramref name="imageIn"/>, it is a good idea to copy the pixels over from the input image before passing it to this function.
        /// </param>
        /// <returns>The messages that we want to display.</returns>
        public String ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            using (VectorOfMat points = new VectorOfMat())
            {
                Stopwatch watch         = Stopwatch.StartNew();
                var       barcodesFound = _barcodeDetector.DetectAndDecode(imageIn);
                watch.Stop();

                for (int i = 0; i < barcodesFound.Length; i++)
                {
                    Point[] contour = Array.ConvertAll(barcodesFound[i].Points, Point.Round);

                    using (VectorOfVectorOfPoint vpp = new VectorOfVectorOfPoint(new Point[][] { contour }))
                    {
                        CvInvoke.DrawContours(imageOut, vpp, -1, RenderColor);
                    }

                    CvInvoke.PutText(
                        imageOut,
                        barcodesFound[i].DecodedInfo,
                        Point.Round(barcodesFound[i].Points[0]),
                        FontFace.HersheySimplex,
                        1.0,
                        RenderColor
                        );
                }


                if (barcodesFound.Length == 0)
                {
                    return(String.Format("No barcodes found (in {0} milliseconds)", watch.ElapsedMilliseconds));
                }

                String[] barcodesTexts = Array.ConvertAll(barcodesFound,
                                                          delegate(BarcodeDetector.Barcode input) { return(input.DecodedInfo); });
                String allBarcodeText = String.Join(";", barcodesTexts);
                return(String.Format(
                           "Barcodes found (in {1} milliseconds): {0}",
                           allBarcodeText,
                           watch.ElapsedMilliseconds));
            }
        }
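A usage sketch for the renderer above; the processor instance and the image path are assumptions:

            using (Mat frame = CvInvoke.Imread("barcode.png", ImreadModes.Color))
            {
                String message = processor.ProcessAndRender(frame, frame); // render in place
                Console.WriteLine(message);
                CvInvoke.Imshow("barcodes", frame);
                CvInvoke.WaitKey();
            }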
Example #18
        public Mat FingerprintDescriptor(Mat input)
        {
            var harris_normalised = PrepareImage(input);

            float            threshold  = 125.0f;
            List <MKeyPoint> mKeyPoints = new List <MKeyPoint>();
            Mat rescaled = new Mat();
            VectorOfKeyPoint keypoints = new VectorOfKeyPoint();
            double           scale = 1.0, shift = 0.0;

            CvInvoke.ConvertScaleAbs(harris_normalised, rescaled, scale, shift);
            Mat[]       mat         = new Mat[] { rescaled, rescaled, rescaled };
            VectorOfMat vectorOfMat = new VectorOfMat(mat);

            int[] from_to  = { 0, 0, 1, 1, 2, 2 };
            Mat   harris_c = new Mat(rescaled.Size, DepthType.Cv8U, 3);

            CvInvoke.MixChannels(vectorOfMat, harris_c, from_to);
            for (int x = 0; x < harris_c.Width; x++)
            {
                for (int y = 0; y < harris_c.Height; y++)
                {
                    if (GetFloatValue(harris_c, x, y) > threshold)
                    {
                        MKeyPoint m = new MKeyPoint
                        {
                            Size  = 1,
                            Point = new PointF(x, y)
                        };
                        mKeyPoints.Add(m);
                    }
                }
            }

            keypoints.Push(mKeyPoints.ToArray());
            Mat         descriptors = new Mat();
            ORBDetector ORBCPU      = new ORBDetector();

            ORBCPU.Compute(_input_thinned, keypoints, descriptors);

            return(descriptors);
        }
Example #19
        public void Calibrate(VectorOfVectorOfPointF cornersPoints, Size imageSize, int innerCornersPerChessboardCols,
            int innerCornersPerChessboardRows)
        {
            modelPoints = CreateModelPoints(cornersPoints.Size, innerCornersPerChessboardCols,
                innerCornersPerChessboardRows);

            var rotationVectors = new VectorOfMat();
            var translationVectors = new VectorOfMat();

            CvInvoke.CalibrateCamera(modelPoints, cornersPoints, imageSize, cameraMatrix, cameraDistortionCoeffs,
                rotationVectors, translationVectors, CalibType.Default, new MCvTermCriteria(10));

            translation = new Matrix<double>(translationVectors[0].Rows, translationVectors[0].Cols,
                translationVectors[0].DataPointer);

            var rotationMatrix = new Matrix<double>(rotationVectors[0].Rows, rotationVectors[0].Cols,
                rotationVectors[0].DataPointer);

            rotation = new RotationVector3D(new[] {rotationMatrix[0, 0], rotationMatrix[1, 0], rotationMatrix[2, 0]});
        }
Example #20
        ///<summary>
        /// Backproject the histogram into a matrix
        ///</summary>
        ///<param name="srcs">Source matrices, all are of the same size and type</param>
        ///<returns>Destination back projection matrix of the same type as the source matrices</returns>
        ///<typeparam name="TDepth">The type of depth of the matrix</typeparam>
        public Matrix <TDepth> BackProject <TDepth>(Matrix <TDepth>[] srcs) where TDepth : new()
        {
#if !(NETFX_CORE || (UNITY_ANDROID || UNITY_IPHONE || UNITY_STANDALONE || UNITY_METRO))
            Debug.Assert(srcs.Length == _binSizes.Length, Properties.StringTable.IncompatibleDimension);
#endif
            using (VectorOfMat vm = new VectorOfMat())
            {
                vm.Push(srcs);

                int[] channels = new int[srcs.Length];
                for (int i = 0; i < channels.Length; i++)
                {
                    channels[i] = i;
                }
                Matrix <TDepth> res = new Matrix <TDepth>(srcs[0].Size);
                CvInvoke.CalcBackProject(vm, channels, this, res.Mat, GetRangeAsFloatVec(), 1.0);

                return(res);
            }
        }
Example #21
        // Equalization
        private void equalization_Click(object sender, EventArgs e)
        {
            maskDraw.Enabled = true;
            Image <Ycc, Byte> temp = new Image <Ycc, Byte>(img.Width, img.Height);

            CvInvoke.CvtColor(img, temp, ColorConversion.Rgb2YCrCb);
            Image <Gray, Byte>[] channels = temp.Split();

            channels[0]._EqualizeHist();
            VectorOfMat c = new VectorOfMat();

            c.Push(channels[0]);
            c.Push(channels[1]);
            c.Push(channels[2]);
            CvInvoke.Merge(c, temp);
            CvInvoke.CvtColor(temp, img, ColorConversion.YCrCb2Rgb);
            imgBackUp        = img.Clone();
            mainImage2.Image = img.ToBitmap();
            label1.Text      = "Status: Histogram equalization";
        }
Example #22
        /// <summary>
        /// The function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.
        /// </summary>
        /// <param name="filename">Name of file to be loaded.</param>
        /// <param name="flags">Read flags</param>
        /// <returns>Null if the reading fails, otherwise, an array of Mat from the file</returns>
        public static Mat[] Imreadmulti(String filename, CvEnum.ImreadModes flags = ImreadModes.AnyColor)
        {
            using (VectorOfMat vm = new VectorOfMat())
                using (CvString strFilename = new CvString(filename))
                {
                    if (!cveImreadmulti(strFilename, vm, flags))
                    {
                        return(null);
                    }
                    Mat[] result = new Mat[vm.Size];

                    for (int i = 0; i < result.Length; i++)
                    {
                        Mat m = new Mat();
                        CvInvoke.Swap(m, vm[i]);
                        result[i] = m;
                    }
                    return(result);
                }
        }
Example #23
        /// <summary>
        /// This function compares the separate blue, green and red values of each pixel and, using a clever
        /// subtraction, tries to determine if it is skin-colored. The idea is taken from this paper:
        /// "In-air gestures around unmodified mobile devices" by Song et al.
        /// </summary>
        /// <param name="inputImage">Standard BGR image.</param>
        /// <returns>Grayscale image with the white pixels containing skin.</returns>
        private static Image <Gray, byte> MinimumSegment(Image <Bgr, byte> inputImage)
        {
            Mat deltaOne = new Mat();
            Mat deltaTwo = new Mat();

            VectorOfMat bgrChannels = new VectorOfMat(3);

            CvInvoke.Split(inputImage, bgrChannels);
            CvInvoke.Subtract(bgrChannels[2], bgrChannels[1], deltaOne);
            CvInvoke.Subtract(bgrChannels[2], bgrChannels[0], deltaTwo);

            Mat mixedMat = new Mat();

            CvInvoke.Min(deltaOne, deltaTwo, mixedMat);
            Image <Gray, byte> outputImage = mixedMat.ToImage <Gray, byte>().InRange(new Gray(10), new Gray(200));

            bgrChannels.Dispose();
            mixedMat.Dispose();
            return(outputImage);
        }
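A short sketch of running the segmenter; the file name is a placeholder:

            using (var frame = new Image<Bgr, byte>("hand.jpg"))
            using (Image<Gray, byte> skinMask = MinimumSegment(frame))
            {
                CvInvoke.Imshow("skin mask", skinMask);
                CvInvoke.WaitKey();
            }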
Example #24
        private Mat Magnitude(Mat fftData)
        {
            Mat Real = new Mat(fftData.Size, DepthType.Cv32F, 1);

            Mat         Imaginary = new Mat(fftData.Size, DepthType.Cv32F, 1);
            VectorOfMat channels  = new VectorOfMat();

            CvInvoke.Split(fftData, channels); // split the multi-channel Mat into single-channel Mats
            Real      = channels.GetOutputArray().GetMat(0);
            Imaginary = channels.GetOutputArray().GetMat(1);
            CvInvoke.Pow(Real, 2.0, Real);
            CvInvoke.Pow(Imaginary, 2.0, Imaginary);
            CvInvoke.Add(Real, Imaginary, Real);
            CvInvoke.Pow(Real, 0.5, Real);
            Mat onesMat = Mat.Ones(Real.Rows, Real.Cols, DepthType.Cv32F, 1);

            CvInvoke.Add(Real, onesMat, Real);
            CvInvoke.Log(Real, Real); // take the natural logarithm
            return(Real);
        }
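A sketch of producing the two-channel complex spectrum this method expects: the real plane is merged with a zero imaginary plane before the forward DFT. The input file name is a placeholder:

            Mat gray = CvInvoke.Imread("input.png", ImreadModes.Grayscale);
            Mat floatImg = new Mat();
            gray.ConvertTo(floatImg, DepthType.Cv32F);

            Mat complexIn = new Mat();
            using (Mat zeros = new Mat(floatImg.Size, DepthType.Cv32F, 1))
            {
                zeros.SetTo(new MCvScalar(0));
                using (VectorOfMat planes = new VectorOfMat(floatImg, zeros))
                    CvInvoke.Merge(planes, complexIn);
            }

            Mat spectrum = new Mat();
            CvInvoke.Dft(complexIn, spectrum, DxtType.Forward, 0);
            Mat logMagnitude = Magnitude(spectrum);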
Example #25
        public override void ProcessData(Mat sourceImage, Mat destImage)
        {
            Size size = sourceImage.Size;

            Mat i0 = GetBufferGray(size, 0);
            Mat i1 = GetBufferGray(size, 1);
            Mat i2 = GetBufferGray(size, 2);
            Mat i3 = GetBufferGray(size, 3);

            CvInvoke.ExtractChannel(sourceImage, i0, 0);
            CvInvoke.Canny(i0, i1, _thresh, _threshLinking, _apertureSize);
            CvInvoke.ExtractChannel(sourceImage, i0, 1);
            CvInvoke.Canny(i0, i2, _thresh, _threshLinking, _apertureSize);
            CvInvoke.ExtractChannel(sourceImage, i0, 2);
            CvInvoke.Canny(i0, i3, _thresh, _threshLinking, _apertureSize);
            using (VectorOfMat vm = new VectorOfMat(i1, i2, i3))
            {
                CvInvoke.Merge(vm, destImage);
            }
        }
Example #26
        private Mat MagnitudeInverse(Mat fftData)
        {
            Mat Real = new Mat(fftData.Size, DepthType.Cv32F, 1);

            Mat         Imaginary = new Mat(fftData.Size, DepthType.Cv32F, 1);
            VectorOfMat channels  = new VectorOfMat();

            CvInvoke.Split(fftData, channels);
            Real      = channels.GetOutputArray().GetMat(0);
            Imaginary = channels.GetOutputArray().GetMat(1);


            CvInvoke.Pow(Real, 2.0, Real);
            CvInvoke.Pow(Imaginary, 2.0, Imaginary);
            CvInvoke.Add(Real, Imaginary, Real);
            CvInvoke.Pow(Real, 0.5, Real);
            Console.WriteLine(Real);

            return(Real);
        }
Example #27
        public Mat initialtracker(Image <Bgr, Byte> SELECTION, out UMat model, out Mat Model, Mat selection, int value1, int value2, int value3, out Mat Mask)
        {
            Mat hsv = new Mat();

            model = SELECTION.Copy().ToUMat();
            Model = selection;
            S     = SELECTION;
            s     = selection;
            Image <Hsv, Byte> HSV = SELECTION.Convert <Hsv, Byte>();
            // CvInvoke.CvtColor(selection, hsv, ColorConversion.Bgr2Hsv);
            //  CvInvoke.CvtColor(skin_sample, skin_sample, ColorConversion.Bgr2Hsv);

            Mat mask = new Mat();

            CvInvoke.InRange(HSV, new ScalarArray(new MCvScalar(0, value2 - 30, value3 - 45)), new ScalarArray(new MCvScalar(value1 + 30, value2 + 30, value3 + 30)), mask);
            //CvInvoke.InRange(HSV, new ScalarArray(new MCvScalar(0, value1, Math.Min(value2, value3))), new ScalarArray(new MCvScalar(180, 255, Math.Max(value2, value3))), mask);
            Mat hue = new Mat();

            hue  = HSV.Mat.Split()[0];
            Mask = mask;
            int[]   Chn     = { 0 };
            int[]   size    = { 24 };
            float[] range   = { 0, 180 };
            var     vhue    = new VectorOfMat(hue);
            Mat     element = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(2, 2), new Point(1, 1));//  Mat element = getStructuringElement(MORPH_RECT, Size(3, 3));

            // CvInvoke.Erode(mask, mask, element, new Point(1, 1), 2, BorderType.Default, new MCvScalar(0, 0, 0));
            Mat element2 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(2, 2), new Point(1, 1));

            CvInvoke.Dilate(mask, mask, element, new Point(-1, -1), 2, BorderType.Default, new MCvScalar(0, 0, 0));
            Mat hist = new Mat();

            //mask = MASK.Mat;
            CvInvoke.CalcHist(vhue, Chn, mask, hist, size, range, true);
            //  CvInvoke.EqualizeHist(hist, hist);

            CvInvoke.Normalize(hist, hist, 0, 200, NormType.MinMax);

            return(hist);
        }
Example #28
        public Image <Bgr, byte> Chanel(int i)
        {
            var channel = MainImageExp.Split()[i];

            Image <Bgr, byte> destImage = MainImageExp.CopyBlank();

            VectorOfMat vm = new VectorOfMat();

            switch (i)
            {
            case 0:
                vm.Push(channel);
                vm.Push(channel.CopyBlank());
                vm.Push(channel.CopyBlank());
                break;

            case 1:
                vm.Push(channel.CopyBlank());
                vm.Push(channel);
                vm.Push(channel.CopyBlank());
                break;

            case 2:
                vm.Push(channel.CopyBlank());
                vm.Push(channel.CopyBlank());
                vm.Push(channel);
                break;

            default:
                vm.Push(channel.CopyBlank());
                vm.Push(channel.CopyBlank());
                vm.Push(channel.CopyBlank());
                break;
            }

            CvInvoke.Merge(vm, destImage);

            MainImageExp = destImage.Resize(640, 480, Inter.Linear);

            return(MainImageExp);
        }
Example #29
        /// <summary>
        /// Get the preview image
        /// </summary>
        /// <param name="preview">The preview image</param>
        /// <returns>True if successfully retrieve the preview image.</returns>
        public bool GetPreviewOut(Mat preview)
        {
            if (StreamName.Contains("previewout"))
            {
                int[] dim = Dimension;
                if (dim[0] == 3)
                {
                    int step = dim[2] * ElemSize;
                    using (Mat blue = new Mat(new Size(dim[2], dim[1]), DepthType.Cv8U, 1, Data, step))
                        using (Mat green = new Mat(new Size(dim[2], dim[1]), DepthType.Cv8U, 1, new IntPtr(Data.ToInt64() + dim[1] * step), step))
                            using (Mat red = new Mat(new Size(dim[2], dim[1]), DepthType.Cv8U, 1, new IntPtr(Data.ToInt64() + dim[1] * step * 2), step))
                                using (VectorOfMat vm = new VectorOfMat(blue, green, red))
                                {
                                    CvInvoke.Merge(vm, preview);
                                    return(true);
                                }
                }
            }

            return(false);
        }
Example #30
        private void MenuSplitChannels_Click(object sender, EventArgs e)
        {
            if (mCurrentImage.NumberOfChannels == 1)
            {
                MessageBox.Show("当前主图片为单通道图像,无法继续分离!", "警告");
                return;
            }

            VectorOfMat channels = new VectorOfMat();

            CvInvoke.Split(mCurrentImage, channels);
            for (int i = 0; i < channels.Size; i++)
            {
                string filename = string.Format("通道{0}", i);
                mCurrentImage = channels[i].Clone();
                UpdateCurrentImage(filename);

                mFrmMainImage.SetImageSource(mCurrentImage);
                ShowAlonePicture(mCurrentImage, string.Format("通道{0}", i));
            }
        }
Example #31
        public void TrainRecognizer(List<User> users)
        {
            var imageList = new VectorOfMat();
            var indexList = new VectorOfInt();

            var userIndex = 0;

            foreach (var user in users)
            {
                foreach (var userImage in user.UserImages.Where(userImage => File.Exists(userImage.ImageFilePath)))
                {
                    imageList.Push(new Image<Gray, byte>(userImage.ImageFilePath));
                    indexList.Push(new[] { userIndex });
                }

                userIndex++;
            }

            Recognizer = new EigenFaceRecognizer(imageList.Size);
            Recognizer.Train(imageList, indexList);
        }
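A follow-up sketch: once trained, the recognizer can classify a probe face. The path is a placeholder, and EigenFaceRecognizer expects the probe to be the same size as the training images:

            using (var probe = new Image<Gray, byte>("probe.png"))
            {
                FaceRecognizer.PredictionResult result = Recognizer.Predict(probe);
                Console.WriteLine("user index {0}, distance {1}", result.Label, result.Distance);
            }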
Example #32
        private void button5_Click(object sender, EventArgs e)
        {
            Stitcher    _sticher     = new Stitcher(Stitcher.Mode.Scans); // create a Stitcher
            Mat         result_image = new Mat();                         // Mat to hold the output panorama
            VectorOfMat sti_image    = new VectorOfMat();                 // VectorOfMat holding the input images to stitch

            // push the images into sti_image; they are not added in any particular order,
            // which shows that stitching does not depend on the input order
            sti_image.Push(image1);
            sti_image.Push(image2);
            sti_image.Push(image3);
            sti_image.Push(image4);
            Stitcher.Status status = _sticher.Stitch(sti_image, result_image); // stitch the images; the returned status reports success or failure
            if (status == Stitcher.Status.Ok)
            {
                imageBox5.Image = result_image; // display the result
            }
            else
            {
                MessageBox.Show("Stitching failed", "Notice");
            }
        }
Example #33
 /// <summary>
 /// Detect objects by template matching. Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
 /// </summary>
 /// <param name="sources">Source images, one for each modality.</param>
 /// <param name="threshold">Similarity threshold, a percentage between 0 and 100.</param>
 /// <param name="matches">Template matches, sorted by similarity score.</param>
 /// <param name="classIds">If non-empty, only search for the desired object classes.</param>
 /// <param name="quantizedImages">Optionally return vector&lt;Mat&gt; of quantized images.</param>
 /// <param name="masks">The masks for consideration during matching. The masks should be CV_8UC1 where 255 represents a valid pixel. If non-empty, the vector must be the same size as sources. Each element must be empty or the same size as its corresponding source.</param>
 public void Match(
     VectorOfMat sources,
     float threshold,
     VectorOfLinemodMatch matches,
     VectorOfCvString classIds            = null,
     IOutputArrayOfArrays quantizedImages = null,
     VectorOfMat masks = null)
 {
     using (OutputArray oaQuantizedImages =
                quantizedImages == null ? OutputArray.GetEmpty() : quantizedImages.GetOutputArray())
     {
         LinemodInvoke.cveLinemodDetectorMatch(
             _ptr,
             sources,
             threshold,
             matches,
             classIds,
             oaQuantizedImages,
             masks
             );
     }
 }
Example #34
        private void jointBtn_Click(object sender, EventArgs e)
        {
            MessageBox.Show("此过程可能需要几秒钟的时间,请稍候...");
            Image <Bgr, byte>[] sources;
            Image  img1 = this.pictureBox1.Image;
            Bitmap map1 = new Bitmap(img1);
            Image  img2 = this.pictureBox2.Image;
            Bitmap map2 = new Bitmap(img2);

            sources = new Image <Bgr, byte> [2];      // initialize the image array; its length fixes the number of images

            sources[0] = new Image <Bgr, byte>(map1); // fill in the bitmap data
            sources[1] = new Image <Bgr, byte>(map2);

            Stitcher stitcher      = new Stitcher(false); // true uses the GPU, false does not
            Mat      panoramic_img = new Mat();
            bool     ok            = true;                // flag marking whether the stitch succeeded

            Mat[]       mat = new Mat[] { sources[0].Mat, sources[1].Mat };
            VectorOfMat vc  = new VectorOfMat(mat);

            try
            {
                ok = stitcher.Stitch(vc, panoramic_img);
            }  // if ok is true the stitch succeeded; if false, it failed
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
            if (ok)
            {
                pictureBox3.Image = (Image)panoramic_img.Bitmap;
                MessageBox.Show("The two images were stitched successfully");
            }
            else
            {
                MessageBox.Show("Stitching failed: too little overlap between the images, or they cannot be combined"); // stitching failed
            }
        }
Example #35
        public PageMetric(Mat image)
        {
            Mat hsv = new Mat();

            CvInvoke.CvtColor(image, hsv, ColorConversion.Rgb2Hsv);

            var  dim        = new int[] { 0 };
            var  histSize   = new int[] { 256 };
            var  range      = new float[] { 0, 255 };
            bool accumulate = false;

            Mat hist = new Mat();
            Mat rgb  = new Mat();

            using (VectorOfMat array = new VectorOfMat())
            {
                array.Push(hsv);
                CvInvoke.CalcHist(array, dim, new Mat(), hist, histSize, range, accumulate);
            }

            this.histogram = hist;
        }
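A sketch of how two PageMetric histograms might be compared; the Histogram accessor is hypothetical, standing in for the private histogram field:

            var a = new PageMetric(CvInvoke.Imread("page1.png"));
            var b = new PageMetric(CvInvoke.Imread("page2.png"));
            double similarity = CvInvoke.CompareHist(a.Histogram, b.Histogram, HistogramCompMethod.Correl);
            // Correl yields 1.0 for identical histograms and lower values for dissimilar ones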
Example #36
        public Image <Bgr, byte> editColourChanel(Image <Bgr, byte> sourceImage, CheckedListBox checkedListBox)
        {
            var channel = sourceImage.Split()[checkedListBox.SelectedIndex];

            Image <Bgr, byte> destImage = sourceImage.CopyBlank();

            VectorOfMat vm = new VectorOfMat();

            for (int i = 0; i < 3; i++)
            {
                if (i == checkedListBox.SelectedIndex)
                {
                    vm.Push(channel);
                }
                else
                {
                    vm.Push(channel.CopyBlank());
                }
            }
            CvInvoke.Merge(vm, destImage);
            return(destImage);
        }
Example #37
        internal static double DetermineThreshold(Image <Gray, byte> img)
        {
            double threshhold = 0;
            float  smallest   = float.MaxValue;
            Mat    hist       = new Mat();

            using (VectorOfMat vm = new VectorOfMat())
            {
                vm.Push(img.Mat);
                float[] ranges = new float[] { 0.0f, 256.0f };
                CvInvoke.CalcHist(vm, new int[] { 0 }, null, hist, new int[] { 256 }, ranges, false);
            }
            for (int i = 5; i < 50; ++i)
            {
                if (hist.GetValue(0, i) < smallest)
                {
                    smallest   = hist.GetValue(0, i);
                    threshhold = i;
                }
            }
            return(threshhold);
        }
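A usage sketch applying the estimated threshold; the input path is a placeholder:

            using (var gray = new Image<Gray, byte>("scan.png"))
            {
                double t = DetermineThreshold(gray);
                using (Image<Gray, byte> binary = gray.ThresholdBinary(new Gray(t), new Gray(255)))
                    CvInvoke.Imwrite("binary.png", binary);
            }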
Example #38
        private void btnGetRedChanel_Click(object sender, EventArgs e)
        {
            // remove blue and green channels, ie. only red channel is saved
            int[] zeroCHs = new int[2] {
                0, 1
            };

            Image <Gray, byte>[] channels;
            Mat outMat = new Mat();

            channels = img.Split();    // img: original image(color)
            for (int i = 0; i < 2; i++)
            {
                channels[zeroCHs[i]].SetZero();
            }

            using (VectorOfMat vm = new VectorOfMat(channels[0].Mat, channels[1].Mat, channels[2].Mat))
            {
                CvInvoke.Merge(vm, outMat);
            }
            imageBox2.Image = outMat;
        }
Example #39
        /// <summary>
        /// Process the input image and render into the output image
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image, can be the same as imageIn, in which case we will render directly into the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public String ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            using (VectorOfMat points = new VectorOfMat())
            {
                Stopwatch watch        = Stopwatch.StartNew();
                String[]  qrCodesFound = _weChatQRCodeDetectionModel.DetectAndDecode(imageIn, points);
                watch.Stop();
                // copy the input to the output first so the drawing below is not overwritten
                if (imageOut != imageIn)
                {
                    using (InputArray iaImageIn = imageIn.GetInputArray())
                    {
                        iaImageIn.CopyTo(imageOut);
                    }
                }
                for (int i = 0; i < qrCodesFound.Length; i++)
                {
                    using (Mat p = points[i])
                    {
                        Point[] contour = MatToPoints(p);

                        using (VectorOfVectorOfPoint vpp = new VectorOfVectorOfPoint(new Point[][] { contour }))
                        {
                            CvInvoke.DrawContours(imageOut, vpp, -1, new MCvScalar(255, 0, 0));
                        }
                    }
                    //CvInvoke.DrawContours(imageOut, points, i, new MCvScalar(255, 0, 0));
                    //CvInvoke.PutText(imageOut, qrCodesFound[i],  );
                }

                //foreach (var detected in detectedObjects)
                //    detected.Render(imageOut, new MCvScalar(0, 0, 255));
                return(String.Format(
                           "QR codes found (in {1} milliseconds): {0}",
                           String.Join(";", Array.ConvertAll(qrCodesFound, s => String.Format("\"{0}\"", s))),
                           watch.ElapsedMilliseconds));
            }

            //var detectedObjects = Detect(imageIn);
        }
Example #40
        public Image <Bgr, byte> ReturnColorChannel(Image <Bgr, byte> image, string channelName = "r")
        {
            int channelIndex = 2; // red by default

            if (channelName == "g")
            {
                channelIndex = 1;
            }
            else if (channelName == "b")
            {
                channelIndex = 0;
            }

            var channel = image.Split()[channelIndex];
            Image <Bgr, byte> destImage = image.CopyBlank();
            VectorOfMat vm = new VectorOfMat();

            // keep the selected channel in its own plane and zero out the other two
            for (int i = 0; i < 3; i++)
            {
                vm.Push(i == channelIndex ? channel : channel.CopyBlank());
            }
            CvInvoke.Merge(vm, destImage);
            return(destImage);
        }
Example #41
        public async Task<HttpResponseMessage> GetPanoDemo()
        {
            try
            {
                var imgUrl1 = @"https://cs.brown.edu/courses/csci1950-g/results/proj6/edwallac/source001_01.jpg";
                var imgUrl2 = @"https://cs.brown.edu/courses/csci1950-g/results/proj6/edwallac/source001_02.jpg";

                var img1Stream = await (new HttpClient()).GetStreamAsync(imgUrl1);
                var img2Stream = await (new HttpClient()).GetStreamAsync(imgUrl2);

                var bitmap1 = new Bitmap(img1Stream);
                var bitmap2 = new Bitmap(img2Stream);

                var img1 = new Image<Bgr, byte>(bitmap1);
                var img2 = new Image<Bgr, byte>(bitmap2);

                var arr = new VectorOfMat();
                arr.Push(new[] { img1, img2 });

                var stitchedImage = new Mat();

                using (var stitcher = new Stitcher(false))
                {
                    stitcher.Stitch(arr, stitchedImage);
                }

                var resultMemStream = new MemoryStream();

                stitchedImage.Bitmap.Save(resultMemStream, ImageFormat.Jpeg);
                resultMemStream.Position = 0;

                var responseMessage = new HttpResponseMessage
                {
                    Content = new StreamContent(resultMemStream)
                    {
                        Headers =
                        {
                            ContentLength = resultMemStream.Length,
                            ContentType = new MediaTypeHeaderValue("image/jpeg"),
                            ContentDisposition = new ContentDispositionHeaderValue("attachment")
                            {
                                FileName = HttpUtility.UrlDecode("result.jpg"),
                                Size = resultMemStream.Length
                            }
                        }
                    }
                };

                return responseMessage;
            }
            catch (Exception e)
            {
                return new HttpResponseMessage(HttpStatusCode.InternalServerError)
                {
                    ReasonPhrase = e.Message
                };
            }
        }
Example #42
        /// <summary>
        /// finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
        /// </summary>
        /// <param name="objectPoints">In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. 
        /// The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and 
        /// it is fully visible, all the vectors will be the same. Although, it is possible to use partially occluded patterns, or even different patterns 
        /// in different views. Then, the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, then, 
        /// if the rig is planar, it may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object point is 0.
        /// In the old interface all the vectors of object points from different views are concatenated together.</param>
        /// <param name="imagePoints">In the new interface it is a vector of vectors of the projections of calibration pattern points. 
        /// imagePoints.Count() and objectPoints.Count() and imagePoints[i].Count() must be equal to objectPoints[i].Count() for each i.</param>
        /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
        /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. 
        /// If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be 
        /// initialized before calling the function.</param>
        /// <param name="distCoeffs">Output vector of distortion coefficients (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.</param>
        /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues() ) estimated for each pattern view. That is, each k-th rotation vector 
        /// together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern 
        /// from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the 
        /// calibration pattern in the k-th pattern view (k=0.. M -1)</param>
        /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
        /// <param name="flags">Different flags that may be zero or a combination of the CalibrationFlag values</param>
        /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
        /// <returns></returns>
        public static double CalibrateCamera(
            IEnumerable<IEnumerable<Point3d>> objectPoints,
            IEnumerable<IEnumerable<Point2d>> imagePoints,
            Size imageSize,
            double[,] cameraMatrix,
            double[] distCoeffs,
            out Vec3d[] rvecs,
            out Vec3d[] tvecs,
            CalibrationFlag flags = CalibrationFlag.Zero,
            TermCriteria? criteria = null)
        {
            if (objectPoints == null)
                throw new ArgumentNullException("objectPoints");
            if (imagePoints == null)
                throw new ArgumentNullException("imagePoints");
            if (cameraMatrix == null)
                throw new ArgumentNullException("cameraMatrix");
            if (distCoeffs == null)
                throw new ArgumentNullException("distCoeffs");

            TermCriteria criteria0 = criteria.GetValueOrDefault(
                new TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, Double.Epsilon));

            using (var op = new ArrayAddress2<Point3d>(objectPoints))
            using (var ip = new ArrayAddress2<Point2d>(imagePoints))
            using (var rvecsVec = new VectorOfMat())
            using (var tvecsVec = new VectorOfMat())
            {
                double ret = NativeMethods.calib3d_calibrateCamera_vector(
                    op.Pointer, op.Dim1Length, op.Dim2Lengths,
                    ip.Pointer, ip.Dim1Length, ip.Dim2Lengths,
                    imageSize, cameraMatrix, distCoeffs, distCoeffs.Length,
                    rvecsVec.CvPtr, tvecsVec.CvPtr, (int)flags, criteria0);
                Mat[] rvecsM = rvecsVec.ToArray();
                Mat[] tvecsM = tvecsVec.ToArray();
                rvecs = EnumerableEx.SelectToArray(rvecsM, m => m.Get<Vec3d>(0));
                tvecs = EnumerableEx.SelectToArray(tvecsM, m => m.Get<Vec3d>(0));
                return ret;
            }
        }
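
A minimal usage sketch for the overload above (an addition, not part of the original snippet): it assumes a 9x6 chessboard with 25 mm squares, that `cornerSets` holds one detected corner set per view, and that System.Linq and System.Collections.Generic are in scope; the helper name and constants are illustrative assumptions.

        // Sketch only: grid size, square size, and this helper are assumptions.
        public static double CalibrateFromCorners(
            IReadOnlyList<Point2d[]> cornerSets,   // one corner set per view, detected elsewhere
            Size imageSize,
            out Vec3d[] rvecs, out Vec3d[] tvecs)
        {
            const int cols = 9, rows = 6;          // assumed chessboard inner-corner counts
            const double square = 25.0;            // assumed square size, in millimetres

            // The same planar pattern (Z = 0) is fully visible in every view,
            // so every element of objectPoints is the same grid.
            var grid = new List<Point3d>();
            for (int y = 0; y < rows; y++)
                for (int x = 0; x < cols; x++)
                    grid.Add(new Point3d(x * square, y * square, 0));
            var objectPoints = cornerSets.Select(_ => grid);

            var cameraMatrix = new double[3, 3];
            var distCoeffs = new double[5];
            return CalibrateCamera(objectPoints, cornerSets, imageSize,
                cameraMatrix, distCoeffs, out rvecs, out tvecs);
        }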
Ejemplo n.º 43
0
        /// <summary>
        /// Finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
        /// </summary>
        /// <param name="objectPoints">In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. 
        /// The outer vector contains as many elements as the number of the pattern views. If the same calibration pattern is shown in each view and 
        /// it is fully visible, all the vectors will be the same. However, it is possible to use partially occluded patterns, or even different patterns 
        /// in different views; then the vectors will differ. The points are 3D, but since they are in a pattern coordinate system, 
        /// if the rig is planar it may make sense to put the model on the XY coordinate plane so that the Z-coordinate of each input object point is 0.
        /// In the old interface all the vectors of object points from different views are concatenated together.</param>
        /// <param name="imagePoints">In the new interface it is a vector of vectors of the projections of calibration pattern points. 
        /// imagePoints.Count() must be equal to objectPoints.Count(), and imagePoints[i].Count() must be equal to objectPoints[i].Count() for each i.</param>
        /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
        /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. 
        /// If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be 
        /// initialized before calling the function.</param>
        /// <param name="distCoeffs">Output vector of distortion coefficients (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.</param>
        /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues() ) estimated for each pattern view. That is, each k-th rotation vector 
        /// together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern 
        /// from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the 
        /// calibration pattern in the k-th pattern view (k = 0..M-1)</param>
        /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
        /// <param name="flags">Different flags that may be zero or a combination of the CalibrationFlag values</param>
        /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
        /// <returns>The final re-projection error.</returns>
        public static double CalibrateCamera(
            IEnumerable<Mat> objectPoints,
            IEnumerable<Mat> imagePoints,
            Size imageSize,
            InputOutputArray cameraMatrix,
            InputOutputArray distCoeffs,
            out Mat[] rvecs, 
            out Mat[] tvecs,
            CalibrationFlag flags = CalibrationFlag.Zero, 
            TermCriteria? criteria = null)
        {
            if (objectPoints == null)
                throw new ArgumentNullException("objectPoints");
            if (imagePoints == null)
                throw new ArgumentNullException("imagePoints");
            if (cameraMatrix == null)
                throw new ArgumentNullException("cameraMatrix");
            if (distCoeffs == null)
                throw new ArgumentNullException("distCoeffs");
            cameraMatrix.ThrowIfNotReady();
            distCoeffs.ThrowIfNotReady();

            TermCriteria criteria0 = criteria.GetValueOrDefault(
                new TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, Double.Epsilon));

            IntPtr[] objectPointsPtrs = EnumerableEx.SelectPtrs(objectPoints);
            IntPtr[] imagePointsPtrs = EnumerableEx.SelectPtrs(imagePoints);

            double ret;
            using (var rvecsVec = new VectorOfMat())
            using (var tvecsVec = new VectorOfMat())
            {
                ret = NativeMethods.calib3d_calibrateCamera_InputArray(
                    objectPointsPtrs, objectPointsPtrs.Length,
                    imagePointsPtrs, imagePointsPtrs.Length,
                    imageSize, cameraMatrix.CvPtr, distCoeffs.CvPtr,
                    rvecsVec.CvPtr, tvecsVec.CvPtr, (int)flags, criteria0);
                rvecs = rvecsVec.ToArray();
                tvecs = tvecsVec.ToArray();
            }

            cameraMatrix.Fix();
            distCoeffs.Fix();
            return ret;
        }
Ejemplo n.º 44
0
      public void TestVectorOfMat()
      {
         Matrix<double> m1 = new Matrix<double>(3, 3);
         m1.SetRandNormal(new MCvScalar(0.0), new MCvScalar(1.0));
         Matrix<int> m2 = new Matrix<int>(4, 4);
         m2.SetRandNormal(new MCvScalar(2), new MCvScalar(2));

         VectorOfMat vec = new VectorOfMat(m1.Mat, m2.Mat);

         Mat tmp1 = vec[0];
         Mat tmp2 = vec[1];
         Matrix<double> n1 = new Matrix<double>(tmp1.Size);
         Matrix<int> n2 = new Matrix<int>(tmp2.Size);
         tmp1.CopyTo(n1, null);
         tmp2.CopyTo(n2, null);

         EmguAssert.IsTrue(m1.Equals(n1));
         EmguAssert.IsTrue(m2.Equals(n2));
      }
Ejemplo n.º 45
0
      public void TestStitching2()
      {
         Image<Bgr, Byte>[] images = new Image<Bgr, byte>[4];

         images[0] = EmguAssert.LoadImage<Bgr, Byte>("stitch1.jpg");
         images[1] = EmguAssert.LoadImage<Bgr, Byte>("stitch2.jpg");
         images[2] = EmguAssert.LoadImage<Bgr, Byte>("stitch3.jpg");
         images[3] = EmguAssert.LoadImage<Bgr, Byte>("stitch4.jpg");

         using (Stitcher stitcher = new Stitcher(false))
         using (OrbFeaturesFinder finder = new OrbFeaturesFinder(new Size(3, 1)))
         {
            stitcher.SetFeaturesFinder(finder);
            Mat result = new Mat();
            using (VectorOfMat vm = new VectorOfMat())
            {
               vm.Push(images);
               stitcher.Stitch(vm, result);
            }
            //Emgu.CV.UI.ImageViewer.Show(result);
         }
      }
Ejemplo n.º 46
0
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="contours">検出された輪郭.各輪郭は,点のベクトルとして格納されます.</param>
        /// <param name="hierarchy">画像のトポロジーに関する情報を含む出力ベクトル.これは,輪郭数と同じ数の要素を持ちます.各輪郭 contours[i] に対して,
        /// 要素 hierarchy[i]のメンバにはそれぞれ,同じ階層レベルに存在する前後の輪郭,最初の子輪郭,および親輪郭の 
        /// contours インデックス(0 基準)がセットされます.また,輪郭 i において,前後,親,子の輪郭が存在しない場合,
        /// それに対応する hierarchy[i] の要素は,負の値になります.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
        /// <param name="hierarchy">Optional output vector, containing information about the image topology. 
        /// It has as many elements as the number of contours. For each i-th contour contours[i], 
        /// the members of the elements hierarchy[i] are set to 0-based indices in contours of the next 
        /// and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
        /// If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.</param>
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
#endif
        public static void FindContours(InputOutputArray image, out Mat[] contours,
            OutputArray hierarchy, ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (hierarchy == null)
                throw new ArgumentNullException("hierarchy");
            image.ThrowIfNotReady();
            hierarchy.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours1_OutputArray(image.CvPtr, out contoursPtr, hierarchy.CvPtr, (int)mode, (int)method, offset0);

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                contours = contoursVec.ToArray();
            }
            image.Fix();
            hierarchy.Fix();
        }
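
A short usage sketch for FindContours (an addition, not part of the original): it assumes an 8-bit binary Mat and the OpenCvSharp-style implicit conversions from Mat to the Input/OutputArray wrapper types; enum member names follow that library's conventions.

        // Sketch only: `binary` is assumed to be an 8-bit single-channel image.
        public static int CountContours(Mat binary)
        {
            using (Mat work = binary.Clone())      // FindContours modifies its input
            using (Mat hierarchy = new Mat())
            {
                Mat[] contours;
                FindContours(work, out contours, hierarchy,
                    ContourRetrieval.External, ContourChain.ApproxSimple);
                int count = contours.Length;
                foreach (Mat c in contours)
                    c.Dispose();                   // the returned Mats are caller-owned
                return count;
            }
        }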
Ejemplo n.º 47
0
        /// <summary>
        /// Returns the covariance matrices.
        /// The number of matrices equals the number of 
        /// Gaussian mixture components; each matrix is a square floating-point NxN matrix, where N is the space dimensionality.
        /// </summary>
        public Mat[] GetCovs()
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            using (var vec = new VectorOfMat())
            {
                NativeMethods.ml_EM_getCovs(ptr, vec.CvPtr);
                return vec.ToArray();
            }
        }
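
A small sketch of inspecting the result (an addition), assuming `em` stands for an already-trained EM instance:

            // Sketch only: `em` is a trained EM model.
            Mat[] covs = em.GetCovs();
            Console.WriteLine("{0} mixture components", covs.Length);
            if (covs.Length > 0)
                Console.WriteLine("each covariance is {0}x{0}", covs[0].Rows);
            foreach (Mat c in covs)
                c.Dispose();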
Ejemplo n.º 48
0
      public void TestStitching4()
      {
         Mat[] images = new Mat[1];

         images[0] = EmguAssert.LoadMat("stitch1.jpg");
         

         using (Stitcher stitcher = new Stitcher(false))
         //using (OrbFeaturesFinder finder = new OrbFeaturesFinder(new Size(3, 1)))
         {
            //stitcher.SetFeaturesFinder(finder);
            Mat result = new Mat();
            using (VectorOfMat vm = new VectorOfMat())
            {
               vm.Push(images);
               stitcher.Stitch(vm, result);
            }
            //Emgu.CV.UI.ImageViewer.Show(result);
         }
      }
Ejemplo n.º 49
0
 /// <summary>
 /// Get train descriptors collection.
 /// </summary>
 /// <returns>The train descriptors collection.</returns>
 public Mat[] GetTrainDescriptors()
 {
     ThrowIfDisposed();
     using (var matVec = new VectorOfMat())
     {
         NativeMethods.features2d_DescriptorMatcher_getTrainDescriptors(ptr, matVec.CvPtr);
         return matVec.ToArray();
     }
 }
Ejemplo n.º 50
0
        /// <summary>
        /// Builds a difference-of-Gaussians (DoG) pyramid from a Gaussian pyramid.
        /// </summary>
        /// <param name="pyr">Gaussian pyramid, e.g. as built by BuildGaussianPyramid.</param>
        /// <returns>The DoG pyramid; each level is the difference of two adjacent Gaussian levels.</returns>
        public Mat[] BuildDoGPyramid(IEnumerable<Mat> pyr)
        {
            ThrowIfDisposed();
            if (pyr == null)
                throw new ArgumentNullException("pyr");

            IntPtr[] pyrPtrs = EnumerableEx.SelectPtrs(pyr);
            using (VectorOfMat dogPyrVec = new VectorOfMat())
            {
                NativeMethods.nonfree_SIFT_buildDoGPyramid(ptr, pyrPtrs, pyrPtrs.Length, dogPyrVec.CvPtr);
                return dogPyrVec.ToArray();
            }
        }
Ejemplo n.º 51
0
        /// <summary>
        /// Builds a Gaussian scale-space pyramid from the base image.
        /// </summary>
        /// <param name="baseMat">The base (finest-scale) image of the pyramid.</param>
        /// <param name="nOctaves">Number of octaves to build.</param>
        /// <returns>The constructed Gaussian pyramid.</returns>
        public Mat[] BuildGaussianPyramid(Mat baseMat, int nOctaves)
        {
            ThrowIfDisposed();
            if (baseMat == null)
                throw new ArgumentNullException("baseMat");
            baseMat.ThrowIfDisposed();

            using (VectorOfMat pyrVec = new VectorOfMat())
            {
                NativeMethods.nonfree_SIFT_buildGaussianPyramid(ptr, baseMat.CvPtr, pyrVec.CvPtr, nOctaves);
                return pyrVec.ToArray();
            }
        }
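
The two SIFT helpers above chain naturally. A minimal sketch (an addition), assuming `sift` is a SIFT instance and `gray` an 8-bit grayscale Mat:

        // Sketch only: `sift` and `gray` are assumed to exist.
        Mat[] gaussPyr = sift.BuildGaussianPyramid(gray, 4);
        Mat[] dogPyr = sift.BuildDoGPyramid(gaussPyr);
        // Each DoG level is the difference of two adjacent Gaussian levels.
        foreach (Mat m in dogPyr) m.Dispose();
        foreach (Mat m in gaussPyr) m.Dispose();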
Ejemplo n.º 52
0
      /// <summary>
      /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="rotationVectors">The output 3xM or Mx3 array of rotation vectors (compact representation of rotation matrices, see cvRodrigues2). </param>
      /// <param name="translationVectors">The output 3xM or Mx3 array of translation vectors</param>/// <param name="calibrationType">cCalibration type</param>
      /// <param name="termCriteria">The termination criteria</param>
      /// <param name="cameraMatrix">The output camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATION are specified, some or all of fx, fy, cx, cy must be initialized</param>
      /// <param name="distortionCoeffs">The output 4x1 or 1x4 vector of distortion coefficients [k1, k2, p1, p2]</param>
      /// <returns>The final reprojection error</returns>
      public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IInputOutputArray cameraMatrix,
         IInputOutputArray distortionCoeffs,
         CvEnum.CalibType calibrationType,
         MCvTermCriteria termCriteria,
         out Mat[] rotationVectors,
         out Mat[] translationVectors)
      {
         System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length,
            "The number of images for objects points should be equal to the number of images for image points");
         int imageCount = objectPoints.Length;

         using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
         using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
         {
            double reprojectionError;
            using (VectorOfMat rVecs = new VectorOfMat())
            using (VectorOfMat tVecs = new VectorOfMat())
            {
               reprojectionError = CvInvoke.CalibrateCamera(
                  vvObjPts,
                  vvImgPts,
                  imageSize,
                  cameraMatrix,
                  distortionCoeffs,
                  rVecs,
                  tVecs,
                  calibrationType,
                  termCriteria);

               rotationVectors = new Mat[imageCount];
               translationVectors = new Mat[imageCount];
               for (int i = 0; i < imageCount; i++)
               {
                   // Copy each estimated vector out of the native result vectors.
                   rotationVectors[i] = new Mat();
                   using (Mat matR = rVecs[i])
                      matR.CopyTo(rotationVectors[i]);
                   translationVectors[i] = new Mat();
                   using (Mat matT = tVecs[i])
                      matT.CopyTo(translationVectors[i]);
               }
            }
            return reprojectionError;
         }
      }
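
A minimal usage sketch for the Emgu wrapper above (an addition, not part of the original): it assumes a 9x6 chessboard with 25 mm squares and that `cornerSets` was filled by CvInvoke.FindChessboardCorners on each view; the helper name and constants are illustrative assumptions.

      // Sketch only: grid size, square size, and this helper are assumptions.
      public static double CalibrateSketch(PointF[][] cornerSets, Size imageSize,
         out Mat[] rotationVectors, out Mat[] translationVectors)
      {
         const int cols = 9, rows = 6;
         const float square = 25f;
         MCvPoint3D32f[] grid = new MCvPoint3D32f[rows * cols];
         for (int y = 0; y < rows; y++)
            for (int x = 0; x < cols; x++)
               grid[y * cols + x] = new MCvPoint3D32f(x * square, y * square, 0);

         MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[cornerSets.Length][];
         for (int i = 0; i < cornerSets.Length; i++)
            objectPoints[i] = grid;               // same planar pattern in every view

         Mat cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
         Mat distortionCoeffs = new Mat(8, 1, DepthType.Cv64F, 1);
         return CalibrateCamera(objectPoints, cornerSets, imageSize,
            cameraMatrix, distortionCoeffs, CvEnum.CalibType.Default,
            new MCvTermCriteria(30, 1.0e-10), out rotationVectors, out translationVectors);
      }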
Ejemplo n.º 53
0
 public void TestDenseHistogram4()
 {
    Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
    CvInvoke.Randu(img, new MCvScalar(), new MCvScalar(255, 255, 255));
    Mat hist = new Mat();
    using (VectorOfMat vms = new VectorOfMat(img))
    {
       CvInvoke.CalcHist(vms, new int[] { 0, 1, 2 }, null, hist, new int[] { 20, 20, 20 },
          new float[] { 0, 255, 0, 255, 0, 255 }, true);
       byte[] bytes = hist.GetData();
    }
 }
Ejemplo n.º 54
0
#if LANG_JP
        /// <summary>
        /// 2値画像中の輪郭を検出します.
        /// </summary>
        /// <param name="image">入力画像,8ビット,シングルチャンネル.0以外のピクセルは 1として,0のピクセルは0のまま扱われます.
        /// また,この関数は,輪郭抽出処理中に入力画像 image の中身を書き換えます.</param>
        /// <param name="mode">輪郭抽出モード</param>
        /// <param name="method">輪郭の近似手法</param>
        /// <param name="offset">オプションのオフセット.各輪郭点はこの値の分だけシフトします.これは,ROIの中で抽出された輪郭を,画像全体に対して位置づけて解析する場合に役立ちます.</param>
        /// <return>検出された輪郭.各輪郭は,点のベクトルとして格納されます.</return>
#else
        /// <summary>
        /// Finds contours in a binary image.
        /// </summary>
        /// <param name="image">Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. 
        /// Zero pixels remain 0’s, so the image is treated as binary.
        /// The function modifies the image while extracting the contours.</param> 
        /// <param name="mode">Contour retrieval mode</param>
        /// <param name="method">Contour approximation method</param>
        /// <param name="offset"> Optional offset by which every contour point is shifted. 
        /// This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.</param>
        /// <returns>Detected contours. Each contour is stored as a vector of points.</returns>
#endif
        public static MatOfPoint[] FindContoursAsMat(InputOutputArray image, 
            ContourRetrieval mode, ContourChain method, Point? offset = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfNotReady();

            CvPoint offset0 = offset.GetValueOrDefault(new Point());
            IntPtr contoursPtr;
            NativeMethods.imgproc_findContours2_OutputArray(image.CvPtr, out contoursPtr, (int)mode, (int)method, offset0);
            image.Fix();

            using (var contoursVec = new VectorOfMat(contoursPtr))
            {
                return contoursVec.ToArray<MatOfPoint>();
            }
        }
Ejemplo n.º 55
0
      public void TestHistogram()
      {
         using (Image<Bgr, Byte> img = EmguAssert.LoadImage<Bgr, Byte>("stuff.jpg"))
         using (Image<Hsv, Byte> img2 = img.Convert<Hsv, Byte>())
         {
            Image<Gray, Byte>[] HSVs = img2.Split();

            using (Mat h = new Mat())
            using (Mat bpj = new Mat())
            using (VectorOfMat vm = new VectorOfMat())
            {
               vm.Push(HSVs[0]);
               CvInvoke.CalcHist(vm, new int[] { 0 }, null, h, new int[] { 20 }, new float[] { 0, 180 }, false);
               CvInvoke.CalcBackProject(vm, new int[] { 0 }, h, bpj, new float[] { 0, 180 }, 0.1);

               //Emgu.CV.UI.HistogramViewer.Show(bpj);
               //Emgu.CV.UI.ImageViewer.Show(bpj);
               //h.Calculate(new Image<Gray, Byte>[1] { HSVs[0] }, true, null);
               //using (Image<Gray, Byte> bpj = h.BackProject(new Image<Gray, Byte>[1] { HSVs[0] }))
               //{
               //   Size sz = bpj.Size;
               //}

               //using (Image<Gray, Single> patchBpj = h.BackProjectPatch(
               //   new Image<Gray, Byte>[1] { HSVs[0] },
               //   new Size(5, 5),
               //   Emgu.CV.CvEnum.HISTOGRAM_COMP_METHOD.CV_COMP_CHISQR,
               //   1.0))
               //{
               //   Size sz = patchBpj.Size;
               //}
            }

            foreach (Image<Gray, Byte> i in HSVs)
               i.Dispose();
         }
      }
Ejemplo n.º 56
0
        /// <summary>
        /// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
        /// </summary>
        /// <param name="img">8-bit input image.</param>
        /// <param name="pyramid">output pyramid.</param>
        /// <param name="winSize">window size of optical flow algorithm. 
        /// Must be not less than winSize argument of calcOpticalFlowPyrLK(). 
        /// It is needed to calculate required padding for pyramid levels.</param>
        /// <param name="maxLevel">0-based maximal pyramid level number.</param>
        /// <param name="withDerivatives">set to precompute gradients for the every pyramid level. 
        /// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will 
        /// calculate them internally.</param>
        /// <param name="pyrBorder">the border mode for pyramid layers.</param>
        /// <param name="derivBorder">the border mode for gradients.</param>
        /// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible. 
        /// You can pass false to force data copying.</param>
        /// <returns>number of levels in constructed pyramid. Can be less than maxLevel.</returns>
        public static int BuildOpticalFlowPyramid(
            InputArray img, out Mat[] pyramid,
            Size winSize, int maxLevel,
            bool withDerivatives = true,
            BorderTypes pyrBorder = BorderTypes.Reflect101,
            BorderTypes derivBorder = BorderTypes.Constant,
            bool tryReuseInputImage = true)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (var pyramidVec = new VectorOfMat())
            {
                int result = NativeMethods.video_buildOpticalFlowPyramid2(
                    img.CvPtr, pyramidVec.CvPtr, winSize, maxLevel, withDerivatives ? 1 : 0,
                    (int) pyrBorder, (int) derivBorder, tryReuseInputImage ? 1 : 0);
                pyramid = pyramidVec.ToArray();
                return result;
            }
        }
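
A short usage sketch (an addition), assuming `prev` is an 8-bit grayscale Mat that converts implicitly to InputArray:

        // Sketch only: window size and max level mirror typical PyrLK settings.
        public static void ReportPyramid(Mat prev)
        {
            Mat[] pyramid;
            int levels = BuildOpticalFlowPyramid(prev, out pyramid, new Size(21, 21), 3);
            Console.WriteLine("pyramid levels: {0} (may be fewer than requested)", levels);
            foreach (Mat m in pyramid)
                m.Dispose();
        }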
Ejemplo n.º 57
0
        /// <summary>
        /// Copies each plane of a multi-channel array to a dedicated array
        /// </summary>
        /// <param name="src">The source multi-channel array</param>
        /// <param name="mv">The destination array or vector of arrays; 
        /// The number of arrays must match mtx.channels() . 
        /// The arrays themselves will be reallocated if needed</param>
        public static void Split(Mat src, out Mat[] mv)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            src.ThrowIfDisposed();

            IntPtr mvPtr;
            NativeMethods.core_split(src.CvPtr, out mvPtr);

            using (var vec = new VectorOfMat(mvPtr))
            {
                mv = vec.ToArray();
            }
            GC.KeepAlive(src);
        }
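
A minimal usage sketch for Split (an addition); MatType and Scalar follow OpenCvSharp naming and may differ between library versions:

        // Sketch only: a synthetic 3-channel image stands in for real input.
        using (Mat src = new Mat(100, 100, MatType.CV_8UC3, Scalar.All(128)))
        {
            Mat[] planes;
            Split(src, out planes);                          // one single-channel Mat per channel
            Console.WriteLine("planes: {0}", planes.Length); // 3 for a BGR image
            foreach (Mat p in planes)
                p.Dispose();                                 // the returned Mats are caller-owned
        }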
Ejemplo n.º 58
0
      public override void ProcessData(Mat sourceImage, Mat destImage)
      {
         Size size = sourceImage.Size;

         Mat i0 = GetBufferGray(size, 0);
         Mat i1 = GetBufferGray(size, 1);
         Mat i2 = GetBufferGray(size, 2);
         Mat i3 = GetBufferGray(size, 3);
         CvInvoke.ExtractChannel(sourceImage, i0, 0);
         CvInvoke.Canny(i0, i1, _thresh, _threshLinking, _apertureSize );
         CvInvoke.ExtractChannel(sourceImage, i0, 1);
         CvInvoke.Canny(i0, i2, _thresh, _threshLinking, _apertureSize);
         CvInvoke.ExtractChannel(sourceImage, i0, 2);
         CvInvoke.Canny(i0, i3, _thresh, _threshLinking, _apertureSize);
         using (VectorOfMat vm = new VectorOfMat(i1, i2, i3))
         {
            CvInvoke.Merge(vm, destImage );
         }

      }
Ejemplo n.º 59
-1
        public static bool Stitch(IEnumerable<string> fileList, string saveFileLocation)
        {
            // Materialize the images up front: a deferred LINQ query would create
            // (and leak) a fresh set of images when re-enumerated in the finally block.
            var imageArray = fileList.Select(fileName => new Image<Bgr, byte>(fileName))
                                     .ToArray();

            try
            {

                using (var stitcher = new Stitcher(false))
                {
                    using (var vm = new VectorOfMat())
                    {
                        var result = new Mat();
                        vm.Push(imageArray);
                        stitcher.Stitch(vm, result);
                        result.Save(saveFileLocation);
                    }
                }

                return true;
            }
            catch (Exception ex)
            {
                Logger.Error("Failed to stitch!", ex);
                return false;
            }
            finally
            {
                foreach (Image<Bgr, Byte> img in imageArray)
                {
                    img.Dispose();
                }
            }
        }
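
A hypothetical call site for the helper above; the folder and file names are placeholders.

        // Sketch only: paths are placeholders.
        string[] files = Directory.GetFiles(@"C:\panoramas", "*.jpg");
        bool ok = Stitch(files, @"C:\panoramas\result.jpg");
        Console.WriteLine(ok ? "Panorama saved." : "Stitching failed; see log.");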