Example #1
        /// <summary>
        /// Computes dense optical flow using Gunnar Farneback's algorithm
        /// </summary>
        /// <param name="prev0">The first 8-bit single-channel input image</param>
        /// <param name="next0">The second input image of the same size and the same type as prevImg</param>
        /// <param name="flowX">The computed flow image for x-velocity; will have the same size as prevImg</param>
        /// <param name="flowY">The computed flow image for y-velocity; will have the same size as prevImg</param>
        /// <param name="pyrScale">Specifies the image scale (!1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
        /// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
        /// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
        /// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
        /// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly n=5 or 7</param>
        /// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly n=5 you can set poly sigma=1.1, for poly n=7 a good value would be poly sigma=1.5</param>
        /// <param name="flags">The operation flags</param>
        public static void CalcOpticalFlowFarneback(
            Image<Gray, Byte> prev0,
            Image<Gray, Byte> next0,
            Image<Gray, Single> flowX,
            Image<Gray, Single> flowY,
            double pyrScale,
            int levels,
            int winSize,
            int iterations,
            int polyN,
            double polySigma,
            CvEnum.OpticalflowFarnebackFlag flags)
        {
            using (Mat flow0 = new Mat(prev0.Height, prev0.Width, CvEnum.DepthType.Cv32F, 2))
                using (Util.VectorOfMat vm = new Util.VectorOfMat(new Mat[] { flowX.Mat, flowY.Mat }))
                {
                    if ((int)(flags & Emgu.CV.CvEnum.OpticalflowFarnebackFlag.UseInitialFlow) != 0)
                    { //use initial flow
                        CvInvoke.Merge(vm, flow0);
                    }

                    CvInvoke.CalcOpticalFlowFarneback(prev0, next0, flow0, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
                    CvInvoke.Split(flow0, vm);
                }
        }
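A minimal usage sketch for the wrapper above. The frame file names, the pyramid/window settings, and the Default flag value are illustrative assumptions, and the static wrapper is assumed to be in scope:

using Emgu.CV;
using Emgu.CV.Structure;

Image<Gray, Byte> prev = new Image<Gray, Byte>("frame0.png");
Image<Gray, Byte> next = new Image<Gray, Byte>("frame1.png");
Image<Gray, Single> flowX = new Image<Gray, Single>(prev.Size);
Image<Gray, Single> flowY = new Image<Gray, Single>(prev.Size);

// Typical parameter choices; polySigma=1.1 matches the recommendation for polyN=5 above.
CalcOpticalFlowFarneback(prev, next, flowX, flowY,
    pyrScale: 0.5, levels: 3, winSize: 15, iterations: 3,
    polyN: 5, polySigma: 1.1,
    flags: CvEnum.OpticalflowFarnebackFlag.Default);

// flowX and flowY now hold the per-pixel x and y displacements from prev to next.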
Example #2
 /// <summary>
 /// The primal-dual algorithm is an algorithm for solving special types of variational problems (that is, finding a function that minimizes some functional).
 /// As image denoising, in particular, may be seen as a variational problem, the primal-dual algorithm can be used to perform
 /// denoising, and this is exactly what is implemented here.
 /// </summary>
 /// <param name="observations">This array should contain one or more noised versions of the image that is to be restored.</param>
 /// <param name="result">Here the denoised image will be stored. There is no need to do pre-allocation of storage space, as it will be automatically allocated, if necessary.</param>
 /// <param name="lambda">Corresponds to  in the formulas above. As it is enlarged, the smooth (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly speaking, as it becomes smaller, the result will be more blur but more sever outliers will be removed.</param>
 /// <param name="niters">Number of iterations that the algorithm will run. Of course, as more iterations as better, but it is hard to quantitatively refine this statement, so just use the default and increase it if the results are poor.</param>
 public static void DenoiseTVL1(Mat[] observations, Mat result, double lambda, int niters)
 {
     using (Util.VectorOfMat vm = new Util.VectorOfMat(observations))
     {
         cveDenoiseTVL1(vm, result, lambda, niters);
     }
 }
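A minimal usage sketch. The file name is a placeholder, lambda = 1.0 and niters = 30 mirror OpenCV's documented defaults for denoise_TVL1, and the wrapper above is assumed to be in scope:

using Emgu.CV;
using Emgu.CV.CvEnum;

// Denoise a single grayscale observation; passing several noisy shots of the
// same scene in the array generally improves the estimate.
Mat noisy = CvInvoke.Imread("noisy.png", ImreadModes.Grayscale);
Mat denoised = new Mat();
DenoiseTVL1(new Mat[] { noisy }, denoised, 1.0, 30);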
Example #3
    public static Array GetHistogramOfImage(Mat image, char channel = 'b', int size = 256, float range = 256)
    {
        if (image == null)
        {
            return null;
        }

        Mat hist = new Mat();

        using (Emgu.CV.Util.VectorOfMat vm = new Emgu.CV.Util.VectorOfMat())
        {
            int[] histoChannel = { 0 };
            if (channel == 'b')
            {
                histoChannel = new int[] { 0 };
            }
            if (channel == 'g')
            {
                histoChannel = new int[] { 1 };
            }
            if (channel == 'r')
            {
                histoChannel = new int[] { 2 };
            }

            int[]   histoSize  = { size };
            float[] histoRange = { 0.0f, range };


            vm.Push(image);
            CvInvoke.CalcHist(vm, histoChannel, null, hist, histoSize, histoRange, false);

            //CvInvoke.Normalize(hist, hist, 0, image.Rows, NormType.MinMax);
        }

        return hist.GetData().Cast<float>().ToArray();
    }
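A minimal usage sketch for GetHistogramOfImage (the file name is a placeholder), reading back the 256 green-channel bin counts:

using Emgu.CV;
using Emgu.CV.CvEnum;

Mat img = CvInvoke.Imread("photo.jpg", ImreadModes.Color);
float[] greenHist = (float[])GetHistogramOfImage(img, 'g');
// greenHist[i] is the number of pixels whose green value equals i.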
Example #4

        /// <summary>
        /// Generate histograms for the image. One histogram is generated for each color channel.
        /// You will need to call the Refresh function to do the painting afterward.
        /// </summary>
        /// <param name="image">The image to generate histogram from</param>
        /// <param name="numberOfBins">The number of bins for each histogram</param>
        public void GenerateHistograms(IImage image, int numberOfBins)
        {
            Mat[] channels = new Mat[image.NumberOfChannels];
            Type  imageType;

            if ((imageType = Toolbox.GetBaseType(image.GetType(), "Image`2")) != null ||
                (imageType = Toolbox.GetBaseType(image.GetType(), "Mat")) != null ||
                (imageType = Toolbox.GetBaseType(image.GetType(), "UMat")) != null)
            {
                for (int i = 0; i < image.NumberOfChannels; i++)
                {
                    Mat channel = new Mat();
                    CvInvoke.ExtractChannel(image, channel, i);
                    channels[i] = channel;
                }
            }
            else if ((imageType = Toolbox.GetBaseType(image.GetType(), "CudaImage`2")) != null)
            {
                IImage img = imageType.GetMethod("ToImage").Invoke(image, null) as IImage;
                for (int i = 0; i < img.NumberOfChannels; i++)
                {
                    Mat channel = new Mat();
                    CvInvoke.ExtractChannel(img, channel, i);
                    channels[i] = channel;
                }
            }
            else
            {
                throw new ArgumentException(String.Format("The input image type of {0} is not supported", image.GetType().ToString()));
            }

            Type[]   genericArguments = imageType.GetGenericArguments();
            String[] channelNames;
            Color[]  colors;
            Type     typeOfDepth;

            if (genericArguments.Length > 0)
            {
                IColor typeOfColor = Activator.CreateInstance(genericArguments[0]) as IColor;
                channelNames = Emgu.CV.Reflection.ReflectColorType.GetNamesOfChannels(typeOfColor);
                colors       = Emgu.CV.Reflection.ReflectColorType.GetDisplayColorOfChannels(typeOfColor);
                typeOfDepth  = imageType.GetGenericArguments()[1];
            }
            else
            {
                channelNames = new String[image.NumberOfChannels];
                colors       = new Color[image.NumberOfChannels];
                for (int i = 0; i < image.NumberOfChannels; i++)
                {
                    channelNames[i] = String.Format("Channel {0}", i);
                    colors[i]       = Color.Red;
                }

                if (image is Mat)
                {
                    typeOfDepth = CvInvoke.GetDepthType(((Mat)image).Depth);
                }
                else if (image is UMat)
                {
                    typeOfDepth = CvInvoke.GetDepthType(((UMat)image).Depth);
                }
                else
                {
                    throw new ArgumentException(String.Format("Unable to get the type of depth from image of type {0}", image.GetType().ToString()));
                }
            }

            float minVal, maxVal;

            #region Get the maximum and minimum color intensity values

            if (typeOfDepth == typeof(Byte))
            {
                minVal = 0.0f;
                maxVal = 256.0f;
            }
            else
            {
                #region obtain the maximum and minimum color value
                double[] minValues, maxValues;
                Point[]  minLocations, maxLocations;
                image.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);

                double min = minValues[0], max = maxValues[0];
                for (int i = 1; i < minValues.Length; i++)
                {
                    if (minValues[i] < min)
                    {
                        min = minValues[i];
                    }
                    if (maxValues[i] > max)
                    {
                        max = maxValues[i];
                    }
                }
                #endregion

                minVal = (float)min;
                maxVal = (float)max;
            }
            #endregion

            for (int i = 0; i < channels.Length; i++)
            {
                //using (DenseHistogram hist = new DenseHistogram(numberOfBins, new RangeF(minVal, maxVal)))
                using (Mat hist = new Mat())
                    using (Emgu.CV.Util.VectorOfMat vm = new Emgu.CV.Util.VectorOfMat())
                    {
                        vm.Push(channels[i]);

                        float[] ranges = new float[] { minVal, maxVal };
                        CvInvoke.CalcHist(vm, new int[] { 0 }, null, hist, new int[] { numberOfBins }, ranges, false);
                        //hist.Calculate(new IImage[1] { channels[i] }, true, null);
                        AddHistogram(channelNames[i], colors[i], hist, numberOfBins, ranges);
                    }
            }
        }
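A minimal usage sketch, assuming this method sits on a WinForms histogram control such as Emgu.CV.UI.HistogramBox (inferred from the AddHistogram and Refresh references; the file name is a placeholder):

using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.UI;

Image<Bgr, Byte> img = new Image<Bgr, Byte>("photo.jpg");
HistogramBox histogramBox = new HistogramBox();
histogramBox.GenerateHistograms(img, 256); // one 256-bin histogram per channel
histogramBox.Refresh();                    // paint the histograms afterwards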
Example #5
 /// <summary>
 /// Create a debugger proxy that exposes the content of the given VectorOfMat to the debugger
 /// </summary>
 /// <param name="v">The vector to wrap</param>
 public DebuggerProxy(VectorOfMat v)
 {
     _v = v;
 }
Example #6
 /// <summary>
 /// Push multiple values from the other vector into this vector
 /// </summary>
 /// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
 public void Push(VectorOfMat other)
 {
     VectorOfMatPushVector(_ptr, other);
 }
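A minimal sketch of pushing the contents of one VectorOfMat into another:

using Emgu.CV;
using Emgu.CV.Util;

using (VectorOfMat source = new VectorOfMat(new Mat[] { new Mat(), new Mat() }))
using (VectorOfMat target = new VectorOfMat())
{
    target.Push(source);     // target now holds the two Mats from source
    int count = target.Size; // 2
}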