Example #1
 /// <summary>
 /// Apply color map to the image
 /// </summary>
 /// <param name="src">
 /// The source image.
 /// This function expects Image&lt;Bgr, Byte&gt; or Image&lt;Gray, Byte&gt;. If the wrong image type is given, the original image
 /// will be returned.</param>
 /// <param name="dst">The destination image</param>
 /// <param name="colorMapType">The type of color map</param>
 public static void ApplyColorMap(IInputArray src, IOutputArray dst, CvEnum.ColorMapType colorMapType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveApplyColorMap(iaSrc, oaDst, colorMapType);
 }
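A minimal usage sketch for the wrapper above (assuming the usual Emgu.CV, Emgu.CV.CvEnum and Emgu.CV.Structure usings; file names are placeholders):
 using (Mat gray = CvInvoke.Imread("depth.png", ImreadModes.Grayscale))
 using (Mat colored = new Mat())
 {
     // Jet maps low intensities to blue and high intensities to red.
     CvInvoke.ApplyColorMap(gray, colored, ColorMapType.Jet);
     CvInvoke.Imwrite("depth_jet.png", colored);
 }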
Example #2
 private static Size InputArrGetSize(IInputArray arr)
 {
     using (InputArray ia = arr.GetInputArray())
         return ia.GetSize();
 }
Example #3
 /// <summary>
 /// Calculate the square root of each source array element. In the case of multichannel
 /// arrays, each channel is processed independently. The function accuracy is approximately
 /// the same as that of the built-in std::sqrt.
 /// </summary>
 /// <param name="src">The source floating-point array</param>
 /// <param name="dst">The destination array; will have the same size and the same type as src</param>
 public static void Sqrt(IInputArray src, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveSqrt(iaSrc, oaDst);
 }
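A small sketch of the call above; in-place operation (src == dst) works since the output has the same size and type as the input:
 using (Mat m = new Mat(2, 2, DepthType.Cv32F, 1))
 {
     m.SetTo(new MCvScalar(16.0));
     CvInvoke.Sqrt(m, m); // every element becomes 4.0
 }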
Example #4
 /// <summary>
 /// Constructs a WCloud.
 /// </summary>
 /// <param name="cloud">Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.</param>
 /// <param name="color">Set of colors. It has to be of the same size with cloud.</param>
 public WCloud(IInputArray cloud, IInputArray color)
 {
     using (InputArray iaCloud = cloud.GetInputArray())
         using (InputArray iaColor = color.GetInputArray())
             CvInvoke.cveWCloudCreateWithColorArray(iaCloud, iaColor, ref _widget3dPtr, ref _widgetPtr);
 }
Example #5
 /// <summary>
 /// Copy the data in this UMat to another array
 /// </summary>
 /// <param name="m">The output array to copy to</param>
 /// <param name="mask">Operation mask. Its non-zero elements indicate which matrix elements need to be copied.</param>
 public void CopyTo(IOutputArray m, IInputArray mask = null)
 {
     using (OutputArray oaM = m.GetOutputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             UMatInvoke.cveUMatCopyTo(this, oaM, iaMask);
 }
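A sketch of a masked copy using the method above (Rectangle comes from System.Drawing; sizes and the region are placeholders):
 using (UMat src = new UMat(480, 640, DepthType.Cv8U, 3))
 using (Mat dst = new Mat())
 using (Mat mask = new Mat(480, 640, DepthType.Cv8U, 1))
 {
     src.SetTo(new MCvScalar(255, 0, 0));
     mask.SetTo(new MCvScalar(0));
     // Only pixels under non-zero mask values are copied to dst.
     CvInvoke.Rectangle(mask, new Rectangle(100, 100, 200, 150), new MCvScalar(255), -1);
     src.CopyTo(dst, mask);
 }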
Example #6
 /// <summary>
 /// Detect the location of the QR code
 /// </summary>
 /// <param name="input">The input image</param>
 /// <param name="points">The location of the QR code in the image</param>
 /// <returns>True if a QRCode is found.</returns>
 public bool Detect(IInputArray input, IOutputArray points)
 {
     using (InputArray iaInput = input.GetInputArray())
         using (OutputArray oaPoints = points.GetOutputArray())
             return CvInvoke.cveQRCodeDetectorDetect(_ptr, iaInput, oaPoints);
 }
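A usage sketch, assuming QRCodeDetector exposes a parameterless constructor (the file name is a placeholder):
 using (QRCodeDetector detector = new QRCodeDetector())
 using (Mat image = CvInvoke.Imread("qr.png", ImreadModes.Color))
 using (Mat points = new Mat())
 {
     if (detector.Detect(image, points))
     {
         // points now holds the four corners of the detected QR code.
     }
 }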
Example #7
        /// <summary>
        /// Convert an input array to texture 2D
        /// </summary>
        /// <param name="image">The input image, if 3 channel, we assume it is Bgr, if 4 channels, we assume it is Bgra</param>
        /// <param name="flipType"></param>
        /// <param name="buffer">Optional buffer for the texture conversion, should be big enough to hold the image data. e.g. width*height*pixel_size</param>
        /// <returns>The texture 2D</returns>
        public static Texture2D InputArrayToTexture2D(IInputArray image, Emgu.CV.CvEnum.FlipType flipType = FlipType.Vertical, byte[] buffer = null)
        {
            using (InputArray iaImage = image.GetInputArray())
            {
                Size size = iaImage.GetSize();

                if (iaImage.GetChannels() == 3 && iaImage.GetDepth() == DepthType.Cv8U && SystemInfo.SupportsTextureFormat(TextureFormat.RGB24))
                {
                    //assuming 3 channel image is of BGR color
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);

                    byte[] data;
                    int    bufferLength = size.Width * size.Height * 3;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(String.Format("Buffer size ({0}) is not big enough for the RBG24 texture, width * height * 3 = {1} is required.", buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(
                               size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
                    {
                        rgb.ConvertFrom(image);
                        if (flipType != FlipType.None)
                        {
                            CvInvoke.Flip(rgb, rgb, flipType);
                        }
                    }
                    dataHandle.Free();
                    texture.LoadRawTextureData(data);
                    texture.Apply();
                    return texture;
                }
                else if (SystemInfo.SupportsTextureFormat(TextureFormat.RGBA32))
                {
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
                    byte[]    data;
                    int       bufferLength = size.Width * size.Height * 4;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(
                                      String.Format(
                                          "Buffer size ({0}) is not big enough for the RGBA32 texture, width * height * 4 = {1} is required.",
                                          buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(
                               size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
                    {
                        rgba.ConvertFrom(image);
                        if (flipType != FlipType.None)
                        {
                            CvInvoke.Flip(rgba, rgba, flipType);
                        }
                    }
                    dataHandle.Free();
                    texture.LoadRawTextureData(data);

                    texture.Apply();
                    return texture;
                }
                else
                {
                    throw new Exception("TextureFormat of RGBA32 is not supported on this device");
                }
            }
        }
Example #8
File: UMat.cs Project: v5chn/emgucv
 /// <summary>
 /// Computes the dot product of two mats
 /// </summary>
 /// <param name="mat">The matrix to compute dot product with</param>
 /// <returns>The dot product</returns>
 public double Dot(IInputArray mat)
 {
     using (InputArray iaMat = mat.GetInputArray())
         return UMatInvoke.cveUMatDot(Ptr, iaMat);
 }
Example #9
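 /// <summary>
 /// Detect a QR code in the image.
 /// </summary>
 /// <param name="input">The input image</param>
 /// <param name="points">The location of the detected QR code in the image</param>
 /// <param name="epsX">Epsilon neighborhood, used during the horizontal scan for the 1:1:3:1:1 QR code finder pattern</param>
 /// <param name="epsY">Epsilon neighborhood, used during the vertical scan for the 1:1:3:1:1 QR code finder pattern</param>
 /// <returns>True if a QR code is found.</returns>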
 public static bool DetectQRCode(IInputArray input, VectorOfPoint points, double epsX, double epsY)
 {
     using (InputArray iaInput = input.GetInputArray())
         return cveDetectQRCode(iaInput, points, epsX, epsY);
 }
Example #10
 /// <summary>
 /// Detects Barcode in image and returns the rectangle(s) containing the code.
 /// </summary>
 /// <param name="image">grayscale or color (BGR) image containing (or not) Barcode.</param>
 /// <param name="points">
 /// Output vector of vector of vertices of the minimum-area rotated rectangle containing the codes.
 /// For N detected barcodes, the dimensions of this array should be [N][4].
 /// Order of four points in VectorOfPointF is bottomLeft, topLeft, topRight, bottomRight.
 /// </param>
 /// <returns>True if a barcode is found.</returns>
 public bool Detect(IInputArray image, IOutputArray points)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaPoints = points.GetOutputArray())
             return BarcodeInvoke.cveBarcodeDetectorDetect(_ptr, iaImage, oaPoints);
 }
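A usage sketch, assuming BarcodeDetector can be constructed without super-resolution model files (check your Emgu CV version for the exact constructor; VectorOfPointF comes from Emgu.CV.Util):
 using (BarcodeDetector detector = new BarcodeDetector()) // constructor arguments assumed optional
 using (Mat image = CvInvoke.Imread("barcode.png", ImreadModes.Color))
 using (VectorOfPointF points = new VectorOfPointF())
 {
     if (detector.Detect(image, points))
     {
         // Every 4 consecutive points describe one detected barcode.
     }
 }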
Example #11
 /// <summary>
 /// Determines whether the specified input array is a UMat.
 /// </summary>
 /// <param name="arr">The array</param>
 /// <returns>True if it is a umat</returns>
 public static bool IsUmat(this IInputArray arr)
 {
     using (InputArray ia = arr.GetInputArray())
         return ia.IsUMat;
 }
Example #12
 /// <summary>
 /// Perform image denoising using Non-local Means Denoising algorithm (modified for color image):
 /// http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/
 /// with several computational optimizations. The noise is expected to be Gaussian white noise.
 /// The function converts image to CIELAB colorspace and then separately denoise L and AB components with given h parameters using fastNlMeansDenoising function.
 /// </summary>
 /// <param name="src">Input 8-bit 1-channel, 2-channel or 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src.</param>
 /// <param name="h">Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise.</param>
 /// <param name="hColor">The same as h but for color components. For most images value equals 10 will be enought to remove colored noise and do not distort colors.</param>
 /// <param name="templateWindowSize">Size in pixels of the template patch that is used to compute weights. Should be odd.</param>
 /// <param name="searchWindowSize">Size in pixels of the window that is used to compute weighted average for given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time.</param>
 public static void FastNlMeansDenoisingColored(IInputArray src, IOutputArray dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveFastNlMeansDenoisingColored(iaSrc, oaDst, h, hColor, templateWindowSize, searchWindowSize);
 }
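A usage sketch with the documented window-size defaults and hColor = 10, a common starting point (file names are placeholders):
 using (Mat noisy = CvInvoke.Imread("noisy.jpg", ImreadModes.Color))
 using (Mat denoised = new Mat())
 {
     // h filters the luminance component, hColor the chrominance.
     CvInvoke.FastNlMeansDenoisingColored(noisy, denoised, 3, 10, 7, 21);
 }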
Example #13
 /// <summary>
 /// Stylization aims to produce digital imagery with a wide variety of effects not focused on photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low contrast while preserving, or enhancing, high-contrast features.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image.</param>
 /// <param name="dst">Output image with the same size and type as src.</param>
 /// <param name="sigmaS">Range between 0 to 200.</param>
 /// <param name="sigmaR"> Range between 0 to 1.</param>
 public static void Stylization(IInputArray src, IOutputArray dst, float sigmaS = 60, float sigmaR = 0.45f)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveStylization(iaSrc, oaDst, sigmaS, sigmaR);
 }
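A usage sketch with the default parameters; a larger sigmaS smooths over bigger spatial neighborhoods, and sigmaR closer to 1 flattens more tonal detail:
 using (Mat photo = CvInvoke.Imread("photo.jpg", ImreadModes.Color))
 using (Mat stylized = new Mat())
 {
     CvInvoke.Stylization(photo, stylized, 60, 0.45f);
 }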
Example #14
 /// <summary>
 /// This filter enhances the details of a particular image.
 /// </summary>
 /// <param name="src">Input 8-bit 3-channel image</param>
 /// <param name="dst">Output image with the same size and type as src</param>
 /// <param name="sigmaS">Range between 0 to 200</param>
 /// <param name="sigmaR">Range between 0 to 1</param>
 public static void DetailEnhance(IInputArray src, IOutputArray dst, float sigmaS = 10.0f, float sigmaR = 0.15f)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveDetailEnhance(iaSrc, oaDst, sigmaS, sigmaR);
 }
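The call pattern mirrors Stylization; a minimal sketch with the defaults:
 using (Mat photo = CvInvoke.Imread("photo.jpg", ImreadModes.Color))
 using (Mat enhanced = new Mat())
 {
     CvInvoke.DetailEnhance(photo, enhanced, 10.0f, 0.15f);
 }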
Example #15
 /// <summary>
 /// A simple interface to detect faces in a given image.
 /// </summary>
 /// <param name="image">The image in which to detect faces</param>
 /// <param name="faces">Detection results stored in a Mat</param>
 /// <returns>1 if detection is successful, 0 otherwise.</returns>
 public int Detect(IInputArray image, IOutputArray faces)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFaces = faces.GetOutputArray())
             return CvInvoke.cveFaceDetectorYNDetect(_ptr, iaImage, oaFaces);
 }
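A sketch of calling the detector. The constructor shown is an assumption: recent Emgu CV builds construct FaceDetectorYN from a YuNet ONNX model path, a config string, and an input size, but the exact signature may differ between versions; the model file name is a placeholder.
 using (FaceDetectorYN detector = new FaceDetectorYN(
            "face_detection_yunet.onnx", "", new Size(320, 320))) // assumed constructor
 using (Mat image = CvInvoke.Imread("people.jpg", ImreadModes.Color))
 using (Mat faces = new Mat())
 {
     if (detector.Detect(image, faces) == 1)
     {
         // Each row of faces describes one detection (bounding box, landmarks, score).
     }
 }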
Example #16
 /// <summary>
 /// Decode image stored in the buffer
 /// </summary>
 /// <param name="buf">The buffer</param>
 /// <param name="loadType">The image loading type</param>
 /// <param name="dst">The output placeholder for the decoded matrix.</param>
 public static void Imdecode(IInputArray buf, CvEnum.ImreadModes loadType, Mat dst)
 {
     using (InputArray iaBuffer = buf.GetInputArray())
         cveImdecode(iaBuffer, loadType, dst);
 }
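A sketch that decodes a JPEG from raw bytes; VectorOfByte (Emgu.CV.Util) wraps the managed array as an IInputArray:
 byte[] bytes = File.ReadAllBytes("photo.jpg");
 using (VectorOfByte buf = new VectorOfByte(bytes))
 using (Mat decoded = new Mat())
 {
     CvInvoke.Imdecode(buf, ImreadModes.Color, decoded);
 }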
Example #17
 /// <summary>
 /// Update the background model
 /// </summary>
 /// <param name="image">The image that is used to update the background model</param>
 /// <param name="learningRate">Use -1 for default</param>
 /// <param name="subtractor">The background subtractor</param>
 /// <param name="fgMask">The output foreground mask</param>
 public static void Apply(this IBackgroundSubtractor subtractor, IInputArray image, IOutputArray fgMask, double learningRate = -1)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaFgMask = fgMask.GetOutputArray())
             CvInvoke.cveBackgroundSubtractorUpdate(subtractor.BackgroundSubtractorPtr, iaImage, oaFgMask, learningRate);
 }
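A sketch of the extension method driving a MOG2 subtractor over a video file (the file name is a placeholder):
 using (BackgroundSubtractorMOG2 subtractor = new BackgroundSubtractorMOG2(500, 16, true))
 using (VideoCapture capture = new VideoCapture("traffic.mp4"))
 using (Mat fgMask = new Mat())
 {
     Mat frame;
     while ((frame = capture.QueryFrame()) != null)
     {
         using (frame)
             subtractor.Apply(frame, fgMask); // learningRate defaults to -1
     }
 }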
Example #18
 /// <summary>
 /// Transforms the image to compensate radial and tangential lens distortion.
 /// </summary>
 /// <param name="src">The input (distorted) image</param>
 /// <param name="dst">The output (corrected) image</param>
 /// <param name="cameraMatrix">The camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1].</param>
 /// <param name="distortionCoeffs">The vector of distortion coefficients, 4x1 or 1x4 [k1, k2, p1, p2].</param>
 /// <param name="newCameraMatrix">Camera matrix of the distorted image. By default it is the same as cameraMatrix, but you may additionally scale and shift the result by using some different matrix</param>
 public static void Undistort(
     IInputArray src,
     IOutputArray dst,
     IInputArray cameraMatrix,
     IInputArray distortionCoeffs,
     IInputArray newCameraMatrix = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
                 using (InputArray iaDistortionCoeffs = distortionCoeffs.GetInputArray())
                     using (InputArray iaNewCameraMatrix = newCameraMatrix == null ? InputArray.GetEmpty() : newCameraMatrix.GetInputArray())
                         cveUndistort(iaSrc, oaDst, iaCameraMatrix, iaDistortionCoeffs, iaNewCameraMatrix);
 }
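A sketch with placeholder intrinsics; real values come from camera calibration. Matrix<double> (Emgu.CV) implements IInputArray and its elements start out zero:
 using (Mat distorted = CvInvoke.Imread("distorted.jpg", ImreadModes.Color))
 using (Mat corrected = new Mat())
 using (Matrix<double> cameraMatrix = new Matrix<double>(3, 3))
 using (Matrix<double> distCoeffs = new Matrix<double>(1, 4))
 {
     cameraMatrix[0, 0] = 800; cameraMatrix[0, 2] = 320; // fx, cx (placeholders)
     cameraMatrix[1, 1] = 800; cameraMatrix[1, 2] = 240; // fy, cy (placeholders)
     cameraMatrix[2, 2] = 1;
     distCoeffs[0, 0] = -0.2; // k1; k2, p1, p2 remain zero
     CvInvoke.Undistort(distorted, corrected, cameraMatrix, distCoeffs);
 }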
Example #19
 /// <summary>
 /// Computes a dot-product of two vectors.
 /// </summary>
 /// <param name="m">Another dot-product operand</param>
 /// <returns>The dot-product of two vectors.</returns>
 public double Dot(IInputArray m)
 {
     using (InputArray iaM = m.GetInputArray())
         return MatInvoke.cvMatDot(Ptr, iaM);
 }
Example #20
 /// <summary>
 /// Updates the motion history image as follows:
 /// mhi(x,y) = timestamp   if silhouette(x,y) != 0
 ///            0           if silhouette(x,y) = 0 and mhi(x,y) &lt; timestamp - duration
 ///            mhi(x,y)    otherwise
 /// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened long ago are cleared.
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
 /// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
 /// <param name="timestamp">Current time in milliseconds or other units. </param>
 /// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
 public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
 {
     using (InputArray iaSilhouette = silhouette.GetInputArray())
         using (InputOutputArray ioaMhi = mhi.GetInputOutputArray())
             cveUpdateMotionHistory(iaSilhouette, ioaMhi, timestamp, duration);
 }
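A sketch, assuming the wrapper above is exposed as CvInvoke.UpdateMotionHistory; the silhouette would normally be the thresholded difference between consecutive frames:
 using (Mat silhouette = new Mat(240, 320, DepthType.Cv8U, 1))
 using (Mat mhi = new Mat(240, 320, DepthType.Cv32F, 1))
 {
     silhouette.SetTo(new MCvScalar(0)); // fill in real motion pixels here
     mhi.SetTo(new MCvScalar(0));
     double timestamp = 1.0;             // any unit, as long as duration matches
     CvInvoke.UpdateMotionHistory(silhouette, mhi, timestamp, 0.5);
 }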
Example #21
        /// <summary>
        /// Convert an input array to texture 2D
        /// </summary>
        /// <param name="image">The input image, if 3 channel, we assume it is Bgr, if 4 channels, we assume it is Bgra</param>
        /// <param name="flipType"></param>
        /// <param name="buffer">Optional buffer for the texture conversion, should be big enough to hold the image data. e.g. width*height*pixel_size</param>
        /// <returns>The texture 2D</returns>
        public static Texture2D InputArrayToTexture2D(IInputArray image, Emgu.CV.CvEnum.FlipType flipType = FlipType.Vertical, byte[] buffer = null)
        {
            using (InputArray iaImage = image.GetInputArray())
                using (Mat m = iaImage.GetMat())
                {
                    Size size = m.Size;

                    if (m.NumberOfChannels == 3 && m.Depth == DepthType.Cv8U)
                    {
                        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);

                        byte[] data;
                        int    bufferLength = size.Width * size.Height * 3;
                        if (buffer != null)
                        {
                            if (buffer.Length < bufferLength)
                            {
                                throw new ArgumentException(String.Format("Buffer size ({0}) is not big enough for the RBG24 texture, width * height * 3 = {1} is required.", buffer.Length, bufferLength));
                            }
                            data = buffer;
                        }
                        else
                        {
                            data = new byte[bufferLength];
                        }
                        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                        using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(
                                   size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
                        {
                            rgb.ConvertFrom(m);
                            if (flipType != FlipType.None)
                            {
                                CvInvoke.Flip(rgb, rgb, flipType);
                            }
                        }
                        dataHandle.Free();
                        texture.LoadRawTextureData(data);
                        texture.Apply();
                        return texture;
                    }
                    else //if (m.NumberOfChannels == 4 && m.Depth == DepthType.Cv8U)
                    {
                        Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
                        byte[]    data;
                        int       bufferLength = size.Width * size.Height * 4;
                        if (buffer != null)
                        {
                            if (buffer.Length < bufferLength)
                            {
                                throw new ArgumentException(String.Format("Buffer size ({0}) is not big enough for the RGBA32 texture, width * height * 4 = {1} is required.", buffer.Length, bufferLength));
                            }
                            data = buffer;
                        }
                        else
                        {
                            data = new byte[bufferLength];
                        }
                        GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                        using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(
                                   size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
                        {
                            rgba.ConvertFrom(m);
                            if (flipType != FlipType.None)
                            {
                                CvInvoke.Flip(rgba, rgba, flipType);
                            }
                        }
                        dataHandle.Free();
                        texture.LoadRawTextureData(data);

                        texture.Apply();
                        return texture;
                    }
                }
        }
Example #22
 /// <summary>
 /// Contrast Limited Adaptive Histogram Equalization (CLAHE)
 /// </summary>
 /// <param name="src">The source image</param>
 /// <param name="clipLimit">Clip Limit, use 40 for default</param>
 /// <param name="tileGridSize">Tile grid size, use (8, 8) for default</param>
 /// <param name="dst">The destination image</param>
 public static void CLAHE(IInputArray src, double clipLimit, Size tileGridSize, IOutputArray dst)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cveCLAHE(iaSrc, clipLimit, ref tileGridSize, oaDst);
 }
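A sketch using the suggested defaults on a grayscale image; lower clip limits amplify noise less in flat regions:
 using (Mat gray = CvInvoke.Imread("photo.jpg", ImreadModes.Grayscale))
 using (Mat equalized = new Mat())
 {
     CvInvoke.CLAHE(gray, 40, new Size(8, 8), equalized);
 }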
Example #23
 /// <summary>
 /// Constructs a WCloud.
 /// </summary>
 /// <param name="cloud">Set of points which can be of type: CV_32FC3, CV_32FC4, CV_64FC3, CV_64FC4.</param>
 /// <param name="color">A single Color for the whole cloud.</param>
 public WCloud(IInputArray cloud, MCvScalar color)
 {
     using (InputArray iaCloud = cloud.GetInputArray())
         CvInvoke.cveWCloudCreateWithColor(iaCloud, ref color, ref _widget3dPtr, ref _widgetPtr);
 }
Example #24
 /// <summary>
 /// Finds centers in the grid of circles
 /// </summary>
 /// <param name="image">Source chessboard view</param>
 /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
 /// <param name="flags">Various operation flags</param>
 /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
 /// <param name="centers">output array of detected centers.</param>
 /// <returns>True if grid found.</returns>
 public static bool FindCirclesGrid(IInputArray image, Size patternSize, IOutputArray centers, CvEnum.CalibCgType flags, Feature2D featureDetector)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCenters = centers.GetOutputArray())
             return cveFindCirclesGrid(iaImage, ref patternSize, oaCenters, flags, featureDetector.Feature2DPtr);
 }
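A sketch for a 4x11 asymmetric circles grid, assuming SimpleBlobDetector (Emgu.CV.Features2D) has a parameterless constructor that applies default blob parameters:
 using (Mat image = CvInvoke.Imread("circles_grid.png", ImreadModes.Grayscale))
 using (VectorOfPointF centers = new VectorOfPointF())
 using (SimpleBlobDetector blobDetector = new SimpleBlobDetector())
 {
     bool found = CvInvoke.FindCirclesGrid(
         image, new Size(4, 11), centers, CalibCgType.AsymmetricGrid, blobDetector);
 }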
Example #25
        /// <summary>
        /// Sets all or some of the array elements to the specified value.
        /// </summary>
        /// <param name="value">Assigned scalar converted to the actual array type.</param>
        /// <param name="mask">Operation mask of the same size as the umat.</param>
        public void SetTo(IInputArray value, IInputArray mask = null)
        {
            using (InputArray iaValue = value.GetInputArray())
                using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                    UMatInvoke.cveUMatSetTo(Ptr, iaValue, iaMask);
        }
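A sketch combining a full fill with a masked fill:
 using (UMat umat = new UMat(480, 640, DepthType.Cv8U, 3))
 using (Mat mask = new Mat(480, 640, DepthType.Cv8U, 1))
 {
     umat.SetTo(new MCvScalar(0, 0, 0));         // black everywhere
     mask.SetTo(new MCvScalar(0));
     CvInvoke.Rectangle(mask, new Rectangle(0, 0, 100, 100), new MCvScalar(255), -1);
     umat.SetTo(new MCvScalar(0, 0, 255), mask); // red (Bgr) in the masked corner only
 }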
Example #26
 /// <summary>
 /// Write a single frame to the video writer
 /// </summary>
 /// <param name="frame">The frame to be written to the video writer</param>
 public void Write(IInputArray frame)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         CvInvoke.cveVideoWriterWrite(_ptr, iaFrame);
 }
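A sketch that writes 30 synthetic frames to an MJPG-encoded AVI; codec, fps and size are placeholders, and each written frame must match the size the writer was opened with:
 Size frameSize = new Size(640, 480);
 using (VideoWriter writer = new VideoWriter(
            "output.avi", VideoWriter.Fourcc('M', 'J', 'P', 'G'), 30, frameSize, true))
 using (Mat frame = new Mat(frameSize.Height, frameSize.Width, DepthType.Cv8U, 3))
 {
     for (int i = 0; i < 30; i++)
     {
         frame.SetTo(new MCvScalar(i * 8, 0, 0)); // fade from black toward blue
         writer.Write(frame);
     }
 }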