/*
          /// <summary>
          /// Find the minimum enclosing circle for the specific array of points
          /// </summary>
          /// <param name="points">The collection of points</param>
          /// <returns>The minimum enclosing circle for the array of points</returns>
          public static CircleF MinEnclosingCircle(PointF[] points)
          {
         IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvContour);
         IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
         GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
         CvInvoke.cvMakeSeqHeaderForArray(
            CvInvoke.MakeType(CvEnum.DepthType.Cv32F, 2),
            StructSize.MCvSeq,
            StructSize.PointF,
            handle.AddrOfPinnedObject(),
            points.Length,
            seq,
            block);
         PointF center;
         float radius;
         CvInvoke.cvMinEnclosingCircle(seq, out center, out radius);

         CvInvoke.MinEnclosingCircle(
         return new CircleF(center, radius);
          }*/
        /// <summary>
        /// Re-project pixels on a 1-channel disparity map to array of 3D points.
        /// </summary>
        /// <param name="disparity">Disparity map</param>
        /// <param name="Q">The re-projection 4x4 matrix, can be arbitrary, e.g. the one, computed by cvStereoRectify</param>
        /// <returns>The reprojected 3D points</returns>
        public static MCvPoint3D32f[] ReprojectImageTo3D(IInputArray disparity, IInputArray Q)
        {
            Size size;
            using (InputArray ia = disparity.GetInputArray())
            {
                size = ia.GetSize();
            }

            MCvPoint3D32f[] points3D = new MCvPoint3D32f[size.Width * size.Height];
            // Pin the managed array so the native call can write directly into it.
            GCHandle handle = GCHandle.Alloc(points3D, GCHandleType.Pinned);
            try
            {
                // Wrap the pinned buffer as a Height x Width, 3-channel float matrix (no copy).
                using (Matrix<float> pts = new Matrix<float>(size.Height, size.Width, 3, handle.AddrOfPinnedObject(), 0))
                {
                    CvInvoke.ReprojectImageTo3D(disparity, pts, Q, false, CvEnum.DepthType.Cv32F);
                }
            }
            finally
            {
                // Release the pin even if the native call throws; otherwise the
                // GC can never move or collect points3D's backing storage.
                handle.Free();
            }
            return points3D;
        }
        /*
         * public static Mat ToMat(this WriteableBitmap writeableBitmap)
         * {
         *  Mat m = new Mat();
         *  writeableBitmap.ToArray(m);
         *  return m;
         * }*/

        /// <summary>
        /// Convert the input array to a WriteableBitmap.
        /// </summary>
        /// <param name="array">The input array; must be 1, 3 or 4 channel, 8-bit per channel.</param>
        /// <returns>A WriteableBitmap holding the Bgra pixel data of the input array.</returns>
        /// <exception cref="NotImplementedException">Thrown when the channel count is not 1, 3 or 4.</exception>
        public static WriteableBitmap ToWritableBitmap(this IInputArray array)
        {
            using (InputArray ia = array.GetInputArray())
            {
                Size            size   = ia.GetSize();
                WriteableBitmap bmp    = new WriteableBitmap(size.Width, size.Height);
                byte[]          buffer = new byte[bmp.PixelWidth * bmp.PixelHeight * 4];
                // Pin the buffer so a Mat header can alias it for the color conversion.
                GCHandle        handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
                try
                {
                    using (Mat resultImage = new Mat(
                               new Size(bmp.PixelWidth, bmp.PixelHeight),
                               DepthType.Cv8U,
                               4,
                               handle.AddrOfPinnedObject(),
                               bmp.PixelWidth * 4))
                    {
                        int channels = ia.GetChannels();
                        switch (channels)
                        {
                        case 1:
                            CvInvoke.CvtColor(array, resultImage, ColorConversion.Gray2Bgra);
                            break;

                        case 3:
                            CvInvoke.CvtColor(array, resultImage, ColorConversion.Bgr2Bgra);
                            break;

                        case 4:
                            // Already Bgra: plain copy into the pinned buffer.
                            using (Mat m = ia.GetMat())
                                m.CopyTo(resultImage);
                            break;

                        default:
                            throw new NotImplementedException(String.Format(
                                                                  "Conversion from {0} channel IInputArray to WritableBitmap is not supported",
                                                                  channels));
                        }
                    }
                }
                finally
                {
                    // Always release the pin, even when the conversion throws.
                    handle.Free();
                }

                // Copy the converted pixels into the bitmap's backing store.
                using (Stream resultStream = bmp.PixelBuffer.AsStream())
                {
                    resultStream.Write(buffer, 0, buffer.Length);
                }

                return(bmp);
            }
        }
// Example #3
        /// <summary>
        /// Convert the image to Bitmap
        /// </summary>
        /// <param name="image">The image to convert to Bitmap</param>
        /// <param name="config">The bitmap config type. If null, Argb8888 will be used</param>
        /// <returns>The Bitmap</returns>
        public static Android.Graphics.Bitmap ToBitmap(this IInputArray image, Android.Graphics.Bitmap.Config config = null)
        {
            // Default to 32-bit ARGB when the caller does not specify a config.
            Android.Graphics.Bitmap.Config bitmapConfig =
                config ?? Android.Graphics.Bitmap.Config.Argb8888;

            using (InputArray iaImage = image.GetInputArray())
            {
                System.Drawing.Size imageSize = iaImage.GetSize();
                Android.Graphics.Bitmap bitmap =
                    Android.Graphics.Bitmap.CreateBitmap(imageSize.Width, imageSize.Height, bitmapConfig);
                // Delegate the pixel conversion to the in-place overload.
                image.ToBitmap(bitmap);
                return bitmap;
            }
        }
// Example #4
        /// <summary>
        /// Convert an input array to texture 2D
        /// </summary>
        /// <param name="image">The input image, if 3 channel, we assume it is Bgr, if 4 channels, we assume it is Bgra</param>
        /// <param name="flipType">The flip applied before uploading; Unity textures are bottom-up, hence the Vertical default.</param>
        /// <param name="buffer">Optional buffer for the texture conversion, should be big enough to hold the image data. e.g. width*height*pixel_size</param>
        /// <returns>The texture 2D</returns>
        /// <exception cref="ArgumentException">Thrown when the supplied buffer is too small for the image.</exception>
        public static Texture2D InputArrayToTexture2D(IInputArray image, Emgu.CV.CvEnum.FlipType flipType = FlipType.Vertical, byte[] buffer = null)
        {
            using (InputArray iaImage = image.GetInputArray())
            {
                Size size = iaImage.GetSize();

                if (iaImage.GetChannels() == 3 && iaImage.GetDepth() == DepthType.Cv8U && SystemInfo.SupportsTextureFormat(TextureFormat.RGB24))
                {
                    //assuming 3 channel image is of BGR color
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);

                    byte[] data;
                    int    bufferLength = size.Width * size.Height * 3;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(String.Format("Buffer size ({0}) is not big enough for the RGB24 texture, width * height * 3 = {1} is required.", buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    // Pin the byte buffer so the Image header can alias it (no copy).
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    try
                    {
                        using (
                            Image <Rgb, byte> rgb = new Image <Rgb, byte>(size.Width, size.Height, size.Width * 3,
                                                                          dataHandle.AddrOfPinnedObject()))
                        {
                            rgb.ConvertFrom(image);
                            if (flipType != FlipType.None)
                            {
                                CvInvoke.Flip(rgb, rgb, flipType);
                            }
                        }
                    }
                    finally
                    {
                        // Release the pin even if the conversion throws.
                        dataHandle.Free();
                    }
                    texture.LoadRawTextureData(data);
                    texture.Apply();
                    return(texture);
                }
                else if (SystemInfo.SupportsTextureFormat(TextureFormat.RGBA32))
                {
                    Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);
                    byte[]    data;
                    int       bufferLength = size.Width * size.Height * 4;
                    if (buffer != null)
                    {
                        if (buffer.Length < bufferLength)
                        {
                            throw new ArgumentException(
                                      String.Format(
                                          "Buffer size ({0}) is not big enough for the RGBA32 texture, width * height * 4 = {1} is required.",
                                          buffer.Length, bufferLength));
                        }
                        data = buffer;
                    }
                    else
                    {
                        data = new byte[bufferLength];
                    }
                    GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    try
                    {
                        using (
                            Image <Rgba, byte> rgba = new Image <Rgba, byte>(size.Width, size.Height, size.Width * 4,
                                                                             dataHandle.AddrOfPinnedObject()))
                        {
                            rgba.ConvertFrom(image);
                            if (flipType != FlipType.None)
                            {
                                CvInvoke.Flip(rgba, rgba, flipType);
                            }
                        }
                    }
                    finally
                    {
                        dataHandle.Free();
                    }
                    texture.LoadRawTextureData(data);

                    texture.Apply();
                    return(texture);
                }
                else
                {
                    throw new Exception("TextureFormat of RGBA32 is not supported on this device");
                }
            }
        }
// Example #5
        /// <summary>
        /// Convert the image and write the pixels into an existing Bitmap.
        /// </summary>
        /// <param name="image">The image to convert to Bitmap</param>
        /// <param name="bitmap">The bitmap, must be of the same size and has bitmap config type of either Argb888 or Rgb565</param>
        /// <exception cref="Exception">Thrown when the bitmap size does not match the image size.</exception>
        /// <exception cref="NotImplementedException">Thrown for unsupported bitmap configs or channel counts.</exception>
        public static void ToBitmap(this IInputArray image, Android.Graphics.Bitmap bitmap)
        {
            using (InputArray iaImage = image.GetInputArray())
            {
                System.Drawing.Size size = iaImage.GetSize();
                if (!(size.Width == bitmap.Width && size.Height == bitmap.Height))
                {
                    throw new Exception("Bitmap size doesn't match the Mat size");
                }

                // Pick the destination channel count and the per-source-channel
                // conversions for the bitmap's pixel format up front, so the two
                // config branches share one conversion path below.
                Android.Graphics.Bitmap.Config config = bitmap.GetConfig();
                int dstChannels;
                ColorConversion grayConversion, bgrConversion, bgraConversion;
                if (config == Android.Graphics.Bitmap.Config.Argb8888)
                {
                    dstChannels    = 4;
                    grayConversion = ColorConversion.Gray2Rgba;
                    bgrConversion  = ColorConversion.Bgr2Rgba;
                    bgraConversion = ColorConversion.Bgra2Rgba;
                }
                else if (config == Android.Graphics.Bitmap.Config.Rgb565)
                {
                    dstChannels    = 2;
                    grayConversion = ColorConversion.Gray2Bgr565;
                    bgrConversion  = ColorConversion.Bgr2Bgr565;
                    bgraConversion = ColorConversion.Bgra2Bgr565;
                }
                else
                {
                    throw new NotImplementedException("Only Bitmap config of Argb888 or Rgb565 is supported.");
                }

                int channels = iaImage.GetChannels();
                IntPtr pixels = bitmap.LockPixels();
                try
                {
                    // Wrap the locked bitmap pixels as a Mat so CvtColor writes in place.
                    using (Mat m = new Mat(new Size(bitmap.Width, bitmap.Height), DepthType.Cv8U, dstChannels, pixels, bitmap.RowBytes))
                    {
                        if (channels == 1)
                        {
                            CvInvoke.CvtColor(image, m, grayConversion);
                        }
                        else if (channels == 3)
                        {
                            CvInvoke.CvtColor(image, m, bgrConversion);
                        }
                        else if (channels == 4)
                        {
                            CvInvoke.CvtColor(image, m, bgraConversion);
                        }
                        else
                        {
                            throw new NotImplementedException(
                                      String.Format("InputArray of {0} channels is not supported.", channels));
                        }
                    }
                }
                finally
                {
                    // Always unlock, even if the conversion throws.
                    bitmap.UnlockPixels();
                }
            }
        }
// Example #6
 /// <summary>
 /// Get the size of the given input array.
 /// </summary>
 /// <param name="arr">The input array whose size is queried.</param>
 /// <returns>The size of the input array.</returns>
 private static Size InputArrGetSize(IInputArray arr)
 {
     using (InputArray inputArray = arr.GetInputArray())
     {
         Size result = inputArray.GetSize();
         return result;
     }
 }