Code Example #1
        /// <summary>
        /// Reads image from the specified buffer in memory.
        /// </summary>
        /// <param name="buf">The input array of vector of bytes.</param>
        /// <param name="flags">The same flags as in imread</param>
        /// <returns></returns>
        public static Mat ImDecode(Mat buf, ImreadModes flags)
        {
            if (buf == null)
            {
                throw new ArgumentNullException(nameof(buf));
            }
            buf.ThrowIfDisposed();
            IntPtr matPtr = NativeMethods.imgcodecs_imdecode_Mat(buf.CvPtr, (int)flags);

            GC.KeepAlive(buf);
            return new Mat(matPtr);
        }
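Since this overload takes the encoded bytes as a Mat, a caller typically wraps a byte buffer in a 1xN CV_8U Mat first. A minimal usage sketch (the file name is hypothetical, and it assumes OpenCvSharp's Mat(rows, cols, type, Array) constructor):

        // Usage sketch: wrap encoded bytes in a single-row Mat, then decode.
        byte[] bytes = File.ReadAllBytes("photo.jpg");
        using (var buf = new Mat(1, bytes.Length, MatType.CV_8UC1, bytes))
        using (Mat image = Cv2.ImDecode(buf, ImreadModes.Color))
        {
            Console.WriteLine($"Decoded {image.Width}x{image.Height}, {image.Channels()} channels");
        }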
Code Example #2
File: Cv2_imgcodecs.cs Project: CodeSang/opencvsharp
        /// <summary>
        /// Loads a multi-page image from a file. 
        /// </summary>
        /// <param name="filename">Name of file to be loaded.</param>
        /// <param name="mats">A vector of Mat objects holding each page, if more than one.</param>
        /// <param name="flags">Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.</param>
        /// <returns></returns>
        public static bool ImReadMulti(string filename, out Mat[] mats, ImreadModes flags = ImreadModes.AnyColor)
        {
            if (filename == null)
                throw new ArgumentNullException(nameof(filename));

            using (var matsVec = new VectorOfMat())
            {
                int ret = NativeMethods.imgcodecs_imreadmulti(filename, matsVec.CvPtr, (int)flags);
                mats = matsVec.ToArray();
                return ret != 0;
            }
        }
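A short usage sketch (the multi-page TIFF name is hypothetical); each page arrives as its own Mat, which the caller owns and should dispose:

        // Usage sketch: iterate the pages of a multi-page image.
        if (Cv2.ImReadMulti("pages.tiff", out Mat[] pages))
        {
            foreach (Mat page in pages)
            {
                Console.WriteLine($"Page: {page.Width}x{page.Height}");
                page.Dispose();
            }
        }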
Code Example #3
File: MatAndroid.cs Project: uzbekdev1/emgucv
        /// <summary>
        /// Reads an image file from an Android asset.
        /// </summary>
        /// <param name="assets">The asset manager</param>
        /// <param name="fileName">The name of the file</param>
        /// <param name="mode">The read mode</param>
        public static Mat GetMat(this AssetManager assets, String fileName, ImreadModes mode = ImreadModes.AnyColor | ImreadModes.AnyDepth)
        {
            Mat m = new Mat();

            using (Stream imageStream = assets.Open(fileName))
            using (MemoryStream ms = new MemoryStream())
            {
                imageStream.CopyTo(ms);
                CvInvoke.Imdecode(ms.ToArray(), mode, m);
            }
            return m;
        }
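A usage sketch from inside an Android Activity (the asset path is hypothetical; Assets is the Activity's AssetManager):

        // Usage sketch: load a bundled asset into a Mat.
        using (Mat logo = Assets.GetMat("images/logo.png"))
        {
            Console.WriteLine($"Loaded {logo.Width}x{logo.Height}");
        }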
Code Example #4
File: FileManager.cs Project: 0V/Face-Exchanger
        /// <summary>
        /// Prompts the user for an image file and loads it.
        /// </summary>
        /// <param name="flags">Specifies the color type of the loaded image.</param>
        /// <returns>The selected image as a Mat, or null if the dialog was cancelled.</returns>
        public static Mat OpenImageFile(ImreadModes flags = ImreadModes.AnyColor)
        {
            using (var openfile = new OpenFileDialog())
            {
                openfile.Filter =
                    "Image files (*.bmp;*.png;*.jpg;*.jpeg)|*.bmp;*.png;*.jpg;*.jpeg|All files (*.*)|*.*";

                if (openfile.ShowDialog() != DialogResult.OK)
                    return null;

                return Cv2.ImRead(openfile.FileName, flags);
            }
        }
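A usage sketch for the dialog helper (the containing FileManager class name comes from the file name above; the preview calls are illustrative):

        // Usage sketch: a null return means the user cancelled the dialog.
        Mat picked = FileManager.OpenImageFile(ImreadModes.Grayscale);
        if (picked != null)
        {
            using (picked)
            {
                Cv2.ImShow("preview", picked);
                Cv2.WaitKey();
            }
        }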
Code Example #5
        /// <summary>
        /// Reads image from the specified buffer in memory.
        /// </summary>
        /// <param name="buf">The input array of vector of bytes.</param>
        /// <param name="flags">The same flags as in imread</param>
        /// <returns></returns>
        public static Mat ImDecode(InputArray buf, ImreadModes flags)
        {
            if (buf == null)
            {
                throw new ArgumentNullException(nameof(buf));
            }
            buf.ThrowIfDisposed();

            NativeMethods.HandleException(
                NativeMethods.imgcodecs_imdecode_InputArray(buf.CvPtr, (int)flags, out var ret));
            GC.KeepAlive(buf);
            return new Mat(ret);
        }
Code Example #6
        /// <summary>
        /// Loads a multi-page image from a file.
        /// </summary>
        /// <param name="filename">Name of file to be loaded.</param>
        /// <param name="mats">A vector of Mat objects holding each page, if more than one.</param>
        /// <param name="flags">Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.</param>
        /// <returns></returns>
        public static bool ImReadMulti(string filename, out Mat[] mats, ImreadModes flags = ImreadModes.AnyColor)
        {
            if (filename == null)
            {
                throw new ArgumentNullException(nameof(filename));
            }

            using var matsVec = new VectorOfMat();
            NativeMethods.HandleException(
                NativeMethods.imgcodecs_imreadmulti(filename, matsVec.CvPtr, (int)flags, out var ret));
            mats = matsVec.ToArray();
            return ret != 0;
        }
Code Example #7
        private void ConvertFromCGImage(CGImage cgImage, ImreadModes modes = ImreadModes.AnyColor)
        {
            Size sz = new Size((int)cgImage.Width, (int)cgImage.Height);

            using (Mat m = new Mat(sz, DepthType.Cv8U, 4))
            {
                RectangleF rect = new RectangleF(PointF.Empty, new SizeF(cgImage.Width, cgImage.Height));
                using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
                    using (CGBitmapContext context = new CGBitmapContext(
                               m.DataPointer,
                               sz.Width, sz.Height,
                               8,
                               sz.Width * 4,
                               cspace,
                               CGImageAlphaInfo.PremultipliedLast))
                        context.DrawImage(rect, cgImage);
                if (modes == ImreadModes.Grayscale)
                {
                    CvInvoke.CvtColor(m, this, ColorConversion.Rgba2Gray);
                }
                else if (modes == ImreadModes.AnyColor)
                {
                    CvInvoke.CvtColor(m, this, ColorConversion.Rgba2Bgra);
                }
                else if (modes == ImreadModes.ReducedColor2)
                {
                    using (Mat tmp = new Mat())
                    {
                        CvInvoke.PyrDown(m, tmp);
                        CvInvoke.CvtColor(tmp, this, ColorConversion.Rgba2Bgr);
                    }
                }
                else if (modes == ImreadModes.ReducedGrayscale2)
                {
                    using (Mat tmp = new Mat())
                    {
                        CvInvoke.PyrDown(m, tmp);
                        CvInvoke.CvtColor(tmp, this, ColorConversion.Rgba2Gray);
                    }
                }
                else if (modes == ImreadModes.ReducedColor4 || modes == ImreadModes.ReducedColor8 || modes == ImreadModes.ReducedGrayscale4 || modes == ImreadModes.ReducedGrayscale8 || modes == ImreadModes.LoadGdal)
                {
                    throw new NotImplementedException(String.Format("Conversion from CGImage using mode {0} is not supported", modes));
                }
                else
                {
                    CvInvoke.CvtColor(m, this, ColorConversion.Rgba2Bgr);
                }
            }
        }
Code Example #8
        /// <summary>
        /// Loads an image from a file.
        /// </summary>
        /// <param name="fileName">Name of file to be loaded.</param>
        /// <param name="flags">Specifies color type of the loaded image</param>
        /// <returns></returns>
        public static Mat ImRead(string fileName, ImreadModes flags = ImreadModes.Color)
        {
            if (string.IsNullOrEmpty(fileName))
            {
                throw new ArgumentNullException(nameof(fileName));
            }

            NativeMethods.HandleException(
                NativeMethods.imgcodecs_imread(fileName, (int)flags, out var ret));
            if (ret == IntPtr.Zero)
            {
                throw new OpenCvSharpException("imread failed.");
            }
            return new Mat(ret);
        }
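A brief usage sketch (hypothetical path) showing how the flags change the decoded channel count:

        // Usage sketch: the same file loaded as 3-channel BGR and as 1-channel grayscale.
        using (Mat color = Cv2.ImRead("input.png", ImreadModes.Color))
        using (Mat gray = Cv2.ImRead("input.png", ImreadModes.Grayscale))
        {
            Console.WriteLine($"color: {color.Channels()} channels, gray: {gray.Channels()} channel");
        }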
Code Example #9
File: BitmapExtension.cs Project: byancey/UVtools
 /// <summary>
 /// Read the file into a Mat
 /// </summary>
 /// <param name="fileName">The name of the image file</param>
 /// <param name="mat">The Mat to read into</param>
 /// <param name="loadType">Image load type.</param>
 /// <returns>True if the file can be read into the Mat</returns>
 public bool ReadFile(String fileName, Mat mat, ImreadModes loadType)
 {
     try
     {
         using (Bitmap bmp = new Bitmap(fileName))
         using (Image<Bgr, Byte> image = bmp.ToImage<Bgr, Byte>())
             image.Mat.CopyTo(mat);
         return true;
     }
     catch (Exception e)
     {
         Debug.WriteLine(e);
         //throw;
          return false;
     }
 }
Code Example #10
        /// <summary>
        /// Reads image from the specified buffer in memory.
        /// </summary>
        /// <param name="span">The input slice of bytes.</param>
        /// <param name="flags">The same flags as in imread</param>
        /// <returns>The decoded image as a Mat.</returns>
        public static Mat ImDecode(ReadOnlySpan<byte> span, ImreadModes flags)
        {
            if (span.IsEmpty)
            {
                throw new ArgumentException("Empty span", nameof(span));
            }

            unsafe
            {
                fixed (byte* pBuf = span)
                {
                    NativeMethods.HandleException(
                        NativeMethods.imgcodecs_imdecode_vector(pBuf, span.Length, (int)flags, out var ret));
                    return new Mat(ret);
                }
            }
        }
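A usage sketch for the span overload (the 16-byte header offset is made up for illustration): it decodes directly from a slice of a larger buffer without copying the slice into its own array.

        // Usage sketch: skip a hypothetical fixed-size header and decode the rest.
        byte[] packet = File.ReadAllBytes("payload.bin");
        ReadOnlySpan<byte> encoded = packet.AsSpan(16);
        using (Mat image = Cv2.ImDecode(encoded, ImreadModes.Color))
        {
            Console.WriteLine($"Decoded {image.Width}x{image.Height}");
        }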
Code Example #11
        private float[] ReadData(string filename, int? width = null, int? height = null)
        {
            // Grayscale inputs decode to a single channel; everything else loads as color.
            ImreadModes mode = imgChannel == 1 ? ImreadModes.Grayscale : ImreadModes.Color;

            float[] imdata;
            using (Mat im = Cv2.ImRead(filename, mode))
            using (Mat resized = im.Resize(new Size(width ?? imgWidth, height ?? imgHeight)))
            {
                // Convert to 32-bit float in place, then copy out as a flat array.
                resized.ConvertTo(resized, MatType.CV_32F);
                resized.GetArray<float>(out imdata);
            }

            return imdata;
        }
Code Example #12
 public ImageSource(string path, ImreadModes readMode)
 {
     mPath = path;
     if (mPath != null)
     {
         try
         {
             mSourceImage  = new Mat(path, readMode);
             mHaveNewFrame = true;
         }
         catch (Exception)
         {
             Debug.WriteLine("Cannot open this image");
             mHaveNewFrame = false; // no frame is available when loading fails
             throw;
         }
         }
     }
     else
     {
         mHaveNewFrame = false;
     }
 }
Code Example #13
File: UMatiOS.cs Project: shubegor/PupilDetection
 /// <summary>
 /// Initializes a new instance of the <see cref="Emgu.CV.UMat"/> class from CGImage
 /// </summary>
 /// <param name="mode">The color conversion mode. By default, it converts the CGImage to BGRA color type to preserve all the image channels.</param>
 /// <param name="cgImage">The CGImage.</param>
 public UMat(CGImage cgImage, ImreadModes mode = ImreadModes.AnyColor)
     : this()
 {
     CvInvoke.ConvertCGImageToArray(cgImage, this, mode);
 }
Code Example #14
 public WImage(string path, ImreadModes mode = ImreadModes.Color)
 {
     this.Data = CvInvoke.Imread(path, mode);
 }
Code Example #15
 public static Mat Imread(string filename, ImreadModes loadType = ImreadModes.Color)
 {
     // No change needed; just wrapping to complete the API.
     return CvInvoke.Imread(filename, loadType);
 }
Code Example #16
 /// <summary>
 /// Initializes a new instance of the <see cref="Emgu.CV.Mat"/> class from UIImage
 /// </summary>
 /// <param name="mode">The color conversion mode. By default, it convert the UIImage to BGRA color type to preserve all the image channels.</param>
 /// <param name="uiImage">The UIImage.</param>
 public static Mat ToMat(this UIImage uiImage, ImreadModes mode = ImreadModes.AnyColor)
 {
     using (CGImage cgImage = uiImage.CGImage) {
         return cgImage.ToMat(mode);
     }
 }
Code Example #17
File: UIImageExtension.cs Project: v5chn/emgucv
 /// <summary>
 /// Copies the UIImage into an output array.
 /// </summary>
 /// <param name="mode">The color conversion mode. By default, it convert the UIImage to BGRA color type to preserve all the image channels.</param>
 /// <param name="uiImage">The UIImage.</param>
 /// <param name="outputArray">The output array</param>
 public static void ToArray(this UIImage uiImage, IOutputArray outputArray, ImreadModes mode = ImreadModes.AnyColor)
 {
     using (CGImage cgImage = uiImage.CGImage) {
         cgImage.ToArray(outputArray, mode);
     }
 }
Code Example #18
 public static Mat Imread(string path, ImreadModes mode = ImreadModes.Color)
 {
     return Cv2.ImDecode(File.ReadAllBytes(path), mode);
 }
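Decoding from File.ReadAllBytes rather than handing the path to the native imread is a common workaround for OpenCV's difficulty with non-ASCII paths on Windows; the trade-off is that the whole encoded file must fit in memory at once.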
Code Example #19
 public static extern Mat Imread(string path, ImreadModes mode);
Code Example #20
File: Cv2_imgcodecs.cs Project: CodeSang/opencvsharp
 /// <summary>
 /// Loads an image from a file.
 /// </summary>
 /// <param name="fileName">Name of file to be loaded.</param>
 /// <param name="flags">Specifies color type of the loaded image</param>
 /// <returns></returns>
 public static Mat ImRead(string fileName, ImreadModes flags = ImreadModes.Color)
 {
     return new Mat(fileName, flags);
 }
Code Example #21
File: Cv2_imgcodecs.cs Project: CodeSang/opencvsharp
 /// <summary>
 /// Reads image from the specified buffer in memory.
 /// </summary>
 /// <param name="buf">The input array of vector of bytes.</param>
 /// <param name="flags">The same flags as in imread</param>
 /// <returns></returns>
 public static Mat ImDecode(byte[] buf, ImreadModes flags)
 {
     if (buf == null)
         throw new ArgumentNullException(nameof(buf));
     IntPtr matPtr = NativeMethods.imgcodecs_imdecode_vector(
         buf, new IntPtr(buf.LongLength), (int)flags);
     return new Mat(matPtr);
 }
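A round-trip sketch pairing this overload with Cv2.ImEncode, which produces the encoded byte[] it consumes (the tiny 2x2 matrix is made up for illustration):

        // Usage sketch: encode a small Mat to PNG bytes, then decode it back unchanged.
        using (var src = new Mat(2, 2, MatType.CV_8UC3, Scalar.All(128)))
        {
            Cv2.ImEncode(".png", src, out byte[] png);
            using (Mat decoded = Cv2.ImDecode(png, ImreadModes.Unchanged))
            {
                Console.WriteLine($"Round-tripped {decoded.Width}x{decoded.Height}");
            }
        }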
Code Example #22
 /// <summary>
 /// Loads an image; Mat refers to OpenCV's matrix type.
 /// </summary>
 /// <param name="fileName">Name of the file to be loaded.</param>
 /// <param name="imreadModes">Specifies the color type of the loaded image.</param>
 /// <returns>The loaded image as a Mat.</returns>
 public static Mat Read(string fileName, ImreadModes imreadModes)
 {
     return new Mat(fileName, imreadModes);
 }
Code Example #23
File: Cv2_imgcodecs.cs Project: CodeSang/opencvsharp
 /// <summary>
 /// Reads image from the specified buffer in memory.
 /// </summary>
 /// <param name="buf">The input array of vector of bytes.</param>
 /// <param name="flags">The same flags as in imread</param>
 /// <returns></returns>
 public static Mat ImDecode(Mat buf, ImreadModes flags)
 {
     if (buf == null)
         throw new ArgumentNullException(nameof(buf));
     buf.ThrowIfDisposed();
     IntPtr matPtr = NativeMethods.imgcodecs_imdecode_Mat(buf.CvPtr, (int)flags);
     GC.KeepAlive(buf);
     return new Mat(matPtr);
 }
Code Example #24
File: NSImageExtension.cs Project: Ismaelhm/emgucv-1
 /// <summary>
 /// Initializes a new instance of the <see cref="Emgu.CV.Mat"/> class from NSImage
 /// </summary>
 /// <param name="mode">The color conversion mode. By default, it converts the NSImage to BGRA color type to preserve all the image channels.</param>
 /// <param name="nsImage">The NSImage.</param>
 public static Mat ToMat(this NSImage nsImage, ImreadModes mode = ImreadModes.AnyColor)
 {
    using (CGImage cgImage = nsImage.CGImage) {
        return cgImage.ToMat(mode);
    }
 }
Code Example #25
File: EmguAssert.cs Project: zanker99/emgucv
 public static Mat LoadMat(string name, ImreadModes modes = ImreadModes.AnyColor | ImreadModes.AnyDepth)
 {
     return CvInvoke.Imread(name, modes);
 }
Code Example #26
File: EmguAssert.cs Project: zanker99/emgucv
 public static Mat LoadMat(String name, ImreadModes modes = ImreadModes.AnyColor | ImreadModes.AnyDepth)
 {
     return Task.Run(async () => await ReadFile(name, modes)).Result;
 }
Code Example #27
 protected static Mat Image(string fileName, ImreadModes modes = ImreadModes.Color)
 {
     return new Mat(Path.Combine("_data", "image", fileName), modes);
 }
Code Example #28
        public AlignedResult CreateAlignedSecondImageKeypoints(SKBitmap firstImage, SKBitmap secondImage,
                                                               bool discardTransX, AlignmentSettings settings, bool keystoneRightOnFirst)
        {
#if __NO_EMGU__
            return null;
#endif
            var result = new AlignedResult();

            var detector = new ORBDetector();
            const ImreadModes READ_MODE = ImreadModes.Color;

            var mat1                = new Mat();
            var descriptors1        = new Mat();
            var allKeyPointsVector1 = new VectorOfKeyPoint();
            CvInvoke.Imdecode(GetBytes(firstImage, 1), READ_MODE, mat1);
            detector.DetectAndCompute(mat1, null, allKeyPointsVector1, descriptors1, false);

            var mat2                = new Mat();
            var descriptors2        = new Mat();
            var allKeyPointsVector2 = new VectorOfKeyPoint();
            CvInvoke.Imdecode(GetBytes(secondImage, 1), READ_MODE, mat2);
            detector.DetectAndCompute(mat2, null, allKeyPointsVector2, descriptors2, false);

            const double THRESHOLD_PROPORTION = 1 / 4d;
            var          thresholdDistance    = Math.Sqrt(Math.Pow(firstImage.Width, 2) + Math.Pow(firstImage.Height, 2)) * THRESHOLD_PROPORTION;

            var distanceThresholdMask = new Mat(allKeyPointsVector2.Size, allKeyPointsVector1.Size, DepthType.Cv8U, 1);
            if (!settings.UseCrossCheck)
            {
                unsafe
                {
                    var maskPtr = (byte*)distanceThresholdMask.DataPointer.ToPointer();
                    for (var i = 0; i < allKeyPointsVector2.Size; i++)
                    {
                        var keyPoint2 = allKeyPointsVector2[i];
                        for (var j = 0; j < allKeyPointsVector1.Size; j++)
                        {
                            var keyPoint1        = allKeyPointsVector1[j];
                            var physicalDistance = CalculatePhysicalDistanceBetweenPoints(keyPoint2.Point, keyPoint1.Point);
                            if (physicalDistance < thresholdDistance)
                            {
                                *maskPtr = 255;
                            }
                            else
                            {
                                *maskPtr = 0;
                            }

                            maskPtr++;
                        }
                    }
                }
            }

            var vectorOfMatches = new VectorOfVectorOfDMatch();
            var matcher         = new BFMatcher(DistanceType.Hamming, settings.UseCrossCheck);
            matcher.Add(descriptors1);
            matcher.KnnMatch(descriptors2, vectorOfMatches, settings.UseCrossCheck ? 1 : 2, settings.UseCrossCheck ? new VectorOfMat() : new VectorOfMat(distanceThresholdMask));

            var goodMatches = new List<MDMatch>();
            for (var i = 0; i < vectorOfMatches.Size; i++)
            {
                if (vectorOfMatches[i].Size == 0)
                {
                    continue;
                }

                if (vectorOfMatches[i].Size == 1 ||
                    (vectorOfMatches[i][0].Distance < 0.75 * vectorOfMatches[i][1].Distance)) //make sure matches are unique
                {
                    goodMatches.Add(vectorOfMatches[i][0]);
                }
            }

            if (goodMatches.Count < settings.MinimumKeypoints)
            {
                return null;
            }

            var pairedPoints = new List<PointForCleaning>();
            for (var ii = 0; ii < goodMatches.Count; ii++)
            {
                var keyPoint1 = allKeyPointsVector1[goodMatches[ii].TrainIdx];
                var keyPoint2 = allKeyPointsVector2[goodMatches[ii].QueryIdx];
                pairedPoints.Add(new PointForCleaning
                {
                    KeyPoint1 = keyPoint1,
                    KeyPoint2 = keyPoint2,
                    Data      = new KeyPointOutlierDetectorData
                    {
                        Distance = (float)CalculatePhysicalDistanceBetweenPoints(keyPoint1.Point, keyPoint2.Point),
                        Slope    = (keyPoint2.Point.Y - keyPoint1.Point.Y) / (keyPoint2.Point.X - keyPoint1.Point.X)
                    },
                    Match = new MDMatch
                    {
                        Distance = goodMatches[ii].Distance,
                        ImgIdx   = goodMatches[ii].ImgIdx,
                        QueryIdx = ii,
                        TrainIdx = ii
                    }
                });
            }

            if (settings.DrawKeypointMatches)
            {
                result.DirtyMatchesCount = pairedPoints.Count;
                result.DrawnDirtyMatches = DrawMatches(firstImage, secondImage, pairedPoints);
            }

            if (settings.DiscardOutliersByDistance || settings.DiscardOutliersBySlope)
            {
                //Debug.WriteLine("DIRTY POINTS START (ham,dist,slope,ydiff), count: " + pairedPoints.Count);
                //foreach (var pointForCleaning in pairedPoints)
                //{
                //    Debug.WriteLine(pointForCleaning.Match.Distance  + "," + pointForCleaning.Data.Distance + "," + pointForCleaning.Data.Slope + "," + Math.Abs(pointForCleaning.KeyPoint1.Point.Y - pointForCleaning.KeyPoint2.Point.Y));
                //}

                //Debug.WriteLine("DIRTY PAIRS:");
                //PrintPairs(pairedPoints);

                if (settings.DiscardOutliersByDistance)
                {
                    // reject distances and slopes more than some number of standard deviations from the median
                    var medianDistance = pairedPoints.OrderBy(p => p.Data.Distance).ElementAt(pairedPoints.Count / 2).Data.Distance;
                    var distanceStdDev = CalcStandardDeviation(pairedPoints.Select(p => p.Data.Distance).ToArray());
                    pairedPoints = pairedPoints.Where(p => Math.Abs(p.Data.Distance - medianDistance) < Math.Abs(distanceStdDev * (settings.KeypointOutlierThresholdTenths / 10d))).ToList();
                    //Debug.WriteLine("Median Distance: " + medianDistance);
                    //Debug.WriteLine("Distance Cleaned Points count: " + pairedPoints.Count);
                }

                if (settings.DiscardOutliersBySlope)
                {
                    var validSlopes = pairedPoints.Where(p => !float.IsNaN(p.Data.Slope) && float.IsFinite(p.Data.Slope)).ToArray();
                    var medianSlope = validSlopes.OrderBy(p => p.Data.Slope).ElementAt(validSlopes.Length / 2).Data.Slope;
                    var slopeStdDev = CalcStandardDeviation(validSlopes.Select(p => p.Data.Slope).ToArray());
                    pairedPoints = validSlopes.Where(p => Math.Abs(p.Data.Slope - medianSlope) < Math.Abs(slopeStdDev * (settings.KeypointOutlierThresholdTenths / 10d))).ToList();
                    //Debug.WriteLine("Median Slope: " + medianSlope);
                    //Debug.WriteLine("Slope Cleaned Points count: " + pairedPoints.Count);
                }

                //Debug.WriteLine("CLEAN POINTS START (ham,dist,slope,ydiff), count: " + pairedPoints.Count);
                //foreach (var pointForCleaning in pairedPoints)
                //{
                //    Debug.WriteLine(pointForCleaning.Match.Distance + "," + pointForCleaning.Data.Distance + "," + pointForCleaning.Data.Slope + "," + Math.Abs(pointForCleaning.KeyPoint1.Point.Y - pointForCleaning.KeyPoint2.Point.Y));
                //}

                //Debug.WriteLine("CLEANED PAIRS:");
                //PrintPairs(pairedPoints);

                for (var ii = 0; ii < pairedPoints.Count; ii++)
                {
                    var oldMatch = pairedPoints[ii].Match;
                    pairedPoints[ii].Match = new MDMatch
                    {
                        Distance = oldMatch.Distance,
                        ImgIdx   = oldMatch.ImgIdx,
                        QueryIdx = ii,
                        TrainIdx = ii
                    };
                }

                if (settings.DrawKeypointMatches)
                {
                    result.CleanMatchesCount = pairedPoints.Count;
                    result.DrawnCleanMatches = DrawMatches(firstImage, secondImage, pairedPoints);
                }
            }

            var points1 = pairedPoints.Select(p => new SKPoint(p.KeyPoint1.Point.X, p.KeyPoint1.Point.Y)).ToArray();
            var points2 = pairedPoints.Select(p => new SKPoint(p.KeyPoint2.Point.X, p.KeyPoint2.Point.Y)).ToArray();


            var translation1 = FindVerticalTranslation(points1, points2, secondImage);
            var translated1  = SKMatrix.MakeTranslation(0, translation1);
            points2 = translated1.MapPoints(points2);

            var rotation1 = FindRotation(points1, points2, secondImage);
            var rotated1  = SKMatrix.MakeRotation(rotation1, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = rotated1.MapPoints(points2);

            var zoom1   = FindZoom(points1, points2, secondImage);
            var zoomed1 = SKMatrix.MakeScale(zoom1, zoom1, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = zoomed1.MapPoints(points2);



            var translation2 = FindVerticalTranslation(points1, points2, secondImage);
            var translated2  = SKMatrix.MakeTranslation(0, translation2);
            points2 = translated2.MapPoints(points2);

            var rotation2 = FindRotation(points1, points2, secondImage);
            var rotated2  = SKMatrix.MakeRotation(rotation2, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = rotated2.MapPoints(points2);

            var zoom2   = FindZoom(points1, points2, secondImage);
            var zoomed2 = SKMatrix.MakeScale(zoom2, zoom2, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = zoomed2.MapPoints(points2);



            var translation3 = FindVerticalTranslation(points1, points2, secondImage);
            var translated3  = SKMatrix.MakeTranslation(0, translation3);
            points2 = translated3.MapPoints(points2);

            var rotation3 = FindRotation(points1, points2, secondImage);
            var rotated3  = SKMatrix.MakeRotation(rotation3, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = rotated3.MapPoints(points2);

            var zoom3   = FindZoom(points1, points2, secondImage);
            var zoomed3 = SKMatrix.MakeScale(zoom3, zoom3, secondImage.Width / 2f, secondImage.Height / 2f);
            points2 = zoomed3.MapPoints(points2);


            var keystoned1 = SKMatrix.MakeIdentity();
            var keystoned2 = SKMatrix.MakeIdentity();
            if (settings.DoKeystoneCorrection)
            {
                keystoned1 = FindTaper(points2, points1, secondImage, keystoneRightOnFirst);
                points1    = keystoned1.MapPoints(points1);
                keystoned2 = FindTaper(points1, points2, secondImage, !keystoneRightOnFirst);
                points2    = keystoned2.MapPoints(points2);
            }


            var horizontaled = SKMatrix.MakeIdentity();
            if (!discardTransX)
            {
                var horizontalAdj = FindHorizontalTranslation(points1, points2, secondImage);
                horizontaled = SKMatrix.MakeTranslation(horizontalAdj, 0);
                points2      = horizontaled.MapPoints(points2);
            }



            var tempMatrix1 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix1, translated1, rotated1);
            var tempMatrix2 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix2, tempMatrix1, zoomed1);

            var tempMatrix3 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix3, tempMatrix2, translated2);
            var tempMatrix4 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix4, tempMatrix3, rotated2);
            var tempMatrix5 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix5, tempMatrix4, zoomed2);

            var tempMatrix6 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix6, tempMatrix5, translated3);
            var tempMatrix7 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix7, tempMatrix6, rotated3);
            var tempMatrix8 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix8, tempMatrix7, zoomed3);


            var tempMatrix9 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix9, tempMatrix8, keystoned2);

            var tempMatrix10 = new SKMatrix();
            SKMatrix.Concat(ref tempMatrix10, tempMatrix9, horizontaled);

            var finalMatrix = tempMatrix10;
            result.TransformMatrix2 = finalMatrix;
            var alignedImage2 = new SKBitmap(secondImage.Width, secondImage.Height);
            using (var canvas = new SKCanvas(alignedImage2))
            {
                canvas.SetMatrix(finalMatrix);
                canvas.DrawBitmap(secondImage, 0, 0);
            }
            result.AlignedBitmap2 = alignedImage2;


            result.TransformMatrix1 = keystoned1;
            var alignedImage1 = new SKBitmap(firstImage.Width, firstImage.Height);
            using (var canvas = new SKCanvas(alignedImage1))
            {
                canvas.SetMatrix(keystoned1);
                canvas.DrawBitmap(firstImage, 0, 0);
            }
            result.AlignedBitmap1 = alignedImage1;


            return result;
        }
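The 0.75 factor in the match-filtering loop above is Lowe's ratio test: a best match is kept only if it is clearly closer than the runner-up, which discards ambiguous keypoints. A standalone restatement of that filter (the helper name is invented for illustration):

            // Sketch of the ratio test applied per query keypoint above.
            static bool IsDistinctMatch(MDMatch best, MDMatch second) =>
                best.Distance < 0.75 * second.Distance;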
Code Example #29
 /// <summary>
 /// Loads an image from a file.
 /// </summary>
 /// <param name="fileName">Name of file to be loaded.</param>
 /// <param name="flags">Specifies color type of the loaded image</param>
 /// <returns></returns>
 public static Mat ImRead(string fileName, ImreadModes flags = ImreadModes.Color)
 {
     return new Mat(fileName, flags);
 }
Code Example #30
File: OpenCV.cs Project: xin-pu/Somnium
 public static Mat GetMat(string fileName, ImreadModes imreadModes = ImreadModes.Grayscale)
 {
     return new Mat(fileName, imreadModes);
 }