Example #1
        private void button2_Click(object sender, EventArgs e)
        {
            // Get the optimal size for the DFT (typically a product of 2, 3 and 5) so the FFT can be used
            int M = CvInvoke.GetOptimalDFTSize(image.Rows);
            int N = CvInvoke.GetOptimalDFTSize(image.Cols);
            // Pad the image to the optimal size
            Mat padded = new Mat();

            CvInvoke.CopyMakeBorder(image, padded, 0, M - image.Rows, 0, N - image.Cols, BorderType.Constant, new MCvScalar(1));

            // Create a 2-channel matrix: channel 0 holds the source data, channel 1 is zero

            Mat m = new Mat(padded.Size, DepthType.Cv32F, 1);

            // Scale the padded image to the 0..1 range, then reuse m (reset to 0) as the imaginary plane
            m.SetTo(new MCvScalar(255));
            CvInvoke.Divide(padded, m, padded);
            m.SetTo(new MCvScalar(0));
            VectorOfMat matVector = new VectorOfMat();

            matVector.Push(padded);
            matVector.Push(m);
            Mat matComplex = new Mat(padded.Size, DepthType.Cv32F, 2);

            CvInvoke.Merge(matVector, matComplex);
            padded.Dispose();
            m.Dispose();
            matVector.Dispose();
            // This will hold the DFT data: a 2-channel matrix that stores the transform result
            Matrix <float> forwardDft = new Matrix <float>(image.Rows, image.Cols, 2);

            CvInvoke.Dft(matComplex, forwardDft, DxtType.Forward, 0);

            // We'll display the magnitude (the spectrum image)
            Matrix <float> forwardDftMagnitude = GetDftMagnitude(forwardDft);

            SwitchQuadrants(ref forwardDftMagnitude);

            // Now compute the inverse transform to see if we can get back the original
            Matrix <float> reverseDft = new Matrix <float>(forwardDft.Rows, forwardDft.Cols, 2);

            CvInvoke.Dft(forwardDft, reverseDft, DxtType.InvScale, 0);
            Matrix <float> reverseDftMagnitude = GetDftMagnitude(reverseDft);

            imageBox1.Image = image;
            imageBox2.Image = Matrix2Image(forwardDftMagnitude);
            imageBox3.Image = Matrix2Image(reverseDftMagnitude);
        }
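
The handler above relies on three helpers that are not shown here (GetDftMagnitude, SwitchQuadrants and Matrix2Image). Below is a minimal sketch of one possible GetDftMagnitude implementation, assumed from the way the handler calls it: split the 2-channel DFT result into real and imaginary planes and return log(1 + magnitude) so the spectrum is visible when displayed.

        private static Matrix <float> GetDftMagnitude(Matrix <float> fftData)
        {
            // Split the complex result into its real (channel 0) and imaginary (channel 1) planes
            Matrix <float> magnitude = new Matrix <float>(fftData.Rows, fftData.Cols);
            using (VectorOfMat channels = new VectorOfMat())
            {
                CvInvoke.Split(fftData, channels);
                // magnitude = sqrt(Re^2 + Im^2)
                CvInvoke.Pow(channels[0], 2.0, channels[0]);
                CvInvoke.Pow(channels[1], 2.0, channels[1]);
                CvInvoke.Add(channels[0], channels[1], channels[0]);
                CvInvoke.Sqrt(channels[0], magnitude);
            }

            // Switch to a logarithmic scale: log(1 + magnitude)
            Matrix <float> ones = new Matrix <float>(fftData.Rows, fftData.Cols);
            ones.SetValue(1.0);
            CvInvoke.Add(magnitude, ones, magnitude);
            CvInvoke.Log(magnitude, magnitude);
            ones.Dispose();

            return magnitude;
        }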
Example #2
 public static Task <Image <Bgr, Byte> > CorrectLightness(Image <Bgr, Byte> rgb)
 {
     return(Task.Run(delegate() {
         // Convert to Lab and split into lightness (L), a and b planes
         Image <Lab, Byte> lab = rgb.Convert <Lab, Byte>();
         Image <Gray, Byte>[] lab_planes = lab.Split();
         // Apply CLAHE to the lightness plane only
         Image <Gray, Byte> lightness = new Image <Gray, byte>(rgb.Size);
         CvInvoke.CLAHE(lab_planes[0], 40, new Size(4, 4), lightness);
         // Merge the equalized lightness with the original a and b planes, then convert back to BGR
         VectorOfMat vm = new VectorOfMat(lightness.Mat, lab_planes[1].Mat, lab_planes[2].Mat);
         CvInvoke.Merge(vm, lab);
         Image <Bgr, Byte> dst = lab.Convert <Bgr, Byte>();
         vm.Dispose();
         lab.Dispose();
         foreach (Image <Gray, Byte> plane in lab_planes)
         {
             plane.Dispose();
         }
         lightness.Dispose();
         return dst;
     }));
 }
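
A short usage sketch for the method above; the file names and the surrounding async context are assumptions made for illustration.

     using (Image <Bgr, Byte> src = new Image <Bgr, Byte>("input.jpg"))
     using (Image <Bgr, Byte> corrected = await CorrectLightness(src))
     {
         // Save the CLAHE-corrected copy next to the original
         corrected.Save("corrected.jpg");
     }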
Example #3
        /// <summary>
        /// This function compares the separate Blue, Green and Red values of each pixel and, using a clever
        /// subtraction, tries to determine if it is skin-colored. The idea is taken from this paper:
        /// "In-air gestures around unmodified mobile devices" by Song et al.
        /// </summary>
        /// <param name="inputImage">Standard BGR image.</param>
        /// <returns>Grayscale image with the white pixels containing skin.</returns>
        private static Image <Gray, byte> MinimumSegment(Image <Bgr, byte> inputImage)
        {
            Mat deltaOne = new Mat();
            Mat deltaTwo = new Mat();

            VectorOfMat bgrChannels = new VectorOfMat(3);

            CvInvoke.Split(inputImage, bgrChannels);
            CvInvoke.Subtract(bgrChannels[2], bgrChannels[1], deltaOne);
            CvInvoke.Subtract(bgrChannels[2], bgrChannels[0], deltaTwo);

            Mat mixedMat = new Mat();

            CvInvoke.Min(deltaOne, deltaTwo, mixedMat);
            Image <Gray, byte> outputImage = mixedMat.ToImage <Gray, byte>().InRange(new Gray(10), new Gray(200));

            bgrChannels.Dispose();
            mixedMat.Dispose();
            return(outputImage);
        }
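
A possible way to use the segmentation above (file names are placeholders): apply the returned mask to the original frame so that only skin-colored pixels remain.

            using (Image <Bgr, byte> frame = new Image <Bgr, byte>("frame.png"))
            using (Image <Gray, byte> skinMask = MinimumSegment(frame))
            // Copy keeps only the pixels where the mask is non-zero
            using (Image <Bgr, byte> skinOnly = frame.Copy(skinMask))
            {
                skinOnly.Save("skin_only.png");
            }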
Example #4
        private void extractFromSlides(string outdir, List <int> slideList, int maskSlide, int shadowSlide, string slidePrefix = "Slide_")
        {
            var mat_mask       = CvInvoke.Imread(outdir + slidePrefix + maskSlide + ".png", ImreadModes.Grayscale);
            Mat mat_shadow_inv = null;

            if (shadowSlide > 0)
            {
                var mat_shadow = CvInvoke.Imread(outdir + slidePrefix + shadowSlide + ".png", ImreadModes.Grayscale);
                mat_shadow_inv = 255 - mat_shadow;
                mat_shadow.Dispose();
            }

            Mat         mat_content     = null;
            VectorOfMat mat_content_bgr = null;
            Mat         mat_alpha       = null;

            foreach (var slide in slideList)
            {
                // Read current image back from File
                mat_content = CvInvoke.Imread(outdir + slidePrefix + slide + ".png", ImreadModes.Color);

                // Split in Blue, Green and Red channel
                mat_content_bgr = new VectorOfMat(mat_content.Split());

                // Mask all channels so the image turns black where the mask is black
                CvInvoke.Multiply(mat_content_bgr[0], mat_mask, mat_content_bgr[0], 1.0 / 255);
                CvInvoke.Multiply(mat_content_bgr[1], mat_mask, mat_content_bgr[1], 1.0 / 255);
                CvInvoke.Multiply(mat_content_bgr[2], mat_mask, mat_content_bgr[2], 1.0 / 255);

                // Create alpha layer for image
                mat_alpha = mat_mask.Clone();
                if (shadowSlide > 0)
                {
                    CvInvoke.Add(mat_mask, mat_shadow_inv, mat_alpha);
                }

                // Merge channels to one image again
                CvInvoke.Merge(new VectorOfMat(mat_content_bgr[0], mat_content_bgr[1], mat_content_bgr[2], mat_alpha), mat_content);

                // Save current extracted image with alpha layer
                mat_content.Save(outdir + "Extract_" + slide + ".png");
            }

            // Dispose all used elements
            if (mat_mask != null)
            {
                mat_mask.Dispose();
            }
            if (mat_shadow_inv != null)
            {
                mat_shadow_inv.Dispose();
            }
            if (mat_content != null)
            {
                mat_content.Dispose();
            }
            if (mat_content_bgr != null)
            {
                mat_content_bgr.Dispose();
            }
            if (mat_alpha != null)
            {
                mat_alpha.Dispose();
            }
        }
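
A hypothetical call to the method above from within the same class; the output directory, slide indices and the choice of mask/shadow slides are made up for illustration.

            extractFromSlides(@"C:\slides\", new List <int> { 1, 2, 3 }, maskSlide: 4, shadowSlide: 5);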
        /// <summary>
        /// Calculate a mask for the pieces. The function calculates a histogram to find the piece background color.
        /// Everything within a specific HSV range around the piece background color is regarded as foreground. The rest is regarded as background.
        /// </summary>
        /// <param name="inputImg">Color input image</param>
        /// <returns>Mask image</returns>
        /// see: https://docs.opencv.org/2.4/modules/imgproc/doc/histograms.html?highlight=calchist
        public override Image <Gray, byte> GetMask(Image <Rgba, byte> inputImg)
        {
            Image <Gray, byte> mask;

            using (Image <Hsv, byte> hsvSourceImg = inputImg.Convert <Hsv, byte>())       //Convert input image to HSV color space
            {
                Mat hsvImgMat = new Mat();
                hsvSourceImg.Mat.ConvertTo(hsvImgMat, DepthType.Cv32F);
                VectorOfMat vm = new VectorOfMat(hsvImgMat);

                // Calculate histograms for each channel of the HSV image (H, S, V)
                Mat histOutH = new Mat(), histOutS = new Mat(), histOutV = new Mat();
                int hbins = 32, sbins = 32, vbins = 32;
                CvInvoke.CalcHist(vm, new int[] { 0 }, new Mat(), histOutH, new int[] { hbins }, new float[] { 0, 179 }, false);
                CvInvoke.CalcHist(vm, new int[] { 1 }, new Mat(), histOutS, new int[] { sbins }, new float[] { 0, 255 }, false);
                CvInvoke.CalcHist(vm, new int[] { 2 }, new Mat(), histOutV, new int[] { vbins }, new float[] { 0, 255 }, false);

                hsvImgMat.Dispose();
                vm.Dispose();

                // Draw the histograms for debugging purposes
                if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
                {
                    PluginFactory.LogHandle?.Report(new LogEventImage("Hist H", Utils.DrawHist(histOutH, hbins, 30, 1024, new MCvScalar(255, 0, 0)).Bitmap));
                    PluginFactory.LogHandle?.Report(new LogEventImage("Hist S", Utils.DrawHist(histOutS, sbins, 30, 1024, new MCvScalar(0, 255, 0)).Bitmap));
                    PluginFactory.LogHandle?.Report(new LogEventImage("Hist V", Utils.DrawHist(histOutV, vbins, 30, 1024, new MCvScalar(0, 0, 255)).Bitmap));
                }

                //#warning Use border color
                //int borderHeight = 10;
                //Image<Hsv, byte> borderImg = hsvSourceImg.Copy(new Rectangle(0, hsvSourceImg.Height - borderHeight, hsvSourceImg.Width, borderHeight));
                //MCvScalar meanBorderColorScalar = CvInvoke.Mean(borderImg);
                //Hsv meanBorderColor = new Hsv(meanBorderColorScalar.V0, meanBorderColorScalar.V1, meanBorderColorScalar.V2);
                //if (PuzzleSolverParameters.Instance.SolverShowDebugResults)
                //{
                //    Image<Hsv, byte> borderColorImg = new Image<Hsv, byte>(12, 12);
                //    borderColorImg.SetValue(meanBorderColor);
                //    _logHandle.Report(new LogBox.LogEventImage("HSV Border Color (" + meanBorderColor.Hue + " ; " + meanBorderColor.Satuation + "; " + meanBorderColor.Value + ")", borderColorImg.Bitmap));
                //}


                // Find the peaks in the histograms and use them as piece background color. Black and white areas are ignored.
                Hsv pieceBackgroundColor = new Hsv
                {
                    Hue       = Utils.HighestBinValInRange(histOutH, MainHueSegment - HueDiffHist, MainHueSegment + HueDiffHist, 179), //25, 179, 179);
                    Satuation = Utils.HighestBinValInRange(histOutS, 50, 205, 255),                                                    //50, 255, 255);
                    Value     = Utils.HighestBinValInRange(histOutV, 75, 205, 255)                                                     //75, 255, 255);
                };

                histOutH.Dispose();
                histOutS.Dispose();
                histOutV.Dispose();

                // Show the found piece background color
                if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
                {
                    Image <Hsv, byte> pieceBgColorImg     = new Image <Hsv, byte>(4, 12);
                    Image <Hsv, byte> lowPieceBgColorImg  = new Image <Hsv, byte>(4, 12);
                    Image <Hsv, byte> highPieceBgColorImg = new Image <Hsv, byte>(4, 12);
                    pieceBgColorImg.SetValue(pieceBackgroundColor);
                    lowPieceBgColorImg.SetValue(new Hsv(pieceBackgroundColor.Hue - HueDiff, pieceBackgroundColor.Satuation - SaturationDiff, pieceBackgroundColor.Value - ValueDiff));
                    highPieceBgColorImg.SetValue(new Hsv(pieceBackgroundColor.Hue + HueDiff, pieceBackgroundColor.Satuation + SaturationDiff, pieceBackgroundColor.Value + ValueDiff));

                    PluginFactory.LogHandle?.Report(new LogEventImage("HSV Piece Bg Color (" + pieceBackgroundColor.Hue + " ; " + pieceBackgroundColor.Satuation + "; " + pieceBackgroundColor.Value + ")", Utils.Combine2ImagesHorizontal(Utils.Combine2ImagesHorizontal(lowPieceBgColorImg.Convert <Rgb, byte>(), pieceBgColorImg.Convert <Rgb, byte>(), 0), highPieceBgColorImg.Convert <Rgb, byte>(), 0).Bitmap));

                    pieceBgColorImg.Dispose();
                    lowPieceBgColorImg.Dispose();
                    highPieceBgColorImg.Dispose();
                }

                // Do HSV segmentation and keep only the areas around the piece background color (with some hysteresis) as pieces
                mask = hsvSourceImg.InRange(new Hsv(pieceBackgroundColor.Hue - HueDiff, pieceBackgroundColor.Satuation - SaturationDiff, pieceBackgroundColor.Value - ValueDiff), new Hsv(pieceBackgroundColor.Hue + HueDiff, pieceBackgroundColor.Satuation + SaturationDiff, pieceBackgroundColor.Value + ValueDiff));

                // Close small black gaps with a morphological closing operation
                Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(5, 5), new Point(-1, -1));
                CvInvoke.MorphologyEx(mask, mask, MorphOp.Close, kernel, new Point(-1, -1), 5, BorderType.Default, new MCvScalar(0));
            }
            return(mask);
        }
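
A brief usage sketch for GetMask, called on an instance of this plugin class; the file names are assumptions. The returned mask can be saved for inspection or used to cut the pieces out of the input image.

            using (Image <Rgba, byte> input = new Image <Rgba, byte>("pieces.png"))
            using (Image <Gray, byte> mask = GetMask(input))
            {
                mask.Save("pieces_mask.png");
            }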