Code example #1
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog Openfile = new OpenFileDialog();
            if (Openfile.ShowDialog() == DialogResult.OK)
            {
                Image<Bgr, byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
                Image<Gray, byte> gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> eh_gray_image = new Image<Gray, byte>(gray_image.Size);
                Image<Gray, byte> smooth_gray_image = new Image<Gray, byte>(gray_image.Size);
                Image<Gray, byte> ed_gray_image = new Image<Gray, byte>(gray_image.Size);
                Image<Bgr, byte> final_image = new Image<Bgr, byte>(Openfile.FileName);
                MemStorage stor = new MemStorage();
                List<MCvBox2D> detectedLicensePlateRegionList = new List<MCvBox2D>();

                CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
                CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
                //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
                CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);
                Contour<Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
                DetectPlate(contours, detectedLicensePlateRegionList);

                for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
                {
                    final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
                }
                imageBox1.Image = My_Image;
                imageBox2.Image = gray_image;
                imageBox3.Image = eh_gray_image;
                imageBox4.Image = smooth_gray_image;
                imageBox5.Image = ed_gray_image;
                imageBox6.Image = final_image;
            }
        }
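
The handler above never disposes the Image buffers or the MemStorage, which wrap unmanaged IplImage memory. Below is a minimal sketch of the same equalize, smooth, Canny and contour pipeline with deterministic cleanup, using the same Emgu CV 2.x calls as above; in the original the images must stay alive while the image boxes display them, so treat this as a variant for batch use:

        using (Image<Bgr, byte> src = new Image<Bgr, byte>(Openfile.FileName))
        using (Image<Gray, byte> gray = src.Convert<Gray, byte>())
        using (Image<Gray, byte> eq = new Image<Gray, byte>(gray.Size))
        using (Image<Gray, byte> smooth = new Image<Gray, byte>(gray.Size))
        using (Image<Gray, byte> edges = new Image<Gray, byte>(gray.Size))
        using (MemStorage stor = new MemStorage())
        {
            CvInvoke.cvEqualizeHist(gray, eq);
            CvInvoke.cvSmooth(eq, smooth, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
            CvInvoke.cvCanny(smooth, edges, 100, 50, 3);
            Contour<Point> contours = edges.FindContours(
                Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
            // ... consume the contours before the using blocks dispose the buffers
        }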
Code example #2
File: LicensePlates.cs Project: thuyenvinh/qlvx
        /// <summary>
        /// Detect license plate from the given image
        /// </summary>
        /// <param name="img">The image to search license plate from</param>
        /// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
        /// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
        /// <param name="detectedLicensePlateRegionList">A list where the regions of license plate (defined by an MCvBox2D) are stored</param>
        /// <returns>The list of words for each license plate</returns>
        public List<List<Word>> DetectLicensePlate(
            Image<Bgr, byte> img,
            List<Image<Gray, Byte>> licensePlateImagesList,
            List<Image<Gray, Byte>> filteredLicensePlateImagesList,
            List<MCvBox2D> detectedLicensePlateRegionList)
        {
            List<List<Word>> licenses = new List<List<Word>>();

            // Convert image to gray
            using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())

            // Create Canny image
            using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))

            //Create MemStorage
            using (MemStorage stor = new MemStorage())
            {
                //Convert gray with Canny Algorithm
                CvInvoke.cvCanny(gray, canny, 130, 70, 3);

                //List all Contour
                Contour<Point> contours = canny.FindContours(
                     Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                     Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
                     stor);

                //Check Contour
                FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
            }

            return licenses;
        }
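
A hedged usage sketch for DetectLicensePlate (the LicensePlateDetector instance name and the sample file name are illustrative, not taken from the project; the method fills the three lists and returns the recognized words per plate):

            LicensePlateDetector detector = new LicensePlateDetector(); // hypothetical wrapper instance
            List<Image<Gray, Byte>> plateImages = new List<Image<Gray, Byte>>();
            List<Image<Gray, Byte>> filteredPlateImages = new List<Image<Gray, Byte>>();
            List<MCvBox2D> plateRegions = new List<MCvBox2D>();
            using (Image<Bgr, byte> frame = new Image<Bgr, byte>("car.jpg")) // assumed sample image
            {
                List<List<Word>> words = detector.DetectLicensePlate(frame, plateImages, filteredPlateImages, plateRegions);
                // plateRegions[i] is the rotated box of plate i; words[i] holds its recognized text
            }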
Code example #3
File: FFT.cs Project: juanluislm/TeamVis
        private bool Detect_objects(Image<Gray, Byte> Input_Image, Image<Gray, Byte> object_Image)
        {
            Point dftSize = new Point(Input_Image.Width + (object_Image.Width * 2), Input_Image.Height + (object_Image.Height * 2));
            bool Success = false;
            using (Image<Gray, Byte> pad_array = new Image<Gray, Byte>(dftSize.X, dftSize.Y))
            {
                //copy centre
                pad_array.ROI = new Rectangle(object_Image.Width, object_Image.Height, Input_Image.Width, Input_Image.Height);
                CvInvoke.cvCopy(Input_Image, pad_array, IntPtr.Zero); // Input_Image is already grayscale, so no conversion (and no leaked temporary) is needed
               // CvInvoke.cvMatchTemplate
                //CvInvoke.cvShowImage("pad_array", pad_array);
                pad_array.ROI = (new Rectangle(0, 0, dftSize.X, dftSize.Y));
                using (Image<Gray, float> result_Matrix = pad_array.MatchTemplate(object_Image, TM_TYPE.CV_TM_CCOEFF_NORMED))
                {
                    result_Matrix.ROI = new Rectangle(object_Image.Width, object_Image.Height, Input_Image.Width, Input_Image.Height);

                    Point[] MAX_Loc, Min_Loc;
                    double[] min, max;
                    result_Matrix.MinMax(out min, out max, out Min_Loc, out MAX_Loc);

                    using (Image<Gray, double> RG_Image = result_Matrix.Convert<Gray, double>().Copy())
                    {
                        //TODO: CV_TM_CCOEFF_NORMED scores lie in [-1, 1]; the threshold may need to rise to at least 0.8 (it was 0.7 before)

                        if (max[0] > 0.85)
                        {
                            Object_Location = MAX_Loc[0];
                            Success = true;
                        }
                    }

                }
            }
            return Success;
        }
Code example #4
File: CvUser.cs Project: Ceasius/University
        public Image<Gray, byte> FacialDetection(Image<Gray, byte> Frame)
        {
            StreamReader SR = new StreamReader("CVConfig.txt");
            int width = int.Parse(SR.ReadLine().Split(':')[1]);
            int height = int.Parse(SR.ReadLine().Split(':')[1]);
            Image<Gray, byte> Img = Frame.Convert<Gray, byte>();
            using (Frame)
            {
                if (Frame != null)
                {
                    // there's only one channel (greyscale), hence the zero index
                    //var faces = nextFrame.DetectHaarCascade(haar)[0];
                    Image<Gray, byte> grayframe = Frame.Convert<Gray, byte>();
                    
                    var faces =
                            grayframe.DetectHaarCascade(
                                    gFacedetection, 1.4, 4,
                                    HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                    new Size(Frame.Width / 8, Frame.Height / 8)
                                    )[0];

                    
                    foreach (var face in faces)
                    {
                        CvInvoke.cvSetImageROI(grayframe, face.rect);
                        break;
                    }
                    Img = grayframe.Clone().Resize(width, height, INTER.CV_INTER_CUBIC);
                }
                
            }
            SR.Close();
            return Img;
        }
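
DetectHaarCascade was deprecated in later Emgu CV 2.x releases. A minimal sketch of the same face detection with CascadeClassifier, the API used in code example #23 below; the XML file name is an assumption, since the original loads its cascade into the gFacedetection field elsewhere:

                    using (CascadeClassifier cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
                    {
                        Rectangle[] faces = cascade.DetectMultiScale(
                            grayframe, 1.4, 4,
                            new Size(Frame.Width / 8, Frame.Height / 8), Size.Empty);
                        if (faces.Length > 0)
                            grayframe.ROI = faces[0]; // same effect as cvSetImageROI above
                    }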
Code example #5
File: MatchImage.cs Project: pakerliu/sharp-context
        public double MatchChar(char a, char b)
        {
            Image<Bgr, Byte> pic1 = new Image<Bgr, Byte>(100, 100).Resize(matchScale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR),
                             pic2 = new Image<Bgr, Byte>(100, 100).Resize(matchScale, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
            using (var g1 = Graphics.FromImage(pic1.Bitmap)) {
                g1.Clear(Color.Black);
                g1.DrawString(a.ToString(), new Font("Arial", 64), Brushes.White, Point.Empty);
            }
            using (var g2 = Graphics.FromImage(pic2.Bitmap)) {
                g2.Clear(Color.Black);
                g2.DrawString(b.ToString(), new Font("Comic Sans MS", 64), Brushes.White, Point.Empty);
            }

            List<Point> edge1 = findEdge(pic1.Convert<Gray, Byte>()),
                        edge2 = findEdge(pic2.Convert<Gray, Byte>());
            nsamp = Math.Min(maxsamplecount, Math.Min(edge1.Count, edge2.Count));
            edge1 = edge1.Sample(nsamp);
            edge2 = edge2.Sample(nsamp);

            Matrix t1, t2, V1, V2;
            ExtractBoundary(pic1.Convert<Gray, Byte>(), edge1, out origX, out V1, out t1);
            ExtractBoundary(pic2.Convert<Gray, Byte>(), edge2, out origY, out V2, out t2);

            return MatchIteration(origX, origY, V1, V2, t1, t2);
        }
Code example #6
File: Form1.cs Project: ivlukin/seminars
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog Openfile = new OpenFileDialog();
            if (Openfile.ShowDialog() == DialogResult.OK)
            {
                Image<Bgr, Byte> originalImage = new Image<Bgr, byte>(Openfile.FileName);
                Image<Gray, Byte> grayImage = originalImage.Convert<Gray, Byte>();

                grayImage._SmoothGaussian(3);
                CvInvoke.cvAdaptiveThreshold(grayImage, grayImage, 255, ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY, 75, 10);
                grayImage._Not();
                //ArrayList lines = new ArrayList();
                LineSegment2D[] lines = grayImage.HoughLinesBinary(
                    1, //Distance resolution in pixel-related units
                    Math.PI / 45.0, //Angle resolution measured in radians.
                    20, //threshold
                    30, //min Line width
                    10 //gap between lines
                    )[0]; //Get the lines from the first channel

                #region draw lines                
                foreach (LineSegment2D line in lines)
                    originalImage.Draw(line, new Bgr(Color.Red), 1);
                #endregion

                pictureBox1.Image = originalImage.ToBitmap();
            }
        }
Code example #7
        public LineDetectionFromFileTesting()
        {
            viewer = new ImageViewer(); //create an image viewer

            //Convert the image to grayscale and filter out the noise
            // gray = new Image<Gray, Byte>("C:/RoboSub/RoboImagesTest2/92c.png");
            fileImage = new Image<Bgr, Byte>(fileName);
            fileImage = fileImage.Resize(300, 200, Emgu.CV.CvEnum.INTER.CV_INTER_AREA, true);
            img = fileImage.Clone();
            gray = img.Convert<Gray, Byte>();
            // img = new Image<Bgr, Byte>("C:/RoboSub/RoboImagesTest2/92c.png");

            viewer.Size = new Size(fileImage.Width * 3, fileImage.Height * 3);

            Thread input = new Thread(getKeyboardInput);
            input.Start();
            Thread test = new Thread(testShapeDetection);
            test.Start();
            Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
            {
                //testShapeDetection();
            });
            viewer.ShowDialog();
            test.Abort();
            input.Abort();
        }
Code example #8
		private Bitmap DetectFace(Bitmap faceImage)
		{
			var image = new Image<Bgr, byte>(faceImage);
			var gray = image.Convert<Gray, Byte>();
			var haarCascadeFilePath = _httpContext.Server.MapPath("haarcascade_frontalface_default.xml");
			var face = new HaarCascade(haarCascadeFilePath);
			MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.1, 10, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
			Image<Gray, byte> result = null;

			foreach (MCvAvgComp f in facesDetected[0])
			{
				//draw the face detected in the 0th (gray) channel with blue color
				image.Draw(f.rect, new Bgr(Color.Blue), 2);
				result = image.Copy(f.rect).Convert<Gray, byte>();
				break;
			}

			if (result != null)
			{
				result = result.Resize(150, 150, INTER.CV_INTER_CUBIC);

				return result.Bitmap;
			}

			return null;
		}
Code example #9
		public void ProcessFrame(int threshold)
		{
			m_OriginalImage = m_Capture.QueryFrame();

			m_ClippedImage = m_OriginalImage.Copy(this.RegionOfInterest);

			// Make the dark portions bigger
			m_ErodedImage = m_ClippedImage.Erode(1);

			//Convert the image to grayscale
			m_GrayImage = m_ErodedImage.Convert<Gray, Byte>();

			m_BlackAndWhiteImage = m_GrayImage.ThresholdBinaryInv(new Gray(threshold), new Gray(255));

			FindRectangles(m_BlackAndWhiteImage);

			this.FoundRectangleCount = m_FoundRectangles.Count;
			if (this.FoundRectangleCount == m_ImageModel.ExpectedRectangleCount)
			{
				m_ImageModel.AssignFoundRectangles(m_FoundRectangles);
				m_FoundRectanglesImage = CreateRectanglesImage(m_ImageModel.GetInsideRectangles());
			}
			else
			{
				m_FoundRectanglesImage = CreateRectanglesImage(m_FoundRectangles);
			}
		}
Code example #10
 public override Image<Gray, byte> DetectSkin(Image<Bgr, byte> Img, IColor min, IColor max)
 {
     Image<Hsv, Byte> currentHsvFrame = Img.Convert<Hsv, Byte>();
     Image<Gray, byte> skin = currentHsvFrame.InRange((Hsv)min, (Hsv)max);
     return skin;
 }
Code example #11
        private void BoardButton_Click(object sender, RoutedEventArgs e)
        {
            string[] args = Environment.GetCommandLineArgs();
            Image<Hsv, byte> img = new Image<Hsv, byte>(args[1]);

            Image<Gray, byte> blue = ImageTools.FilterColor(img, new Hsv(90, 90, 50), new Hsv(120, 255, 255));
            Image<Gray, byte> green = ImageTools.FilterColor(img, new Hsv(35, 70, 35), new Hsv(90, 255, 255));
            Image<Gray, byte> yellow = ImageTools.FilterColor(img, new Hsv(10, 70, 127), new Hsv(35, 255, 255));
            Image<Gray, byte> red = ImageTools.FilterColor(
                img,
                new KeyValuePair<Hsv, Hsv>[]{
                    new KeyValuePair<Hsv,Hsv>(new Hsv(0, 85, 80), new Hsv(12, 255, 255)),
                    new KeyValuePair<Hsv,Hsv>(new Hsv(150,85,80), new Hsv(179,255,255))
                }
            );

            DetectionData ddb = ImageTools.DetectSquares(blue);
            DetectionData ddr = ImageTools.DetectSquares(red);
            DetectionData ddg = ImageTools.DetectSquares(green);
            DetectionData ddy = ImageTools.DetectSquares(yellow);
            ddb.RemoveNoises();
            ddr.RemoveNoises();
            ddg.RemoveNoises();
            ddy.RemoveNoises();
            ddb.AddColor(ddr);
            ddb.AddColor(ddg);
            ddb.AddColor(ddy);

            var board = ddb.CreateBoard();
            var di = ddb.DrawDetection().Bitmap;
            MessageBox.Show("Detected board: " + board.Height + "x" + board.Width);

            ImageTools.ShowInNamedWindow(img.Convert<Bgr, byte>(), "Original");
        }
Code example #12
 /// <summary>
 /// Stores a face image and its name in the training set, in the MS Access database 
 /// </summary>
 /// <param name="InputFace">The face image to store</param>
 /// <param name="FaceName">The name of the face, as entered in the textbox</param>
 private void AddFaceToDB(Image InputFace, string FaceName)
 {
     Image<Bgr, byte> grayframe = new Image<Bgr, byte>(new Bitmap(InputFace));
     Image<Gray, byte> faceGrayPic = grayframe.Convert<Gray, Byte>().Resize(64, 64, Emgu.CV.CvEnum.Inter.Cubic);
     faceGrayPic.Save("trainingset/" + FaceName + ".bmp");
     MessageBox.Show("nailigtas");
 }
Code example #13
File: Processor.cs Project: JasonCrease/CarColour
        internal void Process(int hueAfter, byte satAfter, int hueMid, int hueWidth)
        {
            byte afterSat = 0;

            BeforeImage = new Image<Bgr, byte>(BeforeImagePath).Resize(440, 320, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC, false);
            DebugImage = BeforeImage.Convert<Hsv, byte>();

            hueMid = FindHuePeak();
            int hueStart = (180 + hueMid - (hueWidth / 2)) % 180;
            int hueEnd = (180 + hueMid + (hueWidth / 2)) % 180;

            for (int i = 0; i < DebugImage.Width; i++)
                for (int j = 0; j < DebugImage.Height; j++)
                {
                    int hue = DebugImage.Data[j, i, 0];
                    int sat = DebugImage.Data[j, i, 1];
                    int val = DebugImage.Data[j, i, 2];

                    if ((hueStart < hueEnd) && (hue < hueEnd && hue > hueStart)
                        || (hueStart > hueEnd) && (hue < hueEnd || hue > hueStart))
                    {
                        if (sat > 30)
                        {
                            DebugImage.Data[j, i, 0] =(byte) hueAfter;
                            //DebugImage.Data[j, i, 1] = satAfter;
                        }
                    }
                }

            AfterImage = DebugImage.Convert<Bgr, byte>();
        }
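
A worked case for the wrap-around hue window above: with hueMid = 10 and hueWidth = 40, hueStart = (180 + 10 - 20) % 180 = 170 and hueEnd = (180 + 10 + 20) % 180 = 30. Since hueStart > hueEnd, the second branch applies and matches hue > 170 or hue < 30, so the window correctly wraps across the 0/179 boundary of the 8-bit hue circle.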
Code example #14
File: default.aspx.cs Project: AndreuChel/facedetect
        protected void UploadButton_Click(object sender, EventArgs e)
        {
            try
            {
                if (!((FileUpload1.PostedFile.ContentType == "image/jpeg") ||
                    (FileUpload1.PostedFile.ContentType == "image/png") ||
                    (FileUpload1.PostedFile.ContentType == "image/gif") ||
                    (FileUpload1.PostedFile.ContentType == "image/bmp"))) throw new Exception("Неизвестный тип файла");

                string PhotoFolder = Request.PhysicalApplicationPath + @"\photos\";

                if (!Directory.Exists(PhotoFolder)) Directory.CreateDirectory(PhotoFolder);

                string extension = Path.GetExtension(FileUpload1.FileName);
                string uniqueName = Path.ChangeExtension(FileUpload1.FileName, DateTime.Now.Ticks.ToString());

                string upFile = Path.Combine(PhotoFolder, uniqueName + extension);
                FileUpload1.SaveAs(upFile);

                //Face detection

                HaarCascade haarCascade = new HaarCascade(Request.PhysicalApplicationPath + @"\haarcascade_frontalface_alt2.xml");

                Image<Bgr, Byte> image = new Image<Bgr, Byte>(upFile);
                Image<Gray, Byte> grayImage = image.Convert<Gray, Byte>();

                Bitmap srcImage = image.ToBitmap();

                var detectedFaces = grayImage.DetectHaarCascade(haarCascade)[0];
                foreach (var face in detectedFaces)
                {
                    Image<Bgr, Byte> imFace = image.Copy(face.rect);
                    //Pixelation (the factor was chosen empirically):
                    //at this factor both large and small faces are pixelated equally well
                    double factor = 0.02 + (double)10 / (double)face.rect.Height;

                    imFace = imFace.Resize(factor, 0);
                    imFace = imFace.Resize(1 / factor, 0);

                    Bitmap faceBitmap = imFace.ToBitmap();

                    using (Graphics grD = Graphics.FromImage(srcImage))
                    {
                        grD.DrawImage(faceBitmap, new Point(face.rect.Left, face.rect.Top));
                    }
                }
                string uniqueName_processed = uniqueName + "_processed";

                srcImage.Save(Path.Combine(PhotoFolder, uniqueName_processed + extension));

                imgTitle.Visible = true;
                Image1.ImageUrl = "photos/" + uniqueName_processed + extension;

            }
            catch (Exception ex)
            {
                Session["ErrorMsg"] = ex.Message;
                Response.Redirect("~/error.aspx", true);
            }
        }
Code example #15
        /// <summary>
        /// Compute the red pixel mask for the given image. 
        /// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
        /// </summary>
        /// <param name="image">The color image to find the red mask from</param>
        /// <returns>The red pixel mask</returns>
        public static Image<Gray, Byte> GetRedPixelMask(Image<Bgr, byte> image)
        {
            using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
            {
                Image<Gray, Byte>[] channels = hsv.Split();

                try
                {
                    //mask hue inside [MaskHueLow, MaskHueHigh], then invert: red hue lies below MaskHueLow or above MaskHueHigh
                    CvInvoke.cvInRangeS(channels[0], new MCvScalar(MaskHueLow), new MCvScalar(MaskHueHigh), channels[0]);
                    channels[0]._Not();

                    //channels[1] is the mask for saturation of at least 10; this is mainly used to filter out white pixels
                    channels[1]._ThresholdBinary(new Gray(10), new Gray(255.0));

                    CvInvoke.cvAnd(channels[0], channels[1], channels[0], IntPtr.Zero);
                }
                finally
                {
                    channels[1].Dispose();
                    channels[2].Dispose();
                }
                return channels[0];
            }
        }
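
Because red straddles the 8-bit hue origin, the method masks the middle hue band and inverts it. An equivalent hedged sketch with two explicit hue bands, assuming MaskHueLow = 20 and MaskHueHigh = 160 as in the summary (InRange, Or and And as used in code examples #10 and #17):

            using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
            {
                Image<Gray, Byte>[] ch = hsv.Split();
                Image<Gray, Byte> lowRed = ch[0].InRange(new Gray(0), new Gray(20));        // hue below 20
                Image<Gray, Byte> highRed = ch[0].InRange(new Gray(160), new Gray(179));    // hue above 160
                Image<Gray, Byte> sat = ch[1].ThresholdBinary(new Gray(10), new Gray(255)); // saturation above 10
                Image<Gray, Byte> mask = lowRed.Or(highRed).And(sat);
                // ... use mask, then dispose ch[i], lowRed, highRed and sat
            }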
Code example #16
File: cImageDistanceMap.cs Project: cyrenaique/HCSA
        public void Run()
        {
            base.Output = new cImage(Input.Width, Input.Height, Input.Depth, base.ListChannelsToBeProcessed.Count);
            for (int IdxChannel = 0; IdxChannel < base.ListChannelsToBeProcessed.Count; IdxChannel++)
            {
                int CurrentChannel = base.ListChannelsToBeProcessed[IdxChannel];

                Image<Gray, float> inputImage = new Image<Gray, float>(Input.Width, Input.Height);

                for (int j = 0; j < Input.Height; j++)
                    for (int i = 0; i < Input.Width; i++)
                        inputImage.Data[j, i, 0] = Input.SingleChannelImage[CurrentChannel].Data[i + j * Input.Width];

                Image<Gray, float> ProcessedImage = new Image<Gray, float>(inputImage.Width, inputImage.Height);

                Emgu.CV.Image<Gray, byte> gray = inputImage.Convert<Gray, byte>(); //convert to grayscale

                //TODO: verify the DistanceTransform parameters
                Emgu.CV.CvInvoke.DistanceTransform(gray, ProcessedImage, null, DistanceType, MaskSize, DistLabelType.CComp);

                this.Output.SingleChannelImage[IdxChannel].SetNewDataFromOpenCV(ProcessedImage);
            }
            return;
        }
Code example #17
File: Form1.cs Project: srivera4/imageWithSpeech
        private Image<Gray, Byte> colorMask(Image<Bgr, Byte> bgrImage)
        {
            Image<Hsv, Byte> hsvImg = bgrImage.Convert<Hsv, Byte>();
            Image<Gray, Byte>[] channels = hsvImg.Split();
            Image<Gray, Byte> imghue = channels[0];            //hsv, so channels[0] is hue.
            Image<Gray, Byte> imgsat = channels[1];            //hsv, so channels[1] is saturation.
            Image<Gray, Byte> imgval = channels[2];            //hsv, so channels[2] is value.

            minHue = hueMinTB.Value;
            maxHue = hueMaxTB.Value;
            minSat = satMinTB.Value;
            maxSat = satMaxTB.Value;
            minVal = valMinTB.Value;
            maxVal = valMaxTB.Value;

            //keep only the selected hue band (OpenCV stores 8-bit hue in the range 0 to 179)
            Image<Gray, byte> huefilter = imghue.InRange(new Gray(minHue), new Gray(maxHue));
            Image<Gray, byte> satfilter = imgsat.InRange(new Gray(minSat), new Gray(maxSat));

            //use the value channel to filter out all but brighter colors
            //Image<Gray, byte> valfilter = imgval.InRange(new Gray(Color.Orange.GetBrightness() - 5), new Gray(Color.Orange.GetBrightness() + 5));
            Image<Gray, byte> valfilter = imgval.InRange(new Gray(minVal), new Gray(maxVal));

            //AND the masks to keep the parts of the image that are in the hue band and above the saturation and brightness floors
            Image<Gray, byte> detimg = huefilter.And(valfilter).And(satfilter);
            return detimg;
        }
Code example #18
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            if (_ocr == null)
            {
                try
                {
                    _ocr = new Tesseract(@FTessdata[0], "eng", Tesseract.OcrEngineMode.OEM_TESSERACT_ONLY);
                }
                catch (Exception exception)
                {
                    FLogger.Log(LogType.Debug, exception.Message);
                }
            }

            if (FInit.IsChanged && FInit[0] == true)
            {

                Image<Bgr, byte> My_Image = new Image<Bgr, byte>(@FInput[0]);
                Image<Gray, byte> gray = My_Image.Convert<Gray, Byte>();
                _ocr.Recognize(gray);
                FOutput[0] = _ocr.GetText();
            }

            //FOutput[0] = "hallo";
        }
Code example #19
File: ShapeDetector.cs Project: petrind/SRTesis2
        /// <summary>
        /// Compute a color pixel mask for the given image. 
        /// A pixel is kept when its hue, saturation and value all fall within the given ranges.
        /// </summary>
        /// <param name="image">The color image to compute the mask from</param>
        /// <returns>The color pixel mask</returns>
        public Image<Gray, Byte> GetColorPixelMask(Image<Bgr, byte> image, int minHue, int maxHue, int minSat, int maxSat, int minValue, int maxValue)
        {
            using (Image<Hsv, Byte> hsv = image.Convert<Hsv, Byte>())
            {
                Image<Gray, Byte>[] channels = hsv.Split();
                try
                {

                    CvInvoke.cvInRangeS(channels[0], new MCvScalar(minHue), new MCvScalar(maxHue), channels[0]);
                    //CvInvoke.cvShowImage("channel 0", channels[0]);
                    //channels[1] is the mask for saturation within [minSat, maxSat]
                    CvInvoke.cvInRangeS(channels[1], new MCvScalar(minSat), new MCvScalar(maxSat), channels[1]);

                    CvInvoke.cvInRangeS(channels[2], new MCvScalar(minValue), new MCvScalar(maxValue), channels[2]);

                    CvInvoke.cvAnd(channels[0], channels[1], channels[0], IntPtr.Zero);
                    CvInvoke.cvAnd(channels[0], channels[2], channels[0], IntPtr.Zero);
                    //CvInvoke.cvAnd(channels[0], channels[2], channels[0], IntPtr.Zero);

                }
                finally
                {
                    //CvInvoke.cvShowImage("channel 1", channels[1]);
                    //CvInvoke.cvShowImage("channel 2", channels[2]);
                    channels[1].Dispose();
                    channels[2].Dispose();
                    //channels[0].Dispose();
                }
                return channels[0];
            }
        }
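
For illustration, a hedged call that keeps an orange-ish band (the frame variable and all threshold values here are hypothetical, not taken from the project):

            Image<Gray, Byte> orangeMask = GetColorPixelMask(frame, 10, 25, 100, 255, 100, 255);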
Code example #20
File: Program.cs Project: Algorithmix/Straighten
 static void Main(string[] args)
 {
     Image<Bgra, Byte> img = new Image<Bgra, byte>(args[0]);
     Image<Gray, Byte> bw = img.Convert<Gray, byte>();
     CvInvoke.cvSmooth(bw, bw, SMOOTH_TYPE.CV_BLUR, 3, 3, 0, 0); // 3x3 box blur; cvSmooth needs the kernel parameters spelled out
     CvInvoke.cvCanny(bw, bw, 100, 100, 3);
 }
Code example #21
File: Superpixels.cs Project: Thomas214/MA-Code
        private double s = 0; // superpixel grid interval

        #endregion Fields

        #region Constructors

        // =============== Constructor ===============
        public Superpixels(Image<Bgr, Byte> imageBgr, int superpixelCount)
        {
            // set values
            k = superpixelCount;
            n = imageBgr.Width * imageBgr.Height;
            s = Math.Sqrt((double)n / (double)k);
            area = Convert.ToInt32(2 * s * 2 * s);
            m = 12;

            // convert BGR to Lab and build the pixel vector matrix
            imageLab = imageBgr.Convert<Lab, Byte>();
            pixels = new Pixel[imageBgr.Width, imageBgr.Height];
            for (int r = 0; r < imageLab.Height; r++)
            {
                for (int c = 0; c < imageLab.Width; c++)
                {
                    double l = (double)imageLab.Data[r, c, 0] * 100 / 255;
                    double a = (double)imageLab.Data[r, c, 1] - 128;
                    double b = (double)imageLab.Data[r, c, 2] - 128;

                    Bgr bgr = new Bgr(imageBgr.Data[r, c, 0], imageBgr.Data[r, c, 1], imageBgr.Data[r, c, 2]);

                    pixels[c, r] = new Pixel(new Vector5(l, a, b, c, r), bgr);

                    //Console.WriteLine("BGR = " + imageBgr.Data[r, c, 0] + " " + imageBgr.Data[r, c, 1] + " " + imageBgr.Data[r, c, 2]);
                    //Console.WriteLine("RGB = " + imageBgr.Data[r, c, 2] + " " + imageBgr.Data[r, c, 1] + " " + imageBgr.Data[r, c, 0]);
                    //Console.WriteLine("LAB = " + labValues[r, c].X + " " + labValues[r, c].Y + " " + labValues[r, c].Z);
                }
            }
        }
Code example #22
File: Form1.cs Project: mehulsbhatt/ocrrefinement
        private void Form1_Load(object sender, EventArgs e)
        {
            if (openImageFileDialog.ShowDialog() == System.Windows.Forms.DialogResult.OK)
            {
                Bgr drawColor = new Bgr(Color.Blue);
                try
                {
                    Image<Bgr, Byte> image = new Image<Bgr, byte>(openImageFileDialog.FileName);
                    original.Image = image.ToBitmap();
                    original.SizeMode = PictureBoxSizeMode.Zoom;
                    using (Image<Gray, byte> gray = image.Convert<Gray, Byte>())
                    {
                        _ocr.Recognize(gray);
                        Tesseract.Charactor[] charactors = _ocr.GetCharactors();
                        foreach (Tesseract.Charactor c in charactors)
                        {
                            image.Draw(c.Region, drawColor, 1);
                        }

                        processed.Image = image.ToBitmap();
                        processed.SizeMode = PictureBoxSizeMode.Zoom;
                        //String text = String.Concat( Array.ConvertAll(charactors, delegate(Tesseract.Charactor t) { return t.Text; }) );
                        String text = _ocr.GetText();
                        ocrTextBox.Text = text;
                    }
                }
                catch (Exception exception)
                {
                    MessageBox.Show(exception.Message);
                }
            }

        }
Code example #23
        public static void Detect(Image<Bgr, Byte> image, String faceFileName, List<Rectangle> recFaces, List<Image<Bgr, Byte>> imgFaces, out long detectionTime)
        {
            Stopwatch watch;

            {
                //Read the HaarCascade objects
                using (CascadeClassifier faceClassifier = new CascadeClassifier(faceFileName))
                {
                    watch = Stopwatch.StartNew();
                    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
                    {
                        //Normalizes brightness and increases contrast of the image
                        gray._EqualizeHist();

                        //Detect the faces from the gray scale image and store their locations as rectangles
                        //(DetectMultiScale returns a flat Rectangle array, one entry per detected face)
                        Rectangle[] facesDetected = faceClassifier.DetectMultiScale(
                           gray,
                           1.1,
                           10,
                           new Size(20, 20),
                           Size.Empty);
                        recFaces.AddRange(facesDetected);
                        //Now for each rectangle, get the sub face image from the coordinates and store it for display later
                        foreach (Rectangle rec in facesDetected)
                            imgFaces.Add(image.GetSubRect(rec));
                    }
                    watch.Stop();
                }
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
Code example #24
        public void render( Image<Bgr, Byte> img)
        {
            md.findMarkers(img.Convert<Gray, Byte>());
            if (md.isMarker())
            {
                int id = toolNetwork.recognitionPictograms(md.markers[0].getSymbolImage());
                if (id != -1)
                {
                    pos.estimate(md.markers[0]);
                    piktoViewManager.viewSceneMarker(id, pos.getTransformatinMatrix(), img.ToBitmap());

                }
                else
                {
                    piktoViewManager.updateDisplayCameraLayer(img.ToBitmap());
                }
                //   piktoViewMan.updateDisplayCameraLayer(img.ToBitmap());
            }
            else
            {
                if (!piktoViewManager.videoMode)
                    piktoViewManager.viewOnlyCameraImage();
                piktoViewManager.updateDisplayCameraLayer(img.ToBitmap());

            }
            displayComponent.displaySetContent();
        }
Code example #25
File: DetectFace.cs Project: Raptek/STEM
        public static void detectFaceCPU(Image<Bgr, Byte> image, String faceFileName, String eyesFileName, List<Rectangle> facesList, List<Rectangle> eyesList, out long detectionTime)
        {
            Stopwatch watch;
            using (CascadeClassifier faceCascade = new CascadeClassifier(faceFileName))
            using (CascadeClassifier eyesCascade = new CascadeClassifier(eyesFileName))
            {
                watch = Stopwatch.StartNew();
                using (Image<Gray, Byte> grayImage = image.Convert<Gray, Byte>())
                {
                    //grayImage._EqualizeHist();
                    Rectangle[] facesRegion = faceCascade.DetectMultiScale(grayImage, 1.1, 10, new Size(image.Width / 8, image.Height / 8), Size.Empty);
                    facesList.AddRange(facesRegion);

                    foreach (Rectangle f in facesRegion)
                    {
                        grayImage.ROI = f;
                        Rectangle[] eyesDetected = eyesCascade.DetectMultiScale(grayImage, 1.1, 10, new Size(image.Width / 8, image.Height / 8), Size.Empty);
                        grayImage.ROI = Rectangle.Empty;
                        foreach (Rectangle e in eyesDetected)
                        {
                            Rectangle eyeRect = e;
                            eyeRect.Offset(f.X, f.Y);
                            eyesList.Add(eyeRect);
                        }
                    }
                }
                watch.Stop();
            }
            detectionTime = watch.ElapsedMilliseconds;
        }
Code example #26
        private void Window_Loaded(object sender, RoutedEventArgs e)
        {
            var sourceImage = new Bitmap("C:\\Steve_Wozniak.jpg");

            string haarcascade = "haarcascade_frontalface_default.xml";

            using (HaarCascade face = new HaarCascade(haarcascade))
            {
                var image = new Image<Rgb, Byte>(sourceImage);

                using (var gray = image.Convert<Gray, Byte>())
                {
                    var detectedFaces = face.Detect(
                                            gray,
                                            1.1,
                                            10,
                                            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                            new System.Drawing.Size(20, 20));

                    var firstFace = detectedFaces[0];
                    System.Drawing.Bitmap bmpImage = image.Bitmap;
                    System.Drawing.Bitmap bmpCrop = bmpImage.Clone(firstFace.rect,
                                                                    bmpImage.PixelFormat);

                    var croppedImage = new Image<Rgb, Byte>(bmpCrop);

                    MainImage.Source = ToBitmapSource(sourceImage);
                    DetectedFaceImage.Source = ToBitmapSource(croppedImage.Bitmap);
                }
            }
        }
Code example #27
File: Face.cs Project: genecyber/PredatorCV
        public static Image<Bgr, byte> DetectAndDrawFaces(Image<Bgr, byte> image, HaarCascade face, HaarCascade eye)
        {
            Image<Gray, Byte> gray = image.Convert<Gray, Byte>(); //Convert it to Grayscale

            gray._EqualizeHist();

            //Detect the faces  from the gray scale image and store the locations as rectangle
            //The first dimensional is the channel
            //The second dimension is the index of the rectangle in the specific channel
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
               face,
               1.1,
               10,
               Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
               new Size(20, 20));

            foreach (MCvAvgComp f in facesDetected[0])
            {
                //draw the face detected in the 0th (gray) channel with blue color
                image.Draw(f.rect, new Bgr(Color.Blue), 2);

                //Set the region of interest on the faces
                if (eye != null)
                    DetectAndDrawEyes(image, gray, f, eye);
            }
            return image;
        }
Code example #28
        public DetectorResult Process(Image<Bgr, byte> rawFrame, Image<Gray, byte> grayFrame)
        {
            var surfParam = new SurfDetector(900, true);

            var modelImage = new Image<Gray, byte>("iphone\\signin.jpg");
            var modelFeatures = surfParam.DetectFeatures(modelImage, null);
            var tracker = new Features2DTracker(modelFeatures);

            var imageFeatures = surfParam.DetectFeatures(grayFrame, null);
            var homographyMatrix = tracker.Detect(imageFeatures, 100.0);

            Image<Bgr, Byte> processedImage = modelImage.Convert<Bgr, Byte>().ConcateVertical(rawFrame);

            if (homographyMatrix != null)
            {
                var rect = modelImage.ROI;
                var pts = new[]
                              {
                                  new PointF(rect.Left, rect.Bottom),
                                  new PointF(rect.Right, rect.Bottom),
                                  new PointF(rect.Right, rect.Top),
                                  new PointF(rect.Left, rect.Top)
                              };
                homographyMatrix.ProjectPoints(pts);
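                // The projected corners are in model-image coordinates; processedImage stacks the model above rawFrame, so shift them down by the model's height before drawing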

                for (int i = 0; i < pts.Length; i++)
                    pts[i].Y += modelImage.Height;

                processedImage.DrawPolyline(Array.ConvertAll(pts, Point.Round), true, new Bgr(Color.DarkOrange), 1);
            }
            return new DetectorResult(){RawImage = rawFrame, ProcessedImage = processedImage};
        }
Code example #29
        public LearningPathViewModel(/*ICommand renderXnaCmd, */DatabaseService db, ICommand returnToMainWindowCmd)
        {
            ReturnToMainWindowCmd = returnToMainWindowCmd;

            MDetector md = new MDetector();
            Image<Bgr, Byte> img = new Image<Bgr, Byte>(640, 480, new Bgr(255, 255, 0));
            PiktoViewDB piktodb = new PiktoViewDB(db);
            pictoViewManager = new PiktoViewManager(piktodb);
            ToolArtNetwork toolNetwork = new ToolArtNetwork(piktodb.getImageIdDic());
            MarkerPosition3D pos = new MarkerPosition3D(80.0f, 640.0f, 640, 480);

            RenderXnaCmd = new BasicCommand(p =>
            {
                md.findMarkers(img.Convert<Gray, Byte>());
                if (md.isMarker())
                {
                    int id = toolNetwork.recognitionPictograms(md.markers[0].getSymbolImage());
                    if (id != -1)
                    {
                        pos.estimate(md.markers[0]);
                        pictoViewManager.viewSceneMarker(id, pos.getTransformatinMatrix(), img.ToBitmap());
                    }
                }
                else
                {
                    pictoViewManager.updateDisplayCameraLayer(img.ToBitmap());
                }
                displayComponent.displaySetContent();
            });
        }
Code example #30
File: OCRSample.cs Project: Algorithmix/Papyrus
        public static Image<Gray, byte> Filter(Image<Bgra, byte> original)
        {
            var gray = original.Convert<Gray, byte>();
            var binary = new Image<Gray, byte>(new OtsuThreshold().Apply(gray.Bitmap));
            var canny = new Image<Gray, byte>(new CannyEdgeDetector().Apply(gray.Bitmap));
            var list = new List<Rectangle>();
            using (MemStorage stor = new MemStorage())
            {
                for (
                    Contour<Point> contours = canny.FindContours(
                        CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                        RETR_TYPE.CV_RETR_EXTERNAL,
                        stor);
                    contours != null;
                    contours = contours.HNext)
                {
                    Rectangle rect = contours.BoundingRectangle;
                    list.Add(rect);
                }
            }
            //list.Where(rect => rect.Height * rect.Width < 100)
            //    .ToList().ForEach( rect => binary.Draw(rect, new Gray(1.0) ,-1));

            binary._Erode(1);
            binary._Dilate(1);
            return binary;
        }
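
OtsuThreshold and CannyEdgeDetector above appear to be AForge.NET filters applied to the Bitmap view of the Emgu images. A sketch of the same preprocessing with Emgu CV 2.x calls only, avoiding the Bitmap round-trip (equivalence with the AForge filters is assumed, not verified):

        public static Image<Gray, byte> FilterEmguOnly(Image<Bgra, byte> original)
        {
            Image<Gray, byte> gray = original.Convert<Gray, byte>();
            Image<Gray, byte> binary = new Image<Gray, byte>(gray.Size);
            // Otsu picks the threshold itself; the 0 passed here is ignored
            CvInvoke.cvThreshold(gray, binary, 0, 255, THRESH.CV_THRESH_BINARY | THRESH.CV_THRESH_OTSU);
            using (Image<Gray, byte> canny = gray.Canny(new Gray(100), new Gray(50)))
            {
                // ... run the same FindContours scan over canny as above
            }
            binary._Erode(1);
            binary._Dilate(1);
            return binary;
        }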