Example 1
    public OpencvSource(string cam_or_url)
    {
        MAssert.Check(cam_or_url != string.Empty);

        // treat cam_or_url as a stream URL if it contains any non-digit character
        bool stream = false;

        for (int i = 0; i < cam_or_url.Length; ++i)
        {
            stream = stream ||
                     (cam_or_url[i] < '0') ||
                     (cam_or_url[i] > '9');
        }

        if (stream)
        {
            // open stream
            Console.WriteLine("opening stream '{0}'", cam_or_url);
            capturer = new OpenCvSharp.VideoCapture(cam_or_url);
        }
        else
        {
            // convert to integer
            int cam_id = Convert.ToInt32(cam_or_url, 10);
            MAssert.Check(cam_id >= 0, "wrong webcam id");

            // open webcam
            Console.WriteLine("opening webcam {0}", cam_id);
            capturer = new OpenCvSharp.VideoCapture(cam_id);
            MAssert.Check(capturer.IsOpened(), "webcam not opened");

            // set resolution
            capturer.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280);
            capturer.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720);

        }

        // the first few frames can be empty even when the camera is fine,
        // so skip a few frames
        for (int i = 0; i < 10; ++i)
        {
            capturer.RetrieveMat();
        }
        // check first two frames
        OpenCvSharp.Mat image1 = new OpenCvSharp.Mat(), image2 = new OpenCvSharp.Mat();
        capturer.Read(image1);
        capturer.Read(image2);
        Console.WriteLine("image1 size: {0}", image1.Size());
        Console.WriteLine("image1 size: {0}", image2.Size());

        MAssert.Check(
            !image1.Empty() &&
            !image2.Empty() &&
            image1.Size() == image2.Size() &&
            image1.Type() == OpenCvSharp.MatType.CV_8UC3 &&
            image2.Type() == OpenCvSharp.MatType.CV_8UC3,
            "error opening webcam or stream");
    }
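
The digit scan above just decides whether cam_or_url parses as a non-negative integer. A more idiomatic sketch of the same decision (a rewrite for illustration, not part of the sample) could use int.TryParse:

    // Sketch: int.TryParse replaces the manual digit scan; a parse
    // failure (or a negative id) means cam_or_url is treated as a URL,
    // matching the original logic.
    public static OpenCvSharp.VideoCapture Open(string cam_or_url)
    {
        if (int.TryParse(cam_or_url, out int cam_id) && cam_id >= 0)
        {
            var cap = new OpenCvSharp.VideoCapture(cam_id);  // webcam by index
            cap.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280);
            cap.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720);
            return cap;
        }
        return new OpenCvSharp.VideoCapture(cam_or_url);     // stream by URL
    }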
Example 2
 //https://stackoverflow.com/questions/52016253/python-np-array-equivilent-in-opencv-opencvsharp
 public static Mat zeros_like(Mat a)
 {
     return(new Mat(a.Size(), a.Type(), new Scalar(0)));
 }
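
A quick usage check (values are illustrative): the result shares geometry and type with its source. OpenCvSharp's built-in Mat.Zeros(size, type) covers the same need, returning a MatExpr rather than a Mat.

     // Illustrative check: z matches src in size and type and is all zeros.
     Mat src = new Mat(480, 640, MatType.CV_8UC3, new Scalar(1, 2, 3));
     Mat z   = zeros_like(src);
     Console.WriteLine("{0} {1}", z.Size(), z.Type());  // same geometry/type as src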
Example 3
    // create the database
    public Database(
        string databaseDirPath,
        Recognizer recognizer,
        Capturer capturer,
        float distanceThreshold)
    {
        vwElements = new List <VideoWorker.DatabaseElement>();
        samples    = new List <RawSample>();
        thumbnails = new List <OpenCvSharp.Mat>();
        names      = new List <string>();
        // check paths
        MAssert.Check(Directory.Exists(databaseDirPath), "database not found");

        // get directory content
        List <string> path_l1 = new List <string>(Directory.EnumerateDirectories(databaseDirPath));

        // check every element in that directory

        ulong element_id_counter = 0;

        for (int il1 = 0; il1 < path_l1.Count; ++il1)
        {
            // ignore files
            if (!Directory.Exists(path_l1[il1]))
            {
                continue;
            }
            // so path_l1[il1] is supposed to be the path to the person directory

            // get the files inside this person's directory
            List <string> path_l2 = new List <string>(Directory.EnumerateFiles(path_l1[il1]));
            string        name    = string.Empty;

            // search for the name.txt file

            for (int il2 = 0; il2 < path_l2.Count; ++il2)
            {
                if (Path.GetFileName(path_l2[il2]) == "name.txt")
                {
                    // read the file content into name

                    using (StreamReader sr = new StreamReader(path_l2[il2]))
                    {
                        name = sr.ReadToEnd();
                    }
                }
            }

            // try to open each file as an image
            for (int il2 = 0; il2 < path_l2.Count; ++il2)
            {
                if (Path.GetFileName(path_l2[il2]) == "name.txt")
                {
                    continue;
                }

                Console.WriteLine("processing '{0}' name: '{1}'", path_l2[il2], name);

                // read image with opencv

                OpenCvSharp.Mat input_image = OpenCvSharp.Cv2.ImRead(path_l2[il2]);

                if (input_image.Empty() || input_image.Type() != OpenCvSharp.MatType.CV_8UC3)
                {
                    Console.WriteLine("\n\nWARNING: can't read image '{0}'\n\n", path_l2[il2]);
                    continue;
                }

                byte[] data = new byte[input_image.Total() * input_image.Type().Channels];
                Marshal.Copy(input_image.DataStart, data, 0, data.Length);
                RawImage image = new RawImage(input_image.Width, input_image.Height, RawImage.Format.FORMAT_BGR, data);

                // capture the face
                List <RawSample> capturedSamples = capturer.capture(image);

                if (capturedSamples.Count != 1)
                {
                    Console.WriteLine("\n\nWARNING: detected {0} faces on '{1}' image instead of one, image ignored \n\n", capturedSamples.Count, path_l2[il2]);
                    continue;
                }

                RawSample sample = capturedSamples[0];

                // make template
                Template templ = recognizer.processing(sample);

                // prepare data for VideoWorker
                VideoWorker.DatabaseElement vwElement = new VideoWorker.DatabaseElement(element_id_counter++, (ulong)il1, templ, distanceThreshold);

                vwElements.Add(vwElement);

                samples.Add(sample);

                thumbnails.Add(makeThumbnail(sample, name));

                names.Add(name);
            }
        }

        MAssert.Check((int)element_id_counter == vwElements.Count);
        MAssert.Check((int)element_id_counter == samples.Count);
        MAssert.Check((int)element_id_counter == thumbnails.Count);
        MAssert.Check((int)element_id_counter == names.Count);
    }
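
The loop above implies one sub-directory per person under databaseDirPath, each holding an optional name.txt plus face photos (names below are illustrative):

        database/
            person_a/
                name.txt    - display name, read verbatim
                photo1.jpg  - must contain exactly one face
                photo2.png
            person_b/
                ...

Each photo that passes the checks yields one VideoWorker.DatabaseElement whose person id is the index of that person's directory.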
Example 4
    // make a thumbnail of a sample
    public static OpenCvSharp.Mat makeThumbnail(
        RawSample sample,
        string name = "")
    {
        int thumbnail_size = Worker.thumbnail_size;

        // buffer for the cropped face image
        MemoryStream stream = new MemoryStream();

        // cut the face in BMP format so we don't waste time
        // encoding/decoding the image; it is only copied a few times,
        // which is negligible
        sample.cutFaceImage(
            stream,
            RawSample.ImageFormat.IMAGE_FORMAT_BMP,
            RawSample.FaceCutType.FACE_CUT_BASE);

        OpenCvSharp.Mat temp = OpenCvSharp.Mat.ImDecode(stream.ToArray(), OpenCvSharp.ImreadModes.Color);

        // check the decoded image
        MAssert.Check(!temp.Empty());
        MAssert.Check(temp.Type() == OpenCvSharp.MatType.CV_8UC3);


        // and resize to the thumbnail_size

        OpenCvSharp.Rect resRect;

        if (temp.Rows >= temp.Cols)
        {
            resRect.Height = thumbnail_size;
            resRect.Width  = temp.Cols * thumbnail_size / temp.Rows;
        }
        else
        {
            resRect.Width  = thumbnail_size;
            resRect.Height = temp.Rows * thumbnail_size / temp.Cols;
        }

        resRect.X = (thumbnail_size - resRect.Width) / 2;
        resRect.Y = (thumbnail_size - resRect.Height) / 2;

        OpenCvSharp.Mat result = new OpenCvSharp.Mat(
            thumbnail_size,
            thumbnail_size,
            OpenCvSharp.MatType.CV_8UC3,
            OpenCvSharp.Scalar.All(0));

        OpenCvSharp.Cv2.Resize(
            temp,
            result[resRect],
            resRect.Size);

        if (!string.IsNullOrEmpty(name))
        {
            result[new OpenCvSharp.Rect(0, result.Rows - 27, result.Cols, 27)] = result.RowRange(result.Rows - 27, result.Rows) * 0.5f;

            OpenCvSharp.Cv2.PutText(
                result,
                name,
                new OpenCvSharp.Point(0, result.Rows - 7),
                OpenCvSharp.HersheyFonts.HersheyDuplex,
                0.7,
                OpenCvSharp.Scalar.All(255),
                1,
                OpenCvSharp.LineTypes.AntiAlias);
        }

        return(result);
    }
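
The letterboxing math above scales the crop so its longer side equals thumbnail_size and centers it on a black square. A worked example (values are illustrative):

        // A 300x200 face cut with thumbnail_size = 150 takes the else
        // branch: Width = 150, Height = 200 * 150 / 300 = 100, so the
        // resized image is centered at X = 0, Y = 25.
        // OpenCvSharp.Mat thumb = makeThumbnail(sample, "Alice");  // hypothetical call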
Example 5
    public void work(OpenCvSharp.Mat frame)
    {
        // send the frame to the capturer (_tracker)
        byte[] data = new byte[frame.Total() * frame.Type().Channels];
        Marshal.Copy(frame.DataStart, data, 0, (int)data.Length);
        RawImage         ri_frame = new RawImage(frame.Width, frame.Height, RawImage.Format.FORMAT_BGR, data);
        List <RawSample> samples  = _tracker.capture(ri_frame);

        // clone the frame for drawing on it
        OpenCvSharp.Mat draw_image = frame.Clone();
        // handle each face on the frame separately
        for (int i = 0; i < samples.Count; ++i)
        {
            RawSample sample = samples[i];

            // get a face rectangle
            RawSample.Rectangle rectangle = sample.getRectangle();

            // set a point to place information for this face
            OpenCvSharp.Point2f text_point = new OpenCvSharp.Point2f(
                rectangle.x + rectangle.width + 3,
                rectangle.y + 10);

            const float text_line_height = 22;

            // draw facial points
            // red color for all points
            // green for left eye
            // yellow for right eye
            // (yes, left and right eyes are mixed up in the face_sdk api,
            // but fixing it now would break compatibility with previous versions)
            if (_flag_points)
            {
                List <Point> points = sample.getLandmarks();

                for (int j = -2; j < points.Count; ++j)
                {
                    Point p =
                        j == -2 ?
                        sample.getLeftEye() :
                        j == -1 ?
                        sample.getRightEye() :
                        points[j];

                    OpenCvSharp.Scalar color =
                        j == -2 ?
                        new OpenCvSharp.Scalar(50, 255, 50) :
                        j == -1 ?
                        new OpenCvSharp.Scalar(50, 255, 255) :
                        new OpenCvSharp.Scalar(50, 50, 255);


                    OpenCvSharp.Cv2.Circle(
                        draw_image,
                        new OpenCvSharp.Point2f(p.x, p.y),
                        j < 0 ? 4 : 2,
                        color,
                        -1,
                        OpenCvSharp.LineTypes.AntiAlias);
                }
            }

            // draw rectangle
            if (_flag_positions)
            {
                OpenCvSharp.Cv2.Rectangle(
                    draw_image,
                    new OpenCvSharp.Rect(
                        rectangle.x,
                        rectangle.y,
                        rectangle.width,
                        rectangle.height),
                    new OpenCvSharp.Scalar(50, 50, 255),
                    2,
                    OpenCvSharp.LineTypes.AntiAlias);
            }

            // draw age and gender
            if (_flag_age_gender)
            {
                AgeGenderEstimator.AgeGender age_gender = _age_geder_estimator.estimateAgeGender(sample);

                string age_text = "age: ";

                switch (age_gender.age)
                {
                case AgeGenderEstimator.Age.AGE_KID: age_text += "kid    "; break;

                case AgeGenderEstimator.Age.AGE_YOUNG: age_text += "young  "; break;

                case AgeGenderEstimator.Age.AGE_ADULT: age_text += "adult  "; break;

                case AgeGenderEstimator.Age.AGE_SENIOR: age_text += "senior "; break;
                }

                age_text += string.Format("years: {0:G3}", age_gender.age_years);

                puttext(
                    draw_image,
                    age_text,
                    text_point);
                text_point.Y += text_line_height;

                puttext(
                    draw_image,
                    age_gender.gender == AgeGenderEstimator.Gender.GENDER_FEMALE ? "gender: female" :
                    age_gender.gender == AgeGenderEstimator.Gender.GENDER_MALE ? "gender: male" : "?",
                    text_point);
                text_point.Y += text_line_height;

                text_point.Y += text_line_height / 3;
            }

            // draw emotions
            if (_flag_emotions)
            {
                List <EmotionsEstimator.EmotionConfidence> emotions =
                    _emotions_estimator.estimateEmotions(sample);

                for (int j = 0; j < emotions.Count; ++j)
                {
                    EmotionsEstimator.Emotion emotion = emotions[j].emotion;
                    float confidence = emotions[j].confidence;

                    OpenCvSharp.Cv2.Rectangle(
                        draw_image,
                        new OpenCvSharp.Rect(
                            (int)text_point.X,
                            (int)text_point.Y - (int)text_line_height / 2,
                            (int)(100 * confidence),
                            (int)text_line_height),
                        emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL  ? new OpenCvSharp.Scalar(255, 0, 0) :
                        emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY    ? new OpenCvSharp.Scalar(0, 255, 0) :
                        emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY    ? new OpenCvSharp.Scalar(0, 0, 255) :
                        emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? new OpenCvSharp.Scalar(0, 255, 255) :
                        new OpenCvSharp.Scalar(0, 0, 0),
                        -1);

                    puttext(
                        draw_image,
                        emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL  ? "neutral" :
                        emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY    ? "happy" :
                        emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY    ? "angry" :
                        emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? "surprise" : "?",
                        text_point + new OpenCvSharp.Point2f(100, 0));

                    text_point.Y += text_line_height;

                    text_point.Y += text_line_height / 3;
                }
            }


            // draw angles text
            if (_flag_angles)
            {
                // round each angle to one decimal place
                string yaw   = string.Format("yaw: {0}", 0.1f * (int)(10 * sample.getAngles().yaw + 0.5f));
                string pitch = string.Format("pitch: {0}", 0.1f * (int)(10 * sample.getAngles().pitch + 0.5f));
                string roll  = string.Format("roll: {0}", 0.1f * (int)(10 * sample.getAngles().roll + 0.5f));

                puttext(draw_image, yaw, text_point);
                text_point.Y += text_line_height;

                puttext(draw_image, pitch, text_point);
                text_point.Y += text_line_height;

                puttext(draw_image, roll, text_point);
                text_point.Y += text_line_height;

                text_point.Y += text_line_height / 3;
            }

            // draw angles vectors
            if (_flag_angles_vectors)
            {
                RawSample.Angles angles = sample.getAngles();

                float cos_a = (float)Math.Cos(angles.yaw * OpenCvSharp.Cv2.PI / 180);
                float sin_a = (float)Math.Sin(angles.yaw * OpenCvSharp.Cv2.PI / 180);

                float cos_b = (float)Math.Cos(angles.pitch * OpenCvSharp.Cv2.PI / 180);
                float sin_b = (float)Math.Sin(angles.pitch * OpenCvSharp.Cv2.PI / 180);

                float cos_c = (float)Math.Cos(angles.roll * OpenCvSharp.Cv2.PI / 180);
                float sin_c = (float)Math.Sin(angles.roll * OpenCvSharp.Cv2.PI / 180);

                OpenCvSharp.Point3f[] xyz =
                {
                    new OpenCvSharp.Point3f(cos_a * cos_c,        -sin_c, -sin_a),
                    new OpenCvSharp.Point3f(sin_c,         cos_b * cos_c, -sin_b),
                    new OpenCvSharp.Point3f(sin_a,         sin_b,         cos_a * cos_b)
                };

                OpenCvSharp.Point2f center = new OpenCvSharp.Point2f(
                    (sample.getLeftEye().x + sample.getRightEye().x) * 0.5f,
                    (sample.getLeftEye().y + sample.getRightEye().y) * 0.5f);

                float length = (rectangle.width + rectangle.height) * 0.3f;

                for (int c = 0; c < 3; ++c)
                {
                    OpenCvSharp.Cv2.Line(
                        draw_image,
                        center,
                        center + new OpenCvSharp.Point2f(xyz[c].X, -xyz[c].Y) * length,
                        c == 0 ? new OpenCvSharp.Scalar(50, 255, 255) :
                        c == 1 ? new OpenCvSharp.Scalar(50, 255, 50) :
                        c == 2 ? new OpenCvSharp.Scalar(50, 50, 255) : new OpenCvSharp.Scalar(),
                        2,
                        OpenCvSharp.LineTypes.AntiAlias);
                }
            }

            // draw quality text
            if (_flag_quality)
            {
                QualityEstimator.Quality q =
                    _quality_estimator.estimateQuality(sample);

                string lighting, noise, sharpness, flare;

                lighting = "lighting: " + q.lighting.ToString();
                puttext(draw_image, lighting, text_point);
                text_point.Y += text_line_height;

                noise = "noise: " + q.noise.ToString();
                puttext(draw_image, noise, text_point);
                text_point.Y += text_line_height;

                sharpness = "sharpness: " + q.sharpness.ToString();
                puttext(draw_image, sharpness, text_point);
                text_point.Y += text_line_height;

                flare = "flare: " + q.flare.ToString();
                puttext(draw_image, flare, text_point);
                text_point.Y += text_line_height;

                text_point.Y += text_line_height / 3;
            }

            // draw liveness text
            if (_flag_liveness)
            {
                Liveness2DEstimator.Liveness liveness_2d_result = _liveness_2d_estimator.estimateLiveness(sample);

                puttext(
                    draw_image,
                    "liveness: " + (
                        liveness_2d_result == Liveness2DEstimator.Liveness.REAL ? "real" :
                        liveness_2d_result == Liveness2DEstimator.Liveness.FAKE ? "fake" :
                        liveness_2d_result == Liveness2DEstimator.Liveness.NOT_ENOUGH_DATA ? "not enough data" : "??"),
                    text_point);

                text_point.Y += text_line_height;
                text_point.Y += text_line_height / 3;
            }

            // draw face quality
            if (_flag_face_quality)
            {
                float quality = _face_quality_estimator.estimateQuality(sample);

                string ss = "face quality: " + quality.ToString();
                puttext(draw_image, ss, text_point);
                text_point.Y += text_line_height;
                text_point.Y += text_line_height / 3;
            }

            // draw face cuts
            for (int cut_i = 0; cut_i < 3; ++cut_i)
            {
                if ((cut_i == 0 && !_flag_cutting_base) ||
                    (cut_i == 1 && !_flag_cutting_full) ||
                    (cut_i == 2 && !_flag_cutting_token))
                {
                    continue;
                }

                puttext(
                    draw_image,
                    cut_i == 0 ? "base cut:" :
                    cut_i == 1 ? "full cut:" :
                    cut_i == 2 ? "token cut:" : "?? cut",
                    text_point);
                text_point.Y += text_line_height / 2;

                MemoryStream obuf = new MemoryStream();
                sample.cutFaceImage(
                    obuf,
                    RawSample.ImageFormat.IMAGE_FORMAT_BMP,
                    cut_i == 0 ? RawSample.FaceCutType.FACE_CUT_BASE :
                    cut_i == 1 ? RawSample.FaceCutType.FACE_CUT_FULL_FRONTAL :
                    cut_i == 2 ? RawSample.FaceCutType.FACE_CUT_TOKEN_FRONTAL :
                    (RawSample.FaceCutType) 999);

                byte[] sbuf = obuf.ToArray();

                // const OpenCvSharp.Mat_<uchar> cvbuf(1, sbuf.length(), (uchar*) sbuf.c_str());

                OpenCvSharp.Mat img = OpenCvSharp.Cv2.ImDecode(sbuf, OpenCvSharp.ImreadModes.Unchanged);

                OpenCvSharp.Cv2.Resize(img, img, OpenCvSharp.Size.Zero, 0.3, 0.3);


                int img_rect_x = (int)Math.Max(0, -text_point.X);
                int img_rect_y = (int)Math.Max(0, -text_point.Y);

                int img_rect_width = (int)Math.Min(
                    img.Cols - img_rect_x,
                    draw_image.Cols - Math.Max(0, text_point.X));

                int img_rect_height = (int)Math.Min(
                    img.Rows - img_rect_y,
                    draw_image.Rows - Math.Max(0, text_point.Y));

                if (img_rect_width <= 0 || img_rect_height <= 0)
                {
                    continue;
                }

                OpenCvSharp.Rect img_rect = new OpenCvSharp.Rect(img_rect_x, img_rect_y, img_rect_width, img_rect_height);

                img[img_rect].CopyTo(
                    draw_image[new OpenCvSharp.Rect(
                                   (int)Math.Max(0, text_point.X),
                                   (int)Math.Max(0, text_point.Y),
                                   img_rect.Width,
                                   img_rect.Height)]);

                text_point.Y += text_line_height / 2;
                text_point.Y += img.Rows;


                text_point.Y += text_line_height / 3;
            }
        }
        // draw checkboxes
        for (int i = 0; i < flags_count; ++i)
        {
            OpenCvSharp.Rect rect  = flag_rect(i);
            OpenCvSharp.Rect rect2 = new OpenCvSharp.Rect(rect.X + 5, rect.Y + 5, rect.Width - 10, rect.Height - 10);

            OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(255), -1);
            OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(0), 2, OpenCvSharp.LineTypes.AntiAlias);

            if (get_flag(i))
            {
                OpenCvSharp.Cv2.Rectangle(draw_image, rect2, OpenCvSharp.Scalar.All(0), -1, OpenCvSharp.LineTypes.AntiAlias);
            }

            puttext(
                draw_image,
                flag_name(i),
                new OpenCvSharp.Point2f(rect.X + rect.Width + 3, rect.Y + rect.Height - 3));
        }


        // show the image with the drawn information
        OpenCvSharp.Cv2.ImShow("demo", draw_image);

        // register a callback for mouse events
        OpenCvSharp.Cv2.SetMouseCallback("demo", (OpenCvSharp.CvMouseCallback)onMouse);
    }
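
A minimal sketch of the frame loop that would drive work() (the worker construction is assumed from the surrounding sample; the "demo" window name matches the ImShow call above):

        // Assumed driver: feed frames to the worker until ESC is pressed.
        OpenCvSharp.VideoCapture capturer = new OpenCvSharp.VideoCapture(0);
        for (;;)
        {
            OpenCvSharp.Mat frame = capturer.RetrieveMat();
            if (frame == null || frame.Empty())
                break;

            worker.work(frame);                     // draws overlays and shows "demo"

            if (OpenCvSharp.Cv2.WaitKey(20) == 27)  // ESC
                break;
        }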
Example 6
        /// <summary>
        /// Combines the original and processed images into a single image twice as wide
        /// </summary>
        /// <param name="original">Source image</param>
        /// <param name="processed">Processed image</param>
        /// <param name="detectedContour">Contour to draw over original image to show detected shape</param>
        /// <returns>OpenCV::Mat image with images combined</returns>
        private Mat CombineMats(Mat original, Mat processed, Point[] detectedContour)
        {
            Size inputSize = new Size(original.Width, original.Height);

            // combine fancy output image:
            // - create new texture twice as wide as input
            // - copy input into the left half
            // - draw detected paper contour over original input
            // - put "scanned", un-warped and cleared paper to the right, centered in the right half
            var matCombined = new Mat(new Size(inputSize.Width * 2, inputSize.Height), original.Type(), Scalar.FromRgb(64, 64, 64));

            // copy original image with detected shape drawn over
            original.CopyTo(matCombined.SubMat(0, inputSize.Height, 0, inputSize.Width));
            if (null != detectedContour && detectedContour.Length > 2)
            {
                matCombined.DrawContours(new Point[][] { detectedContour }, 0, Scalar.FromRgb(255, 255, 0), 3);
            }

            // copy scanned paper without extra scaling, as is
            if (null != processed)
            {
                double  hw = processed.Width * 0.5, hh = processed.Height * 0.5;
                Point2d center = new Point2d(inputSize.Width + inputSize.Width * 0.5, inputSize.Height * 0.5);
                Mat     roi    = matCombined.SubMat(
                    (int)(center.Y - hh), (int)(center.Y + hh),
                    (int)(center.X - hw), (int)(center.X + hw)
                    );
                processed.CopyTo(roi);
            }

            return(matCombined);
        }
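
        A hypothetical call site (scanner.Output is an assumed property holding the un-warped paper; PaperShape appears in the next example):

            // Sketch: left half = source with contour, right half = scanned paper.
            Mat view = CombineMats(original, scanner.Output, scanner.PaperShape);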
Example 7
        public Texture getScanFrame(WebCamTexture inputTexture)
        {
            Mat  original  = Unity.TextureToMat(inputTexture);
            Size inputSize = new Size(original.Width, original.Height);

            scanner.Input = original.Clone(); // reuse the converted frame instead of calling TextureToMat twice

            // if the last scan failed, fall back to hue-based grayscale pre-processing
            if (!scanner.Success)
            {
                scanner.Settings.GrayMode = PaperScanner.ScannerSettings.ColorMode.HueGrayscale;
            }

            Point[] detectedContour = scanner.PaperShape;

            var matCombinedFrame = new Mat(new Size(inputSize.Width, inputSize.Height), original.Type(), Scalar.FromRgb(64, 64, 64));

            original.CopyTo(matCombinedFrame.SubMat(0, inputSize.Height, 0, inputSize.Width));
            if (null != detectedContour && detectedContour.Length > 2)
            {
                matCombinedFrame.DrawContours(new Point[][] { detectedContour }, 0, Scalar.FromRgb(255, 255, 0), 3);
            }


            return(Unity.MatToTexture(matCombinedFrame));
        }
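
        A minimal sketch of a Unity driver for getScanFrame (the MonoBehaviour, RawImage target, and webcam startup are assumptions, not part of the sample):

            // Hypothetical behaviour hosting getScanFrame from the sample above.
            // Assumes: using UnityEngine; using UnityEngine.UI;
            public class ScanBehaviour : MonoBehaviour
            {
                public RawImage target;   // assumed UI element showing the result
                WebCamTexture   webcam;

                void Start()
                {
                    webcam = new WebCamTexture();
                    webcam.Play();
                }

                void Update()
                {
                    if (webcam.didUpdateThisFrame)
                        target.texture = getScanFrame(webcam);
                }
            }

        Note that Unity.MatToTexture allocates a new texture on every call, so a long-running loop should Destroy() the previous texture to avoid leaking memory.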