Example #1
        public void NotSupportedUnicodeFileName()
        {
            Assert.True(File.Exists(LocalModelPath), $"'{LocalModelPath}' not found");

            var unicodeFileName = Path.Combine(Path.GetDirectoryName(LocalModelPath)!, "🤣🍀.pb");

            if (!File.Exists(unicodeFileName))
            {
                File.Copy(LocalModelPath, unicodeFileName, true);
            }

            // Check that ArgumentException(unicode unmappable char) does not occur.
            if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
            {
                var ex = Assert.Throws<OpenCVException>(() =>
                {
                    using var net = CvDnn.ReadNet(unicodeFileName);
                });
                Assert.StartsWith("FAILED: fs.is_open(). Can't open", ex.Message, StringComparison.InvariantCulture);
                Assert.Equal("cv::dnn::ReadProtoFromBinaryFile", ex.FuncName);
            }
            else
            {
                // No error
            }
        }
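The test above documents that OpenCV's native file APIs cannot open such paths on Windows. A common workaround (a minimal sketch, assuming a TensorFlow .pb model as in the test) is to read the bytes with .NET, which handles Unicode paths, and parse the model from memory:

            // Let .NET read the Unicode path, then parse the model from the
            // in-memory buffer, bypassing the native file open that fails here.
            byte[] modelBytes = File.ReadAllBytes(unicodeFileName);
            using var net = CvDnn.ReadNetFromTensorflow(modelBytes);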
Example #2
        public void DetectAllText(string fileName)
        {
            const int   InputWidth    = 320;
            const int   InputHeight   = 320;
            const float ConfThreshold = 0.5f;
            const float NmsThreshold  = 0.4f;

            // Load network.
            using (Net net = CvDnn.ReadNet(Path.GetFullPath(LocalModelPath)))
                using (Mat img = new Mat(fileName))

                    // Prepare input image
                    using (var blob = CvDnn.BlobFromImage(img, 1.0, new Size(InputWidth, InputHeight), new Scalar(123.68, 116.78, 103.94), true, false))
                    {
                        // Forward Pass
                        // Now that we have prepared the input, we will pass it through the network. There are two outputs of the network.
                        // One specifies the geometry of the Text-box and the other specifies the confidence score of the detected box.
                        // These are given by the layers :
                        //   feature_fusion/concat_3
                        //   feature_fusion/Conv_7/Sigmoid
                        var outputBlobNames = new string[] { "feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3" };
                        var outputBlobs     = outputBlobNames.Select(_ => new Mat()).ToArray();

                        net.SetInput(blob);
                        net.Forward(outputBlobs, outputBlobNames);
                        Mat scores   = outputBlobs[0];
                        Mat geometry = outputBlobs[1];

                        // Decode predicted bounding boxes (decode the positions of the text boxes along with their orientation)
                        Decode(scores, geometry, ConfThreshold, out var boxes, out var confidences);

                        // Apply non-maximum suppression procedure for filtering out the false positives and get the final predictions
                        CvDnn.NMSBoxes(boxes, confidences, ConfThreshold, NmsThreshold, out var indices);

                        // Render detections.
                        Point2f ratio = new Point2f((float)img.Cols / InputWidth, (float)img.Rows / InputHeight);
                        for (var i = 0; i < indices.Length; ++i)
                        {
                            RotatedRect box = boxes[indices[i]];

                            Point2f[] vertices = box.Points();

                            for (int j = 0; j < 4; ++j)
                            {
                                vertices[j].X *= ratio.X;
                                vertices[j].Y *= ratio.Y;
                            }

                            for (int j = 0; j < 4; ++j)
                            {
                                Cv2.Line(img, (int)vertices[j].X, (int)vertices[j].Y, (int)vertices[(j + 1) % 4].X, (int)vertices[(j + 1) % 4].Y, new Scalar(0, 255, 0), 3);
                            }
                        }

                        ShowImagesWhenDebugMode(img);
                    }
        }
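The Decode helper referenced above is not shown. The following is a minimal sketch ported from the OpenCV EAST text-detection sample; the channel layout, the 4x feature-map stride, and the geometry math are assumptions carried over from that sample rather than the original helper:

        private static void Decode(Mat scores, Mat geometry, float confThreshold,
                                   out IList<RotatedRect> boxes, out IList<float> confidences)
        {
            // scores is 1x1xHxW, geometry is 1x5xHxW; both are 1/4 the input resolution.
            boxes       = new List<RotatedRect>();
            confidences = new List<float>();

            int height = scores.Size(2);
            int width  = scores.Size(3);

            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    float score = scores.At<float>(0, 0, y, x);
                    if (score < confThreshold)
                        continue;

                    // Distances from this cell to the four box edges, plus the rotation angle.
                    float top    = geometry.At<float>(0, 0, y, x);
                    float right  = geometry.At<float>(0, 1, y, x);
                    float bottom = geometry.At<float>(0, 2, y, x);
                    float left   = geometry.At<float>(0, 3, y, x);
                    float angle  = geometry.At<float>(0, 4, y, x);

                    // The feature maps are 4x smaller than the network input.
                    float offsetX = x * 4.0f;
                    float offsetY = y * 4.0f;
                    float cos = (float)Math.Cos(angle);
                    float sin = (float)Math.Sin(angle);
                    float h = top + bottom;
                    float w = right + left;

                    var offset = new Point2f(offsetX + cos * right + sin * bottom,
                                             offsetY - sin * right + cos * bottom);
                    var p1 = new Point2f(-sin * h, -cos * h) + offset;
                    var p3 = new Point2f(-cos * w, sin * w) + offset;
                    var center = new Point2f(0.5f * (p1.X + p3.X), 0.5f * (p1.Y + p3.Y));

                    boxes.Add(new RotatedRect(center, new Size2f(w, h), (float)(-angle * 180.0 / Math.PI)));
                    confidences.Add(score);
                }
            }
        }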
Example #3
        public InferenceEngineDetector(string modelFile)
        {
            Net = CvDnn.ReadNet(Path.ChangeExtension(modelFile, ".bin"), Path.ChangeExtension(modelFile, ".xml"));
            Net.SetPreferableBackend(Net.Backend.INFERENCE_ENGINE);
            Net.SetPreferableTarget(Net.Target.OPENCL_FP16);
            //Net.SetPreferableBackend(Net.Backend.OPENCV);

            OutputMat = new Mat();
        }
Example #4
        public InferenceEngineDetector(string modelFile, Net.Backend backend, Net.Target target)
        {
            Net = CvDnn.ReadNet(Path.ChangeExtension(modelFile, ".bin"), Path.ChangeExtension(modelFile, ".xml"));
            Net.SetPreferableTarget(target);
            Net.SetPreferableBackend(backend);
            Console.WriteLine($"Backend = '{backend}', Target = '{target}'");

            //Net.SetPreferableBackend(Net.Backend.INFERENCE_ENGINE);
            //Net.SetPreferableTarget(Net.Target.OPENCL_FP16);
            //Net.SetPreferableBackend(Net.Backend.OPENCV);

            OutputMat = new Mat();
        }
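The parameterized constructor makes it easy to probe which backend/target pairs work on a given machine. A hypothetical usage sketch; the model name is illustrative:

            // Hypothetical usage. Note that an unsupported backend/target pair
            // usually surfaces as an exception on the first Forward() call,
            // not in the constructor itself.
            var detector = new InferenceEngineDetector("face-detection-adas-0001",
                                                       Net.Backend.INFERENCE_ENGINE,
                                                       Net.Target.CPU);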
Example #5
 public Sbd(string modelPath, Size inputSize, int outputWidth, int outputHeight, string[] classNames)
 {
     net = CvDnn.ReadNet(modelPath);
     if (net.Empty())
     {
         Console.WriteLine("fail to load model");
         return;
     }
     net.SetPreferableBackend(Net.Backend.OPENCV);
     net.SetPreferableTarget(Net.Target.CPU);
     this.inputSize    = inputSize;
     this.outputWidth  = outputWidth;
     this.outputHeight = outputHeight;
     this.classNames   = classNames;
     this.classCount   = classNames.Length;
 }
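A hypothetical construction of the Sbd wrapper above; the model path, input size, output grid, and class list are illustrative assumptions:

     // All arguments below are placeholders for illustration only.
     var sbd = new Sbd("model.onnx", new Size(416, 416), 13, 13,
                       new[] { "person", "car", "dog" });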
Example #6
        public void Run()
        {
            const string modelFace    = "face-detection-adas-0001.bin";
            const string modelFaceTxt = "face-detection-adas-0001.xml";
            const string sampleImage  = "sample.jpg";
            const string outputLoc    = "sample_output.jpg";

            using var frame = Cv2.ImRead(sampleImage);
            int frameHeight = frame.Rows;
            int frameWidth  = frame.Cols;

            using var netFace = CvDnn.ReadNet(modelFace, modelFaceTxt);
            netFace.SetPreferableBackend(Backend.INFERENCE_ENGINE);
            netFace.SetPreferableTarget(Target.CPU);

            using var blob = CvDnn.BlobFromImage(frame, 1.0, new OpenCvSharp.Size(672, 384), new OpenCvSharp.Scalar(0, 0, 0), false, false);
            netFace.SetInput(blob);

            using (var detection = netFace.Forward())
            {
                Mat detectionMat = new Mat(detection.Size(2), detection.Size(3), MatType.CV_32F, detection.Ptr(0));

                for (int i = 0; i < detectionMat.Rows; i++)
                {
                    float confidence = detectionMat.At<float>(i, 2);

                    if (confidence > 0.7)
                    {
                        int x1 = (int)(detectionMat.At<float>(i, 3) * frameWidth);  //xmin
                        int y1 = (int)(detectionMat.At<float>(i, 4) * frameHeight); //ymin
                        int x2 = (int)(detectionMat.At<float>(i, 5) * frameWidth);  //xmax
                        int y2 = (int)(detectionMat.At<float>(i, 6) * frameHeight); //ymax

                        OpenCvSharp.Rect roi = new OpenCvSharp.Rect(x1, y1, (x2 - x1), (y2 - y1));
                        roi = AdjustBoundingBox(roi);
                        Cv2.Rectangle(frame, roi, new Scalar(0, 255, 0), 2, LineTypes.Link4);
                    }
                }
            }

            var finalOutput = outputLoc;

            Cv2.ImWrite(finalOutput, frame);
        }
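AdjustBoundingBox is not shown above. A minimal sketch of its plausible job, clamping the ROI to the frame; the original presumably reads the frame size from fields (the call site passes only the ROI), so the explicit parameters here are just to keep the sketch self-contained:

        private static OpenCvSharp.Rect AdjustBoundingBox(OpenCvSharp.Rect roi, int frameWidth, int frameHeight)
        {
            // Clamp the box fully inside the frame; the detector can emit
            // normalized coordinates slightly outside [0, 1].
            int x = Math.Max(roi.X, 0);
            int y = Math.Max(roi.Y, 0);
            int w = Math.Min(roi.Width,  frameWidth  - x);
            int h = Math.Min(roi.Height, frameHeight - y);
            return new OpenCvSharp.Rect(x, y, w, h);
        }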
Example #7
        public void Load()
        {
            Assert.True(File.Exists(LocalModelPath), $"'{LocalModelPath}' not found");

            using var net = CvDnn.ReadNet(LocalModelPath);
        }
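Beyond not throwing, the smoke test could also check that the loaded network is usable; a hedged extension of the test above:

            using var net = CvDnn.ReadNet(LocalModelPath);
            Assert.False(net.Empty()); // a successfully loaded model should report a non-empty net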
Example #8
        public void Run()
        {
            const string modelFace    = "face-detection-adas-0001.bin";
            const string modelFaceTxt = "face-detection-adas-0001.xml";
            const string modelHead    = "head-pose-estimation-adas-0001.bin";
            const string modelHeadTxt = "head-pose-estimation-adas-0001.xml";
            const string sampleImage  = "sample.jpg";
            const string outputLoc    = "sample_output.jpg";

            var frame       = Cv2.ImRead(sampleImage);
            int frameHeight = frame.Rows;
            int frameWidth  = frame.Cols;
            var netFace     = CvDnn.ReadNet(modelFace, modelFaceTxt);
            var netHead     = CvDnn.ReadNet(modelHead, modelHeadTxt);

            netFace.SetPreferableBackend(Net.Backend.INFERENCE_ENGINE);
            netFace.SetPreferableTarget(Net.Target.CPU);
            netHead.SetPreferableBackend(Net.Backend.INFERENCE_ENGINE);
            netHead.SetPreferableTarget(Net.Target.CPU);

            var blob = CvDnn.BlobFromImage(frame, 1.0, new OpenCvSharp.Size(672, 384), new OpenCvSharp.Scalar(0, 0, 0), false, false);

            netFace.SetInput(blob);

            using (var detection = netFace.Forward())
            {
                Mat detectionMat = new Mat(detection.Size(2), detection.Size(3), MatType.CV_32F, detection.Ptr(0));
                for (int i = 0; i < detectionMat.Rows; i++)
                {
                    float confidence = detectionMat.At<float>(i, 2);

                    if (confidence > 0.7)
                    {
                        int x1 = (int)(detectionMat.At<float>(i, 3) * frameWidth);  //xmin
                        int y1 = (int)(detectionMat.At<float>(i, 4) * frameHeight); //ymin
                        int x2 = (int)(detectionMat.At<float>(i, 5) * frameWidth);  //xmax
                        int y2 = (int)(detectionMat.At<float>(i, 6) * frameHeight); //ymax

                        OpenCvSharp.Rect roi = new OpenCvSharp.Rect(x1, y1, (x2 - x1), (y2 - y1));
                        roi = AdjustBoundingBox(roi);
                        Mat face = new Mat(frame, roi);

                        var blob2 = CvDnn.BlobFromImage(face, 1.0, new OpenCvSharp.Size(60, 60), new OpenCvSharp.Scalar(0, 0, 0), false, false);
                        netHead.SetInput(blob2);

                        IEnumerable<string> outNames    = netHead.GetUnconnectedOutLayersNames();
                        IEnumerable<Mat>    outputBlobs = new List<Mat>()
                        {
                            new Mat(), new Mat(), new Mat()
                        };

                        netHead.Forward(outputBlobs, outNames);

                        Point3f headAngles  = HeadAngles(outputBlobs);
                        string  printAngles = "Yaw " + headAngles.Y.ToString() + " | Pitch " + headAngles.X.ToString() + " | Roll " + headAngles.Z.ToString();

                        Cv2.Rectangle(frame, roi, new Scalar(0, 255, 0), 2, LineTypes.Link4);
                        Cv2.PutText(frame, printAngles, new OpenCvSharp.Point(10, 500), HersheyFonts.HersheyComplex, 1.0, new Scalar(0, 255, 0), 2);
                    }
                }
            }

            var finalOutput = outputLoc;

            Cv2.ImWrite(finalOutput, frame);
        }
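The HeadAngles helper is not shown. A minimal sketch follows, assuming the three output blobs arrive as (yaw, pitch, roll) — head-pose-estimation-adas-0001 emits angle_y_fc, angle_p_fc, and angle_r_fc, each a single float in degrees — and packing them so the caller's X/Y/Z reads as pitch/yaw/roll; the actual blob order depends on GetUnconnectedOutLayersNames and is an assumption here:

        private static Point3f HeadAngles(IEnumerable<Mat> outputBlobs)
        {
            // Each blob is a 1x1 CV_32F Mat holding one angle in degrees.
            var blobs = outputBlobs.ToArray();
            float yaw   = blobs[0].At<float>(0, 0);
            float pitch = blobs[1].At<float>(0, 0);
            float roll  = blobs[2].At<float>(0, 0);

            // Packed to match the caller: X = pitch, Y = yaw, Z = roll.
            return new Point3f(pitch, yaw, roll);
        }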
Example #9
        private void Form1_Load(object sender, EventArgs e)
        {
            var model      = "opencv_face_detector_uint8.pb"; // trained TensorFlow model file
            var config     = "opencv_face_detector.pbtxt";    // configuration file
            var confidence = 0.0;
            var x1         = 0.0;
            var x2         = 0.0;
            var y1         = 0.0;
            var y2         = 0.0;
            int cnt        = 0;

            video = new VideoCapture(1); // use video device 1 (ManyCam)

            if (!video.IsOpened())       // error if the video device fails to open
            {
                Console.WriteLine("error");
                return;
            }

            Net net = CvDnn.ReadNet(model, config);

            if (net.Empty())
            {
                Console.WriteLine("error");
                return;
            }

            frame = new Mat();

            while (true)
            {
                video.Read(frame);
                if (frame.Empty())
                {
                    break;
                }

                // Convert the frame to an input blob and feed it to the net.
                Mat blob = CvDnn.BlobFromImage(frame, 1.0, new OpenCvSharp.Size(300, 300), new Scalar(104, 177, 123));
                net.SetInput(blob);
                Mat res = net.Forward();

                var detect = res.Reshape(1, res.Cols * res.Rows);

                for (int i = 0; i < detect.Rows; i++)     // face recognition
                {
                    confidence = detect.At<float>(i, 2);
                    if (confidence < 0.5)
                    {
                        break;                       // ignore detections with confidence below 0.5
                    }
                    // Get the box corner coordinates in pixels.
                    x1 = Math.Round(detect.At<float>(i, 3) * frame.Cols);
                    y1 = Math.Round(detect.At<float>(i, 4) * frame.Rows);
                    x2 = Math.Round(detect.At<float>(i, 5) * frame.Cols);
                    y2 = Math.Round(detect.At<float>(i, 6) * frame.Rows);

                    string Label = "Face:" + confidence;    // 1. visualize the detection result


                    // 2. Draw a green box when confidence > 0.955
                    if (confidence > 0.955)
                    {
                        Cv2.Rectangle(frame, new OpenCvSharp.Point(x1, y1), new OpenCvSharp.Point(x2, y2), new Scalar(0, 255, 0));
                        Cv2.PutText(frame, Label, new OpenCvSharp.Point(x1, y1 - 1), OpenCvSharp.HersheyFonts.Italic, 0.8, new Scalar(0, 255, 0));
                    }
                    // 2. Otherwise draw a red box
                    else
                    {
                        Cv2.Rectangle(frame, new OpenCvSharp.Point(x1, y1), new OpenCvSharp.Point(x2, y2), new Scalar(0, 0, 255));
                        Cv2.PutText(frame, Label, new OpenCvSharp.Point(x1, y1 - 1), OpenCvSharp.HersheyFonts.Italic, 0.8, new Scalar(0, 0, 255));
                    }
                }

                Cv2.ImShow("FACE ID", frame);

                if (confidence > 0.997910)
                {
                    cnt++;
                    if (cnt >= 30)    // save the frame after 30 high-confidence detections
                    {
                        // Use '-' as the date separator: '/' in the format string expands
                        // to the culture's date separator and would break the file name.
                        _saveName = "C:/images/" + DateTime.Now.ToString("yyyy-MM-dd_hh_mm_ss") + ".jpeg";
                        Cv2.ImWrite(_saveName, frame);
                        video.Release();
                        Cv2.DestroyAllWindows();
                        break;
                    }
                }

                if (Cv2.WaitKey(1) == 27)    // ASCII code 27: ESC
                {
                    break;
                }
            }

            Delay(100);

            inCvImage = Cv2.ImRead(_saveName);     // step 2: reload the saved image
            Cv2.Transpose(inCvImage, inCvImage);

            outCvImage = new Mat();
            Cv2.CvtColor(inCvImage, outCvImage, ColorConversionCodes.BGR2GRAY);
            Cv2.EqualizeHist(outCvImage, outCvImage);

            Cv2ToOutImage();

            // Keep only the inside of the bounding box visible.
            for (int rgb = 0; rgb < 3; rgb++)
            {
                for (int i = 0; i < _outH; i++)
                {
                    for (int k = 0; k < _outW; k++)
                    {
                        if ((x1 <= i && i <= x2) && (y1 <= k && k <= y2))
                        {
                            _outImage[rgb, i, k] = (byte)(255 - _outImage[rgb, i, k]);
                        }
                        else
                        {
                            _outImage[rgb, i, k] = 0;
                        }
                    }
                }
            }

            DisplayImage();

            connStr = "Server=127.0.0.1; Uid=root; Pwd=1234;Database=HYDB;CHARSET=UTF8";
            conn    = new MySqlConnection(connStr);
            // Upload to the DB.
            dbUpload();
            CCTV ma = new CCTV();

            ma.ShowDialog();
            Close();
        } // 1. save after face recognition and upload the blob
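The Delay(100) call above is a helper that is not shown; a minimal sketch, assuming the common WinForms pattern of spinning the message pump until the deadline:

        private static void Delay(int ms)
        {
            // Keep the UI responsive while waiting, instead of blocking
            // the thread with Thread.Sleep.
            var end = DateTime.Now.AddMilliseconds(ms);
            while (DateTime.Now < end)
                System.Windows.Forms.Application.DoEvents();
        }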