/// <summary>
/// Detects faces in <paramref name="image"/> using the OpenCV TensorFlow SSD face
/// detector and appends one bounding rectangle per accepted detection to
/// <paramref name="faces"/>.
/// </summary>
/// <param name="image">Input BGR image to scan.</param>
/// <param name="faces">Output list that detected face rectangles are appended to.</param>
/// <param name="confidenceThreshold">
/// Minimum detection confidence to accept; defaults to 0.7, matching the previous
/// hard-coded value, so existing callers are unaffected.
/// </param>
public static void Detect(Mat image, List<Rectangle> faces, double confidenceThreshold = 0.7)
{
    // NOTE(review): model paths are hard-coded; consider making them configurable.
    // Net and the Mats below are IDisposable and were previously leaked on every call.
    using (Net net = DnnInvoke.ReadNetFromTensorflow(
               @"C:\models\opencv_face_detector_uint8.pb",
               @"C:\models\opencv_face_detector.pbtxt"))
    using (Mat inputBlob = DnnInvoke.BlobFromImage(
               image, 1.0, new Size(300, 300), new MCvScalar(104.0, 117.0, 123.0), true, false))
    {
        net.SetInput(inputBlob, "data");

        using (Mat detection = net.Forward("detection_out"))
        {
            // Detection output shape is [1, 1, N, 7]; each row is
            // [imageId, classId, confidence, x1, y1, x2, y2] with coordinates
            // normalized to [0, 1].
            int resultRows = detection.SizeOfDimension[2];
            int resultCols = detection.SizeOfDimension[3];
            float[] temp = new float[resultRows * resultCols];
            Marshal.Copy(detection.DataPointer, temp, 0, temp.Length);

            for (int i = 0; i < resultRows; i++)
            {
                float confidence = temp[i * resultCols + 2];
                if (confidence > confidenceThreshold)
                {
                    // Scale normalized corners back to pixel coordinates.
                    int x1 = (int)(temp[i * resultCols + 3] * image.Width);
                    int y1 = (int)(temp[i * resultCols + 4] * image.Height);
                    int x2 = (int)(temp[i * resultCols + 5] * image.Width);
                    int y2 = (int)(temp[i * resultCols + 6] * image.Height);
                    faces.Add(new Rectangle(x1, y1, x2 - x1, y2 - y1));
                }
            }
        }
    }
}
////////////////////
/// Window setup  ///
////////////////////
public Form1()
{
    InitializeComponent();

    // Load the hand-detection neural network shipped with the application.
    net = DnnInvoke.ReadNetFromTensorflow(@"Properties\frozen_inference_graph.pb", @"Properties\graph.pbtxt");
    if (net == null) // load failed
    {
        // Fail fast with a specific exception type instead of the original
        // bare `throw new Exception("Error")`.
        throw new InvalidOperationException(
            "Failed to load the TensorFlow network from the Properties folder.");
    }

    // Run the image-processing pipeline whenever the UI thread is idle.
    Application.Idle += delegate
    {
        if (img != null) // the camera has delivered a frame
        {
            // The frame passes through the following stages in order:
            img = ProcessImageUseDnn(img);   // 1. DNN-based hand detection
            if (showPoints)
            {
                img = DrawPoints(img);       // 2. draw detection points
            }
            if (showKeyPoint)
            {
                img = DrawKeyPoint(img);     // 3. draw key points
            }
            imageBox1.Image = img;           // finally, show the result in imageBox1
        }
    };
}
/// <summary>
/// Creates a DNN-based face detector configured from the "Algos:Dnn" section.
/// </summary>
/// <param name="configuration">Configuration providing model paths, blob dimensions and threshold.</param>
/// <exception cref="ArgumentException">
/// Thrown when Height, Width or Probability is missing or not a valid number.
/// The original ignored the TryParse results, silently leaving the fields at 0
/// and producing a detector that could never work.
/// </exception>
public DnnFaceDetector(IConfiguration configuration)
{
    if (!int.TryParse(configuration["Algos:Dnn:Height"], out _height))
    {
        throw new ArgumentException("Algos:Dnn:Height is missing or not a valid integer.", nameof(configuration));
    }
    if (!int.TryParse(configuration["Algos:Dnn:Width"], out _width))
    {
        throw new ArgumentException("Algos:Dnn:Width is missing or not a valid integer.", nameof(configuration));
    }
    if (!double.TryParse(configuration["Algos:Dnn:Probability"], out _probability))
    {
        throw new ArgumentException("Algos:Dnn:Probability is missing or not a valid number.", nameof(configuration));
    }

    _net = DnnInvoke.ReadNetFromTensorflow(
        configuration["Algos:Dnn:BinFilePath"],
        configuration["Algos:Dnn:ParamFilePath"]);
}
/// <summary>
/// Sets up the camera object-detection window: initializes the UI, reads the
/// class-label list, loads the TensorFlow detection network, and pre-allocates
/// the frame buffer at the configured camera resolution.
/// </summary>
public CameraObjectDetectionWindow()
{
    InitializeComponent();

    // Labels name the classes that the network below can detect.
    labels = File.ReadAllLines(labelPath);
    net = DnnInvoke.ReadNetFromTensorflow(graphPath, configPath);

    // Reusable buffer for incoming camera frames.
    frame = new Image<Bgr, byte>(resolutionX, resolutionY);
}
/// <summary>
/// Initializes the face-detection object by loading the OpenCV TensorFlow
/// face-detector model from the "models" folder next to the executable.
/// </summary>
public FaceDetector()
{
    // Resolve the model files relative to the application's base directory.
    string modelDir = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "models");

    detectorModel = DnnInvoke.ReadNetFromTensorflow(
        Path.Combine(modelDir, "opencv_face_detector_uint8.pb"),
        Path.Combine(modelDir, "opencv_face_detector.pbtxt"));
}
/// <summary>
/// Loads a TensorFlow DNN model from the given file and refreshes the cached
/// model information. Best-effort: on any failure the model is left as null so
/// callers can probe availability instead of handling an exception.
/// </summary>
/// <param name="modeFileName">Path to the frozen TensorFlow model file.</param>
public DnnInfo(string modeFileName)
{
    try
    {
        m_dnnModel = DnnInvoke.ReadNetFromTensorflow(modeFileName);
        UpdateInfo();
    }
    catch (Exception)
    {
        // Deliberate swallow; the original also declared an `ex` variable it
        // never used (compiler warning CS0168), removed here.
        m_dnnModel = null;
    }
}
// Detects faces in each image of the list using the SSD face detector.
// Returns, for every image, an array of [left, top, width, height] boxes.
public List<int[][]> DetectFacesSDD(List<string> imagePaths)
{
    List<int[][]> allFaces = new List<int[][]>();
    int count = 0;

    // Load the network once instead of once per image, and dispose it when done
    // (the original created a new Net inside the loop and leaked every one).
    using (Net net = DnnInvoke.ReadNetFromTensorflow(_modelFile, _configFile))
    {
        foreach (var file in imagePaths)
        {
            List<int[]> faces = new List<int[]>();

            using (Image<Bgr, byte> image = new Image<Bgr, byte>(file))
            {
                int cols = image.Width;
                int rows = image.Height;

                using (Mat blob = DnnInvoke.BlobFromImage(
                           image.Mat, 1, new System.Drawing.Size(300, 300), default(MCvScalar), false, false))
                {
                    net.SetInput(blob);

                    using (Mat mat = net.Forward())
                    {
                        // Output shape [1, 1, N, 7]:
                        // [imageId, classId, confidence, x1, y1, x2, y2], coords in [0, 1].
                        float[,,,] flt = (float[,,,])mat.GetData();
                        for (int x = 0; x < flt.GetLength(2); x++)
                        {
                            if (flt[0, 0, x, 2] > 0.2)
                            {
                                int left = Convert.ToInt32(flt[0, 0, x, 3] * cols);
                                int top = Convert.ToInt32(flt[0, 0, x, 4] * rows);
                                // Stored as width/height (corner minus origin), not raw corners —
                                // the original's `right`/`bottom` names were misleading.
                                int width = Convert.ToInt32(flt[0, 0, x, 5] * cols) - left;
                                int height = Convert.ToInt32(flt[0, 0, x, 6] * rows) - top;
                                faces.Add(new[] { left, top, width, height });
                            }
                        }
                    }
                }
            }

            allFaces.Add(faces.ToArray());
            Console.WriteLine(count); // progress trace, kept from the original
            count++;
        }
    }

    return allFaces;
}