/// <summary>
/// Load a caffe network (used as the face embedder) from two embedded resources.
/// </summary>
/// <param name="protoFileName">Resource name of the caffe prototxt definition</param>
/// <param name="modelFileName">Resource name of the caffe model weights</param>
/// <returns>The network loaded via <see cref="DnnInvoke.ReadNetFromCaffe"/></returns>
private static Net GetEmbedderFromCaffe(string protoFileName, string modelFileName)
{
    byte[] protoBytes = Utils.GetResourceBytes(protoFileName);
    byte[] modelBytes = Utils.GetResourceBytes(modelFileName);
    return DnnInvoke.ReadNetFromCaffe(protoBytes, modelBytes);
}
/// <summary>
/// Load the SSD res10 face-detector caffe network from embedded resources.
/// </summary>
/// <returns>The detector network loaded via <see cref="DnnInvoke.ReadNetFromCaffe"/></returns>
private static Net GetDetector()
{
    byte[] protoBytes = Utils.GetResourceBytes("Models.deploy.prototxt");
    byte[] modelBytes = Utils.GetResourceBytes("Models.res10_300x300_ssd_iter_140000.caffemodel");
    return DnnInvoke.ReadNetFromCaffe(protoBytes, modelBytes);
}
/// <summary>
/// Download (with SHA-256 verification) and initialize the DNN face detector.
/// No-op if the detector has already been initialized.
/// </summary>
/// <param name="onDownloadProgressChanged">Callback invoked when download progress changes</param>
/// <returns>Async task</returns>
public async Task Init(System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null)
{
    if (_faceDetectorNet != null)
        return;

    FileDownloadManager manager = new FileDownloadManager();

    // Registration order matters: Files[0] = caffemodel weights, Files[1] = prototxt.
    manager.AddFile(
        "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel",
        _modelFolderName,
        "2A56A11A57A4A295956B0660B4A3D76BBDCA2206C4961CEA8EFE7D95C7CB2F2D");
    manager.AddFile(
        "https://raw.githubusercontent.com/opencv/opencv/4.0.1/samples/dnn/face_detector/deploy.prototxt",
        _modelFolderName,
        "F62621CAC923D6F37BD669298C428BB7EE72233B5F8C3389BB893E35EBBCF795");

    if (onDownloadProgressChanged != null)
        manager.OnDownloadProgressChanged += onDownloadProgressChanged;

    await manager.Download();

    // Only load the net when every file arrived and passed its checksum.
    if (!manager.AllFilesDownloaded)
        return;

    // ReadNetFromCaffe takes the prototxt first, then the weights.
    _faceDetectorNet = DnnInvoke.ReadNetFromCaffe(manager.Files[1].LocalFile, manager.Files[0].LocalFile);
    if (Emgu.CV.Cuda.CudaInvoke.HasCuda)
    {
        _faceDetectorNet.SetPreferableBackend(Emgu.CV.Dnn.Backend.Cuda);
        _faceDetectorNet.SetPreferableTarget(Emgu.CV.Dnn.Target.Cuda);
    }
}
/// <summary>
/// Build a detection module: the caffe detector is loaded from in-memory bytes,
/// the torch embedding model from a file path.
/// </summary>
/// <param name="proto">Caffe prototxt bytes for the detector</param>
/// <param name="caffeModel">Caffe weight bytes for the detector</param>
/// <param name="embeddingModel">Path to the torch embedding model on disk</param>
/// <param name="minConfidence">Minimum confidence threshold to keep a detection</param>
public DetectionModule(byte[] proto, byte[] caffeModel, string embeddingModel, double minConfidence)
{
    this.minConfidence = minConfidence;
    detector = DnnInvoke.ReadNetFromCaffe(proto, caffeModel);
    // TODO: is there a way to read embeddingModel from bytes? It's a torch model.
    embedder = DnnInvoke.ReadNet(embeddingModel);
}
/// <summary>
/// Download and initialize the DNN face detector.
/// No-op if the detector has already been initialized.
/// </summary>
/// <param name="onDownloadProgressChanged">Callback invoked when download progress changes</param>
/// <returns>Async task</returns>
public async Task Init(System.Net.DownloadProgressChangedEventHandler onDownloadProgressChanged = null)
{
    if (_faceDetector == null)
    {
        FileDownloadManager manager = new FileDownloadManager();
        // Registration order matters: Files[0] = caffemodel weights, Files[1] = prototxt.
        manager.AddFile(
            "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel",
            _modelFolderName);
        manager.AddFile(
            "https://raw.githubusercontent.com/opencv/opencv/4.0.1/samples/dnn/face_detector/deploy.prototxt",
            _modelFolderName);
        if (onDownloadProgressChanged != null)
        {
            manager.OnDownloadProgressChanged += onDownloadProgressChanged;
        }
        await manager.Download();
        // FIX: guard against a failed/partial download before touching the local
        // files — previously ReadNetFromCaffe ran unconditionally and would fail
        // on missing files. This mirrors the checksum-verified Init overload.
        if (manager.AllFilesDownloaded)
        {
            // ReadNetFromCaffe takes the prototxt first, then the weights.
            _faceDetector = DnnInvoke.ReadNetFromCaffe(manager.Files[1].LocalFile, manager.Files[0].LocalFile);
            if (Emgu.CV.Cuda.CudaInvoke.HasCuda)
            {
                _faceDetector.SetPreferableBackend(Emgu.CV.Dnn.Backend.Cuda);
                _faceDetector.SetPreferableTarget(Emgu.CV.Dnn.Target.Cuda);
            }
        }
    }
}
/// <summary>
/// Face detection window: allocates the frame buffer, precomputes the
/// detection-to-frame scale factors and loads the caffe face-detection network.
/// </summary>
public FaceDetectionWindow()
{
    InitializeComponent();

    frame = new Image<Bgr, byte>(resolutionX, resolutionY);

    // Factors to map coordinates from the (square) detection input size
    // back to the full-resolution frame.
    xRate = resolutionX / (float)detectionSize;
    yRate = resolutionY / (float)detectionSize;

    net = DnnInvoke.ReadNetFromCaffe(protoPath, caffemodelPath);
}
/// <summary>
/// Lazily download the SSD res10 face-detector files and load the caffe network.
/// No-op if the detector has already been created.
/// </summary>
private void InitFaceDetector()
{
    if (_faceDetector != null)
        return;

    InitPath();

    string modelFile = DnnPage.DnnDownloadFile(
        "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/",
        "res10_300x300_ssd_iter_140000.caffemodel",
        _path);
    string protoFile = DnnPage.DnnDownloadFile(
        "https://raw.githubusercontent.com/opencv/opencv/4.0.1/samples/dnn/face_detector/",
        "deploy.prototxt",
        _path);

    // ReadNetFromCaffe takes the prototxt first, then the weights.
    _faceDetector = DnnInvoke.ReadNetFromCaffe(protoFile, modelFile);
}
/// <summary>
/// Initialize the form: load the Haar eye cascade and the caffe face net,
/// size the window to the capture resolution, and open the camera.
/// </summary>
public Form1()
{
    InitializeComponent();

    // FIX: removed `catch (NullReferenceException ex) { throw; }` — a pure
    // no-op catch-and-rethrow (with an unused exception variable) that only
    // generated a compiler warning; behavior is unchanged without it.
    eyes_detect = new CascadeClassifier(Path.GetFullPath("Models/haarcascade_eye.xml"));

    // Factors to map detections (run at detectionSize) back to frame coordinates.
    xRate = resolutionX / (float)detectionSize;
    yRate = resolutionY / (float)detectionSize;

    net = DnnInvoke.ReadNetFromCaffe(protoPath, caffemodelPath);

    this.Width = resolutionX;
    this.Height = resolutionY;

    camera = new VideoCapture(cameraIndex);
    camera.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameWidth, resolutionX);
    camera.SetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameHeight, resolutionY);
    //camera.FlipHorizontal = true;
}
private void PoseEstimationBody_25_Click(object sender, EventArgs e) { try { if (!IMGDict.ContainsKey("input")) { throw new Exception("Please read in image first."); } // for openopse int inWidth = 368; int inHeight = 368; float threshold = 0.1f; int nPoints = 25; var BODY_PARTS = new Dictionary <string, int>() { { "Nose", 0 }, { "Neck", 1 }, { "RShoulder", 2 }, { "RElbow", 3 }, { "RWrist", 4 }, { "LShoulder", 5 }, { "LElbow", 6 }, { "LWrist", 7 }, { "MidHip", 8 }, { "RHip", 9 }, { "RKnee", 10 }, { "RAnkle", 11 }, { "LHip", 12 }, { "LKnee", 13 }, { "LAnkle", 14 }, { "REye", 15 }, { "LEye", 16 }, { "REar", 17 }, { "LEar", 18 }, { "LBigToe", 19 }, { "LSmallToe", 20 }, { "LHeel", 21 }, { "RBigToe", 22 }, { "RSmallToe", 23 }, { "RHeel", 24 }, { "Background", 25 } }; int[,] point_pairs = new int[, ] { { 1, 0 }, { 1, 2 }, { 1, 5 }, { 2, 3 }, { 3, 4 }, { 5, 6 }, { 6, 7 }, { 0, 15 }, { 15, 17 }, { 0, 16 }, { 16, 18 }, { 1, 8 }, { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 22 }, { 22, 23 }, { 11, 24 }, { 8, 12 }, { 12, 13 }, { 13, 14 }, { 14, 19 }, { 19, 20 }, { 14, 21 } }; // Load the caffe Model string prototxt = @"F:\openpose\models\pose\body_25\pose_deploy.prototxt"; string modelPath = @"F:\openpose\models\pose\body_25\pose_iter_584000.caffemodel"; var net = DnnInvoke.ReadNetFromCaffe(prototxt, modelPath); var img = IMGDict["input"].Clone(); var imgHeight = img.Height; var imgWidth = img.Width; var blob = DnnInvoke.BlobFromImage(img, 1.0 / 255.0, new Size(inWidth, inHeight), new MCvScalar(0, 0, 0)); net.SetInput(blob); net.SetPreferableBackend(Emgu.CV.Dnn.Backend.OpenCV); var output = net.Forward(); var H = output.SizeOfDimension[2]; var W = output.SizeOfDimension[3]; var HeatMap = output.GetData(); var points = new List <Point>(); for (int i = 0; i < nPoints; i++) { Matrix <float> matrix = new Matrix <float>(H, W); for (int row = 0; row < H; row++) { for (int col = 0; col < W; col++) { matrix[row, col] = (float)HeatMap.GetValue(0, i, row, col); } } double minVal = 0, maxVal 
= 0; Point minLoc = default, maxLoc = default;