public void Run()
{
    // Webcam + super-resolution side-by-side demo.
    // FIX: the original set FrameWidth/FrameHeight BEFORE Open(-1); properties set on a
    // closed capture are discarded by OpenCV, so the resolution request never applied.
    // Open first, then configure. Also dispose the capture (it was leaked before).
    using (var capture = new VideoCapture())
    {
        capture.Open(-1);
        if (!capture.IsOpened())
            throw new Exception("capture initialization failed");
        capture.Set(CaptureProperty.FrameWidth, 640);
        capture.Set(CaptureProperty.FrameHeight, 480);

        // Independent camera source feeding the super-resolution filter.
        var fs = FrameSource.CreateCameraSource(-1);
        var sr = SuperResolution.CreateBTVL1();
        sr.SetInput(fs);

        using (var normalWindow = new Window("normal"))
        using (var srWindow = new Window("super resolution"))
        {
            var normalFrame = new Mat();
            var srFrame = new Mat();
            while (true)
            {
                capture.Read(normalFrame);
                sr.NextFrame(srFrame);
                // Either stream running dry ends the demo.
                if (normalFrame.Empty() || srFrame.Empty())
                    break;
                normalWindow.ShowImage(normalFrame);
                srWindow.ShowImage(srFrame);
                Cv2.WaitKey(100);
            }
        }
    }
}
// Opens an OpenCV capture from either a webcam id ("0") or a stream URL.
public OpencvSource(string cam_or_url)
{
    MAssert.Check(cam_or_url != string.Empty);

    // Treat the argument as a stream URL if it contains any non-digit character;
    // an all-digit argument is interpreted as a webcam id.
    bool stream = false;
    for (int i = 0; i < cam_or_url.Length; ++i)
    {
        stream = stream || (cam_or_url[i] < '0') || (cam_or_url[i] > '9');
    }

    if (stream)
    {
        // open stream
        Console.WriteLine("opening stream '{0}'", cam_or_url);
        capturer = new OpenCvSharp.VideoCapture(cam_or_url);
        // FIX: the stream branch never verified that the capture actually opened.
        MAssert.Check(capturer.IsOpened(), "stream not opened");
    }
    else
    {
        // convert to integer
        int cam_id = Convert.ToInt32(cam_or_url, 10);
        MAssert.Check(cam_id >= 0, "wrong webcam id");

        // open webcam
        Console.WriteLine("opening webcam {0}", cam_id);
        capturer = new OpenCvSharp.VideoCapture(cam_id);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");

        // set resolution
        capturer.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280);
        capturer.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");
    }

    // sometimes first few frames can be empty even if camera is good
    // so skip few frames
    for (int i = 0; i < 10; ++i)
    {
        capturer.RetrieveMat();
    }

    // check first two frames
    OpenCvSharp.Mat image1 = new OpenCvSharp.Mat(), image2 = new OpenCvSharp.Mat();
    capturer.Read(image1);
    capturer.Read(image2);
    Console.WriteLine("image1 size: {0}", image1.Size());
    // FIX: the second frame was logged under the "image1" label (copy-paste bug).
    Console.WriteLine("image2 size: {0}", image2.Size());

    MAssert.Check(
        !image1.Empty() &&
        !image2.Empty() &&
        image1.Size() == image2.Size() &&
        image1.Type() == OpenCvSharp.MatType.CV_8UC3 &&
        image2.Type() == OpenCvSharp.MatType.CV_8UC3,
        "error opening webcam or stream");
}
public void Run()
{
    // Plays back an MP4 file (ffmpeg support in OpenCV is probably needed).
    var capture = new VideoCapture(FilePath.Movie.Bach);

    // Delay between frames derived from the movie's own frame rate.
    int delayMs = (int)Math.Round(1000 / capture.Fps);

    using (var window = new Window("capture"))
    {
        // Reusable frame buffer; Mat.data becomes NULL once playback reaches the end.
        var frame = new Mat();
        for (;;)
        {
            capture.Read(frame); // same as cvQueryFrame
            if (frame.Empty())
                break;

            window.ShowImage(frame);
            Cv2.WaitKey(delayMs);
        }
    }
}
// Entry point of the video-recognition demo: parses options, builds the face
// database, spins up one Worker per video source and runs the UI loop until Escape.
// Returns 0 on success, 1 on argument errors or any unhandled exception.
static int Main(string[] args)
{
    try
    {
        // Print usage examples up front so they are visible even on bad input.
        Console.WriteLine("Usage: dotnet csharp_video_recognition_demo.dll [OPTIONS] <video_source>...");
        Console.WriteLine("Examples:");
        Console.WriteLine(" Webcam: dotnet csharp_video_recognition_demo.dll --config_dir ../../../conf/facerec 0");
        Console.WriteLine(" RTSP stream: dotnet csharp_video_recognition_demo.dll --config_dir ../../../conf/facerec rtsp://localhost:8554/");
        Console.WriteLine("");

        // Parse command-line arguments into Options; flag an error on failure.
        bool error = false;
        Options options = new Options();
        CommandLine.Parser.Default.ParseArguments <Options>(args)
            .WithParsed <Options>(opts => options = opts)
            .WithNotParsed <Options>(errs => error = true);

        // exit on argument parsing error
        if (error)
        {
            return(1);
        }

        // Echo all parsed option values (video_sources is a sequence, printed inline).
        Console.WriteLine("Arguments:");
        foreach (var opt in options.GetType().GetProperties())
        {
            if (opt.Name == "video_sources")
            {
                Console.Write("video sources = ");
                foreach (string vs in options.video_sources)
                {
                    Console.Write(vs + " ");
                }
                Console.WriteLine();
            }
            else
            {
                Console.WriteLine("--{0} = {1}", opt.Name, opt.GetValue(options, null));
            }
        }
        Console.WriteLine("\n");

        // Copy option values into locals.
        string config_dir = options.config_dir;
        string license_dir = options.license_dir;
        string database_dir = options.database_dir;
        string method_config = options.method_config;
        float recognition_distance_threshold = options.recognition_distance_threshold;
        float frame_fps_limit = options.frame_fps_limit;
        List <string> video_sources = new List <string>(options.video_sources);

        // Validate required parameters.
        MAssert.Check(config_dir != string.Empty, "Error! config_dir is empty.");
        MAssert.Check(database_dir != string.Empty, "Error! database_dir is empty.");
        MAssert.Check(method_config != string.Empty, "Error! method_config is empty.");
        MAssert.Check(recognition_distance_threshold > 0, "Error! Failed recognition distance threshold.");

        // One OpencvSource (and display name) per requested video source.
        List <ImageAndDepthSource> sources = new List <ImageAndDepthSource>();
        List <string> sources_names = new List <string>();

        MAssert.Check(video_sources.Count > 0, "Error! video_sources is empty.");

        for (int i = 0; i < video_sources.Count; i++)
        {
            sources_names.Add(string.Format("OpenCvS source {0}", i));
            sources.Add(new OpencvSource(video_sources[i]));
        }

        MAssert.Check(sources_names.Count == sources.Count);

        // print sources
        Console.WriteLine("\n{0} sources: ", sources.Count);
        for (int i = 0; i < sources_names.Count; ++i)
        {
            Console.WriteLine(" {0}", sources_names[i]);
        }
        Console.WriteLine("");

        // create facerec service
        FacerecService service =
            FacerecService.createService(
                config_dir,
                license_dir);

        Console.WriteLine("Library version: {0}\n", service.getVersion());

        // Build the face database; recognizer and capturer are only needed during the build.
        Recognizer recognizer = service.createRecognizer(method_config, true, false, false);
        Capturer capturer = service.createCapturer("common_capturer4_lbf_singleface.xml");

        Database database = new Database(
            database_dir,
            recognizer,
            capturer,
            recognition_distance_threshold);

        recognizer.Dispose();
        capturer.Dispose();

        // VideoWorker configuration overrides.
        FacerecService.Config vw_config = new FacerecService.Config("video_worker_fdatracker_blf_fda.xml");

        // vw_config.overrideParameter("single_match_mode", 1);
        vw_config.overrideParameter("search_k", 10);
        vw_config.overrideParameter("not_found_match_found_callback", 1);
        vw_config.overrideParameter("downscale_rawsamples_to_preferred_size", 0);

        //ActiveLiveness.CheckType[] checks = new ActiveLiveness.CheckType[3]
        //{
        //    ActiveLiveness.CheckType.BLINK,
        //    ActiveLiveness.CheckType.TURN_RIGHT,
        //    ActiveLiveness.CheckType.SMILE
        //};

        // create one VideoWorker shared by all streams
        VideoWorker video_worker = service.createVideoWorker(
            new VideoWorker.Params()
                .video_worker_config(vw_config)
                .recognizer_ini_file(method_config)
                .streams_count(sources.Count)
                //.age_gender_estimation_threads_count(sources.Count)
                //.emotions_estimation_threads_count(sources.Count)
                //.active_liveness_checks_order(checks)
                .processing_threads_count(sources.Count)
                .matching_threads_count(sources.Count));

        // set database (with search acceleration enabled)
        video_worker.setDatabase(database.vwElements, Recognizer.SearchAccelerationType.SEARCH_ACCELERATION_1);

        // Pre-create one named window per source, showing a black placeholder.
        for (int i = 0; i < sources_names.Count; ++i)
        {
            OpenCvSharp.Window window = new OpenCvSharp.Window(sources_names[i]);

            OpenCvSharp.Cv2.ImShow(sources_names[i], new OpenCvSharp.Mat(100, 100, OpenCvSharp.MatType.CV_8UC3, OpenCvSharp.Scalar.All(0)));
        }

        // Buffers for drawn results; shared with workers and guarded by draw_images_mutex.
        Mutex draw_images_mutex = new Mutex();
        List <OpenCvSharp.Mat> draw_images = new List <OpenCvSharp.Mat>(sources.Count);

        // create one worker per source
        List <Worker> workers = new List <Worker>();

        for (int i = 0; i < sources.Count; ++i)
        {
            draw_images.Add(new OpenCvSharp.Mat(100, 100, OpenCvSharp.MatType.CV_8UC3, OpenCvSharp.Scalar.All(0)));
            workers.Add(new Worker(
                database,
                video_worker,
                sources[i],
                i, // stream_id
                draw_images_mutex,
                draw_images[i],
                frame_fps_limit
                ));
        }

        // Main UI loop: display worker output until Escape is pressed.
        for (;;)
        {
            {
                // Swap out any freshly drawn frames under the mutex.
                draw_images_mutex.WaitOne();
                for (int i = 0; i < draw_images.Count; ++i)
                {
                    OpenCvSharp.Mat drawed_im = workers[i]._draw_image;
                    if (!drawed_im.Empty())
                    {
                        OpenCvSharp.Cv2.ImShow(sources_names[i], drawed_im);
                        draw_images[i] = new OpenCvSharp.Mat();
                    }
                }
                draw_images_mutex.ReleaseMutex();
            }

            int key = OpenCvSharp.Cv2.WaitKey(20);

            // Escape: dispose workers and leave the loop.
            if (27 == key)
            {
                foreach (Worker w in workers)
                {
                    w.Dispose();
                }
                break;
            }

            // Space: enable processing on stream 0.
            if (' ' == key)
            {
                Console.WriteLine("enable processing 0");
                video_worker.enableProcessingOnStream(0);
            }

            // Enter: disable processing on stream 0.
            if (13 == key)
            {
                Console.WriteLine("disable processing 0");
                video_worker.disableProcessingOnStream(0);
            }

            // 'r': reset the tracker on stream 0.
            if ('r' == key)
            {
                Console.WriteLine("reset trackerOnStream");
                video_worker.resetTrackerOnStream(0);
            }

            // Rethrow any exception raised inside VideoWorker callbacks.
            video_worker.checkExceptions();
        }

        // force free resources
        // otherwise licence error may occur
        // when create sdk object in next time
        service.Dispose();
        video_worker.Dispose();
    }
    catch (Exception e)
    {
        Console.WriteLine("video_recognition_show exception catched: '{0}'", e.ToString());
        return(1);
    }
    return(0);
}
// Builds the face database from a directory tree: each subdirectory of
// databaseDirPath is one person, containing face images and an optional
// name.txt with the display name. Every accepted image yields one entry in
// vwElements/samples/thumbnails/names (the lists grow in lockstep).
public Database(
    string databaseDirPath,
    Recognizer recognizer,
    Capturer capturer,
    float distanceThreshold)
{
    vwElements = new List <VideoWorker.DatabaseElement>();
    samples = new List <RawSample>();
    thumbnails = new List <OpenCvSharp.Mat>();
    names = new List <string>();

    // check paths
    MAssert.Check(Directory.Exists(databaseDirPath), "database not found");

    // get directory content: one subdirectory per person
    List <string> path_l1 = new List <string>(Directory.EnumerateDirectories(databaseDirPath));

    // element id is unique per accepted image; the person id is the directory index il1
    ulong element_id_counter = 0;

    // check every element in that directory
    for (int il1 = 0; il1 < path_l1.Count; ++il1)
    {
        // ignore files
        if (!Directory.Exists(path_l1[il1]))
        {
            continue;
        }

        // so path_l1[il1] is supposed to be the path to the person directory
        // get files inside it
        List <string> path_l2 = new List <string>(Directory.EnumerateFiles(path_l1[il1]));

        string name = string.Empty;

        // search for the name.txt file
        for (int il2 = 0; il2 < path_l2.Count; ++il2)
        {
            if (Path.GetFileName(path_l2[il2]) == "name.txt")
            {
                // put file content in the name
                // NOTE(review): ReadToEnd keeps any trailing newline — confirm the
                // thumbnail/display code tolerates that
                using (StreamReader sr = new StreamReader(path_l2[il2]))
                {
                    name = sr.ReadToEnd();
                }
            }
        }

        // try to open each remaining file as an image
        for (int il2 = 0; il2 < path_l2.Count; ++il2)
        {
            if (Path.GetFileName(path_l2[il2]) == "name.txt")
            {
                continue;
            }

            Console.WriteLine("processing '{0}' name: '{1}'", path_l2[il2], name);

            // read image with opencv; only 8-bit 3-channel images are accepted
            OpenCvSharp.Mat readed_image = OpenCvSharp.Cv2.ImRead(path_l2[il2]);

            if (readed_image.Empty() || readed_image.Type() != OpenCvSharp.MatType.CV_8UC3)
            {
                Console.WriteLine("\n\nWARNING: can't read image '{0}'\n\n", path_l2[il2]);
                continue;
            }

            // copy pixel data out of the Mat and wrap it as a BGR RawImage for the SDK
            byte[] data = new byte[readed_image.Total() * readed_image.Type().Channels];
            Marshal.Copy(readed_image.DataStart, data, 0, (int)data.Length);
            RawImage image = new RawImage(readed_image.Width, readed_image.Height, RawImage.Format.FORMAT_BGR, data);

            // capture the face; exactly one face per database image is required
            List <RawSample> capturedSamples = capturer.capture(image);

            if (capturedSamples.Count != 1)
            {
                Console.WriteLine("\n\nWARNING: detected {0} faces on '{1}' image instead of one, image ignored \n\n", capturedSamples.Count, path_l2[il2]);
                continue;
            }

            RawSample sample = capturedSamples[0];

            // make the recognition template
            Template templ = recognizer.processing(sample);

            // prepare data for VideoWorker; (ulong)il1 is the per-person id
            VideoWorker.DatabaseElement vwElement = new VideoWorker.DatabaseElement(element_id_counter++, (ulong)il1, templ, distanceThreshold);

            vwElements.Add(vwElement);
            samples.Add(sample);
            thumbnails.Add(makeThumbnail(sample, name));
            names.Add(name);
        }
    }

    // all four lists must have grown in lockstep, one entry per accepted image
    MAssert.Check((int)element_id_counter == vwElements.Count);
    MAssert.Check((int)element_id_counter == samples.Count);
    MAssert.Check((int)element_id_counter == thumbnails.Count);
    MAssert.Check((int)element_id_counter == names.Count);
}
// Renders a square thumbnail (Worker.thumbnail_size per side) of the sample's
// face crop, letterboxed on a black canvas, with an optional name caption
// drawn over a darkened strip at the bottom.
public static OpenCvSharp.Mat makeThumbnail(
    RawSample sample,
    string name = "")
{
    int side = Worker.thumbnail_size;

    // Cut the face out in BMP format: no real encode/decode cost,
    // just a few cheap copies.
    MemoryStream buffer = new MemoryStream();
    sample.cutFaceImage(
        buffer,
        RawSample.ImageFormat.IMAGE_FORMAT_BMP,
        RawSample.FaceCutType.FACE_CUT_BASE);

    OpenCvSharp.Mat face = OpenCvSharp.Mat.ImDecode(buffer.ToArray(), OpenCvSharp.ImreadModes.Color);

    // Sanity-check the decoded crop.
    MAssert.Check(!face.Empty());
    MAssert.Check(face.Type() == OpenCvSharp.MatType.CV_8UC3);

    // Scale the longer edge down to the thumbnail side, preserving aspect ratio,
    // and center the result on the square canvas.
    OpenCvSharp.Rect fit;
    if (face.Rows >= face.Cols)
    {
        fit.Height = side;
        fit.Width = face.Cols * side / face.Rows;
    }
    else
    {
        fit.Width = side;
        fit.Height = face.Rows * side / face.Cols;
    }
    fit.X = (side - fit.Width) / 2;
    fit.Y = (side - fit.Height) / 2;

    OpenCvSharp.Mat canvas = new OpenCvSharp.Mat(
        side,
        side,
        OpenCvSharp.MatType.CV_8UC3,
        OpenCvSharp.Scalar.All(0));

    // Resize directly into the centered sub-rectangle of the canvas.
    OpenCvSharp.Cv2.Resize(face, canvas[fit], fit.Size);

    if (!string.IsNullOrEmpty(name))
    {
        // Darken a 27-pixel strip at the bottom and print the name over it.
        canvas[new OpenCvSharp.Rect(0, canvas.Rows - 27, canvas.Cols, 27)] =
            canvas.RowRange(canvas.Rows - 27, canvas.Rows) * 0.5f;

        OpenCvSharp.Cv2.PutText(
            canvas,
            name,
            new OpenCvSharp.Point(0, canvas.Rows - 7),
            OpenCvSharp.HersheyFonts.HersheyDuplex,
            0.7,
            OpenCvSharp.Scalar.All(255),
            1,
            OpenCvSharp.LineTypes.AntiAlias);
    }

    return canvas;
}
// Entry point of the DFX extraction demo: reads a video and per-frame face
// tracking JSON, feeds frames to a DFX Collector in ~5 s chunks, and renders
// a live preview. Returns 0 always; 1 only on setup failure.
public static int Main(string[] args)
{
    // Parse input arguments
    string videoPath, facePath, studyPath, output;
    int parseResult = ParseArgs(args, out videoPath, out facePath, out studyPath, out output);
    // NOTE(review): ParseArgs apparently signals "proceed" with 2 — confirm against its definition
    if (parseResult != 2)
    {
        return(parseResult);
    }

    // Create a Factory object
    var factory = new Dfx.Sdk.Factory();
    Console.WriteLine($"Created DFX Factory: {factory.Version}");

    // Initialize a study
    if (!factory.InitializeStudyFromFile(studyPath))
    {
        Console.WriteLine($"DFX study initialization failed: {factory.LastErrorMessage}");
        return(1);
    }
    Console.WriteLine($"Created study from {studyPath}");

    // Create a collector
    var collector = factory.CreateCollector();
    if (collector.CurrentState == Dfx.Sdk.Collector.State.ERROR)
    {
        Console.WriteLine($"Collector creation failed: {collector.LastErrorMessage}");
        Console.ReadKey();
        return(1);
    }
    Console.WriteLine("Created collector");

    // Load the face tracking data
    var jsonFaces = LoadJsonFaces(facePath);

    // Load video file (or stream of images)
    var videocap = Cv.VideoCapture.FromFile(videoPath);
    var videoFileName = Path.GetFileName(videoPath);

    // Set target FPS and chunk duration from the video's own properties.
    double targetFps = videocap.Get(Cv.CaptureProperty.Fps);
    double videoFrameCount = videocap.Get(Cv.CaptureProperty.FrameCount);
    const int chunkDuration_s = 5;
    const int KLUDGE = 1; // pad chunk size by one frame to absorb rounding
    double chunkFrameCount = Math.Ceiling(chunkDuration_s * targetFps + KLUDGE);
    ulong numberChunks = (ulong)Math.Ceiling(videoFrameCount / chunkFrameCount); // Ask more chunks then needed
    double durationOfOneFrame_ns = 1000_000_000.0 / targetFps;

    collector.TargetFps = (float)targetFps;
    collector.ChunkDuration = chunkDuration_s;
    collector.NumberChunks = numberChunks;

    Console.WriteLine($" mode: {factory.Mode}");
    Console.WriteLine($" number chunks: {collector.NumberChunks}");
    Console.WriteLine($" chunk duration: {collector.ChunkDuration}");
    foreach (var constraint in collector.GetEnabledConstraints())
    {
        Console.WriteLine($" enabled constraint: {constraint}");
    }

    // Start collection
    collector.StartCollection();

    // Start reading frames and adding to collector
    uint frameNumber = 0;
    bool success = false;

    using (var window = new Cv.Window("capture"))
    {
        Cv.Mat image = new Cv.Mat();
        while (true)
        {
            bool ret = videocap.Read(image);
            if (!ret || image.Empty())
            {
                // Video ended, so grab what should be the last, possibly truncated chunk
                var chunkData = collector.ChunkData;
                if (chunkData != null)
                {
                    var chunkPayload = chunkData.Payload;
                    //if (output != null)
                    //    savePayload(chunkPayload, output);
                    Console.WriteLine($"Got chunk with {chunkPayload}");
                }
                else
                {
                    Console.WriteLine("Got empty chunk");
                }
                success = true;
                break;
            }

            // Create a Dfx VideoFrame wrapping the OpenCV pixel buffer (BGR, 8UC3).
            using (Dfx.Sdk.VideoFrame videoFrame = new Dfx.Sdk.VideoFrame((ushort)image.Rows,
                                                                          (ushort)image.Cols,
                                                                          Dfx.Sdk.PixelType.TYPE_8UC3,
                                                                          image.Channels() * image.Cols,
                                                                          image.Data,
                                                                          Dfx.Sdk.ChannelOrder.BGR,
                                                                          (ulong)(frameNumber * durationOfOneFrame_ns),
                                                                          frameNumber))
            {
                // NOTE(review): frameNumber is incremented BEFORE the jsonFaces lookup
                // below, so the VideoFrame built with number 0 is paired with
                // jsonFaces["1"] — presumably the face JSON is 1-indexed; confirm
                // against the face-data producer.
                frameNumber++;

                // Create a Dfx Frame from the VideoFrame
                var frame = collector.CreateFrame(videoFrame);

                // Add the Dfx Face to the Dfx Frame
                var jsonFace = jsonFaces[frameNumber.ToString()];
                var face = new Dfx.Sdk.Face((string)jsonFace["id"]);
                face.PoseValid = (bool)jsonFace["poseValid"];
                face.Detected = (bool)jsonFace["detected"];
                face.SetRect((ushort)jsonFace["rect.x"], (ushort)jsonFace["rect.y"], (ushort)jsonFace["rect.w"], (ushort)jsonFace["rect.h"]);
                foreach (JProperty entry in jsonFace["points"])
                {
                    face.AddPosePoint(entry.Name, new Dfx.Sdk.PosePoint((float)entry.Value["x"],
                                                                        (float)entry.Value["y"],
                                                                        0,
                                                                        (bool)entry.Value["valid"],
                                                                        (bool)entry.Value["estimated"],
                                                                        (float)entry.Value["quality"]));
                }
                frame.AddFace(face);

                // Add a marker to the 1000th dfx_frame
                if (frameNumber == 1000)
                {
                    frame.AddMarker("This is the 1000th frame");
                }

                // Do the extraction
                collector.DefineRegions(frame);
                var result = collector.ExtractChannels(frame);

                // Grab a chunk and check if we are finished
                if (result == Dfx.Sdk.Collector.State.CHUNKREADY || result == Dfx.Sdk.Collector.State.COMPLETED)
                {
                    var chunkData = collector.ChunkData;
                    if (chunkData != null)
                    {
                        var chunkPayload = chunkData.Payload;
                        //if (output != null)
                        //    savePayload(chunkPayload, output);
                        Console.WriteLine($"Got chunk with {chunkPayload}");
                    }
                    else
                    {
                        Console.WriteLine("Got empty chunk");
                    }
                    if (result == Dfx.Sdk.Collector.State.COMPLETED)
                    {
                        Console.WriteLine($"{nameof(Dfx.Sdk.Collector.State.COMPLETED)} at frame {frameNumber}");
                        success = true;
                        break;
                    }
                }

                // Render: draw enabled region polygons plus a progress caption.
                if (true)
                {
                    foreach (var faceID in frame.FaceIdentifiers)
                    {
                        foreach (var regionID in frame.GetRegionNames(faceID))
                        {
                            if (frame.GetRegionIntProperty(faceID, regionID, "draw") != 0)
                            {
                                var dfxpolygon = frame.GetRegionPolygon(faceID, regionID);
                                var cvpolygon = new List <Cv.Point>();
                                foreach (var point in dfxpolygon)
                                {
                                    cvpolygon.Add(new Cv.Point(point.X, point.Y));
                                }
                                var cvpolygons = new List <List <Cv.Point> >();
                                cvpolygons.Add(cvpolygon);
                                Cv.Cv2.Polylines(image, cvpolygons, isClosed: true, color: Cv.Scalar.Cyan, thickness: 1, lineType: Cv.LineTypes.AntiAlias);
                            }
                        }
                    }
                    string msg = $"Extracting from {videoFileName} - frame {frameNumber} of {videoFrameCount}";
                    Cv.Cv2.PutText(image, msg, org: new Cv.Point(10, 30), fontFace: Cv.HersheyFonts.HersheyPlain, fontScale: 1, color: Cv.Scalar.Black, thickness: 1, lineType: Cv.LineTypes.AntiAlias);
                    window.ShowImage(image);
                    // 'q' aborts the extraction early.
                    if (Cv.Cv2.WaitKey(1) == 'q')
                    {
                        success = false;
                        break;
                    }
                }
            }
        }
    }

    if (success)
    {
        Console.WriteLine("Collection finished completely. Press any key to exit...");
    }
    else
    {
        Console.WriteLine("Collection interrupted or failed. Press any key to exit...");
    }

    // When everything done, release the capture
    videocap.Release();

    Console.ReadKey();
    return(0);
}
public void Run()
{
    // Background subtraction (MOG) demo: shows each movie frame next to its
    // foreground mask.
    using (var capture = new VideoCapture(FilePath.Movie.Bach))
    using (var mog = BackgroundSubtractorMOG.Create())
    using (var windowSrc = new Window("src"))
    using (var windowDst = new Window("dst"))
    {
        var frame = new Mat();
        var foreground = new Mat();

        for (;;)
        {
            capture.Read(frame);
            if (frame.Empty())
                break;

            // Update the background model; foreground receives the mask.
            mog.Run(frame, foreground, 0.01);

            windowSrc.Image = frame;
            windowDst.Image = foreground;
            Cv2.WaitKey(50);
        }
    }
}
public void Run()
{
    const string OutVideoFile = "out.avi";

    // Opens MP4 file (ffmpeg is probably needed)
    // FIX: this capture was never disposed (unlike capture2 below); wrap it in using.
    using (VideoCapture capture = new VideoCapture(FilePath.Movie.Bach))
    {
        // Read movie frames and write them to VideoWriter
        Size dsize = new Size(640, 480);
        // FIX: gray/canny/dst were re-allocated every iteration and never disposed,
        // churning native memory; hoist them out of the loop and dispose via using.
        using (VideoWriter writer = new VideoWriter(OutVideoFile, -1, capture.Fps, dsize))
        using (Mat frame = new Mat())
        using (Mat gray = new Mat())
        using (Mat canny = new Mat())
        using (Mat dst = new Mat())
        {
            Console.WriteLine("Converting each movie frames...");
            while (true)
            {
                // Read image
                capture.Read(frame);
                if (frame.Empty())
                    break;

                Console.CursorLeft = 0;
                Console.Write("{0} / {1}", capture.PosFrames, capture.FrameCount);

                // grayscale -> canny -> resize
                Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
                Cv2.Canny(gray, canny, 100, 180);
                Cv2.Resize(canny, dst, dsize, 0, 0, InterpolationFlags.Linear);

                // Write mat to VideoWriter
                writer.Write(dst);
            }
            Console.WriteLine();
        }

        // Watch result movie
        using (VideoCapture capture2 = new VideoCapture(OutVideoFile))
        using (Window window = new Window("result"))
        {
            int sleepTime = (int)(1000 / capture.Fps);

            Mat resultFrame = new Mat();
            while (true)
            {
                capture2.Read(resultFrame);
                if (resultFrame.Empty())
                    break;

                window.ShowImage(resultFrame);
                Cv2.WaitKey(sleepTime);
            }
        }
    }
}