Code example #1
        private void Init()
        {
            _traking   = LocatorFactory.GetTrackingLocator(AppId, FtKey) as FaceTracking;
            _detection = LocatorFactory.GetDetectionLocator(AppId, FdKey) as FaceDetection;
            _recognize = new FaceRecognize(AppId, FrKey);
            _processor = new FaceProcessor(_traking, _recognize);

            //init cache
            if (Directory.Exists(FaceLibraryPath))
            {
                var files = Directory.GetFiles(FaceLibraryPath);
                foreach (var file in files)
                {
                    var info = new FileInfo(file);
                    _cache.Add(info.Name.Replace(info.Extension, ""), File.ReadAllBytes(file));
                }
            }

            CvInvoke.UseOpenCL = false;

            _capture = GetWebCamera();
            _capture.Start();

            //Application.Idle += VideoCaptured; // the video can also be captured this way, with no separate thread needed
            //_capture.ImageGrabbed += VideoCaptured; // do not use this approach
            _run = new Task(VideoCaptured);
            _run.Start();
        }
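
The VideoCaptured body is not shown above. A minimal sketch of what such a polling loop could look like with Emgu CV follows; the _running flag and the _processor.Process call are hypothetical placeholders, not the project's actual API.

        // A minimal sketch of the capture loop, assuming Emgu CV's VideoCapture.
        // _running and _processor.Process(...) are hypothetical; the real FaceProcessor API may differ.
        private void VideoCaptured()
        {
            while (_running) // hypothetical shutdown flag
            {
                using (var frame = _capture.QueryFrame()) // grab the next frame as a Mat
                {
                    if (frame == null) continue;
                    _processor.Process(frame); // hypothetical: run tracking + recognition on the frame
                }
            }
        }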
Code example #2
        /// <summary>
        ///     Gets a face locator (tracking or detection).
        /// </summary>
        /// <param name="appId">Application ID.</param>
        /// <param name="sdkKey">Application key.</param>
        /// <param name="useTracking">True to enable face tracking; false to enable face detection.</param>
        /// <param name="preAllocMemSize">Size of the pre-allocated buffer, in bytes.</param>
        /// <param name="orientPriority">Expected range of face orientations to detect.</param>
        /// <param name="scale">
        ///     Minimum detectable face size, expressed as the ratio of the face to the longer side of the image.
        ///     Valid range [2, 50] ([2, 16] for face tracking); 16 is the recommended value. For example, to detect
        ///     faces as small as 1/8 of the image length, set nScale to 8.
        /// </param>
        /// <param name="maxFaceNumber">Maximum number of faces the engine is expected to detect. Valid range [1, 50] ([1, 20] for face tracking).</param>
        /// <returns>The initialized locator.</returns>
        public static FaceLocator GetLocator(string appId, string sdkKey, bool useTracking = true,
                                             int preAllocMemSize           = 41943040,
                                             OrientPriority orientPriority = OrientPriority.OrientHigherExt,
                                             int scale = 16, int maxFaceNumber = 10)
        {
            FaceLocator locator;

            if (useTracking)
            {
                locator = new FaceTracking(appId, sdkKey, false, preAllocMemSize);
            }
            else
            {
                locator = new FaceDetection(appId, sdkKey, false, preAllocMemSize);
            }

            var code = locator.Initialize(orientPriority, scale, maxFaceNumber);

            if (code != ErrorCode.Ok)
            {
                throw new FaceException(code);
            }

            return locator;
        }
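
A hedged usage sketch follows; AppId and FtKey are placeholder credentials, not part of the snippet above.

        // Hypothetical usage of GetLocator; AppId and FtKey are placeholders.
        // A non-Ok ErrorCode from Initialize surfaces as a FaceException.
        try
        {
            // request a tracking locator that reports faces down to 1/8 of the image's longer side
            var tracker = GetLocator(AppId, FtKey, useTracking: true, scale: 8, maxFaceNumber: 5);
        }
        catch (FaceException ex)
        {
            Console.WriteLine(ex.Message);
        }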
Code example #3
        // -----------------------------------------------------------------------
        // KEY SAMPLE CODE ENDS HERE
        // -----------------------------------------------------------------------

        /// <summary>
        /// This method parses the JSON output and converts it to a sequence of time frames with highlight regions. One highlight region represents a tracked face in the frame.
        /// </summary>
        /// <param name="json">JSON output of face tracking result.</param>
        /// <returns>Sequence of time frames with highlight regions.</returns>
        private static IEnumerable<FrameHighlight> GetHighlights(string json)
        {
            FaceTracking faceTrackingResult = Helpers.FromJson <FaceTracking>(json);

            if (faceTrackingResult.FacesDetected == null)
            {
                yield break;
            }

            float timescale = (float)faceTrackingResult.Timescale;

            Rect invisibleRect = new Rect(new Point(0, 0), new Size(0, 0));  // Use this rectangle when a specific face does not appear in a frame

            foreach (Fragment <FaceEvent> fragment in faceTrackingResult.Fragments)
            {
                FaceEvent[][] events = fragment.Events;
                if (events == null || events.Length == 0)
                {
                    // If 'Events' is empty, no face was detected in this fragment
                    Rect[] rects = new Rect[faceTrackingResult.FacesDetected.Length];
                    for (int i = 0; i < rects.Length; i++)
                    {
                        rects[i] = invisibleRect;
                    }

                    yield return new FrameHighlight
                    {
                        Time = fragment.Start / timescale, HighlightRects = rects
                    };
                }
                else
                {
                    long interval = fragment.Interval.GetValueOrDefault();
                    long start    = fragment.Start;
                    int  i        = 0;
                    foreach (FaceEvent[] evt in events)
                    {
                        Rect[] rects = faceTrackingResult.FacesDetected.Select(face =>
                        {
                            FaceEvent faceRect = evt.FirstOrDefault(x => x.Id == face.FaceId);
                            if (faceRect == null)
                            {
                                return invisibleRect;
                            }

                            // Creates highlight region at the location of the tracked face
                            return new Rect(new Point(faceRect.X, faceRect.Y), new Size(faceRect.Width, faceRect.Height));
                        }).ToArray();

                        yield return new FrameHighlight
                        {
                            Time = (start + interval * i) / timescale, HighlightRects = rects
                        };

                        i++;
                    }
                }
            }
        }
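
A possible caller, assuming the face-tracking JSON has been saved to a local file (the path is a placeholder):

        // Hypothetical caller of GetHighlights; "facetracking.json" is a placeholder path.
        string json = File.ReadAllText("facetracking.json");
        foreach (FrameHighlight frame in GetHighlights(json))
        {
            // Time is in seconds (fragment start divided by the timescale);
            // HighlightRects holds one Rect per detected face (zero-sized for absent faces).
            Console.WriteLine($"t={frame.Time:F2}s, faces={frame.HighlightRects.Length}");
        }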
Code example #4
 void Start()
 {
     R_shoulder = Bip_R_UpperArm.GetComponentInParent<Transform>();
     L_shoulder = Bip_L_UpperArm.GetComponentInParent<Transform>();
     neck       = Bip_C_Head.GetComponentInParent<Transform>();
     body       = GetComponent<BodyTracking>();
     tracking   = gameObject.GetComponent<FaceTrack.FaceTracking>();
 }
Code example #5
        private void Init()
        {
            _age       = new FaceAge(AppId, AgeKey);
            _gender    = new FaceGender(AppId, GenderKey);
            _traking   = LocatorFactory.GetTrackingLocator(AppId, FtKey, _age, _gender) as FaceTracking;
            _detection = LocatorFactory.GetDetectionLocator(AppId, FdKey) as FaceDetection;
            _recognize = new FaceRecognize(AppId, FrKey);
            _processor = new FaceProcessor(_traking, _recognize);

            //init cache
            if (Directory.Exists(FaceLibraryPath))
            {
                var files = Directory.GetFiles(FaceLibraryPath);
                foreach (var file in files)
                {
                    var info = new FileInfo(file);
                    _cache.Add(info.Name.Replace(info.Extension, ""), File.ReadAllBytes(file));
                }
            }

            stride     = width * pixelSize;
            bufferSize = stride * height;

            _pImage = Marshal.AllocHGlobal(bufferSize);
            _image  = new Bitmap(width, height, stride, PixelFormat.Format24bppRgb, _pImage);

            var ffmpeg = new FFMpegConverter();

            outputStream = new MemoryStream();

            var setting =
                new ConvertSettings
            {
                CustomOutputArgs = "-s 1920x1080", // the -r parameter can be adjusted as needed, depending on the camera's FPS
            };                                     //-s 1920x1080 -q:v 2 -b:v 64k

            //-an -r 15 -pix_fmt bgr24 -updatefirst 1
            //task = ffmpeg.ConvertLiveMedia("rtsp://*****:*****@192.168.1.64:554/h264/ch1/main/av_stream", null,
            //    outputStream, Format.raw_video, setting);

            /*
             * USB camera capture
             * ffmpeg can capture from a USB camera, as the code below shows.
             * First run: ffmpeg -list_devices true -f dshow -i dummy to list the USB camera devices present on the
             * system (or look up the device name in the Device Manager). On my machine, for example, it is called USB2.0 PC CAMERA.
             * Then adjust the video frame settings (width and height) to match the captured resolution; the pixel size
             * normally needs no change. To check which resolutions a device supports, run:
             * ffmpeg -list_options true -f dshow -i video="USB2.0 PC CAMERA"
             */
            task = ffmpeg.ConvertLiveMedia("video=Logitech HD Webcam C270", "dshow",
                                           outputStream, Format.raw_video, setting);

            task.OutputDataReceived += DataReceived;
            task.Start();

            _renderTask = new Task(Render);
            _renderTask.Start();
        }
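
The Render task itself is not shown. One way it could consume the raw BGR24 stream is sketched below; the locking scheme and the hand-off at the end are assumptions, not the project's actual code.

        // Sketch of the Render loop: copy one raw BGR24 frame at a time from the
        // ffmpeg output stream into the native buffer behind _image.
        // Assumes DataReceived appends to outputStream under the same lock.
        private void Render()
        {
            var frame = new byte[bufferSize];
            while (true)
            {
                var gotFrame = false;
                lock (outputStream)
                {
                    if (outputStream.Length >= bufferSize)
                    {
                        outputStream.Position = 0;
                        outputStream.Read(frame, 0, bufferSize);
                        outputStream.SetLength(0); // drop the consumed bytes (simplification)
                        gotFrame = true;
                    }
                }
                if (!gotFrame)
                {
                    Thread.Sleep(10); // wait for ffmpeg to deliver a full frame
                    continue;
                }
                Marshal.Copy(frame, 0, _pImage, bufferSize); // _image now shows the new frame
                // hand _image to the UI / face processor here
            }
        }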
Code example #6
        public FaceDetectionService()
        {
            _age    = new FaceAge(AppConfigurations.AppId, AppConfigurations.AgeKey);        // age estimation
            _gender = new FaceGender(AppConfigurations.AppId, AppConfigurations.GenderKey);  // gender estimation
            //// face detection on still images
            _detection = LocatorFactory.GetDetectionLocator(AppConfigurations.AppId, AppConfigurations.FdKey, _age, _gender) as FaceDetection;
            _traking   = LocatorFactory.GetTrackingLocator(AppConfigurations.AppId, AppConfigurations.FtKey, _age, _gender) as FaceTracking;

            _recognize = new FaceRecognize(AppConfigurations.AppId, AppConfigurations.FrKey);

            _processor            = new FaceProcessor(_detection, _recognize);
            _personFaceRepository = new PersonFaceRepository();
        }
Code example #7
File: Eyes.cs Project: robbietherobot/robbie
        /// <summary>
        /// Constructs a new eyes object.
        /// </summary>
        /// <param name="visionPreview">A capture element that is placed on a canvas used for capturing what Robbie sees.</param>
        /// <param name="previewCanvas">A canvas element used for rendering the image preview showing what Robbie sees.</param>
        public Eyes(CaptureElement visionPreview, Canvas previewCanvas)
        {
            Camera.Instance.Initialize(visionPreview);
            this.previewCanvas = previewCanvas;

            faceTracking   = new FaceTracking();
            faceDetection  = new FaceDetection();
            computerVision = new ComputerVision();

            identityInterpolation = new IdentityInterpolation();
            visualization         = new Vision();
            panTilt     = new PanTilt();
            eyesDisplay = new EyesDisplay();

            identityInterpolation.LargestFaceChanged += IdentityInterpolation_LargestFaceChanged;

            // fire up the continuous tasks of processing video and controlling the servos
            ThreadPoolTimer.CreatePeriodicTimer(ProcessCurrentVideoFrame_Delegate, TimeSpan.FromMilliseconds(125)); // 8 fps
            ThreadPoolTimer.CreatePeriodicTimer(UpdatePanTiltPosition_Delegate, TimeSpan.FromMilliseconds(25));     // 40 fps
            ThreadPoolTimer.CreatePeriodicTimer(Blink_Delegate, TimeSpan.FromMilliseconds(5000));                   // 12 fpm
        }
Code example #8
File: Main.cs Project: vebin/FaceRecognization
        private void Init()
        {
            _traking   = LocatorFactory.GetTrackingLocator(AppId, FtKey) as FaceTracking;
            _detection = LocatorFactory.GetDetectionLocator(AppId, FdKey) as FaceDetection;
            _recognize = new FaceRecognize(AppId, FrKey);
            _processor = new FaceProcessor(_traking, _recognize);

            //init cache
            if (Directory.Exists(FaceLibraryPath))
            {
                var files = Directory.GetFiles(FaceLibraryPath);
                foreach (var file in files)
                {
                    var info = new FileInfo(file);
                    _cache.Add(info.Name.Replace(info.Extension, ""), File.ReadAllBytes(file));
                }
            }

            _pImage = Marshal.AllocHGlobal(1920 * 1080 * 3);
            _image  = new Bitmap(1920, 1080, 1920 * 3, PixelFormat.Format24bppRgb, _pImage);

            var ffmpeg = new FFMpegConverter();

            outputStream = new MemoryStream();

            var setting =
                new ConvertSettings
            {
                CustomOutputArgs = "-an -r 15 -pix_fmt bgr24 -updatefirst 1" // the -r parameter can be adjusted as needed, depending on the camera's FPS
            };                                                               //-s 1920x1080 -q:v 2 -b:v 64k

            task = ffmpeg.ConvertLiveMedia("rtsp://*****:*****@192.168.1.64:554/h264/ch1/main/av_stream", null,
                                           outputStream, Format.raw_video, setting);

            task.OutputDataReceived += DataReceived;
            task.Start();

            _renderTask = new Task(Render);
            _renderTask.Start();
        }
Code example #9
 public SendFace2VMCProtocolToPerformer(FaceTracking tracker, uOscClient client)
 {
     cli  = client;
     face = tracker;
 }
Code example #10
 public SendBoth2VMCProtocolToPerformer(BodyTracking BodyTracker, FaceTracking faceTracker, uOscClient client)
 {
     body = BodyTracker;
     face = faceTracker;
     cli  = client;
 }
Code example #11
File: MainForm.cs Project: modulexcite/FRESHER
        private void DoTracking()
        {
            var ft = new FaceTracking(this);
            ft.SimplePipeline();
            Invoke(new DoTrackingCompleted(() =>
            {
                foreach (CheckBox moduleCheckBox in m_modulesCheckBoxes)
                {
                    moduleCheckBox.Enabled = true;
                }
                Start.Enabled = true;
                Stop.Enabled = false;
                MainMenu.Enabled = true;

                Mirror.Enabled = true;
                NumDetectionText.Enabled = true;
                NumLandmarksText.Enabled = true;
                NumPoseText.Enabled = true;
                NumExpressionsText.Enabled = true;

                RegisterUser.Enabled = false;
                UnregisterUser.Enabled = false;

                if (m_closing) Close();
            }));
        }