/// <summary>
/// Determines facial expression on media source.
/// </summary>
/// <param name="source">The source of the media to recognize facial expression for.</param>
/// <param name="bound">The location bounding the face at the source.</param>
/// <param name="config">The configuration used for expression recognition. This value can be null.</param>
/// <returns>A task that represents the asynchronous recognition operation.</returns>
/// <exception cref="ArgumentNullException"><paramref name="source"/> is null.</exception>
/// <exception cref="ObjectDisposedException">
/// <paramref name="source"/> has already been disposed of.<br/>
/// -or-<br/>
/// <paramref name="config"/> has already been disposed of.
/// </exception>
/// <exception cref="NotSupportedException">The feature is not supported.</exception>
/// <feature>http://tizen.org/feature/vision.face_recognition</feature>
/// <since_tizen> 4 </since_tizen>
public static async Task<FacialExpression> RecognizeFacialExpressionAsync(MediaVisionSource source,
    Rectangle bound, FaceRecognitionConfiguration config)
{
    if (source == null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    // RunContinuationsAsynchronously ensures the awaiting continuation is not inlined
    // on the native interop thread that invokes the recognition callback.
    var tcsResult = new TaskCompletionSource<FacialExpression>(
        TaskCreationOptions.RunContinuationsAsynchronously);

    InteropFace.MvFaceFacialExpressionRecognizedCallback cb = (IntPtr sourceHandle,
        IntPtr engineCfgHandle, global::Interop.MediaVision.Rectangle faceLocation,
        FacialExpression facialExpression, IntPtr _) =>
    {
        Log.Info(MediaVisionLog.Tag, $"Facial expression recognized, expression : {facialExpression}");

        if (!tcsResult.TrySetResult(facialExpression))
        {
            Log.Error(MediaVisionLog.Tag, "Failed to set facial result");
        }
    };

    // Keep the delegate alive for the duration of the native call so the GC
    // cannot collect it while unmanaged code still holds a pointer to it.
    using (var cbKeeper = ObjectKeeper.Get(cb))
    {
        InteropFace.RecognizeFacialExpression(source.Handle, EngineConfiguration.GetHandle(config),
            bound.ToMarshalable(), cb).
            Validate("Failed to perform facial expression recognition.");

        return await tcsResult.Task;
    }
}
/// <summary>
/// Calls the native recognition entry point, marshaling the optional region of interest.
/// When <paramref name="area"/> is null, the whole source is passed to the native layer.
/// </summary>
private static MediaVisionError InvokeRecognize(IntPtr sourceHandle, IntPtr modelHandle,
    IntPtr configHandle, InteropFace.RecognizedCallback cb, Rectangle? area)
{
    if (area.HasValue)
    {
        var roi = area.Value.ToMarshalable();
        return InteropFace.Recognize(sourceHandle, modelHandle, configHandle, ref roi, cb);
    }

    return InteropFace.Recognize(sourceHandle, modelHandle, configHandle, IntPtr.Zero, cb);
}
/// <summary>
/// Detects faces on the source.<br/>
/// Each time when DetectAsync is called, a set of the detected faces at the media source are received asynchronously.
/// </summary>
/// <param name="source">The source of the media where faces will be detected.</param>
/// <param name="config">The configuration of engine will be used for detecting. This value can be null.</param>
/// <returns>A task that represents the asynchronous detect operation.</returns>
/// <exception cref="ArgumentNullException"><paramref name="source"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="source"/> has already been disposed of.</exception>
/// <exception cref="NotSupportedException">The feature is not supported.</exception>
/// <feature>http://tizen.org/feature/vision.face_recognition</feature>
/// <since_tizen> 4 </since_tizen>
public static async Task<Rectangle[]> DetectAsync(MediaVisionSource source,
    FaceDetectionConfiguration config)
{
    if (source == null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    // RunContinuationsAsynchronously ensures the awaiting continuation is not inlined
    // on the native interop thread that completes the detection callback.
    var tcs = new TaskCompletionSource<Rectangle[]>(
        TaskCreationOptions.RunContinuationsAsynchronously);

    // ObjectKeeper pins the callback delegate so the GC cannot collect it
    // while the native detection is still running.
    using (var cb = ObjectKeeper.Get(GetCallback(tcs)))
    {
        InteropFace.Detect(source.Handle, EngineConfiguration.GetHandle(config), cb.Target).
            Validate("Failed to perform face detection");

        return await tcs.Task;
    }
}
/// <summary>
/// Performs face tracking on the source with the trackingModel.
/// </summary>
/// <param name="source">The source of the media to recognize face for.</param>
/// <param name="trackingModel">The model will be used for tracking.</param>
/// <param name="doLearn">The value indicating whether model learning while tracking. If it is true, then the model will try to learn
/// (if it supports learning feature), otherwise the model will be not learned during the invoking tracking iteration.
/// Learning process improves tracking correctness, but can decrease tracking performance.</param>
/// <returns>A task that represents the asynchronous tracking operation.</returns>
/// <exception cref="ArgumentNullException">
/// <paramref name="source"/> is null.<br/>
/// -or-<br/>
/// <paramref name="trackingModel"/> is null.
/// </exception>
/// <exception cref="ObjectDisposedException">
/// <paramref name="source"/> has already been disposed of.<br/>
/// -or-<br/>
/// <paramref name="trackingModel"/> has already been disposed of.
/// </exception>
/// <exception cref="NotSupportedException">The feature is not supported.</exception>
/// <exception cref="InvalidOperationException"><paramref name="trackingModel"/> is not prepared.</exception>
/// <feature>http://tizen.org/feature/vision.face_recognition</feature>
/// <since_tizen> 4 </since_tizen>
public static async Task<FaceTrackingResult> TrackAsync(MediaVisionSource source,
    FaceTrackingModel trackingModel, bool doLearn)
{
    if (source == null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    if (trackingModel == null)
    {
        throw new ArgumentNullException(nameof(trackingModel));
    }

    // RunContinuationsAsynchronously ensures the awaiting continuation is not inlined
    // on the native interop thread that completes the tracking callback.
    var tcs = new TaskCompletionSource<FaceTrackingResult>(
        TaskCreationOptions.RunContinuationsAsynchronously);

    // ObjectKeeper pins the callback delegate so the GC cannot collect it
    // while the native tracking is still running.
    using (var cb = ObjectKeeper.Get(GetTrackCallback(tcs)))
    {
        InteropFace.Track(source.Handle, trackingModel.Handle, IntPtr.Zero, cb.Target, doLearn).
            Validate("Failed to perform face tracking.");

        return await tcs.Task;
    }
}