Example #1
        private static async Task<FaceRecognitionResult> InvokeRecognizeAsync(MediaVisionSource source,
                                                                               FaceRecognitionModel recognitionModel, Rectangle? area,
                                                                               FaceRecognitionConfiguration config)
        {
            if (source == null)
            {
                throw new ArgumentNullException(nameof(source));
            }
            if (recognitionModel == null)
            {
                throw new ArgumentNullException(nameof(recognitionModel));
            }

            TaskCompletionSource<FaceRecognitionResult> tcs = new TaskCompletionSource<FaceRecognitionResult>();

            using (var cb = ObjectKeeper.Get(GetRecognizedCallback(tcs)))
            {
                InvokeRecognize(source.Handle, recognitionModel.Handle, EngineConfiguration.GetHandle(config),
                                cb.Target, area).Validate("Failed to perform face recognition.");

                return await tcs.Task;
            }
        }
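The helper above bridges a native, callback-style recognition call into an awaitable Task: the callback completes a TaskCompletionSource, and ObjectKeeper keeps the delegate alive while the native call is outstanding. Below is a minimal, self-contained sketch of that pattern; the names are illustrative only and are not taken from the Tizen sources.

    using System;
    using System.Threading.Tasks;

    internal static class CallbackBridgeSketch
    {
        // Converts a callback-style invocation into a Task by completing a
        // TaskCompletionSource from inside the callback. TrySetResult avoids
        // throwing if the callback happens to fire more than once.
        internal static Task<int> InvokeAsync(Action<Action<int>> invokeNative)
        {
            var tcs = new TaskCompletionSource<int>();

            invokeNative(result => tcs.TrySetResult(result));

            return tcs.Task;
        }
    }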
Example #2
 /// <summary>
 /// Learns the face recognition model with <see cref="FaceRecognitionConfiguration"/>.
 /// </summary>
 /// <remarks>
 /// Before starting the learning process, the face recognition model has to be filled with training data - face image examples.
 /// These examples have to be provided via <see cref="Add(MediaVisionSource, int)"/> or <see cref="Add(MediaVisionSource, int, Rectangle)"/>.
 /// Recognition accuracy usually increases as more examples of the same face are added, although this also depends on the learning algorithm used.
 /// </remarks>
 /// <param name="config">The configuration used for learning the recognition model. This value can be null.</param>
 /// <exception cref="ObjectDisposedException">
 ///     The <see cref="FaceRecognitionModel"/> has already been disposed of.<br/>
 ///     -or-<br/>
 ///     <paramref name="config"/> has already been disposed of.
 /// </exception>
 /// <exception cref="InvalidOperationException">No examples added.</exception>
 /// <seealso cref="Add(MediaVisionSource, int)"/>
 /// <seealso cref="Add(MediaVisionSource, int, Rectangle)"/>
 /// <since_tizen> 4 </since_tizen>
 public void Learn(FaceRecognitionConfiguration config)
 {
     InteropModel.Learn(EngineConfiguration.GetHandle(config), Handle)
         .Validate("Failed to learn");
 }
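As the remarks explain, Learn only works after the model has been filled with labeled face examples via Add. A hedged usage sketch follows; the MediaVisionSource variables and label values are illustrative assumptions, not part of the original source.

    var model = new FaceRecognitionModel();

    // Add several labeled examples; labels 1 and 2 identify two different people,
    // and more varied examples of the same face generally improve accuracy.
    model.Add(faceExamplePerson1A, 1);
    model.Add(faceExamplePerson1B, 1);
    model.Add(faceExamplePerson2, 2);

    // Learn with the default settings (a null configuration is allowed).
    model.Learn(null);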
Example #3
 /// <summary>
 /// Performs face recognition on the source with <see cref="FaceRecognitionModel"/> and <see cref="FaceRecognitionConfiguration"/>.
 /// </summary>
 /// <param name="source">The <see cref="MediaVisionSource"/> of the media to recognize faces for.</param>
 /// <param name="recognitionModel">The <see cref="FaceRecognitionModel"/> to be used for recognition.</param>
 /// <param name="config">The configuration used for recognition. This value can be null.</param>
 /// <returns>A task that represents the asynchronous recognition operation.</returns>
 /// <exception cref="ArgumentNullException">
 ///     <paramref name="source"/> is null.<br/>
 ///     -or-<br/>
 ///     <paramref name="recognitionModel"/> is null.
 /// </exception>
 /// <exception cref="NotSupportedException">The feature is not supported.</exception>
 /// <exception cref="ObjectDisposedException">
 ///     <paramref name="source"/> has already been disposed of.<br/>
 ///     -or-<br/>
 ///     <paramref name="config"/> has already been disposed of.
 /// </exception>
 /// <exception cref="InvalidOperationException"><paramref name="recognitionModel"/> is untrained model.</exception>
 /// <feature>http://tizen.org/feature/vision.face_recognition</feature>
 /// <since_tizen> 4 </since_tizen>
 public static async Task<FaceRecognitionResult> RecognizeAsync(MediaVisionSource source,
                                                                 FaceRecognitionModel recognitionModel, FaceRecognitionConfiguration config)
 {
     return await InvokeRecognizeAsync(source, recognitionModel, null, config);
 }
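A hedged usage sketch for the method above, assuming it is exposed on the static FaceRecognizer class as in Tizen.Multimedia.Vision and that FaceRecognitionResult exposes Success, Label, and Area members; source and trainedModel are assumed to already exist.

    var result = await FaceRecognizer.RecognizeAsync(source, trainedModel, null);

    if (result.Success)
    {
        Console.WriteLine($"Recognized label {result.Label} in area {result.Area}");
    }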
Example #4
        /// <summary>
        /// Determines the facial expression on the media source.
        /// </summary>
        /// <param name="source">The source of the media to recognize the facial expression for.</param>
        /// <param name="bound">The location bounding the face in the source.</param>
        /// <param name="config">The configuration used for expression recognition. This value can be null.</param>
        /// <returns>A task that represents the asynchronous recognition operation.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="source"/> is null.</exception>
        /// <exception cref="ObjectDisposedException">
        ///     <paramref name="source"/> has already been disposed of.<br/>
        ///     -or-<br/>
        ///     <paramref name="config"/> has already been disposed of.
        /// </exception>
        /// <exception cref="NotSupportedException">The feature is not supported.</exception>
        /// <feature>http://tizen.org/feature/vision.face_recognition</feature>
        /// <since_tizen> 4 </since_tizen>
        public static async Task<FacialExpression> RecognizeFacialExpressionAsync(MediaVisionSource source,
                                                                                   Rectangle bound, FaceRecognitionConfiguration config)
        {
            if (source == null)
            {
                throw new ArgumentNullException(nameof(source));
            }

            TaskCompletionSource<FacialExpression> tcsResult = new TaskCompletionSource<FacialExpression>();

            InteropFace.MvFaceFacialExpressionRecognizedCallback cb = (IntPtr sourceHandle, IntPtr engineCfgHandle,
                                                                       global::Interop.MediaVision.Rectangle faceLocation, FacialExpression facialExpression, IntPtr _) =>
            {
                Log.Info(MediaVisionLog.Tag, $"Facial expression recognized, expression : {facialExpression}");
                if (!tcsResult.TrySetResult(facialExpression))
                {
                    Log.Error(MediaVisionLog.Tag, "Failed to set facial result");
                }
            };

            using (var cbKeeper = ObjectKeeper.Get(cb))
            {
                InteropFace.RecognizeFacialExpression(source.Handle, EngineConfiguration.GetHandle(config),
                                                      bound.ToMarshalable(), cb)
                    .Validate("Failed to perform facial expression recognition.");

                return await tcsResult.Task;
            }
        }
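A hedged usage sketch for the facial-expression method above, again assuming the static FaceRecognizer class from Tizen.Multimedia.Vision; the bounding rectangle values are placeholders and source is assumed to already exist.

    var faceBound = new Rectangle(30, 40, 160, 160); // placeholder face location

    FacialExpression expression =
        await FaceRecognizer.RecognizeFacialExpressionAsync(source, faceBound, null);

    Console.WriteLine($"Detected facial expression: {expression}");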