/// <summary>
 /// Detect human faces in an image and return face locations, optionally
 /// with faceIds, landmarks, and attributes.
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='returnFaceId'>
 /// A value indicating whether the operation should return faceIds of detected
 /// faces.
 /// </param>
 /// <param name='returnFaceLandmarks'>
 /// A value indicating whether the operation should return landmarks of the
 /// detected faces.
 /// </param>
 /// <param name='returnFaceAttributes'>
 /// Analyze and return one or more specified face attributes as a
 /// comma-separated string, e.g. "returnFaceAttributes=age,gender". Supported
 /// face attributes include age, gender, headPose, smile, facialHair, glasses,
 /// and emotion. Note that each face attribute analysis incurs additional
 /// computational and time cost.
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task<IList<DetectedFace>> DetectWithStreamAsync(this IFaceOperations operations, Stream image, bool? returnFaceId = true, bool? returnFaceLandmarks = false, IList<FaceAttributeType> returnFaceAttributes = default(IList<FaceAttributeType>), CancellationToken cancellationToken = default(CancellationToken))
 {
     using (var _result = await operations.DetectWithStreamWithHttpMessagesAsync(image, returnFaceId, returnFaceLandmarks, returnFaceAttributes, null, cancellationToken).ConfigureAwait(false))
     {
         return _result.Body;
     }
 }
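Below is a minimal usage sketch for the overload above. It is not part of the SDK source: it assumes the Microsoft.Azure.CognitiveServices.Vision.Face client library, and the key, endpoint, and file name are placeholder values.

 using System;
 using System.Collections.Generic;
 using System.IO;
 using System.Threading.Tasks;
 using Microsoft.Azure.CognitiveServices.Vision.Face;
 using Microsoft.Azure.CognitiveServices.Vision.Face.Models;

 public static class DetectUsageExample
 {
     public static async Task RunAsync()
     {
         // Placeholder credentials; substitute a real key and endpoint.
         IFaceClient client = new FaceClient(new ApiKeyServiceClientCredentials("<subscription-key>"))
         {
             Endpoint = "https://<resource-name>.cognitiveservices.azure.com"
         };

         using (FileStream imageStream = File.OpenRead("photo.jpg"))
         {
             // Ask for landmarks in addition to the default faceIds.
             IList<DetectedFace> faces = await client.Face.DetectWithStreamAsync(
                 imageStream, returnFaceId: true, returnFaceLandmarks: true);

             foreach (DetectedFace face in faces)
             {
                 Console.WriteLine(
                     $"Face {face.FaceId}: {face.FaceRectangle.Width}x{face.FaceRectangle.Height} " +
                     $"at ({face.FaceRectangle.Left}, {face.FaceRectangle.Top})");
             }
         }
     }
 }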
Example #2
 /// <summary>
 /// Detect human faces in an image, return face rectangles, and optionally
 /// faceIds, landmarks, and attributes.&lt;br /&gt;
 /// * No image will be stored. Only the extracted face feature will be stored
 /// on the server. The faceId is an identifier of the face feature and will be
 /// used in [Face -
 /// Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify),
 /// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface),
 /// and [Face - Find
 /// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). The
 /// stored face feature(s) will expire and be deleted at the time specified by
 /// faceIdTimeToLive after the original detection call.
 /// * Optional parameters include faceId, landmarks, and attributes. Attributes
 /// include age, gender, headPose, smile, facialHair, glasses, emotion, hair,
 /// makeup, occlusion, accessories, blur, exposure, noise, mask, and
 /// qualityForRecognition. Some of the results returned for specific attributes
 /// may not be highly accurate.
 /// * JPEG, PNG, GIF (the first frame), and BMP formats are supported. The
 /// allowed image file size ranges from 1 KB to 6 MB.
 /// * Up to 100 faces can be returned for an image. Faces are ranked by face
 /// rectangle size from large to small.
 /// * For optimal results when querying [Face -
 /// Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify),
 /// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface),
 /// and [Face - Find
 /// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar)
 /// ('returnFaceId' is true), please use faces that are frontal, clear, and at
 /// least 200x200 pixels (100 pixels between eyes).
 /// * The minimum detectable face size is 36x36 pixels in an image no larger
 /// than 1920x1080 pixels. Images larger than 1920x1080 pixels require a
 /// proportionally larger minimum face size.
 /// * Different 'detectionModel' values can be provided. To use and compare
 /// different detection models, please refer to [How to specify a detection
 /// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model).
 /// * Different 'recognitionModel' values are available. If follow-up
 /// operations such as Verify, Identify, or Find Similar are needed, please
 /// specify the recognition model with the 'recognitionModel' parameter. The
 /// default value is 'recognition_01'; if the latest model is needed, please
 /// explicitly specify the model you need in this parameter. Once specified,
 /// the detected faceIds will be associated with the specified recognition
 /// model. For more details, please refer to [Specify a recognition
 /// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model).
 /// </summary>
 /// <param name='operations'>
 /// The operations group for this extension method.
 /// </param>
 /// <param name='image'>
 /// An image stream.
 /// </param>
 /// <param name='returnFaceId'>
 /// A value indicating whether the operation should return faceIds of detected
 /// faces.
 /// </param>
 /// <param name='returnFaceLandmarks'>
 /// A value indicating whether the operation should return landmarks of the
 /// detected faces.
 /// </param>
 /// <param name='returnFaceAttributes'>
 /// Analyze and return one or more specified face attributes as a
 /// comma-separated string, e.g. "returnFaceAttributes=age,gender". The
 /// available attributes depend on the 'detectionModel' specified:
 /// 'detection_01' supports age, gender, headPose, smile, facialHair, glasses,
 /// emotion, hair, makeup, occlusion, accessories, blur, exposure, noise, and
 /// qualityForRecognition, while 'detection_02' does not support any attributes
 /// and 'detection_03' supports only mask and qualityForRecognition.
 /// Additionally, qualityForRecognition is only supported when the
 /// 'recognitionModel' is specified as 'recognition_03' or 'recognition_04'.
 /// Note that each face attribute analysis incurs additional computational and
 /// time cost.
 /// </param>
 /// <param name='recognitionModel'>
 /// Name of recognition model. Recognition model is used when the face features
 /// are extracted and associated with detected faceIds, (Large)FaceList or
 /// (Large)PersonGroup. A recognition model name can be provided when
 /// performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup
 /// - Create. The default value is 'recognition_01'; if the latest model is
 /// needed, please explicitly specify it. Possible values include:
 /// 'recognition_01', 'recognition_02', 'recognition_03', 'recognition_04'
 /// </param>
 /// <param name='returnRecognitionModel'>
 /// A value indicating whether the operation should return 'recognitionModel'
 /// in response.
 /// </param>
 /// <param name='detectionModel'>
 /// Name of detection model. Detection model is used to detect faces in the
 /// submitted image. A detection model name can be provided when performing
 /// Face - Detect or (Large)FaceList - Add Face or (Large)PersonGroup - Add
 /// Face. The default value is 'detection_01'; if another model is needed,
 /// please explicitly specify it. Possible values include: 'detection_01',
 /// 'detection_02', 'detection_03'
 /// </param>
 /// <param name='faceIdTimeToLive'>
 /// The number of seconds the faceId is cached. The supported range is 60 to
 /// 86400 seconds. The default value is 86400 (24 hours).
 /// </param>
 /// <param name='cancellationToken'>
 /// The cancellation token.
 /// </param>
 public static async Task<IList<DetectedFace>> DetectWithStreamAsync(this IFaceOperations operations, Stream image, bool? returnFaceId = true, bool? returnFaceLandmarks = false, IList<FaceAttributeType> returnFaceAttributes = default(IList<FaceAttributeType>), string recognitionModel = default(string), bool? returnRecognitionModel = false, string detectionModel = default(string), int? faceIdTimeToLive = 86400, CancellationToken cancellationToken = default(CancellationToken))
 {
     using (var _result = await operations.DetectWithStreamWithHttpMessagesAsync(image, returnFaceId, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel, faceIdTimeToLive, null, cancellationToken).ConfigureAwait(false))
     {
         return _result.Body;
     }
 }
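A similar hedged sketch for this richer overload, reusing the usings and the configured IFaceClient from the previous example. The model names come from the &lt;param&gt; documentation above; 'detection_01' is chosen because it supports the age and emotion attributes requested, and the one-hour faceIdTimeToLive is an arbitrary illustrative value.

 public static async Task RunWithModelsAsync(IFaceClient client)
 {
     using (FileStream imageStream = File.OpenRead("group-photo.jpg"))
     {
         IList<DetectedFace> faces = await client.Face.DetectWithStreamAsync(
             imageStream,
             returnFaceId: true,
             returnFaceLandmarks: false,
             returnFaceAttributes: new List<FaceAttributeType>
             {
                 FaceAttributeType.Age,
                 FaceAttributeType.Emotion
             },
             recognitionModel: "recognition_04",  // associate the faceIds with the latest recognition model
             returnRecognitionModel: true,
             detectionModel: "detection_01",      // supports the attributes requested above
             faceIdTimeToLive: 3600);             // cache faceIds for one hour instead of the 86400-second default

         foreach (DetectedFace face in faces)
         {
             Console.WriteLine($"Face {face.FaceId}, estimated age {face.FaceAttributes?.Age}");
         }
     }
 }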