Container for the parameters to the IndexFaces operation. Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition does not save the actual faces detected. Instead, the underlying detection algorithm first detects the faces in the input image, extracts facial features into a feature vector for each face, and stores the vector in the back-end database. Amazon Rekognition uses these feature vectors when it performs face match and search operations with the SearchFaces and SearchFacesByImage operations.

If you provide the optional ExternalImageId for the input image, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index that associates faces with each image, and then use the index to find all faces in an image.

In response, the operation returns an array of metadata for all detected faces. This includes the bounding box of the detected face, a confidence value (indicating that the bounding box contains a face), a face ID assigned by the service for each face that is detected and stored, and an image ID assigned by the service for the input image. If you request all facial attributes (using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example, the location of an eye and the mouth) and other facial attributes such as gender. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.
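
A minimal sketch of that flow with the .NET SDK, assuming an initialized AmazonRekognitionClient and an existing collection; the collection name, bucket, key, and external image ID below are hypothetical placeholders:

    using System;
    using System.Collections.Generic;
    using Amazon.Rekognition;
    using Amazon.Rekognition.Model;

    var client = new AmazonRekognitionClient();

    var request = new IndexFacesRequest
    {
        CollectionId = "my-collection",                    // hypothetical collection
        ExternalImageId = "vacation-photo-001",            // caller-supplied ID for the source image
        DetectionAttributes = new List<string> { "ALL" },  // request all facial attributes
        Image = new Image
        {
            S3Object = new S3Object { Bucket = "my-bucket", Name = "photos/vacation-001.jpg" }
        }
    };

    var response = client.IndexFaces(request);

    // Each FaceRecord pairs the stored face (face ID, bounding box, confidence)
    // with the facial details detected in the image.
    foreach (var record in response.FaceRecords)
    {
        var face = record.Face;
        Console.WriteLine($"FaceId={face.FaceId}, Confidence={face.Confidence}, " +
                          $"Box=({face.BoundingBox.Left}, {face.BoundingBox.Top})");
    }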

For an example, see example2.

This operation requires permissions to perform the rekognition:IndexFaces action.

Inheritance: AmazonRekognitionRequest
 private Amazon.Rekognition.Model.IndexFacesResponse CallAWSServiceOperation(IAmazonRekognition client, Amazon.Rekognition.Model.IndexFacesRequest request)
 {
     Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon Rekognition", "IndexFaces");
     try
     {
         #if DESKTOP
         return(client.IndexFaces(request));
         #elif CORECLR
         return(client.IndexFacesAsync(request).GetAwaiter().GetResult());
         #else
                 #error "Unknown build edition"
         #endif
     }
     catch (AmazonServiceException exc)
     {
         var webException = exc.InnerException as System.Net.WebException;
         if (webException != null)
         {
             throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, webException.Message), webException);
         }
         throw;
     }
 }
        public object Execute(ExecutorContext context)
        {
            System.IO.MemoryStream _ImageContentStream = null;

            try
            {
                var cmdletContext = context as CmdletContext;
                // create request
                var request = new Amazon.Rekognition.Model.IndexFacesRequest();

                if (cmdletContext.CollectionId != null)
                {
                    request.CollectionId = cmdletContext.CollectionId;
                }
                if (cmdletContext.DetectionAttribute != null)
                {
                    request.DetectionAttributes = cmdletContext.DetectionAttribute;
                }
                if (cmdletContext.ExternalImageId != null)
                {
                    request.ExternalImageId = cmdletContext.ExternalImageId;
                }

                // populate Image
                var requestImageIsNull = true;
                request.Image = new Amazon.Rekognition.Model.Image();
                System.IO.MemoryStream requestImage_imageContent = null;
                if (cmdletContext.ImageContent != null)
                {
                    _ImageContentStream       = new System.IO.MemoryStream(cmdletContext.ImageContent);
                    requestImage_imageContent = _ImageContentStream;
                }
                if (requestImage_imageContent != null)
                {
                    request.Image.Bytes = requestImage_imageContent;
                    requestImageIsNull  = false;
                }
                Amazon.Rekognition.Model.S3Object requestImage_image_S3Object = null;

                // populate S3Object
                var requestImage_image_S3ObjectIsNull = true;
                requestImage_image_S3Object = new Amazon.Rekognition.Model.S3Object();
                System.String requestImage_image_S3Object_imageBucket = null;
                if (cmdletContext.ImageBucket != null)
                {
                    requestImage_image_S3Object_imageBucket = cmdletContext.ImageBucket;
                }
                if (requestImage_image_S3Object_imageBucket != null)
                {
                    requestImage_image_S3Object.Bucket = requestImage_image_S3Object_imageBucket;
                    requestImage_image_S3ObjectIsNull  = false;
                }
                System.String requestImage_image_S3Object_imageName = null;
                if (cmdletContext.ImageName != null)
                {
                    requestImage_image_S3Object_imageName = cmdletContext.ImageName;
                }
                if (requestImage_image_S3Object_imageName != null)
                {
                    requestImage_image_S3Object.Name  = requestImage_image_S3Object_imageName;
                    requestImage_image_S3ObjectIsNull = false;
                }
                System.String requestImage_image_S3Object_imageVersion = null;
                if (cmdletContext.ImageVersion != null)
                {
                    requestImage_image_S3Object_imageVersion = cmdletContext.ImageVersion;
                }
                if (requestImage_image_S3Object_imageVersion != null)
                {
                    requestImage_image_S3Object.Version = requestImage_image_S3Object_imageVersion;
                    requestImage_image_S3ObjectIsNull   = false;
                }
                // determine if requestImage_image_S3Object should be set to null
                if (requestImage_image_S3ObjectIsNull)
                {
                    requestImage_image_S3Object = null;
                }
                if (requestImage_image_S3Object != null)
                {
                    request.Image.S3Object = requestImage_image_S3Object;
                    requestImageIsNull     = false;
                }
                // determine if request.Image should be set to null
                if (requestImageIsNull)
                {
                    request.Image = null;
                }
                if (cmdletContext.MaxFace != null)
                {
                    request.MaxFaces = cmdletContext.MaxFace.Value;
                }
                if (cmdletContext.QualityFilter != null)
                {
                    request.QualityFilter = cmdletContext.QualityFilter;
                }

                CmdletOutput output;

                // issue call
                var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
                try
                {
                    var    response       = CallAWSServiceOperation(client, request);
                    object pipelineOutput = null;
                    pipelineOutput = cmdletContext.Select(response, this);
                    output         = new CmdletOutput
                    {
                        PipelineOutput  = pipelineOutput,
                        ServiceResponse = response
                    };
                }
                catch (Exception e)
                {
                    output = new CmdletOutput {
                        ErrorResponse = e
                    };
                }

                return(output);
            }
            finally
            {
                if (_ImageContentStream != null)
                {
                    _ImageContentStream.Dispose();
                }
            }
        }
Example 3
        /// <summary>
        /// Detects faces in the input image and adds them to the specified collection. 
        /// 
        ///  
        /// <para>
        ///  Amazon Rekognition does not save the actual faces detected. Instead, the underlying
        /// detection algorithm first detects the faces in the input image, extracts facial features
        /// into a feature vector for each face, and stores the vector in the back-end database.
        /// Amazon Rekognition uses these feature vectors when it performs face match and search
        /// operations using the <code>SearchFaces</code> and <code>SearchFacesByImage</code> operations. 
        /// </para>
        ///  
        /// <para>
        /// If you provide the optional <code>ExternalImageId</code> for the input image you provided,
        /// Amazon Rekognition associates this ID with all faces that it detects. When you call
        /// the <code>ListFaces</code> operation, the response returns the external ID. You can use this
        /// external image ID to create a client-side index to associate the faces with each image.
        /// You can then use the index to find all faces in an image. 
        /// </para>
        ///  
        /// <para>
        /// In response, the operation returns an array of metadata for all detected faces. This
        /// includes the bounding box of the detected face, a confidence value (indicating that the
        /// bounding box contains a face), a face ID assigned by the service for each face that
        /// is detected and stored, and an image ID assigned by the service for the input image.
        /// If you request all facial attributes (using the <code>detectionAttributes</code> parameter),
        /// Amazon Rekognition returns detailed facial attributes such as facial landmarks (for example,
        /// the location of an eye and the mouth) and other facial attributes such as gender. If you
        /// provide the same image, specify the same collection, and use the same external ID in the
        /// <code>IndexFaces</code> operation, Amazon Rekognition doesn't save duplicate face metadata. 
        /// </para>
        ///  
        /// <para>
        /// For an example, see <a>example2</a>.
        /// </para>
        ///  
        /// <para>
        /// This operation requires permissions to perform the <code>rekognition:IndexFaces</code>
        /// action.
        /// </para>
        /// </summary>
        /// <param name="request">Container for the necessary parameters to execute the IndexFaces service method.</param>
        /// 
        /// <returns>The response from the IndexFaces service method, as returned by Rekognition.</returns>
        /// <exception cref="Amazon.Rekognition.Model.AccessDeniedException">
        /// You are not authorized to perform the action.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.ImageTooLargeException">
        /// The input image size exceeds the allowed limit. For more information, see <a>limits</a>.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.InternalServerErrorException">
        /// Amazon Rekognition experienced a service issue. Try your call again.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.InvalidImageFormatException">
        /// The provided image format is not supported.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.InvalidParameterException">
        /// Input parameter violated a constraint. Validate your parameter before calling the
        /// API again.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.InvalidS3ObjectException">
        /// Amazon Rekognition is unable to access the S3 object specified in the request.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.ProvisionedThroughputExceededException">
        /// The number of requests exceeded your throughput limit. If you want to increase this
        /// limit, contact Amazon Rekognition.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.ResourceNotFoundException">
        /// Collection specified in the request is not found.
        /// </exception>
        /// <exception cref="Amazon.Rekognition.Model.ThrottlingException">
        /// Amazon Rekognition is temporarily unable to process the request. Try your call again.
        /// </exception>
        public IndexFacesResponse IndexFaces(IndexFacesRequest request)
        {
            var marshaller = new IndexFacesRequestMarshaller();
            var unmarshaller = IndexFacesResponseUnmarshaller.Instance;

            return Invoke<IndexFacesRequest,IndexFacesResponse>(request, marshaller, unmarshaller);
        }
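
A hedged usage sketch of this synchronous overload, passing the image as raw bytes rather than an S3 object; the file path and collection name are assumptions:

    using System;
    using System.IO;
    using Amazon.Rekognition;
    using Amazon.Rekognition.Model;

    var client = new AmazonRekognitionClient();
    using (var imageStream = new MemoryStream(File.ReadAllBytes("face.jpg")))
    {
        var response = client.IndexFaces(new IndexFacesRequest
        {
            CollectionId = "my-collection",            // hypothetical collection
            Image = new Image { Bytes = imageStream }  // image supplied as bytes instead of S3
        });
        Console.WriteLine($"Indexed {response.FaceRecords.Count} face(s).");
    }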
Example 4
        /// <summary>
        /// Initiates the asynchronous execution of the IndexFaces operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the IndexFaces operation on AmazonRekognitionClient.</param>
        /// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
        /// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
        ///          procedure using the AsyncState property.</param>
        /// 
        /// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndIndexFaces
        ///         operation.</returns>
        public IAsyncResult BeginIndexFaces(IndexFacesRequest request, AsyncCallback callback, object state)
        {
            var marshaller = new IndexFacesRequestMarshaller();
            var unmarshaller = IndexFacesResponseUnmarshaller.Instance;

            return BeginInvoke<IndexFacesRequest>(request, marshaller, unmarshaller,
                callback, state);
        }
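
A sketch of the Begin/End pattern this method belongs to, completing the call with the EndIndexFaces method referenced in the summary above; the client setup and resource names are assumptions:

    using System;
    using Amazon.Rekognition;
    using Amazon.Rekognition.Model;

    var client = new AmazonRekognitionClient();
    var request = new IndexFacesRequest
    {
        CollectionId = "my-collection",
        Image = new Image { S3Object = new S3Object { Bucket = "my-bucket", Name = "face.jpg" } }
    };

    // Start the call; the callback completes it with EndIndexFaces and reads the response.
    client.BeginIndexFaces(request, asyncResult =>
    {
        var rekognition = (AmazonRekognitionClient)asyncResult.AsyncState;
        var response = rekognition.EndIndexFaces(asyncResult);
        Console.WriteLine($"Indexed {response.FaceRecords.Count} face(s).");
    }, client);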
Example 5
        /// <summary>
        /// Initiates the asynchronous execution of the IndexFaces operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the IndexFaces operation.</param>
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
        /// <returns>The task object representing the asynchronous operation.</returns>
        public Task<IndexFacesResponse> IndexFacesAsync(IndexFacesRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller = new IndexFacesRequestMarshaller();
            var unmarshaller = IndexFacesResponseUnmarshaller.Instance;

            return InvokeAsync<IndexFacesRequest,IndexFacesResponse>(request, marshaller, 
                unmarshaller, cancellationToken);
        }
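
A minimal sketch of the task-based pattern, awaiting IndexFacesAsync with a cancellation token that gives up after ten seconds; the wrapper class, method name, and resource names are assumptions:

    using System;
    using System.Threading;
    using System.Threading.Tasks;
    using Amazon.Rekognition;
    using Amazon.Rekognition.Model;

    public class IndexFacesAsyncExample
    {
        public static async Task IndexFromS3Async(IAmazonRekognition client)
        {
            using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10)))
            {
                var response = await client.IndexFacesAsync(new IndexFacesRequest
                {
                    CollectionId = "my-collection",  // hypothetical collection
                    Image = new Image
                    {
                        S3Object = new S3Object { Bucket = "my-bucket", Name = "face.jpg" }
                    }
                }, cts.Token);

                Console.WriteLine($"Indexed {response.FaceRecords.Count} face(s).");
            }
        }
    }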