/// <summary>
/// Begins an asynchronous DetectFaces call using the classic Begin/End (IAsyncResult) pattern.
/// </summary>
///
/// <param name="request">The parameters for the DetectFaces operation on AmazonRekognitionClient.</param>
/// <param name="callback">Delegate invoked once the asynchronous operation completes.</param>
/// <param name="state">Caller-supplied state object; retrieve it inside the callback via the
/// IAsyncResult.AsyncState property.</param>
///
/// <returns>An IAsyncResult handle that can be used to poll or wait for the result; it must also
/// be passed to EndDetectFaces to obtain the operation's outcome.</returns>
public IAsyncResult BeginDetectFaces(DetectFacesRequest request, AsyncCallback callback, object state)
{
    // Hand the request plus its marshaller/unmarshaller pair straight to the
    // shared Begin pipeline; no intermediate locals are needed.
    return BeginInvoke<DetectFacesRequest>(
        request,
        new DetectFacesRequestMarshaller(),
        DetectFacesResponseUnmarshaller.Instance,
        callback,
        state);
}
/// <summary>
/// Detects faces within an image (JPEG or PNG) that is provided as input.
///
/// <para>
/// For every face found, the service returns face details: a bounding box, a confidence
/// value that the box contains a face, and a fixed set of attributes such as facial
/// landmarks (for example, coordinates of eye and mouth), gender, presence of beard,
/// sunglasses, and so on.
/// </para>
///
/// <para>
/// The face-detection algorithm works best on frontal faces; non-frontal or obscured
/// faces may be missed or detected with lower confidence.
/// </para>
/// <note>
/// <para>
/// This is a stateless API operation — it does not persist any data.
/// </para>
/// </note>
/// <para>
/// For an example, see <a>get-started-exercise-detect-faces</a>.
/// </para>
///
/// <para>
/// This operation requires permissions to perform the <code>rekognition:DetectFaces</code>
/// action.
/// </para>
/// </summary>
/// <param name="request">Container for the necessary parameters to execute the DetectFaces service method.</param>
///
/// <returns>The response from the DetectFaces service method, as returned by Rekognition.</returns>
/// <exception cref="Amazon.Rekognition.Model.AccessDeniedException">
/// You are not authorized to perform the action.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.ImageTooLargeException">
/// The input image size exceeds the allowed limit. For more information, see <a>limits</a>.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.InternalServerErrorException">
/// Amazon Rekognition experienced a service issue. Try your call again.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.InvalidImageFormatException">
/// The provided image format is not supported.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.InvalidParameterException">
/// Input parameter violated a constraint. Validate your parameter before calling the
/// API again.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.InvalidS3ObjectException">
/// Amazon Rekognition is unable to access the S3 object specified in the request.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.ProvisionedThroughputExceededException">
/// The number of requests exceeded your throughput limit. If you want to increase this
/// limit, contact Amazon Rekognition.
/// </exception>
/// <exception cref="Amazon.Rekognition.Model.ThrottlingException">
/// Amazon Rekognition is temporarily unable to process the request. Try your call again.
/// </exception>
public DetectFacesResponse DetectFaces(DetectFacesRequest request)
{
    // Delegate to the shared synchronous Invoke pipeline with the operation's
    // marshaller/unmarshaller supplied inline.
    return Invoke<DetectFacesRequest, DetectFacesResponse>(
        request,
        new DetectFacesRequestMarshaller(),
        DetectFacesResponseUnmarshaller.Instance);
}
/// <summary>
/// Builds a DetectFaces request from the cmdlet's bound parameters, invokes the service
/// call, and wraps either the response or the thrown exception in a CmdletOutput.
/// NOTE(review): <c>context</c> is cast with <c>as</c> and then dereferenced without a
/// null check — a context of any other type surfaces as a NullReferenceException rather
/// than an InvalidCastException; confirm the executor only ever passes CmdletContext.
/// </summary>
/// <param name="context">Executor-supplied context carrying the cmdlet's parameter values.</param>
/// <returns>A CmdletOutput holding the pipeline output and service response, or the error.</returns>
public object Execute(ExecutorContext context)
{
    // Owned here so the finally block below can dispose it after the call completes.
    System.IO.MemoryStream _ImageContentStream = null;
    try
    {
        var cmdletContext = context as CmdletContext;
        // create request
        var request = new Amazon.Rekognition.Model.DetectFacesRequest();
        if (cmdletContext.Attribute != null)
        {
            request.Attributes = cmdletContext.Attribute;
        }
        // populate Image
        // Generated "IsNull" flags: each is cleared when any child property is bound;
        // a parent object whose flag is still set at the end is replaced with null.
        var requestImageIsNull = true;
        request.Image = new Amazon.Rekognition.Model.Image();
        System.IO.MemoryStream requestImage_imageContent = null;
        if (cmdletContext.ImageContent != null)
        {
            // Wrap the raw bytes in a stream for Image.Bytes.
            _ImageContentStream = new System.IO.MemoryStream(cmdletContext.ImageContent);
            requestImage_imageContent = _ImageContentStream;
        }
        if (requestImage_imageContent != null)
        {
            request.Image.Bytes = requestImage_imageContent;
            requestImageIsNull = false;
        }
        Amazon.Rekognition.Model.S3Object requestImage_image_S3Object = null;
        // populate S3Object (alternative image source: bucket/name/version)
        var requestImage_image_S3ObjectIsNull = true;
        requestImage_image_S3Object = new Amazon.Rekognition.Model.S3Object();
        System.String requestImage_image_S3Object_imageBucket = null;
        if (cmdletContext.ImageBucket != null)
        {
            requestImage_image_S3Object_imageBucket = cmdletContext.ImageBucket;
        }
        if (requestImage_image_S3Object_imageBucket != null)
        {
            requestImage_image_S3Object.Bucket = requestImage_image_S3Object_imageBucket;
            requestImage_image_S3ObjectIsNull = false;
        }
        System.String requestImage_image_S3Object_imageName = null;
        if (cmdletContext.ImageName != null)
        {
            requestImage_image_S3Object_imageName = cmdletContext.ImageName;
        }
        if (requestImage_image_S3Object_imageName != null)
        {
            requestImage_image_S3Object.Name = requestImage_image_S3Object_imageName;
            requestImage_image_S3ObjectIsNull = false;
        }
        System.String requestImage_image_S3Object_imageVersion = null;
        if (cmdletContext.ImageVersion != null)
        {
            requestImage_image_S3Object_imageVersion = cmdletContext.ImageVersion;
        }
        if (requestImage_image_S3Object_imageVersion != null)
        {
            requestImage_image_S3Object.Version = requestImage_image_S3Object_imageVersion;
            requestImage_image_S3ObjectIsNull = false;
        }
        // determine if requestImage_image_S3Object should be set to null
        if (requestImage_image_S3ObjectIsNull)
        {
            requestImage_image_S3Object = null;
        }
        if (requestImage_image_S3Object != null)
        {
            request.Image.S3Object = requestImage_image_S3Object;
            requestImageIsNull = false;
        }
        // determine if request.Image should be set to null
        // (no bytes and no S3 object bound means the whole Image is dropped)
        if (requestImageIsNull)
        {
            request.Image = null;
        }
        CmdletOutput output;
        // issue call
        var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
        try
        {
            var response = CallAWSServiceOperation(client, request);
            object pipelineOutput = null;
            // Select projects the service response into the cmdlet's pipeline output.
            pipelineOutput = cmdletContext.Select(response, this);
            output = new CmdletOutput
            {
                PipelineOutput = pipelineOutput,
                ServiceResponse = response
            };
        }
        catch (Exception e)
        {
            // Service/selection failures are reported via ErrorResponse, not rethrown.
            output = new CmdletOutput
            {
                ErrorResponse = e
            };
        }
        return(output);
    }
    finally
    {
        // Dispose the image byte stream regardless of outcome.
        if (_ImageContentStream != null)
        {
            _ImageContentStream.Dispose();
        }
    }
}
/// <summary>
/// Invokes the DetectFaces service operation for the current build edition, translating
/// name-resolution (WebException) failures into a friendlier exception message.
/// </summary>
/// <param name="client">The Rekognition client to call.</param>
/// <param name="request">The fully populated DetectFaces request.</param>
/// <returns>The DetectFaces response from the service.</returns>
private Amazon.Rekognition.Model.DetectFacesResponse CallAWSServiceOperation(IAmazonRekognition client, Amazon.Rekognition.Model.DetectFacesRequest request)
{
    Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon Rekognition", "DetectFaces");

    try
    {
#if DESKTOP
        // Desktop builds expose the synchronous API directly.
        return client.DetectFaces(request);
#elif CORECLR
        // CoreCLR builds only expose the async API; block for the result here
        // (deliberate in this generated cmdlet layer, which is synchronous).
        return client.DetectFacesAsync(request).GetAwaiter().GetResult();
#else
#error "Unknown build edition"
#endif
    }
    catch (AmazonServiceException serviceException)
    {
        var resolutionFailure = serviceException.InnerException as System.Net.WebException;
        if (resolutionFailure == null)
        {
            // Not a network-name failure — let the original service exception propagate.
            throw;
        }
        throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, resolutionFailure.Message), resolutionFailure);
    }
}
/// <summary>
/// Starts an asynchronous DetectFaces call and returns the task representing it.
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the DetectFaces operation.</param>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<DetectFacesResponse> DetectFacesAsync(DetectFacesRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
    // Forward to the shared async pipeline with the operation's
    // marshaller/unmarshaller supplied inline.
    return InvokeAsync<DetectFacesRequest, DetectFacesResponse>(
        request,
        new DetectFacesRequestMarshaller(),
        DetectFacesResponseUnmarshaller.Instance,
        cancellationToken);
}