/// <summary>
/// Builds a PredictRequest from the cmdlet context, invokes the Predict service
/// operation, and packages either the selected pipeline output or the thrown
/// exception into a CmdletOutput for the PowerShell pipeline.
/// </summary>
/// <param name="context">Executor context; expected to be a CmdletContext carrying the user-supplied parameters.</param>
/// <returns>A CmdletOutput holding the pipeline output and service response, or the error.</returns>
public object Execute(ExecutorContext context)
{
    var cmdletContext = context as CmdletContext;

    // Copy only the parameters the user actually supplied onto the request.
    var request = new Amazon.MachineLearning.Model.PredictRequest();
    if (cmdletContext.MLModelId != null)
    {
        request.MLModelId = cmdletContext.MLModelId;
    }
    if (cmdletContext.PredictEndpoint != null)
    {
        request.PredictEndpoint = cmdletContext.PredictEndpoint;
    }
    if (cmdletContext.Record != null)
    {
        request.Record = cmdletContext.Record;
    }

    CmdletOutput output;

    // Issue the call. Any exception is captured in ErrorResponse rather than
    // propagated, so the cmdlet framework can report it through the pipeline.
    var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);
    try
    {
        var response = CallAWSServiceOperation(client, request);
        object pipelineOutput = cmdletContext.Select(response, this);
        output = new CmdletOutput
        {
            PipelineOutput = pipelineOutput,
            ServiceResponse = response
        };
    }
    catch (Exception e)
    {
        output = new CmdletOutput { ErrorResponse = e };
    }

    return output;
}
/// <summary>
/// Initiates the asynchronous execution of the Predict operation.
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the Predict operation.</param>
/// <param name="cancellationToken">
///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<PredictResponse> PredictAsync(PredictRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
    var requestMarshaller = new PredictRequestMarshaller();
    var responseUnmarshaller = PredictResponseUnmarshaller.Instance;

    return InvokeAsync<PredictRequest,PredictResponse>(request, requestMarshaller, responseUnmarshaller, cancellationToken);
}
/// <summary>
/// Generates a prediction for the observation using the specified <code>MLModel</code>.
///
/// <note><title>Note</title>
/// <para>
/// Not all response parameters will be populated because this is dependent on the type
/// of requested model.
/// </para>
/// </note>
/// </summary>
/// <param name="mlModelId">A unique identifier of the <code>MLModel</code>.</param>
/// <param name="predictEndpoint">A property of PredictRequest used to execute the Predict service method.</param>
/// <param name="record">A property of PredictRequest used to execute the Predict service method.</param>
/// <param name="cancellationToken">
///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
///
/// <returns>The response from the Predict service method, as returned by MachineLearning.</returns>
/// <exception cref="Amazon.MachineLearning.Model.InternalServerException">
/// An error on the server occurred when trying to process a request.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.InvalidInputException">
/// An error on the client occurred. Typically, the cause is an invalid input value.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.LimitExceededException">
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as <code>DataSource</code>.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.PredictorNotMountedException">
/// The exception is thrown when a predict request is made to an unmounted <code>MLModel</code>.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.ResourceNotFoundException">
/// A specified resource cannot be located.
/// </exception>
public Task<PredictResponse> PredictAsync(string mlModelId, string predictEndpoint, Dictionary<string, string> record, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
{
    // Convenience overload: package the individual parameters into a request
    // and delegate to the request-based overload.
    var request = new PredictRequest
    {
        MLModelId = mlModelId,
        PredictEndpoint = predictEndpoint,
        Record = record
    };
    return PredictAsync(request, cancellationToken);
}
/// <summary>
/// Synchronously invokes the Predict operation by marshalling the request,
/// calling the service, and unmarshalling the response.
/// </summary>
/// <param name="request">Container for the necessary parameters to execute the Predict operation.</param>
/// <returns>The response from the Predict service method.</returns>
internal PredictResponse Predict(PredictRequest request)
{
    var requestMarshaller = new PredictRequestMarshaller();
    var responseUnmarshaller = PredictResponseUnmarshaller.Instance;

    return Invoke<PredictRequest,PredictResponse>(request, requestMarshaller, responseUnmarshaller);
}
/// <summary>
/// Generates a prediction for the observation using the specified <code>MLModel</code>.
///
/// <note><title>Note</title>
/// <para>
/// Not all response parameters will be populated. Whether a response parameter is populated
/// depends on the type of model requested.
/// </para>
/// </note>
/// </summary>
/// <param name="mlModelId">A unique identifier of the <code>MLModel</code>.</param>
/// <param name="predictEndpoint">A property of PredictRequest used to execute the Predict service method.</param>
/// <param name="record">A property of PredictRequest used to execute the Predict service method.</param>
///
/// <returns>The response from the Predict service method, as returned by MachineLearning.</returns>
/// <exception cref="Amazon.MachineLearning.Model.InternalServerException">
/// An error on the server occurred when trying to process a request.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.InvalidInputException">
/// An error on the client occurred. Typically, the cause is an invalid input value.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.LimitExceededException">
/// The subscriber exceeded the maximum number of operations. This exception can occur
/// when listing objects such as <code>DataSource</code>.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.PredictorNotMountedException">
/// The exception is thrown when a predict request is made to an unmounted <code>MLModel</code>.
/// </exception>
/// <exception cref="Amazon.MachineLearning.Model.ResourceNotFoundException">
/// A specified resource cannot be located.
/// </exception>
public PredictResponse Predict(string mlModelId, string predictEndpoint, Dictionary<string, string> record)
{
    // Convenience overload: package the individual parameters into a request
    // and delegate to the request-based overload.
    var request = new PredictRequest
    {
        MLModelId = mlModelId,
        PredictEndpoint = predictEndpoint,
        Record = record
    };
    return Predict(request);
}
/// <summary>
/// Initiates the asynchronous execution of the Predict operation.
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the Predict operation on AmazonMachineLearningClient.</param>
/// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
/// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
///          procedure using the AsyncState property.</param>
///
/// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndPredict
///         operation.</returns>
public IAsyncResult BeginPredict(PredictRequest request, AsyncCallback callback, object state)
{
    var requestMarshaller = new PredictRequestMarshaller();
    var responseUnmarshaller = PredictResponseUnmarshaller.Instance;

    return BeginInvoke<PredictRequest>(request, requestMarshaller, responseUnmarshaller, callback, state);
}
/// <summary>
/// Calls the Predict service operation on the supplied client, selecting the
/// sync or async API by build edition, and rewraps name-resolution failures
/// with a friendlier message.
/// </summary>
/// <param name="client">The Machine Learning service client to invoke.</param>
/// <param name="request">The populated Predict request.</param>
/// <returns>The Predict service response.</returns>
private Amazon.MachineLearning.Model.PredictResponse CallAWSServiceOperation(IAmazonMachineLearning client, Amazon.MachineLearning.Model.PredictRequest request)
{
    Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon Machine Learning", "Predict");
    try
    {
#if DESKTOP
        return client.Predict(request);
#elif CORECLR
        // The CoreCLR client only exposes the async API; block for the result here.
        return client.PredictAsync(request).GetAwaiter().GetResult();
#else
#error "Unknown build edition"
#endif
    }
    catch (AmazonServiceException exc)
    {
        // DNS/name-resolution failures surface as a WebException inner exception;
        // rewrap them with an endpoint-aware message. Everything else is rethrown as-is.
        if (exc.InnerException is System.Net.WebException webException)
        {
            throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, webException.Message), webException);
        }
        throw;
    }
}