Container for the parameters to the GetMLModel operation. Returns an MLModel that includes detailed metadata, data source information, and the current status of the MLModel.

GetMLModel provides results in normal or verbose format.

Inheritance: AmazonMachineLearningRequest
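
For orientation, a minimal end-to-end sketch follows. It is not part of the SDK examples below; it assumes default credentials/region and uses the hypothetical model ID "ml-exampleModelId".

    using System;
    using System.Threading.Tasks;
    using Amazon.MachineLearning;
    using Amazon.MachineLearning.Model;

    class GetMLModelSketch
    {
        static async Task Main()
        {
            var client = new AmazonMachineLearningClient();

            // "ml-exampleModelId" is a placeholder; substitute a real MLModel ID.
            var response = await client.GetMLModelAsync(new GetMLModelRequest
            {
                MLModelId = "ml-exampleModelId",
                Verbose = false   // set to true to also receive the model's Recipe
            });

            Console.WriteLine($"{response.Name}: {response.Status}");
        }
    }
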
Example #1
        /// <summary>
        /// Gets the real-time prediction endpoint URL for the given MLModel, retrieving it
        /// from the service on first use and caching it for subsequent calls.
        /// </summary>
        public async Task<string> GetEndpointAsync()
        {
            if (this.endpoint == null)
            {
                GetMLModelRequest request = new GetMLModelRequest { MLModelId = ModelId };
                this.endpoint = (await client.GetMLModelAsync(request).ConfigureAwait(false)).EndpointInfo.EndpointUrl;
            }

            return this.endpoint;
        }
        /// <summary>
        /// Builds the GetMLModelRequest from the cmdlet context, invokes the service call,
        /// and wraps either the selected pipeline output or the error in a CmdletOutput.
        /// </summary>
        public object Execute(ExecutorContext context)
        {
            var cmdletContext = context as CmdletContext;
            // create request
            var request = new Amazon.MachineLearning.Model.GetMLModelRequest();

            if (cmdletContext.MLModelId != null)
            {
                request.MLModelId = cmdletContext.MLModelId;
            }
            if (cmdletContext.VerboseResponse != null)
            {
                request.Verbose = cmdletContext.VerboseResponse.Value;
            }

            CmdletOutput output;

            // issue call
            var client = Client ?? CreateClient(_CurrentCredentials, _RegionEndpoint);

            try
            {
                var response = CallAWSServiceOperation(client, request);
                object pipelineOutput = cmdletContext.Select(response, this);
                output = new CmdletOutput
                {
                    PipelineOutput = pipelineOutput,
                    ServiceResponse = response
                };
            }
            catch (Exception e)
            {
                output = new CmdletOutput
                {
                    ErrorResponse = e
                };
            }

            return output;
        }
        /// <summary>
        /// Initiates the asynchronous execution of the GetMLModel operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the GetMLModel operation.</param>
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
        /// <returns>The task object representing the asynchronous operation.</returns>
        public Task<GetMLModelResponse> GetMLModelAsync(GetMLModelRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller = new GetMLModelRequestMarshaller();
            var unmarshaller = GetMLModelResponseUnmarshaller.Instance;

            return InvokeAsync<GetMLModelRequest,GetMLModelResponse>(request, marshaller, 
                unmarshaller, cancellationToken);
        }
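
A possible way to call this overload with a timeout, assuming an existing client, an async context, and the hypothetical model ID used throughout these sketches:

    // Cancel the call automatically if it takes longer than 30 seconds.
    using (var cts = new System.Threading.CancellationTokenSource(TimeSpan.FromSeconds(30)))
    {
        var request = new GetMLModelRequest { MLModelId = "ml-exampleModelId" };
        var response = await client.GetMLModelAsync(request, cts.Token);
        Console.WriteLine(response.Status);
    }
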
 /// <summary>
 /// Returns an <code>MLModel</code> that includes detailed metadata, data source information,
 /// and the current status of the <code>MLModel</code>.
 /// 
 ///  
 /// <para>
 /// <code>GetMLModel</code> provides results in normal or verbose format. 
 /// </para>
 /// </summary>
 /// <param name="mlModelId">The ID assigned to the <code>MLModel</code> at creation.</param>
 /// <param name="verbose">Specifies whether the <code>GetMLModel</code> operation should return <code>Recipe</code>. If true, <code>Recipe</code> is returned. If false, <code>Recipe</code> is not returned.</param>
 /// <param name="cancellationToken">
 ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
 /// </param>
 /// 
 /// <returns>The response from the GetMLModel service method, as returned by MachineLearning.</returns>
 /// <exception cref="Amazon.MachineLearning.Model.InternalServerException">
 /// An error on the server occurred when trying to process a request.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.InvalidInputException">
 /// An error on the client occurred. Typically, the cause is an invalid input value.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.ResourceNotFoundException">
 /// A specified resource cannot be located.
 /// </exception>
 public Task<GetMLModelResponse> GetMLModelAsync(string mlModelId, bool verbose, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
 {
     var request = new GetMLModelRequest();
     request.MLModelId = mlModelId;
     request.Verbose = verbose;
     return GetMLModelAsync(request, cancellationToken);
 }
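
As the verbose parameter above indicates, passing true is what makes the Recipe available on the response. A sketch, assuming an existing client and the same placeholder model ID:

    // With verbose = true the response also carries the model's Recipe.
    var verboseResponse = await client.GetMLModelAsync("ml-exampleModelId", verbose: true);
    Console.WriteLine(verboseResponse.Recipe);
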
        /// <summary>
        /// Synchronously invokes the GetMLModel operation by marshalling the request
        /// and unmarshalling the service response.
        /// </summary>
        internal GetMLModelResponse GetMLModel(GetMLModelRequest request)
        {
            var marshaller = new GetMLModelRequestMarshaller();
            var unmarshaller = GetMLModelResponseUnmarshaller.Instance;

            return Invoke<GetMLModelRequest,GetMLModelResponse>(request, marshaller, unmarshaller);
        }
 /// <summary>
 /// Returns an <code>MLModel</code> that includes detailed metadata, data source information,
 /// and the current status of the <code>MLModel</code>.
 /// 
 ///  
 /// <para>
 /// <code>GetMLModel</code> provides results in normal or verbose format. 
 /// </para>
 /// </summary>
 /// <param name="mlModelId">The ID assigned to the <code>MLModel</code> at creation.</param>
 /// <param name="verbose">Specifies whether the <code>GetMLModel</code> operation should return <code>Recipe</code>. If true, <code>Recipe</code> is returned. If false, <code>Recipe</code> is not returned.</param>
 /// 
 /// <returns>The response from the GetMLModel service method, as returned by MachineLearning.</returns>
 /// <exception cref="Amazon.MachineLearning.Model.InternalServerException">
 /// An error on the server occurred when trying to process a request.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.InvalidInputException">
 /// An error on the client occurred. Typically, the cause is an invalid input value.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.ResourceNotFoundException">
 /// A specified resource cannot be located.
 /// </exception>
 public GetMLModelResponse GetMLModel(string mlModelId, bool verbose)
 {
     var request = new GetMLModelRequest();
     request.MLModelId = mlModelId;
     request.Verbose = verbose;
     return GetMLModel(request);
 }
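
The exceptions documented above can be handled in the usual way; a sketch for the not-found case, again with a placeholder model ID:

    try
    {
        var model = client.GetMLModel("ml-exampleModelId", verbose: false);
        Console.WriteLine($"{model.Name}: {model.Status}");
    }
    catch (Amazon.MachineLearning.Model.ResourceNotFoundException)
    {
        // No MLModel with that ID exists in the current account/region.
    }
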
 /// <summary>
 /// Returns an <code>MLModel</code> that includes detailed metadata, data source information,
 /// and the current status of the <code>MLModel</code>.
 /// 
 ///  
 /// <para>
 /// <code>GetMLModel</code> provides results in normal or verbose format. 
 /// </para>
 /// </summary>
 /// <param name="mlModelId">The ID assigned to the <code>MLModel</code> at creation.</param>
 /// 
 /// <returns>The response from the GetMLModel service method, as returned by MachineLearning.</returns>
 /// <exception cref="Amazon.MachineLearning.Model.InternalServerException">
 /// An error on the server occurred when trying to process a request.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.InvalidInputException">
 /// An error on the client occurred. Typically, the cause is an invalid input value.
 /// </exception>
 /// <exception cref="Amazon.MachineLearning.Model.ResourceNotFoundException">
 /// A specified resource cannot be located.
 /// </exception>
 public GetMLModelResponse GetMLModel(string mlModelId)
 {
     var request = new GetMLModelRequest();
     request.MLModelId = mlModelId;
     return GetMLModel(request);
 }
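
Because GetMLModel also reports the model's lifecycle status, a common pattern is to poll it while training is still underway. A rough sketch, assuming the EntityStatus constants from the Amazon.MachineLearning namespace and a fixed 30-second delay:

    // Poll until the MLModel leaves the PENDING/INPROGRESS states.
    var model = client.GetMLModel("ml-exampleModelId");
    while (model.Status == EntityStatus.PENDING || model.Status == EntityStatus.INPROGRESS)
    {
        System.Threading.Thread.Sleep(TimeSpan.FromSeconds(30));
        model = client.GetMLModel("ml-exampleModelId");
    }
    Console.WriteLine($"Final status: {model.Status}");
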
        /// <summary>
        /// Initiates the asynchronous execution of the GetMLModel operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the GetMLModel operation on AmazonMachineLearningClient.</param>
        /// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
        /// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
        ///          procedure using the AsyncState property.</param>
        /// 
        /// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndGetMLModel
        ///         operation.</returns>
        public IAsyncResult BeginGetMLModel(GetMLModelRequest request, AsyncCallback callback, object state)
        {
            var marshaller = new GetMLModelRequestMarshaller();
            var unmarshaller = GetMLModelResponseUnmarshaller.Instance;

            return BeginInvoke<GetMLModelRequest>(request, marshaller, unmarshaller,
                callback, state);
        }
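
A sketch of the matching Begin/End (APM) usage; it assumes an existing client, the placeholder model ID, and the corresponding EndGetMLModel method referenced in the remarks above:

    var asyncResult = client.BeginGetMLModel(
        new GetMLModelRequest { MLModelId = "ml-exampleModelId" },
        ar =>
        {
            // Complete the call and read the result inside the callback.
            var response = client.EndGetMLModel(ar);
            Console.WriteLine(response.Status);
        },
        state: null);
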
 /// <summary>
 /// Invokes GetMLModel against the service client, using the synchronous API on desktop
 /// builds and the asynchronous API on CoreCLR, and rethrows name-resolution failures
 /// with a friendlier message.
 /// </summary>
 private Amazon.MachineLearning.Model.GetMLModelResponse CallAWSServiceOperation(IAmazonMachineLearning client, Amazon.MachineLearning.Model.GetMLModelRequest request)
 {
     Utils.Common.WriteVerboseEndpointMessage(this, client.Config, "Amazon Machine Learning", "GetMLModel");
     try
     {
          #if DESKTOP
          return client.GetMLModel(request);
          #elif CORECLR
          return client.GetMLModelAsync(request).GetAwaiter().GetResult();
          #else
          #error "Unknown build edition"
          #endif
     }
     catch (AmazonServiceException exc)
     {
         var webException = exc.InnerException as System.Net.WebException;
         if (webException != null)
         {
             throw new Exception(Utils.Common.FormatNameResolutionFailureMessage(client.Config, webException.Message), webException);
         }
         throw;
     }
 }