/// <summary>
/// Perform an online prediction.
/// </summary>
/// <param name="endpoint">
/// Required. The name of the Endpoint requested to serve the prediction.
/// Format:
/// `projects/{project}/locations/{location}/endpoints/{endpoint}`
/// </param>
/// <param name="instances">
/// Required. The instances that are the input to the prediction call.
/// A DeployedModel may have an upper limit on the number of instances it
/// supports per request, and when it is exceeded the prediction call errors
/// in case of AutoML Models, or, in case of customer created Models, the
/// behaviour is as documented by that Model.
/// The schema of any single instance may be specified via Endpoint's
/// DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
/// </param>
/// <param name="parameters">
/// The parameters that govern the prediction. The schema of the parameters may
/// be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri].
/// </param>
/// <param name="callSettings">If not null, applies overrides to this RPC call.</param>
/// <returns>A Task containing the RPC response.</returns>
public virtual stt::Task<PredictResponse> PredictAsync(EndpointName endpoint, scg::IEnumerable<wkt::Value> instances, wkt::Value parameters, gaxgrpc::CallSettings callSettings = null) =>
    PredictAsync(new PredictRequest
    {
        EndpointAsEndpointName = gax::GaxPreconditions.CheckNotNull(endpoint, nameof(endpoint)),
        Instances =
        {
            gax::GaxPreconditions.CheckNotNull(instances, nameof(instances)),
        },
        Parameters = parameters,
    }, callSettings);
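// A minimal usage sketch (illustrative only, not part of the generated surface): calling the
// EndpointName-based async overload above. The project, location, endpoint ID, and instance
// payload below are placeholders.
//
//   PredictionServiceClient client = await PredictionServiceClient.CreateAsync();
//   EndpointName endpointName = EndpointName.FromProjectLocationEndpoint("my-project", "us-central1", "123456789");
//   wkt::Value instance = wkt::Value.Parser.ParseJson("{ \"feature\": 0.5 }");
//   PredictResponse response = await client.PredictAsync(endpointName, new[] { instance }, parameters: null);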
/// <summary>
/// Perform an online prediction.
/// </summary>
/// <param name="endpoint">
/// Required. The name of the Endpoint requested to serve the prediction.
/// Format:
/// `projects/{project}/locations/{location}/endpoints/{endpoint}`
/// </param>
/// <param name="instances">
/// Required. The instances that are the input to the prediction call.
/// A DeployedModel may have an upper limit on the number of instances it
/// supports per request, and when it is exceeded the prediction call errors
/// in case of AutoML Models, or, in case of customer created Models, the
/// behaviour is as documented by that Model.
/// The schema of any single instance may be specified via Endpoint's
/// DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
/// </param>
/// <param name="parameters">
/// The parameters that govern the prediction. The schema of the parameters may
/// be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri].
/// </param>
/// <param name="cancellationToken">A <see cref="st::CancellationToken"/> to use for this RPC.</param>
/// <returns>A Task containing the RPC response.</returns>
public virtual stt::Task<PredictResponse> PredictAsync(EndpointName endpoint, scg::IEnumerable<wkt::Value> instances, wkt::Value parameters, st::CancellationToken cancellationToken) =>
    PredictAsync(endpoint, instances, parameters, gaxgrpc::CallSettings.FromCancellationToken(cancellationToken));
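// A minimal usage sketch (illustrative only): the same call wired to a CancellationToken, for
// example to bound the RPC by a timeout. The 30-second timeout is a placeholder; `client`,
// `endpointName`, and `instance` are assumed to be set up as in the sketch above.
//
//   using var cts = new st::CancellationTokenSource(System.TimeSpan.FromSeconds(30));
//   PredictResponse response = await client.PredictAsync(endpointName, new[] { instance }, null, cts.Token);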
/// <summary>
/// Perform an online prediction.
/// </summary>
/// <param name="endpoint">
/// Required. The name of the Endpoint requested to serve the prediction.
/// Format:
/// `projects/{project}/locations/{location}/endpoints/{endpoint}`
/// </param>
/// <param name="instances">
/// Required. The instances that are the input to the prediction call.
/// A DeployedModel may have an upper limit on the number of instances it
/// supports per request, and when it is exceeded the prediction call errors
/// in case of AutoML Models, or, in case of customer created Models, the
/// behaviour is as documented by that Model.
/// The schema of any single instance may be specified via Endpoint's
/// DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
/// </param>
/// <param name="parameters">
/// The parameters that govern the prediction. The schema of the parameters may
/// be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
/// [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
/// [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri].
/// </param>
/// <param name="callSettings">If not null, applies overrides to this RPC call.</param>
/// <returns>The RPC response.</returns>
public virtual PredictResponse Predict(string endpoint, scg::IEnumerable<wkt::Value> instances, wkt::Value parameters, gaxgrpc::CallSettings callSettings = null) =>
    Predict(new PredictRequest
    {
        Endpoint = gax::GaxPreconditions.CheckNotNullOrEmpty(endpoint, nameof(endpoint)),
        Instances =
        {
            gax::GaxPreconditions.CheckNotNull(instances, nameof(instances)),
        },
        Parameters = parameters,
    }, callSettings);
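// A minimal usage sketch (illustrative only): the synchronous overload taking the endpoint as a
// fully qualified resource name string. The resource name and instance payload are placeholders.
//
//   PredictionServiceClient client = PredictionServiceClient.Create();
//   string endpoint = "projects/my-project/locations/us-central1/endpoints/123456789";
//   wkt::Value instance = wkt::Value.Parser.ParseJson("{ \"feature\": 0.5 }");
//   PredictResponse response = client.Predict(endpoint, new[] { instance }, parameters: null);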