Container for the parameters to the RunJobFlow operation.

RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the job flow will transition to the WAITING state rather than shutting down once the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the job flow and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to Add More than 256 Steps to a Job Flow in the Amazon Elastic MapReduce Developer's Guide.

For long-running job flows, we recommend that you periodically store your results.
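Putting these parameters together, a minimal sketch of starting a job flow with the AWS SDK for .NET might look like the following. The bucket names, jar path, step, and instance settings are illustrative placeholders (not values from this page), and depending on the SDK version the job flow id is exposed as response.JobFlowId or response.RunJobFlowResult.JobFlowId.

using System;
using Amazon.ElasticMapReduce;
using Amazon.ElasticMapReduce.Model;

public static class RunJobFlowSketch
{
    public static void Start()
    {
        // Client picks up credentials and region from the SDK's default configuration.
        var client = new AmazonElasticMapReduceClient();

        var request = new RunJobFlowRequest
        {
            Name = "example-job-flow",                    // placeholder name
            LogUri = "s3://example-bucket/emr-logs/",     // placeholder log bucket
            Instances = new JobFlowInstancesConfig
            {
                MasterInstanceType = "m1.large",
                SlaveInstanceType = "m1.large",
                InstanceCount = 3,
                // Transition to WAITING instead of shutting down when the steps finish.
                KeepJobFlowAliveWhenNoSteps = true,
                // Lock the job flow against termination by API call, user action, or error.
                TerminationProtected = true
            },
            Steps =
            {
                new StepConfig
                {
                    Name = "example-step",                // placeholder step
                    ActionOnFailure = "CANCEL_AND_WAIT",
                    HadoopJarStep = new HadoopJarStepConfig
                    {
                        Jar = "s3://example-bucket/jobs/wordcount.jar",   // placeholder jar
                        Args = { "s3://example-bucket/input", "s3://example-bucket/output" }
                    }
                }
            }
        };

        RunJobFlowResponse response = client.RunJobFlow(request);
        Console.WriteLine("Started job flow: " + response.JobFlowId);
    }
}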

Inheritance: Amazon.Runtime.AmazonWebServiceRequest
 protected override void ProcessRecord()
 {
     // Create the service client configured with the cmdlet's credentials and region.
     AmazonElasticMapReduce client = base.GetClient();

     // Map the cmdlet parameters onto the RunJobFlow request.
     Amazon.ElasticMapReduce.Model.RunJobFlowRequest request = new Amazon.ElasticMapReduce.Model.RunJobFlowRequest();
     request.Name = this._Name;
     request.LogUri = this._LogUri;
     request.AdditionalInfo = this._AdditionalInfo;

     // Call the service and write the result (including the new JobFlowId) to the pipeline.
     Amazon.ElasticMapReduce.Model.RunJobFlowResponse response = client.RunJobFlow(request);
     base.WriteObject(response.RunJobFlowResult, true);
 }
        /// <summary>
        /// Initiates the asynchronous execution of the RunJobFlow operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the RunJobFlow operation.</param>
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
        /// <returns>The task object representing the asynchronous operation.</returns>
        public Task<RunJobFlowResponse> RunJobFlowAsync(RunJobFlowRequest request, System.Threading.CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.Instance;

            return InvokeAsync<RunJobFlowRequest,RunJobFlowResponse>(request, marshaller, 
                unmarshaller, cancellationToken);
        }
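A caller might drive the async overload as sketched below, assuming an SDK version where RunJobFlowAsync is exposed on IAmazonElasticMapReduce; the five-minute timeout is illustrative, and on older SDKs the id is under response.RunJobFlowResult.JobFlowId.

using System;
using System.Threading;
using System.Threading.Tasks;
using Amazon.ElasticMapReduce;
using Amazon.ElasticMapReduce.Model;

public static class RunJobFlowAsyncExample
{
    public static async Task StartAsync(IAmazonElasticMapReduce client, RunJobFlowRequest request)
    {
        // Cancel the call if it has not completed within five minutes (illustrative timeout).
        using (var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5)))
        {
            RunJobFlowResponse response = await client.RunJobFlowAsync(request, cts.Token);
            Console.WriteLine("Started job flow: " + response.JobFlowId);
        }
    }
}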
        /// <summary>
        /// RunJobFlow creates and starts running a new job flow. The job flow will run the steps
        /// specified. Once the job flow completes, the cluster is stopped and the HDFS partition
        /// is lost. To prevent loss of data, configure the last step of the job flow to store
        /// results in Amazon S3. If the <a>JobFlowInstancesConfig</a> <code>KeepJobFlowAliveWhenNoSteps</code>
        /// parameter is set to <code>TRUE</code>, the job flow will transition to the WAITING
        /// state rather than shutting down once the steps have completed. 
        /// 
        ///  
        /// <para>
        /// For additional protection, you can set the <a>JobFlowInstancesConfig</a> <code>TerminationProtected</code>
        /// parameter to <code>TRUE</code> to lock the job flow and prevent it from being terminated
        /// by API call, user intervention, or in the event of a job flow error.
        /// </para>
        ///  
        /// <para>
        /// A maximum of 256 steps are allowed in each job flow.
        /// </para>
        ///  
        /// <para>
        /// If your job flow is long-running (such as a Hive data warehouse) or complex, you may
        /// require more than 256 steps to process your data. You can bypass the 256-step limitation
        /// in various ways, including using the SSH shell to connect to the master node and submitting
        /// queries directly to the software running on the master node, such as Hive and Hadoop.
        /// For more information on how to do this, go to <a href="http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html">Add
        /// More than 256 Steps to a Job Flow</a> in the <i>Amazon Elastic MapReduce Developer's
        /// Guide</i>.
        /// </para>
        ///  
        /// <para>
        /// For long running job flows, we recommend that you periodically store your results.
        /// </para>
        /// </summary>
        /// <param name="request">Container for the necessary parameters to execute the RunJobFlow service method.</param>
        /// 
        /// <returns>The response from the RunJobFlow service method, as returned by ElasticMapReduce.</returns>
        /// <exception cref="Amazon.ElasticMapReduce.Model.InternalServerErrorException">
        /// Indicates that an error occurred while processing the request and that the request
        /// was not completed.
        /// </exception>
        public RunJobFlowResponse RunJobFlow(RunJobFlowRequest request)
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.Instance;

            return Invoke<RunJobFlowRequest,RunJobFlowResponse>(request, marshaller, unmarshaller);
        }
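Because the synchronous method surfaces service failures as InternalServerErrorException (documented above), callers typically wrap the call; a minimal sketch, with the logging and null-return policy being illustrative choices rather than anything prescribed by the SDK:

using System;
using Amazon.ElasticMapReduce;
using Amazon.ElasticMapReduce.Model;

public static class RunJobFlowWithErrorHandling
{
    public static string TryStart(IAmazonElasticMapReduce client, RunJobFlowRequest request)
    {
        try
        {
            // Returns the new JobFlowId on success (on older SDKs: response.RunJobFlowResult.JobFlowId).
            return client.RunJobFlow(request).JobFlowId;
        }
        catch (InternalServerErrorException e)
        {
            // The request was not completed; log and let the caller decide whether to retry.
            Console.Error.WriteLine("RunJobFlow failed: " + e.Message);
            return null;
        }
    }
}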
 /// <summary>
 /// RunJobFlow creates and starts running a new job flow. The job flow will run the steps
 /// specified. Once the job flow completes, the cluster is stopped and the HDFS partition
 /// is lost. To prevent loss of data, configure the last step of the job flow to store
 /// results in Amazon S3. If the <a>JobFlowInstancesConfig</a> <code>KeepJobFlowAliveWhenNoSteps</code>
 /// parameter is set to <code>TRUE</code>, the job flow will transition to the WAITING
 /// state rather than shutting down once the steps have completed.
 ///
 /// <para>
 /// For additional protection, you can set the <a>JobFlowInstancesConfig</a> <code>TerminationProtected</code>
 /// parameter to <code>TRUE</code> to lock the job flow and prevent it from being terminated
 /// by API call, user intervention, or in the event of a job flow error.
 /// </para>
 ///
 /// <para>
 /// A maximum of 256 steps are allowed in each job flow.
 /// </para>
 ///
 /// <para>
 /// If your job flow is long-running (such as a Hive data warehouse) or complex, you may
 /// require more than 256 steps to process your data. You can bypass the 256-step limitation
 /// in various ways, including using the SSH shell to connect to the master node and submitting
 /// queries directly to the software running on the master node, such as Hive and Hadoop.
 /// For more information on how to do this, go to <a href="http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html">Add
 /// More than 256 Steps to a Job Flow</a> in the <i>Amazon Elastic MapReduce Developer's
 /// Guide</i>.
 /// </para>
 ///
 /// <para>
 /// For long running job flows, we recommend that you periodically store your results.
 /// </para>
 /// </summary>
 /// <param name="request">Container for the necessary parameters to execute the RunJobFlow service method.</param>
 ///
 /// <returns>The response from the RunJobFlow service method, as returned by ElasticMapReduce.</returns>
 /// <exception cref="InternalServerErrorException">
 /// Indicates that an error occurred while processing the request and that the request
 /// was not completed.
 /// </exception>
 public RunJobFlowResponse RunJobFlow(RunJobFlowRequest request)
 {
     // Run the asynchronous implementation and block for its result.
     var task = RunJobFlowAsync(request);
     try
     {
         return task.Result;
     }
     catch(AggregateException e)
     {
         // Rethrow the underlying service exception while preserving its stack trace.
         ExceptionDispatchInfo.Capture(e.InnerException).Throw();
         return null;
     }
 }
        /// <summary>
        /// Initiates the asynchronous execution of the RunJobFlow operation.
        /// <seealso cref="Amazon.ElasticMapReduce.IAmazonElasticMapReduce.RunJobFlow"/>
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the RunJobFlow operation.</param>
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
        /// <returns>The task object representing the asynchronous operation.</returns>
		public Task<RunJobFlowResponse> RunJobFlowAsync(RunJobFlowRequest request, CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.GetInstance();
            return Invoke<IRequest, RunJobFlowRequest, RunJobFlowResponse>(request, marshaller, unmarshaller, signer, cancellationToken);
        }
        /// <summary>
        /// <para> RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the
        /// cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in
        /// Amazon S3. If the JobFlowInstancesConfig <c>KeepJobFlowAliveWhenNoSteps</c> parameter is set to <c>TRUE</c>, the job flow will transition
        /// to the WAITING state rather than shutting down once the steps have completed. </para> <para>For additional protection, you can set the
        /// JobFlowInstancesConfig <c>TerminationProtected</c> parameter to <c>TRUE</c> to lock the job flow and prevent it from being terminated by API
        /// call, user intervention, or in the event of a job flow error.</para> <para>A maximum of 256 steps are allowed in each job flow.</para>
        /// <para>If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data.
        /// You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries
        /// directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to <a href="http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html">Add More than 256 Steps to a Job Flow</a>
        /// in the <i>Amazon Elastic MapReduce Developer's Guide</i>.</para> <para>For long running job flows, we recommend that you periodically store
        /// your results.</para>
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the RunJobFlow service method on
        /// AmazonElasticMapReduce.</param>
        /// 
        /// <returns>The response from the RunJobFlow service method, as returned by AmazonElasticMapReduce.</returns>
        /// 
        /// <exception cref="T:Amazon.ElasticMapReduce.Model.InternalServerErrorException" />
		public RunJobFlowResponse RunJobFlow(RunJobFlowRequest request)
        {
            var task = RunJobFlowAsync(request);
            try
            {
                return task.Result;
            }
            catch(AggregateException e)
            {
                // Unwrap and rethrow the underlying service exception.
                throw e.InnerException;
            }
        }
        /// <summary>
        /// Initiates the asynchronous execution of the RunJobFlow operation.
        /// </summary>
        /// 
        /// <param name="request">Container for the necessary parameters to execute the RunJobFlow operation on AmazonElasticMapReduceClient.</param>
        /// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
        /// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
        ///          procedure using the AsyncState property.</param>
        /// 
        /// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndRunJobFlow
        ///         operation.</returns>
        public IAsyncResult BeginRunJobFlow(RunJobFlowRequest request, AsyncCallback callback, object state)
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.Instance;

            return BeginInvoke<RunJobFlowRequest>(request, marshaller, unmarshaller,
                callback, state);
        }
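On the APM surface, the call is started with BeginRunJobFlow and completed with the matching EndRunJobFlow method referenced in the returns note above. A rough sketch of callback-style usage follows; passing the client as the state object is just one illustrative choice.

using System;
using Amazon.ElasticMapReduce;
using Amazon.ElasticMapReduce.Model;

public static class BeginEndRunJobFlowExample
{
    public static void Start(AmazonElasticMapReduceClient client, RunJobFlowRequest request)
    {
        client.BeginRunJobFlow(request, ar =>
        {
            // Retrieve the client from the state object via AsyncState and complete the call.
            var emr = (AmazonElasticMapReduceClient)ar.AsyncState;
            RunJobFlowResponse response = emr.EndRunJobFlow(ar);
            Console.WriteLine("Started job flow: " + response.JobFlowId);
        }, client);
    }
}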
 IAsyncResult invokeRunJobFlow(RunJobFlowRequest runJobFlowRequest, AsyncCallback callback, object state, bool synchronized)
 {
     IRequest irequest = new RunJobFlowRequestMarshaller().Marshall(runJobFlowRequest);
     var unmarshaller = RunJobFlowResponseUnmarshaller.GetInstance();
     AsyncResult result = new AsyncResult(irequest, callback, state, synchronized, signer, unmarshaller);
     Invoke(result);
     return result;
 }
 /// <summary>
 /// Initiates the asynchronous execution of the RunJobFlow operation.
 /// <seealso cref="Amazon.ElasticMapReduce.IAmazonElasticMapReduce.RunJobFlow"/>
 /// </summary>
 /// 
 /// <param name="runJobFlowRequest">Container for the necessary parameters to execute the RunJobFlow operation on
 ///          AmazonElasticMapReduce.</param>
 /// <param name="callback">An AsyncCallback delegate that is invoked when the operation completes.</param>
 /// <param name="state">A user-defined state object that is passed to the callback procedure. Retrieve this object from within the callback
 ///          procedure using the AsyncState property.</param>
 /// 
 /// <returns>An IAsyncResult that can be used to poll or wait for results, or both; this value is also needed when invoking EndRunJobFlow
 ///         operation.</returns>
 public IAsyncResult BeginRunJobFlow(RunJobFlowRequest runJobFlowRequest, AsyncCallback callback, object state)
 {
     return invokeRunJobFlow(runJobFlowRequest, callback, state, false);
 }
 /// <summary>
 /// <para> RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the
 /// cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in
 /// Amazon S3. If the JobFlowInstancesConfig <c>KeepJobFlowAliveWhenNoSteps</c> parameter is set to <c>TRUE</c>, the job flow will transition
 /// to the WAITING state rather than shutting down once the steps have completed. </para> <para>For additional protection, you can set the
 /// JobFlowInstancesConfig <c>TerminationProtected</c> parameter to <c>TRUE</c> to lock the job flow and prevent it from being terminated by API
 /// call, user intervention, or in the event of a job flow error.</para> <para>A maximum of 256 steps are allowed in each job flow.</para>
 /// <para>If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data.
 /// You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries
 /// directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to <a
 /// href="http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html" >Add More than 256 Steps to a Job Flow</a>
 /// in the <i>Amazon Elastic MapReduce Developer's Guide</i> .</para> <para>For long running job flows, we recommend that you periodically store
 /// your results.</para>
 /// </summary>
 /// 
 /// <param name="runJobFlowRequest">Container for the necessary parameters to execute the RunJobFlow service method on
 ///          AmazonElasticMapReduce.</param>
 /// 
 /// <returns>The response from the RunJobFlow service method, as returned by AmazonElasticMapReduce.</returns>
 /// 
 /// <exception cref="InternalServerErrorException"/>
 public RunJobFlowResponse RunJobFlow(RunJobFlowRequest runJobFlowRequest)
 {
     IAsyncResult asyncResult = invokeRunJobFlow(runJobFlowRequest, null, null, true);
     return EndRunJobFlow(asyncResult);
 }
 /// <summary>
 /// <para> RunJobFlow creates and starts running a new job flow. The job
 /// flow will run the steps specified. Once the job flow completes, the
 /// cluster is stopped and the HDFS partition is lost. To prevent loss of
 /// data, configure the last step of the job flow to store results in
 /// Amazon S3. If the JobFlowInstancesConfig <c>KeepJobFlowAliveWhenNoSteps</c>
 /// parameter is set to <c>TRUE</c>, the job flow will transition to the
 /// WAITING state rather than shutting down once the steps have completed.
 /// </para> <para>A maximum of 256 steps are allowed in each job
 /// flow.</para> <para>For long running job flows, we recommend that you
 /// periodically store your results.</para>
 /// </summary>
 /// 
 /// <param name="runJobFlowRequest">Container for the necessary parameters
 ///           to execute the RunJobFlow service method on
 ///           AmazonElasticMapReduce.</param>
 /// 
 /// <returns>The response from the RunJobFlow service method, as returned
 ///         by AmazonElasticMapReduce.</returns>
 /// 
 /// <exception cref="InternalServerErrorException"/>
 public RunJobFlowResponse RunJobFlow(RunJobFlowRequest runJobFlowRequest)
 {
     IRequest<RunJobFlowRequest> request = new RunJobFlowRequestMarshaller().Marshall(runJobFlowRequest);
     RunJobFlowResponse response = Invoke<RunJobFlowRequest, RunJobFlowResponse> (request, this.signer, RunJobFlowResponseUnmarshaller.GetInstance());
     return response;
 }
        /// <summary>
        /// <para> RunJobFlow creates and starts running a new job flow. The job flow will run the steps specified. Once the job flow completes, the
        /// cluster is stopped and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in
        /// Amazon S3. If the JobFlowInstancesConfig <c>KeepJobFlowAliveWhenNoSteps</c> parameter is set to <c>TRUE</c>, the job flow will transition
        /// to the WAITING state rather than shutting down once the steps have completed. </para> <para>For additional protection, you can set the
        /// JobFlowInstancesConfig <c>TerminationProtected</c> parameter to <c>TRUE</c> to lock the job flow and prevent it from being terminated by API
        /// call, user intervention, or in the event of a job flow error.</para> <para>A maximum of 256 steps are allowed in each job flow.</para>
        /// <para>If your job flow is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data.
        /// You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries
        /// directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, go to <a href="http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html">Add More than 256 Steps to a Job Flow</a>
        /// in the <i>Amazon Elastic MapReduce Developer's Guide</i>.</para> <para>For long running job flows, we recommend that you periodically store
        /// your results.</para>
        /// </summary>
        /// 
        /// <param name="runJobFlowRequest">Container for the necessary parameters to execute the RunJobFlow service method on
        /// AmazonElasticMapReduce.</param>
        /// 
        /// <returns>The response from the RunJobFlow service method, as returned by AmazonElasticMapReduce.</returns>
        /// 
        /// <exception cref="T:Amazon.ElasticMapReduce.Model.InternalServerErrorException" />
        /// <param name="cancellationToken">
        ///     A cancellation token that can be used by other objects or threads to receive notice of cancellation.
        /// </param>
		public async Task<RunJobFlowResponse> RunJobFlowAsync(RunJobFlowRequest runJobFlowRequest, CancellationToken cancellationToken = default(CancellationToken))
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.GetInstance();
            var response = await Invoke<IRequest, RunJobFlowRequest, RunJobFlowResponse>(runJobFlowRequest, marshaller, unmarshaller, signer, cancellationToken)
                .ConfigureAwait(continueOnCapturedContext: false);
            return response;
        }
        public override ProvisionAddOnResult Provision(AddonProvisionRequest request)
        {
            // Start pessimistic; IsSuccess is only set to true once the job flow has been started.
            var provisionResult = new ProvisionAddOnResult("") { IsSuccess = false };
            AddonManifest manifest = request.Manifest;
            string developerOptions = request.DeveloperOptions;

            try
            {
                IAmazonElasticMapReduce client;
                EMRDeveloperOptions devOptions;

                var parseOptionsResult = ParseDevOptions(developerOptions, out devOptions);
                if (!parseOptionsResult.IsSuccess)
                {
                    provisionResult.EndUserMessage = parseOptionsResult.EndUserMessage;
                    return provisionResult;
                }

                var establishClientResult = EstablishClient(manifest, devOptions, out client);
                if (!establishClientResult.IsSuccess)
                {
                    provisionResult.EndUserMessage = establishClientResult.EndUserMessage;
                    return provisionResult;
                }

                var stepFactory = new StepFactory();

                StepConfig enabledebugging = null;

                if (devOptions.EnableDebugging)
                {
                    enabledebugging = new StepConfig
                    {
                        Name = "Enable debugging",
                        ActionOnFailure = "TERMINATE_JOB_FLOW",
                        HadoopJarStep = stepFactory.NewEnableDebuggingStep()
                    };
                }

                var installHive = new StepConfig
                {
                    Name = "Install Hive",
                    ActionOnFailure = "TERMINATE_JOB_FLOW",
                    HadoopJarStep = stepFactory.NewInstallHiveStep()
                };

                var instanceConfig = new JobFlowInstancesConfig
                {
                    Ec2KeyName = devOptions.Ec2KeyName,
                    HadoopVersion = "0.20",
                    InstanceCount = devOptions.InstanceCount,
                    // this is important. the EMR job flow must be kept alive for the application to see it during provisioning
                    KeepJobFlowAliveWhenNoSteps = true,
                    MasterInstanceType = devOptions.MasterInstanceType,
                    SlaveInstanceType = devOptions.SlaveInstanceType
                };

                var _request = new RunJobFlowRequest
                {
                    Name = devOptions.JobFlowName,
                    Steps = { installHive },
                    // revisit this one in ne
                    LogUri = "s3://myawsbucket",
                    Instances = instanceConfig
                };

                // if debugging is enabled, add the debugging step to the top of the list of steps.
                if (devOptions.EnableDebugging)
                {
                    _request.Steps.Insert(0, enabledebugging);
                }

                var result = client.RunJobFlow(_request);

                // RunJobFlow returns synchronously; the response carries the new JobFlowId.
                if (string.IsNullOrEmpty(result.JobFlowId))
                {
                    throw new InvalidOperationException("RunJobFlow did not return a JobFlowId.");
                }

                provisionResult.IsSuccess = true;
                provisionResult.ConnectionData = result.JobFlowId;
            }
            catch (Exception e)
            {
                provisionResult.EndUserMessage = e.Message;
            }

            return provisionResult;
        }
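Because the provisioner keeps the job flow alive (KeepJobFlowAliveWhenNoSteps = true) and the application only sees the cluster once it is usable, a caller may want to poll until the job flow reaches the WAITING state after RunJobFlow returns. A hedged sketch using DescribeJobFlows from the same era of the API is below; the poll interval, terminal-state set, and property access path are illustrative, and on older SDK versions the list lives under response.DescribeJobFlowsResult.JobFlows.

using System;
using System.Threading;
using Amazon.ElasticMapReduce;
using Amazon.ElasticMapReduce.Model;

public static class JobFlowPolling
{
    // Polls until the job flow reaches WAITING or a terminal state; returns the final state seen.
    public static string WaitForWaitingState(IAmazonElasticMapReduce client, string jobFlowId)
    {
        while (true)
        {
            var describe = client.DescribeJobFlows(new DescribeJobFlowsRequest
            {
                JobFlowIds = { jobFlowId }
            });

            // ToString() covers SDK versions where State is a constant class rather than a string.
            string state = describe.JobFlows[0].ExecutionStatusDetail.State.ToString();

            if (state == "WAITING" || state == "COMPLETED" ||
                state == "FAILED" || state == "TERMINATED")
            {
                return state;
            }

            Thread.Sleep(TimeSpan.FromSeconds(30));   // illustrative poll interval
        }
    }
}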
        IAsyncResult invokeRunJobFlow(RunJobFlowRequest request, AsyncCallback callback, object state, bool synchronized)
        {
            var marshaller = new RunJobFlowRequestMarshaller();
            var unmarshaller = RunJobFlowResponseUnmarshaller.Instance;

            return Invoke(request, callback, state, synchronized, marshaller, unmarshaller, signer);
        }