/// <summary>
/// Job Management to stop a job by name if it is not stopped yet
/// </summary>
/// <param name="jobName">job name</param>
/// <returns>a <see cref="Result"/> describing the outcome of the stop request</returns>
/// <exception cref="GeneralException">thrown when the job is in an unexpected state</exception>
public async Task<Result> StopJob(string jobName)
{
    Ensure.NotNull(jobName, "jobName");
    // Fixed log typo: "stoping" -> "stopping"
    Logger.LogInformation($"stopping job '{jobName}'");

    // Refresh the job's state from the cluster before deciding what to do
    var job = await SyncJobState(jobName);
    var jobState = job.SyncResult?.JobState;
    switch (jobState)
    {
        case JobState.Starting:
        case JobState.Running:
            // Job is active: ask the cluster to stop it, then persist the new sync result
            var sparkJobClient = await ClusterManager.GetSparkJobClient(job.Cluster, job.DatabricksToken);
            var result = await sparkJobClient.StopJob(job.SyncResult.ClientCache);
            job.SyncResult = result;
            return await this.JobData.UpdateSyncResultByName(jobName, result);

        case JobState.Success:
        case JobState.Idle:
            // Nothing to stop
            return new SuccessResult($"job '{jobName}' has already been stopped");

        case JobState.Error:
            // Errored jobs are not running; report rather than attempt a stop
            return new SuccessResult($"job '{jobName}' is currently in an error state");

        default:
            throw new GeneralException($"Unexpected state '{jobState}' for job '{jobName}'");
    }
}
/// <summary>
/// Job management to start a job by name
/// </summary>
/// <param name="jobName">internal job name</param>
/// <returns>a <see cref="Result"/> describing the outcome of the start request</returns>
public async Task<Result> StartJob(string jobName)
{
    Ensure.NotNull(jobName, "jobName");
    Logger.LogInformation($"starting job '{jobName}'");

    // Require the job to be in the Idle state before submitting it
    var job = await EnsureJobState(jobName, JobState.Idle);

    // NOTE(review): pass the Databricks token for consistency with StopJob/SyncJobState,
    // which both call GetSparkJobClient(cluster, token) — confirm the single-argument
    // call here was not intentional.
    var sparkJobClient = await ClusterManager.GetSparkJobClient(job.Cluster, job.DatabricksToken);
    var result = await sparkJobClient.SubmitJob(job.Options);

    // Update job state
    job.SyncResult = result;
    return await this.JobData.UpdateSyncResultByName(jobName, result);
}
/// <summary>
/// Get job details for a specific job id from Job manager
/// </summary>
/// <param name="jobName">jobName</param>
/// <returns>the job config, with its sync result refreshed from the cluster</returns>
/// <exception cref="GeneralException">thrown when persisting the refreshed state fails</exception>
public async Task<SparkJobConfig> SyncJobState(string jobName)
{
    Ensure.NotNull(jobName, "jobName");
    Logger.LogInformation($"sync'ing job '{jobName}'");

    // Load the job config from internal storage
    var job = await JobData.GetByName(jobName);
    Ensure.NotNull(job, "job");

    // Start from the stored sync result, or a fresh one if none exists yet
    var syncState = job.SyncResult ?? new SparkJobSyncResult();
    var clientCache = syncState.ClientCache;
    if (clientCache != null && clientCache.Type != JTokenType.Null)
    {
        // A cached client handle exists: query the cluster for the current job info
        var client = await ClusterManager.GetSparkJobClient(job.Cluster, job.DatabricksToken);
        syncState = await client.GetJobInfo(clientCache);
        Logger.LogInformation($"got state for job '{jobName}'");
    }
    else
    {
        // No cached handle: treat the job as idle
        syncState.JobState = JobState.Idle;
        Logger.LogInformation($"no client cache for job '{jobName}', set state to '{syncState.JobState}'");
    }

    // If the refreshed state matches what is stored, there is nothing to persist
    if (syncState.Equals(job.SyncResult))
    {
        Logger.LogInformation($"done sync'ing the state for job '{jobName}', no changes");
        return job;
    }

    // State changed: persist it before returning
    var updateResult = await JobData.UpdateSyncResultByName(jobName, syncState);
    if (!updateResult.IsSuccess)
    {
        throw new GeneralException($"Failed to update job client cache for name '{jobName}': returned message '{updateResult.Message}'");
    }
    job.SyncResult = syncState;
    Logger.LogInformation($"done sync'ing the state for job '{jobName}', state updated");
    return job;
}