/// <summary>
/// Submits a Hive job to the cluster via WebHCat and returns the job id reported by the server.
/// </summary>
/// <param name="query">The Hive query to execute.</param>
/// <param name="resources">Additional file resources to ship with the job.</param>
/// <param name="parameters">Hive define parameters (name/value pairs).</param>
/// <param name="jobFolder">The job folder in which status/output for this job is written.</param>
/// <returns>The job id parsed from the WebHCat response content.</returns>
/// <exception cref="HttpResponseException">
/// Thrown with the raw response when the cluster returns a failure status, or with
/// 502 BadGateway when a success response cannot be parsed.
/// </exception>
public async Task <string> CreateHiveJob(string query, IEnumerable <string> resources, IDictionary <string, string> parameters, string jobFolder)
{
    Contract.AssertArgNotNullOrEmpty(query, "query");
    Contract.AssertArgNotNull(resources, "resources");
    Contract.AssertArgNotNull(parameters, "parameters");
    // BUG FIX: previously passed the jobFolder *value* as the parameter-name argument
    // (AssertArgNotNullOrEmpty(jobFolder, jobFolder)), which would produce a confusing
    // assertion message. Use the literal parameter name, consistent with the guards above.
    Contract.AssertArgNotNullOrEmpty(jobFolder, "jobFolder");

    var request = await hCatClient.CreateHiveJob(query, resources, parameters, jobFolder, callback : null);
    if (!request.IsSuccessStatusCode)
    {
        throw new HttpResponseException(request);
    }

    try
    {
        return(await GetJobIdFromServerResponse(request.Content));
    }
    catch (HttpParseException)
    {
        //502: The server, while acting as a gateway or proxy, received an invalid response from the upstream server.
        //This is caught and logged upstream.
        throw new HttpResponseException(HttpStatusCode.BadGateway);
    }
}
/// <summary>
/// Creates the passthrough action for a job-history GET based on the request uri.
/// One trailing segment after the jobs resource extension lists job history; two
/// segments return details for the supplied job id.
/// </summary>
/// <param name="requestUri">The uri of the incoming request.</param>
/// <param name="container">The cluster container the action targets.</param>
/// <param name="subscriptionId">The subscription that issued the request.</param>
/// <returns>A list-jobs or job-details passthrough action.</returns>
/// <exception cref="PassthroughActionProcessingException">
/// Thrown when the segment count matches no supported action.
/// </exception>
internal IPassthroughAction CreateJobHistoryAction(Uri requestUri, DataAccess.Context.ClusterContainer container, string subscriptionId)
{
    Contract.AssertArgNotNull(requestUri, "requestUri");
    Contract.AssertArgNotNull(requestUri.Segments, "requestUri.Segments");

    // Keep only the segments from the jobs resource extension onward.
    var lastSegs = requestUri.Segments.SkipWhile(i => i.ToLowerInvariant().Trim('/') != JobSubmissionConstants.ResourceExtentionName)
                                      .ToList();
    Contract.AssertArgNotNull(lastSegs, "lastSegs");

    //If there is one segment, then we know that no JobId was passed, and we should list job history.
    // (Use the List Count property rather than the LINQ Count() extension — CA1829.)
    if (lastSegs.Count == 1)
    {
        return(new ListJobsPassthroughAction(container, subscriptionId));
    }

    //If there are two segments, then a jobId was passed, and we should get job details.
    if (lastSegs.Count == 2)
    {
        return(new JobDetailsPassthroughAction(lastSegs.Last().Trim('/'), container, subscriptionId));
    }

    //If zero or more than two segments are present, then the request is invalid, and an action cannot be found to support it.
    throw new PassthroughActionProcessingException(string.Format(JobSubmissionConstants.JobHistoryRequestActionNotFound, container.DnsName, requestUri));
}
/// <summary>
/// Method that gets the correct passthrough action based on the given request.
/// </summary>
/// <param name="resourceExtension">The resource extension that was requested (ie. "/jobs").</param>
/// <param name="request">The actual http request.</param>
/// <param name="container">The container that the action should be performed on.</param>
/// <param name="subscriptionId">The subscription Id that made the request.</param>
/// <returns>The passthrough action matching the resource/verb combination.</returns>
/// <exception cref="PassthroughActionProcessingException">
/// Thrown when the payload cannot be turned into a job request, or when no action
/// supports the resource/method combination.
/// </exception>
public IPassthroughAction GetPassthroughAction(string resourceExtension, HttpRequestMessage request, DataAccess.Context.ClusterContainer container, string subscriptionId)
{
    // Guard added: resourceExtension was previously dereferenced (ToLowerInvariant below)
    // without validation, unlike every other argument at this entry point; a null would
    // have surfaced as a NullReferenceException instead of a clear assertion.
    Contract.AssertArgNotNull(resourceExtension, "resourceExtension");
    Contract.AssertArgNotNull(request, "request");
    Contract.AssertArgNotNull(request.RequestUri, "request.RequestUri");

    switch (resourceExtension.ToLowerInvariant())
    {
        case "jobs":
            switch (request.Method.ToString().ToLowerInvariant())
            {
                case "put":
                    // PUT /jobs => submit a new job built from the request payload.
                    var requestPayload = GetRequestPayload(request);
                    var jobRequest = jobRequestFactory.CreateJobRequest(requestPayload);
                    if (jobRequest == null)
                    {
                        throw new PassthroughActionProcessingException(string.Format(JobSubmissionConstants.InvalidJobRequestLogMessage, requestPayload));
                    }
                    return(CreateJobRequestAction(jobRequest, container, subscriptionId));
                case "get":
                    // GET /jobs[/<jobId>] => job history or job details.
                    return(CreateJobHistoryAction(request.RequestUri, container, subscriptionId));
            }
            break;
    }

    // Unsupported resource/method combination.
    throw new PassthroughActionProcessingException(string.Format(JobSubmissionConstants.PassThroughActionCreationFailedLogMessage, resourceExtension, request.Method));
}
/// <summary>
/// Configures the WebHCat HttpClient: base address, basic-auth header, credentials,
/// Hadoop user name and request timeout.
/// </summary>
/// <param name="baseUri">Root uri of the cluster.</param>
/// <param name="username">Cluster user name (also encoded into the basic-auth header).</param>
/// <param name="password">Cluster password.</param>
/// <param name="hadoopUserName">Hadoop user the requests are issued as.</param>
/// <param name="handler">Optional custom message handler; when null a credentialed handler is created.</param>
/// <param name="validateServerCert">Whether the server certificate should be validated.</param>
private void Initialize(Uri baseUri, string username, string password, string hadoopUserName, HttpMessageHandler handler, bool validateServerCert = true)
{
    Contract.AssertArgNotNull(baseUri, "baseUri");
    Contract.AssertArgNotNull(username, "username");
    Contract.AssertArgNotNull(password, "password");
    Contract.AssertArgNotNull(hadoopUserName, "hadoopUserName");

    this.validateServerCert = validateServerCert;
    this.hadoopUserName = hadoopUserName;

    // TODO - have version passed in
    // A caller-supplied handler wins; otherwise build one carrying the cluster credentials.
    client = handler != null
        ? new HttpClient(handler)
        : new HttpClient(new HttpClientHandler() { Credentials = new NetworkCredential(username, password) });

    client.BaseAddress = new Uri(baseUri, WebHCatResources.RelativeWebHCatPath);

    // The basic-auth header is set explicitly in addition to the handler credentials.
    var credentialBytes = Encoding.ASCII.GetBytes(username + ":" + password);
    client.DefaultRequestHeaders.Authorization =
        new AuthenticationHeaderValue("Basic", Convert.ToBase64String(credentialBytes));

    this.Timeout = WebHCatResources.DefaultHCatRequestTimout;
}
/// <summary>
/// Builds the ASV path of a new, uniquely named job folder inside the default jobs
/// container of the cluster's first ASV account.
/// </summary>
/// <param name="container">The cluster container whose deployment supplies the ASV account.</param>
/// <returns>The formatted ASV path for the new job folder.</returns>
internal string CreateJobFolder(DataAccess.Context.ClusterContainer container)
{
    Contract.AssertArgNotNull(container, "container");
    Contract.AssertArgNotNull(container.Deployment, "container.Deployment");
    Contract.AssertArgNotNull(container.Deployment.ASVAccounts, "container.Deployment.ASVAccounts");

    var account = container.Deployment.ASVAccounts.FirstOrDefault();
    Contract.AssertArgNotNull(account, "asvAccount");

    // A fresh GUID keeps each submission's status/output isolated from prior jobs.
    return string.Format(
        JobSubmissionConstants.AsvFormatString,
        JobSubmissionConstants.DefaultJobsContainer,
        account.AccountName,
        Guid.NewGuid().ToString());
}
/// <summary>
/// Synchronously reads the request body as a string.
/// </summary>
/// <param name="request">The incoming http request; must carry content.</param>
/// <returns>The request content as a string.</returns>
/// <exception cref="PassthroughActionProcessingException">
/// Thrown (with uri, method and failure message) when the content cannot be read.
/// </exception>
internal string GetRequestPayload(HttpRequestMessage request)
{
    Contract.AssertArgNotNull(request, "request");
    Contract.AssertArgNotNull(request.Content, "request.Content");
    try
    {
        // GetAwaiter().GetResult() (rather than Wait()/Result) surfaces the original
        // exception instead of an AggregateException wrapper, so ex.Message below
        // carries the real failure reason in the logged error.
        return request.Content.ReadAsStringAsync().GetAwaiter().GetResult();
    }
    catch (Exception ex)
    {
        throw new PassthroughActionProcessingException(
                  string.Format(JobSubmissionConstants.ContentReadFailureLogMessage, request.RequestUri, request.Method, ex.Message));
    }
}
/// <summary>
/// Initializes a new instance of the ClusterJobServiceProxy class, validating the
/// cluster address and creating the WebHCat client used for job operations.
/// </summary>
/// <param name="address">Absolute uri of the cluster.</param>
/// <param name="userName">Cluster user name.</param>
/// <param name="password">Cluster password.</param>
/// <param name="validateServerCert">Whether the cluster's server certificate should be validated.</param>
/// <exception cref="InvalidDataException">Thrown when the address is not an absolute uri.</exception>
public ClusterJobServiceProxy(string address, string userName, string password, bool validateServerCert)
{
    Contract.AssertArgNotNull(address, "address");
    Contract.AssertArgNotNull(userName, "userName");
    Contract.AssertArgNotNull(password, "password");

    ClusterAddress = address;
    ClusterUserName = userName;
    ClusterPassword = password;

    Uri clusterUri;
    if (!Uri.TryCreate(ClusterAddress, UriKind.Absolute, out clusterUri))
    {
        throw new InvalidDataException("ClusterAddress");
    }

    // Reuse the uri validated above instead of re-parsing the string (the old
    // `new Uri(ClusterAddress)` duplicated the parse TryCreate had already done).
    hCatClient = new WebHCatHttpClient(clusterUri, ClusterUserName, ClusterPassword, validateServerCert: validateServerCert);
}
//This code is tightly coupled to Templeton. If parsing fails, we capture the full json payload, the error
//then log it upstream.
/// <summary>
/// Extracts the job id property from a Templeton JSON response.
/// </summary>
/// <param name="content">The http content carrying the Templeton response.</param>
/// <returns>The job id as a string.</returns>
/// <exception cref="HttpParseException">Thrown when the payload cannot be parsed as expected.</exception>
internal async Task <string> GetJobIdFromServerResponse(HttpContent content)
{
    Contract.AssertArgNotNull(content, "content");
    try
    {
        var result = await content.ReadAsAsync <JObject>();
        Contract.Assert(result != null);
        JToken jobId;
        // BUG FIX: TryGetValue previously ran *inside* Contract.Assert(...). If that
        // assert is compiled out (conditional-compilation assert implementations do
        // this in release builds), the lookup's side effect vanishes and jobId stays
        // null. The call is hoisted out so it always executes.
        bool found = result.TryGetValue(JobSubmissionConstants.JobIdPropertyName, out jobId);
        Contract.Assert(found);
        Contract.Assert(jobId != null);
        return(jobId.ToString());
    }
    catch (Exception ex)
    {
        // Any parse/shape failure is reported as HttpParseException; the caller maps it to 502.
        throw new HttpParseException(ex.Message);
    }
}
//This code is tightly coupled to Templeton. If parsing fails, we capture the full json payload, the error
//then log it upstream.
/// <summary>
/// Parses a Templeton JSON array response into a list of job id strings.
/// An absent or empty array yields an empty list rather than null.
/// </summary>
/// <param name="content">The http content carrying the Templeton response.</param>
/// <returns>The job ids, or an empty list when the array has no values.</returns>
/// <exception cref="HttpParseException">Thrown when the payload cannot be parsed.</exception>
internal async Task <List <string> > GetJobIdListFromServerResponse(HttpContent content)
{
    Contract.AssertArgNotNull(content, "content");
    try
    {
        var parsed = await content.ReadAsAsync <JArray>();
        if (parsed == null || !parsed.HasValues)
        {
            return new List <string>();
        }
        return parsed.Values <string>().ToList();
    }
    catch (Exception ex)
    {
        throw new HttpParseException(ex.Message);
    }
}
/// <inheritdocs/>
public IClusterJobServiceProxy CreateClusterJobServiceProxy(DataAccess.Context.ClusterContainer container)
{
    Contract.AssertArgNotNull(container, "container");
    Contract.AssertArgNotNull(container.Deployment, "container.Deployment");

    // Prefer the CName mapping as the https host when one is configured; otherwise derive
    // the host from the cluster's DNS name via the standard CName format string.
    var clusterAddress = string.Format(
        "https://{0}:{1}",
        container.CNameMapping ?? string.Format(Constants.CNameMappingFormatString, container.DnsName),
        WebHCatResources.WebHCatDefaultPort);
    if (!string.IsNullOrEmpty(container.CNameMapping))
    {
        // NOTE(review): this logs the "bypass server cert validation" message when a CName
        // mapping IS present, yet the proxy below is constructed with
        // validateServerCert == !IsNullOrEmpty(CNameMapping), i.e. validation ENABLED in
        // exactly that case. One of the two conditions looks inverted — confirm intended
        // behavior against ClusterJobServiceProxy before changing either side.
        Log.LogResourceExtensionEvent(container.SubscriptionId,
                                      JobSubmissionConstants.ResourceExtentionName,
                                      container.DnsName,
                                      string.Format(
                                          JobSubmissionConstants.BypassServerCertValidationLogMessage, clusterAddress),
                                      TraceEventType.Information);
    }

    // validateServerCert is true only when a CName mapping exists.
    return(new ClusterJobServiceProxy(clusterAddress,
                                      container.Deployment.ClusterUsername,
                                      container.Deployment.GetClearTextClusterPassword(),
                                      !string.IsNullOrEmpty(container.CNameMapping)));
}
/// <summary>
/// Executes a function and converts any thrown exception into a PassthroughResponse
/// carrying an error payload. Specific to job submission, this wraps calls to Templeton
/// and handles both failures sending the request and errors returned by the
/// cluster/Templeton. This method never rethrows.
/// </summary>
/// <typeparam name="T">The return type of the function.</typeparam>
/// <param name="func">The function to be executed.</param>
/// <returns>
/// On success, a response wrapping the function's result with a null Error; on failure,
/// a response with null Data and an error describing the failure.
/// </returns>
protected async Task <PassthroughResponse> ExecuteAndHandleResponse <T>(Func <Task <T> > func)
{
    Contract.AssertArgNotNull(func, "func");
    try
    {
        var ret = await func();
        // Success: wrap the raw result with no error payload.
        return(new PassthroughResponse() { Data = ret, Error = null });
    }
    catch (ArgumentException ex)
    {
        // Invalid submission arguments: log and report as an invalid-request error.
        Log.LogResourceExtensionEvent(this.subscriptionId, JobSubmissionConstants.ResourceExtentionName, Container.DnsName, ex.Message, TraceEventType.Warning);
        var error = new PassthroughErrorResponse() { ErrorId = JobSubmissionConstants.InvalidJobSumbmissionRequestErrorId, StatusCode = HttpStatusCode.BadGateway };
        return(new PassthroughResponse() { Data = null, Error = error });
    }
    catch (HttpResponseException ex)
    {
        // The cluster answered with a failure status: map the status code onto one of the
        // job-submission error ids. When no response is attached, the generic id is kept.
        var errorId = JobSubmissionConstants.JobSubmissionFailedErrorId;
        if (ex.Response != null)
        {
            switch (ex.Response.StatusCode)
            {
                // Request-level failures keep the generic "submission failed" id
                // (listed explicitly, though identical to the default, to document intent).
                case HttpStatusCode.BadRequest:
                case HttpStatusCode.BadGateway:
                case HttpStatusCode.Unauthorized:
                    errorId = JobSubmissionConstants.JobSubmissionFailedErrorId;
                    break;
                // Statuses suggesting the cluster itself is unreachable or unhealthy.
                case HttpStatusCode.ServiceUnavailable:
                case HttpStatusCode.InternalServerError:
                case HttpStatusCode.NotFound:
                case HttpStatusCode.GatewayTimeout:
                    errorId = JobSubmissionConstants.ClusterUnavailableErrorId;
                    break;
                default:
                    errorId = JobSubmissionConstants.JobSubmissionFailedErrorId;
                    break;
            }
            // Log the cluster's status/reason while we still have the response in hand.
            Log.LogResourceExtensionEvent(this.subscriptionId, JobSubmissionConstants.ResourceExtentionName, Container.DnsName,
                                          string.Format(
                                              JobSubmissionConstants.ClusterRequestErrorLogMessage,
                                              this.Container.DnsName,
                                              ex.Response.StatusCode,
                                              ex.Response.ReasonPhrase),
                                          TraceEventType.Warning);
        }
        Log.LogResourceExtensionEvent(this.subscriptionId, JobSubmissionConstants.ResourceExtentionName, Container.DnsName,
                                      string.Format(
                                          JobSubmissionConstants.JobSubmissionFailedLogMessage,
                                          this.Container.DnsName,
                                          errorId),
                                      TraceEventType.Warning);
        var error = new PassthroughErrorResponse() { ErrorId = errorId, StatusCode = HttpStatusCode.BadGateway };
        return(new PassthroughResponse() { Data = null, Error = error });
    }
    catch (Exception ex)
    {
        // Anything else is unexpected: log it and return the generic submission-failed error.
        Log.LogResourceExtensionEvent(this.subscriptionId, JobSubmissionConstants.ResourceExtentionName, Container.DnsName,
                                      string.Format(
                                          JobSubmissionConstants.UnkownJobSubmissionErrorLogMessage,
                                          this.Container.DnsName,
                                          ex.Message),
                                      TraceEventType.Warning);
        var error = new PassthroughErrorResponse() { ErrorId = JobSubmissionConstants.JobSubmissionFailedErrorId, StatusCode = HttpStatusCode.BadGateway };
        return(new PassthroughResponse() { Data = null, Error = error });
    }
}
/// <summary>
/// Initializes a new instance of the JobRequestPassthroughAction class.
/// </summary>
/// <param name="container">The cluster container the action targets.</param>
/// <param name="subscriptionId">The subscription that issued the request.</param>
protected JobRequestPassthroughAction(DataAccess.Context.ClusterContainer container, string subscriptionId)
    : base(subscriptionId)
{
    Contract.AssertArgNotNull(container, "container");

    this.Container = container;
}
//This code is tightly coupled to Templeton. If parsing fails, we capture the full json payload, the error
//then log it upstream. I've left the constants here, since this is A) The only place they are used and B) there are a lot of them.
//In the future, if we see this being something that is reused, they could be moved.
//For a sample response see the large comment at the end of this file.
/// <summary>
/// Parses a Templeton job-status JSON response into a JobDetails instance.
/// </summary>
/// <param name="content">The http content carrying the Templeton response.</param>
/// <returns>The populated JobDetails.</returns>
/// <exception cref="HttpParseException">
/// Thrown when any part of the payload is missing or malformed; the message includes
/// the raw JSON (truncated to 4000 chars) for upstream logging.
/// </exception>
internal async Task <JobDetails> GetJobDetailsFromServerResponse(HttpContent content)
{
    // Keys/paths within Templeton's job-status JSON payload.
    const string userArgsSection = "userargs";
    const string defineSection = "define";
    const string statusSection = "status";
    const string jobNameKey = "hdInsightJobName=";
    const string statusDirectory = "statusdir";
    const string exitCodeValue = "exitValue";
    const string startTimeValue = "startTime";
    const string jobStatusValue = "runState";
    const string hiveQueryValue = "execute";
    const string outputFile = "/stdout";
    const string errorFile = "/stderr";
    Contract.AssertArgNotNull(content, "content");
    // Declared outside the try so the raw payload is available to the error path below.
    JObject result = null;
    try
    {
        result = await content.ReadAsAsync <JObject>();
        Contract.Assert(result != null);
        var outputAsvPath = (string)result[userArgsSection][statusDirectory];
        var outputFolderUri = GetOutputFolderUri(outputAsvPath);
        // The job name travels through the Hive "define" list as "hdInsightJobName=<name>";
        // find that entry and take the text after the '='.
        var defines = result[userArgsSection][defineSection].ToArray();
        var jobNameItem = (string)defines.First(s => ((string)s).Contains(jobNameKey));
        var jobName = jobNameItem.Split('=')[1];
        var details = new JobDetails
        {
            ExitCode = (int)result[exitCodeValue],
            SubmissionTime = result[statusSection][startTimeValue].ToString(),
            Name = jobName,
            StatusCode = (JobStatusCode)Enum.Parse(typeof(JobStatusCode), result[statusSection][jobStatusValue].ToString()),
            // stdout/stderr live alongside the job's status directory.
            PhysicalOutputPath = new Uri(outputFolderUri + outputFile),
            LogicalOutputPath = outputAsvPath + outputFile,
            ErrorOutputPath = outputFolderUri + errorFile,
            Query = (string)result[userArgsSection][hiveQueryValue],
        };
        return(details);
    }
    catch (Exception ex)
    {
        // Capture the payload for the upstream log; any missing key, bad cast, or absent
        // define entry above funnels through here.
        var rawJson = string.Empty;
        if (result != null)
        {
            rawJson =
                result.ToString();
            if (rawJson.Length > 4000)
            {
                //truncating the response if its larger than 4000 char, in order to prevent large data in the logs
                rawJson = rawJson.Substring(0, 4000);
            }
        }
        throw new HttpParseException(string.Format(JobSubmissionConstants.UnableToParseJobDetailsLogMessage, ex.Message, rawJson));
    }
}
/// <summary>
/// Initializes a new instance of the HadoopApplicationHistoryRestClient class.
/// </summary>
/// <param name="readProxy">
/// The REST read client to use for performing requests; must not be null.
/// </param>
public HadoopApplicationHistoryRestClient(IHadoopApplicationHistoryRestReadClient readProxy)
{
    Contract.AssertArgNotNull(readProxy, "readProxy");

    this.readProxy = readProxy;
}