private void Log(ILogMessage message, LogLevel level)
{
    switch (level)
    {
        case LogLevel.Error:
            _log.LogError(message.Message);
            break;
        case LogLevel.Warning:
            _log.LogWarning(message.Message);
            break;
        case LogLevel.Minimal:
            // Minimal-verbosity messages should always be visible, so map them to the
            // highest MSBuild importance.
            _log.LogMessage(MessageImportance.High, message.Message);
            break;
        case LogLevel.Information:
            _log.LogMessage(MessageImportance.Normal, message.Message);
            break;
        case LogLevel.Debug:
        case LogLevel.Verbose:
        default:
            // Debug/Verbose messages are only interesting at detailed or diagnostic
            // verbosity, so map them to the lowest MSBuild importance.
            _log.LogMessage(MessageImportance.Low, message.Message);
            break;
    }
}
/// <summary>
/// Deletes the state file from disk.
/// </summary>
/// <param name="stateFile">Path of the state file to delete.</param>
/// <param name="log">Logger used to report a warning if the delete fails.</param>
internal static void DeleteFile(string stateFile, TaskLoggingHelper log)
{
    try
    {
        if (!string.IsNullOrEmpty(stateFile) && File.Exists(stateFile))
        {
            File.Delete(stateFile);
        }
    }
    catch (Exception e)
    {
        // If there was a problem deleting the file (for example, it is read-only or
        // locked on disk), eat the exception and log a warning. Otherwise, rethrow.
        ExceptionHandling.RethrowUnlessFileIO(e);
        log.LogWarning("Could not delete state file {0} ({1})", stateFile, e.Message);
    }
}
/// <summary>
/// Reads the specified file from disk into a StateFileBase derived object.
/// </summary>
/// <param name="stateFile">Path of the state file to read.</param>
/// <param name="log">Logger used to report a warning if the read fails.</param>
/// <param name="requiredReturnType">The type the deserialized cache must be compatible with.</param>
/// <returns>The deserialized cache, or null if it could not be read.</returns>
internal static StateFileBase DeserializeCache(string stateFile, TaskLoggingHelper log, Type requiredReturnType)
{
    StateFileBase retVal = null;

    // Read the cache from disk if one exists; otherwise return null and let the caller
    // build a new one.
    try
    {
        if (!string.IsNullOrEmpty(stateFile) && File.Exists(stateFile))
        {
            using (FileStream s = new FileStream(stateFile, FileMode.Open))
            {
                BinaryFormatter formatter = new BinaryFormatter();
                retVal = (StateFileBase)formatter.Deserialize(s);

                if (retVal != null && !requiredReturnType.IsInstanceOfType(retVal))
                {
                    log.LogWarning("Could not read state file {0} (Incompatible state file type)", stateFile);
                    retVal = null;
                }
            }
        }
    }
    catch (Exception e)
    {
        // The deserialization process can throw just about any exception imaginable, so
        // catch them all here. Not being able to deserialize the cache is not an error,
        // but let the user know anyway. Don't hold up processing just because the file
        // could not be read.
        log.LogWarning("Could not read state file {0} ({1})", stateFile, e.Message);
    }

    return retVal;
}
/// <summary>
/// Writes the contents of this object out to the specified file.
/// </summary>
/// <param name="stateFile">Path of the state file to write.</param>
/// <param name="log">Logger used to report a warning if the write fails.</param>
internal virtual void SerializeCache(string stateFile, TaskLoggingHelper log)
{
    try
    {
        if (!string.IsNullOrEmpty(stateFile))
        {
            if (File.Exists(stateFile))
            {
                File.Delete(stateFile);
            }

            using (FileStream s = new FileStream(stateFile, FileMode.CreateNew))
            {
                BinaryFormatter formatter = new BinaryFormatter();
                formatter.Serialize(s, this);
            }
        }
    }
    catch (Exception e)
    {
        // If there was a problem writing the file (for example, it is read-only or
        // locked on disk), eat the exception and log a warning. Otherwise, rethrow.
        ExceptionHandling.RethrowUnlessFileIO(e);

        // Not being able to serialize the cache is not an error, but let the user know
        // anyway. Don't hold up processing just because the file could not be written.
        log.LogWarning("Could not write state file {0} ({1})", stateFile, e.Message);
    }
}
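// A minimal usage sketch, not from the original source: a hypothetical
// StateFileBase-derived cache ("MyTaskCache") round-trips through SerializeCache and
// DeserializeCache, and DeleteFile clears a stale cache. [Serializable] is required
// because these helpers use BinaryFormatter; "stateFile" and "log" stand in for the
// usual StateFile task parameter and TaskLoggingHelper.
[Serializable]
internal sealed class MyTaskCache : StateFileBase
{
    internal Dictionary<string, DateTime> LastWriteTimes = new Dictionary<string, DateTime>();
}

private static void SaveAndRestoreExample(string stateFile, TaskLoggingHelper log)
{
    // Load the previous cache if it exists and is the expected type; otherwise start fresh.
    MyTaskCache cache = StateFileBase.DeserializeCache(stateFile, log, typeof(MyTaskCache)) as MyTaskCache
                        ?? new MyTaskCache();

    cache.LastWriteTimes["example.dll"] = DateTime.UtcNow;

    // Persist the updated cache for the next incremental build.
    cache.SerializeCache(stateFile, log);

    // If the cache ever becomes unusable, remove it so the next build starts clean:
    // StateFileBase.DeleteFile(stateFile, log);
}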
public async Task<bool> CheckIfBlobExistsAsync(string blobPath)
{
    string url = $"{FeedContainerUrl}/{blobPath}?comp=metadata";

    using (HttpClient client = new HttpClient())
    {
        const int MaxAttempts = 15;
        // Add a bit of randomness to the retry delay.
        var rng = new Random();
        int retryCount = MaxAttempts;

        // Used to make sure a TaskCanceledException comes from timeouts.
        CancellationTokenSource cancelTokenSource = new CancellationTokenSource();

        while (true)
        {
            try
            {
                client.DefaultRequestHeaders.Clear();
                var request = AzureHelper.RequestMessage("GET", url, AccountName, AccountKey).Invoke();
                using (HttpResponseMessage response = await client.SendAsync(request, cancelTokenSource.Token))
                {
                    if (response.IsSuccessStatusCode)
                    {
                        Log.LogMessage(
                            MessageImportance.Low,
                            $"Blob {blobPath} exists for {AccountName}: Status Code:{response.StatusCode} Status Desc: {await response.Content.ReadAsStringAsync()}");
                    }
                    else
                    {
                        Log.LogMessage(
                            MessageImportance.Low,
                            $"Blob {blobPath} does not exist for {AccountName}: Status Code:{response.StatusCode} Status Desc: {await response.Content.ReadAsStringAsync()}");
                    }

                    return response.IsSuccessStatusCode;
                }
            }
            catch (HttpRequestException toLog)
            {
                if (retryCount <= 0)
                {
                    Log.LogError($"Unable to check for existence of blob {blobPath} in {AccountName} after {MaxAttempts} retries.");
                    throw;
                }

                Log.LogWarning("Exception thrown while trying to detect if blob already exists in feed:");
                Log.LogWarningFromException(toLog, true);
            }
            catch (TaskCanceledException possibleTimeoutToLog)
            {
                // A cancellation that came from our own token is a real cancellation; rethrow it.
                // Anything else indicates an HttpClient timeout, which is retried below.
                if (possibleTimeoutToLog.CancellationToken == cancelTokenSource.Token)
                {
                    throw;
                }

                if (retryCount <= 0)
                {
                    Log.LogError($"Unable to check for existence of blob {blobPath} in {AccountName} after {MaxAttempts} retries.");
                    throw;
                }

                Log.LogWarning("Exception thrown while trying to detect if blob already exists in feed:");
                Log.LogWarningFromException(possibleTimeoutToLog, true);
            }

            --retryCount;
            Log.LogWarning($"Failed to check for existence of blob {blobPath}. {retryCount} attempts remaining");

            // Linear backoff with jitter: later attempts wait longer.
            int delay = (MaxAttempts - retryCount) * rng.Next(1, 7);
            await Task.Delay(delay * 1000);
        }
    }
}
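// Illustration only, not from the original source: the backoff above sleeps
// (MaxAttempts - retryCount) * rng.Next(1, 7) seconds, i.e. the n-th retry waits
// between n and 6 * n seconds. This helper prints the possible range per attempt.
public static void PrintBackoffSchedule(int maxAttempts = 15)
{
    for (int attempt = 1; attempt <= maxAttempts; attempt++)
    {
        // rng.Next(1, 7) returns 1..6 inclusive, so the delay range grows linearly.
        Console.WriteLine($"Retry {attempt}: {attempt * 1}s - {attempt * 6}s");
    }
}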
public static async Task<HttpResponseMessage> RequestWithRetry(
    TaskLoggingHelper loggingHelper,
    HttpClient client,
    Func<HttpRequestMessage> createRequest,
    Func<HttpResponseMessage, bool> validationCallback = null,
    int retryCount = 5,
    int retryDelaySeconds = 5)
{
    if (loggingHelper == null)
        throw new ArgumentNullException(nameof(loggingHelper));
    if (client == null)
        throw new ArgumentNullException(nameof(client));
    if (createRequest == null)
        throw new ArgumentNullException(nameof(createRequest));
    if (retryCount < 1)
        throw new ArgumentException(nameof(retryCount));
    if (retryDelaySeconds < 1)
        throw new ArgumentException(nameof(retryDelaySeconds));

    int retries = 0;
    HttpResponseMessage response = null;

    // Add a bit of randomness to the retry delay.
    var rng = new Random();

    while (retries < retryCount)
    {
        if (retries > 0)
        {
            if (response != null)
            {
                response.Dispose();
                response = null;
            }

            int delay = retryDelaySeconds * retries * rng.Next(1, 5);
            loggingHelper.LogMessage(MessageImportance.Low, "Waiting {0} seconds before retry", delay);
            await System.Threading.Tasks.Task.Delay(delay * 1000);
        }

        try
        {
            using (var request = createRequest())
                response = await client.SendAsync(request);
        }
        catch (Exception e)
        {
            loggingHelper.LogWarningFromException(e, true);

            // If this is the final iteration, let the exception bubble up.
            if (retries + 1 == retryCount)
                throw;
        }

        // Response can be null if we failed to send the request.
        if (response != null)
        {
            if (validationCallback == null)
            {
                // Check if the response code is within the range of failures.
                if (IsWithinRetryRange(response.StatusCode))
                {
                    loggingHelper.LogWarning("Request failed with status code {0}", response.StatusCode);
                }
                else
                {
                    loggingHelper.LogMessage(MessageImportance.Low, "Response completed with status code {0}", response.StatusCode);
                    return response;
                }
            }
            else
            {
                bool isSuccess = validationCallback(response);
                if (!isSuccess)
                {
                    loggingHelper.LogMessage("Validation callback returned retry for status code {0}", response.StatusCode);
                }
                else
                {
                    loggingHelper.LogMessage("Validation callback returned success for status code {0}", response.StatusCode);
                    return response;
                }
            }
        }

        ++retries;
    }

    // Retry count exceeded.
    loggingHelper.LogWarning("Retry count {0} exceeded", retryCount);

    // Set some default values in case the response is null.
    var statusCode = "None";
    var contentStr = "Null";
    if (response != null)
    {
        statusCode = response.StatusCode.ToString();
        contentStr = await response.Content.ReadAsStringAsync();
        response.Dispose();
    }

    throw new HttpRequestException(string.Format("Request failed with status {0} response {1}", statusCode, contentStr));
}
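// A minimal usage sketch, not from the original source: a validation callback that
// treats 404 as a valid "does not exist" answer so it is not retried, while transient
// failures (e.g. 500/503 or dropped connections) still go through the retry loop above.
// The URL and method name here are made up for illustration.
public static async Task<bool> ResourceExistsAsync(TaskLoggingHelper log, HttpClient client, string url)
{
    using (HttpResponseMessage response = await RequestWithRetry(
        log,
        client,
        () => new HttpRequestMessage(HttpMethod.Head, url),
        // Accept success and 404; any other status code triggers another attempt.
        r => r.IsSuccessStatusCode || r.StatusCode == HttpStatusCode.NotFound))
    {
        return response.IsSuccessStatusCode;
    }
}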
public void LogError(string data)
{
    // There are cases where Sleet fails and our own retry makes the operation succeed,
    // but if Sleet logs an error the whole build leg is marked as failed even though it
    // actually succeeded. So we downgrade the message to a warning here, and only log a
    // real error if the retry did not help.
    _log.LogWarning($"This error is being logged as a warning: {data}");
}