public async Task<bool> SendTracesAsync(Span[][] traces)
{
    // retry up to 5 times with exponential back-off
    var retryLimit = 5;
    var retryCount = 1;
    var sleepDuration = 100; // in milliseconds
    var traceIds = GetUniqueTraceIds(traces);

    while (true)
    {
        var request = _apiRequestFactory.Create(_tracesEndpoint);

        // Set additional headers
        request.AddHeader(AgentHttpHeaderNames.TraceCount, traceIds.Count.ToString());

        if (_frameworkDescription != null)
        {
            request.AddHeader(AgentHttpHeaderNames.LanguageInterpreter, _frameworkDescription.Name);
            request.AddHeader(AgentHttpHeaderNames.LanguageVersion, _frameworkDescription.ProductVersion);
        }

        if (_containerId != null)
        {
            request.AddHeader(AgentHttpHeaderNames.ContainerId, _containerId);
        }

        bool success = false;
        Exception exception = null;

        try
        {
            success = await SendTracesAsync(traces, request).ConfigureAwait(false);
        }
        catch (Exception ex)
        {
#if DEBUG
            if (ex.InnerException is InvalidOperationException ioe)
            {
                Log.Error(ex, "An error occurred while sending traces to the agent at {0}", _tracesEndpoint);
                return false;
            }
#endif
            exception = ex;
        }

        // Error handling block
        if (!success)
        {
            // Exit if we've hit our retry limit
            if (retryCount >= retryLimit)
            {
                // stop retrying
                Log.Error(exception, "An error occurred while sending traces to the agent at {0}", _tracesEndpoint);
                _statsd?.Send();
                return false;
            }

            // Before retry delay
            bool isSocketException = false;
            Exception innerException = exception;

            while (innerException != null)
            {
                if (innerException is SocketException)
                {
                    isSocketException = true;
                    break;
                }

                innerException = innerException.InnerException;
            }

            if (isSocketException)
            {
                Log.Error(exception, "Unable to communicate with the trace agent at {0}", _tracesEndpoint);
                TracingProcessManager.TryForceTraceAgentRefresh();
            }

            // Execute retry delay
            await Task.Delay(sleepDuration).ConfigureAwait(false);
            retryCount++;
            sleepDuration *= 2;

            // After retry delay
            if (isSocketException)
            {
                // Ensure we have the most recent port before trying again
                TracingProcessManager.TraceAgentMetadata.ForcePortFileRead();
            }

            continue;
        }

        _statsd?.Send();
        return true;
    }
}
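All three variants call a GetUniqueTraceIds helper that is not shown here. The sketch below is a hypothetical implementation, assuming the Span type exposes a ulong TraceId property and that System.Collections.Generic is imported; it only illustrates why the TraceCount header counts distinct traces rather than spans.

// Hypothetical helper (not part of the original listing): collects the distinct trace ids in the
// batch so the TraceCount header reflects the number of traces, not the number of spans.
// Assumes Span exposes a ulong TraceId property.
private static HashSet<ulong> GetUniqueTraceIds(Span[][] traces)
{
    var uniqueTraceIds = new HashSet<ulong>();

    foreach (var trace in traces)
    {
        foreach (var span in trace)
        {
            uniqueTraceIds.Add(span.TraceId);
        }
    }

    return uniqueTraceIds;
}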
public async Task SendTracesAsync(Span[][] traces)
{
    // retry up to 5 times with exponential back-off
    var retryLimit = 5;
    var retryCount = 1;
    var sleepDuration = 100; // in milliseconds
    var traceIds = GetUniqueTraceIds(traces);

    while (true)
    {
        var request = _apiRequestFactory.Create(_tracesEndpoint);

        // Set additional headers
        request.AddHeader(AgentHttpHeaderNames.TraceCount, traceIds.Count.ToString());

        if (_frameworkDescription != null)
        {
            request.AddHeader(AgentHttpHeaderNames.LanguageInterpreter, _frameworkDescription.Name);
            request.AddHeader(AgentHttpHeaderNames.LanguageVersion, _frameworkDescription.ProductVersion);
        }

        if (_containerId != null)
        {
            request.AddHeader(AgentHttpHeaderNames.ContainerId, _containerId);
        }

        IApiResponse response;

        try
        {
            try
            {
                _statsd?.AppendIncrementCount(TracerMetricNames.Api.Requests);
                response = await request.PostAsync(traces, _formatterResolver).ConfigureAwait(false);
            }
            catch
            {
                // count the exceptions thrown while sending the request,
                // not responses with error status codes
                // (those are handled by the status code check below)
                _statsd?.AppendIncrementCount(TracerMetricNames.Api.Errors);
                throw;
            }

            if (_statsd != null)
            {
                // don't bother creating the tags array if trace metrics are disabled
                string[] tags = { $"status:{response.StatusCode}" };

                // count every response, grouped by status code
                _statsd.AppendIncrementCount(TracerMetricNames.Api.Responses, tags: tags);
            }

            // Attempt a retry if the status code is not in the 2xx range
            if (response.StatusCode < 200 || response.StatusCode >= 300)
            {
                if (retryCount >= retryLimit)
                {
                    // stop retrying
                    Log.Error("An error occurred while sending traces to the agent at {Endpoint}", _tracesEndpoint);
                    return;
                }

                // retry
                await Task.Delay(sleepDuration).ConfigureAwait(false);
                retryCount++;
                sleepDuration *= 2;
                continue;
            }
        }
        catch (Exception ex)
        {
#if DEBUG
            if (ex.InnerException is InvalidOperationException ioe)
            {
                Log.Error(ex, "An error occurred while sending traces to the agent at {Endpoint}", _tracesEndpoint);
                return;
            }
#endif
            var isSocketException = false;

            if (ex.InnerException is SocketException se)
            {
                isSocketException = true;
                Log.Error(se, "Unable to communicate with the trace agent at {Endpoint}", _tracesEndpoint);
                TracingProcessManager.TryForceTraceAgentRefresh();
            }

            if (retryCount >= retryLimit)
            {
                // stop retrying
                Log.Error(ex, "An error occurred while sending traces to the agent at {Endpoint}", _tracesEndpoint);
                return;
            }

            // retry
            await Task.Delay(sleepDuration).ConfigureAwait(false);
            retryCount++;
            sleepDuration *= 2;

            if (isSocketException)
            {
                // Ensure we have the most recent port before trying again
                TracingProcessManager.TraceAgentMetadata.ForcePortFileRead();
            }

            continue;
        }

        try
        {
            if (response.ContentLength > 0 && Tracer.Instance.Sampler != null)
            {
                var responseContent = await response.ReadAsStringAsync().ConfigureAwait(false);
                var apiResponse = JsonConvert.DeserializeObject<ApiResponse>(responseContent);
                Tracer.Instance.Sampler.SetDefaultSampleRates(apiResponse?.RateByService);
            }
        }
        catch (Exception ex)
        {
            Log.Error(ex, "Traces sent successfully to the Agent at {Endpoint}, but an error occurred deserializing the response.", _tracesEndpoint);
        }

        _statsd?.Send();
        return;
    }
}
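The JsonConvert.DeserializeObject<ApiResponse>(...) call in this variant (and the next one) implies a small DTO for the agent reply. A minimal sketch is shown below; the "rate_by_service" property name and the float value type are assumptions about the agent's JSON payload, not taken from the original listing.

using System.Collections.Generic;
using Newtonsoft.Json;

// Minimal sketch of the DTO assumed by JsonConvert.DeserializeObject<ApiResponse>(...):
// the reply is expected to carry a map of sampling keys to default sample rates,
// which is then passed to Sampler.SetDefaultSampleRates.
internal class ApiResponse
{
    [JsonProperty("rate_by_service")]
    public Dictionary<string, float> RateByService { get; set; }
}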
public async Task SendTracesAsync(Span[][] traces)
{
    // retry up to 5 times with exponential back-off
    var retryLimit = 5;
    var retryCount = 1;
    var sleepDuration = 100; // in milliseconds
    var traceIds = GetUniqueTraceIds(traces);

    while (true)
    {
        HttpResponseMessage responseMessage;

        try
        {
            // re-create HttpContent on every retry because some versions of HttpClient always dispose of it, so we can't reuse.
            using (var content = new TracesMessagePackContent(traces, _formatterResolver))
            {
                content.Headers.Add(AgentHttpHeaderNames.TraceCount, traceIds.Count.ToString());

                try
                {
                    _statsd?.AppendIncrementCount(TracerMetricNames.Api.Requests);
                    responseMessage = await _client.PostAsync(_tracesEndpoint, content).ConfigureAwait(false);
                }
                catch
                {
                    // count the exceptions thrown by the HttpClient,
                    // not responses with 5xx status codes
                    // (which cause EnsureSuccessStatusCode() to throw below)
                    _statsd?.AppendIncrementCount(TracerMetricNames.Api.Errors);
                    throw;
                }

                if (_statsd != null)
                {
                    // don't bother creating the tags array if trace metrics are disabled
                    string[] tags = { $"status:{(int)responseMessage.StatusCode}" };

                    // count every response, grouped by status code
                    _statsd.AppendIncrementCount(TracerMetricNames.Api.Responses, tags: tags);
                }

                responseMessage.EnsureSuccessStatusCode();
            }
        }
        catch (Exception ex)
        {
#if DEBUG
            if (ex.InnerException is InvalidOperationException ioe)
            {
                Log.Error(ex, "An error occurred while sending traces to the agent at {Endpoint}", _tracesEndpoint);
                return;
            }
#endif
            var isSocketException = false;

            if (ex.InnerException is SocketException se)
            {
                isSocketException = true;
                Log.Error(se, "Unable to communicate with the trace agent at {Endpoint}", _tracesEndpoint);
                TracingProcessManager.TryForceTraceAgentRefresh();
            }

            if (retryCount >= retryLimit)
            {
                // stop retrying
                Log.Error(ex, "An error occurred while sending traces to the agent at {Endpoint}", _tracesEndpoint);
                return;
            }

            // retry
            await Task.Delay(sleepDuration).ConfigureAwait(false);
            retryCount++;
            sleepDuration *= 2;

            if (isSocketException)
            {
                // Ensure we have the most recent port before trying again
                TracingProcessManager.TraceAgentMetadata.ForcePortFileRead();
            }

            continue;
        }

        try
        {
            if (responseMessage.Content != null && Tracer.Instance.Sampler != null)
            {
                // build the sample rate map from the response json
                var responseContent = await responseMessage.Content.ReadAsStringAsync().ConfigureAwait(false);
                var response = JsonConvert.DeserializeObject<ApiResponse>(responseContent);
                Tracer.Instance.Sampler.SetDefaultSampleRates(response?.RateByService);
            }
        }
        catch (Exception ex)
        {
            Log.Error(ex, "Traces sent successfully to the Agent at {Endpoint}, but an error occurred deserializing the response.", _tracesEndpoint);
        }

        _statsd?.Send();
        return;
    }
}
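For context, a hypothetical call site for the bool-returning variant is sketched below; the FlushTracesAsync method, the _api field, and the drop-on-failure policy are illustrative assumptions and not part of the original code.

// Hypothetical call site: flush a batch and decide what to do once the retry budget
// inside SendTracesAsync has been exhausted. The void overloads simply return in that
// case; the bool overload lets the caller react, as shown here.
private async Task FlushTracesAsync(Span[][] buffer)
{
    // false means all five attempts (with exponential back-off) failed
    var flushed = await _api.SendTracesAsync(buffer).ConfigureAwait(false);

    if (!flushed)
    {
        // illustrative policy only: drop the batch rather than retrying it again
        Log.Warning("Dropping {Count} trace(s) after the send failed and retries were exhausted.", buffer.Length);
    }
}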