public ZipkinApi(TracerSettings settings)
{
    Log.Debug("Creating new Zipkin Api");

    _settings = settings ?? throw new ArgumentNullException(nameof(settings));
    _tracesEndpoint = _settings.AgentUri; // User needs to include the proper path.
}
public ZipkinExporter(ImmutableTracerSettings settings)
{
    Log.Debug("Creating new Zipkin exporter");

    _settings = settings ?? throw new ArgumentNullException(nameof(settings));
    _tracesEndpoint = _settings.ExporterSettings.AgentUri;
}
public override async Task<TelemetryPushResult> PushTelemetry(TelemetryData data)
{
    try
    {
        // have to buffer in memory so we know the content length
        var serializedData = SerializeTelemetry(data);
        var request = new HttpRequestMessage(HttpMethod.Post, TelemetryConstants.TelemetryPath)
        {
            Content = new StringContent(serializedData, Encoding.UTF8, "application/json")
        };

        request.Headers.Add(TelemetryConstants.ApiVersionHeader, data.ApiVersion);
        request.Headers.Add(TelemetryConstants.RequestTypeHeader, data.RequestType);

        var response = await _httpClient.SendAsync(request).ConfigureAwait(false);

        if (response.IsSuccessStatusCode)
        {
            Log.Debug("Telemetry sent successfully");
            return TelemetryPushResult.Success;
        }
        else if (response.StatusCode == HttpStatusCode.NotFound)
        {
            Log.Debug("Error sending telemetry: 404. Disabling further telemetry, as endpoint not found");
            return TelemetryPushResult.FatalError;
        }
        else
        {
            Log.Debug("Error sending telemetry {StatusCode}", response.StatusCode);
            return TelemetryPushResult.TransientFailure;
        }
    }
    catch (Exception ex) when (ex is SocketException or HttpRequestException { InnerException: SocketException })
    {
        // The original snippet ends at the exception filter; treating the
        // network-level failure as transient is an assumed completion.
        Log.Debug(ex, "Error sending telemetry, unable to reach the endpoint");
        return TelemetryPushResult.TransientFailure;
    }
}
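// A minimal sketch of the SerializeTelemetry helper referenced above,
// assuming Newtonsoft.Json-style serialization; the real serializer and its
// settings are not shown in the snippet.
private static string SerializeTelemetry(TelemetryData data)
    => Newtonsoft.Json.JsonConvert.SerializeObject(data, Newtonsoft.Json.Formatting.None);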
private void LoggingCallback(
    DDWAF_LOG_LEVEL level,
    string function,
    string file,
    int line,
    string message,
    ulong message_len)
{
    var formattedMessage = $"{level}: [{function}]{file}({line}): {message}";

    switch (level)
    {
        case DDWAF_LOG_LEVEL.DDWAF_TRACE:
        case DDWAF_LOG_LEVEL.DDWAF_DEBUG:
            _log.Debug(formattedMessage);
            break;
        case DDWAF_LOG_LEVEL.DDWAF_INFO:
            _log.Information(formattedMessage);
            break;
        case DDWAF_LOG_LEVEL.DDWAF_WARN:
            _log.Warning(formattedMessage);
            break;
        case DDWAF_LOG_LEVEL.DDWAF_ERROR:
        case DDWAF_LOG_LEVEL.DDWAF_AFTER_LAST:
            _log.Error(formattedMessage);
            break;
        default:
            _log.Error("[Unknown level] " + formattedMessage);
            break;
    }
}
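// Hypothetical sketch of the DDWAF_LOG_LEVEL enum consumed by the callback
// above: the member names are taken from the switch cases, but the ordering
// and underlying values are assumptions.
internal enum DDWAF_LOG_LEVEL
{
    DDWAF_TRACE,
    DDWAF_DEBUG,
    DDWAF_INFO,
    DDWAF_WARN,
    DDWAF_ERROR,
    DDWAF_AFTER_LAST,
}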
public LogsApi(string apiKey, IApiRequestFactory apiRequestFactory)
{
    _apiKey = apiKey;
    _apiRequestFactory = apiRequestFactory;
    _logsIntakeEndpoint = _apiRequestFactory.GetEndpoint(LogIntakePath);
    Log.Debug("Using logs intake endpoint {LogsIntakeEndpoint}", _logsIntakeEndpoint.ToString());
}
public SamplingPriority GetSamplingPriority(Span span)
{
    var traceId = span.TraceId;

    if (_rules.Count > 0)
    {
        foreach (var rule in _rules)
        {
            if (rule.IsMatch(span))
            {
                var sampleRate = rule.GetSamplingRate(span);

                Log.Debug(
                    "Matched on rule {RuleName}. Applying rate of {Rate} to trace id {TraceId}",
                    rule.RuleName,
                    sampleRate,
                    traceId);

                return GetSamplingPriority(span, sampleRate, agentSampling: rule is DefaultSamplingRule);
            }
        }
    }

    Log.Debug("No rules matched for trace {TraceId}", traceId);

    return SamplingPriority.AutoKeep;
}
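// A minimal sketch of the private overload called above, assuming a ulong
// trace id and the usual deterministic trace-id sampling (multiply by a
// Knuth factor, compare the wrapped product against the rate). The exact
// implementation and the priority values chosen here are assumptions.
private SamplingPriority GetSamplingPriority(Span span, float rate, bool agentSampling)
{
    const ulong KnuthFactor = 1_111_111_111_111_111_111;
    var keep = rate >= 1f || unchecked(span.TraceId * KnuthFactor) <= rate * ulong.MaxValue;

    return keep
        ? (agentSampling ? SamplingPriority.AutoKeep : SamplingPriority.UserKeep)
        : (agentSampling ? SamplingPriority.AutoReject : SamplingPriority.UserReject);
}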
private static List<string> GetDatadogNativeFolders(FrameworkDescription frameworkDescription, string runtimeId)
{
    // First, gather anything that looks like a "home folder":
    // if running under Windows:
    //  - get the msi install location
    //  - then get the default Program Files folder (because of a known issue in
    //    the installer for the location of the x86 folder on a 64-bit OS)
    // then combine with the profiler's location,
    // taking into account that these locations could be the same place.
    var paths = GetHomeFolders(runtimeId);

    if (frameworkDescription.OSPlatform == OSPlatform.Windows)
    {
        var programFilesFolder = GetProgramFilesFolder();
        paths.Add(programFilesFolder);
        AddPathFromMsiSettings(paths);
    }

    var profilerFolder = GetProfilerFolder(frameworkDescription);

    if (!string.IsNullOrWhiteSpace(profilerFolder))
    {
        paths.Add(profilerFolder);
    }
    else
    {
        // this is expected under Windows, but problematic under other OSs
        Log.Debug("Couldn't find profilerFolder");
    }

    return paths.Distinct().ToList();
}
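// Hypothetical sketch of the GetProgramFilesFolder helper referenced above,
// assuming it resolves an install root under Program Files; the "Datadog"
// and ".NET Tracer" subfolder names are illustrative assumptions.
private static string GetProgramFilesFolder()
    => Path.Combine(
        Environment.GetFolderPath(Environment.SpecialFolder.ProgramFiles),
        "Datadog",
        ".NET Tracer");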
public async Task<TelemetryPushResult> PushTelemetry(TelemetryData data)
{
    try
    {
        // have to buffer in memory so we know the content length
        var serializedData = SerializeTelemetry(data);
        var bytes = Encoding.UTF8.GetBytes(serializedData);

        var request = _requestFactory.Create(_endpoint);
        request.AddHeader(TelemetryConstants.ApiVersionHeader, data.ApiVersion);
        request.AddHeader(TelemetryConstants.RequestTypeHeader, data.RequestType);

        using var response = await request.PostAsync(new ArraySegment<byte>(bytes), "application/json").ConfigureAwait(false);

        if (response.StatusCode is >= 200 and < 300)
        {
            Log.Debug("Telemetry sent successfully");
            return TelemetryPushResult.Success;
        }
        else if (response.StatusCode == 404)
        {
            Log.Debug("Error sending telemetry: 404. Disabling further telemetry, as endpoint '{Endpoint}' not found", _requestFactory.Info(_endpoint));
            return TelemetryPushResult.FatalError;
        }
        else
        {
            Log.Debug<string, int>("Error sending telemetry to '{Endpoint}' {StatusCode}", _requestFactory.Info(_endpoint), response.StatusCode);
            return TelemetryPushResult.TransientFailure;
        }
    }
    catch (Exception ex)
    {
        // The original snippet ends after the try block; treating any send
        // failure as transient is an assumed completion.
        Log.Debug(ex, "Error sending telemetry");
        return TelemetryPushResult.TransientFailure;
    }
}
public void Start()
{
    if (_allListenersSubscription == null)
    {
        Log.Debug("Starting DiagnosticListener.AllListeners subscription");
        _allListenersSubscription = DiagnosticListener.AllListeners.Subscribe(this);
    }
}
internal void PushEvents()
{
    try
    {
        _listener?.Refresh();

        if (_enableProcessMetrics)
        {
            ProcessHelpers.GetCurrentProcessRuntimeMetrics(out var newUserCpu, out var newSystemCpu, out var threadCount, out var memoryUsage);

            var userCpu = newUserCpu - _previousUserCpu;
            var systemCpu = newSystemCpu - _previousSystemCpu;

            _previousUserCpu = newUserCpu;
            _previousSystemCpu = newSystemCpu;

            // Note: the behavior of Environment.ProcessorCount has changed a lot across versions: https://github.com/dotnet/runtime/issues/622
            // What we want is the number of cores attributed to the container, which is the behavior in 3.1.2+ (and, I believe, in 2.x)
            var maximumCpu = Environment.ProcessorCount * _delay.TotalMilliseconds;
            var totalCpu = userCpu + systemCpu;

            _statsd.Gauge(MetricsNames.ThreadsCount, threadCount);
            _statsd.Gauge(MetricsNames.CommittedMemory, memoryUsage);

            // Get CPU time in milliseconds per second
            _statsd.Gauge(MetricsNames.CpuUserTime, userCpu.TotalMilliseconds / _delay.TotalSeconds);
            _statsd.Gauge(MetricsNames.CpuSystemTime, systemCpu.TotalMilliseconds / _delay.TotalSeconds);

            _statsd.Gauge(MetricsNames.CpuPercentage, Math.Round(totalCpu.TotalMilliseconds * 100 / maximumCpu, 1, MidpointRounding.AwayFromZero));

            Log.Debug("Sent the following metrics: {metrics}", ProcessMetrics);
        }

        if (!_exceptionCounts.IsEmpty)
        {
            foreach (var element in _exceptionCounts)
            {
                _statsd.Increment(MetricsNames.ExceptionsCount, element.Value, tags: new[] { $"exception_type:{element.Key}" });
            }

            // There's a race condition where we could clear items that haven't been pushed.
            // Having an exact exception count is probably not worth the overhead required to fix it.
            _exceptionCounts.Clear();

            Log.Debug("Sent the following metrics: {metrics}", MetricsNames.ExceptionsCount);
        }
        else
        {
            Log.Debug("Did not send the following metrics: {metrics}", MetricsNames.ExceptionsCount);
        }
    }
    catch (Exception ex)
    {
        Log.Warning(ex, "Error while updating runtime metrics");
    }
}
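// Worked example of the CpuPercentage computation above: with a 10-second
// delay between pushes on a 4-core host, maximumCpu = 4 * 10_000 ms. If the
// process consumed 6_000 ms of combined user + system CPU time over that
// interval, the gauge reports 6_000 * 100 / 40_000 = 15.0 (percent).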
public void Send(DataPointUploadMessage msg)
{
    if (msg?.datapoints == null || msg.datapoints.Count < 1)
    {
        return;
    }

    var attemptNumber = 1;

    while (true)
    {
        try
        {
            var webRequest = WebRequest.CreateHttp(_metricsEndpointAddress);
            webRequest.ContentType = "application/x-protobuf";
            webRequest.Method = "POST";
            webRequest.Headers.Add(CommonHttpHeaderNames.TracingEnabled, "false");

            if (!string.IsNullOrWhiteSpace(_apiToken))
            {
                webRequest.Headers.Add("X-Sf-Token", _apiToken);
            }

            webRequest.Timeout = _webRequestTimeoutMs;

            using (var requestStream = webRequest.GetRequestStream())
            {
                Serializer.Serialize(requestStream, msg);
                requestStream.Flush();
            }

            // if the request failed (statusCode < 200 || statusCode >= 300), an exception will be thrown
            using var webResponse = (HttpWebResponse)webRequest.GetResponse();

            Log.Debug($"Sent {msg.datapoints.Count} metric data points to: {_metricsEndpointAddress}.");
            return;
        }
        catch (Exception ex)
        {
            if (attemptNumber >= MaxAttempts || !IsTransient(ex))
            {
                Log.Error(ex, $"Dropping metrics after {attemptNumber} unsuccessful attempt(s) sending to: {_metricsEndpointAddress}.");
                return;
            }

            Log.Debug(ex, "Transient exception encountered. Retrying sending metric data.");
        }

        attemptNumber++;

        // short wait before retrying
        Thread.Sleep(_waitBeforeRetries);
    }
}
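// A minimal sketch of the IsTransient predicate referenced above (requires
// System.Net), assuming only network-level failures are worth retrying; the
// real check may cover more cases.
private static bool IsTransient(Exception ex)
{
    return ex is WebException
    {
        Status: WebExceptionStatus.ConnectFailure
             or WebExceptionStatus.Timeout
             or WebExceptionStatus.NameResolutionFailure
    };
}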
public ITelemetryTransport Create()
{
#if NETCOREAPP
    Log.Debug("Using {FactoryType} for telemetry transport.", nameof(JsonHttpClientTelemetryTransport));
    var httpClient = new System.Net.Http.HttpClient { BaseAddress = _baseEndpoint };
    return new JsonHttpClientTelemetryTransport(httpClient, _apiKey);
#else
    Log.Debug("Using {FactoryType} for telemetry transport.", nameof(JsonWebRequestTelemetryTransport));
    return new JsonWebRequestTelemetryTransport(_baseEndpoint, _apiKey);
#endif
}
// https://github.com/DataDog/datadog-agent/blob/eac2327c5574da7f225f9ef0f89eaeb05ed10382/pkg/trace/agent/truncator.go#L26-L44
public void ProcessMeta(ref string key, ref string value)
{
    if (TraceUtil.TruncateUTF8(ref key, MaxMetaKeyLen))
    {
        key += "...";
        Log.Debug("span.truncate: truncating `Meta` key (max {maxMetaKeyLen} chars): {key}", MaxMetaKeyLen, key);
    }

    if (TraceUtil.TruncateUTF8(ref value, MaxMetaValLen))
    {
        value += "...";
        Log.Debug("span.truncate: truncating `Meta` value (max {maxMetaValLen} chars): {value}", MaxMetaValLen, value);
    }
}
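// A sketch of the TraceUtil.TruncateUTF8 helper used above, under the
// assumption that it trims the string so its UTF-8 encoding fits within
// maxBytes and reports whether anything was cut. A real implementation would
// also avoid splitting surrogate pairs; this simplified version relies on
// the encoder's replacement fallback instead.
internal static bool TruncateUTF8(ref string value, int maxBytes)
{
    var chars = value.ToCharArray();

    if (Encoding.UTF8.GetByteCount(chars, 0, chars.Length) <= maxBytes)
    {
        return false;
    }

    // Walk back until the encoded prefix fits; the limits involved are
    // small, so a linear scan is acceptable.
    var length = chars.Length;
    while (length > 0 && Encoding.UTF8.GetByteCount(chars, 0, length) > maxBytes)
    {
        length--;
    }

    value = new string(chars, 0, length);
    return true;
}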
static async Task InternalFlush()
{
    try
    {
        // We have to ensure the buffer is flushed after we finish the tests of an assembly.
        // For some reason, sometimes when all tests are finished, none of the callbacks handling the tracer disposal is triggered,
        // so the last spans in the buffer aren't sent to the agent.
        Log.Debug("Integration flushing spans.");
        await Tracer.Instance.FlushAsync().ConfigureAwait(false);
        Log.Debug("Integration flushed.");
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Exception occurred when flushing spans.");
    }
}
public static string ReadLocalMachineString(string key, string value)
{
    int pcbData = 512;
    string result = null;
    var pvData = Marshal.AllocHGlobal(pcbData);

    try
    {
        var hresult = RegGetValue(HKEY.HKEY_LOCAL_MACHINE, key, value, RFlags.Any, out _, pvData, ref pcbData);
        Log.Debug<int>("RegGetValue - read string data: {pcbData}", pcbData);

        if (hresult != 0)
        {
            // warning only, as the call could fail because the key is missing, which is expected in many situations
            Log.Warning("Registry access for key {Key} failed with 0x{HResult}", key, hresult.ToString("X8"));
            return null;
        }

        result = Marshal.PtrToStringUni(pvData);
    }
    finally
    {
        Marshal.FreeHGlobal(pvData);
    }

    return result;
}
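// Assumed P/Invoke declaration behind the RegGetValue call above, mirroring
// the Win32 RegGetValueW signature; the HKEY and RFlags wrapper types belong
// to the surrounding code and are not reproduced here.
[DllImport("advapi32.dll", CharSet = CharSet.Unicode, ExactSpelling = true, EntryPoint = "RegGetValueW")]
private static extern int RegGetValue(HKEY hkey, string lpSubKey, string lpValue, RFlags dwFlags, out uint pdwType, IntPtr pvData, ref int pcbData);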
public Task<bool> SendTracesAsync(Span[][] traces)
{
    if (traces == null || traces.Length == 0)
    {
        // Nothing to send, no ping for Zipkin.
        return Task.FromResult(true);
    }

    try
    {
        foreach (var trace in traces)
        {
            if (Batch == null)
            {
                SetResourceAndInitializeBatch(_serviceName);
            }

            foreach (var span in trace)
            {
                AppendSpan(span.ToJaegerSpan());
            }

            SendCurrentBatch();
        }

        return Task.FromResult(true);
    }
    catch (Exception ex)
    {
        Log.Debug("Exception sending traces to {0}: {1}", $"{_options.Host}:{_options.Port}", ex.Message);
        return Task.FromResult(false);
    }
}
private async Task SendRequestAsync(HttpRequest request, Stream requestStream)
{
    // Headers are always ASCII per the HTTP spec
    using (var writer = new StreamWriter(requestStream, Encoding.ASCII, bufferSize: MaxRequestHeadersBufferSize, leaveOpen: true))
    {
        await _headerHelper.WriteLeadingHeaders(request, writer).ConfigureAwait(false);

        foreach (var header in request.Headers)
        {
            await _headerHelper.WriteHeader(writer, header).ConfigureAwait(false);
        }

        // Empty line to signify end of headers
        await writer.WriteAsync(DatadogHttpValues.CrLf).ConfigureAwait(false);

        // Remove the (admittedly really small) sync-over-async occurrence
        // by forcing a flush so that System.IO.TextWriter.Dispose() does not block
        await writer.FlushAsync().ConfigureAwait(false);
    }

    await request.Content.CopyToAsync(requestStream).ConfigureAwait(false);

    Logger.Debug("Datadog HTTP: Flushing stream.");
    await requestStream.FlushAsync().ConfigureAwait(false);
}
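// For reference, the bytes this method writes follow the usual HTTP/1.1
// layout; the path and header values below are illustrative only:
//
//   POST /v0.4/traces HTTP/1.1
//   Host: localhost:8126
//   Content-Length: 123
//                              <- blank line written via DatadogHttpValues.CrLf
//   <body copied from request.Content>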
private void ExtractCounters(ReadOnlyCollection<object> payload)
{
    for (int i = 0; i < payload.Count; ++i)
    {
        if (!(payload[i] is IDictionary<string, object> eventPayload))
        {
            continue;
        }

        if (!eventPayload.TryGetValue("Name", out object name)
            || !MetricsMapping.TryGetValue(name.ToString(), out var statName))
        {
            continue;
        }

        if (eventPayload.TryGetValue("Mean", out object rawValue)
            || eventPayload.TryGetValue("Increment", out rawValue))
        {
            var value = (double)rawValue;
            _statsd.Gauge(statName, value);
        }
        else
        {
            Log.Debug<object>("EventCounter {CounterName} has no Mean or Increment field", name);
        }
    }
}
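// Example of the payload shape the loop above accepts, inferred from the
// code: each entry is a dictionary carrying a "Name" plus either a "Mean"
// (aggregated counters) or an "Increment" (incrementing counters) value.
// The counter names here are illustrative System.Runtime counter names.
var payload = new ReadOnlyCollection<object>(new object[]
{
    new Dictionary<string, object> { ["Name"] = "cpu-usage", ["Mean"] = 42.0 },
    new Dictionary<string, object> { ["Name"] = "alloc-rate", ["Increment"] = 1024.0 },
});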
public Api(
    IApiRequestFactory apiRequestFactory,
    IDogStatsd statsd,
    Action<Dictionary<string, float>> updateSampleRates,
    bool isPartialFlushEnabled,
    IDatadogLogger log = null)
{
    // optionally injecting a log instance in here for testing purposes
    _log = log ?? StaticLog;
    _log.Debug("Creating new Api");

    _updateSampleRates = updateSampleRates;
    _statsd = statsd;
    _containerId = ContainerMetadata.GetContainerId();
    _apiRequestFactory = apiRequestFactory;
    _isPartialFlushEnabled = isPartialFlushEnabled;
    _tracesEndpoint = _apiRequestFactory.GetEndpoint(TracesPath);
    _log.Debug("Using traces endpoint {TracesEndpoint}", _tracesEndpoint.ToString());
}
public void ProcessMeta(ref string key, ref string value)
{
    // https://github.dev/DataDog/datadog-agent/blob/712c7a7835e0f5aaa47211c4d75a84323eed7fd9/pkg/trace/obfuscate/redis.go#L91
    if (_redisObfuscationEnabled && key == Trace.Tags.RedisRawCommand)
    {
        value = RedisObfuscationUtil.Obfuscate(value);
        Log.Debug("span.obfuscate: obfuscating `redis.raw_command` value");
    }
}
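// Illustration of the effect, based on the linked agent obfuscator, which
// masks sensitive command arguments while keeping command names; the exact
// rewriting rules live in RedisObfuscationUtil and are not shown here:
//
//   "AUTH s3cret"  ->  "AUTH ?"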
private static void LogAddressIfDebugEnabled(IDictionary<string, object> args)
{
    if (Log.IsEnabled(LogEventLevel.Debug))
    {
        foreach (var key in args.Keys)
        {
            Log.Debug("DDAS-0008-00: Pushing address {Key} to the Instrumentation Gateway.", key);
        }
    }
}
internal Span(SpanContext context, DateTimeOffset? start, ITags tags)
{
    Tags = tags ?? new CommonTags();
    Context = context;
    StartTime = start ?? Context.TraceContext.UtcNow;

    Log.Debug(
        "Span started: [s_id: {SpanID}, p_id: {ParentId}, t_id: {TraceId}]",
        SpanId,
        Context.ParentId,
        TraceId);
}
public void Dispose()
{
    try
    {
        Logger.Debug("Running shutdown tasks for logs direct submission");
        Sink?.Dispose();
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error flushing logs on shutdown");
    }
}
private void EventSource_BuildStarted(object sender, BuildStartedEventArgs e)
{
    try
    {
        Log.Debug("Build Started");

        _buildSpan = _tracer.StartSpan(BuildTags.BuildOperationName);
        _buildSpan.SetMetric(Tags.Analytics, 1.0d);
        _buildSpan.SetTraceSamplingPriority(SamplingPriorityValues.AutoKeep);

        _buildSpan.Type = SpanTypes.Build;
        _buildSpan.SetTag(Tags.Origin, TestTags.CIAppTestOriginName);
        _buildSpan.SetTag(Tags.Language, TracerConstants.Language);
        _buildSpan.SetTag(BuildTags.BuildName, e.SenderName);

        foreach (KeyValuePair<string, string> envValue in e.BuildEnvironment)
        {
            _buildSpan.SetTag($"{BuildTags.BuildEnvironment}.{envValue.Key}", envValue.Value);
        }

        _buildSpan.SetTag(BuildTags.BuildCommand, Environment.CommandLine);
        _buildSpan.SetTag(BuildTags.BuildWorkingFolder, Environment.CurrentDirectory);
        _buildSpan.SetTag(BuildTags.BuildStartMessage, e.Message);

        _buildSpan.SetTag(CommonTags.OSArchitecture, Environment.Is64BitOperatingSystem ? "x64" : "x86");
        _buildSpan.SetTag(CommonTags.OSVersion, Environment.OSVersion.VersionString);
        _buildSpan.SetTag(CommonTags.RuntimeArchitecture, Environment.Is64BitProcess ? "x64" : "x86");
        _buildSpan.SetTag(CommonTags.LibraryVersion, TracerConstants.AssemblyVersion);

        CIEnvironmentValues.Instance.DecorateSpan(_buildSpan);
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Error in BuildStarted event");
    }
}
private static void LogRuleDetailsIfDebugEnabled(JToken root)
{
    if (Log.IsEnabled(LogEventLevel.Debug))
    {
        try
        {
            var eventsProp = root.Value<JArray>("rules");
            Log.Debug("Number of rules: {Count}", eventsProp.Count);

            foreach (var ev in eventsProp)
            {
                var idProp = ev.Value<JValue>("id");
                var nameProp = ev.Value<JValue>("name");
                var addresses = ev.Value<JArray>("conditions").SelectMany(x => x.Value<JObject>("parameters").Value<JArray>("inputs"));
                Log.Debug("DDAS-0007-00: Loaded rule: {id} - {name} on addresses: {addresses}", idProp.Value, nameProp.Value, string.Join(", ", addresses));
            }
        }
        catch (Exception ex)
        {
            Log.Error(ex, "Error occurred logging the ddwaf rules");
        }
    }
}
private static string GetImpl()
{
    if (NativeLoader.TryGetRuntimeIdFromNative(out var runtimeId))
    {
        Log.Information("Runtime id retrieved from native loader: {RuntimeId}", runtimeId);
        return runtimeId;
    }

    var guid = Guid.NewGuid().ToString();
    Log.Debug("Unable to get the runtime id from native. Falling back to Guid.NewGuid(): {NewGuid}", guid);
    return guid;
}
static async Task InternalFlush()
{
    try
    {
        // We have to ensure the buffer is flushed after we finish the tests of an assembly.
        // For some reason, sometimes when all tests are finished, none of the callbacks handling the tracer disposal is triggered,
        // so the last spans in the buffer aren't sent to the agent.
        Log.Debug("Integration flushing spans.");
        await TestTracer.FlushAsync().ConfigureAwait(false);

        // The current agent writer FlushAsync method can return immediately if a payload is being sent (there is a buffer lock).
        // There is no API in the agent writer that guarantees the send has completed successfully.
        // Until we change the behavior of the agent writer, we should wait at least 2 seconds before returning.
        Log.Debug("Waiting 2 seconds to flush.");
        await Task.Delay(2000).ConfigureAwait(false);

        Log.Debug("Integration flushed.");
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Exception occurred when flushing spans.");
    }
}
public ISpanBuilder AddReference(string referenceType, global::OpenTracing.ISpanContext referencedContext)
{
    lock (_lock)
    {
        if (referenceType == References.ChildOf)
        {
            _parent = referencedContext;
            return this;
        }
    }

    Log.Debug("ISpanBuilder.AddReference is only implemented for ChildOf references by Datadog.Trace");
    return this;
}
static void InternalFlush()
{
    const int timeout = 60_000;

    try
    {
        // We have to ensure the buffer is flushed after we finish the tests of an assembly.
        // For some reason, sometimes when all tests are finished, none of the callbacks handling the tracer disposal is triggered,
        // so the last spans in the buffer aren't sent to the agent.
        Log.Debug("Integration flushing spans.");

        Task task;
        if (_settings.Logs)
        {
            task = Task.WhenAll(
                Tracer.Instance.FlushAsync(),
                Tracer.Instance.TracerManager.DirectLogSubmission.Sink.FlushAsync());
        }
        else
        {
            task = Tracer.Instance.FlushAsync();
        }

        if (!task.Wait(timeout))
        {
            Log.Error("Timeout occurred when flushing spans.");
            return;
        }

        Log.Debug("Integration flushed.");
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Exception occurred when flushing spans.");
    }
}
public async Task DisposeAsync()
{
    try
    {
        Logger.Debug("Running shutdown tasks for logs direct submission");

        if (Sink is { } sink)
        {
            await sink.DisposeAsync().ConfigureAwait(false);
        }
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error flushing logs on shutdown");
    }
}