/// <summary>
/// Creates and activates a producer span for a Kafka produce call.
/// Returns <c>null</c> (no span) when the integration is disabled or when an
/// already-active instrumented produce span would be duplicated.
/// Never throws: any failure is logged and <c>null</c> is returned instead.
/// </summary>
/// <param name="tracer">The tracer instance used to create and activate the span.</param>
/// <param name="topicPartition">Target topic/partition; may be null or have an empty topic.</param>
/// <param name="isTombstone">True when the message value is null (a Kafka tombstone).</param>
/// <param name="finishOnClose">Whether closing the scope should also finish the span.</param>
/// <returns>The active scope, or <c>null</c> if no span was created.</returns>
internal static Scope CreateProducerScope(Tracer tracer, ITopicPartition topicPartition, bool isTombstone, bool finishOnClose)
{
    Scope scope = null;

    try
    {
        var settings = tracer.Settings;

        if (!settings.IsIntegrationEnabled(KafkaConstants.IntegrationId))
        {
            // integration disabled, don't create a scope/span, skip this trace
            return null;
        }

        var parent = tracer.ActiveScope?.Span;
        if (parent is not null &&
            parent.OperationName == KafkaConstants.ProduceOperationName &&
            parent.GetTag(Tags.InstrumentationName) != null)
        {
            // the active span is already an instrumented produce span;
            // don't nest a duplicate underneath it
            return null;
        }

        string serviceName = settings.GetServiceName(tracer, KafkaConstants.ServiceName);
        var tags = new KafkaTags(SpanKinds.Producer);

        scope = tracer.StartActiveInternal(
            KafkaConstants.ProduceOperationName,
            tags: tags,
            serviceName: serviceName,
            finishOnClose: finishOnClose);

        string resourceName = $"Produce Topic {(string.IsNullOrEmpty(topicPartition?.Topic) ? "kafka" : topicPartition?.Topic)}";

        var span = scope.Span;
        span.Type = SpanTypes.Queue;
        span.ResourceName = resourceName;

        if (topicPartition?.Partition is not null && !topicPartition.Partition.IsSpecial)
        {
            // topicPartition is proven non-null by the condition above, so the
            // original's redundant null-conditional access is dropped here
            tags.Partition = topicPartition.Partition.ToString();
        }

        if (isTombstone)
        {
            tags.Tombstone = "true";
        }

        // Producer spans should always be measured
        span.SetTag(Tags.Measured, "1");

        tags.SetAnalyticsSampleRate(KafkaConstants.IntegrationId, settings, enabledWithGlobalSetting: false);
        tracer.TracerManager.Telemetry.IntegrationGeneratedSpan(KafkaConstants.IntegrationId);
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Error creating or populating scope.");
    }

    return scope;
}
/// <summary>
/// Creates and activates a consumer span for a consumed Kafka message,
/// continuing any trace context propagated through the message headers.
/// Returns <c>null</c> (no span) when the integration is disabled or when an
/// already-active instrumented consume span would be duplicated.
/// Never throws: any failure is logged and <c>null</c> is returned instead.
/// </summary>
/// <param name="tracer">The tracer instance used to create and activate the span.</param>
/// <param name="topic">The topic the message was consumed from; may be null/empty.</param>
/// <param name="partition">Partition of the message, if known.</param>
/// <param name="offset">Offset of the message, if known.</param>
/// <param name="message">The consumed message; may be null.</param>
/// <returns>The active scope, or <c>null</c> if no span was created.</returns>
internal static Scope CreateConsumerScope(
    Tracer tracer,
    string topic,
    Partition? partition,
    Offset? offset,
    IMessage message)
{
    Scope scope = null;

    try
    {
        if (!tracer.Settings.IsIntegrationEnabled(KafkaConstants.IntegrationId))
        {
            // integration disabled, don't create a scope/span, skip this trace
            return null;
        }

        var activeSpan = tracer.ActiveScope?.Span;
        var isAlreadyInsideConsumeSpan =
            activeSpan is not null
            && activeSpan.OperationName == KafkaConstants.ConsumeOperationName
            && activeSpan.GetTag(Tags.InstrumentationName) != null;

        if (isAlreadyInsideConsumeSpan)
        {
            return null;
        }

        // Try to extract propagated context from headers
        SpanContext propagatedContext = null;
        if (message?.Headers is not null)
        {
            var headersAdapter = new KafkaHeadersCollectionAdapter(message.Headers);
            try
            {
                propagatedContext = SpanContextPropagator.Instance.Extract(headersAdapter);
            }
            catch (Exception ex)
            {
                Log.Error(ex, "Error extracting propagated headers from Kafka message");
            }
        }

        string serviceName = tracer.Settings.GetServiceName(tracer, KafkaConstants.ServiceName);
        var consumerTags = new KafkaTags(SpanKinds.Consumer);

        scope = tracer.StartActiveInternal(
            KafkaConstants.ConsumeOperationName,
            parent: propagatedContext,
            tags: consumerTags,
            serviceName: serviceName);

        var span = scope.Span;
        span.Type = SpanTypes.Queue;
        span.ResourceName = $"Consume Topic {(string.IsNullOrEmpty(topic) ? "kafka" : topic)}";

        if (partition is not null)
        {
            consumerTags.Partition = partition.ToString();
        }

        if (offset is not null)
        {
            consumerTags.Offset = offset.ToString();
        }

        if (message is not null)
        {
            // NOTE(review): Timestamp.Type == 0 presumably means "no broker/producer
            // timestamp available" — confirm against the Kafka client's TimestampType enum.
            if (message.Timestamp.Type != 0)
            {
                var queueTimeMs = (span.StartTime.UtcDateTime - message.Timestamp.UtcDateTime).TotalMilliseconds;

                // clamp to zero: clock skew between producer and consumer hosts
                // can make the raw difference negative
                consumerTags.MessageQueueTimeMs = Math.Max(0, queueTimeMs);
            }

            if (message.Value is null)
            {
                consumerTags.Tombstone = "true";
            }
        }

        // Consumer spans should always be measured
        span.SetTag(Tags.Measured, "1");

        consumerTags.SetAnalyticsSampleRate(KafkaConstants.IntegrationId, tracer.Settings, enabledWithGlobalSetting: false);

        // NOTE(review): unlike the producer path, no IntegrationGeneratedSpan telemetry
        // call here — verify whether that asymmetry is intentional.
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Error creating or populating scope.");
    }

    return scope;
}