public async Task ProducerCanPublishBatches(EventHubsTransportType transportType)
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions
        {
            EnableIdempotentPartitions = true,
            ConnectionOptions = new EventHubConnectionOptions { TransportType = transportType }
        };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = (await producer.GetPartitionIdsAsync()).First();
        var batchOptions = new CreateBatchOptions { PartitionId = partition };

        using var firstBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        using var secondBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(async () => await producer.SendAsync(firstBatch, cancellationSource.Token), Throws.Nothing, "The first publishing operation was not successful.");
        Assert.That(async () => await producer.SendAsync(secondBatch, cancellationSource.Token), Throws.Nothing, "The second publishing operation was not successful.");
    }
}
public async Task ProducerUpdatesPropertiesAfterPublishingEvents()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = (await producer.GetPartitionIdsAsync(cancellationSource.Token)).First();
        var initialPartitionProperties = await producer.GetPartitionPublishingPropertiesAsync(partition);

        var sendOptions = new SendEventOptions { PartitionId = partition };
        var events = EventGenerator.CreateEvents(10).ToArray();
        await producer.SendAsync(events, sendOptions, cancellationSource.Token);

        var updatedPartitionProperties = await producer.GetPartitionPublishingPropertiesAsync(partition);

        Assert.That(updatedPartitionProperties.IsIdempotentPublishingEnabled, Is.True, "Idempotent publishing should be enabled.");
        Assert.That(updatedPartitionProperties.ProducerGroupId, Is.EqualTo(initialPartitionProperties.ProducerGroupId), "The producer group identifier should not have changed.");
        Assert.That(updatedPartitionProperties.OwnerLevel, Is.EqualTo(initialPartitionProperties.OwnerLevel), "The owner level should not have changed.");
        Assert.That(updatedPartitionProperties.LastPublishedSequenceNumber, Is.GreaterThan(initialPartitionProperties.LastPublishedSequenceNumber), "The last published sequence number should have increased.");
    }
}
public async Task ProducerAllowsPublishingConcurrentlyToDifferentPartitions()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(4))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        async Task sendEvents(string partition, int delayMilliseconds)
        {
            await Task.Delay(delayMilliseconds);
            await producer.SendAsync(EventGenerator.CreateEvents(5), new SendEventOptions { PartitionId = partition }, cancellationSource.Token);
        }

        var partitions = await producer.GetPartitionIdsAsync(cancellationSource.Token);
        var pendingSends = new List<Task>();

        foreach (var partition in partitions)
        {
            pendingSends.Add(sendEvents(partition, 50));
            pendingSends.Add(sendEvents(partition, 0));
        }

        Assert.That(async () => await Task.WhenAll(pendingSends), Throws.Nothing);
    }
}
public async Task ProducerManagesConcurrencyWhenPublishingEvents()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = (await producer.GetPartitionIdsAsync(cancellationSource.Token)).First();
        var sendOptions = new SendEventOptions { PartitionId = partition };

        async Task sendEvents(int delayMilliseconds)
        {
            await Task.Delay(delayMilliseconds);
            await producer.SendAsync(EventGenerator.CreateEvents(2), sendOptions, cancellationSource.Token);
        }

        var pendingSends = Task.WhenAll(
            sendEvents(100),
            sendEvents(50),
            sendEvents(0)
        );

        Assert.That(async () => await pendingSends, Throws.Nothing);
    }
}
/// <summary>
///   Sends a set of events using a new producer to do so.
/// </summary>
///
/// <param name="connectionString">The connection string to use when creating the producer.</param>
/// <param name="sourceEvents">The set of events to send.</param>
/// <param name="cancellationToken">The token used to signal a cancellation request.</param>
///
/// <returns>The count of events that were sent.</returns>
///
private async Task<int> SendEvents(string connectionString, IEnumerable<EventData> sourceEvents, CancellationToken cancellationToken)
{
    var sentCount = 0;

    await using (var producer = new EventHubProducerClient(connectionString))
    {
        foreach (var batch in (await EventGenerator.BuildBatchesAsync(sourceEvents, producer, default, cancellationToken)))
        {
            // The original snippet was truncated here; the assumed remainder of the loop publishes
            // each batch, counts its events, and disposes it.
            await producer.SendAsync(batch, cancellationToken);

            sentCount += batch.Count;
            batch.Dispose();
        }
    }

    return sentCount;
}
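// The tests in this excerpt also rely on an EventGenerator helper that is not shown. The following is a
// minimal, hypothetical sketch of the members used here (CreateEvents, IdPropertyName, CreateRandomBody),
// inferred from the call sites; the real helper may differ. (Assumes using System, System.Collections.Generic,
// and Azure.Messaging.EventHubs.)
internal static class EventGenerator
{
    // Property name used to stamp each generated event with a unique, trackable identifier.
    public const string IdPropertyName = "GeneratedId";

    // Produces the requested number of events, each with a small random body and a unique identifier property.
    public static IEnumerable<EventData> CreateEvents(int count)
    {
        var random = new Random();

        for (var index = 0; index < count; ++index)
        {
            var body = new byte[16];
            random.NextBytes(body);

            var eventData = new EventData(body);
            eventData.Properties[IdPropertyName] = Guid.NewGuid().ToString();

            yield return eventData;
        }
    }

    // Creates a random payload of the requested size, used to build events that are intentionally too large.
    public static byte[] CreateRandomBody(long sizeInBytes)
    {
        var body = new byte[sizeInBytes];
        new Random().NextBytes(body);

        return body;
    }
}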
public async Task ProducerCanInitializeWithPartialPartitionOptions()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = default(string);
        var partitionProperties = default(PartitionPublishingProperties);

        // Create a producer for a small scope that will send some events and read the properties.
        await using (var initialProducer = new EventHubProducerClient(connectionString, options))
        {
            partition = (await initialProducer.GetPartitionIdsAsync(cancellationSource.Token)).Last();
            await initialProducer.SendAsync(EventGenerator.CreateEvents(10), new SendEventOptions { PartitionId = partition }, cancellationSource.Token);
            partitionProperties = await initialProducer.GetPartitionPublishingPropertiesAsync(partition);
        }

        // Create a new producer using the previously read properties to set options for the partition.
        options.PartitionOptions.Add(partition, new PartitionPublishingOptions
        {
            ProducerGroupId = partitionProperties.ProducerGroupId,
            OwnerLevel = partitionProperties.OwnerLevel
        });

        Assert.That(options.PartitionOptions[partition].StartingSequenceNumber.HasValue, Is.False, "The partition options should not specify a starting sequence number.");

        await using var producer = new EventHubProducerClient(connectionString, options);

        // Verify that the properties were fully initialized when using partial options.
        partitionProperties = await producer.GetPartitionPublishingPropertiesAsync(partition);

        Assert.That(partitionProperties, Is.Not.Null, "The properties should have been created.");
        Assert.That(partitionProperties.IsIdempotentPublishingEnabled, Is.True, "Idempotent publishing should be enabled.");
        Assert.That(partitionProperties.ProducerGroupId.HasValue, Is.True, "The producer group identifier should have a value.");
        Assert.That(partitionProperties.OwnerLevel.HasValue, Is.True, "The owner level should have a value.");
        Assert.That(partitionProperties.LastPublishedSequenceNumber.HasValue, Is.True, "The last published sequence number should have a value.");

        // Ensure that the state supports publishing.
        Assert.That(async () => await producer.SendAsync(EventGenerator.CreateEvents(10), new SendEventOptions { PartitionId = partition }, cancellationSource.Token), Throws.Nothing);
    }
}
public async Task ProducerSequencesBatches()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = (await producer.GetPartitionIdsAsync(cancellationSource.Token)).Last();
        var batchOptions = new CreateBatchOptions { PartitionId = partition };

        var partitionProperties = await producer.GetPartitionPublishingPropertiesAsync(partition);
        var eventSequenceNumber = partitionProperties.LastPublishedSequenceNumber;

        using var firstBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        using var secondBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(firstBatch.StartingPublishedSequenceNumber.HasValue, Is.False, "Batches should start out as unpublished with no sequence number, the first batch was incorrect.");
        Assert.That(secondBatch.StartingPublishedSequenceNumber.HasValue, Is.False, "Batches should start out as unpublished with no sequence number, the second batch was incorrect.");

        await producer.SendAsync(firstBatch, cancellationSource.Token);
        await producer.SendAsync(secondBatch, cancellationSource.Token);

        Assert.That(firstBatch.StartingPublishedSequenceNumber.HasValue, "Batches should be sequenced after publishing, the first batch was incorrect.");
        Assert.That(firstBatch.StartingPublishedSequenceNumber, Is.EqualTo(eventSequenceNumber + 1), "Batches should be sequenced after publishing, the first batch was incorrect.");
        Assert.That(secondBatch.StartingPublishedSequenceNumber.HasValue, "Batches should be sequenced after publishing, the second batch was incorrect.");
        Assert.That(secondBatch.StartingPublishedSequenceNumber, Is.EqualTo(eventSequenceNumber + 1 + firstBatch.Count), "Batches should be sequenced after publishing, the second batch was incorrect.");
    }
}
public async Task EventsCanBeReadByOneProcessorClientUsingAnIdentityCredential()
{
    // Setup the environment.
    await using EventHubScope scope = await EventHubScope.CreateAsync(2);
    var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

    // Send a set of events.
    var sourceEvents = EventGenerator.CreateEvents(50).ToList();
    var sentCount = await SendEvents(connectionString, sourceEvents, cancellationSource.Token);

    Assert.That(sentCount, Is.EqualTo(sourceEvents.Count), "Not all of the source events were sent.");

    // Attempt to read back the events.
    var processedEvents = new ConcurrentDictionary<string, EventData>();
    var completionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    var options = new EventProcessorOptions { LoadBalancingUpdateInterval = TimeSpan.FromMilliseconds(250) };
    var processor = CreateProcessorWithIdentity(scope.ConsumerGroups.First(), scope.EventHubName, options: options);

    processor.ProcessErrorAsync += CreateAssertingErrorHandler();
    processor.ProcessEventAsync += CreateEventTrackingHandler(sentCount, processedEvents, completionSource, cancellationSource.Token);

    await processor.StartProcessingAsync(cancellationSource.Token);
    await Task.WhenAny(completionSource.Task, Task.Delay(Timeout.Infinite, cancellationSource.Token));

    Assert.That(cancellationSource.IsCancellationRequested, Is.False, $"The cancellation token should not have been signaled. { processedEvents.Count } events were processed.");

    await processor.StopProcessingAsync(cancellationSource.Token);
    cancellationSource.Cancel();

    // Validate the events that were processed.
    foreach (var sourceEvent in sourceEvents)
    {
        var sourceId = sourceEvent.Properties[EventGenerator.IdPropertyName].ToString();

        Assert.That(processedEvents.TryGetValue(sourceId, out var processedEvent), Is.True, $"The event with custom identifier [{ sourceId }] was not processed.");
        Assert.That(sourceEvent.IsEquivalentTo(processedEvent), $"The event with custom identifier [{ sourceId }] did not match the corresponding processed event.");
    }
}
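// The processor tests in this excerpt reference handler factories that are not included here. The sketches
// below are hypothetical reconstructions inferred from the call sites (names, parameters, and behavior are
// assumptions, not the library's API): an error handler that fails the test on any processing error, and an
// event handler that records processed events by their generated identifier and signals completion once the
// expected count has been observed.
private Func<ProcessErrorEventArgs, Task> CreateAssertingErrorHandler() =>
    args =>
    {
        // Any error surfaced by the processor should fail the test immediately.
        Assert.Fail($"The processor reported an error: { args.Exception }");
        return Task.CompletedTask;
    };

private Func<ProcessEventArgs, Task> CreateEventTrackingHandler(int targetCount,
                                                                ConcurrentDictionary<string, EventData> processedEvents,
                                                                TaskCompletionSource<bool> completionSource,
                                                                CancellationToken cancellationToken,
                                                                Func<ProcessEventArgs, Task> processedEventCallback = default) =>
    async args =>
    {
        // Ignore invocations that carry no event and honor cancellation.
        if (!args.HasEvent)
        {
            return;
        }

        cancellationToken.ThrowIfCancellationRequested();

        // Track the event by its generated identifier and let the caller react, for example to checkpoint.
        var eventId = args.Data.Properties[EventGenerator.IdPropertyName].ToString();
        processedEvents.TryAdd(eventId, args.Data);

        if (processedEventCallback != null)
        {
            await processedEventCallback(args);
        }

        // Signal completion once the expected number of distinct events has been observed.
        if (processedEvents.Count >= targetCount)
        {
            completionSource.TrySetResult(true);
        }
    };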
public void TryAddHonorStatefulFeatures(byte activeFeatures)
{
    var maximumSize = 50;
    var batchEnvelopeSize = 0;
    var capturedSequence = default(int?);
    var capturedGroupId = default(long?);
    var capturedOwnerLevel = default(short?);
    var options = new CreateBatchOptions { MaximumSizeInBytes = maximumSize };
    var mockEnvelope = new Mock<AmqpMessage>();
    var mockEvent = new Mock<AmqpMessage>();

    var mockConverter = new InjectableMockConverter
    {
        CreateBatchFromEventsHandler = (_e, _p) => mockEnvelope.Object,
        CreateMessageFromEventHandler = (_e, _p) =>
        {
            capturedSequence = _e.PendingPublishSequenceNumber;
            capturedGroupId = _e.PendingProducerGroupId;
            capturedOwnerLevel = _e.PendingProducerOwnerLevel;

            return mockEvent.Object;
        }
    };

    mockEnvelope
        .Setup(message => message.SerializedMessageSize)
        .Returns(batchEnvelopeSize);

    mockEvent
        .Setup(message => message.SerializedMessageSize)
        .Returns(maximumSize);

    var batch = new AmqpEventBatch(mockConverter, options, (TransportProducerFeatures)activeFeatures);
    batch.TryAdd(EventGenerator.CreateEvents(1).Single());

    NullConstraint generateConstraint() =>
        ((TransportProducerFeatures)activeFeatures == TransportProducerFeatures.None)
            ? Is.Null
            : Is.Not.Null;

    Assert.That(capturedSequence, generateConstraint(), "The sequence was not set as expected.");
    Assert.That(capturedGroupId, generateConstraint(), "The group identifier was not set as expected.");
    Assert.That(capturedOwnerLevel, generateConstraint(), "The owner level was not set as expected.");
}
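// The AmqpEventBatch tests in this excerpt use an InjectableMockConverter test double that is not shown.
// The following is a hypothetical sketch, assuming the converter exposes overridable CreateBatchFromEvents
// and CreateMessageFromEvent members; the actual AmqpMessageConverter surface may differ.
private class InjectableMockConverter : AmqpMessageConverter
{
    // Delegates injected by each test to observe the events being converted and to supply canned AMQP messages.
    public Func<IReadOnlyCollection<EventData>, string, AmqpMessage> CreateBatchFromEventsHandler { get; set; } = (_e, _p) => null;
    public Func<EventData, string, AmqpMessage> CreateMessageFromEventHandler { get; set; } = (_e, _p) => null;

    public override AmqpMessage CreateBatchFromEvents(IReadOnlyCollection<EventData> source, string partitionKey) =>
        CreateBatchFromEventsHandler(source, partitionKey);

    public override AmqpMessage CreateMessageFromEvent(EventData source, string partitionKey) =>
        CreateMessageFromEventHandler(source, partitionKey);
}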
public async Task ProducerCanPublishBatchesAfterAnException()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var partition = (await producer.GetPartitionIdsAsync()).First();
        var batchOptions = new CreateBatchOptions { PartitionId = partition };

        // Publish a batch to validate that the initial publish works.
        using var firstBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(async () => await producer.SendAsync(firstBatch, cancellationSource.Token), Throws.Nothing, "The first publishing operation was not successful.");

        // Publish an event too large to succeed; this will force the producer to deal with an exception, which should
        // update idempotent state.
        var producerId = (await producer.GetPartitionPublishingPropertiesAsync(partition, cancellationSource.Token)).ProducerGroupId;

        using var badBatch = EventHubsModelFactory.EventDataBatch(
            firstBatch.MaximumSizeInBytes + 1000,
            new List<EventData>(new[] { new EventData(EventGenerator.CreateRandomBody(firstBatch.MaximumSizeInBytes + 1000)) }),
            new CreateBatchOptions { PartitionId = partition });

        Assert.That(async () => await producer.SendAsync(badBatch, cancellationSource.Token), Throws.InstanceOf<EventHubsException>(), "The attempt to publish a too-large event should fail.");

        // Publish a second batch of events; this will prove that the producer recovered from the exception.
        using var secondBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(async () => await producer.SendAsync(secondBatch, cancellationSource.Token), Throws.Nothing, "The second publishing operation was not successful.");

        var newProducerId = (await producer.GetPartitionPublishingPropertiesAsync(partition, cancellationSource.Token)).ProducerGroupId;

        Assert.That(newProducerId, Is.Not.Null, "The producer group identifier should have a value.");
        Assert.That(newProducerId, Is.Not.EqualTo(producerId), "The producer group identifier should have been updated after the exception.");
    }
}
public async Task ProducerIsRejectedWithPartitionOptionsForInvalidState()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = default(string);
        var partitionProperties = default(PartitionPublishingProperties);

        // Create a producer for a small scope that will send some events and read the properties.
        await using (var initialProducer = new EventHubProducerClient(connectionString, options))
        {
            partition = (await initialProducer.GetPartitionIdsAsync(cancellationSource.Token)).Last();
            await initialProducer.SendAsync(EventGenerator.CreateEvents(10), new SendEventOptions { PartitionId = partition }, cancellationSource.Token);
            partitionProperties = await initialProducer.GetPartitionPublishingPropertiesAsync(partition);
        }

        // Create a new producer using the previously read properties to set options for the partition.
        options.PartitionOptions.Add(partition, new PartitionPublishingOptions
        {
            ProducerGroupId = partitionProperties.ProducerGroupId,
            OwnerLevel = partitionProperties.OwnerLevel,
            StartingSequenceNumber = (partitionProperties.LastPublishedSequenceNumber - 5)
        });

        await using var producer = new EventHubProducerClient(connectionString, options);

        Assert.That(async () => await producer.SendAsync(EventGenerator.CreateEvents(10), new SendEventOptions { PartitionId = partition }, cancellationSource.Token),
            Throws.InstanceOf<EventHubsException>().And.Property("Reason").EqualTo(EventHubsException.FailureReason.InvalidClientState));
    }
}
public void TryAddHonorsTheMeasureSequenceNumber(bool measureSequenceNumber)
{
    var maximumSize = 50;
    var batchEnvelopeSize = 0;
    var capturedSequence = default(int?);
    var options = new CreateBatchOptions { MaximumSizeInBytes = maximumSize };
    var mockEnvelope = new Mock<AmqpMessage>();
    var mockEvent = new Mock<AmqpMessage>();

    var mockConverter = new InjectableMockConverter
    {
        CreateBatchFromEventsHandler = (_e, _p) => mockEnvelope.Object,
        CreateMessageFromEventHandler = (_e, _p) =>
        {
            capturedSequence = _e.PendingPublishSequenceNumber;
            return mockEvent.Object;
        }
    };

    mockEnvelope
        .Setup(message => message.SerializedMessageSize)
        .Returns(batchEnvelopeSize);

    mockEvent
        .Setup(message => message.SerializedMessageSize)
        .Returns(maximumSize);

    var batch = new AmqpEventBatch(mockConverter, options, measureSequenceNumber);
    batch.TryAdd(EventGenerator.CreateEvents(1).Single());

    var expectationConstraint = (measureSequenceNumber)
        ? Is.Not.Null
        : Is.Null;

    Assert.That(capturedSequence, expectationConstraint);
}
public void TryAddResetsPublishingState()
{
    var maximumSize = 50;
    var batchEnvelopeSize = 0;
    var capturedEvent = default(EventData);
    var options = new CreateBatchOptions { MaximumSizeInBytes = maximumSize };
    var mockEnvelope = new Mock<AmqpMessage>();
    var mockEvent = new Mock<AmqpMessage>();

    var mockConverter = new InjectableMockConverter
    {
        CreateBatchFromEventsHandler = (_e, _p) => mockEnvelope.Object,
        CreateMessageFromEventHandler = (_e, _p) =>
        {
            capturedEvent = _e;
            return mockEvent.Object;
        }
    };

    mockEnvelope
        .Setup(message => message.SerializedMessageSize)
        .Returns(batchEnvelopeSize);

    mockEvent
        .Setup(message => message.SerializedMessageSize)
        .Returns(maximumSize);

    var batch = new AmqpEventBatch(mockConverter, options, TransportProducerFeatures.IdempotentPublishing);
    batch.TryAdd(EventGenerator.CreateEvents(1).Single());

    Assert.That(capturedEvent.PublishedSequenceNumber, Is.Null, "The final sequence should not have been set.");
    Assert.That(capturedEvent.PendingPublishSequenceNumber, Is.Null, "The pending sequence was not cleared.");
    Assert.That(capturedEvent.PendingProducerGroupId, Is.Null, "The group identifier was not cleared.");
    Assert.That(capturedEvent.PendingProducerOwnerLevel, Is.Null, "The owner level was not cleared.");
}
public void TryAddRemovesTheMeasureSequenceNumber()
{
    var maximumSize = 50;
    var batchEnvelopeSize = 0;
    var capturedEvent = default(EventData);
    var options = new CreateBatchOptions { MaximumSizeInBytes = maximumSize };
    var mockEnvelope = new Mock<AmqpMessage>();
    var mockEvent = new Mock<AmqpMessage>();

    var mockConverter = new InjectableMockConverter
    {
        CreateBatchFromEventsHandler = (_e, _p) => mockEnvelope.Object,
        CreateMessageFromEventHandler = (_e, _p) =>
        {
            capturedEvent = _e;
            return mockEvent.Object;
        }
    };

    mockEnvelope
        .Setup(message => message.SerializedMessageSize)
        .Returns(batchEnvelopeSize);

    mockEvent
        .Setup(message => message.SerializedMessageSize)
        .Returns(maximumSize);

    var batch = new AmqpEventBatch(mockConverter, options, true);
    batch.TryAdd(EventGenerator.CreateEvents(1).Single());

    Assert.That(capturedEvent.PublishedSequenceNumber, Is.Null);
}
public async Task ProducerSequencesEvents()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var partition = (await producer.GetPartitionIdsAsync(cancellationSource.Token)).Last();
        var sendOptions = new SendEventOptions { PartitionId = partition };

        var partitionProperties = await producer.GetPartitionPublishingPropertiesAsync(partition);
        var eventSequenceNumber = partitionProperties.LastPublishedSequenceNumber;
        var events = EventGenerator.CreateEvents(10).ToArray();

        Assert.That(events.Any(item => item.PublishedSequenceNumber.HasValue), Is.False, "Events should start out as unpublished with no sequence number.");

        await producer.SendAsync(events, sendOptions, cancellationSource.Token);
        Assert.That(events.All(item => item.PublishedSequenceNumber.HasValue), Is.True, "Events should be sequenced after publishing.");

        foreach (var item in events)
        {
            Assert.That(item.PublishedSequenceNumber, Is.EqualTo(++eventSequenceNumber), $"The sequence numbers should be contiguous. Event { eventSequenceNumber } was out of order.");
        }
    }
}
public async Task ProcessorClientBeginsWithTheNextEventAfterCheckpointing()
{
    // Setup the environment.
    await using EventHubScope scope = await EventHubScope.CreateAsync(1);
    var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

    // Send a set of events.
    var segmentEventCount = 25;
    var beforeCheckpointEvents = EventGenerator.CreateEvents(segmentEventCount).ToList();
    var afterCheckpointEvents = EventGenerator.CreateEvents(segmentEventCount).ToList();
    var sourceEvents = Enumerable.Concat(beforeCheckpointEvents, afterCheckpointEvents).ToList();
    var checkpointEvent = beforeCheckpointEvents.Last();
    var sentCount = await SendEvents(connectionString, sourceEvents, cancellationSource.Token);

    Assert.That(sentCount, Is.EqualTo(sourceEvents.Count), "Not all of the source events were sent.");

    // Attempt to read back the first half of the events and checkpoint.
    Func<ProcessEventArgs, Task> processedEventCallback = async args =>
    {
        if (args.Data.IsEquivalentTo(checkpointEvent))
        {
            await args.UpdateCheckpointAsync(cancellationSource.Token);
        }
    };

    var processedEvents = new ConcurrentDictionary<string, EventData>();
    var completionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var beforeCheckpointProcessHandler = CreateEventTrackingHandler(segmentEventCount, processedEvents, completionSource, cancellationSource.Token, processedEventCallback);
    var options = new EventProcessorOptions { LoadBalancingUpdateInterval = TimeSpan.FromMilliseconds(250) };
    var storageManager = new InMemoryStorageManager(_ => { });
    var processor = CreateProcessor(scope.ConsumerGroups.First(), connectionString, storageManager, options);

    processor.ProcessErrorAsync += CreateAssertingErrorHandler();
    processor.ProcessEventAsync += beforeCheckpointProcessHandler;

    await processor.StartProcessingAsync(cancellationSource.Token);
    await Task.WhenAny(completionSource.Task, Task.Delay(Timeout.Infinite, cancellationSource.Token));
    Assert.That(cancellationSource.IsCancellationRequested, Is.False, "The cancellation token should not have been signaled.");

    await processor.StopProcessingAsync(cancellationSource.Token);

    // Validate a checkpoint was created and that events were processed.
    var checkpoints = (await storageManager.ListCheckpointsAsync(processor.FullyQualifiedNamespace, processor.EventHubName, processor.ConsumerGroup, cancellationSource.Token))?.ToList();

    Assert.That(checkpoints, Is.Not.Null, "A checkpoint should have been created.");
    Assert.That(checkpoints.Count, Is.EqualTo(1), "A single checkpoint should exist.");
    Assert.That(processedEvents.Count, Is.AtLeast(beforeCheckpointEvents.Count), "All events before the checkpoint should have been processed.");

    // Reset state and start the processor again; it should resume from the event following the checkpoint.
    processedEvents.Clear();
    completionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    processor.ProcessEventAsync -= beforeCheckpointProcessHandler;
    processor.ProcessEventAsync += CreateEventTrackingHandler(segmentEventCount, processedEvents, completionSource, cancellationSource.Token);

    await processor.StartProcessingAsync(cancellationSource.Token);
    await Task.WhenAny(completionSource.Task, Task.Delay(Timeout.Infinite, cancellationSource.Token));
    Assert.That(cancellationSource.IsCancellationRequested, Is.False, "The cancellation token should not have been signaled.");

    await processor.StopProcessingAsync(cancellationSource.Token);
    cancellationSource.Cancel();

    foreach (var sourceEvent in afterCheckpointEvents)
    {
        var sourceId = sourceEvent.Properties[EventGenerator.IdPropertyName].ToString();

        Assert.That(processedEvents.TryGetValue(sourceId, out var processedEvent), Is.True, $"The event with custom identifier [{ sourceId }] was not processed.");
        Assert.That(sourceEvent.IsEquivalentTo(processedEvent), $"The event with custom identifier [{ sourceId }] did not match the corresponding processed event.");
    }
}
public async Task ProcessorClientCanStartFromAnInitialPosition()
{
    // Setup the environment.
    await using EventHubScope scope = await EventHubScope.CreateAsync(1);
    var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

    // Send a set of events.
    var sourceEvents = EventGenerator.CreateEvents(25).ToList();
    var lastSourceEvent = sourceEvents.Last();
    var sentCount = await SendEvents(connectionString, sourceEvents, cancellationSource.Token);

    Assert.That(sentCount, Is.EqualTo(sourceEvents.Count), "Not all of the source events were sent.");

    // Read the initial set back, marking the offset of the last event in the initial set.
    var startingOffset = 0L;

    await using (var consumer = new EventHubConsumerClient(scope.ConsumerGroups.First(), connectionString))
    {
        await foreach (var partitionEvent in consumer.ReadEventsAsync(new ReadEventOptions { MaximumWaitTime = null }, cancellationSource.Token))
        {
            if (partitionEvent.Data.IsEquivalentTo(lastSourceEvent))
            {
                startingOffset = partitionEvent.Data.Offset;
                break;
            }
        }
    }

    // Send the second set of events to be read by the processor.
    sourceEvents = EventGenerator.CreateEvents(20).ToList();
    sentCount = await SendEvents(connectionString, sourceEvents, cancellationSource.Token);

    Assert.That(sentCount, Is.EqualTo(sourceEvents.Count), "Not all of the source events were sent.");

    // Attempt to read back the second set of events.
    var processedEvents = new ConcurrentDictionary<string, EventData>();
    var completionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var options = new EventProcessorOptions { LoadBalancingUpdateInterval = TimeSpan.FromMilliseconds(250) };
    var processor = CreateProcessor(scope.ConsumerGroups.First(), connectionString, options: options);

    processor.PartitionInitializingAsync += args =>
    {
        args.DefaultStartingPosition = EventPosition.FromOffset(startingOffset, false);
        return Task.CompletedTask;
    };

    processor.ProcessErrorAsync += CreateAssertingErrorHandler();
    processor.ProcessEventAsync += CreateEventTrackingHandler(sentCount, processedEvents, completionSource, cancellationSource.Token);

    await processor.StartProcessingAsync(cancellationSource.Token);
    await Task.WhenAny(completionSource.Task, Task.Delay(Timeout.Infinite, cancellationSource.Token));
    Assert.That(cancellationSource.IsCancellationRequested, Is.False, "The cancellation token should not have been signaled.");

    await processor.StopProcessingAsync(cancellationSource.Token);
    cancellationSource.Cancel();

    // Validate the events that were processed.
    foreach (var sourceEvent in sourceEvents)
    {
        var sourceId = sourceEvent.Properties[EventGenerator.IdPropertyName].ToString();

        Assert.That(processedEvents.TryGetValue(sourceId, out var processedEvent), Is.True, $"The event with custom identifier [{ sourceId }] was not processed.");
        Assert.That(sourceEvent.IsEquivalentTo(processedEvent), $"The event with custom identifier [{ sourceId }] did not match the corresponding processed event.");
    }
}
public async Task ProcessorClientCreatesOwnership()
{
    // Setup the environment.
    var partitionCount = 2;
    var partitionIds = new HashSet<string>();

    await using EventHubScope scope = await EventHubScope.CreateAsync(partitionCount);
    var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

    // Discover the partitions.
    await using (var producer = new EventHubProducerClient(connectionString))
    {
        foreach (var partitionId in (await producer.GetPartitionIdsAsync()))
        {
            partitionIds.Add(partitionId);
        }
    }

    // Send a set of events.
    var sourceEvents = EventGenerator.CreateEvents(200).ToList();
    var sentCount = await SendEvents(connectionString, sourceEvents, cancellationSource.Token);

    Assert.That(sentCount, Is.EqualTo(sourceEvents.Count), "Not all of the source events were sent.");

    // Attempt to read back the events.
    var processedEvents = new ConcurrentDictionary<string, EventData>();
    var completionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var storageManager = new InMemoryStorageManager(_ => { });
    var options = new EventProcessorOptions { LoadBalancingUpdateInterval = TimeSpan.FromMilliseconds(250) };
    var processor = CreateProcessorWithIdentity(scope.ConsumerGroups.First(), scope.EventHubName, storageManager, options);

    processor.ProcessErrorAsync += CreateAssertingErrorHandler();
    processor.ProcessEventAsync += CreateEventTrackingHandler(sentCount, processedEvents, completionSource, cancellationSource.Token);

    await processor.StartProcessingAsync(cancellationSource.Token);
    await Task.WhenAny(completionSource.Task, Task.Delay(Timeout.Infinite, cancellationSource.Token));

    Assert.That(cancellationSource.IsCancellationRequested, Is.False, $"The cancellation token should not have been signaled. { processedEvents.Count } events were processed.");

    await processor.StopProcessingAsync(cancellationSource.Token);
    cancellationSource.Cancel();

    // Validate the ownership records that were created.
    var ownership = (await storageManager.ListOwnershipAsync(EventHubsTestEnvironment.Instance.FullyQualifiedNamespace, scope.EventHubName, scope.ConsumerGroups.First(), cancellationSource.Token))?.ToList();

    Assert.That(ownership, Is.Not.Null, "The ownership list should have been returned.");
    Assert.That(ownership.Count, Is.AtLeast(1), "At least one partition should have been owned.");

    foreach (var partitionOwnership in ownership)
    {
        Assert.That(partitionIds.Contains(partitionOwnership.PartitionId), Is.True, $"The partition `{ partitionOwnership.PartitionId }` is not valid for the Event Hub.");
        Assert.That(partitionOwnership.OwnerIdentifier, Is.Empty, "Ownership should have been relinquished when the processor was stopped.");
    }
}