/// <summary>
///   Verifies that an event processor resumes reading from the position recorded in a
///   checkpoint, skipping events enqueued before the checkpointed sequence number.
/// </summary>
///
public async Task EventProcessorCanReceiveFromCheckpointedEventPosition()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            int receivedEventsCount = 0;

            // Send some events.

            var expectedEventsCount = 20;
            var dummyEvent = new EventData(Encoding.UTF8.GetBytes("I'm dummy."));
            long? checkpointedSequenceNumber = default;

            var partitionId = (await connection.GetPartitionIdsAsync(DefaultRetryPolicy)).First();

            await using (var producer = new EventHubProducerClient(connectionString))
            await using (var consumer = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, connection))
            await using (var receiver = consumer.CreatePartitionReceiver(partitionId, EventPosition.Earliest))
            {
                // Send a few dummy events.  We are not expecting to receive these.

                var dummyEventsCount = 30;

                for (int i = 0; i < dummyEventsCount; i++)
                {
                    await producer.SendAsync(dummyEvent);
                }

                // Receive the events; because there is some non-determinism in the messaging flow, the
                // sent events may not be immediately available.  Allow for a small number of attempts to receive, in order
                // to account for availability delays.

                var receivedEvents = new List<EventData>();
                var index = 0;

                while ((receivedEvents.Count < dummyEventsCount) && (++index < ReceiveRetryLimit))
                {
                    receivedEvents.AddRange(await receiver.ReceiveAsync(dummyEventsCount + 10, TimeSpan.FromMilliseconds(25)));
                }

                Assert.That(receivedEvents.Count, Is.EqualTo(dummyEventsCount));

                // The checkpoint will point at the last of the dummy events, so only the
                // batch sent after this point should be observed by the processor.

                checkpointedSequenceNumber = receivedEvents.Last().SequenceNumber;

                // Send the events we expect to receive.

                for (int i = 0; i < expectedEventsCount; i++)
                {
                    await producer.SendAsync(dummyEvent);
                }
            }

            // Create a partition manager and add an ownership with a checkpoint in it.

            var partitionManager = new InMemoryPartitionManager();

            await partitionManager.ClaimOwnershipAsync(new List<PartitionOwnership>()
            {
                new PartitionOwnership(connection.FullyQualifiedNamespace, connection.EventHubName,
                    EventHubConsumerClient.DefaultConsumerGroupName, "ownerIdentifier", partitionId,
                    sequenceNumber: checkpointedSequenceNumber, lastModifiedTime: DateTimeOffset.UtcNow)
            });

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connection,
                partitionManager,
                onProcessEvent: processorEvent =>
                {
                    if (processorEvent.Data != null)
                    {
                        // Interlocked because processing callbacks may run concurrently.
                        Interlocked.Increment(ref receivedEventsCount);
                    }
                }
            );

            eventProcessorManager.AddEventProcessors(1);

            // Start the event processors.

            await eventProcessorManager.StartAllAsync();

            // Make sure the event processors have enough time to stabilize and receive events.

            await eventProcessorManager.WaitStabilization();

            // Stop the event processors.

            await eventProcessorManager.StopAllAsync();

            // Validate results.  Only the events sent after the checkpoint should have been processed.

            Assert.That(receivedEventsCount, Is.EqualTo(expectedEventsCount));
        }
    }
}
/// <summary>
///   Verifies that an event processor receives every published event, associated with the
///   correct partition and in the order the events were sent.
/// </summary>
///
public async Task PartitionProcessorProcessEventsAsyncReceivesAllEvents()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            // Maps each partition identifier to the events observed for that partition.
            var allReceivedEvents = new ConcurrentDictionary<string, List<EventData>>();

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connection,
                onProcessEvent: processorEvent =>
                {
                    if (processorEvent.Data != null)
                    {
                        allReceivedEvents.AddOrUpdate
                        (
                            processorEvent.Context.PartitionId,
                            partitionId => new List<EventData>() { processorEvent.Data },
                            (partitionId, list) => { list.Add(processorEvent.Data); return (list); }
                        );
                    }
                }
            );

            eventProcessorManager.AddEventProcessors(1);

            // Send some events.

            var partitionIds = await connection.GetPartitionIdsAsync(DefaultRetryPolicy);
            var expectedEvents = new Dictionary<string, List<EventData>>();

            foreach (var partitionId in partitionIds)
            {
                // Send a similar set of events for every partition.

                expectedEvents[partitionId] = new List<EventData>
                {
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: event processor tests are so long.")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: there are so many of them.")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: will they ever end?")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: let's add a few more messages.")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: this is a monologue.")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: loneliness is what I feel.")),
                    new EventData(Encoding.UTF8.GetBytes($"{ partitionId }: the end has come."))
                };

                await using (var producer = new EventHubProducerClient(connection))
                {
                    await producer.SendAsync(expectedEvents[partitionId], new SendOptions { PartitionId = partitionId });
                }
            }

            // Start the event processors.

            await eventProcessorManager.StartAllAsync();

            // Make sure the event processors have enough time to stabilize and receive events.

            await eventProcessorManager.WaitStabilization();

            // Stop the event processors.

            await eventProcessorManager.StopAllAsync();

            // Validate results.  Make sure we received every event with the correct partition context,
            // in the order they were sent.

            foreach (var partitionId in partitionIds)
            {
                Assert.That(allReceivedEvents.TryGetValue(partitionId, out List<EventData> partitionReceivedEvents), Is.True, $"{ partitionId }: there should have been a set of events received.");
                Assert.That(partitionReceivedEvents.Count, Is.EqualTo(expectedEvents[partitionId].Count), $"{ partitionId }: amount of received events should match.");

                var index = 0;

                foreach (EventData receivedEvent in partitionReceivedEvents)
                {
                    Assert.That(receivedEvent.IsEquivalentTo(expectedEvents[partitionId][index]), Is.True, $"{ partitionId }: the received event at index { index } did not match the sent set of events.");
                    ++index;
                }
            }

            // No partition beyond the known set should have produced events.
            Assert.That(allReceivedEvents.Keys.Count, Is.EqualTo(partitionIds.Count()));
        }
    }
}
/// <summary>
///   Verifies that an event processor can be stopped and restarted, reprocessing the full
///   set of events on the second run (no checkpoint store is configured here).
/// </summary>
///
public async Task EventProcessorCanStartAgainAfterStopping()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            int receivedEventsCount = 0;

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connection,
                onProcessEvent: processorEvent =>
                {
                    if (processorEvent.Data != null)
                    {
                        // Interlocked because processing callbacks may run concurrently.
                        Interlocked.Increment(ref receivedEventsCount);
                    }
                }
            );

            eventProcessorManager.AddEventProcessors(1);

            // Send some events.

            var expectedEventsCount = 20;

            await using (var producer = new EventHubProducerClient(connection))
            {
                var dummyEvent = new EventData(Encoding.UTF8.GetBytes("I'm dummy."));

                for (int i = 0; i < expectedEventsCount; i++)
                {
                    await producer.SendAsync(dummyEvent);
                }
            }

            // We'll start and stop the event processors twice.  This way, we can assert they will behave
            // the same way both times, reprocessing all events in the second run.

            for (int i = 0; i < 2; i++)
            {
                // Reset the count so each iteration is validated independently.
                receivedEventsCount = 0;

                // Start the event processors.

                await eventProcessorManager.StartAllAsync();

                // Make sure the event processors have enough time to stabilize and receive events.

                await eventProcessorManager.WaitStabilization();

                // Stop the event processors.

                await eventProcessorManager.StopAllAsync();

                // Validate results.

                Assert.That(receivedEventsCount, Is.EqualTo(expectedEventsCount), $"Events should match in iteration { i + 1 }.");
            }
        }
    }
}
/// <summary>
///   Verifies that creating a transport consumer validates the partition argument,
///   rejecting invalid values before any service interaction takes place.
/// </summary>
///
public void CreateConsumerRequiresPartition(string partition)
{
    var fakeConnectionString = "Endpoint=sb://not-real.servicebus.windows.net/;SharedAccessKeyName=DummyKey;SharedAccessKey=[not_real]";
    var connection = new EventHubConnection(fakeConnectionString, "fake", new EventHubConnectionOptions());

    Assert.That(() => connection.CreateTransportConsumer("someGroup", partition, EventPosition.Earliest), Throws.InstanceOf<ArgumentException>());
}
/// <summary>
///   Provides a test shim for retrieving the transport client contained by an
///   Event Hub client instance.
/// </summary>
///
/// <param name="client">The client to retrieve the transport client of.</param>
///
/// <returns>The transport client contained by the Event Hub client.</returns>
///
private TransportClient GetTransportClient(EventHubConnection client)
{
    // The inner client is intentionally non-public; reflection is the only way for
    // tests to observe it without changing the production surface area.
    var innerClientProperty = typeof(EventHubConnection).GetProperty("InnerClient", BindingFlags.Instance | BindingFlags.NonPublic);
    return innerClientProperty.GetValue(client) as TransportClient;
}
/// <summary>
///   Verifies that, when no events are available, the event processor invokes its processing
///   callback once per maximum receive wait time interval for each owned partition.
/// </summary>
///
/// <param name="maximumWaitTimeInSecs">The maximum receive wait time, in seconds, to configure on the processor.</param>
///
public async Task EventProcessorWaitsMaximumReceiveWaitTimeForEvents(int maximumWaitTimeInSecs)
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(2))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            // Maps each partition to the UTC timestamps at which callbacks fired for it.
            var timestamps = new ConcurrentDictionary<string, List<DateTimeOffset>>();

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connection,
                options: new EventProcessorClientOptions { MaximumReceiveWaitTime = TimeSpan.FromSeconds(maximumWaitTimeInSecs) },
                onInitialize: initializationContext => timestamps.TryAdd(initializationContext.Context.PartitionId, new List<DateTimeOffset> { DateTimeOffset.UtcNow }),
                onProcessEvent: processorEvent => timestamps.AddOrUpdate
                (
                    // The key already exists, so the 'addValue' factory will never be called.
                    processorEvent.Context.PartitionId,
                    partitionId => null,
                    (partitionId, list) => { list.Add(DateTimeOffset.UtcNow); return (list); }
                )
            );

            eventProcessorManager.AddEventProcessors(1);

            // Start the event processors.

            await eventProcessorManager.StartAllAsync();

            // Make sure the event processors have enough time to stabilize.

            await eventProcessorManager.WaitStabilization();

            // Stop the event processors.

            await eventProcessorManager.StopAllAsync();

            // Validate results.  The elapsed time between every pair of consecutive timestamps
            // should be close to the configured maximum wait time.

            foreach (KeyValuePair<string, List<DateTimeOffset>> kvp in timestamps)
            {
                var partitionId = kvp.Key;
                List<DateTimeOffset> partitionTimestamps = kvp.Value;

                Assert.That(partitionTimestamps.Count, Is.GreaterThan(1), $"{ partitionId }: more time stamp samples were expected.");

                // BUGFIX: the original loop incremented 'index' both in the 'for' header and in
                // the body ('++index'), which skipped validation of every other consecutive pair.
                for (int index = 1; index < partitionTimestamps.Count; index++)
                {
                    var elapsedTime = partitionTimestamps[index].Subtract(partitionTimestamps[index - 1]).TotalSeconds;

                    Assert.That(elapsedTime, Is.GreaterThan(maximumWaitTimeInSecs - 0.1), $"{ partitionId }: elapsed time between indexes { index - 1 } and { index } was too short.");
                    Assert.That(elapsedTime, Is.LessThan(maximumWaitTimeInSecs + 5), $"{ partitionId }: elapsed time between indexes { index - 1 } and { index } was too long.");
                }
            }
        }
    }
}
/// <summary>
///   Runs the sample using the specified Event Hubs connection information.
/// </summary>
///
/// <param name="connectionString">The connection string for the Event Hubs namespace that the sample should target.</param>
/// <param name="eventHubName">The name of the Event Hub, sometimes known as its path, that the sample should run against.</param>
///
public async Task RunAsync(string connectionString, string eventHubName)
{
    string firstPartition;

    // In this example, we will make use of multiple clients.  Because clients are typically responsible for managing their own connection to the
    // Event Hubs service, each will implicitly create their own connection.  In this example, we will create a connection that may be shared amongst
    // clients in order to illustrate connection sharing.  Because we are explicitly creating the connection, we assume responsibility for managing its
    // lifespan and ensuring that it is properly closed or disposed when we are done using it.

    await using (var eventHubConnection = new EventHubConnection(connectionString, eventHubName))
    {
        // Our initial consumer will begin watching the partition at the very end, reading only new events that we will publish for it.  Before we can publish
        // the events and have them observed, we will need to ask the consumer to perform an operation,
        // because it opens its connection only when it needs to.
        //
        // We'll begin to iterate on the partition using a small wait time, so that control will return to our loop even when
        // no event is available.  For the first call, we'll publish so that we can receive them.
        //
        // Each event that the initial consumer reads will have attributes set that describe the event's place in the
        // partition, such as its offset, sequence number, and the date/time that it was enqueued.  These attributes can be
        // used to create a new consumer that begins consuming at a known position.
        //
        // With Event Hubs, it is the responsibility of an application consuming events to keep track of those that it has processed,
        // and to manage where in the partition the consumer begins reading events.  This is done by using the position information to track
        // state, commonly known as "creating a checkpoint."
        //
        // The goal is to preserve the position of an event in some form of durable state, such as writing it to a database, so that if the
        // consuming application crashes or is otherwise restarted, it can retrieve that checkpoint information and use it to create a consumer that
        // begins reading at the position where it left off.
        //
        // It is important to note that there is potential for a consumer to process an event and be unable to preserve the checkpoint.  A well-designed
        // consumer must be able to deal with processing the same event multiple times without it causing data corruption or otherwise creating issues.
        // Event Hubs, like most event streaming systems, guarantees "at least once" delivery; even in cases where the consumer does not experience a restart,
        // there is a small possibility that the service will return an event multiple times.
        //
        // In this example, we will publish a batch of events to be received with an initial consumer.  The third event that is consumed will be captured
        // and another consumer will use its attributes to start reading the event that follows, consuming the same set of events that our initial consumer
        // read, skipping over the first three.

        EventData thirdEvent;
        int eventBatchSize = 50;

        await using (var initialConsumerClient = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, eventHubConnection))
        {
            // We will start by using the consumer client inspect the Event Hub and select a partition to operate against to ensure that events are being
            // published and read from the same partition.

            firstPartition = (await initialConsumerClient.GetPartitionIdsAsync()).First();

            // We will consume the events until all of the published events have been received.

            List<EventData> receivedEvents = new List<EventData>();
            bool wereEventsPublished = false;

            CancellationTokenSource cancellationSource = new CancellationTokenSource();
            cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

            await foreach (PartitionEvent currentEvent in initialConsumerClient.ReadEventsFromPartitionAsync(firstPartition, EventPosition.Latest, TimeSpan.FromMilliseconds(150), cancellationSource.Token))
            {
                // The first iteration publishes the batch; the consumer was positioned at "Latest"
                // beforehand, so it will observe everything published from this point on.

                if (!wereEventsPublished)
                {
                    EventData[] eventBatch = new EventData[eventBatchSize];

                    for (int index = 0; index < eventBatchSize; ++index)
                    {
                        eventBatch[index] = new EventData(Encoding.UTF8.GetBytes($"I am event #{ index }"));
                    }

                    await using var producerClient = new EventHubProducerClient(connectionString, eventHubName, new EventHubProducerClientOptions { PartitionId = firstPartition });
                    await producerClient.SendAsync(eventBatch);
                    wereEventsPublished = true;

                    Console.WriteLine($"The event batch with { eventBatchSize } events has been published.");
                }

                // Because publishing and receiving events is asynchronous, the events that we published may not
                // be immediately available for our consumer to see, so we'll have to guard against an empty event being sent as
                // punctuation if our actual event is not available within the waiting time period.

                if (currentEvent.Data != null)
                {
                    receivedEvents.Add(currentEvent.Data);

                    if (receivedEvents.Count >= eventBatchSize)
                    {
                        break;
                    }
                }
            }

            // Print out the events that we received.

            Console.WriteLine();
            Console.WriteLine($"The initial consumer processed { receivedEvents.Count } events of the { eventBatchSize } that were published. { eventBatchSize } were expected.");

            foreach (EventData eventData in receivedEvents)
            {
                // The body of our event was an encoded string; we'll recover the
                // message by reversing the encoding process.

                string message = Encoding.UTF8.GetString(eventData.Body.ToArray());
                Console.WriteLine($"\tMessage: \"{ message }\"");
            }

            // Remember the third event that was consumed.

            thirdEvent = receivedEvents[2];
        }

        // At this point, our initial consumer client has passed its "using" scope and has been safely disposed of.
        //
        // Create a new consumer beginning using the third event as the last sequence number processed; this new consumer will begin reading at the next available
        // sequence number, allowing it to read the set of published events beginning with the fourth one.
        //
        // Because our second consumer will begin watching the partition at a specific event, there is no need to ask for an initial operation to set our place; when
        // we begin iterating, the consumer will locate the proper place in the partition to read from.

        await using (var newConsumerClient = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, eventHubConnection))
        {
            // We will consume the events using the new consumer until all of the published events have been received.

            CancellationTokenSource cancellationSource = new CancellationTokenSource();
            cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

            int expectedCount = (eventBatchSize - 3);
            var receivedEvents = new List<EventData>();

            await foreach (PartitionEvent currentEvent in newConsumerClient.ReadEventsFromPartitionAsync(firstPartition, EventPosition.FromSequenceNumber(thirdEvent.SequenceNumber.Value), cancellationSource.Token))
            {
                receivedEvents.Add(currentEvent.Data);

                if (receivedEvents.Count >= expectedCount)
                {
                    break;
                }
            }

            // Print out the events that we received.

            Console.WriteLine();
            Console.WriteLine();
            Console.WriteLine($"The new consumer processed { receivedEvents.Count } events of the { eventBatchSize } that were published. { expectedCount } were expected.");

            foreach (EventData eventData in receivedEvents)
            {
                // The body of our event was an encoded string; we'll recover the
                // message by reversing the encoding process.

                string message = Encoding.UTF8.GetString(eventData.Body.ToArray());
                Console.WriteLine($"\tMessage: \"{ message }\"");
            }
        }
    }

    // At this point, our clients and connection have passed their "using" scope and have safely been disposed of.  We
    // have no further obligations.

    Console.WriteLine();
}
/// <summary>
///   Verifies that creating a transport consumer requires an event position,
///   throwing before any service interaction when one is not supplied.
/// </summary>
///
public void CreateConsumerRequiresEventPosition()
{
    var fakeConnectionString = "Endpoint=sb://not-real.servicebus.windows.net/;SharedAccessKeyName=DummyKey;SharedAccessKey=[not_real]";
    var connection = new EventHubConnection(fakeConnectionString, "fake", new EventHubConnectionOptions());

    Assert.That(() => connection.CreateTransportConsumer(EventHubConsumerClient.DefaultConsumerGroupName, "123", null), Throws.ArgumentNullException);
}
/// <summary>
///   Verifies that creating a transport consumer validates the consumer group argument,
///   rejecting invalid values before any service interaction takes place.
/// </summary>
///
public void CreateConsumerRequiresConsumerGroup(string consumerGroup)
{
    var fakeConnectionString = "Endpoint=sb://not-real.servicebus.windows.net/;SharedAccessKeyName=DummyKey;SharedAccessKey=[not_real]";
    var connection = new EventHubConnection(fakeConnectionString, "fake", new EventHubConnectionOptions());

    Assert.That(() => connection.CreateTransportConsumer(consumerGroup, "partition1", EventPosition.Earliest, Mock.Of<EventHubsRetryPolicy>()), Throws.InstanceOf<ArgumentException>());
}
/// <summary>
///   Verifies that processing an event and calling for a checkpoint results in
///   exactly one ownership/checkpoint record in the storage manager.
/// </summary>
///
public async Task PartitionProcessorCanCreateACheckpoint()
{
    await using EventHubScope scope = await EventHubScope.CreateAsync(1);
    var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

    await using var connection = new EventHubConnection(connectionString);

    // Send some events.

    EventData lastEvent;

    await using var producer = new EventHubProducerClient(connection);
    await using var consumer = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, connectionString);

    // Send a few events.  We are only interested in the last one of them.

    var dummyEventsCount = 10;

    using var dummyBatch = await producer.CreateBatchAsync();

    for (int i = 0; i < dummyEventsCount; i++)
    {
        dummyBatch.TryAdd(new EventData(Encoding.UTF8.GetBytes(eventBody)));
    }

    await producer.SendAsync(dummyBatch);

    var receivedEvents = new List<EventData>();

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

    await foreach (var evt in consumer.ReadEventsAsync(new ReadEventOptions { MaximumWaitTime = TimeSpan.FromSeconds(30) }, cancellationSource.Token))
    {
        receivedEvents.Add(evt.Data);

        if (receivedEvents.Count == dummyEventsCount)
        {
            break;
        }
    }

    Assert.That(receivedEvents.Count, Is.EqualTo(dummyEventsCount));

    // Captured for parity with the sibling checkpoint tests; the checkpoint contents
    // themselves are not validated in this test, only that one was created.
    lastEvent = receivedEvents.Last();

    // Create a storage manager so we can retrieve the created checkpoint from it.

    var storageManager = new MockCheckPointStorage();

    // Create the event processor manager to manage our event processors.

    var eventProcessorManager = new EventProcessorManager
    (
        EventHubConsumerClient.DefaultConsumerGroupName,
        connectionString,
        storageManager,
        onProcessEvent: eventArgs =>
        {
            if (eventArgs.Data != null)
            {
                // NOTE(review): fire-and-forget; the checkpoint task is not awaited here.
                eventArgs.UpdateCheckpointAsync();
            }
        }
    );

    eventProcessorManager.AddEventProcessors(1);

    // Start the event processors.

    await eventProcessorManager.StartAllAsync();

    // Make sure the event processors have enough time to stabilize and receive events.

    await eventProcessorManager.WaitStabilization();

    // Stop the event processors.

    await eventProcessorManager.StopAllAsync();

    // Validate results.

    IEnumerable<PartitionOwnership> ownershipEnumerable = await storageManager.ListOwnershipAsync(connection.FullyQualifiedNamespace, connection.EventHubName, EventHubConsumerClient.DefaultConsumerGroupName);

    Assert.That(ownershipEnumerable, Is.Not.Null);

    // BUGFIX: IEnumerable<T> exposes no 'Count' property; the LINQ Count() extension
    // must be invoked (the bare method group did not compile).
    Assert.That(ownershipEnumerable.Count(), Is.EqualTo(1));
}
/// <summary>
///   Initializes a new instance of the <see cref="ShortWaitTimeMock"/> class.
///   All construction behavior is delegated to the base event processor; this type
///   exists only to override timing-related members for test purposes.
/// </summary>
///
/// <param name="consumerGroup">The name of the consumer group this event processor is associated with.  Events are read in the context of this group.</param>
/// <param name="partitionManager">Interacts with the storage system with responsibility for creation of checkpoints and for ownership claim.</param>
/// <param name="connection">The client used to interact with the Azure Event Hubs service.</param>
/// <param name="options">The set of options to use for this event processor.</param>
///
public ShortWaitTimeMock(string consumerGroup,
                         PartitionManager partitionManager,
                         EventHubConnection connection,
                         EventProcessorClientOptions options) : base(consumerGroup, partitionManager, connection, options)
{
}
/// <summary>
///   Verifies that, when no events are available, the event processor invokes its processing
///   callback once per maximum wait time interval for each owned partition.
/// </summary>
///
/// <param name="maximumWaitTimeInSecs">The maximum wait time, in seconds, to configure on the processor.</param>
///
public async Task EventProcessorWaitsMaximumWaitTimeForEvents(int maximumWaitTimeInSecs)
{
    await using EventHubScope scope = await EventHubScope.CreateAsync(2);
    var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

    await using var connection = new EventHubConnection(connectionString);

    // Maps each partition to the UTC timestamps at which callbacks fired for it.
    var timestamps = new ConcurrentDictionary<string, ConcurrentBag<DateTimeOffset>>();

    using var cancellationSource = new CancellationTokenSource();
    cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

    var receivedCount = 0;

    // Create the event processor manager to manage our event processors.

    var eventProcessorManager = new EventProcessorManager
    (
        EventHubConsumerClient.DefaultConsumerGroupName,
        connectionString,
        clientOptions: new EventProcessorClientOptions { MaximumWaitTime = TimeSpan.FromSeconds(maximumWaitTimeInSecs) },
        onInitialize: eventArgs => timestamps.TryAdd(eventArgs.PartitionId, new ConcurrentBag<DateTimeOffset> { DateTimeOffset.UtcNow }),
        onProcessEvent: eventArgs =>
        {
            timestamps[eventArgs.Partition.PartitionId].Add(DateTimeOffset.UtcNow);

            // BUGFIX: the handler may run concurrently for the two partitions; use an
            // interlocked increment rather than a racy 'receivedCount++'.
            if (Interlocked.Increment(ref receivedCount) >= 5)
            {
                cancellationSource.Cancel();
            }
        }
    );

    eventProcessorManager.AddEventProcessors(1);

    // Start the event processors.

    await eventProcessorManager.StartAllAsync();

    // Make sure the event processors have enough time to receive some events.

    try
    {
        await Task.Delay(Timeout.Infinite, cancellationSource.Token);
    }
    catch (TaskCanceledException)
    {
        /*expected*/
    }

    // Stop the event processors.

    await eventProcessorManager.StopAllAsync();

    // Validate results.  The elapsed time between every pair of consecutive timestamps
    // should be close to the configured maximum wait time.

    foreach (KeyValuePair<string, ConcurrentBag<DateTimeOffset>> kvp in timestamps)
    {
        var partitionId = kvp.Key;
        var partitionTimestamps = kvp.Value.ToList();
        partitionTimestamps.Sort();

        Assert.That(partitionTimestamps.Count, Is.GreaterThan(1), $"{ partitionId }: more time stamp samples were expected.");

        // BUGFIX: the original loop incremented 'index' both in the 'for' header and in
        // the body ('++index'), which skipped validation of every other consecutive pair.
        for (int index = 1; index < partitionTimestamps.Count; index++)
        {
            var elapsedTime = partitionTimestamps[index].Subtract(partitionTimestamps[index - 1]).TotalSeconds;

            Assert.That(elapsedTime, Is.GreaterThan(maximumWaitTimeInSecs - 0.1), $"{ partitionId }: elapsed time between indexes { index - 1 } and { index } was too short.");
            Assert.That(elapsedTime, Is.LessThan(maximumWaitTimeInSecs + 5), $"{ partitionId }: elapsed time between indexes { index - 1 } and { index } was too long.");
        }
    }
}
/// <summary>
///   Verifies that an event processor honors a default starting position supplied during
///   partition initialization, reading only events enqueued at or after that position.
/// </summary>
///
public async Task EventProcessorCanReceiveFromSpecifiedInitialEventPosition()
{
    await using EventHubScope scope = await EventHubScope.CreateAsync(2);
    var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

    await using var connection = new EventHubConnection(connectionString);

    int receivedEventsCount = 0;

    // Send some events.

    var expectedEventsCount = 20;
    var firstBatchEventCount = 30;
    DateTimeOffset enqueuedTime = DateTimeOffset.MinValue;

    await using var producer = new EventHubProducerClient(connection);

    // Send a few dummy events.  We are not expecting to receive these.

    using (var dummyBatch = await producer.CreateBatchAsync())
    {
        for (int i = 0; i < firstBatchEventCount; i++)
        {
            dummyBatch.TryAdd(new EventData(Encoding.UTF8.GetBytes(firstBatchBody)));
        }

        await producer.SendAsync(dummyBatch);
    }

    // Wait a reasonable amount of time so there is a time gap between the first and second batch.

    await Task.Delay(2000);

    // Send the events we expect to receive.

    using (var dummyBatch = await producer.CreateBatchAsync())
    {
        for (int i = 0; i < expectedEventsCount; i++)
        {
            dummyBatch.TryAdd(new EventData(Encoding.UTF8.GetBytes(eventBody)));
        }

        await producer.SendAsync(dummyBatch);
    }

    // Create the event processor manager to manage the event processor which will receive all events and set the enqueuedTime of the latest event from the first batch.

    using var firstBatchCancellationSource = new CancellationTokenSource();
    firstBatchCancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

    var receivedFromFirstBatch = 0;

    var eventProcessorManager = new EventProcessorManager
    (
        EventHubConsumerClient.DefaultConsumerGroupName,
        connectionString,
        onInitialize: eventArgs => eventArgs.DefaultStartingPosition = EventPosition.Earliest,
        onProcessEvent: eventArgs =>
        {
            if (eventArgs.Data != null)
            {
                var dataAsString = Encoding.UTF8.GetString(eventArgs.Data.Body.Span.ToArray());

                if (dataAsString == firstBatchBody)
                {
                    // Track the latest enqueued time seen among first-batch events; this
                    // becomes the starting position for the second processor run.
                    enqueuedTime = enqueuedTime > eventArgs.Data.EnqueuedTime ? enqueuedTime : eventArgs.Data.EnqueuedTime;
                    receivedFromFirstBatch++;

                    if (receivedFromFirstBatch == firstBatchEventCount)
                    {
                        firstBatchCancellationSource.Cancel();
                    }
                }
            }
        }
    );

    eventProcessorManager.AddEventProcessors(1);

    // Start the event processors.

    await eventProcessorManager.StartAllAsync();

    // Wait for the event processors to receive events.

    try
    {
        await Task.Delay(Timeout.Infinite, firstBatchCancellationSource.Token);
    }
    catch (TaskCanceledException)
    {
        /*expected*/
    }

    // Stop the event processors.

    await eventProcessorManager.StopAllAsync();

    // Validate that we set at least one enqueuedTime

    Assert.That(enqueuedTime, Is.GreaterThan(DateTimeOffset.MinValue));

    // Create the event processor manager to manage the event processor which will receive all events FromEnqueuedTime of enqueuedTime.

    using var secondBatchCancellationSource = new CancellationTokenSource();
    secondBatchCancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

    var eventProcessorManager2 = new EventProcessorManager
    (
        EventHubConsumerClient.DefaultConsumerGroupName,
        connectionString,
        onInitialize: eventArgs => eventArgs.DefaultStartingPosition = EventPosition.FromEnqueuedTime(enqueuedTime),
        onProcessEvent: eventArgs =>
        {
            if (eventArgs.Data != null)
            {
                Interlocked.Increment(ref receivedEventsCount);

                if (receivedEventsCount >= expectedEventsCount)
                {
                    secondBatchCancellationSource.Cancel();
                }
            }
        }
    );

    eventProcessorManager2.AddEventProcessors(1);

    // Start the event processors.

    await eventProcessorManager2.StartAllAsync();

    // Wait for the event processors to receive events.

    try
    {
        await Task.Delay(Timeout.Infinite, secondBatchCancellationSource.Token);
    }
    catch (TaskCanceledException)
    {
        /*expected*/
    }

    // Stop the event processors.

    await eventProcessorManager2.StopAllAsync();

    // Validate results.  Only the second batch, enqueued at or after 'enqueuedTime', should be counted.

    Assert.That(receivedEventsCount, Is.EqualTo(expectedEventsCount));
}
/// <summary>
///   Initializes a new instance of the <see cref="IdempotentProducer" /> class.
///   All construction behavior is delegated to the base producer type.
/// </summary>
///
/// <param name="connection">The <see cref="EventHubConnection" /> connection to use for communication with the Event Hubs service.</param>
/// <param name="clientOptions">A set of options to apply when configuring the producer.  When not provided, defaults are used.</param>
///
public IdempotentProducer(EventHubConnection connection,
                          IdempotentProducerOptions clientOptions = default) : base(connection, clientOptions)
{
}
/// <summary>
///   Verifies that processing an event and calling for a checkpoint results in a single
///   ownership record whose offset and sequence number match the last processed event.
/// </summary>
///
public async Task PartitionProcessorCanCreateACheckpoint()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            // Send some events.

            EventData lastEvent;
            var dummyEvent = new EventData(Encoding.UTF8.GetBytes("I'm dummy."));
            var partitionId = (await connection.GetPartitionIdsAsync(DefaultRetryPolicy)).First();

            await using (var producer = new EventHubProducerClient(connection))
            await using (var consumer = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, connectionString))
            await using (var receiver = consumer.CreatePartitionReceiver(partitionId, EventPosition.Earliest))
            {
                // Send a few events.  We are only interested in the last one of them.

                var dummyEventsCount = 10;

                for (int i = 0; i < dummyEventsCount; i++)
                {
                    await producer.SendAsync(dummyEvent);
                }

                // Receive the events; because there is some non-determinism in the messaging flow, the
                // sent events may not be immediately available.  Allow for a small number of attempts to receive, in order
                // to account for availability delays.

                var receivedEvents = new List<EventData>();
                var index = 0;

                while ((receivedEvents.Count < dummyEventsCount) && (++index < ReceiveRetryLimit))
                {
                    receivedEvents.AddRange(await receiver.ReceiveAsync(dummyEventsCount + 10, TimeSpan.FromMilliseconds(25)));
                }

                Assert.That(receivedEvents.Count, Is.EqualTo(dummyEventsCount));
                lastEvent = receivedEvents.Last();
            }

            // Create a partition manager so we can retrieve the created checkpoint from it.

            var partitionManager = new InMemoryPartitionManager();

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connection,
                partitionManager,
                onProcessEvent: processorEvent =>
                {
                    if (processorEvent.Data != null)
                    {
                        // NOTE(review): fire-and-forget; the checkpoint task is not awaited here.
                        processorEvent.UpdateCheckpointAsync();
                    }
                }
            );

            eventProcessorManager.AddEventProcessors(1);

            // Start the event processors.

            await eventProcessorManager.StartAllAsync();

            // Make sure the event processors have enough time to stabilize and receive events.

            await eventProcessorManager.WaitStabilization();

            // Stop the event processors.

            await eventProcessorManager.StopAllAsync();

            // Validate results.

            IEnumerable<PartitionOwnership> ownershipEnumerable = await partitionManager.ListOwnershipAsync(connection.FullyQualifiedNamespace, connection.EventHubName, EventHubConsumerClient.DefaultConsumerGroupName);

            Assert.That(ownershipEnumerable, Is.Not.Null);

            // BUGFIX: IEnumerable<T> exposes no 'Count' property; the LINQ Count() extension
            // must be invoked (the bare method group did not compile).
            Assert.That(ownershipEnumerable.Count(), Is.EqualTo(1));

            PartitionOwnership ownership = ownershipEnumerable.Single();

            // The checkpoint should reflect the last event the processor observed.
            Assert.That(ownership.Offset.HasValue, Is.True);
            Assert.That(ownership.Offset.Value, Is.EqualTo(lastEvent.Offset));
            Assert.That(ownership.SequenceNumber.HasValue, Is.True);
            Assert.That(ownership.SequenceNumber.Value, Is.EqualTo(lastEvent.SequenceNumber));
        }
    }
}
/// <summary>
///   Verifies that an event processor creates a checkpoint when its event handler calls
///   <c>UpdateCheckpointAsync</c>.  Uses the iterator-based consumer API to read back the
///   seeded events.
/// </summary>
public async Task PartitionProcessorCanCreateACheckpoint()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName);

        await using (var connection = new EventHubConnection(connectionString))
        {
            // Send some events.

            EventData lastEvent;

            await using (var producer = new EventHubProducerClient(connection))
            await using (var consumer = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, connectionString))
            {
                // Send a few events.  We are only interested in the last one of them.

                var dummyEventsCount = 10;

                using (var dummyBatch = await producer.CreateBatchAsync())
                {
                    for (int i = 0; i < dummyEventsCount; i++)
                    {
                        dummyBatch.TryAdd(new EventData(Encoding.UTF8.GetBytes("I'm dummy.")));
                    }

                    await producer.SendAsync(dummyBatch);
                }

                // BUGFIX: the original body contained the placeholder
                // Assert.Fail("Convert to iterator.") in place of the receive loop, so the test
                // could never pass.  Read the events back using the iterator API instead; the
                // cancellation token bounds the wait in case the events never become available.

                var partitionId = (await consumer.GetPartitionIdsAsync()).First();
                var receivedEvents = new List<EventData>();

                using (var cancellationSource = new CancellationTokenSource(TimeSpan.FromSeconds(30)))
                {
                    await foreach (PartitionEvent partitionEvent in consumer.ReadEventsFromPartitionAsync(partitionId, EventPosition.Earliest, cancellationSource.Token))
                    {
                        if (partitionEvent.Data != null)
                        {
                            receivedEvents.Add(partitionEvent.Data);
                        }

                        if (receivedEvents.Count >= dummyEventsCount)
                        {
                            break;
                        }
                    }
                }

                Assert.That(receivedEvents.Count, Is.EqualTo(dummyEventsCount));
                lastEvent = receivedEvents.Last();
            }

            // Create a storage manager so we can retrieve the created checkpoint from it.

            var storageManager = new MockCheckPointStorage();

            // Create the event processor manager to manage our event processors.

            var eventProcessorManager = new EventProcessorManager
            (
                EventHubConsumerClient.DefaultConsumerGroupName,
                connectionString,
                storageManager,
                onProcessEvent: eventArgs =>
                {
                    if (eventArgs.Data != null)
                    {
                        // NOTE(review): fire-and-forget checkpoint update; confirm the write is
                        // flushed before the processors are stopped.
                        eventArgs.UpdateCheckpointAsync();
                    }
                }
            );

            eventProcessorManager.AddEventProcessors(1);

            // Start the event processors.

            await eventProcessorManager.StartAllAsync();

            // Make sure the event processors have enough time to stabilize and receive events.

            await eventProcessorManager.WaitStabilization();

            // Stop the event processors.

            await eventProcessorManager.StopAllAsync();

            // Validate results.

            IEnumerable<PartitionOwnership> ownershipEnumerable = await storageManager.ListOwnershipAsync(connection.FullyQualifiedNamespace, connection.EventHubName, EventHubConsumerClient.DefaultConsumerGroupName);

            Assert.That(ownershipEnumerable, Is.Not.Null);

            // BUGFIX: "Count" without parentheses is a method group on IEnumerable<T>, not a
            // property; it must be invoked.
            Assert.That(ownershipEnumerable.Count(), Is.EqualTo(1));
        }
    }
}
public async Task EventProcessorCanReceiveFromSpecifiedInitialEventPosition() { await using (EventHubScope scope = await EventHubScope.CreateAsync(2)) { var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName); await using (var connection = new EventHubConnection(connectionString)) { int receivedEventsCount = 0; // Send some events. var expectedEventsCount = 20; var dummyEvent = new EventData(Encoding.UTF8.GetBytes("I'm dummy.")); DateTimeOffset enqueuedTime; await using (var producer = new EventHubProducerClient(connection)) { // Send a few dummy events. We are not expecting to receive these. for (int i = 0; i < 30; i++) { await producer.SendAsync(dummyEvent); } // Wait a reasonable amount of time so the events are able to reach the service. await Task.Delay(1000); // Send the events we expect to receive. enqueuedTime = DateTimeOffset.UtcNow; for (int i = 0; i < expectedEventsCount; i++) { await producer.SendAsync(dummyEvent); } } // Create the event processor manager to manage our event processors. var eventProcessorManager = new EventProcessorManager ( EventHubConsumerClient.DefaultConsumerGroupName, connection, onInitialize: initializationContext => initializationContext.DefaultStartingPosition = EventPosition.FromEnqueuedTime(enqueuedTime), onProcessEvent: processorEvent => { if (processorEvent.Data != null) { Interlocked.Increment(ref receivedEventsCount); } } ); eventProcessorManager.AddEventProcessors(1); // Start the event processors. await eventProcessorManager.StartAllAsync(); // Make sure the event processors have enough time to stabilize and receive events. await eventProcessorManager.WaitStabilization(); // Stop the event processors. await eventProcessorManager.StopAllAsync(); // Validate results. Assert.That(receivedEventsCount, Is.EqualTo(expectedEventsCount)); } } }
public void ContructorWithConnectionStringCreatesTheTransportClient() { var client = new EventHubConnection("Endpoint=sb://not-real.servicebus.windows.net/;SharedAccessKeyName=DummyKey;SharedAccessKey=[not_real]", "fake", new EventHubConnectionOptions()); Assert.That(GetTransportClient(client), Is.Not.Null); }
// Verifies that when a second event processor joins an already-claimed set of partitions,
// load balancing redistributes ownership roughly evenly (each processor ends with
// partitions/2 or partitions/2 + 1 partitions, and all partitions stay owned).
//
// NOTE(review): this test is currently inert — the onInitialize/onStop callbacks that
// populate ownedPartitionsCount are commented out (OwnerIdentifier is no longer
// accessible), so the dictionary stays empty and the Single() call below will throw
// InvalidOperationException rather than performing the intended assertion.  Needs the
// TODO resolved before the test is meaningful.
public async Task LoadBalancingIsEnforcedWhenDistributionIsUneven() { var partitions = 10; await using (EventHubScope scope = await EventHubScope.CreateAsync(partitions)) { var connectionString = TestEnvironment.BuildConnectionStringForEventHub(scope.EventHubName); await using (var connection = new EventHubConnection(connectionString)) { ConcurrentDictionary <string, int> ownedPartitionsCount = new ConcurrentDictionary <string, int>(); // Create the event processor manager to manage our event processors. 
var eventProcessorManager = new EventProcessorManager ( EventHubConsumerClient.DefaultConsumerGroupName, connection // TODO: fix test. OwnerIdentifier is not accessible anymore. // onInitialize: initializationContext => // ownedPartitionsCount.AddOrUpdate(initializationContext.Context.OwnerIdentifier, 1, (ownerId, value) => value + 1), // onStop: stopContext => // ownedPartitionsCount.AddOrUpdate(stopContext.Context.OwnerIdentifier, 0, (ownerId, value) => value - 1) ); eventProcessorManager.AddEventProcessors(1); // Start the event processors. await eventProcessorManager.StartAllAsync(); // Make sure the event processors have enough time to stabilize. await eventProcessorManager.WaitStabilization(); // Assert all partitions have been claimed. (NOTE(review): throws while the callbacks above are disabled.) Assert.That(ownedPartitionsCount.ToArray().Single().Value, Is.EqualTo(partitions)); // Insert a new event processor into the manager so it can start stealing partitions. eventProcessorManager.AddEventProcessors(1); await eventProcessorManager.StartAllAsync(); // Make sure the event processors have enough time to stabilize. await eventProcessorManager.WaitStabilization(); // Take a snapshot of the current partition balancing status. IEnumerable <int> ownedPartitionsCountSnapshot = ownedPartitionsCount.ToArray().Select(kvp => kvp.Value); // Stop the event processors. await eventProcessorManager.StopAllAsync(); // Validate results: each processor should own either floor(partitions/2) or that plus one, and the totals must account for every partition. 
var minimumOwnedPartitionsCount = partitions / 2; var maximumOwnedPartitionsCount = minimumOwnedPartitionsCount + 1; foreach (var count in ownedPartitionsCountSnapshot) { Assert.That(count, Is.InRange(minimumOwnedPartitionsCount, maximumOwnedPartitionsCount)); } Assert.That(ownedPartitionsCountSnapshot.Sum(), Is.EqualTo(partitions)); } } }
// To simplify logic, tell the processor that only its assigned // partitions exist for the Event Hub. protected override Task <string[]> ListPartitionIdsAsync( EventHubConnection connection, CancellationToken cancellationToken) => Task.FromResult(_assignedPartitions);