public async Task GivenCollectionOfMeasurements_WhenAddAsync_ThenAllEventsAreSentInASingleBatch_Test()
{
    // Mock batch whose TryAdd callback always succeeds, so every event lands in a single batch.
    var mockEventDataBatch = EventHubsModelFactory.EventDataBatch(
        10000,
        new List<EventData>(),
        new CreateBatchOptions() { PartitionKey = "partition123" },
        (data) => true);

    _eventHubService.CreateEventDataBatchAsync(Arg.Any<string>()).Returns(mockEventDataBatch);

    var measurements = Enumerable.Range(0, 100).Select(i =>
    {
        var mockMeasurement = Substitute.For<IMeasurement>();
        mockMeasurement.DeviceId.Returns($"deviceId_{i}");
        return mockMeasurement;
    });

    await _measurementCollector.AddAsync(measurements, default);

    await _eventHubService.Received(1).CreateEventDataBatchAsync("123");
    await _eventHubService.Received(1)
        .SendAsync(
            Arg.Is<EventDataBatch>(data => data.Count == 100),
            default);
}
public async Task GivenCollectionOfMeasurements_WhenAddAsync_AndAEventIsToBigToSend_ThenEventsIsSkipped_Test()
{
    // Mock batch whose TryAdd callback rejects the event for "deviceId_5",
    // simulating a single event that is too large to fit in any batch.
    var eventDataBatch = EventHubsModelFactory.EventDataBatch(
        10000,
        new List<EventData>(),
        new CreateBatchOptions() { PartitionKey = "partition123" },
        (data) =>
        {
            var measurement = data.EventBody.ToObjectFromJson<Measurement>(null);
            return measurement.DeviceId != "deviceId_5";
        });

    _eventHubService.CreateEventDataBatchAsync(Arg.Any<string>()).Returns(eventDataBatch);

    var measurements = Enumerable.Range(0, 10).Select(i =>
    {
        var mockMeasurement = Substitute.For<IMeasurement>();
        mockMeasurement.DeviceId.Returns($"deviceId_{i}");
        return mockMeasurement;
    });

    await _measurementCollector.AddAsync(measurements, default);

    await _eventHubService.Received(2).CreateEventDataBatchAsync("123");
    await _eventHubService.Received(1)
        .SendAsync(
            Arg.Is<EventDataBatch>(data => data.Count == 9),
            default);
}
public void EventDataBatchIsSafeToDispose()
{
    var size = 1024;

    var store = new List<EventData>
    {
        new EventData(new BinaryData(Array.Empty<byte>())),
        new EventData(new BinaryData(Array.Empty<byte>()))
    };

    var options = new CreateBatchOptions { MaximumSizeInBytes = 2048 };
    var batch = EventHubsModelFactory.EventDataBatch(size, store, options, _ => false);

    Assert.That(() => batch.Dispose(), Throws.Nothing);
}
public async Task ProducerCanPublishBatchesAfterAnException()
{
    await using (EventHubScope scope = await EventHubScope.CreateAsync(1))
    {
        var cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(EventHubsTestEnvironment.Instance.TestExecutionTimeLimit);

        var connectionString = EventHubsTestEnvironment.Instance.BuildConnectionStringForEventHub(scope.EventHubName);
        var options = new EventHubProducerClientOptions { EnableIdempotentPartitions = true };

        await using var producer = new EventHubProducerClient(connectionString, options);

        var partition = (await producer.GetPartitionIdsAsync()).First();
        var batchOptions = new CreateBatchOptions { PartitionId = partition };

        // Publish a batch to validate that the initial publish works.

        using var firstBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        firstBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(async () => await producer.SendAsync(firstBatch, cancellationSource.Token), Throws.Nothing, "The first publishing operation was not successful.");

        // Publish an event too large to succeed; this will force the producer to deal with an exception, which should
        // update idempotent state.

        var producerId = (await producer.GetPartitionPublishingPropertiesAsync(partition, cancellationSource.Token)).ProducerGroupId;

        using var badBatch = EventHubsModelFactory.EventDataBatch(
            firstBatch.MaximumSizeInBytes + 1000,
            new List<EventData>(new[] { new EventData(EventGenerator.CreateRandomBody(firstBatch.MaximumSizeInBytes + 1000)) }),
            new CreateBatchOptions { PartitionId = partition });

        Assert.That(async () => await producer.SendAsync(badBatch, cancellationSource.Token), Throws.InstanceOf<EventHubsException>(), "The attempt to publish a too-large event should fail.");

        // Publish a second batch of events; this will prove that the producer recovered from the exception.

        using var secondBatch = await producer.CreateBatchAsync(batchOptions, cancellationSource.Token);
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());
        secondBatch.TryAdd(EventGenerator.CreateEvents(1).First());

        Assert.That(async () => await producer.SendAsync(secondBatch, cancellationSource.Token), Throws.Nothing, "The second publishing operation was not successful.");

        var newProducerId = (await producer.GetPartitionPublishingPropertiesAsync(partition, cancellationSource.Token)).ProducerGroupId;

        Assert.That(newProducerId, Is.Not.Null, "The producer group identifier should have a value.");
        Assert.That(newProducerId, Is.Not.EqualTo(producerId), "The producer group identifier should have been updated after the exception.");
    }
}
public void EventDataBatchInitializesProperties()
{
    var size = 1024;

    var store = new List<EventData>
    {
        new EventData(new BinaryData(Array.Empty<byte>())),
        new EventData(new BinaryData(Array.Empty<byte>()))
    };

    var options = new CreateBatchOptions { MaximumSizeInBytes = 2048 };
    var batch = EventHubsModelFactory.EventDataBatch(size, store, options);

    Assert.That(batch, Is.Not.Null, "The batch should have been created.");
    Assert.That(batch.SizeInBytes, Is.EqualTo(size), "The batch size should have been set.");
    Assert.That(batch.MaximumSizeInBytes, Is.EqualTo(options.MaximumSizeInBytes), "The maximum batch size should have been set.");
    Assert.That(batch.Count, Is.EqualTo(store.Count), "The batch count should reflect the count of the backing store.");
    Assert.That(batch.AsReadOnlyCollection<EventData>(), Is.EquivalentTo(store), "The batch enumerable should reflect the events in the backing store.");
}
public void Enqueue_TransmissionFlow()
{
    // arrange
    ManualResetEventSlim resetEvent = new ManualResetEventSlim();
    Mock<IMemoryBuffer<EventData>> buffer = new Mock<IMemoryBuffer<EventData>>();
    Mock<ITransmissionBuffer<EventData, EventDataBatch>> aggregator = new Mock<ITransmissionBuffer<EventData, EventDataBatch>>();
    Mock<ITransmissionSender<EventDataBatch>> sender = new Mock<ITransmissionSender<EventDataBatch>>();
    Mock<ITransmissionStorage<EventData>> storage = new Mock<ITransmissionStorage<EventData>>();
    ConcurrentQueue<EventDataBatch> bufferQueue = new ConcurrentQueue<EventDataBatch>();

    buffer
        .Setup(t => t.Enqueue(It.IsAny<EventData>()))
        .Callback((EventData d) => bufferQueue.Enqueue(EventHubsModelFactory.EventDataBatch(1, new[] { d })));

    aggregator
        .Setup(t => t.Dequeue(It.IsAny<CancellationToken>()))
        .Returns(() =>
        {
            int count = 0;
            List<EventDataBatch> results = new List<EventDataBatch>();

            while (bufferQueue.TryDequeue(out EventDataBatch d) && count < 10)
            {
                results.Add(d);
                count++;
            }

            return results.ToAsyncEnumerable();
        });

    sender
        .Setup(t => t.SendAsync(It.IsAny<IAsyncEnumerable<EventDataBatch>>(), It.IsAny<CancellationToken>()))
        .Callback(() => resetEvent.Set());

    ITelemetryEventTransmitter transmitter = new EventHubTransmitter(
        buffer.Object, aggregator.Object, sender.Object, storage.Object);

    TelemetryEvent data = new TelemetryEvent();

    // act
    transmitter.Enqueue(data);

    // assert
    resetEvent.Wait(TimeSpan.FromSeconds(5));

    sender.Verify(s => s.SendAsync(
        It.IsAny<IAsyncEnumerable<EventDataBatch>>(),
        It.IsAny<CancellationToken>()), Times.Once);
}
public void EventDataBatchRespectsTheTryAddCallback()
{
    var eventLimit = 3;
    var store = new List<EventData>();
    var batch = EventHubsModelFactory.EventDataBatch(5, store, tryAddCallback: _ => store.Count < eventLimit);

    while (store.Count < eventLimit)
    {
        Assert.That(() => batch.TryAdd(new EventData(new BinaryData("Test"))), Is.True, $"The batch contains {store.Count} events; adding another should be permitted.");
    }

    Assert.That(store.Count, Is.EqualTo(eventLimit), "The batch should be at its limit.");
    Assert.That(() => batch.TryAdd(new EventData(new BinaryData("Too many"))), Is.False, "The batch is full; it should not be possible to add a new event.");
    Assert.That(() => batch.TryAdd(new EventData(new BinaryData("Too many"))), Is.False, "The batch is full; a second attempt to add a new event should not succeed.");
    Assert.That(store.Count, Is.EqualTo(eventLimit), "The batch should be at its limit after the failed TryAdd attempts.");
    Assert.That(batch.AsReadOnlyCollection<EventData>(), Is.EquivalentTo(store), "The batch enumerable should reflect the events in the backing store.");
}
public async Task GivenCollectionOfMeasurements_WhenAddAsync_AndEventsCannotFitInSingleBatch_ThenEventsAreSentInAMultipeBatches_Test()
{
    var count = 0;

    var simpleMockEventDataBatch = EventHubsModelFactory.EventDataBatch(
        10000,
        new List<EventData>(),
        new CreateBatchOptions() { PartitionKey = "partition123" });

    var splittingEventDataBatch = EventHubsModelFactory.EventDataBatch(
        10000,
        new List<EventData>(),
        new CreateBatchOptions() { PartitionKey = "partition123" },
        (data) => count++ != 5); // reject the sixth TryAdd to force a split after 5 measurements

    _eventHubService.CreateEventDataBatchAsync(Arg.Any<string>())
        .Returns(splittingEventDataBatch, simpleMockEventDataBatch);

    var measurements = Enumerable.Range(0, 10).Select(i =>
    {
        var mockMeasurement = Substitute.For<IMeasurement>();
        mockMeasurement.DeviceId.Returns($"deviceId_{i}");
        return mockMeasurement;
    });

    await _measurementCollector.AddAsync(measurements, default);

    await _eventHubService.Received(2).CreateEventDataBatchAsync("123");
    await _eventHubService.Received(2)
        .SendAsync(
            Arg.Is<EventDataBatch>(data => data.Count == 5),
            default);
}
public void EventDataBatchRespectsTheTryAddCallback()
{
    var eventLimit = 3;
    var converter = new AmqpMessageConverter();
    var store = new List<EventData>();
    var messages = new List<AmqpMessage>();
    var batch = EventHubsModelFactory.EventDataBatch(5, store, tryAddCallback: _ => store.Count < eventLimit);

    while (store.Count < eventLimit)
    {
        var eventData = new EventData(new BinaryData("Test"));

        Assert.That(() => batch.TryAdd(eventData), Is.True, $"The batch contains {store.Count} events; adding another should be permitted.");
        messages.Add(converter.CreateMessageFromEvent(eventData));
    }

    Assert.That(store.Count, Is.EqualTo(eventLimit), "The batch should be at its limit.");
    Assert.That(() => batch.TryAdd(new EventData(new BinaryData("Too many"))), Is.False, "The batch is full; it should not be possible to add a new event.");
    Assert.That(() => batch.TryAdd(new EventData(new BinaryData("Too many"))), Is.False, "The batch is full; a second attempt to add a new event should not succeed.");
    Assert.That(store.Count, Is.EqualTo(eventLimit), "The batch should be at its limit after the failed TryAdd attempts.");
    Assert.That(batch.AsReadOnlyCollection<AmqpMessage>().Count, Is.EqualTo(eventLimit), "The messages produced by the batch should match the limit.");
}
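The examples above obtain their mock batches from EventHubsModelFactory.EventDataBatch and then exercise application-specific wrappers (_eventHubService, EventHubTransmitter). As a complementary illustration, the sketch below, which is not taken from any of the tests above, mocks EventHubProducerClient itself with Moq so that batch-building and send logic can be unit tested without a live Event Hubs namespace. It relies on CreateBatchAsync and SendAsync being virtual members of the client; the method name, backingStore, and mockProducer are illustrative only, and the usual usings (System, System.Collections.Generic, System.Threading, System.Threading.Tasks, Azure.Messaging.EventHubs, Azure.Messaging.EventHubs.Producer, Moq) are omitted to match the surrounding examples.

public async Task MockingTheProducerClientDirectly_Sketch()
{
    var backingStore = new List<EventData>();

    // The mocked batch records accepted events in backingStore; the callback allows
    // at most three events, simulating the size limit being reached.
    EventDataBatch batch = EventHubsModelFactory.EventDataBatch(
        1024,
        backingStore,
        new CreateBatchOptions(),
        tryAddCallback: _ => backingStore.Count < 3);

    var mockProducer = new Mock<EventHubProducerClient>();

    // CreateBatchAsync returns ValueTask<EventDataBatch>, so wrap the mock batch explicitly.
    mockProducer
        .Setup(p => p.CreateBatchAsync(It.IsAny<CancellationToken>()))
        .Returns(new ValueTask<EventDataBatch>(batch));

    mockProducer
        .Setup(p => p.SendAsync(It.IsAny<EventDataBatch>(), It.IsAny<CancellationToken>()))
        .Returns(Task.CompletedTask);

    // Exercise the producer as application code would; here it is used directly for brevity.
    EventHubProducerClient producer = mockProducer.Object;

    using EventDataBatch createdBatch = await producer.CreateBatchAsync(CancellationToken.None);
    createdBatch.TryAdd(new EventData(new BinaryData("One")));

    await producer.SendAsync(createdBatch, CancellationToken.None);

    mockProducer.Verify(
        p => p.SendAsync(It.Is<EventDataBatch>(b => b.Count == 1), It.IsAny<CancellationToken>()),
        Times.Once);
}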