public void CreateConsumerCreatesDefaultWhenOptionsAreNotSet()
{
    var clientOptions = new EventHubClientOptions
    {
        Retry = new ExponentialRetry(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(3), 5),
        DefaultTimeout = TimeSpan.FromHours(24)
    };

    var expectedOptions = new EventHubConsumerOptions
    {
        OwnerLevel = 251,
        Identifier = "Bob",
        PrefetchCount = 600,
        Retry = clientOptions.Retry,
        DefaultMaximumReceiveWaitTime = clientOptions.DefaultTimeout
    };

    var expectedConsumerGroup = "SomeGroup";
    var expectedPartition = "56767";
    var expectedPosition = EventPosition.FromSequenceNumber(123);
    var connectionString = "Endpoint=value.com;SharedAccessKeyName=[value];SharedAccessKey=[value];EntityPath=[value]";
    var mockClient = new ReadableOptionsMock(connectionString, clientOptions);

    mockClient.CreateConsumer(expectedConsumerGroup, expectedPartition, expectedPosition, expectedOptions);
    var actualOptions = mockClient.ConsumerOptions;

    Assert.That(actualOptions, Is.Not.Null, "The consumer options should have been set.");
    Assert.That(actualOptions, Is.Not.SameAs(expectedOptions), "A clone of the options should have been made.");
    Assert.That(actualOptions.OwnerLevel, Is.EqualTo(expectedOptions.OwnerLevel), "The owner levels should match.");
    Assert.That(actualOptions.Identifier, Is.EqualTo(expectedOptions.Identifier), "The identifiers should match.");
    Assert.That(actualOptions.PrefetchCount, Is.EqualTo(expectedOptions.PrefetchCount), "The prefetch counts should match.");
    Assert.That(ExponentialRetry.HaveSameConfiguration((ExponentialRetry)actualOptions.Retry, (ExponentialRetry)expectedOptions.Retry), "The retries should match.");
    Assert.That(actualOptions.MaximumReceiveWaitTimeOrDefault, Is.EqualTo(expectedOptions.MaximumReceiveWaitTimeOrDefault), "The wait times should match.");
}
public void GetHashCodeReturnsDifferentValuesForDifferentMembers()
{
    var first = EventPosition.FromOffset(12);
    var second = EventPosition.FromSequenceNumber(123);

    Assert.That(first.GetHashCode(), Is.Not.EqualTo(second.GetHashCode()));
}
public void CreateConsumerCreatesDefaultWhenOptionsAreNotSet()
{
    var retryOptions = new RetryOptions
    {
        MaximumRetries = 99,
        MaximumDelay = TimeSpan.FromHours(72),
        Delay = TimeSpan.FromSeconds(27)
    };

    var expectedOptions = new EventHubConsumerClientOptions
    {
        OwnerLevel = 251,
        PrefetchCount = 600,
        RetryOptions = retryOptions,
        DefaultMaximumReceiveWaitTime = TimeSpan.FromSeconds(123)
    };

    var clientOptions = new EventHubConnectionOptions();
    var expectedConsumerGroup = "SomeGroup";
    var expectedPartition = "56767";
    var expectedPosition = EventPosition.FromSequenceNumber(123);
    var connectionString = "Endpoint=value.com;SharedAccessKeyName=[value];SharedAccessKey=[value];EntityPath=[value]";
    var mockClient = new ReadableOptionsMock(connectionString, clientOptions);

    mockClient.CreateTransportConsumer(expectedConsumerGroup, expectedPartition, expectedPosition, expectedOptions);
    EventHubConsumerClientOptions actualOptions = mockClient.ConsumerOptions;

    Assert.That(actualOptions, Is.Not.Null, "The consumer options should have been set.");
    Assert.That(actualOptions, Is.Not.SameAs(expectedOptions), "A clone of the options should have been made.");
    Assert.That(actualOptions.OwnerLevel, Is.EqualTo(expectedOptions.OwnerLevel), "The owner levels should match.");
    Assert.That(actualOptions.PrefetchCount, Is.EqualTo(expectedOptions.PrefetchCount), "The prefetch counts should match.");
    Assert.That(actualOptions.RetryOptions.IsEquivalentTo(expectedOptions.RetryOptions), Is.True, "The retries should match.");
    Assert.That(actualOptions.MaximumReceiveWaitTimeOrDefault, Is.EqualTo(expectedOptions.MaximumReceiveWaitTimeOrDefault), "The wait times should match.");
}
public void IsEventPositionEquivalentDetectsDifferentSequence()
{
    var trackOnePosition = TrackOne.EventPosition.FromSequenceNumber(54123);
    var trackTwoPosition = EventPosition.FromSequenceNumber(2);

    Assert.That(TrackOneComparer.IsEventPositionEquivalent(trackOnePosition, trackTwoPosition), Is.False);
}
public void IsEquivalentToDetectsInclusivity()
{
    var first = EventPosition.FromSequenceNumber(42, true);
    var second = EventPosition.FromSequenceNumber(42, false);

    Assert.That(first.IsEquivalentTo(second), Is.False);
}
public void IsEventPositionEquivalentRecognizesSameSequence()
{
    var trackOnePosition = TrackOne.EventPosition.FromSequenceNumber(54123);
    var trackTwoPosition = EventPosition.FromSequenceNumber(54123);

    Assert.That(TrackOneComparer.IsEventPositionEquivalent(trackOnePosition, trackTwoPosition), Is.True);
}
public void IsEquivalentToDetectsSequenceNumber()
{
    var first = EventPosition.FromSequenceNumber(42);
    var second = EventPosition.FromSequenceNumber(1975);

    Assert.That(first.IsEquivalentTo(second), Is.False);
}
public void UseInitialPositionProviderTest()
{
    TestState state = new TestState();
    state.Initialize("UseInitialPositionProvider", 1, 0);

    const long firstSequenceNumber = 3456L;
    state.Options.InitialPositionProvider = (partitionId) => { return EventPosition.FromSequenceNumber(firstSequenceNumber); };

    ServiceFabricProcessor sfp = new ServiceFabricProcessor(
        state.ServiceUri,
        state.ServicePartitionId,
        state.StateManager,
        state.StatefulServicePartition,
        state.Processor,
        state.ConnectionString,
        "$Default",
        state.Options);
    sfp.MockMode = state.PartitionLister;
    sfp.EventHubClientFactory = new EventHubMocks.EventHubClientFactoryMock(1);

    state.PrepareToRun();
    state.StartRun(sfp);

    state.RunForNBatches(20, 10);
    state.WaitRun();

    // EXPECTED RESULT: Normal processing. Sequence number of first event processed should match that
    // supplied in the InitialPositionProvider.
    Assert.True(state.Processor.FirstEvent.SystemProperties.SequenceNumber == (firstSequenceNumber + 1L),
        $"Got unexpected first sequence number {state.Processor.FirstEvent.SystemProperties.SequenceNumber}");
    Assert.True(state.Processor.TotalErrors == 0, $"Errors found {state.Processor.TotalErrors}");
    Assert.Null(state.ShutdownException);
}
public async Task<EventPosition> GetPartitionPosition(string partitionId)
{
    if (partitionId == null)
    {
        throw new ArgumentNullException(nameof(partitionId));
    }

    if (_blobStorage == null)
    {
        return EventPosition.FromStart();
    }

    string state;

    try
    {
        state = await _blobStorage.ReadTextAsync(GetBlobName(partitionId));
    }
    catch (StorageException ex) when (ex.ErrorCode == ErrorCode.NotFound)
    {
        state = null;
    }

    if (state == null)
    {
        return EventPosition.FromStart();
    }

    StateToken token = state.AsJsonObject<StateToken>();

    return token.SequenceNumber == null
        ? EventPosition.FromStart()
        : EventPosition.FromSequenceNumber(token.SequenceNumber.Value);
}
public void ConstructorSetsTheStartingPosition()
{
    var expectedPosition = EventPosition.FromSequenceNumber(5641);
    var transportConsumer = new ObservableTransportConsumerMock();
    var consumer = new EventHubConsumer(transportConsumer, "dummy", EventHubConsumer.DefaultConsumerGroupName, "0", expectedPosition, new EventHubConsumerOptions(), Mock.Of<EventHubRetryPolicy>());

    Assert.That(consumer.StartingPosition, Is.EqualTo(expectedPosition));
}
public void BuildFilterExpressionHonorsInclusiveFlagForSequenceNumber(bool inclusive)
{
    var comparison = (inclusive) ? ">=" : ">";
    var position = EventPosition.FromSequenceNumber(123, inclusive);
    var filter = AmqpFilter.BuildFilterExpression(position);

    Assert.That(filter, Contains.Substring(comparison), "The comparison should be based on the inclusive flag.");
}
public void ResumeFrom(long position)
{
    EventPosition eventPosition = (position == -1)
        ? EventPosition.FromStart()
        : EventPosition.FromSequenceNumber(position);

    var client = GetEventHubClient(processId);
    ProcessReceiver = client.CreateReceiver("$Default", (processId / 8).ToString(), eventPosition);
}
/// <summary>
/// Determines the default start position for processing when no checkpoint is found.
/// </summary>
/// <returns>An event position indicating the startup logic.</returns>
public static EventPosition GetStartPosition()
{
    EventPosition startPosition;
    var startDefinition = Environment.GetEnvironmentVariable("StartPosition");

    if (!bool.TryParse(Environment.GetEnvironmentVariable("StartInclusive"), out var startInclusive))
    {
        startInclusive = false;
    }

    if (string.IsNullOrWhiteSpace(startDefinition))
    {
        startDefinition = "Start";
    }

    startDefinition = startDefinition.Trim().ToLowerInvariant();

    switch (startDefinition)
    {
        case "end":
            startPosition = EventPosition.FromEnd();
            break;

        case "offset":
            startPosition = EventPosition.FromOffset(Environment.GetEnvironmentVariable("StartOffset"), startInclusive);
            break;

        case "sequence":
            if (!long.TryParse(Environment.GetEnvironmentVariable("StartSequence"), out var startSequence))
            {
                startSequence = -1;
            }

            startPosition = EventPosition.FromSequenceNumber(startSequence, startInclusive);
            break;

        case "time":
            if (!int.TryParse(Environment.GetEnvironmentVariable("StartSeconds"), out var startSeconds))
            {
                startSeconds = 0;
            }

            startPosition = EventPosition.FromEnqueuedTime(DateTime.UtcNow.AddSeconds(startSeconds * -1));
            break;

        default:
            startPosition = EventPosition.FromStart();
            break;
    }

    return startPosition;
}
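The position returned by GetStartPosition is typically handed to a partition receiver when no checkpoint exists. Below is a minimal sketch of that wiring, assuming the Microsoft.Azure.EventHubs client used above; the connection string and partition id are placeholder values, and the helper name is hypothetical.

using Microsoft.Azure.EventHubs;

public static class ReceiverSetupSketch
{
    // Hypothetical helper: creates a receiver that starts at the position chosen by GetStartPosition()
    // (assumed to be accessible, e.g. the method shown above).
    public static PartitionReceiver CreateReceiver(string connectionString, string partitionId)
    {
        EventPosition startPosition = GetStartPosition();
        EventHubClient client = EventHubClient.CreateFromConnectionString(connectionString);

        return client.CreateReceiver(PartitionReceiver.DefaultConsumerGroupName, partitionId, startPosition);
    }
}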
public void TheSameInclusiveFlagsAreEqual(bool isInclusive)
{
    var first = EventPosition.FromSequenceNumber(234234, isInclusive);
    var second = EventPosition.FromSequenceNumber(234234, isInclusive);

    Assert.That(first.Equals((object)second), Is.True, "The default Equals comparison is incorrect.");
    Assert.That(first.Equals(second), Is.True, "The IEquatable comparison is incorrect.");
    Assert.That((first == second), Is.True, "The == operator comparison is incorrect.");
    Assert.That((first != second), Is.False, "The != operator comparison is incorrect.");
}
public void DifferentMembersAreNotEqual()
{
    var first = EventPosition.FromSequenceNumber(234234);
    var second = EventPosition.FromOffset(12);

    Assert.That(first.Equals((object)second), Is.False, "The default Equals comparison is incorrect.");
    Assert.That(first.Equals(second), Is.False, "The IEquatable comparison is incorrect.");
    Assert.That((first == second), Is.False, "The == operator comparison is incorrect.");
    Assert.That((first != second), Is.True, "The != operator comparison is incorrect.");
}
private async Task BackgroundReceive(string connectionString, string eventHubName, string partitionId, CancellationToken cancellationToken)
{
    var reportTasks = new List<Task>();
    EventPosition eventPosition;

    if (LastReceivedSequenceNumber.TryGetValue(partitionId, out long sequenceNumber))
    {
        eventPosition = EventPosition.FromSequenceNumber(sequenceNumber, false);
    }
    else
    {
        eventPosition = EventPosition.Latest;
    }

    await using (var consumerClient = new EventHubConsumerClient("$Default", connectionString, eventHubName))
    {
        Interlocked.Decrement(ref consumersToConnect);

        await foreach (var receivedEvent in consumerClient.ReadEventsFromPartitionAsync(partitionId, eventPosition, new ReadEventOptions { MaximumWaitTime = TimeSpan.FromSeconds(5) }))
        {
            if (receivedEvent.Data != null)
            {
                var key = Encoding.UTF8.GetString(receivedEvent.Data.Body.ToArray());

                if (MissingEvents.TryRemove(key, out var expectedEvent))
                {
                    if (HaveSameProperties(expectedEvent, receivedEvent.Data))
                    {
                        Interlocked.Increment(ref successfullyReceivedEventsCount);
                    }
                    else
                    {
                        reportTasks.Add(ReportCorruptedPropertiesEvent(partitionId, expectedEvent, receivedEvent.Data));
                    }
                }
                else
                {
                    reportTasks.Add(ReportCorruptedBodyEvent(partitionId, receivedEvent.Data));
                }

                LastReceivedSequenceNumber[partitionId] = receivedEvent.Data.SequenceNumber;
            }

            if (cancellationToken.IsCancellationRequested)
            {
                break;
            }
        }
    }

    await Task.WhenAll(reportTasks);
}
public void ConstructorSetsTheStartingPosition()
{
    var options = new EventReceiverOptions
    {
        BeginReceivingAt = EventPosition.FromSequenceNumber(12345, true)
    };

    var transportReceiver = new ObservableTransportReceiverMock();
    var receiver = new EventReceiver(transportReceiver, "dummy", "0", options);

    Assert.That(receiver.StartingPosition, Is.EqualTo(options.BeginReceivingAt));
}
public async Task ReadPartitionFromSequence()
{
    await using var scope = await EventHubScope.CreateAsync(1);

    #region Snippet:EventHubs_Sample05_ReadPartitionFromSequence

#if SNIPPET
    var connectionString = "<< CONNECTION STRING FOR THE EVENT HUBS NAMESPACE >>";
    var eventHubName = "<< NAME OF THE EVENT HUB >>";
#else
    var connectionString = EventHubsTestEnvironment.Instance.EventHubsConnectionString;
    var eventHubName = scope.EventHubName;
#endif

    var consumerGroup = EventHubConsumerClient.DefaultConsumerGroupName;

    var consumer = new EventHubConsumerClient(
        consumerGroup,
        connectionString,
        eventHubName);

    try
    {
        using CancellationTokenSource cancellationSource = new CancellationTokenSource();
        cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

        string firstPartition = (await consumer.GetPartitionIdsAsync(cancellationSource.Token)).First();

        PartitionProperties properties = await consumer.GetPartitionPropertiesAsync(firstPartition, cancellationSource.Token);
        EventPosition startingPosition = EventPosition.FromSequenceNumber(properties.LastEnqueuedSequenceNumber);

        await foreach (PartitionEvent partitionEvent in consumer.ReadEventsFromPartitionAsync(
            firstPartition,
            startingPosition,
            cancellationSource.Token))
        {
            string readFromPartition = partitionEvent.Partition.PartitionId;
            byte[] eventBodyBytes = partitionEvent.Data.EventBody.ToArray();

            Debug.WriteLine($"Read event of length { eventBodyBytes.Length } from { readFromPartition }");
        }
    }
    catch (TaskCanceledException)
    {
        // This is expected if the cancellation token is
        // signaled.
    }
    finally
    {
        await consumer.CloseAsync();
    }

    #endregion
}
/// <summary>
/// Provides test cases for the equality tests.
/// </summary>
///
public static IEnumerable<object[]> IsEquivalentToDetectsEqualEventPositionsCases()
{
    var date = DateTimeOffset.Parse("1975-04-04T00:00:00Z");

    yield return new object[] { EventPosition.Earliest, EventPosition.Earliest };
    yield return new object[] { EventPosition.Latest, EventPosition.Latest };
    yield return new object[] { EventPosition.FromOffset(1975), EventPosition.FromOffset(1975) };
    yield return new object[] { EventPosition.FromSequenceNumber(42), EventPosition.FromSequenceNumber(42) };
    yield return new object[] { EventPosition.FromEnqueuedTime(date), EventPosition.FromEnqueuedTime(date) };
}
public void BuildFilterExpressionPrefersSequenceNumberToEnqueuedTime()
{
    // Set all properties for the event position.
    var sequence = 2345;
    var position = EventPosition.FromSequenceNumber(sequence);
    position.EnqueuedTime = DateTimeOffset.Parse("2015-10-27T12:00:00Z");

    var filter = AmqpFilter.BuildFilterExpression(position);

    Assert.That(filter, Contains.Substring(AmqpFilter.SequenceNumberName), "The sequence number should have precedence over the enqueued time for filtering.");
    Assert.That(filter, Contains.Substring(sequence.ToString()), "The sequence number value should be present in the filter.");
}
public void ToStringReflectsTheState()
{
    var inclusive = true;
    var offset = 123;
    var sequence = 778;
    var enqueued = DateTimeOffset.Now.AddHours(1);

    Assert.That(EventPosition.Earliest.ToString(), Contains.Substring(nameof(EventPosition.Earliest)), "Earliest should be represented.");
    Assert.That(EventPosition.Latest.ToString(), Contains.Substring(nameof(EventPosition.Latest)), "Latest should be represented.");
    Assert.That(EventPosition.FromOffset(offset).ToString(), Contains.Substring($"[{ offset }]"), "The offset should be represented.");
    Assert.That(EventPosition.FromSequenceNumber(sequence).ToString(), Contains.Substring($"[{ sequence }]"), "The sequence should be represented.");
    Assert.That(EventPosition.FromEnqueuedTime(enqueued).ToString(), Contains.Substring($"[{ enqueued }]"), "The enqueued time should be represented.");
    Assert.That(EventPosition.FromOffset(offset, inclusive).ToString(), Contains.Substring($"[{ inclusive }]"), "The inclusive flag should be represented for the offset.");
    Assert.That(EventPosition.FromSequenceNumber(sequence, inclusive).ToString(), Contains.Substring($"[{ inclusive }]"), "The inclusive flag should be represented for the sequence number.");
}
public void IgnoreInitialPositionProviderTest()
{
    TestState state = new TestState();
    state.Initialize("IgnoreInitialPositionProvider", 1, 0);

    const long ippSequenceNumber = 3456L;
    state.Options.InitialPositionProvider = (partitionId) => { return EventPosition.FromSequenceNumber(ippSequenceNumber); };

    // Fake up a checkpoint using code borrowed from ReliableDictionaryCheckpointManager
    IReliableDictionary<string, Dictionary<string, object>> store =
        state.StateManager.GetOrAddAsync<IReliableDictionary<string, Dictionary<string, object>>>("EventProcessorCheckpointDictionary").Result;
    const long checkpointSequenceNumber = 8888L;
    Checkpoint fake = new Checkpoint((checkpointSequenceNumber * 100L).ToString(), checkpointSequenceNumber);

    using (ITransaction tx = state.StateManager.CreateTransaction())
    {
        store.SetAsync(tx, "0", fake.ToDictionary(), TimeSpan.FromSeconds(5.0), CancellationToken.None).Wait();
        tx.CommitAsync().Wait();
    }

    ServiceFabricProcessor sfp = new ServiceFabricProcessor(
        state.ServiceUri,
        state.ServicePartitionId,
        state.StateManager,
        state.StatefulServicePartition,
        state.Processor,
        state.ConnectionString,
        "$Default",
        state.Options);
    sfp.MockMode = state.PartitionLister;
    sfp.EventHubClientFactory = new EventHubMocks.EventHubClientFactoryMock(1);

    state.PrepareToRun();
    state.StartRun(sfp);

    state.RunForNBatches(20, 10);
    state.WaitRun();

    // EXPECTED RESULT: Normal processing. Sequence number of first event processed should match that
    // supplied in the checkpoint, NOT the InitialPositionProvider.
    Assert.True(state.Processor.FirstEvent.SystemProperties.SequenceNumber == (checkpointSequenceNumber + 1L),
        $"Got unexpected first sequence number {state.Processor.FirstEvent.SystemProperties.SequenceNumber}");
    Assert.True(state.Processor.TotalErrors == 0, $"Errors found {state.Processor.TotalErrors}");
    Assert.Null(state.ShutdownException);
}
/// <inheritdoc />
public virtual async Task<IEnumerable<EventProcessorCheckpoint>> ListCheckpointsAsync(CancellationToken cancellationToken)
{
    var checkpoints = new List<EventProcessorCheckpoint>();
    _listCheckpointsCounter.Increment();

    using (Logger.BeginScope("Listing checkpoints"))
    {
        // Timing added around the loop, although not ideal; it is tied to the async paging.
        using (_listBlobTiming.Time())
        {
            Logger.LogInformation("Retrieving checkpoints");

            await foreach (BlobItem blob in Client.GetBlobsAsync(traits: BlobTraits.Metadata, prefix: _checkpointPrefix, cancellationToken: cancellationToken).ConfigureAwait(false))
            {
                var partitionId = blob.Name.Substring(_checkpointPrefix.Length + 1);
                var startingPosition = default(EventPosition?);

                Logger.LogDebug("Retrieved blob for partition {partitionId}", partitionId);

                var sequenceNumber = await GetCheckpointSequenceNumberAsync(partitionId, blob, cancellationToken).ConfigureAwait(false);

                if (sequenceNumber.HasValue)
                {
                    Logger.LogDebug("Checkpoint was found with a sequence number of {sequenceNumber}", sequenceNumber);
                    startingPosition = EventPosition.FromSequenceNumber(sequenceNumber.Value, false);

                    checkpoints.Add(new Checkpoint
                    {
                        FullyQualifiedNamespace = _processorClient.FullyQualifiedNamespace,
                        EventHubName = _processorClient.EventHubName,
                        ConsumerGroup = _processorClient.ConsumerGroup,
                        PartitionId = partitionId,
                        StartingPosition = startingPosition.Value,
                        SequenceNumber = sequenceNumber
                    });
                }
                else
                {
                    Logger.LogError("An invalid checkpoint was found, no starting position");
                }
            }
        }
    }

    return checkpoints;
}
private static async Task GetEvents()
{
    EventHubConsumerClient client = new EventHubConsumerClient("$Default", connstring, hubname);
    string _partition = (await client.GetPartitionIdsAsync()).First();
    var cancellation = new CancellationToken();
    EventPosition _position = EventPosition.FromSequenceNumber(5);

    Console.WriteLine("Getting events from a certain position from a particular partition");

    await foreach (PartitionEvent _recent_event in client.ReadEventsFromPartitionAsync(_partition, _position, cancellation))
    {
        EventData event_data = _recent_event.Data;
        Console.WriteLine(Encoding.UTF8.GetString(event_data.Body.ToArray()));
        Console.WriteLine($"Sequence Number : {event_data.SequenceNumber}");
    }
}
private static async Task ReadFromPartition(string partitionNumber)
{
    var cancellationTokenSource = new CancellationTokenSource();
    cancellationTokenSource.CancelAfter(TimeSpan.FromSeconds(120));

    await using var consumerClient = new EventHubConsumerClient(ConsumerGroup, ConnectionString, EventHubName);

    try
    {
        var props = await consumerClient.GetPartitionPropertiesAsync(partitionNumber);
        var startingPosition = EventPosition.FromSequenceNumber(
            //props.LastEnqueuedSequenceNumber
            props.BeginningSequenceNumber);

        await foreach (PartitionEvent partitionEvent in consumerClient.ReadEventsFromPartitionAsync(partitionNumber, startingPosition, cancellationTokenSource.Token))
        {
            Console.WriteLine("***** NEW COFFEE *****");

            var partitionId = partitionEvent.Partition.PartitionId;
            var sequenceNumber = partitionEvent.Data.SequenceNumber;
            var key = partitionEvent.Data.PartitionKey;

            Console.WriteLine($"Partition Id: {partitionId}{Environment.NewLine}" +
                              $"SequenceNumber: {sequenceNumber}{Environment.NewLine}" +
                              $"Partition key: {key}");

            var coffee = JsonSerializer.Deserialize<CoffeeData>(partitionEvent.Data.EventBody.ToArray());
            Console.WriteLine($"Temperature: {coffee.WaterTemperature}, time: {coffee.BeadingTime}, type: {coffee.CoffeeType}");
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
    }
    finally
    {
        await consumerClient.CloseAsync();
    }
}
/// <summary>
///   Creates a checkpoint instance based on the blob metadata.
/// </summary>
///
/// <param name="fullyQualifiedNamespace">The fully qualified Event Hubs namespace the checkpoint is associated with.  This is likely to be similar to <c>{yournamespace}.servicebus.windows.net</c>.</param>
/// <param name="eventHubName">The name of the specific Event Hub the checkpoint is associated with, relative to the Event Hubs namespace that contains it.</param>
/// <param name="consumerGroup">The name of the consumer group the checkpoint is associated with.</param>
/// <param name="partitionId">The partition id the specific checkpoint is associated with.</param>
/// <param name="metadata">The metadata of the blob that represents the checkpoint.</param>
///
/// <returns>An <see cref="EventProcessorCheckpoint"/> initialized with checkpoint properties if the checkpoint exists; otherwise, <code>null</code>.</returns>
///
private EventProcessorCheckpoint CreateCheckpoint(string fullyQualifiedNamespace, string eventHubName, string consumerGroup, string partitionId, IDictionary<string, string> metadata)
{
    var startingPosition = default(EventPosition?);
    var offset = default(long?);
    var sequenceNumber = default(long?);

    if (metadata.TryGetValue(BlobMetadataKey.Offset, out var str) && long.TryParse(str, NumberStyles.Integer, CultureInfo.InvariantCulture, out var result))
    {
        offset = result;
        startingPosition = EventPosition.FromOffset(result, false);
    }
    else if (metadata.TryGetValue(BlobMetadataKey.SequenceNumber, out str) && long.TryParse(str, NumberStyles.Integer, CultureInfo.InvariantCulture, out result))
    {
        sequenceNumber = result;
        startingPosition = EventPosition.FromSequenceNumber(result, false);
    }

    // If neither the offset nor the sequence number was populated,
    // this is not a valid checkpoint.
    if (!startingPosition.HasValue)
    {
        InvalidCheckpointFound(partitionId, fullyQualifiedNamespace, eventHubName, consumerGroup);
        return null;
    }

    return new BlobStorageCheckpoint
    {
        FullyQualifiedNamespace = fullyQualifiedNamespace,
        EventHubName = eventHubName,
        ConsumerGroup = consumerGroup,
        PartitionId = partitionId,
        StartingPosition = startingPosition.Value,
        Offset = offset,
        SequenceNumber = sequenceNumber
    };
}
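For context, the metadata read above is written by the checkpoint-update path. Below is a minimal sketch of that write side, assuming Azure.Storage.Blobs; the helper name and the literal metadata keys ("offset", "sequencenumber") are assumptions meant to mirror the BlobMetadataKey constants used above, not a definitive implementation.

using System.Collections.Generic;
using System.Globalization;
using System.Threading;
using System.Threading.Tasks;
using Azure.Storage.Blobs;

public static class CheckpointWriterSketch
{
    // Hypothetical helper: persists the offset and sequence number of the last processed event as
    // metadata on the checkpoint blob, so CreateCheckpoint can later rebuild an EventPosition from it.
    public static async Task WriteCheckpointMetadataAsync(BlobClient checkpointBlob, long offset, long sequenceNumber, CancellationToken cancellationToken)
    {
        var metadata = new Dictionary<string, string>
        {
            // Key names are assumptions that mirror the BlobMetadataKey constants referenced above.
            ["offset"] = offset.ToString(CultureInfo.InvariantCulture),
            ["sequencenumber"] = sequenceNumber.ToString(CultureInfo.InvariantCulture)
        };

        // Overwrite the metadata on the existing checkpoint blob.
        await checkpointBlob.SetMetadataAsync(metadata, cancellationToken: cancellationToken).ConfigureAwait(false);
    }
}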
private static void Init()
{
    var builder = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true);

    configuration = builder.Build();
    connectionString = configuration.GetConnectionString("eventhub");
    consumergroup = configuration.GetConnectionString("consumergroup");
    eventHubClient = new EventHubConsumerClient(consumergroup, connectionString);
    partitionId = configuration["partitionId"];

    if (long.TryParse(configuration["SequenceNumber"], out long sequenceNumber) == false)
    {
        throw new ArgumentException("Invalid SequenceNumber");
    }

    processingEnqueueEndTimeUTC = DateTimeOffset.Parse(configuration["ProcessingEnqueueEndTimeUTC"]);
    startingPosition = EventPosition.FromSequenceNumber(sequenceNumber);
}
/// <summary>
///   Creates and starts a new partition pump associated with the specified partition.  Partition pumps that are overwritten by the creation
///   of a new one are properly stopped.
/// </summary>
///
/// <param name="partitionId">The identifier of the Event Hub partition the partition pump will be associated with.  Events will be read only from this partition.</param>
/// <param name="initialSequenceNumber">The sequence number of the event within a partition where the partition pump should begin reading events.</param>
///
/// <returns>A task to be resolved on when the operation has completed.</returns>
///
private async Task AddOrOverwritePartitionPumpAsync(string partitionId, long? initialSequenceNumber)
{
    // Remove and stop the existing partition pump if it exists.  We are not specifying any close reason because partition
    // pumps are only overwritten in case of failure.  In these cases, the close reason is delegated to the pump as it may
    // have more information about what caused the failure.
    await RemovePartitionPumpIfItExistsAsync(partitionId).ConfigureAwait(false);

    // Create and start the new partition pump and add it to the dictionary.
    var partitionContext = new PartitionContext(InnerClient.EventHubName, ConsumerGroup, partitionId, Identifier, Manager);

    try
    {
        var partitionProcessor = PartitionProcessorFactory(partitionContext);
        var options = Options.Clone();

        // Overwrite the initial event position in case a checkpoint exists.
        if (initialSequenceNumber.HasValue)
        {
            options.InitialEventPosition = EventPosition.FromSequenceNumber(initialSequenceNumber.Value);
        }

        var partitionPump = new PartitionPump(InnerClient, ConsumerGroup, partitionContext, partitionProcessor, options);

        await partitionPump.StartAsync().ConfigureAwait(false);

        PartitionPumps[partitionId] = partitionPump;
    }
    catch (Exception)
    {
        // If partition pump creation fails, we'll try again the next time this method is called.  This should happen
        // on the next load balancing loop as long as this instance still owns the partition.
        // TODO: delegate the exception handling to an Exception Callback.
    }
}
private async Task<EventPosition> LoadEventPositionFromDatabaseAsync()
{
    try
    {
        _latestSequenceNumber = await _retryService.RetryTask(() => _eventHubInfoDataService.FindHubProcessPartionSequenceNumberAsync(_eventHubPartitionId));

        if (_latestSequenceNumber >= 0)
        {
            return EventPosition.FromSequenceNumber(_latestSequenceNumber);
        }
    }
    catch (Exception e)
    {
        //ServiceEventSource.Current.ServiceMessage(this.Context, $"RouterService LoadEventPositionFromDatabaseAsync met exception={e} ");
        string err = $"RouterService LoadEventPositionFromDatabaseAsync met exception at partition ={Context.PartitionId} and exception= {e.Message}.";
        //ServiceEventSource.Current.Error("RouterService", err);
    }

    return null;
}
/// <summary>
///   Runs the sample using the specified Event Hubs connection information.
/// </summary>
///
/// <param name="connectionString">The connection string for the Event Hubs namespace that the sample should target.</param>
/// <param name="eventHubName">The name of the Event Hub, sometimes known as its path, that the sample should run against.</param>
///
public async Task RunAsync(string connectionString, string eventHubName)
{
    string firstPartition;

    // In this example, we will make use of multiple clients.  Because clients are typically responsible for managing their own connection to the
    // Event Hubs service, each will implicitly create their own connection.  In this example, we will create a connection that may be shared amongst
    // clients in order to illustrate connection sharing.  Because we are explicitly creating the connection, we assume responsibility for managing its
    // lifespan and ensuring that it is properly closed or disposed when we are done using it.
    await using (var eventHubConnection = new EventHubConnection(connectionString, eventHubName))
    {
        // Our initial consumer will begin watching the partition at the very end, reading only new events that we will publish for it.  Before we can
        // publish the events and have them observed, we will need to ask the consumer to perform an operation, because it opens its connection only
        // when it needs to.
        //
        // We'll begin to iterate on the partition using a small wait time, so that control will return to our loop even when no event is available.
        // For the first call, we'll publish so that we can receive them.
        //
        // Each event that the initial consumer reads will have attributes set that describe the event's place in the partition, such as its offset,
        // sequence number, and the date/time that it was enqueued.  These attributes can be used to create a new consumer that begins consuming at a
        // known position.
        //
        // With Event Hubs, it is the responsibility of an application consuming events to keep track of those that it has processed, and to manage
        // where in the partition the consumer begins reading events.  This is done by using the position information to track state, commonly known
        // as "creating a checkpoint."
        //
        // The goal is to preserve the position of an event in some form of durable state, such as writing it to a database, so that if the consuming
        // application crashes or is otherwise restarted, it can retrieve that checkpoint information and use it to create a consumer that begins
        // reading at the position where it left off.
        //
        // It is important to note that there is potential for a consumer to process an event and be unable to preserve the checkpoint.  A well-designed
        // consumer must be able to deal with processing the same event multiple times without it causing data corruption or otherwise creating issues.
        // Event Hubs, like most event streaming systems, guarantees "at least once" delivery; even in cases where the consumer does not experience a
        // restart, there is a small possibility that the service will return an event multiple times.
        //
        // In this example, we will publish a batch of events to be received with an initial consumer.  The third event that is consumed will be captured
        // and another consumer will use its attributes to start reading the event that follows, consuming the same set of events that our initial
        // consumer read, skipping over the first three.
        EventData thirdEvent;
        int eventBatchSize = 50;

        await using (var initialConsumerClient = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, eventHubConnection))
        {
            // We will start by using the consumer client to inspect the Event Hub and select a partition to operate against, to ensure that events
            // are being published and read from the same partition.
            firstPartition = (await initialConsumerClient.GetPartitionIdsAsync()).First();

            // We will consume the events until all of the published events have been received.
            CancellationTokenSource cancellationSource = new CancellationTokenSource();
            cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

            ReadOptions readOptions = new ReadOptions
            {
                MaximumWaitTime = TimeSpan.FromMilliseconds(150)
            };

            List<EventData> receivedEvents = new List<EventData>();
            bool wereEventsPublished = false;

            await foreach (PartitionEvent currentEvent in initialConsumerClient.ReadEventsFromPartitionAsync(firstPartition, EventPosition.Latest, readOptions, cancellationSource.Token))
            {
                if (!wereEventsPublished)
                {
                    await using (var producerClient = new EventHubProducerClient(connectionString, eventHubName))
                    {
                        using EventDataBatch eventBatch = await producerClient.CreateBatchAsync(new CreateBatchOptions { PartitionId = firstPartition });

                        for (int index = 0; index < eventBatchSize; ++index)
                        {
                            eventBatch.TryAdd(new EventData(Encoding.UTF8.GetBytes($"I am event #{ index }")));
                        }

                        await producerClient.SendAsync(eventBatch);
                        wereEventsPublished = true;

                        await Task.Delay(250);
                        Console.WriteLine($"The event batch with { eventBatchSize } events has been published.");
                    }
                }

                // Because publishing and receiving events is asynchronous, the events that we published may not be immediately available for our
                // consumer to see, so we'll have to guard against an empty event being sent as punctuation if our actual event is not available
                // within the waiting time period.
                if (currentEvent.Data != null)
                {
                    receivedEvents.Add(currentEvent.Data);

                    if (receivedEvents.Count >= eventBatchSize)
                    {
                        break;
                    }
                }
            }

            // Print out the events that we received.
            Console.WriteLine();
            Console.WriteLine($"The initial consumer processed { receivedEvents.Count } events of the { eventBatchSize } that were published.  { eventBatchSize } were expected.");

            foreach (EventData eventData in receivedEvents)
            {
                // The body of our event was an encoded string; we'll recover the message by reversing the encoding process.
                string message = Encoding.UTF8.GetString(eventData.Body.ToArray());
                Console.WriteLine($"\tMessage: \"{ message }\"");
            }

            // Remember the third event that was consumed.
            thirdEvent = receivedEvents[2];
        }

        // At this point, our initial consumer client has passed its "using" scope and has been safely disposed of.
        //
        // Create a new consumer, using the third event as the last sequence number processed; this new consumer will begin reading at the next available
        // sequence number, allowing it to read the set of published events beginning with the fourth one.
        //
        // Because our second consumer will begin watching the partition at a specific event, there is no need to ask for an initial operation to set our
        // place; when we begin iterating, the consumer will locate the proper place in the partition to read from.
        await using (var newConsumerClient = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, eventHubConnection))
        {
            // We will consume the events using the new consumer until all of the published events have been received.
            CancellationTokenSource cancellationSource = new CancellationTokenSource();
            cancellationSource.CancelAfter(TimeSpan.FromSeconds(30));

            int expectedCount = (eventBatchSize - 3);
            var receivedEvents = new List<EventData>();

            await foreach (PartitionEvent currentEvent in newConsumerClient.ReadEventsFromPartitionAsync(firstPartition, EventPosition.FromSequenceNumber(thirdEvent.SequenceNumber.Value), cancellationSource.Token))
            {
                receivedEvents.Add(currentEvent.Data);

                if (receivedEvents.Count >= expectedCount)
                {
                    break;
                }
            }

            // Print out the events that we received.
            Console.WriteLine();
            Console.WriteLine();
            Console.WriteLine($"The new consumer processed { receivedEvents.Count } events of the { eventBatchSize } that were published.  { expectedCount } were expected.");

            foreach (EventData eventData in receivedEvents)
            {
                // The body of our event was an encoded string; we'll recover the message by reversing the encoding process.
                string message = Encoding.UTF8.GetString(eventData.Body.ToArray());
                Console.WriteLine($"\tMessage: \"{ message }\"");
            }
        }
    }

    // At this point, our clients and connection have passed their "using" scope and have safely been disposed of.
    // We have no further obligations.
    Console.WriteLine();
}
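The sample above distills to a simple pattern: record the sequence number of the last event you processed, then resume just past it with EventPosition.FromSequenceNumber. Below is a minimal sketch of that pattern against the current Azure.Messaging.EventHubs client; the in-memory dictionary standing in for durable checkpoint storage and the helper names are assumptions for illustration only.

using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Azure.Messaging.EventHubs.Consumer;

public static class ResumeFromCheckpointSketch
{
    // Hypothetical "durable" store of the last processed sequence number per partition.
    // A real application would persist this to a database or blob storage instead.
    private static readonly Dictionary<string, long> s_lastProcessedSequence = new();

    public static async Task ReadPartitionAsync(string connectionString, string eventHubName, string partitionId, CancellationToken cancellationToken)
    {
        // Resume just after the last processed event, or start at the end of the partition if no checkpoint exists.
        EventPosition startingPosition = s_lastProcessedSequence.TryGetValue(partitionId, out long lastSequence)
            ? EventPosition.FromSequenceNumber(lastSequence, isInclusive: false)
            : EventPosition.Latest;

        await using var consumer = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, connectionString, eventHubName);

        await foreach (PartitionEvent partitionEvent in consumer.ReadEventsFromPartitionAsync(partitionId, startingPosition, cancellationToken))
        {
            // Process the event, then record its sequence number as the new checkpoint.
            Console.WriteLine($"Processed sequence number {partitionEvent.Data.SequenceNumber}");
            s_lastProcessedSequence[partitionId] = partitionEvent.Data.SequenceNumber;
        }
    }
}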