public NsqLookupConsumerFacts()
{
    lookupEndPoint = new DnsEndPoint(Settings.LookupHostName, Settings.LookupPort);
    options = new ConsumerOptions { LookupEndPoints = { lookupEndPoint } };
    prod = new NsqProducer(Settings.NsqdHostName, Settings.NsqdHttpPort);
}
public NsqProducerFacts()
{
    endPoint = new DnsEndPoint(Settings.NsqdHostName, Settings.NsqdTcpPort);
    options = new ConsumerOptions
    {
        Topic = "foo",
        Channel = "bar",
    };
    prod = new NsqProducer(Settings.NsqdHostName, Settings.NsqdHttpPort);
}
public void Initialize()
{
    // Requires a concrete implementation of the consumer factory to create a consumer.
    options = new ConsumerOptions
    {
        ConsumerKey = "consumerKey",
        ConsumerSecret = "consumerSecret",
        RequestTokenUrl = "http://requestTokenPath",
        AccessTokenUrl = "http://accessToken",
        RequestTokenResponseCallbackUrl = "http://localhost123",
        SignatureMethod = SignatureMethod.PlainText,
    };
}
public JsonConsumer(ConsumerOptions options)
{
    _options = options;
    _consumer = new Consumer(options);
}
public TestTimeline SendMessage(ConsumerOptions toConsumer, IMessage message)
{
    Operations.Enqueue(() => _tester.SendMessage(toConsumer, message));
    return this;
}
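// Because SendMessage returns the timeline itself, enqueued sends chain fluently.
// A minimal usage sketch; `timeline`, `consumerOptions`, and the two message
// instances are hypothetical stand-ins, not part of the original snippet.
timeline
    .SendMessage(consumerOptions, firstMessage)
    .SendMessage(consumerOptions, secondMessage);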
public MongoDbPopulatorMessageConsumer(MongoDbOptions mongoDbOptions, MongoDbPopulatorOptions populatorOptions, ConsumerOptions consumerOptions)
{
    if (typeof(T) == typeof(DicomFileMessage))
    {
        var mongoImageAdapter = new MongoDbAdapter("ImageMessageProcessor", mongoDbOptions, populatorOptions.ImageCollection);
        Processor = (IMessageProcessor<T>)new ImageMessageProcessor(populatorOptions, mongoImageAdapter, consumerOptions.QoSPrefetchCount, ExceptionCallback);
    }
    else if (typeof(T) == typeof(SeriesMessage))
    {
        var mongoSeriesAdapter = new MongoDbAdapter("SeriesMessageProcessor", mongoDbOptions, populatorOptions.SeriesCollection);
        Processor = (IMessageProcessor<T>)new SeriesMessageProcessor(populatorOptions, mongoSeriesAdapter, consumerOptions.QoSPrefetchCount, ExceptionCallback);
    }
    else
    {
        throw new ArgumentException("Message type " + typeof(T).Name + " not supported here");
    }

    ConsumerOptions = consumerOptions;
    Logger.Debug(_messageTypePrefix + "Constructed for " + typeof(T).Name);
}
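// A construction sketch for the generic consumer above; the three option
// instances are assumed to already exist. Only DicomFileMessage and
// SeriesMessage are valid type arguments; any other T throws ArgumentException.
var imageConsumer = new MongoDbPopulatorMessageConsumer<DicomFileMessage>(mongoDbOptions, populatorOptions, consumerOptions);
var seriesConsumer = new MongoDbPopulatorMessageConsumer<SeriesMessage>(mongoDbOptions, populatorOptions, consumerOptions);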
public DotPulsarEndpoint EndpointFor(ConsumerOptions consumerConfig) =>
    AddOrUpdateEndpoint(endpoint =>
    {
        endpoint.Topic = consumerConfig.Topic;
        endpoint.ConsumerOptions = consumerConfig;
    });
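// A hypothetical call site for EndpointFor: a single ConsumerOptions instance
// both selects the topic and rides along on the resulting endpoint. `transport`
// and `options` are placeholders, not part of the original snippet.
DotPulsarEndpoint endpoint = transport.EndpointFor(options);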
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced_dataLoad(int numberOfMessage, int timeoutInMs)
{
    int partition = 0;
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.Start();

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog });
    stopwatch.Restart();

    var producer = new Producer(router) { BatchDelayTime = TimeSpan.FromMilliseconds(10), BatchSize = numberOfMessage / 10 };
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request offset, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }

    TimeSpan maxTimeToRun = TimeSpan.FromMilliseconds(timeoutInMs);
    var doneSend = Task.WhenAll(sendList.ToArray());
    await Task.WhenAny(doneSend, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneSend.IsCompleted, "did not finish sending in time");
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    consumerOptions.MaxWaitTimeForMinimumBytes = TimeSpan.Zero;
    Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consume, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));

    IEnumerable<Message> messages = null;
    var doneConsume = Task.Run(() =>
    {
        stopwatch.Restart();
        messages = consumer.Consume().Take(numberOfMessage).ToArray();
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done consume, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
        stopwatch.Restart();
    });

    await Task.WhenAny(doneConsume, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneConsume.IsCompleted, "did not finish consuming in time");
    Assert.IsTrue(messages.Count() == numberOfMessage, "did not consume all messages");

    foreach (Message message in messages)
    {
        Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
        expected++;
    }

    stopwatch.Restart();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    consumer.Dispose();
    stopwatch.Restart();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    router.Dispose();
}
public void TestPartitionedModuleSpEL()
{
    var bindingsOptions = new RabbitBindingsOptions();
    var binder = GetBinder(bindingsOptions);

    ConsumerOptions consumerProperties = GetConsumerOptions("input", bindingsOptions);
    consumerProperties.Concurrency = 2;
    consumerProperties.InstanceIndex = 0;
    consumerProperties.InstanceCount = 3;
    consumerProperties.Partitioned = true;

    var delimiter = GetDestinationNameDelimiter();
    var input0 = new QueueChannel { ComponentName = "test.input0S" };
    var input0Binding = binder.BindConsumer($"part{delimiter}0", "testPartitionedModuleSpEL", input0, consumerProperties);

    consumerProperties.InstanceIndex = 1;
    var input1 = new QueueChannel { ComponentName = "test.input1S" };
    var input1Binding = binder.BindConsumer($"part{delimiter}0", "testPartitionedModuleSpEL", input1, consumerProperties);

    consumerProperties.InstanceIndex = 2;
    var input2 = new QueueChannel { ComponentName = "test.input2S" };
    var input2Binding = binder.BindConsumer($"part{delimiter}0", "testPartitionedModuleSpEL", input2, consumerProperties);

    var producerProperties = GetProducerOptions("output", bindingsOptions);
    var rabbitProducerOptions = bindingsOptions.GetRabbitProducerOptions("output");
    rabbitProducerOptions.RoutingKeyExpression = "'part.0'";

    producerProperties.PartitionKeyExpression = "Payload";
    // For strings, Java's hash is not equivalent to GetHashCode, but for "0", "1", "2" ToString() is equivalent to the hash.
    producerProperties.PartitionSelectorExpression = "ToString()";
    producerProperties.PartitionCount = 3;

    var output = CreateBindableChannel("output", CreateProducerBindingOptions(producerProperties));
    output.ComponentName = "test.output";
    var outputBinding = binder.BindProducer($"part{delimiter}0", output, producerProperties);

    try
    {
        var endpoint = ExtractEndpoint(outputBinding);
        CheckRkExpressionForPartitionedModuleSpEL(endpoint);
    }
    catch (Exception ex)
    {
        _logger?.LogError(ex, ex.Message);
    }

    var message2 = MessageBuilder.WithPayload("2")
        .SetHeader("correlationId", "foo")
        .SetHeader("contentType", MimeTypeUtils.TEXT_PLAIN)
        .SetHeader("sequenceNumber", 42)
        .SetHeader("sequenceSize", 43)
        .Build();
    output.Send(message2);
    output.Send(MessageBuilder.WithPayload("1").SetHeader("contentType", MimeTypeUtils.TEXT_PLAIN).Build());
    output.Send(MessageBuilder.WithPayload("0").SetHeader("contentType", MimeTypeUtils.TEXT_PLAIN).Build());

    var receive0 = Receive(input0);
    Assert.NotNull(receive0);
    var receive1 = Receive(input1);
    Assert.NotNull(receive1);
    var receive2 = Receive(input2);
    Assert.NotNull(receive2);

    Func<IMessage, bool> correlationHeadersForPayload2 = (m) =>
    {
        var accessor = new IntegrationMessageHeaderAccessor(m);
        return "foo".Equals(accessor.GetCorrelationId()) && accessor.GetSequenceNumber() == 42 && accessor.GetSequenceSize() == 43;
    };

    if (UsesExplicitRouting())
    {
        Assert.Equal("0", ((byte[])receive0.Payload).GetString());
        Assert.Equal("1", ((byte[])receive1.Payload).GetString());
        Assert.Equal("2", ((byte[])receive2.Payload).GetString());
        Assert.True(correlationHeadersForPayload2(receive2));
    }
    else
    {
        var receivedMessages = new List<IMessage>() { receive0, receive1, receive2 };
        Assert.Contains(receivedMessages, m => ((byte[])m.Payload).GetString() == "0");
        Assert.Contains(receivedMessages, m => ((byte[])m.Payload).GetString() == "1");
        Assert.Contains(receivedMessages, m => ((byte[])m.Payload).GetString() == "2");
        Func<IMessage, bool> payloadIs2 = (m) => m.Payload.Equals("2".GetBytes());
        Assert.Single(receivedMessages.Where(payloadIs2).Where(correlationHeadersForPayload2));
    }

    input0Binding.Unbind();
    input1Binding.Unbind();
    input2Binding.Unbind();
    outputBinding.Unbind();
}
public ObservingConsumer(ConsumerOptions options, params OffsetPosition[] positions) : base(options, positions) { }
internal Consumer(BusConnection connection, IBusLogger logger, IRetryBehavior retryBehavior, IServiceScopeFactory scopeFactory, ConsumerOptions<T> options)
{
    _options = options ?? throw new ArgumentNullException(nameof(options));
    _connection = connection ?? throw new ArgumentNullException(nameof(connection));
    _logger = logger;
    _retryBehavior = retryBehavior ?? throw new ArgumentNullException(nameof(retryBehavior));
    _scopeFactory = scopeFactory ?? throw new ArgumentNullException(nameof(scopeFactory));
    _tasks = new Tasks(_options.ConsumerMaxParallelTasks);

    _channel = connection.ConsumerConnection.CreateModel();
    // prefetchSize 0 = no byte limit; prefetch window applied per consumer (global: false)
    _channel.BasicQos(0, options.PrefetchCount, false);
    DeclareAndBind();
    _consumerTag = _channel.BasicConsume(_options.Queue.Name.Value, false, this);
}
public TcpConnectionFacts()
{
    endPoint = new DnsEndPoint(Settings.NsqdHostName, Settings.NsqdTcpPort);
    options = new ConsumerOptions();
    prod = new NsqProducer(Settings.NsqdHostName, Settings.NsqdHttpPort);
}
protected override void EngineController()
{
    logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Started.");

tryRecover:
    try
    {
        var kafkaOptions = new KafkaOptions(new Uri(_Config.URL));
        var brokerRouter = new BrokerRouter(kafkaOptions);
        var consumerOptions = new ConsumerOptions(Topic, brokerRouter);
        //consumerOptions.MaxWaitTimeForMinimumBytes = new TimeSpan(0, 0, 5);
        //consumerOptions.MinimumBytes = 2;
        //consumerOptions.FetchBufferMultiplier = 1;
        //consumerOptions.TopicPartitionQueryTimeMs = 100;

        var storedOffsetProcessed = GetOffsetProccessed();
        if (storedOffsetProcessed == null)
        {
            storedOffsetProcessed = new OffsetPosition() { Offset = 0, PartitionId = 0 };
        }

        using (var consumer = new Consumer(consumerOptions))
        {
            var kafkaOffsets = consumer.GetTopicOffsetAsync(Topic).Result;
            if (kafkaOffsets != null && kafkaOffsets.Count() != 0)
            {
                var kafkaMinOffset = kafkaOffsets.OrderBy(s => s.Offsets.Min()).FirstOrDefault();
                var kafkaMaxOffset = kafkaOffsets.OrderByDescending(s => s.Offsets.Max()).FirstOrDefault();

                // If the stored offset falls outside the range Kafka still holds, restart
                // from the earliest available offset; otherwise resume just past it.
                if (storedOffsetProcessed.Offset > kafkaMaxOffset.Offsets.Max() || storedOffsetProcessed.Offset < kafkaMinOffset.Offsets.Min())
                {
                    storedOffsetProcessed = new OffsetPosition() { Offset = kafkaMinOffset.Offsets.Min(), PartitionId = kafkaMinOffset.PartitionId };
                }
                else
                {
                    storedOffsetProcessed.Offset++;
                }

                consumer.SetOffsetPosition(storedOffsetProcessed);
            }

            foreach (var message in consumer.Consume(_CancellationToken))
            {
tryMessageAgain:
                try
                {
                    HandleMessage(message);
                    SaveMesseageOffsetProccessed(message.Meta);
                }
                catch (Exception ex)
                {
                    logger.LogCritical(ex, $"Exception occurred in Kafka consumer '{this.GetType().Name}' Offset {message.Meta.Offset} (ID={_EngineID})");
                    Task.Delay(5000).Wait();
                    goto tryMessageAgain;
                }
            }
        }
    }
    catch (OperationCanceledException)
    {
    }
    catch (Exception ex)
    {
        logger.LogInformation($"Exception occurred in Kafka consumer '{this.GetType().Name}' (ID={_EngineID}) {ex.Message} \n {ex.StackTrace}.");
        logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Recovering...");
        goto tryRecover;
    }

    logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Stopped.");
}
public async Task Claim_Test()
{
    ISequenceOperations otherSubscriber = A.Fake<ISequenceOperations>();

    #region ISequenceOperations producer = ...

    ISequenceOperations producer = _producerBuilder
        //.WithOptions(producerOption)
        .Partition(PARTITION)
        .Shard(SHARD)
        .Build<ISequenceOperations>();

    #endregion // ISequenceOperations producer = ...

    #region A.CallTo(...).ReturnsLazily(...)

    A.CallTo(() => otherSubscriber.RegisterAsync(A<User>.Ignored))
        .ReturnsLazily<ValueTask>(() => { throw new ApplicationException("test intentional exception"); });
    A.CallTo(() => otherSubscriber.LoginAsync(A<string>.Ignored, A<string>.Ignored))
        .ReturnsLazily(() => ValueTaskStatic.CompletedValueTask);
    A.CallTo(() => otherSubscriber.EarseAsync(A<int>.Ignored))
        .ReturnsLazily(() => ValueTaskStatic.CompletedValueTask);

    #endregion // A.CallTo(...).ReturnsLazily(...)

    await SendSequenceAsync(producer);

    var consumerOptions = new ConsumerOptions(
        AckBehavior.OnSucceed,
        maxMessages: 3 /* detach consumer after 3 messages */);
    CancellationToken cancellation = GetCancellationToken();

    #region await using IConsumerLifetime subscription = ...Subscribe(...)

    var consumerPipe = _consumerBuilder
        .WithOptions(consumerOptions)
        .WithCancellation(cancellation)
        .Partition(PARTITION)
        .Shard(SHARD)
        .WithResiliencePolicy(Policy.Handle<Exception>().RetryAsync(3))
        .WithLogger(_fakeLogger);

    await using IConsumerLifetime otherSubscription = consumerPipe
        .Subscribe(meta => otherSubscriber, "CONSUMER_GROUP_1", $"TEST Other {DateTime.UtcNow:HH:mm:ss}");
    await otherSubscription.Completion;

    await using IConsumerLifetime subscription = consumerPipe
        .Subscribe(meta => _subscriber, "CONSUMER_GROUP_1", $"TEST {DateTime.UtcNow:HH:mm:ss}");

    #endregion // await using IConsumerLifetime subscription = ...Subscribe(...)

    await subscription.Completion;

    #region Validation

    A.CallTo(() => otherSubscriber.RegisterAsync(A<User>.Ignored))
        .MustHaveHappened(
            (3 /* Polly retries */ + 1 /* throw */) * 3 /* disconnect after 3 messages */,
            Times.Exactly);
    A.CallTo(() => _subscriber.RegisterAsync(A<User>.Ignored))
        .MustHaveHappenedOnceExactly();
    A.CallTo(() => _subscriber.LoginAsync("admin", "1234"))
        .MustHaveHappenedOnceExactly();
    A.CallTo(() => _subscriber.EarseAsync(4335))
        .MustHaveHappenedOnceExactly();

    #endregion // Validation
}
public async Task Resilience_Test()
{
    #region ISequenceOperations producer = ...

    ISequenceOperations producer = _producerBuilder
        //.WithOptions(producerOption)
        .Partition(PARTITION)
        .Shard(SHARD)
        .Build<ISequenceOperations>();

    #endregion // ISequenceOperations producer = ...

    int tryNumber = 0;
    A.CallTo(() => _subscriber.RegisterAsync(A<User>.Ignored))
        .ReturnsLazily(() => Ack.Current.AckAsync());
    A.CallTo(() => _subscriber.LoginAsync(A<string>.Ignored, A<string>.Ignored))
        .ReturnsLazily<ValueTask>(async () =>
        {
            // Fail only on the first attempt; the Polly retry should then succeed.
            if (Interlocked.Increment(ref tryNumber) == 1)
            {
                throw new ApplicationException("test intentional exception");
            }

            await Ack.Current.AckAsync();
        });
    A.CallTo(() => _subscriber.EarseAsync(A<int>.Ignored))
        .ReturnsLazily(() => Ack.Current.AckAsync());

    await SendSequenceAsync(producer);

    var consumerOptions = new ConsumerOptions(
        AckBehavior.Manual,
        maxMessages: 3 /* detach consumer after 3 messages */);
    CancellationToken cancellation = GetCancellationToken();

    #region await using IConsumerLifetime subscription = ...Subscribe(...)

    await using IConsumerLifetime subscription = _consumerBuilder
        .WithOptions(consumerOptions)
        .WithCancellation(cancellation)
        .Partition(PARTITION)
        .Shard(SHARD)
        .WithResiliencePolicy(Policy.Handle<Exception>().RetryAsync(3))
        .WithLogger(_fakeLogger)
        .Subscribe(meta => _subscriber, "CONSUMER_GROUP_1", $"TEST {DateTime.UtcNow:HH:mm:ss}");

    #endregion // await using IConsumerLifetime subscription = ...Subscribe(...)

    await subscription.Completion;

    #region Validation

    A.CallTo(() => _subscriber.RegisterAsync(A<User>.Ignored))
        .MustHaveHappenedOnceExactly();
    A.CallTo(() => _subscriber.LoginAsync("admin", "1234"))
        .MustHaveHappenedTwiceExactly(); /* 1 Polly retry, 1 success */
    A.CallTo(() => _subscriber.EarseAsync(4335))
        .MustHaveHappenedOnceExactly();

    #endregion // Validation
}
public async Task Manual_ACK_Test()
{
    #region ISequenceOperations producer = ...

    ISequenceOperations producer = _producerBuilder
        //.WithOptions(producerOption)
        .Partition(PARTITION)
        .Shard(SHARD)
        .Build<ISequenceOperations>();

    #endregion // ISequenceOperations producer = ...

    #region A.CallTo(...).ReturnsLazily(...)

    int tryNumber = 0;
    A.CallTo(() => _subscriber.RegisterAsync(A<User>.Ignored))
        .ReturnsLazily(() => Ack.Current.AckAsync());
    A.CallTo(() => _subscriber.LoginAsync(A<string>.Ignored, A<string>.Ignored))
        .ReturnsLazily<ValueTask>(async () =>
        {
            // The first 3 errors are caught by Polly's retry; the 4th escapes and is
            // caught outside of Polly, so the 5th attempt is the one that succeeds.
            if (Interlocked.Increment(ref tryNumber) < 5)
            {
                throw new ApplicationException("test intentional exception");
            }

            await Ack.Current.AckAsync();
        });
    A.CallTo(() => _subscriber.EarseAsync(A<int>.Ignored))
        .ReturnsLazily(() => Ack.Current.AckAsync());

    #endregion // A.CallTo(...).ReturnsLazily(...)

    await SendSequenceAsync(producer);

    var consumerOptions = new ConsumerOptions(
        AckBehavior.Manual,
        maxMessages: 4 /* detach consumer after 4 messages */);
    CancellationToken cancellation = GetCancellationToken();

    #region await using IConsumerLifetime subscription = ...Subscribe(...)

    await using IConsumerLifetime subscription = _consumerBuilder
        .WithOptions(consumerOptions)
        .WithCancellation(cancellation)
        .Partition(PARTITION)
        .Shard(SHARD)
        .WithResiliencePolicy(Policy.Handle<Exception>().RetryAsync(3, (ex, i) => _outputHelper.WriteLine($"Retry {i}")))
        .WithLogger(_fakeLogger)
        .Subscribe(meta => _subscriber, "CONSUMER_GROUP_1", $"TEST {DateTime.UtcNow:HH:mm:ss}");

    #endregion // await using IConsumerLifetime subscription = ...Subscribe(...)

    await subscription.Completion;

    #region Validation

    A.CallTo(() => _subscriber.RegisterAsync(A<User>.Ignored))
        .MustHaveHappenedOnceExactly();
    A.CallTo(() => _subscriber.LoginAsync("admin", "1234"))
        .MustHaveHappened(
            3 /* Polly retries */ + 1 /* error */ + 1 /* success */,
            Times.Exactly);
    A.CallTo(() => _subscriber.EarseAsync(4335))
        .MustHaveHappenedOnceExactly();

    #endregion // Validation
}
/// <summary>
/// Order should remain the same within the same ack level and partition.
/// </summary>
/// <returns></returns>
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced()
{
    int partition = 0;
    int numberOfMessage = 200;

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter"));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri));

    int causesRaceConditionOldVersion = 2;
    // This is slow on purpose: it reproduces the race condition in the old version.
    var producer = new Producer(router, causesRaceConditionOldVersion) { BatchDelayTime = TimeSpan.Zero };
    // This is not slow:
    //var producer = new Producer(router);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer"));

    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request offset"));

    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }

    await Task.WhenAll(sendList.ToArray());
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send"));

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create Consumer"));
    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume"));
    await Task.Run(() =>
    {
        var results = consumer.Consume().Take(numberOfMessage).ToList();
        Assert.IsTrue(results.Count() == numberOfMessage, "did not consume all messages");
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume"));

        foreach (Message message in results)
        {
            Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
            expected++;
        }
    });

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose"));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose"));
    consumer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose"));
    router.Dispose();
}
public void TestInitialize()
{
    options = new ConsumerOptions
    {
        ConsumerKey = ConfigurationManager.AppSettings["ConsumerKey"],
        ConsumerSecret = ConfigurationManager.AppSettings["ConsumerSecret"],
        RequestTokenUrl = ConfigurationManager.AppSettings["RequestTokenUrl"],
        AccessTokenUrl = ConfigurationManager.AppSettings["AccessTokenUrl"],
        RequestTokenResponseCallbackUrl = ConfigurationManager.AppSettings["RequestTokenCallbackUrl"],
        SignatureMethod = ConfigurationManager.AppSettings["SignatureMethod"] == "hmac_sha1"
            ? SignatureMethod.Hmac_Sha1
            : SignatureMethod.PlainText
    };
}
// Per-partition consumer read handler
private void ProcessPartitionMessages(object state)
{
    int partition = (int)state;

    try
    {
        Dictionary<uint, MeasurementKey> idTable = new Dictionary<uint, MeasurementKey>();
        ConsumerOptions options = new ConsumerOptions(Topic, m_router);
        LongSynchronizedOperation cacheLastConsumerOffset = null;
        OffsetPosition consumerCursor = new OffsetPosition { PartitionId = partition, Offset = 0 };
        long lastUpdateTime = 0;
        long lastMetadataUpdateCount = 0;
        long lastMeasurementTime = 0;

        options.PartitionWhitelist.Add(partition);
        options.Log = new TimeSeriesLogger((message, parameters) => OnStatusMessage($"P[{partition}]: " + message, parameters), OnProcessException);

        // Handle consumer offset tracking, i.e., adapter will start reading messages where it left off from last run
        if (TrackConsumerOffset)
        {
            // Parse path/filename.ext into constituent parts
            string[] fileParts = new string[3];
            fileParts[0] = FilePath.GetDirectoryName(ConsumerOffsetFileName);            // 0: path/
            fileParts[1] = FilePath.GetFileNameWithoutExtension(ConsumerOffsetFileName); // 1: filename
            fileParts[2] = FilePath.GetExtension(ConsumerOffsetFileName);                // 2: .ext

            // Include partition index as part of consumer offset cache file name
            string fileName = $"{fileParts[0]}{fileParts[1]}-P{partition}{fileParts[2]}";

            if (File.Exists(fileName))
            {
                try
                {
                    // Read last consumer offset
                    consumerCursor.Offset = long.Parse(File.ReadAllText(fileName));
                }
                catch (Exception ex)
                {
                    OnProcessException(new InvalidOperationException($"Failed to read last consumer offset from \"{fileName}\": {ex.Message}", ex));
                }
            }

            cacheLastConsumerOffset = new LongSynchronizedOperation(() =>
            {
                // Do not write file any more often than defined consumer offset cache interval
                int restTime = (int)(Ticks.FromSeconds(ConsumerOffsetCacheInterval) - (DateTime.UtcNow.Ticks - lastUpdateTime)).ToMilliseconds();

                if (restTime > 0)
                {
                    Thread.Sleep(restTime);
                }

                lastUpdateTime = DateTime.UtcNow.Ticks;

                // Write current consumer offset
                File.WriteAllText(fileName, consumerCursor.Offset.ToString());
            },
            ex => OnProcessException(new InvalidOperationException($"Failed to cache current consumer offset to \"{fileName}\": {ex.Message}", ex)))
            {
                IsBackground = true
            };
        }

        using (Consumer consumer = new Consumer(options, new OffsetPosition(partition, consumerCursor.Offset)))
        {
            lock (m_consumers)
                m_consumers.Add(new WeakReference<Consumer>(consumer));

            foreach (Message message in consumer.Consume())
            {
                if ((object)m_metadata == null)
                {
                    continue;
                }

                uint id;
                byte metadataVersion;
                IMeasurement measurement = message.KafkaDeserialize(out id, out metadataVersion);

                // Kick off a refresh for new metadata if message version numbers change
                if (m_lastMetadataVersion != metadataVersion)
                {
                    m_lastMetadataVersion = metadataVersion;
                    m_updateMetadata.RunOnceAsync();
                }

                // Clear all undefined items in dictionary when metadata gets updated
                if (lastMetadataUpdateCount < m_metadataUpdateCount)
                {
                    lastMetadataUpdateCount = m_metadataUpdateCount;

                    foreach (uint undefinedID in idTable.Where(item => item.Value.SignalID == Guid.Empty).Select(item => item.Key).ToArray())
                    {
                        idTable.Remove(undefinedID);
                    }
                }

                // Get associated measurement key, or look it up in metadata table
                measurement.Key = idTable.GetOrAdd(id, lookupID => MeasurementKey.LookUpBySignalID(m_metadata?.Records?.FirstOrDefault(record => record.ID == lookupID)?.ParseSignalID() ?? Guid.Empty));

                // Only publish measurements that have associated metadata and are assigned to this adapter
                if (measurement.Key != MeasurementKey.Undefined && ((object)m_outputMeasurementKeys == null || m_outputMeasurementKeys.Contains(measurement.Key)))
                {
                    OnNewMeasurements(new[] { measurement });
                }

                // Cache last consumer offset
                consumerCursor.Offset = message.Offset;

                if ((object)cacheLastConsumerOffset != null)
                {
                    cacheLastConsumerOffset.RunOnceAsync();
                }

                if (ReadDelay > -1)
                {
                    // As a group of measurements transitions from one timestamp to another, inject configured read delay
                    if (lastMeasurementTime != measurement.Timestamp)
                    {
                        Thread.Sleep(ReadDelay);
                    }

                    lastMeasurementTime = measurement.Timestamp;
                }
            }
        }
    }
    catch (Exception ex)
    {
        OnProcessException(new InvalidOperationException($"Exception while reading Kafka messages for topic \"{Topic}\" P[{partition}]: {ex.Message}", ex));
    }
}
public IKafkaConsumer Consumer(ConsumerOptions options)
{
    return new KafkaConsumer(_zkConnection, options);
}
public void TestEndpointLifecycle()
{
    var binder = serviceProvider.GetService<IBinder>() as TestChannelBinder;
    Assert.NotNull(binder);

    var consumerProperties = new ConsumerOptions() { MaxAttempts = 1 };
    consumerProperties.PostProcess();

    // IBinding<IMessageChannel> consumerBinding = await binder.BindConsumer("foo", "fooGroup", new DirectChannel(serviceProvider), consumerProperties);
    var consumerBinding = binder.BindConsumer("foo", "fooGroup", new DirectChannel(serviceProvider), consumerProperties);

    var defaultBinding = consumerBinding as DefaultBinding<IMessageChannel>;
    Assert.NotNull(defaultBinding);

    // lifecycle
    var messageProducer = defaultBinding.Endpoint as TestChannelBinder.TestMessageProducerSupportEndpoint;
    Assert.NotNull(messageProducer);
    Assert.True(defaultBinding.Endpoint.IsRunning);
    Assert.NotNull(messageProducer.OutputChannel);

    // lifecycle.errorChannel
    Assert.NotNull(messageProducer.ErrorChannel);
    var errorChannel = messageProducer.ErrorChannel as PublishSubscribeChannel;
    Assert.NotNull(errorChannel.Dispatcher);

    // dispatcher.handlers
    Assert.Equal(2, errorChannel.Dispatcher.HandlerCount);
    var dispatcher = errorChannel.Dispatcher as AbstractDispatcher;
    Assert.NotNull(dispatcher);
    var handlers = dispatcher.Handlers;
    Assert.True(handlers[0] is BridgeHandler);
    Assert.True(handlers[1] is ILastSubscriberMessageHandler);

    var registry = serviceProvider.GetRequiredService<IDestinationRegistry>();
    Assert.True(registry.Contains("foo.fooGroup.errors"));
    Assert.True(registry.Contains("foo.fooGroup.errors.recoverer"));
    Assert.True(registry.Contains("foo.fooGroup.errors.handler"));
    Assert.True(registry.Contains("foo.fooGroup.errors.bridge"));

    consumerBinding.Unbind();
    Assert.False(registry.Contains("foo.fooGroup.errors"));
    Assert.False(registry.Contains("foo.fooGroup.errors.recoverer"));
    Assert.False(registry.Contains("foo.fooGroup.errors.handler"));
    Assert.False(registry.Contains("foo.fooGroup.errors.bridge"));
    Assert.False(defaultBinding.Endpoint.IsRunning);

    var producerProps = new ProducerOptions() { ErrorChannelEnabled = true };
    producerProps.PostProcess();

    // IBinding<IMessageChannel> producerBinding = await binder.BindProducer("bar", new DirectChannel(serviceProvider), producerProps);
    var producerBinding = binder.BindProducer("bar", new DirectChannel(serviceProvider), producerProps);
    Assert.True(registry.Contains("bar.errors"));
    Assert.True(registry.Contains("bar.errors.bridge"));

    producerBinding.Unbind();
    Assert.False(registry.Contains("bar.errors"));
    Assert.False(registry.Contains("bar.errors.bridge"));
}
protected bool Equals(ConsumerOptions other)
{
    return ConsumerStrategy == other.ConsumerStrategy
        && NoAck == other.NoAck
        && ShouldSerializeExceptions == other.ShouldSerializeExceptions;
}
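// A type that customizes Equals, as above, should keep GetHashCode consistent
// with it. A minimal companion sketch, assuming the same three properties
// define equality and that System.HashCode (.NET Core 2.1+/netstandard2.1) is
// available; this is not part of the original snippet.
public override int GetHashCode()
{
    return HashCode.Combine(ConsumerStrategy, NoAck, ShouldSerializeExceptions);
}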
public Subscription ConsumeRaw(Action<MessageEnvelope, IMessageAck> onReceived, ConsumerOptions options)
{
    if (_respondCalled)
    {
        throw new Exception("Respond was already called - have a respond and a consume on the same queue and you're gonna have a bad day");
    }

    if (_consumedCalled)
    {
        throw new Exception("Setting up more than one consumer for the same queue?");
    }

    _consumedCalled = true;
    _consumer = onReceived;

    return new Subscription(_model, Guid.NewGuid().ToString());
}
public void TestErrors()
{
    var searchDirectories = GetSearchDirectories("TestBinder");
    IServiceProvider serviceProvider = CreateStreamsContainer(searchDirectories).BuildServiceProvider();

    var messageConverter = serviceProvider.GetService<ISmartMessageConverter>();
    Assert.NotNull(messageConverter);

    var binder = serviceProvider.GetService<IBinder>() as AbstractPollableMessageSourceBinder;
    Assert.NotNull(binder);

    var configurer = serviceProvider.GetService<MessageConverterConfigurer>();
    Assert.NotNull(configurer);

    var pollableSource = new DefaultPollableMessageSource(serviceProvider.GetService<IApplicationContext>(), messageConverter);
    configurer.ConfigurePolledMessageSource(pollableSource, "foo");
    pollableSource.AddInterceptor(new TestSimpleChannelInterceptor());

    var properties = new ConsumerOptions()
    {
        MaxAttempts = 2,
        BackOffInitialInterval = 0,
        RetryableExceptions = new List<string>() { "!System.InvalidOperationException" }
    };
    properties.PostProcess();

    var latch = new CountdownEvent(2);
    binder.BindConsumer("foo", "bar", pollableSource, properties);

    var errorChan = serviceProvider.GetServices<IMessageChannel>().Where(chan => chan.ServiceName == IntegrationContextUtils.ERROR_CHANNEL_BEAN_NAME).Single() as ISubscribableChannel;
    var errorChanHandler = new TestErrorsErrorChannelHandler(latch);
    errorChan.Subscribe(errorChanHandler);

    // A retryable exception is attempted twice (MaxAttempts = 2)
    var h1 = new TestFuncMessageHandler((m) => { throw new Exception("test recoverer"); });
    Assert.True(pollableSource.Poll(h1));
    Assert.Equal(2, h1.Count);

    var getter = binder.GetType().GetProperty("LastError").GetGetMethod();
    var lastError = getter.Invoke(binder, new object[0]) as IMessage;
    Assert.NotNull(lastError);

    var lastErrorMessage = ((Exception)lastError.Payload).InnerException.Message;
    Assert.Equal("test recoverer", lastErrorMessage);

    // InvalidOperationException is excluded from retry, so there is only one attempt
    var h2 = new TestFuncMessageHandler((m) => { throw new InvalidOperationException("no retries"); });
    Assert.True(pollableSource.Poll(h2));
    Assert.Equal(1, h2.Count);

    lastError = getter.Invoke(binder, new object[0]) as IMessage;
    lastErrorMessage = ((Exception)lastError.Payload).InnerException.Message;
    Assert.Equal("no retries", lastErrorMessage);
}
public KafkaConsumer(IZookeeperConnection zkConnect, ConsumerOptions options)
{
    _zkConnect = zkConnect;
    _options = options;
}
protected override void EngineController()
{
    logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Started.");
    _LastCheckServiceIsForThisAgent = DateTime.Now.AddSeconds(-30);

    while (!_CancellationToken.IsCancellationRequested)
    {
        if (!ServiceIsRunning())
        {
            var delayTask = Task.Delay(new TimeSpan(0, 0, 5), _DelayCancelation);
            delayTask.Wait();
            continue;
        }

        var kafkaOptions = new KafkaOptions(new Uri(_Config.URL));
        var brokerRouter = new BrokerRouter(kafkaOptions);
        var consumerOptions = new ConsumerOptions(Topic, brokerRouter);
        //consumerOptions.MaxWaitTimeForMinimumBytes = new TimeSpan(0, 0, 5);
        //consumerOptions.MinimumBytes = 2;
        //consumerOptions.FetchBufferMultiplier = 1;
        //consumerOptions.TopicPartitionQueryTimeMs = 100;

        var offsetProcessed = GetOffsetProccessed();

        // Resume one past the last processed offset, or from the default position when nothing is stored
        using (var consumer = offsetProcessed == null
            ? new Consumer(consumerOptions)
            : new Consumer(consumerOptions, new OffsetPosition(offsetProcessed.PartitionId, offsetProcessed.Offset + 1)))
        {
            foreach (var message in consumer.Consume(_CancellationToken))
            {
                try
                {
                    if (!ServiceIsRunning())
                    {
                        break;
                    }
                }
                catch (Exception ex)
                {
                    logger.LogCritical(ex, $"Exception occurred in engine work, (ID={_EngineID})");
                }

                lock (_lockLastRun)
                {
                    _LastRun = DateTime.Now;
                }

                try
                {
                    HandleMessage(message);
                    SaveMesseageOffsetProccessed(message.Meta);
                }
                catch (Exception ex)
                {
                    logger.LogCritical(ex, $"Exception occurred in engine work, (ID={_EngineID})");
                    // Rewind the consumer so the failed message is redelivered
                    consumer.SetOffsetPosition(new OffsetPosition(message.Meta.PartitionId, message.Meta.Offset - 1));
                }

                lock (_lockLastEnd)
                {
                    _LastEnd = DateTime.Now;
                }
            }
        }
    }

    logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Stopped.");
}
/// <summary>
/// Set up a subscription to a queue which sends messages to the <see cref="IConsumer"/>.
/// </summary>
/// <param name="consumerOptions">The connection options.</param>
/// <param name="consumer">Consumer that will be sent any received messages.</param>
/// <param name="isSolo">If specified, will ensure that it is the only consumer on the provided queue.</param>
/// <returns>Identifier for the consumer task; can be used to stop the consumer without shutting down the whole adapter.</returns>
public Guid StartConsumer(ConsumerOptions consumerOptions, IConsumer consumer, bool isSolo = false)
{
    if (ShutdownCalled)
    {
        throw new ApplicationException("Adapter has been shut down");
    }

    if (consumerOptions == null)
    {
        throw new ArgumentNullException(nameof(consumerOptions));
    }

    if (!consumerOptions.VerifyPopulated())
    {
        throw new ArgumentException("The given ConsumerOptions has invalid values");
    }

    // Client label is the same for the IConnection and Subscription since we have a separate connection per consumer
    string label = string.Format("{0}::Consumer::{1}", _hostId, consumerOptions.QueueName);

    IConnection connection = _factory.CreateConnection(label);
    connection.ConnectionBlocked += (s, a) => _logger.Warn($"ConnectionBlocked for {consumerOptions.QueueName} (Reason: {a.Reason})");
    connection.ConnectionUnblocked += (s, a) => _logger.Warn($"ConnectionUnblocked for {consumerOptions.QueueName}");

    IModel model = connection.CreateModel();
    model.BasicQos(0, consumerOptions.QoSPrefetchCount, false);

    // Check queue exists
    try
    {
        // Passively declare the queue (equivalent to checking the queue exists)
        model.QueueDeclarePassive(consumerOptions.QueueName);
    }
    catch (OperationInterruptedException e)
    {
        model.Close(200, "StartConsumer - Queue missing");
        connection.Close(200, "StartConsumer - Queue missing");
        throw new ApplicationException($"Expected queue \"{consumerOptions.QueueName}\" to exist", e);
    }

    if (isSolo && model.ConsumerCount(consumerOptions.QueueName) > 0)
    {
        model.Close(200, "StartConsumer - Already a consumer on the queue");
        connection.Close(200, "StartConsumer - Already a consumer on the queue");
        throw new ApplicationException($"Already a consumer on queue {consumerOptions.QueueName} and solo consumer was specified");
    }

    Subscription subscription = null;
    var connected = false;
    var failed = 0;

    while (!connected)
    {
        try
        {
            subscription = new Subscription(model, consumerOptions.QueueName, consumerOptions.AutoAck, label);
            connected = true;
        }
        catch (TimeoutException)
        {
            if (++failed >= MaxSubscriptionAttempts)
            {
                _logger.Warn("Retries exceeded, throwing exception");
                throw;
            }

            _logger.Warn("Timeout when creating Subscription, retrying in 5s...");
            Thread.Sleep(TimeSpan.FromSeconds(5));
        }
        catch (OperationInterruptedException e)
        {
            throw new ApplicationException($"Error when creating subscription on queue \"{consumerOptions.QueueName}\"", e);
        }
        finally
        {
            if (!connected)
            {
                model.Close(200, "StartConsumer - Couldn't create subscription");
                connection.Close(200, "StartConsumer - Couldn't create subscription");
            }
        }
    }

    Guid taskId = Guid.NewGuid();
    var taskTokenSource = new CancellationTokenSource();
    var consumerTask = new Task(() => Consume(subscription, consumer, taskTokenSource.Token));
    var resources = new ConsumerResources
    {
        Connection = connection,
        Model = model,
        Subscription = subscription,
        ConsumerTask = consumerTask,
        TokenSource = taskTokenSource
    };

    lock (_oResourceLock)
    {
        _rabbitResources.Add(taskId, resources);
    }

    consumer.OnFatal += (s, e) =>
    {
        resources.Shutdown(DefaultOperationTimeout);
        _hostFatalHandler(s, e);
    };

    consumerTask.Start();
    _logger.Debug($"Consumer task started [QueueName={subscription?.QueueName}]");

    return taskId;
}
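// A hypothetical call site for StartConsumer above; `adapter`, `options`, and
// `myConsumer` are stand-ins, not part of the original snippet. The returned
// Guid identifies the consumer task so it can later be stopped individually.
Guid consumerId = adapter.StartConsumer(options, myConsumer, isSolo: true);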
public ConsumerRegistration(ConsumerOptions consumerOptions, StreamFlowDefaults? @default)
{
    Options = consumerOptions;
    Default = @default;
}
public void TestAnonymousGroupBase()
{
    B binder = GetBinder();
    var producerOptions = new ProducerOptions();
    producerOptions.PostProcess(string.Empty);
    var consumerOptions = new ConsumerOptions();
    consumerOptions.PostProcess(string.Empty);

    // Note: the original used Java-style string.Format("defaultGroup%s0", ...), which
    // never substitutes the delimiter in C#; interpolation restores the intent.
    var delimiter = GetDestinationNameDelimiter();
    var producerBindingOptions = CreateProducerBindingOptions(producerOptions);
    var output = CreateBindableChannel("output", producerBindingOptions);
    var producerBinding = binder.BindProducer($"defaultGroup{delimiter}0", output, producerBindingOptions.Producer);

    QueueChannel input1 = new QueueChannel();
    var binding1 = binder.BindConsumer($"defaultGroup{delimiter}0", null, input1, consumerOptions);

    QueueChannel input2 = new QueueChannel();
    var binding2 = binder.BindConsumer($"defaultGroup{delimiter}0", null, input2, consumerOptions);

    var testPayload1 = "foo-" + Guid.NewGuid().ToString();
    output.Send(MessageBuilder.WithPayload(testPayload1)
        .SetHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.TEXT_PLAIN)
        .Build());

    Message<byte[]> receivedMessage1 = (Message<byte[]>)Receive(input1);
    Assert.NotNull(receivedMessage1);
    Assert.Equal(testPayload1, Encoding.UTF8.GetString(receivedMessage1.Payload));

    Message<byte[]> receivedMessage2 = (Message<byte[]>)Receive(input2);
    Assert.NotNull(receivedMessage2);
    Assert.Equal(testPayload1, Encoding.UTF8.GetString(receivedMessage2.Payload));

    binding2.Unbind();

    var testPayload2 = "foo-" + Guid.NewGuid().ToString();
    output.Send(MessageBuilder.WithPayload(testPayload2)
        .SetHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.TEXT_PLAIN)
        .Build());

    binding2 = binder.BindConsumer($"defaultGroup{delimiter}0", null, input2, consumerOptions);

    var testPayload3 = "foo-" + Guid.NewGuid().ToString();
    output.Send(MessageBuilder.WithPayload(testPayload3)
        .SetHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.TEXT_PLAIN)
        .Build());

    receivedMessage1 = (Message<byte[]>)Receive(input1);
    Assert.NotNull(receivedMessage1);
    Assert.Equal(testPayload2, Encoding.UTF8.GetString(receivedMessage1.Payload));

    receivedMessage1 = (Message<byte[]>)Receive(input1);
    Assert.NotNull(receivedMessage1);
    Assert.NotNull(receivedMessage1.Payload);

    receivedMessage2 = (Message<byte[]>)Receive(input2);
    Assert.NotNull(receivedMessage2);
    Assert.Equal(testPayload3, Encoding.UTF8.GetString(receivedMessage2.Payload));

    producerBinding.Unbind();
    binding1.Unbind();
    binding2.Unbind();
}
public GenericConsumerRegistration(Type requestType, ConsumerOptions consumerOptions, StreamFlowDefaults? globals)
{
    RequestType = requestType;
    Options = consumerOptions;
    Default = globals;
}
public async Task<IActiveConsumer> StartListeningQueueAsync<T>(string queueName, ConsumerOptions<T> consumerOptions, ProcessorMessageDelegate<T> messageProcessor) where T : class
{
    var activeMessageProcessorCanceller = new ActiveMessageProcessorCanceller();

    const string consumerTagKey = "consumerTag";
    var pipeContext = new ClientPipeContextAction((channel, context) =>
    {
        var consumerTag = BeginConsumeQueue(channel, queueName, consumerOptions, messageProcessor, activeMessageProcessorCanceller);
        context.Items[consumerTagKey] = consumerTag;
        return Task.CompletedTask;
    });

    await ClientPipe.ExecutePipelineAsync(pipeContext, _consumerPipeline.Value);

    return new ActiveConsumer(pipeContext.GetItemValue<string>(consumerTagKey), pipeContext.ChannelContainer!, activeMessageProcessorCanceller);
}
private void RunTest(DirectoryInfo dir, int numberOfExpectedRows, Action<FileSystemOptions> adjustFileSystemOptions)
{
    TestLogger.Setup();
    var logger = LogManager.GetLogger("MicroservicesIntegrationTest");

    _globals.FileSystemOptions.FileSystemRoot = TestContext.CurrentContext.TestDirectory;

    var readFromFatalErrors = new ConsumerOptions { QueueName = "TEST.FatalLoggingQueue" };

    ///////////////////////////////////// Directory //////////////////////////
    var processDirectoryOptions = new DicomDirectoryProcessorCliOptions();
    processDirectoryOptions.ToProcessDir = dir;
    processDirectoryOptions.DirectoryFormat = "Default";

    adjustFileSystemOptions?.Invoke(_globals.FileSystemOptions);

    //////////////////////////////////////////////// Mongo Db Populator ////////////////////////
    // Make this a GUID or something, should be unique per test
    var currentSeriesCollectionName = "Integration_HappyPath_Series" + DateTime.Now.Ticks;
    var currentImageCollectionName = "Integration_HappyPath_Image" + DateTime.Now.Ticks;
    _globals.MongoDbPopulatorOptions.SeriesCollection = currentSeriesCollectionName;
    _globals.MongoDbPopulatorOptions.ImageCollection = currentImageCollectionName;

    // Use the test catalogue, not the one in the combined app.config
    _globals.RDMPOptions.CatalogueConnectionString = ((TableRepository)RepositoryLocator.CatalogueRepository).DiscoveredServer.Builder.ConnectionString;
    _globals.RDMPOptions.DataExportConnectionString = ((TableRepository)RepositoryLocator.DataExportRepository).DiscoveredServer.Builder.ConnectionString;
    _globals.DicomRelationalMapperOptions.RunChecks = true;

    if (_globals.DicomRelationalMapperOptions.MinimumBatchSize < 1)
    {
        _globals.DicomRelationalMapperOptions.MinimumBatchSize = 1;
    }

    using (var tester = new MicroserviceTester(_globals.RabbitOptions, _globals.CohortExtractorOptions))
    {
        tester.CreateExchange(_globals.ProcessDirectoryOptions.AccessionDirectoryProducerOptions.ExchangeName, _globals.DicomTagReaderOptions.QueueName);
        tester.CreateExchange(_globals.DicomTagReaderOptions.SeriesProducerOptions.ExchangeName, _globals.MongoDbPopulatorOptions.SeriesQueueConsumerOptions.QueueName);
        tester.CreateExchange(_globals.DicomTagReaderOptions.ImageProducerOptions.ExchangeName, _globals.IdentifierMapperOptions.QueueName);
        tester.CreateExchange(_globals.DicomTagReaderOptions.ImageProducerOptions.ExchangeName, _globals.MongoDbPopulatorOptions.ImageQueueConsumerOptions.QueueName, true);
        tester.CreateExchange(_globals.IdentifierMapperOptions.AnonImagesProducerOptions.ExchangeName, _globals.DicomRelationalMapperOptions.QueueName);
        tester.CreateExchange(_globals.RabbitOptions.FatalLoggingExchange, readFromFatalErrors.QueueName);
        tester.CreateExchange(_globals.CohortExtractorOptions.ExtractFilesProducerOptions.ExchangeName, null, false, _globals.CohortExtractorOptions.ExtractIdentRoutingKey);
        tester.CreateExchange(_globals.CohortExtractorOptions.ExtractFilesProducerOptions.ExchangeName, null, true, _globals.CohortExtractorOptions.ExtractAnonRoutingKey);
        tester.CreateExchange(_globals.CohortExtractorOptions.ExtractFilesInfoProducerOptions.ExchangeName, null);

        #region Running Microservices

        var processDirectory = new DicomDirectoryProcessorHost(_globals, processDirectoryOptions);
        processDirectory.Start();
        tester.StopOnDispose.Add(processDirectory);

        var dicomTagReaderHost = new DicomTagReaderHost(_globals);
        dicomTagReaderHost.Start();
        tester.StopOnDispose.Add(dicomTagReaderHost);

        var mongoDbPopulatorHost = new MongoDbPopulatorHost(_globals);
        mongoDbPopulatorHost.Start();
        tester.StopOnDispose.Add(mongoDbPopulatorHost);

        var identifierMapperHost = new IdentifierMapperHost(_globals, new SwapForFixedValueTester("FISHFISH"));
        identifierMapperHost.Start();
        tester.StopOnDispose.Add(identifierMapperHost);

        new TestTimelineAwaiter().Await(() => dicomTagReaderHost.AccessionDirectoryMessageConsumer.AckCount >= 1);
        logger.Info("\n### DicomTagReader has processed its messages ###\n");

        // FIXME: This isn't exactly how the pipeline runs
        new TestTimelineAwaiter().Await(() => identifierMapperHost.Consumer.AckCount >= 1);
        logger.Info("\n### IdentifierMapper has processed its messages ###\n");

        using (var relationalMapperHost = new DicomRelationalMapperHost(_globals))
        {
            var start = DateTime.Now;
            relationalMapperHost.Start();
            tester.StopOnDispose.Add(relationalMapperHost);

            Assert.True(mongoDbPopulatorHost.Consumers.Count == 2);
            new TestTimelineAwaiter().Await(() => mongoDbPopulatorHost.Consumers[0].Processor.AckCount >= 1);
            new TestTimelineAwaiter().Await(() => mongoDbPopulatorHost.Consumers[1].Processor.AckCount >= 1);
            logger.Info("\n### MongoDbPopulator has processed its messages ###\n");

            new TestTimelineAwaiter().Await(() => identifierMapperHost.Consumer.AckCount >= 1); // number of series
            logger.Info("\n### IdentifierMapper has processed its messages ###\n");

            Assert.AreEqual(0, dicomTagReaderHost.AccessionDirectoryMessageConsumer.NackCount);
            Assert.AreEqual(0, identifierMapperHost.Consumer.NackCount);
            Assert.AreEqual(0, ((Consumer<SeriesMessage>)mongoDbPopulatorHost.Consumers[0]).NackCount);
            Assert.AreEqual(0, ((Consumer<DicomFileMessage>)mongoDbPopulatorHost.Consumers[1]).NackCount);

            try
            {
                Thread.Sleep(TimeSpan.FromSeconds(10));
                new TestTimelineAwaiter().Await(() => relationalMapperHost.Consumer.AckCount >= numberOfExpectedRows, null, 30000, () => relationalMapperHost.Consumer.DleErrors); // number of image files
                logger.Info("\n### DicomRelationalMapper has processed its messages ###\n");
            }
            finally
            {
                // Find out what happened from the logging database
                var rdmpLogging = new Rdmp.Core.Logging.LogManager(_helper.LoadMetadata.GetDistinctLoggingDatabase());

                // If an error was reported during the DicomRelationalMapper run
                foreach (var dli in rdmpLogging.GetArchivalDataLoadInfos(_helper.LoadMetadata.GetDistinctLoggingTask(), null, null))
                {
                    if (dli.StartTime > start)
                    {
                        foreach (ArchivalFatalError e in dli.Errors)
                        {
                            logger.Error(e.Date.TimeOfDay + ":" + e.Source + ":" + e.Description);
                        }
                    }
                }
            }

            Assert.AreEqual(numberOfExpectedRows, _helper.ImageTable.GetRowCount(), "All images should appear in the image table");
            Assert.LessOrEqual(_helper.SeriesTable.GetRowCount(), numberOfExpectedRows, "Only unique series data should appear in series table, there should be less unique series than images (or equal)");
            Assert.LessOrEqual(_helper.StudyTable.GetRowCount(), numberOfExpectedRows, "Only unique study data should appear in study table, there should be less unique studies than images (or equal)");
            Assert.LessOrEqual(_helper.StudyTable.GetRowCount(), _helper.SeriesTable.GetRowCount(), "There should be less studies than series (or equal)");

            // Make sure that the substitution identifier (that replaces the old PatientId) is the correct substitution (FISHFISH)
            Assert.AreEqual("FISHFISH", _helper.StudyTable.GetDataTable().Rows.OfType<DataRow>().First()["PatientId"]);

            // The file size in the final table should be more than 0
            Assert.Greater((long)_helper.ImageTable.GetDataTable().Rows.OfType<DataRow>().First()["DicomFileSize"], 0);

            dicomTagReaderHost.Stop("TestIsFinished");
            mongoDbPopulatorHost.Stop("TestIsFinished");
            DropMongoTestDb(_globals.MongoDatabases.DicomStoreOptions.HostName, _globals.MongoDatabases.DicomStoreOptions.Port);
            identifierMapperHost.Stop("TestIsFinished");
            relationalMapperHost.Stop("Test end");
        }

        // Now do the extraction
        var extractorHost = new CohortExtractorHost(_globals, null, null);
        extractorHost.Start();

        var extract = new ExtractionRequestMessage
        {
            ExtractionJobIdentifier = Guid.NewGuid(),
            ProjectNumber = "1234-5678",
            ExtractionDirectory = "1234-5678_P1",
            KeyTag = "SeriesInstanceUID",
        };

        foreach (DataRow row in _helper.ImageTable.GetDataTable().Rows)
        {
            var ser = (string)row["SeriesInstanceUID"];

            if (!extract.ExtractionIdentifiers.Contains(ser))
            {
                extract.ExtractionIdentifiers.Add(ser);
            }
        }

        tester.SendMessage(_globals.CohortExtractorOptions, extract);

        // Wait until the extractor has picked up the messages and dispatched the responses
        new TestTimelineAwaiter().Await(() => extractorHost.Consumer.AckCount == 1);

        extractorHost.Stop("TestIsFinished");
        tester.Shutdown();
    }

    #endregion
}