// Verifies that a ConsumerConfiguration produced by the test's builder setup
// round-tripped every configurable field; dt is the StartTime the test supplied.
private static void AssertAsBuilt(ConsumerConfiguration c, DateTime dt)
{
    // Policies and durations
    Assert.Equal(AckPolicy.Explicit, c.AckPolicy);
    Assert.Equal(Duration.OfSeconds(99), c.AckWait);
    Assert.Equal(Duration.OfMillis(166), c.IdleHeartbeat);
    Assert.Equal(Duration.OfMillis(177), c.MaxExpires);
    Assert.Equal(Duration.OfMillis(188), c.InactiveThreshold);
    Assert.Equal(DeliverPolicy.ByStartSequence, c.DeliverPolicy);
    // String-valued fields
    Assert.Equal("10s", c.SampleFrequency);
    Assert.Equal("deliver", c.DeliverSubject);
    Assert.Equal("blah", c.Description);
    Assert.Equal("durable", c.Durable);
    Assert.Equal("fs", c.FilterSubject);
    // Numeric limits, replay policy and start position
    Assert.Equal(5555, c.MaxDeliver);
    Assert.Equal(6666, c.MaxAckPending);
    Assert.Equal(4242U, c.RateLimitBps);
    Assert.Equal(ReplayPolicy.Original, c.ReplayPolicy);
    Assert.Equal(2001ul, c.StartSeq);
    Assert.Equal(dt, c.StartTime);
    Assert.Equal(73, c.MaxPullWaiting);
    Assert.Equal(55, c.MaxBatch);
    Assert.Equal(56, c.MaxBytes);
    // Boolean flags
    Assert.True(c.FlowControl);
    Assert.True(c.HeadersOnly);
    // Backoff schedule: exactly three entries of 1s/2s/3s
    Assert.Equal(3, c.Backoff.Count);
    Assert.Equal(Duration.OfSeconds(1), c.Backoff[0]);
    Assert.Equal(Duration.OfSeconds(2), c.Backoff[1]);
    Assert.Equal(Duration.OfSeconds(3), c.Backoff[2]);
}
// Verifies the default state of a ConsumerConfiguration created with no explicit
// values: enum defaults, blank strings, null durations, -1 "unset" limits,
// zero sequence/rate, and an empty backoff list.
private static void AssertDefaultCc(ConsumerConfiguration c)
{
    // Default policies
    Assert.Equal(DeliverPolicy.All, c.DeliverPolicy);
    Assert.Equal(AckPolicy.Explicit, c.AckPolicy);
    Assert.Equal(ReplayPolicy.Instant, c.ReplayPolicy);
    // String-valued fields default to null/empty/whitespace
    Assert.True(string.IsNullOrWhiteSpace(c.Durable));
    Assert.True(string.IsNullOrWhiteSpace(c.DeliverGroup));
    Assert.True(string.IsNullOrWhiteSpace(c.DeliverSubject));
    Assert.True(string.IsNullOrWhiteSpace(c.FilterSubject));
    Assert.True(string.IsNullOrWhiteSpace(c.Description));
    Assert.True(string.IsNullOrWhiteSpace(c.SampleFrequency));
    // Durations and start position are unset
    Assert.Null(c.AckWait);
    Assert.Null(c.IdleHeartbeat);
    Assert.Equal(DateTime.MinValue, c.StartTime);
    Assert.False(c.FlowControl);
    Assert.False(c.HeadersOnly);
    // The test expects -1 as the "not set" sentinel for these limits
    Assert.Equal(-1, c.MaxDeliver);
    Assert.Equal(-1, c.MaxAckPending);
    Assert.Equal(-1, c.MaxPullWaiting);
    Assert.Equal(-1, c.MaxBatch);
    Assert.Equal(-1, c.MaxBytes);
    Assert.Equal(0U, c.StartSeq);
    Assert.Equal(0U, c.RateLimitBps);
    Assert.Equal(0, c.Backoff.Count);
}
/// <summary>
/// MANIFOLD use. Gets one consumer from the pool, lazily creating it (and
/// refreshing topic metadata) the first time a topic/partition is requested.
/// </summary>
public Consumer GetConsumerFromPool(short versionId, string clientId, int correlationId,
    string topic, ConsumerConfiguration cosumerConfigTemplate, int partitionId)
{
    // Make sure leader metadata for this topic is cached before building a consumer.
    if (!this.TopicPartitionsLeaderConsumers.ContainsKey(topic))
    {
        // Called for its side effect of populating the metadata caches; the
        // returned TopicMetadata is not needed here (previously stored in an
        // unused local).
        RefreshMetadata(versionId, clientId, correlationId, topic, false);
    }

    ConcurrentDictionary<int, Consumer> consumers = GetConsumerPoolForTopic(topic);
    if (!consumers.ContainsKey(partitionId))
    {
        // Double-checked locking so only one thread creates the consumer for a
        // given topic/partition.
        lock (GetConsumeLockOfTopicPartition(topic, partitionId))
        {
            if (!consumers.ContainsKey(partitionId))
            {
                // Template values are copied; the partition's leader broker is attached.
                ConsumerConfiguration config = new ConsumerConfiguration(
                    cosumerConfigTemplate, GetLeaderBrokerOfPartition(topic, partitionId));
                Consumer consumer = new Consumer(config);
                if (consumers.TryAdd(partitionId, consumer))
                {
                    // NOTE(review): "partitoin" typo kept so existing log scraping keeps working.
                    Logger.InfoFormat("Create one consumer for client {0} topic {1} partitoin {2} addOneConsumer return value:{3} ", clientId, topic, partitionId, true);
                }
                else
                {
                    Logger.WarnFormat("Create one consumer for client {0} topic {1} partitoin {2} addOneConsumer return value:{3} ", clientId, topic, partitionId, false);
                }
            }
        }
    }

    return consumers[partitionId];
}
// Runs a ZooKeeper-balanced consumer for consumerGroupId on this machine; all
// machines consuming under the same group id are rebalanced together. Each
// received message is handed to ProcessMessage and then offsets are committed.
static void BalancedConsumer(string consumerGroupId, string uniqueConsumerId, string topic,
    int threads, string zookeeperServer, Action<Message> ProcessMessage)
{
    // Manual offset management: we commit only after successful processing.
    var config = new ConsumerConfiguration
    {
        AutoCommit = false,
        GroupId = consumerGroupId,
        ConsumerId = uniqueConsumerId,
        ZooKeeper = new ZooKeeperConfiguration(zookeeperServer, 30000, 30000, 2000)
    };
    var connector = new ZookeeperConsumerConnector(config, true);

    // Request `threads` streams for the single topic and consume the first stream.
    var streams = connector.CreateMessageStreams(
        new Dictionary<string, int>() { { topic, threads } },
        new DefaultDecoder());
    var stream = streams[topic][0];

    // Consume until cancelled; commit after each message so redelivery is bounded.
    foreach (Message message in stream.GetCancellable(new CancellationToken()))
    {
        ProcessMessage(message);
        connector.CommitOffsets();
    }
}
// Creates a durable consumer with filter subject fs, then verifies that a push
// subscription on subject ss succeeds (and immediately unsubscribes).
private void SubscribeOk(IJetStream js, IJetStreamManagement jsm, string fs, string ss)
{
    // Unique suffix so every invocation uses a distinct durable name.
    int id = Rndm.Next();
    SetupConsumer(jsm, id, fs);

    var options = ConsumerConfiguration.Builder()
        .WithDurable(Durable(id))
        .BuildPushSubscribeOptions();
    js.PushSubscribeSync(ss, options).Unsubscribe();
}
// Asserts that applying cc as a consumer update is rejected by the server with
// the expected JetStream API error code (10012) surfaced as error code 500.
private void AssertInvalidConsumerUpdate(IJetStreamManagement jsm, ConsumerConfiguration cc)
{
    var ex = Assert.Throws<NATSJetStreamException>(() => jsm.AddOrUpdateConsumer(STREAM, cc));

    Assert.Equal(10012, ex.ApiErrorCode);
    Assert.Equal(500, ex.ErrorCode);
}
// Fixture setup: builds a PersistentConsumer wired to an IInternalConsumerFactory
// substitute so tests can count and inspect every internal consumer it creates.
public Given_a_PersistentConsumer()
{
    eventBus = new EventBus();
    internalConsumers = new List <IInternalConsumer>();
    createConsumerCalled = 0;
    mockBuilder = new MockBuilder();
    queue = new Queue(queueName, false);
    // Handler always acks — these tests exercise consumer lifecycle, not handling.
    onMessage = (body, properties, info, cancellation) => Task.FromResult(AckStrategies.Ack);
    internalConsumerFactory = Substitute.For <IInternalConsumerFactory>();
    // Each CreateConsumer call yields a fresh substitute, records it, and bumps the counter.
    internalConsumerFactory.CreateConsumer().Returns(x =>
    {
        var internalConsumer = Substitute.For <IInternalConsumer>();
        internalConsumers.Add(internalConsumer);
        createConsumerCalled++;
        return(internalConsumer);
    });
    configuration = new ConsumerConfiguration(0);
    consumer = new PersistentConsumer(
        queue,
        onMessage,
        configuration,
        internalConsumerFactory,
        eventBus
        );
    // Hook for derived fixtures to extend the arrangement.
    AdditionalSetup();
}
// Each permitted consumer-configuration mutation, applied on top of a freshly
// prepared consumer, must be accepted by AddOrUpdateConsumer.
public void TestValidConsumerUpdates()
{
    Context.RunInJsServer(c =>
    {
        IJetStreamManagement jsm = c.CreateJetStreamManagementContext();
        CreateMemoryStream(jsm, STREAM, SUBJECT_GT);

        // Table of valid single-field updates, in the same order as before.
        var mutations = new Func<ConsumerConfiguration, ConsumerConfiguration>[]
        {
            prev => ConsumerConfiguration.Builder(prev).WithDeliverSubject(Deliver(2)).Build(),
            prev => ConsumerConfiguration.Builder(prev).WithAckWait(Duration.OfSeconds(5)).Build(),
            prev => ConsumerConfiguration.Builder(prev).WithRateLimitBps(100).Build(),
            prev => ConsumerConfiguration.Builder(prev).WithMaxAckPending(100).Build(),
            prev => ConsumerConfiguration.Builder(prev).WithMaxDeliver(4).Build(),
        };

        foreach (var mutate in mutations)
        {
            AssertValidAddOrUpdate(jsm, mutate(PrepForUpdateTest(jsm)));
        }
    });
}
// Stores the work queue of partitions lacking a leader, the broker cluster to
// query, and the callback invoked to create a fetcher for a (partition, broker)
// pair — presumably once a leader has been resolved; confirm at usage sites.
public PartitionLeaderFinder(ConcurrentQueue<PartitionTopicInfo> partitionsNeedingLeaders, Cluster brokers, ConsumerConfiguration config, Action<PartitionTopicInfo, Broker> createNewFetcher)
{
    _partitionsNeedingLeader = partitionsNeedingLeaders;
    _brokers = brokers;
    _config = config;
    _createNewFetcher = createNewFetcher;
}
// Captures the producer (KafkaConfiguration) and consumer configurations and
// snapshots their topic names for later use by the controller actions.
public ValuesController(KafkaConfiguration config, ConsumerConfiguration consumerconf)
{
    _config = config;
    topic = config.topic;
    _consumerconf = consumerconf;
    consumertopic = consumerconf.topic;
}
// ---------------------------------- consume --------------------------------------
// Starts consuming from each (queue, handler) pair. Pairs without an explicit
// OnMessage get a handler that deserializes the message and dispatches it via a
// handler collection built for that queue.
public IDisposable Consume(IEnumerable<QueueConsumerPair> queueConsumerPairs, Action<IConsumerConfiguration> configure)
{
    Preconditions.CheckNotNull(queueConsumerPairs, nameof(queueConsumerPairs));
    Preconditions.CheckNotNull(configure, "configure");

    if (disposed)
    {
        throw new MessageBusException("This bus has been disposed");
    }

    // Materialize one (queue, handler) tuple per pair up front.
    var queueOnMessages = queueConsumerPairs
        .Select(pair =>
        {
            var handler = pair.OnMessage;
            if (handler == null)
            {
                // No explicit handler: build a per-queue handler collection and
                // dispatch by deserialized message type.
                var handlers = handlerCollectionFactory.CreateHandlerCollection(pair.Queue);
                pair.AddHandlers(handlers);
                handler = (body, properties, messageReceivedInfo) =>
                {
                    var message = messageSerializationStrategy.DeserializeMessage(properties, body);
                    return handlers.GetHandler(message.MessageType)(message, messageReceivedInfo);
                };
            }
            return Tuple.Create(pair.Queue, handler);
        })
        .ToList();

    // Let the caller tweak the consumer configuration before starting.
    var consumerConfiguration = new ConsumerConfiguration(connectionConfiguration.PrefetchCount);
    configure(consumerConfiguration);

    return consumerFactory
        .CreateConsumer(queueOnMessages, connection, consumerConfiguration)
        .StartConsuming();
}
// Builds a high-level Kafka consumer: resolves the effective ConsumerConfig,
// stores connection/identity fields, constructs the ConsumerConfiguration
// (manual offset commits), and optionally starts consuming immediately.
public KafkaConsumer(string zkConnectionString, string topic, string groupId, string consumerId,
    OnKafkaMessageReceived onMessageReceived, ConsumerConfig consumerConfig = null, bool start = true)
{
    _consumerConfig = consumerConfig ?? ConsumerConfig.DefaultConfig;
    ZkConnectionString = zkConnectionString;
    Topic = topic;
    GroupId = groupId;
    ConsumerId = consumerId ?? string.Empty;
    SlidingDoors = new ConcurrentDictionary<int, SlidingDoor>();

    // Same field order as the original object initializer.
    var configuration = new ConsumerConfiguration();
    configuration.BackOffIncrement = _consumerConfig.BackOffIncrement;
    configuration.AutoCommit = false;
    configuration.GroupId = GroupId;
    configuration.ConsumerId = ConsumerId;
    configuration.BufferSize = ConsumerConfiguration.DefaultBufferSize;
    configuration.MaxFetchBufferLength = ConsumerConfiguration.DefaultMaxFetchBufferLength;
    configuration.FetchSize = ConsumerConfiguration.DefaultFetchSize;
    configuration.AutoOffsetReset = _consumerConfig.AutoOffsetReset;
    configuration.ZooKeeper = KafkaClient.GetZooKeeperConfiguration(zkConnectionString);
    configuration.ShutdownTimeout = 100;
    ConsumerConfiguration = configuration;

    _onMessageReceived = onMessageReceived;

    if (start)
    {
        Start();
    }
}
/// <summary>
/// Builds a Consumer from the currently cached metadata, without retry: the
/// template's values seed a new ConsumerConfiguration bound to the partition's
/// cached leader broker. May throw if the related metadata does not exist —
/// the client side must handle that exception and any metadata change.
/// </summary>
/// <param name="topic">Topic name.</param>
/// <param name="partitionID">Partition id within the topic.</param>
/// <param name="cosumerConfigTemplate">Template whose values seed the new configuration.</param>
/// <returns>A new Consumer bound to the partition's leader broker.</returns>
public Consumer GetConsumer(string topic, int partitionID, ConsumerConfiguration cosumerConfigTemplate)
{
    var leader = GetLeaderBrokerOfPartition(topic, partitionID);
    return new Consumer(new ConsumerConfiguration(cosumerConfigTemplate, leader));
}
// Sample: pull subscription that fetches messages in batches of 10 while a
// background task publishes them, then deletes the demo stream.
public static void Main(string[] args)
{
    ArgumentHelper helper = new ArgumentHelperBuilder("Pull Subscription using primitive Expires In", args, Usage)
        .DefaultStream("fetch-stream")
        .DefaultSubject("fetch-subject")
        .DefaultDurable("fetch-durable")
        .DefaultCount(15)
        .Build();

    try
    {
        using (IConnection c = new ConnectionFactory().CreateConnection(helper.MakeOptions()))
        {
            IJetStreamManagement jsm = c.CreateJetStreamManagementContext();

            // Memory-backed stream for the demo; exits if it already exists.
            JsUtils.CreateStreamExitWhenExists(jsm, helper.Stream, helper.Subject);

            IJetStream js = c.CreateJetStreamContext();

            // Publish without waiting, simulating an outside producer.
            JsUtils.PublishInBackground(js, helper.Subject, "fetch-message", helper.Count);

            // Ack wait must cover reading and processing a whole batch.
            ConsumerConfiguration cc = ConsumerConfiguration.Builder()
                .WithAckWait(2500)
                .Build();

            // Durable is REQUIRED for pull based subscriptions.
            PullSubscribeOptions pullOptions = PullSubscribeOptions.Builder()
                .WithDurable(helper.Durable) // required
                .WithConfiguration(cc)
                .Build();

            IJetStreamPullSubscription sub = js.PullSubscribe(helper.Subject, pullOptions);
            c.Flush(1000);

            // Fetch in batches of 10 until every published message is acked.
            int received = 0;
            while (received < helper.Count)
            {
                foreach (Msg m in sub.Fetch(10, 1000))
                {
                    Console.WriteLine($"{++received}. Message: {m}");
                    m.Ack();
                }
            }

            // Clean up the demo stream.
            jsm.DeleteStream(helper.Stream);
        }
    }
    catch (Exception ex)
    {
        helper.ReportException(ex);
    }
}
// Entry point: loads the TOML configuration given via --config, sets up logging,
// registers the consumer services, and starts the HTTP server on port 46001.
static void Main(string[] args)
{
    // Expect exactly: --config /path/to/config.toml
    if (args.Length != 2 || args[0] != "--config")
    {
        throw new ArgumentException("Make sure you pass --config /path/to/config.toml");
    }

    var config = ConsumerConfiguration.Load(args[1]);
    Logging.SetupSimpleLogging(config.LogPath);

    // DI wiring: the configuration object doubles as the connection details.
    var services = new ServiceCollection();
    services.AddSingleton<IConsumerConfiguration>(config);
    services.AddSingleton<IRabbitMqConnectionDetails>(config);
    services.AddSingleton<IRabbitMqConnection, RabbitMqConnection>();
    services.AddTransient<Consumer>();
    IServiceProvider container = services.BuildServiceProvider();

    var server = new ConsumerHttpServer(container);
    server.Start(46001);
}
// Stores the work queue of partitions lacking a leader, the broker cluster to
// query, and the callback invoked to create a fetcher for a (partition, broker)
// pair — presumably once a leader has been resolved; confirm at usage sites.
public PartitionLeaderFinder(ConcurrentQueue <PartitionTopicInfo> partitionsNeedingLeaders, Cluster brokers, ConsumerConfiguration config, Action <PartitionTopicInfo, Broker> createNewFetcher)
{
    _partitionsNeedingLeader = partitionsNeedingLeaders;
    _brokers = brokers;
    _config = config;
    _createNewFetcher = createNewFetcher;
}
// Validates a consumer configuration up front, throwing on the first invalid
// combination. Check order (and thus which exception wins) is preserved.
private static void CheckConfiguration(ConsumerConfiguration configuration)
{
    if (configuration == null)
    {
        throw new ArgumentNullException(nameof(configuration));
    }

    if (string.IsNullOrWhiteSpace(configuration.Address))
    {
        throw new ArgumentNullException(nameof(configuration.Address), "The address cannot be empty.");
    }

    if (configuration.Credit < 1)
    {
        throw new ArgumentOutOfRangeException(nameof(configuration.Credit), "Credit should be >= 1.");
    }

    // Anycast consumers cannot use the no-local filter.
    bool isAnycast = configuration.RoutingType == RoutingType.Anycast;
    if (isAnycast && configuration.NoLocalFilter)
    {
        throw new ArgumentException(
            $"{nameof(ConsumerConfiguration.NoLocalFilter)} cannot be used with {RoutingType.Anycast.ToString()} routing type.",
            nameof(configuration.NoLocalFilter));
    }

    // Routing type and explicit queue name are mutually exclusive...
    bool hasRoutingType = configuration.RoutingType.HasValue;
    if (hasRoutingType && !string.IsNullOrEmpty(configuration.Queue))
    {
        throw new ArgumentException(
            $"Queue name cannot be explicitly set when {nameof(RoutingType)} provided. " +
            $"If you want to attach to queue by name, do not set any {nameof(RoutingType)}.",
            nameof(configuration.Queue));
    }

    // ...but exactly one of them must be present.
    if (!hasRoutingType && string.IsNullOrWhiteSpace(configuration.Queue))
    {
        throw new ArgumentNullException(nameof(configuration.Queue), "Cannot attach to queue when queue name not provided.");
    }
}
// Applies [Exchange] attribute overrides from the message type onto the consumer
// configuration: name (on both consume and exchange config), durability,
// auto-delete, and exchange type. No-op when the attribute is absent.
protected virtual void UpdateExchangeConfig(ConsumerConfiguration config, Type messageType)
{
    var attribute = GetAttribute<ExchangeAttribute>(messageType);
    if (attribute == null)
    {
        return;
    }

    if (!string.IsNullOrWhiteSpace(attribute.Name))
    {
        // The consume config always gets the name; the exchange config only if present.
        config.Consume.ExchangeName = attribute.Name;
        if (config.Exchange != null)
        {
            config.Exchange.Name = attribute.Name;
        }
    }

    // The remaining overrides only apply when an exchange config exists.
    if (config.Exchange == null)
    {
        return;
    }

    if (attribute.NullableDurability.HasValue)
    {
        config.Exchange.Durable = attribute.NullableDurability.Value;
    }

    if (attribute.NullableAutoDelete.HasValue)
    {
        config.Exchange.AutoDelete = attribute.NullableAutoDelete.Value;
    }

    if (attribute.Type != ExchangeType.Unknown)
    {
        config.Exchange.ExchangeType = attribute.Type.ToString().ToLowerInvariant();
    }
}
// Option-level deliver subject wins when set; the config-level value fills in
// when it is not; conflicting non-null values are rejected.
public void TestDeliverSubjectValidation()
{
    // Effective deliver subject for the given option-level / config-level values.
    string Resolve(string optionSubject, string configSubject) =>
        PushSubscribeOptions.Builder()
            .WithDeliverSubject(optionSubject)
            .WithConfiguration(ConsumerConfiguration.Builder().WithDeliverSubject(configSubject).Build())
            .Build()
            .DeliverSubject;

    Assert.Null(Resolve(null, null));
    Assert.Equal("y", Resolve(null, "y"));
    Assert.Equal("x", Resolve("x", null));
    Assert.Equal("x", Resolve("x", "x"));

    // Conflicting subjects must throw.
    Assert.Throws<NATSJetStreamClientException>(() => Resolve("x", "y"));
}
// Stores the consumer's dependencies (client context, consumer configuration,
// and the producer used to announce deletions) and calls init() — defined
// elsewhere — to complete the setup.
public DeleteClientConsumer(ClientContext context, ConsumerConfiguration consumerConfiguration, ClientDeletedProducer clientDeletedProducer)
{
    this.context = context;
    this.consumerConfiguration = consumerConfiguration;
    this.clientDeletedProducer = clientDeletedProducer;
    init();
}
// Bootstraps the Seif client side only: default consumer configuration, an
// empty registry (no provider config), then obtains a proxy reference to
// IMockService with default proxy options and no invoke filters.
public static void Main()
{
    var clientConfig = new ConsumerConfiguration();
    var registry = new GenericRegistry(null, null);
    SeifApplication.Initialize(registry, null, clientConfig, null);
    SeifApplication.ReferenceService<IMockService>(new ProxyOptions(), new IInvokeFilter[0]);
}
// Creates a consumer wrapped in the auto-recovering decorator and registers it
// with the recovery machinery before returning it.
public async Task<IConsumer> CreateConsumerAsync(ConsumerConfiguration configuration, CancellationToken cancellationToken)
{
    var consumer = new AutoRecoveringConsumer(_loggerFactory, configuration);
    await PrepareRecoverable(consumer, cancellationToken).ConfigureAwait(false);
    return consumer;
}
/// <summary>
/// Validates a consumer configuration: the maximum delivery count must be positive.
/// </summary>
/// <param name="configuration">The configuration to validate.</param>
/// <returns>true when valid; otherwise false.</returns>
public static bool Validate(ConsumerConfiguration configuration)
{
    // Idiom: return the boolean condition directly instead of if/return-false/return-true.
    return configuration.MaximumDeliveryCount > 0;
}
/// <summary>
/// Initializes a new instance of the <see cref="Consumer"/> class, taking the
/// target host and port from the configuration's Broker.
/// </summary>
/// <param name="config">
/// The consumer configuration; must not be null and must carry a Broker.
/// </param>
public Consumer(ConsumerConfiguration config)
{
    Guard.NotNull(config, "config");

    this.config = config;
    this.host = config.Broker.Host;
    this.port = config.Broker.Port;
}
/// <summary>
/// Initializes a new instance of the <see cref="Consumer"/> class targeting an
/// explicit host/port, overriding any broker info the configuration may carry.
/// </summary>
/// <param name="config">
/// The consumer configuration; must not be null.
/// </param>
/// <param name="host">Broker host to connect to.</param>
/// <param name="port">Broker port to connect to.</param>
public Consumer(ConsumerConfiguration config, string host, int port)
{
    Guard.NotNull(config, "config");

    this.config = config;
    this.host = host;
    this.port = port;
}
// Option-level durable wins when set; the config-level durable fills in when it
// is not; conflicting non-null values are rejected — for both push and pull.
public void TestDurableValidation()
{
    // Effective durable for push options built from the given option/config values.
    string PushDurable(string optionDurable, string configDurable) =>
        PushSubscribeOptions.Builder()
            .WithDurable(optionDurable)
            .WithConfiguration(ConsumerConfiguration.Builder().WithDurable(configDurable).Build())
            .Build()
            .Durable;

    // Effective durable for pull options built the same way.
    string PullDurable(string optionDurable, string configDurable) =>
        PullSubscribeOptions.Builder()
            .WithDurable(optionDurable)
            .WithConfiguration(ConsumerConfiguration.Builder().WithDurable(configDurable).Build())
            .Build()
            .Durable;

    // push
    Assert.Null(PushDurable(null, null));
    Assert.Equal("y", PushDurable(null, "y"));
    Assert.Equal("x", PushDurable("x", null));
    Assert.Equal("x", PushDurable("x", "x"));
    Assert.Throws<NATSJetStreamClientException>(() => PushDurable("x", "y"));
    Assert.Null(PushSubscribeOptions.Builder().Build().Durable);

    // pull
    Assert.Equal("y", PullDurable(null, "y"));
    Assert.Equal("x", PullDurable("x", null));
    Assert.Equal("x", PullDurable("x", "x"));
    Assert.Throws<NATSJetStreamClientException>(() => PullDurable("x", "y"));
}
// Shared consumers and explicit queue attachments use the queue name as the link
// name; otherwise a fresh GUID keeps the link unique.
private static string GetLinkName(ConsumerConfiguration configuration)
{
    return configuration.Shared || !string.IsNullOrEmpty(configuration.Queue)
        ? configuration.Queue
        : Guid.NewGuid().ToString();
}
//private readonly TestEntityFContext _dbcontext;
// Creates the event manager: keeps the configuration and constructs the underlying
// KafkaConsumer with its own typed logger. DbContext wiring is currently disabled.
public EventManager(ILoggerFactory loggerFactory, ConsumerConfiguration consumerConfiguration
    //,TestEntityFContext dbContext
    )
{
    _logger = loggerFactory.CreateLogger <EventManager>();
    _configuration = consumerConfiguration;
    _consumer = new KafkaConsumer(loggerFactory.CreateLogger <KafkaConsumer>(), _configuration);
    //_dbcontext = dbContext;
}
// Bootstraps the Seif client side only: default consumer configuration, an
// empty registry (no provider config), then obtains a proxy reference to
// IMockService with default proxy options and no invoke filters.
public static void Main()
{
    var clientConfig = new ConsumerConfiguration();
    var registry = new GenericRegistry(null, null);
    SeifApplication.Initialize(registry, null, clientConfig, null);
    SeifApplication.ReferenceService <IMockService>(new ProxyOptions(), new IInvokeFilter[0]);
}
// Queries up to 10 offsets available before a point five days in the past for
// one topic/partition and returns their sum.
// NOTE(review): the third OffsetRequest argument is DateTime.Now ticks; Kafka
// offset APIs typically expect epoch milliseconds (or -1/-2 sentinels) — confirm
// this client interprets ticks correctly.
// NOTE(review): summing the returned offsets is unusual; presumably callers
// treat this as an aggregate position — verify against usage.
public static long GetCurrentKafkaOffset(string topic, string address, int port, int partition)
{
    var request = new OffsetRequest(topic, partition, DateTime.Now.AddDays(-5).Ticks, 10);
    var consumerConfig = new ConsumerConfiguration(address, port);
    IConsumer consumer = new Consumer(consumerConfig, address, port);
    IList<long> list = consumer.GetOffsetsBefore(request);
    return (list.Sum());
}
// Creates a durable consumer with filter subject fs, then verifies that a push
// subscription on a non-matching subject ss fails with the expected client error.
private void SubscribeEx(IJetStream js, IJetStreamManagement jsm, string fs, string ss)
{
    // Unique suffix so every invocation uses a distinct durable name.
    int id = Rndm.Next();
    SetupConsumer(jsm, id, fs);

    var options = ConsumerConfiguration.Builder()
        .WithDurable(Durable(id))
        .BuildPushSubscribeOptions();
    var ex = Assert.Throws<NATSJetStreamClientException>(() => js.PushSubscribeSync(ss, options));
    Assert.Contains(JsSubSubjectDoesNotMatchFilter.Id, ex.Message);
}
// Fetch worker for a single broker: stores its identity/dependencies and the
// partitions it serves, and opens a dedicated low-level Consumer bound to the
// broker's host and port.
internal FetcherRunnable(string name, IZooKeeperClient zkClient, ConsumerConfiguration config, Broker broker, List <PartitionTopicInfo> partitionTopicInfos)
{
    this.name = name;
    this.zkClient = zkClient;
    this.config = config;
    this.broker = broker;
    this.partitionTopicInfos = partitionTopicInfos;
    this.simpleConsumer = new Consumer(this.config, broker.Host, broker.Port);
}
// Fetch-worker variant that also receives a callback for flagging partitions
// that hit errors ("markPartiton" spelling kept from the existing API).
internal FetcherRunnable(string name, IZooKeeperClient zkClient, ConsumerConfiguration config, Broker broker, List<PartitionTopicInfo> partitionTopicInfos, Action<PartitionTopicInfo> markPartitonWithError)
{
    _name = name;
    _zkClient = zkClient;
    _config = config;
    _broker = broker;
    _partitionTopicInfos = partitionTopicInfos;
    // Snapshot of the configured fetch buffer length — presumably used to size
    // fetches; confirm at usage sites.
    _fetchBufferLength = config.MaxFetchBufferLength;
    _markPartitonWithError = markPartitonWithError;
    // Dedicated low-level consumer bound to this broker's host/port.
    _simpleConsumer = new Consumer(_config, broker.Host, broker.Port);
}
// Bootstraps the Seif provider side: provider configuration with an API domain,
// default consumer configuration, an empty registry, then exposes MockService
// as the IMockService implementation.
public static void Main()
{
    var servConfig = new ProviderConfiguration { ApiDomain = "api.aaa.com" };
    var clientConfig = new ConsumerConfiguration();
    var registry = new GenericRegistry(null, null);

    SeifApplication.Initialize(registry, servConfig, clientConfig, null);
    SeifApplication.ExposeService<IMockService, MockService>(new ServiceConfiguration());
}
// Per-broker fetcher: stores dependencies, starts the observable fetch pipeline,
// and wires cancellation to a wakeup signal so the loop can stop promptly.
public Fetcher(Cluster cluster, BrokerMeta broker, Protocol protocol, ConsumerConfiguration consumerConfig, CancellationToken cancel)
{
    _cluster = cluster;
    _broker = broker;
    _protocol = protocol;
    _cancel = cancel;
    _consumerConfig = consumerConfig;

    // Publish().RefCount() shares one underlying FetchLoop among all subscribers.
    _fetchResponses = FetchLoop().Publish().RefCount();
    BuildReceivedMessages();

    // On cancellation, pulse the wakeup signal so the loop observes _cancel promptly.
    _cancel.Register(() => _wakeupSignal.OnNext(true));

    if (_log.IsDebugEnabled)
        _log.Debug("Created new fetcher #{0} for broker: {1}", _id, _broker);

    EtwTrace.Log.FetcherStart(_id, consumerConfig.Topic);
}