/// <summary>
/// Builds a <c>byte[]</c> Kafka consumer for group "test-group2" against icat-test01:9092,
/// tuned with large queue/buffer limits for throughput testing.
/// </summary>
/// <returns>A started consumer wired to the "dot-net" topic with the default deserializer and observer.</returns>
/// <remarks>
/// Fix: the original chain set "socket.send.buffer.bytes", "queued.min.messages" and
/// "queued.max.messages.kbytes" twice each with identical values; the redundant duplicates
/// (where the later call silently overwrote the earlier one) have been removed.
/// </remarks>
private static KafkaConsumer<byte[]> CreateKafkaConsumer()
{
    var kafkaSetting = new KafkaSetting()
        .SetGroupId("test-group2")
        .SetBootstrapServers(new Uri("http://icat-test01:9092"))
        .Set("auto.offset.reset", "latest")
        .Set("auto.commit.interval.ms", 1000)
        .Set("queued.max.messages.kbytes", 1000000000)
        .Set("queued.min.messages", 10000000)
        .Set("fetch.message.max.bytes", 80000)
        .Set("internal.termination.signal", 10)
        .Set("message.max.bytes", 1000000000)
        .Set("message.copy.max.bytes", 1000000000)
        .Set("receive.message.max.bytes", 1000000000)
        .Set("max.in.flight.requests.per.connection", 1000)
        .Set("socket.send.buffer.bytes", 100000000)
        .Set("socket.receive.buffer.bytes", 100000000)
        .Set("socket.blocking.max.ms", 20)
        .Set("fetch.min.bytes", 1)
        .Set("topic.metadata.refresh.sparse", false)
        .Set("fetch.error.backoff.ms", 20)
        .Set("fetch.wait.max.ms", 10);

    return new KafkaConsumer<byte[]>(kafkaSetting, "dot-net", new DefaultDeserializer(), new MessageObserver());
}
/// <summary>
/// Wires up all collaborators used when handling geo-database update notifications.
/// All dependencies are supplied by the DI container; the two IOptions wrappers are
/// unwrapped to their Value once here so the rest of the class reads plain settings.
/// </summary>
public GeoDatabaseUpdatedHandler(
    ILogger<GeoDatabaseUpdatedHandler> logger,
    IMediator mediator,
    IRouteSegmentCommandFactory routeSegmentEventFactory,
    IRouteNodeCommandFactory routeNodeEventFactory,
    IGeoDatabase geoDatabase,
    IEventStore eventStore,
    IProducer producer,
    IOptions<KafkaSetting> kafkaSettings,
    IOptions<ApplicationSetting> applicationSettings,
    IModifiedGeometriesStore modifiedGeometriesStore,
    IRouteNodeInfoCommandFactory routeNodeInfoCommandFactory,
    IRouteSegmentInfoCommandFactory routeSegmentInfoCommandFactory,
    IValidationService validationService)
{
    this._logger = logger;
    this._mediator = mediator;
    this._routeSegmentEventFactory = routeSegmentEventFactory;
    this._routeNodeEventFactory = routeNodeEventFactory;
    this._geoDatabase = geoDatabase;
    this._eventStore = eventStore;
    this._producer = producer;
    this._kafkaSettings = kafkaSettings.Value;
    this._applicationSettings = applicationSettings.Value;
    this._modifiedGeometriesStore = modifiedGeometriesStore;
    this._routeNodeInfoCommandFactory = routeNodeInfoCommandFactory;
    this._routeSegmentInfoCommandFactory = routeSegmentInfoCommandFactory;
    this._validationService = validationService;
}
// This method gets called by the runtime. Use this method to add services to the container.
// For more information on how to configure your application, visit https://go.microsoft.com/fwlink/?LinkID=398940
public void ConfigureServices(IServiceCollection services)
{
    // NLog is bootstrapped first so the unhandled-exception hook below can log.
    var logger = NLog.Web.NLogBuilder.ConfigureNLog("nlog.config").GetCurrentClassLogger();

    AppDomain.CurrentDomain.UnhandledException += new UnhandledExceptionEventHandler((o, e) =>
    {
        logger.Error($"Unhandled Exception.{e.ExceptionObject}");
    });

    services.AddRazorPages();
    services.AddServerSideBlazor();

    // Application singletons.
    services.AddSingleton<WeatherForecastService>();
    services.AddSingleton<ReceiveKafkaService>();
    services.AddSingleton<KafkaProducer>();
    services.AddSingleton<KafkaConsumer>();
    services.AddSingleton<AplQuery>();
    services.AddSingleton<AplScan>();

    // KafkaSetting is bound eagerly from configuration and registered as a
    // concrete singleton instance (not via IOptions).
    var kafkaSetting = new KafkaSetting();
    Configuration.GetSection("KafkaSetting").Bind(kafkaSetting);
    services.AddSingleton<KafkaSetting>(kafkaSetting);

    // Per-circuit helpers for Blazor JS interop.
    services.AddScoped<DragDropHelper>();
    services.AddScoped<JsHelper>();
}
/// <summary>
/// Creates the producer: stores its dependencies, builds the underlying
/// Confluent producer via <see cref="Init"/>, then registers the topic list.
/// </summary>
public KafkaProducer(ILogger<KafkaProducer> logger, KafkaSetting kafkaSetting)
{
    _aplLog = logger;
    _kafkaSetting = kafkaSetting;

    Init(_kafkaSetting);
    InitTopics(_kafkaSetting);
}
/// <summary>
/// Captures the validator's collaborators; option wrappers are unwrapped to
/// their settings values up front.
/// </summary>
public ElementNotFeededValidator(ILogger<ElementNotFeededValidator> logger,
                                 InMemoryNetworkState inMemoryNetworkState,
                                 PostgresWriter postgresWriter,
                                 IOptions<DatabaseSetting> databaseSetting,
                                 IProducer eventProducer,
                                 IOptions<KafkaSetting> kafkaSetting)
{
    this._logger = logger;
    this._inMemoryNetworkState = inMemoryNetworkState;
    this._postgresWriter = postgresWriter;
    this._eventProducer = eventProducer;
    this._databaseSetting = databaseSetting.Value;
    this._kafkaSetting = kafkaSetting.Value;
}
/// <summary>
/// Stores the consumer's dependencies; the Kafka options wrapper is unwrapped here.
/// </summary>
public GeographicalAreaUpdatedKafkaConsumer(
    IOptions<KafkaSetting> kafkaSetting,
    ILogger<GeographicalAreaUpdatedKafkaConsumer> logger,
    IMediator mediator)
{
    this._logger = logger;
    this._mediator = mediator;
    this._kafkaSetting = kafkaSetting.Value;
}
/// <summary>
/// Builds the string/string Confluent producer from the configured broker endpoint.
/// </summary>
/// <param name="kafkaSetting">Carries the broker address in <c>IpPort</c>.</param>
public void Init(KafkaSetting kafkaSetting)
{
    var producerConfig = new ProducerConfig
    {
        BootstrapServers = kafkaSetting.IpPort
    };
    _producer = new ProducerBuilder<string, string>(producerConfig).Build();
}
/// <summary>
/// Registers all producer and consumer topic names from the given settings
/// into the internal <c>_topics</c> list.
/// </summary>
/// <param name="kafkaSetting">Settings object whose topic names are collected.</param>
/// <remarks>
/// Fix: the original body ignored the <paramref name="kafkaSetting"/> parameter and
/// always read <c>this._kafkaSetting</c>; it now honors the argument. The visible
/// caller (the constructor) passes <c>_kafkaSetting</c>, so behavior is unchanged there.
/// </remarks>
public void InitTopics(KafkaSetting kafkaSetting)
{
    _topics.Add(kafkaSetting.ProducerTopics.DeviceTopic);
    _topics.Add(kafkaSetting.ProducerTopics.TaskTopic);
    _topics.Add(kafkaSetting.ProducerTopics.StorageTopic);
    _topics.Add(kafkaSetting.ConsumerTopics.DeviceTopic);
    _topics.Add(kafkaSetting.ConsumerTopics.TaskTopic);
    _topics.Add(kafkaSetting.ConsumerTopics.StorageTopic);
}
/// <summary>
/// Captures the consumer's collaborators; the three option wrappers are
/// unwrapped to their settings values once here.
/// </summary>
public AddressConsumer(ITypesenseClient client,
                       ILogger<AddressConsumer> logger,
                       IPostgresWriter postgresWriter,
                       IOptions<KafkaSetting> kafkaSetting,
                       IOptions<DatabaseSetting> databaseSetting,
                       IOptions<TypesenseSetting> typesenseSetting)
{
    this._client = client;
    this._logger = logger;
    this._postgresWriter = postgresWriter;
    this._kafkaSetting = kafkaSetting.Value;
    this._databaseSetting = databaseSetting.Value;
    this._typesenseSetting = typesenseSetting.Value;
}
/// <summary>
/// Forwards logger and queue manager to the base handler and keeps the
/// Kafka producer manager plus settings for this handler's own use.
/// </summary>
public MqttMonitorDataMessageHandler(
    IKafkaProducerManager kafkaProducerManager,
    ILogger<MqttMonitorDataMessageHandler> logger,
    IMessageQueueManager messageQueueManager,
    KafkaSetting kafkaSetting) : base(logger, messageQueueManager)
{
    this._kafkaProducerManager = kafkaProducerManager;
    this._kafkaSetting = kafkaSetting;
}
/// <summary>
/// Validates and stores the settings, stores the logger, then eagerly
/// initializes the manager.
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="kafkaSetting"/> is null.</exception>
public DefaultKafkaProducerManager(
    KafkaSetting kafkaSetting,
    ILogger<DefaultKafkaProducerManager> logger)
{
    if (kafkaSetting is null)
    {
        throw new ArgumentNullException(nameof(kafkaSetting));
    }

    _kafkaSetting = kafkaSetting;
    _logger = logger;
    Init();
}
/// <summary>
/// Stores the logger and unwraps the database and Kafka option wrappers.
/// </summary>
public MSSQLWriter(
    ILogger<MSSQLWriter> logger,
    IOptions<DatabaseSetting> databaseSetting,
    IOptions<KafkaSetting> kafkaSetting)
{
    this._logger = logger;
    this._databaseSetting = databaseSetting.Value;
    this._kafkaSetting = kafkaSetting.Value;
}
/// <summary>
/// Captures the subscriber's collaborators; both option wrappers are
/// unwrapped to their settings values here.
/// </summary>
public PostgresRouteNetworkSubscriber(
    IOptions<KafkaSetting> kafkaSetting,
    IOptions<PostgisSetting> postgisSetting,
    IMediator mediator,
    ILogger<PostgresRouteNetworkSubscriber> logger)
{
    this._mediator = mediator;
    this._logger = logger;
    this._kafkaSetting = kafkaSetting.Value;
    this._postgisSetting = postgisSetting.Value;
}
/// <summary>
/// Stores dependencies, unwraps option values, and registers NetTopologySuite
/// with Npgsql's global type mapper so geometry columns can be read/written.
/// </summary>
public PSQLWriter(
    ILogger<PSQLWriter> logger,
    IOptions<DatabaseSetting> databaseSetting,
    IOptions<KafkaSetting> kafkaSetting)
{
    this._logger = logger;
    this._databaseSetting = databaseSetting.Value;
    this._kafkaSetting = kafkaSetting.Value;

    // Global, process-wide registration; must happen before connections are opened.
    NpgsqlConnection.GlobalTypeMapper.UseNetTopologySuite();
}
/// <summary>
/// Captures the handler's collaborators; both option wrappers are unwrapped
/// to their settings values here.
/// </summary>
public GeographicalAreaUpdatedHandler(
    IProducer producer,
    ILogger<GeographicalAreaUpdatedHandler> logger,
    IEnvelopeFactory envelopeFactory,
    IOptions<KafkaSetting> kafkaSettings,
    IOptions<ApplicationSetting> applicationSetting)
{
    this._producer = producer;
    this._logger = logger;
    this._envelopeFactory = envelopeFactory;
    this._kafkaSettings = kafkaSettings.Value;
    this._applicationSettings = applicationSetting.Value;
}
// Consumer throughput benchmark: builds a consumer from base settings plus the
// supplied overrides, then samples MessageCount once per second until throughput
// dies down or a 60 s timeout elapses. Returns the average messages/second.
// NOTE(review): MessageCount appears to be a static counter incremented by
// MessageObserver — confirm against that class; it is only reset, read, and
// logged here.
public static double Run(Dictionary<string, int> parameters)
{
    MessageCount = 0;
    var kafkaSetting = new KafkaSetting()
        .SetGroupId("test-group2")
        .SetBootstrapServers(new Uri(Program.KafkaUri))
        .SetAcks(1)
        .SetRetries(0)
        .SetCompression(CompressionCodes.none)
        .Set("fetch.message.max.bytes", 2000)
        //.Set("auto.offset.reset", "latest")
        .SetClientId("client-id")
        .SetGroupId("test-group");
    // Benchmark parameters override/extend the base settings above.
    foreach (var parameter in parameters)
    {
        kafkaSetting.Set(parameter.Key, parameter.Value);
    }
    // Range(1, 1) deliberately creates exactly one consumer; bump the count to
    // benchmark several consumers in the same group.
    var consumers = Enumerable.Range(1, 1)
        .Select(x => new KafkaConsumer<byte[]>(kafkaSetting, Program.Topic, new SimpleDesiralizer(), new MessageObserver()))
        .ToArray();
    var counter = 0;
    const int stepMilliseconds = 1000;
    double avgRps = 0;
    var stopwatch = new Stopwatch();
    stopwatch.Start();
    while (true)
    {
        // Sample the counter across a fixed sleep window to get messages/second.
        var prevCount = MessageCount;
        Thread.Sleep(TimeSpan.FromMilliseconds(stepMilliseconds));
        var newCount = MessageCount;
        var rps = (double)(newCount - prevCount) / stepMilliseconds * 1000;
        // Averaging only starts once the first message arrives, so the warm-up
        // period before traffic flows does not drag the average down.
        if (avgRps > 0 || rps > 0)
        {
            counter++;
            avgRps = (double)newCount / counter / stepMilliseconds * 1000;
        }
        //Console.WriteLine(DiffTimestampManager.GetReport());
        Program.Log($"MessageCount={newCount}, perSecond={rps}, avg={avgRps}");
        // Stop when throughput has dropped to ~zero after receiving something,
        // or after 60 s regardless.
        if (Math.Abs(rps) < 1 && newCount > 0 || stopwatch.ElapsedMilliseconds > 60000)
        {
            break;
        }
    }
    foreach (var consumer in consumers)
    {
        consumer.Dispose();
    }
    return (avgRps);
}
/// <summary>
/// Static initializer: builds the shared producer settings for localhost:9092
/// with batching tuned for high-volume sends (20 ms linger, 64 k-message batches,
/// 20 MB max message size, 10 M-message / ~2 GB send queue).
/// </summary>
static KafkaProducerProvider()
{
    kafkaSetting = new KafkaSetting()
        .SetBootstrapServers(new Uri("http://localhost:9092"))
        .SetAcks(1)
        .SetRetries(0)
        .SetLinger(TimeSpan.FromMilliseconds(20))
        .Set("socket.blocking.max.ms", 25)
        .Set("batch.num.messages", 64 * 1000)
        .Set("message.max.bytes", 20 * 1000 * 1000)
        .Set("queue.buffering.max.messages", 10000000)
        .Set("queue.buffering.max.kbytes", 2097151)
        .SetClientId("client-id")
        .SetGroupId("test-group");
}
// Builds the string/string Confluent consumer and starts the receive loop.
// NOTE(review): GroupId is hard-coded here rather than read from kafkaSetting —
// confirm whether it should come from configuration like BootstrapServers does.
public void Init(KafkaSetting kafkaSetting)
{
    var config = new ConsumerConfig
    {
        BootstrapServers = kafkaSetting.IpPort,
        EnableAutoCommit = true,
        // Emit an event when the end of a partition is reached.
        EnablePartitionEof = true,
        GroupId = "bgi_lims_consumer_yzq",
        AutoOffsetReset = AutoOffsetReset.Latest,
    };
    this._consumer = new ConsumerBuilder<string, string>(config).Build();
    // Fire-and-forget background receive loop; the Task is not stored, so any
    // exception thrown by ReceiveMessageAsync is unobserved — NOTE(review):
    // consider keeping the Task and logging faults.
    Task.Run(() => ReceiveMessageAsync());
}
/// <summary>
/// Captures the writer's collaborators; the Kafka and database option
/// wrappers are unwrapped to their settings values here.
/// </summary>
public DatafordelereDatabaseWriter(
    ILogger<DatafordelereDatabaseWriter> logger,
    IOptions<KafkaSetting> kafkaSetting,
    IOptions<DatabaseSetting> databaseSetting,
    IDatabaseWriter databaseWriter,
    IPostgresWriter postgresWriter)
{
    this._logger = logger;
    this._databaseWriter = databaseWriter;
    this._postgresWriter = postgresWriter;
    this._kafkaSetting = kafkaSetting.Value;
    this._databaseSetting = databaseSetting.Value;
}
/// <summary>
/// Captures the consumer's collaborators and creates this class's logger
/// from the supplied factory. All three option wrappers are unwrapped here.
/// </summary>
public RouteNetworkEventConsumer(ILoggerFactory loggerFactory,
                                 IEventStore eventStore,
                                 IOptions<KafkaSetting> kafkaSetting,
                                 IOptions<EventStoreDatabaseSetting> eventStoreDatabaseSetting,
                                 IOptions<GeoDatabaseSetting> geoDatabaseSetting,
                                 IToposTypedEventObservable<RouteNetworkEditOperationOccuredEvent> eventDispatcher,
                                 RouteNetworkEventHandler routeNetworkEventHandler,
                                 IRouteNetworkState routeNetworkState,
                                 ICommandDispatcher commandDispatcher,
                                 IQueryDispatcher queryDispatcher)
{
    this._loggerFactory = loggerFactory;
    this._logger = loggerFactory.CreateLogger<RouteNetworkEventConsumer>();
    this._eventStore = eventStore;
    this._eventDispatcher = eventDispatcher;
    this._routeNetworkEventHandler = routeNetworkEventHandler;
    this._routeNetworkState = routeNetworkState;
    this._commandDispatcher = commandDispatcher;
    this._queryDispatcher = queryDispatcher;
    this._kafkaSetting = kafkaSetting.Value;
    this._eventStoreDatabaseSetting = eventStoreDatabaseSetting.Value;
    this._geoDatabaseSetting = geoDatabaseSetting.Value;
}
/// <summary>
/// Captures the handler's collaborators; the Kafka options wrapper is
/// unwrapped to its settings value here.
/// </summary>
public RouteNodeLocationChangedHandler(
    ILogger<RouteNodeLocationChangedHandler> logger,
    IOptions<KafkaSetting> kafkaSettings,
    IEventStore eventStore,
    IGeoDatabase geoDatabase,
    IMediator mediator,
    IRouteNodeEventFactory routeNodeEventFactory,
    IRouteSegmentEventFactory routeSegmentEventFactory)
{
    this._logger = logger;
    this._eventStore = eventStore;
    this._geoDatabase = geoDatabase;
    this._mediator = mediator;
    this._routeNodeEventFactory = routeNodeEventFactory;
    this._routeSegmentEventFactory = routeSegmentEventFactory;
    this._kafkaSettings = kafkaSettings.Value;
}
/// <summary>
/// Validates and stores all collaborators for the FBox MQTT service.
/// </summary>
/// <exception cref="ArgumentNullException">When any dependency is null.</exception>
/// <remarks>
/// Fix (CA2208): the original passed the private field names
/// (e.g. <c>nameof(_mqttManagerFunc)</c>) to <see cref="ArgumentNullException"/>;
/// the exception's ParamName must be the constructor parameter name so callers
/// can tell which argument was null.
/// </remarks>
public FBoxMqttService(
    Func<string, MqttManagerBase> mqttManagerFunc,
    MqttSetting mqttSetting,
    BoxStateHttpClient boxStateHttpClient,
    ILogger<FBoxMqttService> logger,
    IKafkaProducerManager kafkaProducerManager,
    KafkaSetting kafkaSetting)
{
    _mqttManagerFunc = mqttManagerFunc ?? throw new ArgumentNullException(nameof(mqttManagerFunc));
    _mqttSetting = mqttSetting ?? throw new ArgumentNullException(nameof(mqttSetting));
    _boxStateHttpClient = boxStateHttpClient ?? throw new ArgumentNullException(nameof(boxStateHttpClient));
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    _kafkaProducerManager = kafkaProducerManager ?? throw new ArgumentNullException(nameof(kafkaProducerManager));
    _kafkaSetting = kafkaSetting ?? throw new ArgumentNullException(nameof(kafkaSetting));
}
/// <summary>
/// Builds a producer against the brokers resolved from the "Kafka" topology,
/// with delivery reporting toggled by settings and batching tuned for bulk sends.
/// </summary>
/// <returns>A producer whose delivery reports invoke <c>OnMessageSent</c>.</returns>
private static KafkaProducer CreateKafkaProducer()
{
    var topology = TopologyService.GetTopology("Kafka");
    var settings = SettingsProvider.GetSettings();

    var kafkaSetting = new KafkaSetting(settings.DisableKafkaReports)
        .SetBootstrapServers(topology)
        .SetAcks(1)
        .SetRetries(0)
        //.Set("queue.buffering.max.ms", 20)
        .Set("auto.commit.interval.ms", 1400)
        .Set("session.timeout.ms", 8400)
        .Set("message.max.bytes", 1000000)
        .Set("message.copy.max.bytes", 604000)
        //.Set("receive.message.max.bytes", 92000000)
        .Set("max.in.flight.requests.per.connection", 560000)
        .Set("queue.buffering.max.messages", 9200800)
        .Set("queue.buffering.max.kbytes", 839460)
        .Set("queue.buffering.max.ms", 500)
        .Set("batch.num.messages", 1000000)
        .SetClientId("client-id");

    return new KafkaProducer(kafkaSetting, OnMessageSent);
}
// Verifies that KafkaSetting's object initializer round-trips every property.
public void KafkaSettings_ShouldInitalizeValues_OnConstruction()
{
    // Arrange
    const string server = "192.13.2.1";
    const string eventRouteNetwork = "event.route-network";
    const string postgisRouteNetworkConsumer = "postgis-route-network-consumer";
    const string postgisRouteNetworkTopic = "postgis.route-network";

    // Act
    var sut = new KafkaSetting
    {
        Server = server,
        EventRouteNetworkTopicName = eventRouteNetwork,
        PostgisRouteNetworkConsumer = postgisRouteNetworkConsumer,
        PostgisRouteNetworkTopic = postgisRouteNetworkTopic
    };

    // Assert — one scope so all failures are reported together.
    using (new AssertionScope())
    {
        sut.Server.Should().BeEquivalentTo(server);
        sut.EventRouteNetworkTopicName.Should().BeEquivalentTo(eventRouteNetwork);
        sut.PostgisRouteNetworkConsumer.Should().BeEquivalentTo(postgisRouteNetworkConsumer);
        sut.PostgisRouteNetworkTopic.Should().BeEquivalentTo(postgisRouteNetworkTopic);
    }
}
/// <summary>
/// Captures the handler's collaborators; both option wrappers are unwrapped
/// here. The validation service is optional and may be null.
/// </summary>
public RouteSegmentConnectivityChangedHandler(
    ILogger<RouteSegmentConnectivityChangedHandler> logger,
    IOptions<KafkaSetting> kafkaSettings,
    IOptions<ApplicationSetting> applicationSettings,
    IGeoDatabase geoDatabase,
    IRouteNodeFactory routeNodeFactory,
    IRouteSegmentFactory routeSegmentFactory,
    IRouteNodeEventFactory routeNodeEventFactory,
    IRouteSegmentEventFactory routeSegmentEventFactory,
    IEventStore eventStore,
    IValidationService validationService = null)
{
    this._logger = logger;
    this._geoDatabase = geoDatabase;
    this._routeNodeFactory = routeNodeFactory;
    this._routeSegmentFactory = routeSegmentFactory;
    this._routeNodeEventFactory = routeNodeEventFactory;
    this._routeSegmentEventFactory = routeSegmentEventFactory;
    this._eventStore = eventStore;
    this._validationService = validationService;
    this._kafkaSettings = kafkaSettings.Value;
    this._applicationSettings = applicationSettings.Value;
}
/// <summary>
/// Configures and starts a Kafka consumer on the DAR topic (localhost:9092,
/// file-system position store) whose handler logs when a message arrives more
/// than 10 seconds after the consumer was started.
/// </summary>
/// <remarks>
/// Fix: the original computed <c>waitStartTimestamp - messageTime</c>, which is
/// always negative (start time minus "now"), so <c>TotalSeconds &gt; 10</c> could
/// never be true and the log line was unreachable. The subtraction order is now
/// elapsed time since start. Two unused locals (AdresseList, hussnumerList) were removed.
/// </remarks>
private void Consume()
{
    var kafka = new KafkaSetting();
    kafka.DatafordelereTopic = "DAR";
    kafka.Server = "localhost:9092";
    kafka.PositionFilePath = "/tmp/";

    DateTime waitStartTimestamp = DateTime.UtcNow;

    var consumer = _consumer = Configure
        .Consumer(kafka.DatafordelereTopic, c => c.UseKafka(kafka.Server))
        .Serialization(s => s.DatafordelerEventDeserializer())
        .Topics(t => t.Subscribe(kafka.DatafordelereTopic))
        .Positions(p => p.StoreInFileSystem(kafka.PositionFilePath))
        .Handle(async (messages, context, token) =>
        {
            foreach (var message in messages)
            {
                if (message.Body is JsonObject)
                {
                    var messageTime = DateTime.UtcNow;
                    // Elapsed time since the consumer was started (was inverted).
                    TimeSpan timespan = messageTime - waitStartTimestamp;
                    if (timespan.TotalSeconds > 10)
                    {
                        _logger.LogInformation("It worked");
                    }
                }
            }
        }).Start();
}
/// <summary>
/// Creates a consumer client for the given group with a UTF-8 string deserializer.
/// </summary>
/// <param name="groupId">Kafka consumer-group id.</param>
/// <param name="kafkaSetting">Connection settings; must not be null.</param>
/// <exception cref="ArgumentNullException">When <paramref name="kafkaSetting"/> is null.</exception>
/// <remarks>
/// Fix (CA2208): the original passed <c>nameof(KafkaSetting)</c> — the TYPE name
/// ("KafkaSetting") — as the exception's ParamName; it must be the parameter
/// name ("kafkaSetting").
/// </remarks>
public KafkaConsumerClient(string groupId, KafkaSetting kafkaSetting)
{
    _groupId = groupId;
    _kafkaSetting = kafkaSetting ?? throw new ArgumentNullException(nameof(kafkaSetting));
    StringDeserializer = new StringDeserializer(Encoding.UTF8);
}
/// <summary>
/// Keeps the Kafka settings used to construct consumer clients.
/// </summary>
public KafkaConsumerClientFactory(KafkaSetting kafkaSetting)
{
    this._kafkaSetting = kafkaSetting;
}
/// <summary>
/// Stores the logger and unwraps the Kafka options wrapper.
/// </summary>
public Producer(IOptions<KafkaSetting> kafkaSetting, ILogger<Producer> logger)
{
    this._logger = logger;
    this._kafkaSetting = kafkaSetting.Value;
}
// Producer load-test: spins up "_tasks" sending loops plus a watcher task that
// samples successCount once per stepMilliseconds, runs for 60 s, then cancels
// everything and returns the average successful sends/second (0 on failure).
// Parameters without a "_" prefix are applied as raw Kafka settings; "_"-prefixed
// entries (e.g. "_tasks") control the benchmark itself.
public static double Run(Dictionary<string, int> parameters)
{
    requestCount = 0;
    successCount = 0;
    errorCount = 0;
    var kafkaSetting = new KafkaSetting()
        .SetBootstrapServers(new Uri(Program.KafkaUri))
        .SetAcks(1)
        .SetRetries(0)
        //.Set("auto.commit.interval.ms", 28120)
        //.Set("session.timeout.ms", 41904)
        //.Set("message.max.bytes", 8416000)
        //.Set("message.copy.max.bytes", 920000)
        //.Set("receive.message.max.bytes", 92000000)
        //.Set("max.in.flight.requests.per.connection", 128000)
        //.Set("queue.buffering.max.messages", 9001000)
        //.Set("queue.buffering.max.kbytes", 1887535)
        //.Set("queue.buffering.max.ms", 20)
        //.Set("batch.num.messages", 500500)
        .SetClientId("client-id")
        .SetGroupId("test-group");
    // Only non-underscore keys are Kafka settings; the rest are benchmark knobs.
    foreach (var parameter in parameters.Where(x => !x.Key.StartsWith("_")))
    {
        kafkaSetting.Set(parameter.Key, parameter.Value);
    }
    double avgRps;
    try
    {
        using (var kafkaProducer = new KafkaProducer(kafkaSetting, OnMessageDelivered))
        {
            var httpClient = new HttpClient {
                BaseAddress = new Uri("http://localhost:8888")
            };
            var cancellationTokenSource = new CancellationTokenSource();
            var cancellationToken = cancellationTokenSource.Token;
            var tasks = new List<Task>();
            avgRps = 0;
            // Watcher: samples the shared successCount counter each step and
            // maintains the running average (captured in avgRps by closure).
            var watcherTask = new Task(() =>
            {
                var counter = 0;
                while (!cancellationToken.IsCancellationRequested)
                {
                    var prevSuccess = successCount;
                    Thread.Sleep(stepMilliseconds);
                    var newSuccess = successCount;
                    var rps = (double)(newSuccess - prevSuccess) / stepMilliseconds * 1000;
                    // Averaging only starts once the first success is observed.
                    if (avgRps > 0 || rps > 0)
                    {
                        counter++;
                        avgRps = (double)successCount / counter / stepMilliseconds * 1000;
                    }
                    Program.Log($"tasks= {tasks.Count}, success = {successCount}, error = {errorCount}, perSecond={rps}, avg={avgRps}");
                }
            }, cancellationToken, TaskCreationOptions.LongRunning);
            watcherTask.Start();
            // Outer loop runs exactly once; the inner loop starts "_tasks"
            // concurrent sending loops.
            for (var i = 0; i < 1; i++)
            {
                for (var j = 0; j < parameters["_tasks"]; j++)
                {
                    var task = new Task(() =>
                    {
                        SendingLoop(kafkaProducer, httpClient, cancellationToken);
                    }, cancellationToken);
                    task.Start();
                    tasks.Add(task);
                }
            }
            // Fixed 60-second measurement window, then cooperative shutdown.
            Thread.Sleep(60000);
            cancellationTokenSource.Cancel();
            Task.WaitAll(tasks.ToArray());
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(e);
        return (0);
    }
    Program.Log($"success = {successCount}, all = {requestCount}");
    return (avgRps);
}