Example #1
        static void kafkaConsumer()
        {
            OffsetPosition[] offsetPositions = new OffsetPosition[]
            {
                new OffsetPosition()
                {
                    Offset      = 1,
                    PartitionId = 0
                }
            };
            var options = new KafkaOptions(new Uri("http://localhost:9092"),
                                           new Uri("http://localhost:9092"));
            var consumer = new KafkaNet.Consumer(new ConsumerOptions("test",
                                                                     new BrokerRouter(options)), offsetPositions);


            using (FileStream fileStream = new FileStream("tweets.txt", FileMode.OpenOrCreate))
            {
                using (StreamWriter writer = new StreamWriter(fileStream))
                {
                    // Keep a handle to the original console writer; do not dispose it,
                    // or later console output will fail.
                    TextWriter originalConsoleOut = Console.Out;

                    foreach (var message in consumer.Consume())
                    {
                        Console.WriteLine("Response: P{0},O{1} : {2}",
                                          message.Meta.PartitionId, message.Meta.Offset,
                                          Encoding.UTF8.GetString(message.Value));

                        // Write a copy of the message to the file, then restore the console.
                        Console.SetOut(writer);
                        Console.WriteLine(Encoding.UTF8.GetString(message.Value));
                        Console.SetOut(originalConsoleOut);
                    }
                }
                Console.WriteLine("Hello to console only");
            }
        }
Example #2
        /// <summary>
        /// Rotate the rotor to the next position.
        /// </summary>
        /// <returns>true if the next rotor should be rotated</returns>
        public bool Rotate()
        {
            bool shouldAdvance = false;

            foreach (char item in RotateAt)
            {
                if (OffsetPosition.Equals(item))
                {
                    shouldAdvance = true;
                    break;
                }
            }

            int offsetIndex = WiresLeft.ProjectCharacter(OffsetPosition);

            offsetIndex = offsetIndex + 1;
            if (offsetIndex >= OperatingAlphabet.Count)
            {
                offsetIndex = 0;
            }
            OffsetPosition = WiresLeft.ProjectIndex(offsetIndex);

            WiresLeft.Rotate();
            WiresRight = WiresLeft.Invert();

            return shouldAdvance;
        }
Example #3
        private void ReadMessageForever(ConsumerOptions consumerOptions, OffsetPosition[] maxOffsets)
        {
            using (var consumer = new Consumer(consumerOptions, maxOffsets))
            {
                var blockingEnumerableOfMessage = consumer.Consume();
                foreach (var message in blockingEnumerableOfMessage)
                {
                    _log.InfoFormat("Offset{0}", message.Meta.Offset);
                }
            }
        }
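
A minimal call site for the helper above might look like the following sketch; the broker address, topic name, and starting offset are illustrative assumptions, not values from the original code.

        // Hypothetical wiring for ReadMessageForever; all values are illustrative.
        var options         = new KafkaOptions(new Uri("http://localhost:9092"));
        var consumerOptions = new ConsumerOptions("test", new BrokerRouter(options));
        // Start reading partition 0 at offset 0 (replace with the last committed position).
        var maxOffsets      = new[] { new OffsetPosition(0, 0L) };
        ReadMessageForever(consumerOptions, maxOffsets);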
Example #4
        private static void Consume()
        {
            var  kafkaRepo     = new KafkaConsumerRepository();
            bool fromBeginning = Boolean.Parse(ConfigurationManager.AppSettings["FromBeginning"]);

            var router   = InitDefaultConfig();
            var consumer = new Consumer(new ConsumerOptions(_topic, router));

            //if we don't want to start from the beginning, use the latest offset
            if (!fromBeginning)
            {
                var maxOffsetByPartition = kafkaRepo.GetOffsetPositionByTopic(_topic);
                //if we get a result, use it; otherwise default
                if (maxOffsetByPartition.Any())
                {
                    var offsets = new List <OffsetPosition>();
                    foreach (var m in maxOffsetByPartition)
                    {
                        var o = new OffsetPosition(m.Partition, (long)m.MaxOffset + 1);
                        offsets.Add(o);
                    }
                    consumer.SetOffsetPosition(offsets.ToArray());
                }
                else
                {
                    consumer.SetOffsetPosition(new OffsetPosition());
                }
            }

            //Consume returns a blocking IEnumerable (ie: never ending stream)
            foreach (var message in consumer.Consume())
            {
                var messageContent = Encoding.UTF8.GetString(message.Value);

                Console.WriteLine($"处理带有内容的消息:{messageContent}");

                kafkaRepo = new KafkaConsumerRepository();

                var consumerMessage = new KafkaConsumerMessage()
                {
                    Topic       = _topic,
                    Offset      = (int)message.Meta.Offset,
                    Partition   = message.Meta.PartitionId,
                    Content     = messageContent,
                    CreatedTime = DateTime.UtcNow
                };

                kafkaRepo.InsertKafkaConsumerMessage(consumerMessage);
                kafkaRepo.Dispose();
            }
        }
Example #5
        private static void Consume()
        {
            KafkaConsumerRepository KafkaRepo = new KafkaConsumerRepository();
            bool FromBeginning = Boolean.Parse(ConfigurationManager.AppSettings["FromBeginning"]);

            var Router   = InitDefaultConfig();
            var Consumer = new Consumer(new ConsumerOptions(Topic, Router));

            //if we don't want to start from beginning, use latest offset.
            if (!FromBeginning)
            {
                var MaxOffsetByPartition = KafkaRepo.GetOffsetPositionByTopic(Topic);
                //if we get a result use it, otherwise default
                if (MaxOffsetByPartition.Count != 0)
                {
                    List <OffsetPosition> offsets = new List <OffsetPosition>();
                    foreach (var m in MaxOffsetByPartition)
                    {
                        OffsetPosition o = new OffsetPosition(m.Partition, (long)m.MaxOffset + 1);
                        offsets.Add(o);
                    }
                    Consumer.SetOffsetPosition(offsets.ToArray());
                }
                else
                {
                    Consumer.SetOffsetPosition(new OffsetPosition());
                }
            }

            //Consume returns a blocking IEnumerable (ie: never ending stream)
            foreach (var message in Consumer.Consume())
            {
                string MessageContent = Encoding.Default.GetString(message.Value);

                Console.WriteLine(String.Format("Processing message with content: {0}", MessageContent));

                KafkaRepo = new KafkaConsumerRepository();

                KafkaConsumerMessage ConsumerMessage = new KafkaConsumerMessage()
                {
                    Topic          = Topic,
                    Offset         = (int)message.Meta.Offset,
                    Partition      = message.Meta.PartitionId,
                    MessageContent = MessageContent,
                    CreatedAt      = DateTime.UtcNow
                };

                KafkaRepo.InsertKafkaConsumerMessage(ConsumerMessage);
                KafkaRepo.Dispose();
            }
        }
Example #6
        static void ThreadReceive()
        {
            string topic2   = "RespostaTopic";
            Uri    uri2     = new Uri("http://localhost:9092");
            var    options2 = new KafkaOptions(uri2);
            var    router2  = new BrokerRouter(options2);

            OffsetPosition[] offsetPositions = new OffsetPosition[]
            {
                new OffsetPosition()
                {
                    Offset      = offint,
                    PartitionId = 0
                }
            };

            var    consumer = new Consumer(new ConsumerOptions(topic2, router2), offsetPositions);
            string chave    = "";

            foreach (var message in consumer.Consume())
            {
                if (message.Key != null)
                {
                    chave = Encoding.ASCII.GetString(message.Key);
                }
                if (Receive)
                {
                    string json = Encoding.ASCII.GetString(message.Value);

                    Console.WriteLine("\n" + json);
                    if (chave != "1")
                    {
                        Receive = false;
                    }
                }

                offint = offint + 1;
                using (StreamWriter file2 = new StreamWriter("Offset.txt", false))
                {
                    file2.WriteLine(offint);
                    // Console.WriteLine("Offset:" + offint);
                    file2.Close();
                }
            }
        }
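
The example above persists the next offset to Offset.txt after each message but never shows how it is loaded back. A sketch of the reading side, assuming offint is the same field used above and is a long, could look like this:

        // Hypothetical startup counterpart: restore the last persisted offset
        // before building offsetPositions (assumes offint is a long field).
        if (File.Exists("Offset.txt"))
        {
            long.TryParse(File.ReadAllText("Offset.txt").Trim(), out offint);
        }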
Example #7
        public OffsetPosition SearchOnGrid(int xMin, int yMin, int xMax, int yMax, int skipChecks, int skipPixels)
        {
            int            halfWidth          = _rimgStart.Width / 2;
            int            halfHeight         = _rimgStart.Height / 2;
            OffsetPosition bestOffsetPosition = null;

            for (int y = yMin; y < yMax; y += skipChecks)
            {
                for (int x = xMin; x < xMax; x += skipChecks)
                {
                    int    offsetX         = x - halfWidth;
                    int    offsetY         = y - halfHeight;
                    double meanSquareError = CalculateMeanSquareError(offsetX, offsetY, skipPixels);
                    if (bestOffsetPosition == null || meanSquareError < bestOffsetPosition.MeanSquareError)
                    {
                        bestOffsetPosition = new OffsetPosition {
                            OffsetX = offsetX, OffsetY = offsetY, MeanSquareError = meanSquareError,
                        };
                    }
                }
            }
            return bestOffsetPosition;
        }
Example #8
        static void Main(string[] args)
        {
            var options = new KafkaOptions(new Uri("http://sjkap556:9092"), new Uri("http://sjkap556:9092"));
            var router  = new BrokerRouter(options);

            OffsetPosition[] offsetPositions = new OffsetPosition[]
            {
                new OffsetPosition()
                {
                    Offset      = 0,
                    PartitionId = 0
                }
            };

            var consumer = new KafkaNet.Consumer(new ConsumerOptions("testCockpit", new BrokerRouter(options)), offsetPositions);


            //Consume returns a blocking IEnumerable (ie: never ending stream)
            foreach (var message in consumer.Consume())
            {
                Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value));
            }
        }
Example #9
        public OffsetPosition SearchRecursive(int checkDensity, int skipPixels)
        {
            int            halfWidth          = _rimgStart.Width / 2;
            int            halfHeight         = _rimgStart.Height / 2;
            OffsetPosition bestOffsetPosition = null;
            int            xMin       = 0;
            int            yMin       = 0;
            int            xMax       = _rimgStart.Width;
            int            yMax       = _rimgStart.Height;
            int            skipChecks = 1;

            do
            {
                skipChecks = Math.Min((xMax - xMin), (yMax - yMin)) / checkDensity;
                if (skipChecks <= 0)
                {
                    skipChecks = 1;
                }
                OffsetPosition newOffsetPosition = SearchOnGrid(xMin, yMin, xMax, yMax, skipChecks, skipPixels);
                if (newOffsetPosition == null)
                {
                    return null;
                }
                if (bestOffsetPosition == null || newOffsetPosition.MeanSquareError < bestOffsetPosition.MeanSquareError)
                {
                    bestOffsetPosition = newOffsetPosition;
                }
                if (skipChecks > 1)
                {
                    xMin = (bestOffsetPosition.OffsetX + halfWidth) - skipChecks;
                    yMin = (bestOffsetPosition.OffsetY + halfHeight) - skipChecks;
                    xMax = (bestOffsetPosition.OffsetX + halfWidth) + skipChecks;
                    yMax = (bestOffsetPosition.OffsetY + halfHeight) + skipChecks;
                }
            } while (skipChecks > 1);
            return bestOffsetPosition;
        }
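
For context, here is a hypothetical call of the coarse-to-fine search above; the instance name aligner and the parameter values are assumptions, since the surrounding class is not shown.

        // Illustrative only: run the recursive search on an instance of the
        // class that defines SearchRecursive (here called "aligner").
        OffsetPosition best = aligner.SearchRecursive(checkDensity: 8, skipPixels: 4);
        if (best != null)
        {
            Console.WriteLine("Offset ({0},{1}) with MSE {2}",
                              best.OffsetX, best.OffsetY, best.MeanSquareError);
        }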
Example #10
        /// Thread that takes the commands from the Kafka topic, processes them and sends the result to the client. ///
        static void ThreadProcessaComando()
        {
            //////////////////////////////////////////////
            string topic   = "ComandoTopic4";
            Uri    uri     = new Uri("http://localhost:9092");
            var    options = new KafkaOptions(uri);
            var    router  = new BrokerRouter(options);

            string topic2   = "RespostaTopic";
            Uri    uri2     = new Uri("http://localhost:9092");
            var    options2 = new KafkaOptions(uri2);
            var    router2  = new BrokerRouter(options2);
            var    produce  = new Producer(router2);

            //////////////////////////////////////////////

            //Set the offset so that only unread messages are read.
            OffsetPosition[] offsetPositions = new OffsetPosition[]
            {
                new OffsetPosition()
                {
                    Offset      = offint,
                    PartitionId = 0
                }
            };
            var consumer = new Consumer(new ConsumerOptions(topic, router), offsetPositions);

            while (true)
            {
                if (!desliga)
                {
                    //Receive messages (commands) from the topic
                    foreach (var message in consumer.Consume())
                    {
                        string  json     = Encoding.ASCII.GetString(message.Value);
                        Comando comando  = JsonConvert.DeserializeObject <Comando>(json);
                        string  resposta = "";

                        if (comando.comand != (int)Comandos.LISTAR)
                        {
                            ProcessaComando(comando, ref resposta);

                            Console.WriteLine("ThreadProcessa: " + resposta);

                            Message msg = new Message(resposta);
                            produce.SendMessageAsync(topic2, new List <Message> {
                                msg
                            }); //Send the processing response back to the client.

                            offint = offint + 1;
                        }
                        else if (comando.comand == (int)Comandos.LISTAR)
                        {
                            List <Message> msglist = new List <Message>();
                            int            count   = Mapa.Count;
                            int            i       = 1;
                            foreach (KeyValuePair <long, string> entry in Mapa)
                            {
                                resposta = entry.Key + " - " + entry.Value;
                                if (count == i)
                                {
                                    Message msg = new Message(resposta, "2");
                                    msglist.Add(msg);
                                }
                                else
                                {
                                    Message msg = new Message(resposta, "1");
                                    msglist.Add(msg);
                                }

                                i++;
                            }
                            produce.SendMessageAsync(topic2, msglist).Wait();
                            offint = offint + 1;
                        }

                        //Update the offset
                        using (StreamWriter file = new StreamWriter("portas.txt", false))
                        {
                            file.WriteLine(offint);
                            file.Close();
                        }

                        using (StreamWriter file = new StreamWriter("json.txt", true))
                        {
                            if (comando.comand == (int)Comandos.READ || comando.comand == (int)Comandos.DESLIGAR || comando.comand == (int)Comandos.LISTAR)
                            {
                                continue;
                            }
                            string comando2 = JsonConvert.SerializeObject(comando);
                            file.WriteLine(comando2);
                            Console.WriteLine("ThreadLog: " + comando2);
                            file.Close();
                        }
                    }
                }
            }
        }
Example #11
        public static void kafkaConsumer()
        {
            OffsetPosition[] offsetPositions = new OffsetPosition[]
            {
                new OffsetPosition()
                {
                    Offset      = 1,
                    PartitionId = 0
                }
            };
            var options = new KafkaOptions(new Uri("http://*****:*****@');

                            int    indexOfAtSign = tweet.IndexOf("@");
                            int    indexOfColon  = tweet.IndexOf(":");
                            string name          = " ";
                            string body          = " ";
                            if (indexOfAtSign >= 1 && indexOfColon >= 1)
                            {
                                try
                                {
                                    name = tweet.Substring(indexOfAtSign + 1, indexOfColon - indexOfAtSign - 1);
                                }
                                catch (Exception)
                                {
                                    name = tweet.Substring(3, 10);
                                }


                                body = tweet.Substring(indexOfColon + 1, tweet.Length - indexOfColon - 1);
                            }



                            var newLine = $"{name},{dt},{body}";
                            csv.AppendLine(newLine);
                            File.WriteAllText(@"C:\Users\billm\source\repos\ConsoleApp2\testingTweets.csv", csv.ToString());
                        }
                    }
                }
                Console.WriteLine("Hello to console only");
            }
        }
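
The listing above was truncated where the source masked a credentialed broker URI, so the consumer construction and the start of the consume loop are missing. A minimal sketch of the elided part, modeled on Example #1, with the broker address, topic, and variable names as assumptions:

        // Hypothetical reconstruction of the elided middle of the example above.
        var options  = new KafkaOptions(new Uri("http://localhost:9092"));
        var consumer = new KafkaNet.Consumer(
            new ConsumerOptions("test", new BrokerRouter(options)), offsetPositions);
        var csv = new StringBuilder();

        foreach (var message in consumer.Consume())
        {
            string tweet = Encoding.UTF8.GetString(message.Value);
            string dt    = DateTime.UtcNow.ToString("s");
            // ...the @-name/body parsing shown above then appends a line to csv.
        }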
Example #12
        // Per partition consumer read handler
        private void ProcessPartitionMessages(object state)
        {
            int partition = (int)state;

            try
            {
                Dictionary <uint, MeasurementKey> idTable         = new Dictionary <uint, MeasurementKey>();
                ConsumerOptions           options                 = new ConsumerOptions(Topic, m_router);
                LongSynchronizedOperation cacheLastConsumerOffset = null;
                OffsetPosition            consumerCursor          = new OffsetPosition {
                    PartitionId = partition, Offset = 0
                };
                long lastUpdateTime          = 0;
                long lastMetadataUpdateCount = 0;
                long lastMeasurementTime     = 0;

                options.PartitionWhitelist.Add(partition);
                options.Log = new TimeSeriesLogger((message, parameters) => OnStatusMessage($"P[{partition}]: " + message, parameters), OnProcessException);

                // Handle consumer offset tracking, i.e., adapter will start reading messages where it left off from last run
                if (TrackConsumerOffset)
                {
                    // Parse path/filename.ext into constituent parts
                    string[] fileParts = new string[3];

                    fileParts[0] = FilePath.GetDirectoryName(ConsumerOffsetFileName);               // 0: path/
                    fileParts[1] = FilePath.GetFileNameWithoutExtension(ConsumerOffsetFileName);    // 1: filename
                    fileParts[2] = FilePath.GetExtension(ConsumerOffsetFileName);                   // 2: .ext

                    // Include partition index as part of consumer offset cache file name
                    string fileName = $"{fileParts[0]}{fileParts[1]}-P{partition}{fileParts[2]}";

                    if (File.Exists(fileName))
                    {
                        try
                        {
                            // Read last consumer offset
                            consumerCursor.Offset = long.Parse(File.ReadAllText(fileName));
                        }
                        catch (Exception ex)
                        {
                            OnProcessException(new InvalidOperationException($"Failed to read last consumer offset from \"{fileName}\": {ex.Message}", ex));
                        }
                    }

                    cacheLastConsumerOffset = new LongSynchronizedOperation(() =>
                    {
                        // Do not write file any more often than defined consumer offset cache interval
                        int restTime = (int)(Ticks.FromSeconds(ConsumerOffsetCacheInterval) - (DateTime.UtcNow.Ticks - lastUpdateTime)).ToMilliseconds();

                        if (restTime > 0)
                        {
                            Thread.Sleep(restTime);
                        }

                        lastUpdateTime = DateTime.UtcNow.Ticks;

                        // Write current consumer offset
                        File.WriteAllText(fileName, consumerCursor.Offset.ToString());
                    },
                                                                            ex => OnProcessException(new InvalidOperationException($"Failed to cache current consumer offset to \"{fileName}\": {ex.Message}", ex)))
                    {
                        IsBackground = true
                    };
                }

                using (Consumer consumer = new Consumer(options, new OffsetPosition(partition, consumerCursor.Offset)))
                {
                    lock (m_consumers)
                        m_consumers.Add(new WeakReference <Consumer>(consumer));

                    foreach (Message message in consumer.Consume())
                    {
                        if ((object)m_metadata == null)
                        {
                            continue;
                        }

                        uint         id;
                        byte         metadataVersion;
                        IMeasurement measurement = message.KafkaDeserialize(out id, out metadataVersion);

                        // Kick-off a refresh for new metadata if message version numbers change
                        if (m_lastMetadataVersion != metadataVersion)
                        {
                            m_lastMetadataVersion = metadataVersion;
                            m_updateMetadata.RunOnceAsync();
                        }

                        // Clear all undefined items in dictionary when metadata gets updated
                        if (lastMetadataUpdateCount < m_metadataUpdateCount)
                        {
                            lastMetadataUpdateCount = m_metadataUpdateCount;
                            foreach (uint undefinedID in idTable.Where(item => item.Value.SignalID == Guid.Empty).Select(item => item.Key).ToArray())
                            {
                                idTable.Remove(undefinedID);
                            }
                        }

                        // Get associated measurement key, or look it up in metadata table
                        measurement.Key = idTable.GetOrAdd(id, lookupID => MeasurementKey.LookUpBySignalID(m_metadata?.Records?.FirstOrDefault(record => record.ID == lookupID)?.ParseSignalID() ?? Guid.Empty));

                        // Only publish measurements with associated metadata and are assigned to this adapter
                        if (measurement.Key != MeasurementKey.Undefined && ((object)m_outputMeasurementKeys == null || m_outputMeasurementKeys.Contains(measurement.Key)))
                        {
                            OnNewMeasurements(new[] { measurement });
                        }

                        // Cache last consumer offset
                        consumerCursor.Offset = message.Offset;

                        if ((object)cacheLastConsumerOffset != null)
                        {
                            cacheLastConsumerOffset.RunOnceAsync();
                        }

                        if (ReadDelay > -1)
                        {
                            // As a group of measurements transition from timestamp to another, inject configured read delay
                            if (lastMeasurementTime != measurement.Timestamp)
                            {
                                Thread.Sleep(ReadDelay);
                            }

                            lastMeasurementTime = measurement.Timestamp;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                OnProcessException(new InvalidOperationException($"Exception while reading Kafka messages for topic \"{Topic}\" P[{partition}]: {ex.Message}", ex));
            }
        }
Example #13
        // Per partition consumer read handler
        private void ProcessPartitionMessages(object state)
        {
            int partition = (int)state;

            try
            {
                Dictionary<uint, MeasurementKey> idTable = new Dictionary<uint, MeasurementKey>();
                ConsumerOptions options = new ConsumerOptions(Topic, m_router);
                LongSynchronizedOperation cacheLastConsumerOffset = null;
                OffsetPosition consumerCursor = new OffsetPosition { PartitionId = partition, Offset = 0 };
                long lastUpdateTime = 0;
                long lastMetadataUpdateCount = 0;
                long lastMeasurementTime = 0;

                options.PartitionWhitelist.Add(partition);
                options.Log = new TimeSeriesLogger((message, parameters) => OnStatusMessage(MessageLevel.Info, string.Format($"P[{partition}]: " + message, parameters)), ex => OnProcessException(MessageLevel.Warning, ex));

                // Handle consumer offset tracking, i.e., adapter will start reading messages where it left off from last run
                if (TrackConsumerOffset)
                {
                    // Parse path/filename.ext into constituent parts
                    string[] fileParts = new string[3];

                    fileParts[0] = FilePath.GetDirectoryName(ConsumerOffsetFileName);               // 0: path/
                    fileParts[1] = FilePath.GetFileNameWithoutExtension(ConsumerOffsetFileName);    // 1: filename
                    fileParts[2] = FilePath.GetExtension(ConsumerOffsetFileName);                   // 2: .ext

                    // Include partition index as part of consumer offset cache file name
                    string fileName = $"{fileParts[0]}{fileParts[1]}-P{partition}{fileParts[2]}";

                    if (File.Exists(fileName))
                    {
                        try
                        {
                            // Read last consumer offset
                            consumerCursor.Offset = long.Parse(File.ReadAllText(fileName));
                        }
                        catch (Exception ex)
                        {
                            OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to read last consumer offset from \"{fileName}\": {ex.Message}", ex));
                        }
                    }

                    cacheLastConsumerOffset = new LongSynchronizedOperation(() =>
                    {
                        // Do not write file any more often than defined consumer offset cache interval
                        int restTime = (int)(Ticks.FromSeconds(ConsumerOffsetCacheInterval) - (DateTime.UtcNow.Ticks - lastUpdateTime)).ToMilliseconds();

                        if (restTime > 0)
                            Thread.Sleep(restTime);

                        lastUpdateTime = DateTime.UtcNow.Ticks;

                        // Write current consumer offset
                        File.WriteAllText(fileName, consumerCursor.Offset.ToString());
                    }, 
                    ex => OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to cache current consumer offset to \"{fileName}\": {ex.Message}", ex)))
                    {
                        IsBackground = true
                    };
                }

                using (Consumer consumer = new Consumer(options, new OffsetPosition(partition, consumerCursor.Offset)))
                {
                    lock (m_consumers)
                        m_consumers.Add(new WeakReference<Consumer>(consumer));

                    foreach (Message message in consumer.Consume())
                    {
                        if ((object)m_metadata == null)
                            continue;

                        uint id;
                        byte metadataVersion;
                        IMeasurement measurement = message.KafkaDeserialize(out id, out metadataVersion);

                        // Kick-off a refresh for new metadata if message version numbers change
                        if (m_lastMetadataVersion != metadataVersion)
                        {
                            m_lastMetadataVersion = metadataVersion;
                            m_updateMetadata.RunOnceAsync();
                        }

                        // Clear all undefined items in dictionary when metadata gets updated
                        if (lastMetadataUpdateCount < m_metadataUpdateCount)
                        {
                            lastMetadataUpdateCount = m_metadataUpdateCount;
                            foreach (uint undefinedID in idTable.Where(item => item.Value.SignalID == Guid.Empty).Select(item => item.Key).ToArray())
                                idTable.Remove(undefinedID);
                        }

                        // Get associated measurement key, or look it up in metadata table
                        measurement.Metadata = idTable.GetOrAdd(id, lookupID => MeasurementKey.LookUpBySignalID(m_metadata?.Records?.FirstOrDefault(record => record.ID == lookupID)?.ParseSignalID() ?? Guid.Empty)).Metadata;

                        // Only publish measurements with associated metadata and are assigned to this adapter
                        if (measurement.Key != MeasurementKey.Undefined && ((object)m_outputMeasurementKeys == null || m_outputMeasurementKeys.Contains(measurement.Key)))
                            OnNewMeasurements(new[] { measurement });

                        // Cache last consumer offset
                        consumerCursor.Offset = message.Offset;

                        if ((object)cacheLastConsumerOffset != null)
                            cacheLastConsumerOffset.RunOnceAsync();

                        if (ReadDelay > -1)
                        {
                            // As a group of measurements transition from timestamp to another, inject configured read delay
                            if (lastMeasurementTime != measurement.Timestamp)
                                Thread.Sleep(ReadDelay);

                            lastMeasurementTime = measurement.Timestamp;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Exception while reading Kafka messages for topic \"{Topic}\" P[{partition}]: {ex.Message}", ex));
            }
        }
Example #14
        public void Initial()
        {
            _task = Task.Run(() =>
            {
                var options1 = new KafkaOptions(new Uri(_kafkaAddr), new Uri(_kafkaAddr))
                {
                    Log = new ConsoleLog()
                };
                var consumer = new Consumer(new ConsumerOptions(_topicName, new BrokerRouter(options1))
                {
                    Log = new ConsoleLog()
                });

                // Load from the data file
                List <KafkaPartOffset> kafkaPartOffset;
                XmlDataControl.ReadConfig(out kafkaPartOffset);
                if (kafkaPartOffset != null && kafkaPartOffset.Count > 0)
                {
                    OffsetPosition[] offsetPositions = new OffsetPosition[kafkaPartOffset.Count];
                    for (int index = 0; index < kafkaPartOffset.Count; index++)
                    {
                        offsetPositions[index] = new OffsetPosition(kafkaPartOffset[index].PartitionId, kafkaPartOffset[index].Offset);
                    }
                    consumer.SetOffsetPosition(offsetPositions);
                }
                foreach (var data in consumer.Consume())
                {
                    if (_tokenSource.Token.IsCancellationRequested)
                    {
                        _tokenSource.Token.ThrowIfCancellationRequested();
                    }
                    else
                    {
                        Log4NetHelper.Instance.Debug("接收Kafka数据成功:" + data.Meta.PartitionId + "-" + data.Meta.Offset + ", data");
                        // 保存数据到配置文件
                        XmlDataControl.WriteConfig(data.Meta.PartitionId, data.Meta.Offset);

                        Send2Quere(data.Value);
                    }
                }
            }, _tokenSource.Token);

            _taskSend = Task.Run(() =>
            {
                var options2 = new KafkaOptions(new Uri(_kafkaAddr))
                {
                    Log = new ConsoleLog()
                };
                var producer = new Producer(new BrokerRouter(options2))
                {
                    BatchSize      = 10,
                    BatchDelayTime = TimeSpan.FromMilliseconds(2000)
                };
                while (true)
                {
                    try
                    {
                        string message = ResultSpliceQueue.GetFromQueue();
                        if (string.IsNullOrEmpty(message))
                        {
                            Thread.Sleep(1000);
                            continue;
                        }
                        else
                        {
                            producer.SendMessageAsync(_outputtopicName, new[] { new Message(message) }).Wait();
                            Console.WriteLine("Posted messages. AsyncCount:{0}", producer.AsyncCount);
                            Log4NetHelper.Instance.Debug("发送Kafka数据成功:" + message);
                        }
                    }
                    catch (Exception ex)
                    {
                        Log4NetHelper.Instance.Error("从队列发送Kafka数据错误:" + (ex.InnerException != null ? ex.InnerException.Message : ex.Message));
                    }
                    Thread.Sleep(1000);
                }
            }, _tokenSource.Token);
        }
Example #15
        protected override void EngineController()
        {
            logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Started.");

tryRecover:
            try
            {
                var kafkaOptions = new KafkaOptions(new Uri(_Config.URL));
                var BrokerRouter = new BrokerRouter(kafkaOptions);

                var consumerOptions = new ConsumerOptions(Topic, BrokerRouter);
                //consumerOptions.MaxWaitTimeForMinimumBytes = new TimeSpan(0, 0, 5);
                //consumerOptions.MinimumBytes = 2;
                //consumerOptions.FetchBufferMultiplier = 1;
                //consumerOptions.TopicPartitionQueryTimeMs = 100;
                int i = 0;
                var storedOffsetProcessed = GetOffsetProccessed();

                if (storedOffsetProcessed == null)
                {
                    storedOffsetProcessed = new OffsetPosition()
                    {
                        Offset      = 0,
                        PartitionId = 0
                    };
                }

                using (var consumer = new Consumer(consumerOptions))
                {
                    var kafkaOffsets = consumer.GetTopicOffsetAsync(Topic).Result;
                    if (kafkaOffsets != null && kafkaOffsets.Count() != 0)
                    {
                        var kafkaMinOffset = kafkaOffsets.OrderBy(s => s.Offsets.Min()).FirstOrDefault();
                        var kafkaMaxOffset = kafkaOffsets.OrderByDescending(s => s.Offsets.Max()).FirstOrDefault();

                        if (storedOffsetProcessed.Offset > kafkaMaxOffset.Offsets.Max() || storedOffsetProcessed.Offset < kafkaMinOffset.Offsets.Min())
                        {
                            storedOffsetProcessed = new OffsetPosition()
                            {
                                Offset      = kafkaMinOffset.Offsets.Min(),
                                PartitionId = kafkaMinOffset.PartitionId
                            };
                        }
                        else
                        {
                            storedOffsetProcessed.Offset++;
                        }

                        consumer.SetOffsetPosition(storedOffsetProcessed);
                    }

                    foreach (var message in consumer.Consume(_CancellationToken))
                    {
tryMesseageAgain:
                        try
                        {
                            HandleMessage(message);
                            SaveMesseageOffsetProccessed(message.Meta);
                        }
                        catch (Exception ex)
                        {
                            logger.LogCritical(ex, $"Exception Occurred In Kafka Consumer '{this.GetType().Name}' Offset {message.Meta.Offset} (ID={_EngineID})");
                            var delayTask = Task.Delay(5000);
                            delayTask.Wait();
                            goto tryMesseageAgain;
                        }
                    }
                }
            }
            catch (OperationCanceledException)
            {
            }
            catch (Exception ex)
            {
                logger.LogInformation($"Exception Occured In Kafka Consumer '{this.GetType().Name}' (ID={_EngineID}) {ex.Message} \n {ex.StackTrace}.");
                logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Recovering...");
                goto tryRecover;
            }
            logger.LogInformation($"'{this.GetType().Name}' (ID={_EngineID}) Stoped.");
        }