internal static void DumpMessageAsConsumerGroup(ConsumeGroupHelperOptions cgOptions)
        {
            // Runs the consumer-group dump, then reconciles the number of records
            // actually read (totalCount) against the per-partition offset movement.
            try
            {
                totalCount = 0;

                ZookeeperConsumerConnector.UseSharedStaticZookeeperClient = cgOptions.UseSharedStaticZookeeperClient;
                DumpMessageAsConsumerGroupSigleThreadBlock(cgOptions);

                Logger.InfoFormat("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));
                Console.WriteLine("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));

                Logger.InfoFormat("======New offset");
                Console.WriteLine("======New offset");
                long totalCountCommit = 0;
                foreach (var kv in newOffset.OrderBy(r => r.Key))
                {
                    // Hoist the starting offset for this partition: the original
                    // evaluated the same ContainsKey+indexer lookup three times
                    // per iteration (each itself a double dictionary lookup).
                    long oldOffset = (initialOffset != null && initialOffset.ContainsKey(kv.Key)) ? initialOffset[kv.Key] : 0;
                    string d = string.Format("Partition:{0}\t Old Offset:{1,10} --> New Offset:{2,10}  Diff:{3}"
                        , kv.Key, oldOffset, kv.Value, kv.Value - oldOffset);
                    Logger.Info(d);
                    Console.WriteLine(d);
                    totalCountCommit += kv.Value - oldOffset;
                }
                //TODO: currently each partition maybe missed one message hasn't been commit.  please refer kafka document:https://cwiki.apache.org/confluence/display/KAFKA/Compression
                //Hence, for compressed data, the consumed offset will be advanced one compressed message at a time. This has the side effect of possible duplicates in the event of a consumer failure. For uncompressed data, consumed offset will be advanced one message at a time.
                if (totalCountCommit != Interlocked.Read(ref totalCount))
                {
                    Logger.ErrorFormat("totalCountCommit {0} !=  totalCount  {1}, check next line log see if it's reasonable:", totalCountCommit, Interlocked.Read(ref totalCount));
                    // A small non-negative gap (at most one record per partition)
                    // is expected by design; anything else deserves investigation.
                    long diff = totalCountCommit - Interlocked.Read(ref totalCount);
                    if (diff <= newOffset.Count && diff >= 0)
                    {
                        Logger.ErrorFormat(" the difference is reasonable, by design of kafkaNET.Library.   For each partition, if not hit end of log, at least read one record from it!");
                    }
                    else
                    {
                        Logger.ErrorFormat(" the difference is not reasonable ,please check log!");
                    }
                }
                else
                {
                    Logger.InfoFormat("totalCountCommit {0} ==  totalCount  {1}", totalCountCommit, Interlocked.Read(ref totalCount));
                    Console.WriteLine("totalCountCommit {0} ==  totalCount  {1}", totalCountCommit, Interlocked.Read(ref totalCount));
                }
            }
            catch (Exception ex)
            {
                // Log full input context so a failing run can be reproduced from the log.
                Logger.ErrorFormat("Consumer group consume data. got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tconsumerGroupName:{3}\tconsumerId:{4}\tthreadCount:{5}\tcount:{6}"
                     , ex.FormatException(),
                        cgOptions.Topic,
                        cgOptions.Zookeeper,
                        cgOptions.ConsumerGroupName,
                        cgOptions.ConsumerId,
                        cgOptions.FetchThreadCountPerConsumer,
                        cgOptions.Count);
            }
        }
// ---- scraped sample separator: "Exemplo n.º 2" (Example #2), score: 0 ----
        internal static void DumpMessageAsConsumerGroup(ConsumeGroupHelperOptions cgOptions)
        {
            // Consume via the consumer-group path, then compare the committed
            // offset movement per partition with the total number of records read.
            try
            {
                totalCount = 0;

                ZookeeperConsumerConnector.UseSharedStaticZookeeperClient = cgOptions.UseSharedStaticZookeeperClient;
                DumpMessageAsConsumerGroupSigleThreadBlock(cgOptions);

                Logger.InfoFormat("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));
                Console.WriteLine("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));

                Logger.InfoFormat("======New offset");
                Console.WriteLine("======New offset");

                long committedTotal = 0;
                foreach (var pair in newOffset.OrderBy(entry => entry.Key))
                {
                    // Starting offset for this partition; 0 when no initial snapshot exists.
                    long startOffset = 0;
                    if (initialOffset != null && initialOffset.ContainsKey(pair.Key))
                    {
                        startOffset = initialOffset[pair.Key];
                    }

                    string line = string.Format("Partition:{0}\t Old Offset:{1,10} --> New Offset:{2,10}  Diff:{3}",
                        pair.Key, startOffset, pair.Value, pair.Value - startOffset);
                    Logger.Info(line);
                    Console.WriteLine(line);
                    committedTotal += pair.Value - startOffset;
                }

                //TODO: currently each partition maybe missed one message hasn't been commit.  please refer kafka document:https://cwiki.apache.org/confluence/display/KAFKA/Compression
                //Hence, for compressed data, the consumed offset will be advanced one compressed message at a time. This has the side effect of possible duplicates in the event of a consumer failure. For uncompressed data, consumed offset will be advanced one message at a time.
                if (committedTotal == Interlocked.Read(ref totalCount))
                {
                    Logger.InfoFormat("totalCountCommit {0} ==  totalCount  {1}", committedTotal, Interlocked.Read(ref totalCount));
                    Console.WriteLine("totalCountCommit {0} ==  totalCount  {1}", committedTotal, Interlocked.Read(ref totalCount));
                }
                else
                {
                    Logger.ErrorFormat("totalCountCommit {0} !=  totalCount  {1}, check next line log see if it's reasonable:", committedTotal, Interlocked.Read(ref totalCount));
                    long delta = committedTotal - Interlocked.Read(ref totalCount);
                    if (delta >= 0 && delta <= newOffset.Count)
                    {
                        Logger.ErrorFormat(" the difference is reasonable, by design of kafkaNET.Library.   For each partition, if not hit end of log, at least read one record from it!");
                    }
                    else
                    {
                        Logger.ErrorFormat(" the difference is not reasonable ,please check log!");
                    }
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Consumer group consume data. got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tconsumerGroupName:{3}\tconsumerId:{4}\tthreadCount:{5}\tcount:{6}",
                    ex.FormatException(),
                    cgOptions.Topic,
                    cgOptions.Zookeeper,
                    cgOptions.ConsumerGroupName,
                    cgOptions.ConsumerId,
                    cgOptions.FetchThreadCountPerConsumer,
                    cgOptions.Count);
            }
        }
// ---- scraped sample separator: "Exemplo n.º 3" (Example #3), score: 0 ----
        //#region Option 1, single ThreadBlock
        // Spawns one ConsumerGroupHelperUnit thread per Zookeeper connector and
        // blocks until every connector signals completion.
        private static void DumpMessageAsConsumerGroupSigleThreadBlock(ConsumeGroupHelperOptions cgOptions)
        {
            // Reset the shared offset snapshots before any consumer thread starts.
            initialOffset = null;
            newOffset     = null;
            AutoResetEvent[] autos = new AutoResetEvent[cgOptions.ZookeeperConnectorCount];

            for (int i = 0; i < cgOptions.ZookeeperConnectorCount; i++)
            {
                AutoResetEvent resetEvent = new AutoResetEvent(false);
                // BUG FIX: pass the loop index as the thread ID instead of
                // DateTime.UtcNow.Second. The wall-clock second is identical for
                // every connector started in the same second, producing duplicate
                // ConsumerIds ("..._Thread_N") and disagreeing with the index
                // logged below; the sibling copy of this method uses i.
                ConsumerGroupHelperUnit unit = new ConsumerGroupHelperUnit(i, cgOptions, resetEvent, cgOptions.ZookeeperConnectorConsumeMessageCount[i]);
                Thread t = new Thread(unit.Consume);
                t.Start();
                Logger.InfoFormat("Start thread {0} of ZookeeperConsumerConnector", i);
                autos[i] = resetEvent;
            }
            WaitHandle.WaitAll(autos);
        }
 internal void Parse(string[] args)
 {
     // The first argument is the command verb; it selects which sub-option
     // parser (ActiveSubOption) will handle the remaining arguments.
     if (args == null || args.Length <= 0)
     {
         throw new ArgumentException("Please provide verb.");
     }

     // Verb is lower-cased exactly once here; every comparison below relies
     // on that. (The original re-called ToLowerInvariant() on the already
     // lowered verb in every branch.)
     Verb = args[0].ToLowerInvariant();
     if ("topic" == Verb)
         ActiveSubOption = new TopicHelperArguments();
     else if ("dumpdata" == Verb
          || KafkaNETExampleType.ConsumeSimple.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ConsumeDataHelperArguments();
     else if ("dumpdataasconsumergroup" == Verb
         || KafkaNETExampleType.ConsumeGroup.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ConsumeGroupHelperOptions();
     else if ("latestoffsetofconsumergroup" == Verb
         || "consumegroupm" == Verb
         || "consumem" == Verb
         || KafkaNETExampleType.ConsumeGroupMonitor.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ConsumeGroupMonitorHelperOptions();
     else if ("produceroundrobin" == Verb
         || KafkaNETExampleType.ProduceSimple.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ProduceSimpleHelperOption();
     else if ("test" == Verb)
         ActiveSubOption = new TestHelperOptions();
     else if ("producewrapper" == Verb
         || KafkaNETExampleType.ProducePerfTest.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ProducePerfTestHelperOption();
     else if ("producem" == Verb
         || KafkaNETExampleType.ProduceMonitor.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new ProduceMonitorHelperOptions();
     else if (KafkaNETExampleType.EventServerPerfTest.ToString().ToLowerInvariant() == Verb)
         ActiveSubOption = new JavaEventServerPerfTestHelperOptions();
     else
     {
         // Typo fix in the user-facing message: "recoganized" -> "recognized".
         throw new ArgumentException(string.Format("The command verb {0} is not recognized.", Verb));
     }
 }
// ---- scraped sample separator: "Exemplo n.º 5" (Example #5), score: 0 ----
 internal ConsumerGroupHelperUnit(int threadID, ConsumeGroupHelperOptions cg, AutoResetEvent e, int c)
 {
     // Named constant replaces the magic number 5000; the sibling copy of this
     // constructor compares against KafkaNETExampleConstants.DefaultCancellationTimeoutMs
     // in the same position, which this value mirrors.
     const int defaultCancellationTimeoutMs = 5000;

     this.resetEvent = e;          // signalled when this unit finishes consuming
     this.cgOptions  = cg;
     this.ThreadID   = threadID;
     this.Count      = c;          // number of messages this unit should consume
     configSettings  = new ConsumerConfiguration
     {
         AutoOffsetReset      = OffsetRequest.SmallestTime,
         AutoCommit           = false,
         GroupId              = cgOptions.ConsumerGroupName,
         ConsumerId           = cgOptions.ConsumerId + "_Thread_" + threadID.ToString(),
         Timeout              = cgOptions.Timeout,
         ZooKeeper            = new ZooKeeperConfiguration(cgOptions.Zookeeper, 30000, 4000, 8000),
         BufferSize           = cgOptions.BufferSize,
         FetchSize            = cgOptions.FetchSize,
         MaxFetchBufferLength = cgOptions.MaxFetchBufferLength// cgOptions.FetchSize * (10~40) / cgOptions.MessageSize,
     };
     if (cgOptions.CancellationTimeoutMs != defaultCancellationTimeoutMs)
     {
         // NOTE(review): -1 presumably disables the consumer timeout when a
         // non-default cancellation timeout is requested — confirm against
         // ConsumerConfiguration.Timeout semantics.
         configSettings.Timeout = -1;
     }
 }
        //#region Option 1, single ThreadBlock
        // Starts one consumer thread per configured Zookeeper connector and
        // waits for all of them to signal completion.
        private static void DumpMessageAsConsumerGroupSigleThreadBlock(ConsumeGroupHelperOptions cgOptions)
        {
            // Clear the shared offset bookkeeping before spawning any consumers.
            initialOffset = null;
            newOffset = null;

            int connectorCount = cgOptions.ZookeeperConnectorCount;
            AutoResetEvent[] doneSignals = new AutoResetEvent[connectorCount];

            for (int index = 0; index < connectorCount; index++)
            {
                AutoResetEvent doneSignal = new AutoResetEvent(false);
                doneSignals[index] = doneSignal;

                ConsumerGroupHelperUnit worker = new ConsumerGroupHelperUnit(index, cgOptions, doneSignal, cgOptions.ZookeeperConnectorConsumeMessageCount[index]);
                new Thread(worker.Consume).Start();
                Logger.InfoFormat("Start thread {0} of ZookeeperConsumerConnector", index);
            }

            // Block until every connector thread has signalled its event.
            WaitHandle.WaitAll(doneSignals);
        }
 internal ConsumerGroupHelperUnit(int threadID, ConsumeGroupHelperOptions cg, AutoResetEvent e, int c)
 {
     // Capture the per-thread state handed in by the dispatcher.
     this.resetEvent = e;
     this.cgOptions = cg;
     this.ThreadID = threadID;
     this.Count = c;

     // Build the consumer configuration from the command-line options,
     // assigning each property explicitly rather than via an initializer.
     ConsumerConfiguration settings = new ConsumerConfiguration();
     settings.AutoOffsetReset = OffsetRequest.SmallestTime;
     settings.AutoCommit = false;
     settings.GroupId = cgOptions.ConsumerGroupName;
     settings.ConsumerId = cgOptions.ConsumerId + "_Thread_" + threadID.ToString();
     settings.Timeout = cgOptions.Timeout;
     settings.ZooKeeper = new ZooKeeperConfiguration(cgOptions.Zookeeper, 30000, 4000, 8000);
     settings.BufferSize = cgOptions.BufferSize;
     settings.FetchSize = cgOptions.FetchSize;
     settings.MaxFetchBufferLength = cgOptions.MaxFetchBufferLength; // cgOptions.FetchSize * (10~40) / cgOptions.MessageSize
     configSettings = settings;

     // NOTE(review): a non-default cancellation timeout switches Timeout to -1,
     // which presumably disables the consumer timeout — confirm against
     // ConsumerConfiguration semantics.
     if (cgOptions.CancellationTimeoutMs != KafkaNETExampleConstants.DefaultCancellationTimeoutMs)
     {
         configSettings.Timeout = -1;
     }
 }
// ---- scraped sample separator: "Exemplo n.º 8" (Example #8), score: 0 ----
        /// <summary>
        /// Entry point for the example tool: emits usage-assist files, parses the
        /// command verb and its sub-options, then dispatches to the matching helper.
        /// Exits the process (code -1) when either parse step fails.
        /// </summary>
        /// <param name="taskIndexInStorm">When &gt;= 0, suffixes the consumer group
        /// ConsumerId/File options so multiple Storm tasks do not collide.</param>
        /// <param name="args">Raw command-line arguments; args[0] is the verb.</param>
        public static void MainInternal(int taskIndexInStorm, string[] args)
        {
            // Emit a usage-assist file for every supported verb up front.
            GenerateAssistFile("producesimple");
            GenerateAssistFile("produceperftest");
            GenerateAssistFile("producemonitor");
            GenerateAssistFile("eventserverperftest");
            GenerateAssistFile("consumesimple");
            GenerateAssistFile("consumegroup");
            GenerateAssistFile("consumegroupmonitor");
            GenerateAssistFile("topic");
            ServicePointManager.DefaultConnectionLimit = 5000;
            ServicePointManager.UseNagleAlgorithm      = false;

            // The log4net section is only probed; explicit configuration is
            // intentionally disabled (XmlConfigurator call commented out).
            var log4netSection = ConfigurationManager.GetSection("log4net");

            if (log4netSection != null)
            {
                //XmlConfigurator.Configure();
            }

            KafkaNETExampleCommandVerb commandOptions = new KafkaNETExampleCommandVerb();

            // First pass: identify the verb; print global usage and exit on failure.
            try
            {
                commandOptions.Parse(args);
            }
            catch (Exception e)
            {
                Logger.ErrorFormat("{0}", e.FormatException());
                Console.WriteLine(KafkaNETExampleCommandVerb.GetUsage());
                Environment.Exit(-1);
            }

            KafkaNETExampleSubArguments realOption = KafkaNETExampleCommandVerb.ActiveSubOption;

            // Second pass: parse the verb-specific options; print verb usage and exit on failure.
            try
            {
                realOption.Parse(args);
            }
            catch (Exception e)
            {
                Logger.ErrorFormat("{0}", e.FormatException());
                Console.WriteLine(realOption.GetUsage(false));
                Environment.Exit(-1);
            }

            Logger.InfoFormat("All arguments of {0}: \r\n{1}", KafkaNETExampleCommandVerb.AssemblyName, realOption.GetArgDict());

            // Dispatch: each verb (and its aliases) maps to one helper Run/Dump call.
            switch (KafkaNETExampleCommandVerb.Verb)
            {
            case "producesimple":
            case "produceroundrobin":
                ProduceSimpleHelperOption produceroundrobinOptions = (ProduceSimpleHelperOption)realOption;
                ProduceSimpleHelper.Run(produceroundrobinOptions);
                break;

            case "produceperftest":
            case "producewrapper":
                ProducePerfTestHelperOption producewrapperOption = (ProducePerfTestHelperOption)realOption;
                (new ProducePerfTestHelper()).Run(producewrapperOption);
                break;

            case "producem":
            case "producemonitor":
                ProduceMonitorHelperOptions produceMonitorOptions = (ProduceMonitorHelperOptions)realOption;
                ProduceMonitorHelper.Run(produceMonitorOptions);
                break;

            case "eventserverperftest":
                JavaEventServerPerfTestHelperOptions eventServerPerfTestOptions = (JavaEventServerPerfTestHelperOptions)realOption;
                (new JavaEventServerPerfTestHelper()).Run(eventServerPerfTestOptions);
                break;

            case "consumesimple":
            case "dumpdata":
                ConsumeDataHelperArguments dumpdataOptions = (ConsumeDataHelperArguments)realOption;
                ConsumeSimpleHelper.ConsumeDataSimple(dumpdataOptions);
                break;

            case "consumegroup":
            case "dumpdataasconsumergroup":
                ConsumeGroupHelperOptions cgOptions = (ConsumeGroupHelperOptions)realOption;
                if (taskIndexInStorm >= 0)
                {
                    // Make ConsumerId/File unique per Storm task to avoid collisions.
                    cgOptions.ConsumerId = cgOptions.ConsumerId + taskIndexInStorm.ToString();
                    cgOptions.File       = cgOptions.ConsumerId + taskIndexInStorm.ToString() + cgOptions.File;
                }

                ConsumerGroupHelper.DumpMessageAsConsumerGroup(cgOptions);
                break;

            case "latestoffsetofconsumergroup":
            case "consumegroupmonitor":
            case "consumegroupm":
            case "consumem":
                ConsumeGroupMonitorHelperOptions dcgOptions = (ConsumeGroupMonitorHelperOptions)realOption;
                ConsumeGroupMonitorHelper.DumpConsumerGroupOffsets(dcgOptions);
                break;

            case "topic":
                TopicHelperArguments dtOptions = (TopicHelperArguments)realOption;
                TopicHelper.DumpTopicMetadataAndOffset(dtOptions);
                break;

            case "test":
                var testOptions = (TestHelperOptions)realOption;
                TestHelper.Run(testOptions);
                break;

            default:
                // Consistency: use ErrorFormat like the rest of this method
                // (the original wrapped string.Format in Logger.Error).
                Logger.ErrorFormat("Invalid verb={0}", KafkaNETExampleCommandVerb.Verb);
                return;
            }
        }