Example #1
        /// <inheritdoc />
        protected override void OnEventCommand(EventCommandEventArgs command)
        {
            if (command.Command == EventCommand.Enable)
            {
                // Comment taken from RuntimeEventSource in CoreCLR
                // NOTE: These counters will NOT be disposed on disable command because we may be introducing
                // a race condition by doing that. We still want to create these lazily so that we aren't adding
                // overhead at all times even when counters aren't enabled.
                // On disable, PollingCounters will stop polling for values so it should be fine to leave them around.

                _activeDbContextsCounter ??= new PollingCounter("active-db-contexts", this, () => Interlocked.Read(ref _activeDbContexts))
                {
                    DisplayName = "Active DbContexts"
                };

                _totalQueriesCounter ??= new PollingCounter("total-queries", this, () => Interlocked.Read(ref _totalQueries))
                {
                    DisplayName = "Queries (Total)"
                };

                _queriesPerSecondCounter ??= new IncrementingPollingCounter(
                    "queries-per-second",
                    this,
                    () => Interlocked.Read(ref _totalQueries))
                {
                    DisplayName = "Queries", DisplayRateTimeScale = TimeSpan.FromSeconds(1)
                };

                _totalSaveChangesCounter ??= new PollingCounter("total-save-changes", this, () => Interlocked.Read(ref _totalSaveChanges))
                {
                    DisplayName = "SaveChanges (Total)"
                };

                _saveChangesPerSecondCounter ??= new IncrementingPollingCounter(
                    "save-changes-per-second",
                    this,
                    () => Interlocked.Read(ref _totalSaveChanges))
                {
                    DisplayName = "SaveChanges", DisplayRateTimeScale = TimeSpan.FromSeconds(1)
                };

                _compiledQueryCacheHitRateCounter ??= new PollingCounter(
                    "compiled-query-cache-hit-rate",
                    this,
                    () => _compiledQueryCacheInfo.CalculateAndReset())
                {
                    DisplayName = "Query Cache Hit Rate", DisplayUnits = "%"
                };

                _totalExecutionStrategyOperationFailuresCounter ??= new PollingCounter(
                    "total-execution-strategy-operation-failures",
                    this,
                    () => Interlocked.Read(ref _totalExecutionStrategyOperationFailures))
                {
                    DisplayName = "Execution Strategy Operation Failures (Total)"
                };

                _executionStrategyOperationFailuresPerSecondCounter ??= new IncrementingPollingCounter(
                    "execution-strategy-operation-failures-per-second",
                    this,
                    () => Interlocked.Read(ref _totalExecutionStrategyOperationFailures))
                {
                    DisplayName = "Execution Strategy Operation Failures", DisplayRateTimeScale = TimeSpan.FromSeconds(1)
                };

                _totalOptimisticConcurrencyFailuresCounter ??= new PollingCounter(
                    "total-optimistic-concurrency-failures",
                    this,
                    () => Interlocked.Read(ref _totalOptimisticConcurrencyFailures))
                {
                    DisplayName = "Optimistic Concurrency Failures (Total)"
                };

                _optimisticConcurrencyFailuresPerSecondCounter ??= new IncrementingPollingCounter(
                    "optimistic-concurrency-failures-per-second",
                    this,
                    () => Interlocked.Read(ref _totalOptimisticConcurrencyFailures))
                {
                    DisplayName = "Optimistic Concurrency Failures", DisplayRateTimeScale = TimeSpan.FromSeconds(1)
                };
            }
        }
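The polling counters above only read backing fields; the values they report are maintained elsewhere in the event source. A minimal sketch of that producing side, assuming the field and method names used in the example (the real EF Core members may differ):

        // Incremented whenever a query is executed, so the lambda
        // () => Interlocked.Read(ref _totalQueries) above reports a live value.
        public void QueryExecuting()
            => Interlocked.Increment(ref _totalQueries);

        // Paired calls keep the "active-db-contexts" gauge in sync with context lifetime.
        public void DbContextInitializing()
            => Interlocked.Increment(ref _activeDbContexts);

        public void DbContextDisposing()
            => Interlocked.Decrement(ref _activeDbContexts);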
Example #2
 public bool areChangeNotificationsEnabled()
 {
     return(Interlocked.Read(ref changeNotificationsSuppressed) == 0);
 }
Example #3
        internal void OnHeartbeat(bool ifConnectedOnly)
        {
            bool runThisTime = false;

            try
            {
                runThisTime = !isDisposed && Interlocked.CompareExchange(ref beating, 1, 0) == 0;
                if (!runThisTime)
                {
                    return;
                }

                uint index          = (uint)Interlocked.Increment(ref profileLogIndex);
                long newSampleCount = Interlocked.Read(ref operationCount);
                Interlocked.Exchange(ref profileLog[index % ProfileLogSamples], newSampleCount);
                Interlocked.Exchange(ref profileLastLog, newSampleCount);
                Trace("OnHeartbeat: " + (State)state);
                switch (state)
                {
                case (int)State.Connecting:
                    int  connectTimeMilliseconds = unchecked (Environment.TickCount - Thread.VolatileRead(ref connectStartTicks));
                    bool shouldRetry             = Multiplexer.RawConfig.ReconnectRetryPolicy.ShouldRetry(Interlocked.Read(ref connectTimeoutRetryCount), connectTimeMilliseconds);
                    if (shouldRetry)
                    {
                        Interlocked.Increment(ref connectTimeoutRetryCount);
                        LastException = ExceptionFactory.UnableToConnect(Multiplexer, "ConnectTimeout");
                        Trace("Aborting connect");
                        // abort and reconnect
                        var snapshot = physical;
                        OnDisconnected(ConnectionFailureType.UnableToConnect, snapshot, out bool isCurrent, out State oldState);
                        using (snapshot) { }     // dispose etc
                        TryConnect(null);
                    }
                    break;

                case (int)State.ConnectedEstablishing:
                case (int)State.ConnectedEstablished:
                    var tmp = physical;
                    if (tmp != null)
                    {
                        if (state == (int)State.ConnectedEstablished)
                        {
                            Interlocked.Exchange(ref connectTimeoutRetryCount, 0);
                            tmp.BridgeCouldBeNull?.ServerEndPoint?.ClearUnselectable(UnselectableFlags.DidNotRespond);
                        }
                        tmp.OnBridgeHeartbeat();
                        int writeEverySeconds  = ServerEndPoint.WriteEverySeconds,
                            checkConfigSeconds = Multiplexer.RawConfig.ConfigCheckSeconds;

                        if (state == (int)State.ConnectedEstablished && ConnectionType == ConnectionType.Interactive &&
                            checkConfigSeconds > 0 && ServerEndPoint.LastInfoReplicationCheckSecondsAgo >= checkConfigSeconds &&
                            ServerEndPoint.CheckInfoReplication())
                        {
                            // that serves as a keep-alive, if it is accepted
                        }
                        else if (writeEverySeconds > 0 && tmp.LastWriteSecondsAgo >= writeEverySeconds)
                        {
                            Trace("OnHeartbeat - overdue");
                            if (state == (int)State.ConnectedEstablished)
                            {
                                KeepAlive();
                            }
                            else
                            {
                                OnDisconnected(ConnectionFailureType.SocketFailure, tmp, out bool ignore, out State oldState);
                            }
                        }
                        else if (writeEverySeconds <= 0 && tmp.IsIdle() &&
                                 tmp.LastWriteSecondsAgo > 2 &&
                                 tmp.GetSentAwaitingResponseCount() != 0)
                        {
                            // there's a chance this is a dead socket; sending data will shake that
                            // up a bit, so if we have an empty unsent queue and a non-empty sent
                            // queue, test the socket
                            KeepAlive();
                        }
                    }
                    break;

                case (int)State.Disconnected:
                    Interlocked.Exchange(ref connectTimeoutRetryCount, 0);
                    if (!ifConnectedOnly)
                    {
                        Multiplexer.Trace("Resurrecting " + ToString());
                        Multiplexer.OnResurrecting(ServerEndPoint?.EndPoint, ConnectionType);
                        GetConnection(null);
                    }
                    break;

                default:
                    Interlocked.Exchange(ref connectTimeoutRetryCount, 0);
                    break;
                }
            }
            catch (Exception ex)
            {
                OnInternalError(ex);
                Trace("OnHeartbeat error: " + ex.Message);
            }
            finally
            {
                if (runThisTime)
                {
                    Interlocked.Exchange(ref beating, 0);
                }
            }
        }
Example #4
        protected override void OnEventCommand(EventCommandEventArgs command)
        {
            if (command.Command == EventCommand.Enable)
            {
                _tlsHandshakeRateCounter ??= new IncrementingPollingCounter("tls-handshake-rate", this, () => Interlocked.Read(ref _finishedTlsHandshakes))
                {
                    DisplayName          = "TLS handshakes completed",
                    DisplayRateTimeScale = TimeSpan.FromSeconds(1)
                };

                _totalTlsHandshakesCounter ??= new PollingCounter("total-tls-handshakes", this, () => Interlocked.Read(ref _finishedTlsHandshakes))
                {
                    DisplayName = "Total TLS handshakes completed"
                };

                _currentTlsHandshakesCounter ??= new PollingCounter("current-tls-handshakes", this, () => Interlocked.Read(ref _startedTlsHandshakes) - Interlocked.Read(ref _finishedTlsHandshakes))
                {
                    DisplayName = "Current TLS handshakes"
                };

                _failedTlsHandshakesCounter ??= new PollingCounter("failed-tls-handshakes", this, () => Interlocked.Read(ref _failedTlsHandshakes))
                {
                    DisplayName = "Total TLS handshakes failed"
                };

                _sessionsOpenCounter ??= new PollingCounter("all-tls-sessions-open", this, () => Interlocked.Read(ref _sessionsOpen))
                {
                    DisplayName = "All TLS Sessions Active"
                };

                _sessionsOpenTls10Counter ??= new PollingCounter("tls10-sessions-open", this, () => Interlocked.Read(ref _sessionsOpenTls10))
                {
                    DisplayName = "TLS 1.0 Sessions Active"
                };

                _sessionsOpenTls11Counter ??= new PollingCounter("tls11-sessions-open", this, () => Interlocked.Read(ref _sessionsOpenTls11))
                {
                    DisplayName = "TLS 1.1 Sessions Active"
                };

                _sessionsOpenTls12Counter ??= new PollingCounter("tls12-sessions-open", this, () => Interlocked.Read(ref _sessionsOpenTls12))
                {
                    DisplayName = "TLS 1.2 Sessions Active"
                };

                _sessionsOpenTls13Counter ??= new PollingCounter("tls13-sessions-open", this, () => Interlocked.Read(ref _sessionsOpenTls13))
                {
                    DisplayName = "TLS 1.3 Sessions Active"
                };

                _handshakeDurationCounter ??= new EventCounter("all-tls-handshake-duration", this)
                {
                    DisplayName  = "TLS Handshake Duration",
                    DisplayUnits = "ms"
                };

                _handshakeDurationTls10Counter ??= new EventCounter("tls10-handshake-duration", this)
                {
                    DisplayName  = "TLS 1.0 Handshake Duration",
                    DisplayUnits = "ms"
                };

                _handshakeDurationTls11Counter ??= new EventCounter("tls11-handshake-duration", this)
                {
                    DisplayName  = "TLS 1.1 Handshake Duration",
                    DisplayUnits = "ms"
                };

                _handshakeDurationTls12Counter ??= new EventCounter("tls12-handshake-duration", this)
                {
                    DisplayName  = "TLS 1.2 Handshake Duration",
                    DisplayUnits = "ms"
                };

                _handshakeDurationTls13Counter ??= new EventCounter("tls13-handshake-duration", this)
                {
                    DisplayName  = "TLS 1.3 Handshake Duration",
                    DisplayUnits = "ms"
                };
            }
        }
Example #5
 private T ReturnIfStopped <T>(Func <T> getter)
 => Interlocked.Read(ref status) == (long)Status.Stopped
     ? default(T)   // stopped: skip the getter (assumed completion of the truncated snippet)
     : getter();
Example #6
 /// <summary>Gets a value indicating whether this instance is expired.</summary>
 /// <value>
 /// <c>true</c> if this instance is expired; otherwise, <c>false</c>.
 /// </value>
 public bool IsExpired()
 {
     return(DateTime.UtcNow.Ticks > Interlocked.Read(ref lastAccessTicks) + TimeSpan.TicksPerMinute);
 }
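IsExpired only reads lastAccessTicks; for the check to mean anything, every access must refresh that timestamp atomically. A hedged sketch of the writer side (the Touch name is assumed, not from the original):

 // Refresh the expiry window on each access. Interlocked.Exchange keeps the 64-bit
 // write atomic, so IsExpired never observes a torn value on 32-bit runtimes.
 public void Touch()
 {
     Interlocked.Exchange(ref lastAccessTicks, DateTime.UtcNow.Ticks);
 }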
Example #7
        public void Process(List <IJob> jobs)
        {
            if (jobs == null || jobs.Count == 0)
            {
                return;
            }

            // we originally used TPL, but we encountered unexpected behavior from MaxDegreeOfParallelism
            // With MaxDegreeOfParallelism, it seemed like if it was set to 10 and each job used 50% CPU,
            // it would launch around 20 threads. ThreadPool was better, but also tried to be smart about
            // launching threads. Using standard threads and a semaphore yielded the desired behavior.

            // run our jobs
            var jobPool       = new Semaphore(MaxThreads, MaxThreads);
            var doneEvent     = new ManualResetEvent(false);
            var failures      = 0L;
            var jobsRemaining = jobs.Count;
            var exceptions    = new List <Exception>();
            var threads       = ErrorHandlingMode == JobErrorHandlingMode.None ? null : new List <Thread>(jobs.Count);

            for (var jobIndex = 0; jobIndex < jobs.Count; ++jobIndex)
            {
                Thread.Sleep(10);
                jobPool.WaitOne();

                if (doneEvent.WaitOne(0)) // got the signal to quit
                {
                    Release(jobPool);
                    break;
                }

                var job       = jobs[jobIndex];
                var jobThread = new Thread(o => ExecuteJob(job, jobPool, doneEvent, ref failures, ref jobsRemaining, exceptions));
                if (!string.IsNullOrEmpty(job.Name))
                {
                    jobThread.Name = job.Name;
                }
                jobThread.Start();
                if (threads != null)
                {
                    threads.Add(jobThread);
                }
            }

            doneEvent.WaitOne();
            if (threads != null && Interlocked.Read(ref failures) > 0)
            {
                // copy the exceptions in case we are terminating jobs to prevent ThreadAbortExceptions ending up in there
                if (ErrorHandlingMode == JobErrorHandlingMode.Terminate)
                {
                    exceptions = new List <Exception>(exceptions);
                }

                foreach (var thread in threads)
                {
                    if (thread.IsAlive)
                    {
                        try
                        {
                            if (ErrorHandlingMode == JobErrorHandlingMode.Terminate)
                            {
                                thread.Abort();
                            }
                            thread.Join();
                        }
                        catch
                        {
                            // eating errors on purpose
                        }
                    }
                }
            }
            if (exceptions.Any())
            {
                throw exceptions[0];
            }
        }
Example #8
 public override ulong GetPlayedSampleCount()
 {
     return(Interlocked.Read(ref _playedSampleCount));
 }
Example #9
        public async Task SubscriptionSimpleTakeOverStrategy()
        {
            var timeout = Debugger.IsAttached ? TimeSpan.FromMinutes(5) : TimeSpan.FromSeconds(5);

            using (var store = GetDocumentStore())
            {
                await CreateDocuments(store, 1);

                var lastChangeVector = (await store.Maintenance.SendAsync(new GetStatisticsOperation())).DatabaseChangeVector;
                await CreateDocuments(store, 5);

                var subscriptionCreationParams = new SubscriptionCreationOptions
                {
                    Query        = "from Things",
                    ChangeVector = lastChangeVector
                };
                var subsId = await store.Subscriptions.CreateAsync(subscriptionCreationParams);

                Task firstSubscription;
                using (var acceptedSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>(new SubscriptionWorkerOptions(subsId)))
                {
                    var  acceptedSubscriptionList = new BlockingCollection <Thing>();
                    long counter = 0;
                    var  batchProcessedByFirstSubscription = new AsyncManualResetEvent();

                    acceptedSubscription.AfterAcknowledgment += b =>
                    {
                        if (Interlocked.Read(ref counter) == 5)
                        {
                            batchProcessedByFirstSubscription.Set();
                        }
                        return(Task.CompletedTask);
                    };

                    firstSubscription = acceptedSubscription.Run(x =>
                    {
                        foreach (var item in x.Items)
                        {
                            Interlocked.Increment(ref counter);
                            acceptedSubscriptionList.Add(item.Result);
                        }
                    });

                    // wait until we know that connection was established
                    for (var i = 0; i < 5; i++)
                    {
                        Assert.True(acceptedSubscriptionList.TryTake(out _, timeout), "no doc");
                    }
                    Assert.True(await batchProcessedByFirstSubscription.WaitAsync(TimeSpan.FromSeconds(15)), "no ack");
                    Assert.False(acceptedSubscriptionList.TryTake(out _));
                }

                // open second subscription
                using (var takingOverSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>(new SubscriptionWorkerOptions(subsId)
                {
                    Strategy = SubscriptionOpeningStrategy.TakeOver
                }))
                {
                    var takingOverSubscriptionList = new BlockingCollection <Thing>();

                    GC.KeepAlive(takingOverSubscription.Run(x =>
                    {
                        foreach (var item in x.Items)
                        {
                            takingOverSubscriptionList.Add(item.Result);
                        }
                    }));

                    // Wait for the first subscription to finish before creating the documents.
                    await firstSubscription;
                    await CreateDocuments(store, 5);

                    // wait until we know that connection was established
                    for (var i = 0; i < 5; i++)
                    {
                        Assert.True(takingOverSubscriptionList.TryTake(out _, timeout), "no doc takeover");
                    }
                    Assert.False(takingOverSubscriptionList.TryTake(out _));
                }
            }
        }
Example #10
        internal static void DumpMessageAsConsumerGroup(ConsumeGroupHelperOptions cgOptions)
        {
            try
            {
                totalCount = 0;

                ZookeeperConsumerConnector.UseSharedStaticZookeeperClient = cgOptions.UseSharedStaticZookeeperClient;
                DumpMessageAsConsumerGroupSigleThreadBlock(cgOptions);

                Logger.InfoFormat("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));
                Console.WriteLine("======TotallyRead:{0}=============", Interlocked.Read(ref totalCount));

                Logger.InfoFormat("======New offset");
                Console.WriteLine("======New offset");
                long totalCountCommit = 0;
                foreach (var kv in newOffset.OrderBy(r => r.Key))
                {
                    string d = string.Format("Partition:{0}\t Old Offset:{1,10} --> New Offset:{2,10}  Diff:{3}"
                                             , kv.Key, initialOffset == null || !initialOffset.ContainsKey(kv.Key) ? 0 : initialOffset[kv.Key],
                                             kv.Value,
                                             kv.Value - (initialOffset == null || !initialOffset.ContainsKey(kv.Key) ? 0 : initialOffset[kv.Key]));
                    Logger.Info(d);
                    Console.WriteLine(d);
                    totalCountCommit += kv.Value - (initialOffset == null || !initialOffset.ContainsKey(kv.Key) ? 0 : initialOffset[kv.Key]);
                }
                //TODO: currently each partition may have one message whose offset has not been committed. Please refer to the Kafka document: https://cwiki.apache.org/confluence/display/KAFKA/Compression
                //Hence, for compressed data, the consumed offset will be advanced one compressed message at a time. This has the side effect of possible duplicates in the event of a consumer failure. For uncompressed data, consumed offset will be advanced one message at a time.
                if (totalCountCommit != Interlocked.Read(ref totalCount))
                {
                    Logger.ErrorFormat("totalCountCommit {0} !=  totalCount  {1}, check next line log see if it's reasonable:", totalCountCommit, Interlocked.Read(ref totalCount));
                    long diff = totalCountCommit - Interlocked.Read(ref totalCount);
                    if (diff <= newOffset.Count && diff >= 0)
                    {
                        Logger.ErrorFormat(" the difference is reasonable, by design of kafkaNET.Library.   For each partition, if not hit end of log, at least read one record from it!");
                    }
                    else
                    {
                        Logger.ErrorFormat(" the difference is not reasonable ,please check log!");
                    }
                }
                else
                {
                    Logger.InfoFormat("totalCountCommit {0} ==  totalCount  {1}", totalCountCommit, Interlocked.Read(ref totalCount));
                    Console.WriteLine("totalCountCommit {0} ==  totalCount  {1}", totalCountCommit, Interlocked.Read(ref totalCount));
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Consumer group consume data. got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tconsumerGroupName:{3}\tconsumerId:{4}\tthreadCount:{5}\tcount:{6}"
                                   , ex.FormatException(),
                                   cgOptions.Topic,
                                   cgOptions.Zookeeper,
                                   cgOptions.ConsumerGroupName,
                                   cgOptions.ConsumerId,
                                   cgOptions.FetchThreadCountPerConsumer,
                                   cgOptions.Count);
            }
        }
Example #11
        protected override void OnStop()
        {
            try
            {
                StaticInfo.CanContinue = false;

                while (Interlocked.Read(ref StaticInfo.ThreadCount) > 0)
                {
                    Thread.Sleep(2000);
                    Logger.Log.InfoFormat("There are {0} threads which are active... Waiting for those threads to abort ", Interlocked.Read(ref StaticInfo.ThreadCount));
                }

                Logger.Log.Info("Data Download Service Stopped Successfully");
            }
            catch (Exception ex)
            {
                Logger.Log.Error("Failed to stop the transportation recovery service", ex);
            }
        }
Example #12
        internal void Consume()
        {
            // connects to zookeeper
            using (ZookeeperConsumerConnector connector = new ZookeeperConsumerConnector(configSettings, true))
            {
                if (this.ThreadID == 0)
                {
                    ConsumerGroupHelper.initialOffset = connector.GetOffset(cgOptions.Topic);

                    Logger.InfoFormat("======Original offset \r\n{0}", ConsumerGroupHelper.initialOffset == null ? "(NULL)" : ConsumeGroupMonitorHelper.GetComsumerGroupOffsetsAsLog(ConsumerGroupHelper.initialOffset));
                }

                // defines collection of topics and number of threads to consume it with
                // ===============NOTE============================
                // For example, suppose one topic has 80 partitions.
                //
                // Normally, start more than 96 (= 80 * 120%) clients with the same GroupId (the extra 20% are a buffer for autopilot/IMP), and set FetchThreadCountPerConsumer to 1.
                // Then 80 clients each lock one partition (MACHINENAME_ProcessID can be used as the ConsumerId) and the other 16 stay idle. This approach is strongly recommended.
                //
                // If you start 40 clients with FetchThreadCountPerConsumer set to 1, every client locks at least 2 partitions, and if some client is unavailable for autopilot/IMP
                // reasons, some clients may lock 3 partitions.
                //
                // If you start 40 clients with FetchThreadCountPerConsumer set to 2, you will get two IEnumerator<Message> instances: topicData[0].GetEnumerator() and topicData[1].GetEnumerator(),
                // and you need to start TWO threads to process them independently.
                // If the client gets 2 partitions, each thread handles 1 partition;
                // if the client gets 3 partitions, one thread gets 2 partitions and the other gets 1, which complicates things and leaves partition consumption unbalanced.
                //==================NOTE=============================
                IDictionary <string, int> topicMap = new Dictionary <string, int> {
                    { cgOptions.Topic, cgOptions.FetchThreadCountPerConsumer }
                };

                // get references to topic streams.
                IDictionary <string, IList <KafkaMessageStream <Message> > > streams = connector.CreateMessageStreams(topicMap, new DefaultDecoder());
                IList <KafkaMessageStream <Message> > topicData = streams[cgOptions.Topic];
                long latestTotalCount = 0;

                bool hitEndAndCommited = false;
                if (cgOptions.CancellationTimeoutMs == KafkaNETExampleConstants.DefaultCancellationTimeoutMs)
                {
                    // Get the message enumerator.
                    IEnumerator <Message> messageEnumerator = topicData[0].GetEnumerator();
                    //TODO: the enumerator count equals FetchThreadCountPerConsumer. For example, if that value is 5, there should be 5 enumerators here.
                    //If there are 100 partitions and only 20 consumers, set this value to 5 and start 5 threads, one handling each enumerator.

                    // Add tuples until maximum receive message count is reached or no new messages read after consumer configured timeout.
                    while (true)
                    {
                        bool noMoreMessage = false;
                        try
                        {
                            messageEnumerator.MoveNext();
                            Message m = messageEnumerator.Current;
                            latestTotalCount = Interlocked.Increment(ref ConsumerGroupHelper.totalCount);
                            Logger.InfoFormat("Message {0} from Partition:{1}, Offset:{2}, key:{3}, value:{4}", latestTotalCount, m.PartitionId, m.Offset, m.Key == null ? "(null)" : Encoding.UTF8.GetString(m.Key), m.Payload == null ? "(null)" : Encoding.UTF8.GetString(m.Payload));
                            if (latestTotalCount == 1)
                            {
                                Logger.InfoFormat("Read FIRST message, it's offset: {0}  PartitionID:{1}", m.Offset, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId);
                            }

                            hitEndAndCommited = false;
                            if (latestTotalCount % cgOptions.CommitBatchSize == 0)
                            {
                                //NOTE======
                                //Normally, just call .CommitOffsets() directly.
                                //    CommitOffset(string topic, int partition, long offset) is only used when the customer has a strong requirement to reprocess as few messages as possible.
                                //Tune the frequency of calling .CommitOffsets(); it directly increases ZooKeeper load and impacts your overall performance.
                                if (cgOptions.CommitOffsetWithPartitionIDOffset)
                                {
                                    connector.CommitOffset(cgOptions.Topic, m.PartitionId.Value, m.Offset);
                                }
                                else
                                {
                                    connector.CommitOffsets();
                                }
                                Console.WriteLine("\tRead some and commit once,  LATEST message offset: {0}. PartitionID:{1} -- {2}  Totally read  {3}  will commit offset. {4} FetchOffset:{5}  ConsumeOffset:{6} CommitedOffset:{7}"
                                                  , m.Offset, m.PartitionId.Value, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId, latestTotalCount, DateTime.Now
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.FetchOffset
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.ConsumeOffset
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.CommitedOffset);
                            }

                            if (cgOptions.Count > 0 && latestTotalCount >= cgOptions.Count)
                            {
                                Logger.InfoFormat("Read LAST message, it's offset: {0}. PartitionID:{1}   Totally read {2}  want {3} will exit.", m.Offset, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId, latestTotalCount, cgOptions.Count);
                                break;
                            }
                        }
                        catch (ConsumerTimeoutException)
                        {
                            if (!hitEndAndCommited)
                            {
                                Logger.InfoFormat("Totally Read {0}  will commit offset. {1}", latestTotalCount, DateTime.Now);
                                connector.CommitOffsets();
                                hitEndAndCommited = true;
                            }
                            // Thrown if no new messages read after consumer configured timeout.
                            noMoreMessage = true;
                        }

                        if (noMoreMessage)
                        {
                            Logger.InfoFormat("No more message , hit end ,will Sleep(1), {0}", DateTime.Now);
                            if (cgOptions.SleepTypeWhileAlwaysRead == 0)
                            {
                                Thread.Sleep(0);
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 1)
                            {
                                Thread.Sleep(1);        //Best choice is Thread.Sleep(1); the other options still drive the CPU to 100%
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 2)
                            {
                                Thread.Yield();
                            }
                            else
                            {
                            }
                        }
                    }
                }
                else
                {
                    //Siphon scenario: repeatedly take some messages and process them; if there are not enough messages, stop the current batch after the timeout.
                    while (true)
                    {
#if NET45
                        bool    noMoreMessage = false;
                        Message lastMessage   = null;
                        int     count         = 0;
                        KafkaMessageStream <Message> messagesStream = null;
                        using (CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(cgOptions.CancellationTimeoutMs))
                        {
                            lastMessage = null;
                            IEnumerable <Message> messages = topicData[0].GetCancellable(cancellationTokenSource.Token);
                            messagesStream = (KafkaMessageStream <Message>)messages;
                            foreach (Message message in messages)
                            {
                                latestTotalCount = Interlocked.Increment(ref ConsumerGroupHelper.totalCount);
                                lastMessage      = message;
                                if (latestTotalCount == 1)
                                {
                                    PartitionTopicInfo p = messagesStream.iterator.currentTopicInfo;
                                    Logger.InfoFormat("Read FIRST message, it's offset: {0}  PartitionID:{1}", lastMessage.Offset, p == null ? "null" : p.PartitionId.ToString());
                                }
                                hitEndAndCommited = false;
                                if (++count >= cgOptions.CommitBatchSize)
                                {
                                    cancellationTokenSource.Cancel();
                                }
                            }
                        }
                        if (count > 0)
                        {
                            connector.CommitOffsets();
                            consumedTotalCount += count;
                            PartitionTopicInfo p = messagesStream.iterator.currentTopicInfo;
                            Console.WriteLine("\tRead some and commit once, Thread: {8}  consumedTotalCount:{9} Target:{10} LATEST message offset: {0}. PartitionID:{1} -- {2}  Totally read  {3}  will commit offset. {4} FetchOffset:{5}  ConsumeOffset:{6} CommitedOffset:{7}"
                                              , lastMessage.Offset, lastMessage.PartitionId.Value, p == null ? "null" : p.PartitionId.ToString(), latestTotalCount, DateTime.Now
                                              , p == null ? "null" : p.FetchOffset.ToString()
                                              , p == null ? "null" : p.ConsumeOffset.ToString()
                                              , p == null ? "null" : p.CommitedOffset.ToString()
                                              , this.ThreadID
                                              , this.consumedTotalCount
                                              , this.Count);
                        }
                        else
                        {
                            noMoreMessage = true;
                        }

                        if (this.Count > 0 && consumedTotalCount >= this.Count)
                        {
                            Logger.InfoFormat("Current thrad Read LAST message, Totally read {0}  want {1} will exit current thread.", consumedTotalCount, this.Count);
                            break;
                        }

                        if (noMoreMessage)
                        {
                            Logger.InfoFormat("No more message , hit end ,will Sleep(2000), {0}", DateTime.Now);
                            if (cgOptions.SleepTypeWhileAlwaysRead == 0)
                            {
                                Thread.Sleep(0);
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 1)
                            {
                                Thread.Sleep(2000);        //Best choice is Thread.Sleep(1); the other options still drive the CPU to 100%
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 2)
                            {
                                Thread.Yield();
                            }
                            else
                            {
                            }
                        }
#endif
#if NET4
                        throw new NotSupportedException("Please use .net45 to compile .");
#endif
                    }
                }

                Logger.InfoFormat("Read {0}  will commit offset. {1}", latestTotalCount, DateTime.Now);
                connector.CommitOffsets();

                latestTotalCount = Interlocked.Read(ref ConsumerGroupHelper.totalCount);

                Logger.InfoFormat("Totally read {0}  want {1} . ", latestTotalCount, cgOptions.Count);
                if (this.ThreadID == 0)
                {
                    ConsumerGroupHelper.newOffset = connector.GetOffset(cgOptions.Topic);
                }
            }

            this.resetEvent.Set();
        }
Example #13
 public long AtomicRead(ref long location)
 {
     return(IntPtr.Size == 4 ? Interlocked.Read(ref location) : location);
 }
Example #14
 public static bool IsOn() => Interlocked.Read(ref On) == 1;
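The On flag read above is declared as a long so it can be read and written atomically; the writer side would use Interlocked.Exchange. A minimal sketch with assumed method names:

 public static void TurnOn() => Interlocked.Exchange(ref On, 1);

 public static void TurnOff() => Interlocked.Exchange(ref On, 0);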
Example #15
 public long Get() => Interlocked.Read(ref _value);
Example #16
 /// <summary>
 /// Returns true if the resource currently locked
 /// </summary>
 /// <returns>true if it is</returns>
 public bool OnHold()
 {
     return(Interlocked.Read(ref _lockCount) != 0);
 }
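OnHold only inspects _lockCount; the count itself would be maintained by matching acquire/release calls elsewhere in the class. A sketch under that assumption (Acquire and Release are illustrative names):

 // Each holder bumps the count on entry and drops it on exit, so OnHold()
 // returns true exactly while at least one holder is outstanding.
 public void Acquire() => Interlocked.Increment(ref _lockCount);

 public void Release() => Interlocked.Decrement(ref _lockCount);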
Example #17
        static void Main(string[] args)
        {
            Options options = null;
            RuntimeEventListener listener = null;

            Parser.Default.ParseArguments <Options>(args)
            .WithParsed(_options =>
            {
                options = _options;
            });

            if (options == null)
            {
                return;
            }

            if (options.Verbose)
            {
                listener = new RuntimeEventListener();
            }

            SimsClient[] clients   = new SimsClient[options.Clients];
            Task[]       echoTasks = new Task[options.Clients];

            Random r = new Random();

            byte[] payload = new byte[options.Payload];
            r.NextBytes(payload);

            IPAddress address  = IPAddress.Parse(options.Address);
            EndPoint  endpoint = new IPEndPoint(address, options.Port);

            for (int i = 0; i < options.Clients; i++)
            {
                clients[i] = new SimsClient(endpoint, options.Rounds, payload);
            }

            Stopwatch stopwatch = new Stopwatch();

            stopwatch.Start();


            if (listener != null)
            {
                listener.ThreadPoolWorkerThreadWait += () =>
                {
                    Console.WriteLine("==============> {0} {1} {2} {3} {4} {5} {6} {7}",
                                      Interlocked.Read(ref RuntimeEventListener.EnqueueCnt),
                                      Interlocked.Read(ref RuntimeEventListener.DequeueCnt),
                                      Interlocked.Read(ref SimsClient.ConnectBeginCnt),
                                      Interlocked.Read(ref SimsClient.ConnectFinishCnt),
                                      Interlocked.Read(ref SimsClient.WriteBeginCnt),
                                      Interlocked.Read(ref SimsClient.WriteBeginCnt),
                                      Interlocked.Read(ref SimsClient.ReadFinishCnt),
                                      stopwatch.ElapsedMilliseconds
                                      );
                };
            }


            for (int i = 0; i < options.Clients; i++)
            {
                echoTasks[i] = clients[i].Start();
            }
            Task.WaitAll(echoTasks);
            stopwatch.Stop();


            int errorNum = 0;

            foreach (SimsClient cli in clients)
            {
                if (cli.Error != null)
                {
                    errorNum++;
                }
            }

            Console.WriteLine($"{options.Clients} clients, payload {options.Payload} bytes, {options.Rounds} rounds");
            Console.WriteLine("Total Time Elapsed: {0} Milliseconds", stopwatch.Elapsed.TotalMilliseconds);
            Console.WriteLine("{0} error of {1}", errorNum, options.Clients);

            double[] connect = clients.Where(cli => cli.Error == null).Select(cli => cli.ConnectDuration.TotalMilliseconds).ToArray();

            double[] echo = clients.Where(cli => cli.Error == null).Select(cli => cli.EchoDuration.TotalMilliseconds).ToArray();

            double[] total = clients.Where(cli => cli.Error == null).Select(cli => cli.ConnectDuration.TotalMilliseconds + cli.EchoDuration.TotalMilliseconds).ToArray();


            Console.WriteLine("connect\tp90:{0:N2}ms\tp95:{1:N2}ms\tp99:{2:N2}ms\tp99.9:{3:N2}ms",
                              Percentile(connect, 0.9),
                              Percentile(connect, 0.95),
                              Percentile(connect, 0.99),
                              Percentile(connect, 0.999)
                              );

            Console.WriteLine("echo\tp90:{0:N2}ms\tp95:{1:N2}ms\tp99:{2:N2}ms\tp99.9:{3:N2}ms",
                              Percentile(echo, 0.9),
                              Percentile(echo, 0.95),
                              Percentile(echo, 0.99),
                              Percentile(echo, 0.999)
                              );

            Console.WriteLine("total\tp90:{0:N2}ms\tp95:{1:N2}ms\tp99:{2:N2}ms\tp99.9:{3:N2}ms",
                              Percentile(total, 0.9),
                              Percentile(total, 0.95),
                              Percentile(total, 0.99),
                              Percentile(total, 0.999)
                              );

            Console.WriteLine("==============> {0} {1} {2} {3} {4} {5} {6}",
                              Interlocked.Read(ref RuntimeEventListener.EnqueueCnt),
                              Interlocked.Read(ref RuntimeEventListener.DequeueCnt),
                              Interlocked.Read(ref SimsClient.ConnectBeginCnt),
                              Interlocked.Read(ref SimsClient.ConnectFinishCnt),
                              Interlocked.Read(ref SimsClient.WriteBeginCnt),
                              Interlocked.Read(ref SimsClient.WriteBeginCnt),
                              Interlocked.Read(ref SimsClient.ReadFinishCnt)
                              );
        }
Example #18
        public void Send(IMessageTree tree)
        {
            lock (_queue)
            {
                if (_queue.Count < _maxQueueSize)
                {
                    _queue.Enqueue(tree);
                }
                else
                {
                    // throw it away since the queue is full
                    Interlocked.Increment(ref _errors);

                    if (_statistics != null)
                    {
                        _statistics.OnOverflowed(tree);
                    }

                    if (Interlocked.Read(ref _errors) % 100 == 0)
                    {
                        Logger.Warn("Can't send message to cat-server due to queue's full! Count: " + Interlocked.Read(ref _errors));
                    }
                }
            }
        }
Example #19
        public async Task TestServicesAsync(string networkString)
        {
            var network          = Network.GetNetwork(networkString);
            var blocksToDownload = new HashSet <uint256>();

            if (network == Network.Main)
            {
                blocksToDownload.Add(new uint256("00000000000000000037c2de35bd85f3e57f14ddd741ce6cee5b28e51473d5d0"));
                blocksToDownload.Add(new uint256("000000000000000000115315a43cb0cdfc4ea54a0e92bed127f4e395e718d8f9"));
                blocksToDownload.Add(new uint256("00000000000000000011b5b042ad0522b69aae36f7de796f563c895714bbd629"));
            }
            else if (network == Network.TestNet)
            {
                blocksToDownload.Add(new uint256("0000000097a664c4084b49faa6fd4417055cb8e5aac480abc31ddc57a8208524"));
                blocksToDownload.Add(new uint256("000000009ed5b82259ecd2aa4cd1f119db8da7a70e7ea78d9c9f603e01f93bcc"));
                blocksToDownload.Add(new uint256("00000000e6da8c2da304e9f5ad99c079df2c3803b49efded3061ecaf206ddc66"));
            }
            else
            {
                throw new NotSupportedException(network.ToString());
            }

            var manager    = KeyManager.CreateNew(out Mnemonic mnemonic, "password");
            var dataFolder = Path.Combine(SharedFixture.DataDir, nameof(TestServicesAsync));

            Directory.CreateDirectory(SharedFixture.DataDir);

            var             addressManagerFilePath = Path.Combine(SharedFixture.DataDir, $"AddressManager{network}.dat");
            var             blocksFolderPath       = Path.Combine(SharedFixture.DataDir, $"Blocks{network}");
            var             connectionParameters   = new NodeConnectionParameters();
            AddressManager  addressManager         = null;
            BlockDownloader downloader             = null;

            try
            {
                try
                {
                    addressManager = AddressManager.LoadPeerFile(addressManagerFilePath);
                    Logger.LogInfo <WalletService>($"Loaded {nameof(AddressManager)} from `{addressManagerFilePath}`.");
                }
                catch (FileNotFoundException ex)
                {
                    Logger.LogInfo <WalletService>($"{nameof(AddressManager)} did not exist at `{addressManagerFilePath}`. Initializing new one.");
                    Logger.LogTrace <WalletService>(ex);
                    addressManager = new AddressManager();
                }

                connectionParameters.TemplateBehaviors.Add(new AddressManagerBehavior(addressManager));
                var memPoolService = new MemPoolService();
                connectionParameters.TemplateBehaviors.Add(new MemPoolBehavior(memPoolService));

                using (var nodes = new NodesGroup(network, connectionParameters,
                                                  new NodeRequirement
                {
                    RequiredServices = NodeServices.Network,
                    MinVersion = ProtocolVersion.WITNESS_VERSION
                }))
                {
                    downloader = new BlockDownloader(nodes, blocksFolderPath);
                    Assert.True(Directory.Exists(blocksFolderPath));
                    downloader.Start();
                    foreach (var hash in blocksToDownload)
                    {
                        downloader.QueToDownload(hash);
                    }
                    var wallet = new WalletService(dataFolder, network, manager, nodes, memPoolService);
                    try
                    {
                        nodes.ConnectedNodes.Added         += ConnectedNodes_Added;
                        nodes.ConnectedNodes.Removed       += ConnectedNodes_Removed;
                        memPoolService.TransactionReceived += MemPoolService_TransactionReceived;

                        nodes.Connect();
                        // Using Interlocked not because it makes sense in this context, but to
                        // set an example that these values are often concurrency-sensitive
                        var times = 0;
                        while (Interlocked.Read(ref _nodeCount) < 3)
                        {
                            if (times > 4200)                             // 7 minutes
                            {
                                throw new TimeoutException($"Connection test timed out.");
                            }
                            await Task.Delay(100);

                            times++;
                        }

                        times = 0;
                        while (Interlocked.Read(ref _mempoolTransactionCount) < 3)
                        {
                            if (times > 3000)                             // 5 minutes
                            {
                                throw new TimeoutException($"{nameof(MemPoolService)} test timed out.");
                            }
                            await Task.Delay(100);

                            times++;
                        }

                        foreach (var hash in blocksToDownload)
                        {
                            times = 0;
                            while (downloader.GetBlock(hash) == null)
                            {
                                if (times > 1800)                                 // 3 minutes
                                {
                                    throw new TimeoutException($"{nameof(BlockDownloader)} test timed out.");
                                }
                                await Task.Delay(100);

                                times++;
                            }
                            Assert.True(File.Exists(Path.Combine(blocksFolderPath, hash.ToString())));
                            Logger.LogInfo <WalletTests>($"Full block is downloaded: {hash}.");
                        }
                    }
                    finally
                    {
                        nodes.ConnectedNodes.Added         -= ConnectedNodes_Added;
                        nodes.ConnectedNodes.Removed       -= ConnectedNodes_Removed;
                        memPoolService.TransactionReceived -= MemPoolService_TransactionReceived;
                    }
                }
            }
            finally
            {
                downloader?.Stop();

                // So next test will download the block.
                foreach (var hash in blocksToDownload)
                {
                    downloader?.TryRemove(hash);
                }
                Directory.Delete(blocksFolderPath, recursive: true);

                addressManager?.SavePeerFile(addressManagerFilePath, network);
                Logger.LogInfo <WalletTests>($"Saved {nameof(AddressManager)} to `{addressManagerFilePath}`.");
            }
        }
Example #20
        private static long Dispose = 0;         // To detect redundant calls

        public static async Task DisposeAsync()
        {
            var compareRes = Interlocked.CompareExchange(ref Dispose, 1, 0);

            if (compareRes == 1)
            {
                while (Interlocked.Read(ref Dispose) != 2)
                {
                    await Task.Delay(50);
                }
                return;
            }
            else if (compareRes == 2)
            {
                return;
            }

            try
            {
                await DisposeInWalletDependentServicesAsync();

                if (UpdateChecker != null)
                {
                    await UpdateChecker?.StopAsync();

                    Logger.LogInfo($"{nameof(UpdateChecker)} is stopped.", nameof(Global));
                }

                if (Synchronizer != null)
                {
                    await Synchronizer?.StopAsync();

                    Logger.LogInfo($"{nameof(Synchronizer)} is stopped.", nameof(Global));
                }

                if (AddressManagerFilePath != null)
                {
                    IoHelpers.EnsureContainingDirectoryExists(AddressManagerFilePath);
                    if (AddressManager != null)
                    {
                        AddressManager?.SavePeerFile(AddressManagerFilePath, Config.Network);
                        Logger.LogInfo($"{nameof(AddressManager)} is saved to `{AddressManagerFilePath}`.", nameof(Global));
                    }
                }

                if (Nodes != null)
                {
                    Nodes?.Disconnect();
                    while (Nodes.ConnectedNodes.Any(x => x.IsConnected))
                    {
                        await Task.Delay(50);
                    }
                    Nodes?.Dispose();
                    Logger.LogInfo($"{nameof(Nodes)} are disposed.", nameof(Global));
                }

                if (RegTestMemPoolServingNode != null)
                {
                    RegTestMemPoolServingNode.Disconnect();
                    Logger.LogInfo($"{nameof(RegTestMemPoolServingNode)} is disposed.", nameof(Global));
                }

                if (TorManager != null)
                {
                    await TorManager?.StopAsync();

                    Logger.LogInfo($"{nameof(TorManager)} is stopped.", nameof(Global));
                }

                if (AsyncMutex.IsAny)
                {
                    try
                    {
                        await AsyncMutex.WaitForAllMutexToCloseAsync();

                        Logger.LogInfo($"{nameof(AsyncMutex)}(es) are stopped.", nameof(Global));
                    }
                    catch (Exception ex)
                    {
                        Logger.LogError($"Error during stopping {nameof(AsyncMutex)}: {ex}", nameof(Global));
                    }
                }
            }
            catch (Exception ex)
            {
                Logger.LogWarning(ex, nameof(Global));
            }
            finally
            {
                Interlocked.Exchange(ref Dispose, 2);
            }
        }
Example #21
 public void can_get_events_from_custom_stream()
 {
     AssertEx.IsOrBecomesTrue(() => Interlocked.Read(ref _testEventCount) == 1, 3000);
 }
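The assertion polls _testEventCount until it reaches 1, so some handler in the fixture must increment it when the expected event arrives. A sketch of that handler side, with the event type and handler name assumed:

 // Invoked for each event read back from the custom stream; the increment is what the
 // polling assertion above observes via Interlocked.Read(ref _testEventCount).
 public void Handle(object @event)
 {
     Interlocked.Increment(ref _testEventCount);
 }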
Example #22
        private void WorkerMain()
        {
            IFileEntryAction entry;
            bool             wasLastTickWorking = false;

            while (Interlocked.Read(ref _workerEndFlag) == 0)
            {
                // Try to get entry to process
                lock (_requests)
                {
                    if (_importingQueue.Count > 0)
                    {
                        entry = _importingQueue.Dequeue();
                    }
                    else
                    {
                        entry = null;
                    }
                }

                // Check if there is a job to process this tick
                bool inThisTickWork = entry != null;
                if (inThisTickWork)
                {
                    // Check if begin importing
                    if (!wasLastTickWorking)
                    {
                        _importBatchDone = 0;
                        ImportingQueueBegin?.Invoke();
                    }

                    // Import file
                    bool failed = true;
                    try
                    {
                        ImportFileBegin?.Invoke(entry);
                        failed = entry.Execute();
                    }
                    catch (Exception ex)
                    {
                        Editor.LogWarning(ex);
                    }
                    finally
                    {
                        if (failed)
                        {
                            Editor.LogWarning("Failed to import " + entry.SourceUrl + " to " + entry.ResultUrl);
                        }

                        _importBatchDone++;
                        ImportFileEnd?.Invoke(entry, failed);
                    }
                }
                else
                {
                    // Check if end importing
                    if (wasLastTickWorking)
                    {
                        _importBatchDone = _importBatchSize = 0;
                        ImportingQueueEnd?.Invoke();
                    }

                    // Wait some time
                    Thread.Sleep(100);
                }

                wasLastTickWorking = inThisTickWork;
            }
        }
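WorkerMain above keeps running while `Interlocked.Read(ref _workerEndFlag)` returns 0. A hedged sketch, with assumed members, of how such a worker is typically started and stopped from another thread:

    private Thread _workerThread;  // hypothetical
    private long _workerEndFlag;   // observed by WorkerMain via Interlocked.Read

    private void StartWorker()
    {
        Interlocked.Exchange(ref _workerEndFlag, 0);
        _workerThread = new Thread(WorkerMain) { IsBackground = true };
        _workerThread.Start();
    }

    private void StopWorker()
    {
        // Publish the stop request atomically; the worker sees it on its next loop iteration.
        Interlocked.Exchange(ref _workerEndFlag, 1);
        _workerThread?.Join();
    }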
Example #23
0
        protected override void OnEventCommand(EventCommandEventArgs command)
        {
            if (command.Command == EventCommand.Enable)
            {
                // This is the convention for initializing counters in the RuntimeEventSource (lazily on the first enable command).

                _outgoingConnectionsEstablishedCounter ??= new PollingCounter("outgoing-connections-established", this, () => Interlocked.Read(ref _outgoingConnectionsEstablished))
                {
                    DisplayName = "Outgoing Connections Established",
                };
                _incomingConnectionsEstablishedCounter ??= new PollingCounter("incoming-connections-established", this, () => Interlocked.Read(ref _incomingConnectionsEstablished))
                {
                    DisplayName = "Incoming Connections Established",
                };
                _bytesReceivedCounter ??= new PollingCounter("bytes-received", this, () => Interlocked.Read(ref _bytesReceived))
                {
                    DisplayName = "Bytes Received",
                };
                _bytesSentCounter ??= new PollingCounter("bytes-sent", this, () => Interlocked.Read(ref _bytesSent))
                {
                    DisplayName = "Bytes Sent",
                };
                _datagramsReceivedCounter ??= new PollingCounter("datagrams-received", this, () => Interlocked.Read(ref _datagramsReceived))
                {
                    DisplayName = "Datagrams Received",
                };
                _datagramsSentCounter ??= new PollingCounter("datagrams-sent", this, () => Interlocked.Read(ref _datagramsSent))
                {
                    DisplayName = "Datagrams Sent",
                };
            }
        }
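These `PollingCounter`s only read the backing fields; the writes have to be atomic too for `Interlocked.Read` to be meaningful. A hedged sketch of the update side, reusing the field names from the snippet above:

    // Hypothetical helpers invoked from the networking code paths.
    internal void OutgoingConnectionEstablished() => Interlocked.Increment(ref _outgoingConnectionsEstablished);

    internal void DatagramSent() => Interlocked.Increment(ref _datagramsSent);

    internal void AddBytesSent(int count) => Interlocked.Add(ref _bytesSent, count);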
Example #24
0
 private bool TryGetValues(out long curVal, out long timeStamp)
 {
     curVal    = Interlocked.Read(ref _accumulatedValue);
     timeStamp = Interlocked.Read(ref _preciseTimeStamp);

     // The pair is consistent only if neither value changed between the two reads.
     return curVal == Interlocked.Read(ref _accumulatedValue) && timeStamp == Interlocked.Read(ref _preciseTimeStamp);
 }
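Two related 64-bit values cannot be read atomically as a pair, so the method above reads each twice and reports whether they stayed stable. A hedged sketch of the retry loop a caller would typically wrap around it:

    private (long Value, long TimeStamp) ReadConsistentPair()
    {
        long value, timeStamp;

        // Retry until neither field changed between the first and second reads.
        while (!TryGetValues(out value, out timeStamp))
        {
        }

        return (value, timeStamp);
    }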
Example #25
0
 public long GetValue()
 {
     return Interlocked.Read(ref this.value);
 }
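A plain read of a `long` field is not guaranteed to be atomic on 32-bit platforms, which is why even this trivial accessor goes through `Interlocked.Read`. A minimal sketch of the matching writer side, assuming the field is a simple counter:

    public void Increment() => Interlocked.Increment(ref this.value);

    public void Add(long amount) => Interlocked.Add(ref this.value, amount);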
Example #26
0
 private void Cleanup()
 {
     if (_failedProxies.Count > LargeProxyConfigBoundary && Environment.TickCount64 >= Interlocked.Read(ref _nextFlushTicks))
     {
         CleanupHelper();
     }
 }
Example #27
0
 public bool areChangeNotificationsDelayed()
 {
     return Interlocked.Read(ref changeNotificationsDelayed) > 0;
 }
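The check above implies a ref-counted suppression scope. A hedged sketch of the counterpart, assuming System.Reactive's `Disposable.Create`: opening a scope increments the counter, disposing it decrements, so the read above returns true while any scope is alive.

    private long changeNotificationsDelayed;

    public IDisposable delayChangeNotifications()
    {
        Interlocked.Increment(ref changeNotificationsDelayed);
        return Disposable.Create(() => Interlocked.Decrement(ref changeNotificationsDelayed));
    }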
Example #28
0
        public static void MeasureStats(MessageBus bus)
        {
            _sw.Start();
            _avgCalcStart = DateTime.UtcNow;
            var resultsPath = Guid.NewGuid().ToString() + ".csv";

            // File.WriteAllText(resultsPath, "Target Rate, RPS, Peak RPS, Avg RPS\n");

            _rateTimer = new Timer(_ =>
            {
                if (_measuringRate)
                {
                    return;
                }
                _measuringRate = true;

                try
                {
                    var now          = DateTime.UtcNow;
                    var timeDiffSecs = _sw.Elapsed.TotalSeconds;

                    _sw.Restart();

                    if (timeDiffSecs <= 0)
                    {
                        return;
                    }

                    if (_exception != null)
                    {
                        Console.WriteLine("Failed With:\r\n {0}", _exception.GetBaseException());
                        _rateTimer.Change(-1, -1);
                        _rateTimer.Dispose();
                        _rateTimer = null;
                        return;
                    }

                    Console.Clear();
                    Console.WriteLine("Started {0} of {1} clients", _clientsRunning, _clients);

                    Console.WriteLine("Total Rate: {0} (mps) = {1} (mps) * {2} (clients)", TotalRate, _rate, _clients);
                    Console.WriteLine();

                    // Sends
                    var sends       = Interlocked.Read(ref _sent);
                    var sendsDiff   = sends - _lastSendsCount;
                    var sendsPerSec = sendsDiff / timeDiffSecs;
                    _sendsPerSecond = sendsPerSec;

                    _lastSendsCount = sends;

                    Console.WriteLine("----- SENDS -----");

                    var s1 = Math.Max(0, _rate - _sendsPerSecond);
                    Console.WriteLine("SPS: {0:N3} (diff: {1:N3}, {2:N2}%)", _sendsPerSecond, s1, s1 * 100.0 / _rate);
                    var s2 = Math.Max(0, _rate - _peakSendsPerSecond);
                    Console.WriteLine("Peak SPS: {0:N3} (diff: {1:N2} {2:N2}%)", _peakSendsPerSecond, s2, s2 * 100.0 / _rate);
                    var s3 = Math.Max(0, _rate - _avgSendsPerSecond);
                    Console.WriteLine("Avg SPS: {0:N3} (diff: {1:N3} {2:N2}%)", _avgSendsPerSecond, s3, s3 * 100.0 / _rate);
                    Console.WriteLine();

                    if (sendsPerSec < long.MaxValue && sendsPerSec > _peakSendsPerSecond)
                    {
                        Interlocked.Exchange(ref _peakSendsPerSecond, sendsPerSec);
                    }

                    _avgSendsPerSecond = _avgLastSendsCount / (now - _avgCalcStart).TotalSeconds;

                    // Receives
                    var recv           = Interlocked.Read(ref _received);
                    var recvDiff       = recv - _lastReceivedCount;
                    var recvPerSec     = recvDiff / timeDiffSecs;
                    _receivesPerSecond = recvPerSec;

                    _lastReceivedCount = recv;

                    Console.WriteLine("----- RECEIVES -----");

                    var d1 = Math.Max(0, TotalRate - _receivesPerSecond);
                    Console.WriteLine("RPS: {0:N3} (diff: {1:N3}, {2:N2}%)", _receivesPerSecond, d1, d1 * 100.0 / TotalRate);
                    var d2 = Math.Max(0, TotalRate - _peakReceivesPerSecond);
                    Console.WriteLine("Peak RPS: {0:N3} (diff: {1:N3} {2:N2}%)", _peakReceivesPerSecond, d2, d2 * 100.0 / TotalRate);
                    var d3 = Math.Max(0, TotalRate - _avgReceivesPerSecond);
                    Console.WriteLine("Avg RPS: {0:N3} (diff: {1:N3} {2:N2}%)", _avgReceivesPerSecond, d3, d3 * 100.0 / TotalRate);
                    var d4 = Math.Max(0, _sendsPerSecond - _receivesPerSecond);
                    Console.WriteLine("Actual RPS: {0:N3} (diff: {1:N3} {2:N2}%)", _receivesPerSecond, d4, d4 * 100.0 / _sendsPerSecond);

                    if (bus != null)
                    {
                        Console.WriteLine();
                        Console.WriteLine("----- MESSAGE BUS -----");
                        Console.WriteLine("Allocated Workers: {0}", bus.AllocatedWorkers);
                        Console.WriteLine("BusyWorkers Workers: {0}", bus.BusyWorkers);
                    }

                    if (recvPerSec < long.MaxValue && recvPerSec > _peakReceivesPerSecond)
                    {
                        Interlocked.Exchange(ref _peakReceivesPerSecond, recvPerSec);
                    }

                    _avgReceivesPerSecond = _avgLastReceivedCount / (now - _avgCalcStart).TotalSeconds;

                    // File.AppendAllText(resultsPath, String.Format("{0}, {1}, {2}, {3}\n", TotalRate, _receivesPerSecond, _peakReceivesPerSecond, _avgReceivesPerSecond));

                    if (_runs > 0 && _runs % _stepInterval == 0)
                    {
                        _avgCalcStart = DateTime.UtcNow;
                        Interlocked.Exchange(ref _avgLastReceivedCount, 0);
                        Interlocked.Exchange(ref _avgLastSendsCount, 0);
                        // Bump the target rate atomically. The CompareExchange loop only
                        // publishes the increment once no other writer changed _rate in between.
                        long old;
                        long @new;
                        do
                        {
                            old  = Interlocked.Read(ref _rate);
                            @new = old + _step;
                        }
                        while (Interlocked.CompareExchange(ref _rate, @new, old) != old);
                    }

                    _runs++;
                }
                finally
                {
                    _measuringRate = false;
                }
            }, null, 1000, 1000);
        }
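The peak tracking above first compares against the current peak and then calls `Interlocked.Exchange`, which can in principle lose a concurrent update. A hedged sketch of an atomic "update maximum" helper built on `Interlocked.CompareExchange` (shown for `long`; the snippet's rate fields may well be doubles):

    private static void InterlockedMax(ref long location, long candidate)
    {
        long current = Interlocked.Read(ref location);

        while (candidate > current)
        {
            long previous = Interlocked.CompareExchange(ref location, candidate, current);
            if (previous == current)
            {
                return; // the new maximum was installed
            }

            current = previous; // lost the race; re-check against the newer value
        }
    }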
Example #29
0
        /// <summary>
        /// Calls the given callback with a span of the memory stream data
        /// </summary>
        /// <param name="callback">the callback to be called</param>
        /// <param name="state">A user-defined state, passed to the callback</param>
        /// <param name="bufferSize">the maximum size of the memory span</param>
        public override void CopyTo(ReadOnlySpanAction<byte, object?> callback, object? state, int bufferSize)
        {
            // If we have been inherited into a subclass, the following implementation could be incorrect
            // since it does not call through to Read() which a subclass might have overridden.
            // To be safe we will only use this implementation in cases where we know it is safe to do so,
            // and delegate to our base class (which will call into Read) when we are not sure.
            if (GetType() != typeof(UnmanagedMemoryStream))
            {
                base.CopyTo(callback, state, bufferSize);
                return;
            }

            if (callback == null)
            {
                throw new ArgumentNullException(nameof(callback));
            }

            EnsureNotClosed();
            EnsureReadable();

            // Use a local variable to avoid a race where another thread
            // changes our position after we decide we can read some bytes.
            long pos = Interlocked.Read(ref _position);
            long len = Interlocked.Read(ref _length);
            long n   = len - pos;

            if (n <= 0)
            {
                return;
            }

            int nInt = (int)n; // The cast can overflow for extremely large streams.

            if (nInt < 0)
            {
                return;  // Overflowed, or _position was beyond EOF; nothing to copy.
            }

            unsafe
            {
                if (_buffer != null)
                {
                    byte* pointer = null;

                    try
                    {
                        _buffer.AcquirePointer(ref pointer);
                        ReadOnlySpan<byte> span = new ReadOnlySpan<byte>(pointer + pos + _offset, nInt);
                        Interlocked.Exchange(ref _position, pos + n);
                        callback(span, state);
                    }
                    finally
                    {
                        if (pointer != null)
                        {
                            _buffer.ReleasePointer();
                        }
                    }
                }
                else
                {
                    ReadOnlySpan<byte> span = new ReadOnlySpan<byte>(_mem + pos, nInt);
                    Interlocked.Exchange(ref _position, pos + n);
                    callback(span, state);
                }
            }
        }
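A hedged usage sketch of the overload above, assuming an `UnmanagedMemoryStream` named `stream` is open and positioned where copying should start; the callback receives the remaining bytes as one span plus the caller-supplied state:

    var destination = new MemoryStream();

    stream.CopyTo(
        static (ReadOnlySpan<byte> span, object? state) => ((MemoryStream)state!).Write(span),
        destination,
        bufferSize: 81920);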
Example #30
0
 private static void FillTrackingStreamStatistics(IDictionary<string, long> statistics)
 {
     // This method fills up counters for tracking memory leaks with file streams.
     statistics[$"{nameof(TrackingFileStream)}.{nameof(TrackingFileStream.Constructed)}"]    = Interlocked.Read(ref TrackingFileStream.Constructed);
     statistics[$"{nameof(TrackingFileStream)}.{nameof(TrackingFileStream.ProperlyClosed)}"] = Interlocked.Read(ref TrackingFileStream.ProperlyClosed);
     statistics[$"{nameof(TrackingFileStream)}.{nameof(TrackingFileStream.Leaked)}"]         = TrackingFileStream.Leaked;
 }
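A hedged sketch, with field names taken from the snippet above, of how such tracking counters are typically maintained inside `TrackingFileStream` itself, so the statistics read above can never observe torn values:

    public static long Constructed;
    public static long ProperlyClosed;

    public TrackingFileStream(string path, FileMode mode)
        : base(path, mode)
    {
        Interlocked.Increment(ref Constructed);
    }

    protected override void Dispose(bool disposing)
    {
        if (disposing)
        {
            Interlocked.Increment(ref ProperlyClosed);
        }

        base.Dispose(disposing);
    }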