Example #1
 /// <summary>
 /// Sets the path of the log files.
 /// </summary>
 /// <param name="logDirectory"></param>
 public void SetPath(string logDirectory)
 {
     FilePath.ValidatePathName(logDirectory);
     lock (m_syncRoot)
     {
         m_path = logDirectory;
         m_flushTask.Start(1000);
     }
 }
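The `m_flushTask.Start(1000)` call above, like the other `Start(milliseconds)` calls on this page, relies on a delayed, coalescing start: asking for a run while one is already pending should not queue a second run. The class below is a minimal stand-in sketch of that assumed behavior built on `System.Threading.Timer`; the name and details are invented, and it is not the library's `ScheduledTask`.

using System;
using System.Threading;

// Minimal stand-in for a delayed, coalescing "Start(milliseconds)" call.
// Repeated Start calls while a run is already pending collapse into one run.
public sealed class CoalescingDelayedRunner : IDisposable
{
    private readonly Action m_callback;
    private readonly Timer m_timer;
    private int m_pending; // 0 = idle, 1 = a run is scheduled or executing

    public CoalescingDelayedRunner(Action callback)
    {
        m_callback = callback ?? throw new ArgumentNullException(nameof(callback));
        m_timer = new Timer(_ => Run(), null, Timeout.Infinite, Timeout.Infinite);
    }

    public void Start(int delayMilliseconds = 0)
    {
        // Only the first caller arms the timer; later callers are coalesced.
        if (Interlocked.CompareExchange(ref m_pending, 1, 0) == 0)
            m_timer.Change(delayMilliseconds, Timeout.Infinite);
    }

    private void Run()
    {
        Interlocked.Exchange(ref m_pending, 0); // allow the next Start to re-arm
        m_callback();
    }

    public void Dispose() => m_timer.Dispose();
}

Read with this stand-in in mind, `SetPath` swaps the path under the lock and arms a flush roughly one second out; a second call inside that window simply rides on the already-pending flush.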
Example #2
 /// <summary>
 /// Provides a thread-safe way to enqueue points.
 /// While points are streaming, all other writes are blocked; therefore,
 /// this point stream should be high speed.
 /// </summary>
 /// <param name="stream"></param>
 public void Enqueue(TreeStream <HistorianKey, HistorianValue> stream)
 {
     lock (m_syncWrite)
     {
         PointData data = default(PointData);
         while (data.Load(stream))
         {
             m_blocks.Enqueue(data);
         }
     }
     m_worker.Start();
 }
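The `Enqueue` above serializes producers while the stream is drained into the queue, then starts the worker only after the lock is released, so consumption never runs under the producer lock. A self-contained sketch of the same shape, with invented names and a plain `Queue<T>` plus an event in place of the library types:

using System;
using System.Collections.Generic;
using System.Threading;

// Hypothetical illustration of the Enqueue pattern: producers are serialized while a
// stream is drained into the queue, and the worker is signalled outside the lock.
public sealed class BlockQueue<T>
{
    private readonly object m_syncWrite = new object();
    private readonly Queue<T> m_blocks = new Queue<T>();
    private readonly AutoResetEvent m_workAvailable = new AutoResetEvent(false);

    public BlockQueue()
    {
        new Thread(WorkerLoop) { IsBackground = true }.Start();
    }

    public void Enqueue(IEnumerable<T> stream)
    {
        lock (m_syncWrite)
        {
            foreach (T item in stream)
                m_blocks.Enqueue(item);
        }
        m_workAvailable.Set(); // kick the worker after releasing the lock
    }

    private void WorkerLoop()
    {
        while (true)
        {
            m_workAvailable.WaitOne();
            while (true)
            {
                T item;
                lock (m_syncWrite)
                {
                    if (m_blocks.Count == 0)
                        break;
                    item = m_blocks.Dequeue();
                }
                Console.WriteLine(item); // placeholder for real processing
            }
        }
    }
}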
Example #3
        public void CanRunAfterError()
        {
            var           times  = 0;
            var           errors = 0;
            ScheduledTask s      = null;

            s = new ScheduledTask(() =>
            {
                times++;
                if (times == 2)
                {
                    throw new Exception("error");
                }
                if (times == 4)
                {
                    s.Stop();
                }
            })
            {
                StartInterval = 10, AfterSuccessInterval = 10, AfterErrorInterval = 10, ProceedOnError = true
            };
            s.OnError += _ => errors++;
            s.Start();
            Thread.Sleep(1000);
            Assert.AreEqual(4, times);
            Assert.AreEqual(1, errors);
        }
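This test targets a different `ScheduledTask` shape than the GSF one used elsewhere on this page: a recurring task configured with intervals, an `OnError` event, and a `ProceedOnError` flag. The class below is a minimal stand-in for those semantics so the test's expectations (four runs, one error) can be followed; it is an assumption about the API's behavior, not its implementation.

using System;
using System.Threading;

// Stand-in sketch: a recurring task that keeps running after an error when ProceedOnError
// is set, reporting each exception through OnError.
public sealed class RecurringTask
{
    private readonly Action m_body;
    private volatile bool m_stopped;

    public int StartInterval { get; set; } = 10;
    public int AfterSuccessInterval { get; set; } = 10;
    public int AfterErrorInterval { get; set; } = 10;
    public bool ProceedOnError { get; set; }
    public event Action<Exception> OnError;

    public RecurringTask(Action body) => m_body = body;

    public void Stop() => m_stopped = true;

    public void Start()
    {
        new Thread(() =>
        {
            Thread.Sleep(StartInterval);
            while (!m_stopped)
            {
                try
                {
                    m_body();
                    Thread.Sleep(AfterSuccessInterval);
                }
                catch (Exception ex)
                {
                    OnError?.Invoke(ex);
                    if (!ProceedOnError)
                        return;             // stop on the first error unless told otherwise
                    Thread.Sleep(AfterErrorInterval);
                }
            }
        }) { IsBackground = true }.Start();
    }
}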
Example #4
 /// <summary>
 /// Invalidates the current routing table.
 /// </summary>
 private void RecalculateRoutingTable()
 {
     RoutingTablesValid = false;
     //Wait some time before recalculating the routing tables. This will be done automatically
     //if a message is routed before the table is recalculated.
     m_calculateRoutingTable.Start(10);
 }
Example #5
 /// <summary>
 /// Queues the supplied file as a file that needs to be deleted.
 /// MUST be called from a synchronized context.
 /// </summary>
 /// <param name="file"></param>
 void AddFileToDelete(SortedTreeTable <TKey, TValue> file)
 {
     if (file.BaseFile.IsMemoryFile)
     {
         AddFileToDispose(file);
         return;
     }
     if (!InternalIsFileBeingUsed(file))
     {
         file.BaseFile.Delete();
         return;
     }
     m_listLog.AddFileToDelete(file.ArchiveId);
     m_filesToDelete.Add(file);
     m_processRemovals.Start(1000);
 }
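`AddFileToDelete` defers deletion of files that are still in use and re-arms the removal task to retry about a second later. A compact sketch of that retry loop follows, with invented names and a `Func<string, bool>` standing in for the real in-use check.

using System;
using System.Collections.Generic;
using System.Threading;

// Hypothetical sketch of the deferred-delete pattern: files still in use are parked in a
// list and a delayed callback keeps retrying until every pending delete has gone through.
public sealed class DeferredDeleter
{
    private readonly object m_syncRoot = new object();
    private readonly List<string> m_filesToDelete = new List<string>();
    private readonly Func<string, bool> m_isFileBeingUsed;
    private readonly Timer m_processRemovals;

    public DeferredDeleter(Func<string, bool> isFileBeingUsed)
    {
        m_isFileBeingUsed = isFileBeingUsed;
        m_processRemovals = new Timer(_ => ProcessRemovals(), null, Timeout.Infinite, Timeout.Infinite);
    }

    public void AddFileToDelete(string path)
    {
        lock (m_syncRoot)
        {
            if (!m_isFileBeingUsed(path))
            {
                System.IO.File.Delete(path);
                return;
            }
            m_filesToDelete.Add(path);
            m_processRemovals.Change(1000, Timeout.Infinite); // retry in about a second
        }
    }

    private void ProcessRemovals()
    {
        lock (m_syncRoot)
        {
            m_filesToDelete.RemoveAll(path =>
            {
                if (m_isFileBeingUsed(path))
                    return false;              // still busy, keep it for the next pass
                System.IO.File.Delete(path);
                return true;
            });

            if (m_filesToDelete.Count > 0)
                m_processRemovals.Change(1000, Timeout.Infinite);
        }
    }
}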
Example #6
        private void CollectionRunning(object sender, EventArgs <ScheduledTaskRunningReason> e)
        {
            m_collection.Start(1000);

            m_countHistory.Enqueue(m_queue.Count);
            if (m_countHistory.Count >= 60)
            {
                int objectsCreated = Interlocked.Exchange(ref m_objectsCreated, 0);
                //If the queue always held more than the target number of items over the past 60 seconds,
                //remove some items.
                //However, don't remove items if the pool ever ran dry and objects had to be created.
                int min = m_countHistory.Min();
                m_countHistory.Clear();

                if (objectsCreated > 250)
                {
                    Log.Publish(MessageLevel.Error, MessageFlags.PerformanceIssue | MessageFlags.UsageIssue, "Items Created since last collection cycle.", (objectsCreated).ToString());
                }
                else if (objectsCreated > 50)
                {
                    Log.Publish(MessageLevel.Warning, "Items Created since last collection cycle.", (objectsCreated).ToString());
                }
                else if (objectsCreated > 0)
                {
                    Log.Publish(MessageLevel.Info, "Items Created since last collection cycle.", (objectsCreated).ToString());
                }

                if (min > m_targetCount && objectsCreated == 0)
                {
                    int itemsRemoved = (min - m_targetCount);
                    if (itemsRemoved > 250)
                    {
                        Log.Publish(MessageLevel.Error, MessageFlags.PerformanceIssue | MessageFlags.UsageIssue, "Removing items", itemsRemoved.ToString());
                    }
                    else if (itemsRemoved > 50)
                    {
                        Log.Publish(MessageLevel.Warning, "Removing items", itemsRemoved.ToString());
                    }
                    else if (itemsRemoved > 0)
                    {
                        Log.Publish(MessageLevel.Info, "Removing items", itemsRemoved.ToString());
                    }
                }

                while (min > m_targetCount && objectsCreated == 0)
                {
                    T item;
                    if (m_queue.TryDequeue(out item))
                    {
                        (item as IDisposable)?.Dispose();
                    }
                    else
                    {
                        return;
                    }
                    min--;
                }
            }
        }
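The collection handler above re-arms itself on entry, then, once a minute of samples has accumulated, decides whether the pool can shrink: only when the queue never dipped below the target and nothing had to be created. The trim rule in isolation, as a hedged sketch with invented names:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;

public static class PoolTrimmer
{
    // Sketch of the trim rule above: shrink only when the pool never fell below the target
    // over the sampling window and no object had to be created in that window.
    public static int Trim<T>(ConcurrentQueue<T> queue, Queue<int> countHistory, int targetCount, int objectsCreated)
    {
        if (countHistory.Count == 0)
            return 0;

        int min = countHistory.Min();
        countHistory.Clear();

        int removed = 0;
        while (min > targetCount && objectsCreated == 0)
        {
            T item;
            if (!queue.TryDequeue(out item))
                break;
            (item as IDisposable)?.Dispose();
            removed++;
            min--;
        }
        return removed;
    }
}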
Example #7
 public void TestMethod1()
 {
     m_task          = new ScheduledTask(ThreadingMode.DedicatedBackground, ThreadPriority.Highest);
     m_task.Running += task_Running;
     m_task.Start(10);
     Thread.Sleep(1000);
     m_task.Dispose();
     System.Console.WriteLine("Disposed");
 }
Example #8
        /// <summary>
        /// Attempts to connect to data input source.
        /// </summary>
        protected override void AttemptConnection()
        {
            m_nextPublicationTime            = ToPublicationTime(DateTime.UtcNow.Ticks);
            m_nextPublicationTimeWithLatency = m_nextPublicationTime + (long)(m_latency.Next() * TimeSpan.TicksPerMillisecond);

            if ((object)m_timer == null)
            {
                m_timer          = new ScheduledTask(ThreadingMode.ThreadPool);
                m_timer.Running += m_timer_Running;
            }
            if ((object)m_statusUpdate == null)
            {
                m_statusUpdate          = new ScheduledTask(ThreadingMode.ThreadPool);
                m_statusUpdate.Running += m_statusUpdate_Running;
            }
            m_timer.Start();
            m_statusUpdate.Start(10000);
        }
Example #9
 public void RoutingComplete()
 {
     if (MeasurementsToRoute.Count > 0)
     {
         m_pendingMeasurements.Enqueue(MeasurementsToRoute);
         MeasurementsToRoute = new List <IMeasurement>();
         m_task.Start();
     }
 }
Example #10
 void task_Running(object sender, EventArgs <ScheduledTaskRunningReason> e)
 {
     if (e.Argument == ScheduledTaskRunningReason.Disposing)
     {
         return;
     }
     m_task.Start(10);
     m_count++;
     System.Console.WriteLine(m_count);
 }
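Example #10 is the recurring idiom used throughout this page: the `Running` handler re-arms the task as its first action and bails out when the reason is `Disposing`, which is what turns a one-shot task into a periodic one that stops cleanly. Below is a standalone sketch of the same loop using `System.Threading.Timer`; the class name and the lock-based dispose guard are invented for illustration.

using System;
using System.Threading;

// Standalone sketch of the self-rescheduling handler pattern: the callback re-arms the
// timer as its first action and skips both the re-arm and the work once disposal begins.
public sealed class SelfReschedulingWorker : IDisposable
{
    private readonly object m_syncRoot = new object();
    private readonly Timer m_timer;
    private bool m_disposing;
    private int m_count;

    public SelfReschedulingWorker()
    {
        m_timer = new Timer(_ => Running(), null, Timeout.Infinite, Timeout.Infinite);
    }

    public void Start(int delayMilliseconds) => m_timer.Change(delayMilliseconds, Timeout.Infinite);

    private void Running()
    {
        lock (m_syncRoot)
        {
            if (m_disposing)
                return;                           // mirrors the Disposing reason check above
            m_timer.Change(10, Timeout.Infinite); // schedule the next run first
        }
        m_count++;
        Console.WriteLine(m_count);               // the actual work
    }

    public void Dispose()
    {
        lock (m_syncRoot)
        {
            m_disposing = true;
            m_timer.Dispose();
        }
    }
}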
Example #11
        void TestTimed(ThreadingMode mode)
        {
            const int Count = 1000000000;
            Stopwatch sw    = new Stopwatch();

            m_doWorkCount = 0;
            using (ScheduledTask work = new ScheduledTask(mode))
            {
                work.Running += work_DoWork;

                sw.Start();
                for (int x = 0; x < 1000; x++)
                {
                    work.Start(1);
                    work.Start();
                }

                sw.Stop();
            }
            m_doWorkCount = 0;
            sw.Reset();

            using (ScheduledTask work = new ScheduledTask(mode))
            {
                work.Running += work_DoWork;

                sw.Start();
                for (int x = 0; x < Count; x++)
                {
                    work.Start(1000);
                    work.Start();
                }

                sw.Stop();
            }

            Console.WriteLine(mode.ToString());
            Console.WriteLine(" Fire Event Count: " + m_doWorkCount.ToString());
            Console.WriteLine("  Fire Event Rate: " + (m_doWorkCount / sw.Elapsed.TotalSeconds / 1000000).ToString("0.00"));
            Console.WriteLine(" Total Calls Time: " + sw.Elapsed.TotalMilliseconds.ToString("0.0") + "ms");
            Console.WriteLine(" Total Calls Rate: " + (Count / sw.Elapsed.TotalSeconds / 1000000).ToString("0.00"));
            Console.WriteLine();
        }
Example #12
            public void Start()
            {
                lock (this)
                {
                    //Check if the last queued request has been processed; if not, quit.
                    if (m_time.HasValue)
                    {
                        return;
                    }

                    m_time = ShortTime.Now;
                    if (m_delay == 0)
                    {
                        m_task.Start();
                    }
                    else
                    {
                        m_task.Start(m_delay);
                    }
                }
            }
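Example #12 guards against double-starting by recording a start time: once `m_time` has a value the previous request is considered in flight and a second `Start()` returns immediately, and a delay of zero means run now rather than on a timer. The same guard in isolation, sketched with invented names and a hypothetical `Action<int?>` callback in place of the task:

using System;

// Sketch of the double-start guard: a nullable timestamp doubles as the "already queued"
// flag, and a zero delay is treated as "start immediately".
public sealed class GuardedStarter
{
    private readonly object m_sync = new object();
    private readonly Action<int?> m_startTask; // null delay = start now (hypothetical callback)
    private readonly int m_delay;
    private DateTime? m_time;

    public GuardedStarter(Action<int?> startTask, int delayMilliseconds)
    {
        m_startTask = startTask;
        m_delay = delayMilliseconds;
    }

    public void Start()
    {
        lock (m_sync)
        {
            if (m_time.HasValue)
                return;                 // the last request has not been processed yet

            m_time = DateTime.UtcNow;
            m_startTask(m_delay == 0 ? (int?)null : m_delay);
        }
    }

    // Called by the task once the queued work has been processed, re-enabling Start().
    public void Reset()
    {
        lock (m_sync)
            m_time = null;
    }
}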
Example #13
 public void ScheduledTask_Test()
 {
    // create and start the task
    var task = new ScheduledTask("Task", ct => Thread.Sleep(500), 100);
    task.Changed += (s, e) => ReportAction(e);
    task.Start();
    // allow the task time to be scheduled and run
    Thread.Sleep(5000);
    // stop the task
    task.Stop();
    // wait for completion (only in unit tests)
    task.Wait();
 }
Example #14
 /// <summary>
 /// Triggers a rollover if the provided transaction id has not yet been triggered.
 /// This method does not block.
 /// </summary>
 /// <param name="transactionId">the transaction id to execute the commit on.</param>
 public void Commit(long transactionId)
 {
     lock (m_syncRoot)
     {
         if (m_stopped)
         {
             if (m_disposed)
             {
                 Log.Publish(MessageLevel.Warning, "Disposed Object", "A call to Commit() occured after this class disposed");
                 return;
             }
             else
             {
                 Log.Publish(MessageLevel.Warning, "Writer Stopped", "A call to Commit() occured after this class was stopped");
                 return;
             }
         }
         if (transactionId > m_currentTransactionIdRollingOver)
         {
             m_rolloverTask.Start();
         }
     }
 }
Example #15
        public void ScheduledTask_Test()
        {
            // create and start the task
            var task = new ScheduledTask("Task", ct => Thread.Sleep(500), 100);

            task.Changed += (s, e) => ReportAction(e);
            task.Start();
            // allow the task time to be scheduled and run
            Thread.Sleep(5000);
            // stop the task
            task.Stop();
            // wait for completion (only in unit tests)
            task.Wait();
        }
Example #16
 public void Test()
 {
     using (ScheduledTask work = new ScheduledTask(ThreadingMode.DedicatedForeground))
     {
         work.Running += work_DoWork;
         work.Running += work_CleanupWork;
         work.Start();
     }
     double x = 1;
     while (x > 3)
     {
         x--;
     }
 }
Example #17
 void BlastStartMethod(object obj)
 {
     try
     {
         ScheduledTask task  = (ScheduledTask)obj;
         const int     Count = 100000000;
         for (int x = 0; x < Count; x++)
         {
             task.Start();
         }
     }
     catch (Exception)
     {
     }
 }
Example #18
        /// <summary>
        /// Creates a new Resource Queue.
        /// </summary>
        /// <param name="instance">A delegate that will return the necessary queue.</param>
        /// <param name="targetCount">the ideal number of objects that are always pending on the queue.</param>
        public DynamicObjectPool(Func <T> instance, int targetCount)
        {
            if (instance == null)
            {
                throw new ArgumentNullException(nameof(instance));
            }

            m_targetCount         = targetCount;
            m_countHistory        = new Queue <int>(100);
            m_instanceObject      = instance;
            m_queue               = new ConcurrentQueue <T>();
            m_collection          = new ScheduledTask();
            m_collection.Running += CollectionRunning;
            m_collection.Start(1000);
        }
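The constructor above wires a `Running` handler and then arms the task one second out, turning the pool's collection cycle into a once-a-second loop. A hypothetical sketch of the pool side of that wiring: `Take()` either reuses a pooled object or creates one (counting creations), and the periodic callback samples the queue depth so a later trim decision (see the sketch after Example #6) has data to work with. All names here are invented.

using System;
using System.Collections.Concurrent;
using System.Threading;

public sealed class DynamicPool<T>
{
    private readonly Func<T> m_instanceObject;
    private readonly ConcurrentQueue<T> m_queue = new ConcurrentQueue<T>();
    private readonly ConcurrentQueue<int> m_countHistory = new ConcurrentQueue<int>();
    private readonly Timer m_collection;
    private int m_objectsCreated;

    public DynamicPool(Func<T> instance)
    {
        m_instanceObject = instance ?? throw new ArgumentNullException(nameof(instance));
        m_collection = new Timer(_ => CollectionRunning(), null, 1000, Timeout.Infinite);
    }

    public T Take()
    {
        T item;
        if (m_queue.TryDequeue(out item))
            return item;

        Interlocked.Increment(ref m_objectsCreated); // pool ran dry, remember that
        return m_instanceObject();
    }

    public void Return(T item) => m_queue.Enqueue(item);

    private void CollectionRunning()
    {
        m_collection.Change(1000, Timeout.Infinite); // re-arm, like m_collection.Start(1000)
        m_countHistory.Enqueue(m_queue.Count);       // one depth sample per second
        // Trimming based on m_countHistory and m_objectsCreated would go here.
    }
}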
Example #19
 /// <summary>
 /// Creates a stage writer.
 /// </summary>
 /// <param name="settings">the settings for this stage</param>
 /// <param name="archiveList">the archive list</param>
 /// <param name="rolloverLog">the rollover log</param>
 public CombineFiles(CombineFilesSettings settings, ArchiveList <TKey, TValue> archiveList, RolloverLog rolloverLog)
     : base(MessageClass.Framework)
 {
     m_settings = settings.CloneReadonly();
     m_settings.Validate();
     m_archiveList                      = archiveList;
     m_createNextStageFile              = new SimplifiedArchiveInitializer <TKey, TValue>(settings.ArchiveSettings);
     m_rolloverLog                      = rolloverLog;
     m_rolloverComplete                 = new ManualResetEvent(false);
     m_syncRoot                         = new object();
     m_rolloverTask                     = new ScheduledTask(ThreadingMode.DedicatedForeground, ThreadPriority.BelowNormal);
     m_rolloverTask.Running            += OnExecute;
     m_rolloverTask.UnhandledException += OnException;
     m_rolloverTask.Start(m_settings.ExecuteTimer);
 }
Example #20
        public void Test()
        {
            using (ScheduledTask work = new ScheduledTask(ThreadingMode.DedicatedForeground))
            {
                work.Running += work_DoWork;
                work.Running += work_CleanupWork;
                work.Start();
            }
            double x = 1;

            while (x > 3)
            {
                x--;
            }
        }
Example #21
        void TestConcurrent(ThreadingMode mode)
        {
            int workCount;

            const int Count = 100000000;
            Stopwatch sw    = new Stopwatch();

            m_doWorkCount = 0;
            using (ScheduledTask work = new ScheduledTask(mode))
            {
                work.Running += work_DoWork;

                sw.Start();
                for (int x = 0; x < 1000; x++)
                {
                    work.Start();
                }

                sw.Stop();
            }
            m_doWorkCount = 0;
            sw.Reset();

            using (ScheduledTask work = new ScheduledTask(mode))
            {
                work.Running += work_DoWork;


                sw.Start();
                ThreadPool.QueueUserWorkItem(BlastStartMethod, work);
                ThreadPool.QueueUserWorkItem(BlastStartMethod, work);

                for (int x = 0; x < Count; x++)
                {
                    work.Start();
                }
                workCount = m_doWorkCount;
                sw.Stop();
                Thread.Sleep(100);
            }

            Console.WriteLine(mode.ToString());
            Console.WriteLine(" Fire Event Count: " + workCount.ToString());
            Console.WriteLine("  Fire Event Rate: " + (workCount / sw.Elapsed.TotalSeconds / 1000000).ToString("0.00"));
            Console.WriteLine(" Total Calls Time: " + sw.Elapsed.TotalMilliseconds.ToString("0.0") + "ms");
            Console.WriteLine(" Total Calls Rate: " + (Count / sw.Elapsed.TotalSeconds / 1000000).ToString("0.00"));
            Console.WriteLine();
        }
Example #22
        /// <summary>
        /// Creates a <see cref="RouteMappingHighLatencyLowCpu"/>
        /// </summary>
        public RouteMappingHighLatencyLowCpu()
        {
            m_lastStatusUpdate       = ShortTime.Now;
            m_maxPendingMeasurements = 1000;
            m_routeLatency           = OptimizationOptions.RoutingLatency;
            m_batchSize    = OptimizationOptions.RoutingBatchSize;
            m_inboundQueue = new ConcurrentQueue <List <IMeasurement> >();

            m_task                     = new ScheduledTask(ThreadingMode.DedicatedBackground, ThreadPriority.AboveNormal);
            m_task.Running            += m_task_Running;
            m_task.UnhandledException += m_task_UnhandledException;
            m_task.Disposing          += m_task_Disposing;
            m_task.Start(m_routeLatency);

            m_onStatusMessage    = x => { };
            m_onProcessException = x => { };
            m_globalCache        = new GlobalCache(new Dictionary <IAdapter, Consumer>(), 0);
            RouteCount           = m_globalCache.GlobalSignalLookup.Count(x => x != null);
        }
Example #23
        public void CanRunRegular()
        {
            var           times = 0;
            ScheduledTask s     = null;

            s = new ScheduledTask(() => {
                Console.WriteLine("CanRunRegular " + times);
                times++;
                if (times == 3)
                {
                    s.Stop();
                }
            })
            {
                StartInterval = 10, AfterSuccessInterval = 10
            };
            s.Start();
            Thread.Sleep(1000);
            Assert.AreEqual(3, times);
        }
Example #24
 public void ScheduledTask_WithCancellation_Test()
 {
    // create and start the task
    var task = new ScheduledTask("Task", ct => Thread.Sleep(500), 100);
    task.Changed += (s, e) => ReportAction(e);
    task.Start();
    // schedule a cancellation in 1 to 5 seconds
    var cancelTask = Task.Factory.StartNew(() =>
    {
       var random = new Random();
       Thread.Sleep(random.Next(1000, 5000));
       task.Cancel();
    });
    // allow the task time to be scheduled and run
    Thread.Sleep(5000);
    // stop the task
    task.Stop();
    // wait for completion (only in unit tests)
    task.Wait();
    // wait for completion of cancellation task
    cancelTask.Wait();
 }
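The cancellation test passes a body that receives a cancellation token (`ct => Thread.Sleep(500)`) and later calls `task.Cancel()` from a second task. Plain `Task` and `CancellationTokenSource` machinery expresses the same shape without the wrapper; the sketch below only illustrates the cooperative-cancellation idea the test exercises, with the 100 ms reschedule interval and the 1 to 5 second cancel window taken from the test.

using System;
using System.Threading;
using System.Threading.Tasks;

public static class CancellationSketch
{
    public static void Run()
    {
        using (var cts = new CancellationTokenSource())
        {
            CancellationToken ct = cts.Token;

            // The recurring body: honors the token between iterations.
            Task worker = Task.Run(() =>
            {
                while (!ct.IsCancellationRequested)
                {
                    Thread.Sleep(500);          // the scheduled work
                    ct.WaitHandle.WaitOne(100); // roughly the 100 ms reschedule interval
                }
            });

            // Cancel somewhere between 1 and 5 seconds later, as in the test above.
            Task canceller = Task.Run(() =>
            {
                Thread.Sleep(new Random().Next(1000, 5000));
                cts.Cancel();
            });

            Task.WaitAll(worker, canceller);    // "wait for completion", as the test does
        }
    }
}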
Example #25
        /// <summary>
        /// Creates a new Resource Queue.
        /// </summary>
        /// <param name="instance">A delegate that will return the necessary queue.</param>
        /// <param name="lifetimeInSeconds">The age of the object before it is expired.</param>
        public ExpireObjectPool(Func <T> instance, int lifetimeInSeconds)
        {
            if (instance == null)
            {
                throw new ArgumentNullException(nameof(instance));
            }

            m_syncRoot           = new object();
            m_lifeCycleInSeconds = lifetimeInSeconds;

            m_instanceObject = instance;
            m_queue          = new Queue <T>();
            m_allocations    = 0;
            m_enqueueCount   = 0;
            m_dequeueCount   = 0;
            m_maxQueueSize   = 0;
            m_minQueueSize   = 0;

            m_collection          = new ScheduledTask();
            m_collection.Running += CollectionRunning;
            m_collection.Start(1000 * m_lifeCycleInSeconds);
        }
Example #26
        /// <summary>
        /// Creates a <see cref="RouteMappingHighLatencyLowCpu"/>
        /// </summary>
        /// <param name="routeLatency">The desired wait latency. Must be between 1 and 500ms inclusive</param>
        public RouteMappingHighLatencyLowCpu(int routeLatency)
        {
            if (routeLatency < 1 || routeLatency > 500)
            {
                throw new ArgumentOutOfRangeException("routeLatency", "Must be between 1 and 500ms");
            }

            m_maxPendingMeasurements = 1000;
            m_routeLatency           = routeLatency;
            m_list                     = new ConcurrentQueue <IMeasurement[]>();
            m_task                     = new ScheduledTask(ThreadingMode.DedicatedBackground, ThreadPriority.AboveNormal);
            m_task.Running            += m_task_Running;
            m_task.UnhandledException += m_task_UnhandledException;
            m_task.Disposing          += m_task_Disposing;
            m_task.Start(m_routeLatency);

            m_onStatusMessage              = x => { };
            m_onProcessException           = x => { };
            m_producerLookup               = new Dictionary <IAdapter, LocalCache>();
            m_globalCache                  = new GlobalCache(new Dictionary <IAdapter, Consumer>(), 0);
            m_injectMeasurementsLocalCache = new LocalCache(this, null);
        }
Example #27
        public void ScheduledTask_WithCancellation_Test()
        {
            // create and start the task
            var task = new ScheduledTask("Task", ct => Thread.Sleep(500), 100);

            task.Changed += (s, e) => ReportAction(e);
            task.Start();
            // schedule a cancellation in 1 to 5 seconds
            var cancelTask = Task.Factory.StartNew(() =>
            {
                var random = new Random();
                Thread.Sleep(random.Next(1000, 5000));
                task.Cancel();
            });

            // allow the task time to be scheduled and run
            Thread.Sleep(5000);
            // stop the task
            task.Stop();
            // wait for completion (only in unit tests)
            task.Wait();
            // wait for completion of cancellation task
            cancelTask.Wait();
        }
Example #28
        private void AsyncDoWork(object sender, EventArgs eventArgs)
        {
            if (BeforeExecuteQuery != null)
            {
                BeforeExecuteQuery(this, EventArgs.Empty);
            }
            DateTime            startTime;
            DateTime            stopTime;
            DateTime            currentTime;
            object              token;
            List <MetadataBase> activeSignals;

            lock (m_syncRoot)
            {
                token          = m_requestToken;
                m_requestToken = null;
                if (Mode == ExecutionMode.Manual)
                {
                    startTime     = m_lowerBounds;
                    stopTime      = m_upperBounds;
                    currentTime   = m_focusedDate;
                    activeSignals = m_activeSignals;
                }
                else
                {
                    m_playback.GetTimes(out startTime, out stopTime);
                    currentTime   = stopTime;
                    activeSignals = m_activeSignals;
                }
            }

            IDictionary <Guid, SignalDataBase> results = m_query.GetQueryResult(startTime, stopTime, 0, activeSignals);

            QueryResultsEventArgs queryResults = new QueryResultsEventArgs(results, token, startTime, stopTime);

            if (AfterQuery != null)
            {
                AfterQuery(this, EventArgs.Empty);
            }

            if (NewQueryResults != null)
            {
                NewQueryResults.ParallelRunAndWait(this, queryResults);
            }
            if (SynchronousNewQueryResults != null || ParallelWithControlLockNewQueryResults != null)
            {
                m_syncEvent.RaiseEvent(new QueryResultsEventArgs(results, token, startTime, stopTime));
            }

            lock (m_syncRoot)
            {
                if (Mode == ExecutionMode.Automatic)
                {
                    m_async.Start(m_refreshInterval.Milliseconds);
                }
            }

            if (AfterExecuteQuery != null)
            {
                AfterExecuteQuery(this, EventArgs.Empty);
            }
        }
Example #29
        private void OnExecute(object sender, EventArgs <ScheduledTaskRunningReason> e)
        {
            //The worker can be disposed either via the Stop() method or
            //the Dispose() method.  If via the dispose method, then
            //don't do any cleanup.
            if (m_disposed && e.Argument == ScheduledTaskRunningReason.Disposing)
            {
                return;
            }

            //go ahead and schedule the next rollover since nothing
            //will happen until this function exits anyway.
            //if the task is disposing, the following line does nothing.
            m_rolloverTask.Start(m_settings.ExecuteTimer);

            lock (m_syncRoot)
            {
                if (m_disposed)
                {
                    return;
                }

                using (ArchiveListSnapshot <TKey, TValue> resource = m_archiveList.CreateNewClientResources())
                {
                    resource.UpdateSnapshot();

                    List <ArchiveTableSummary <TKey, TValue> > list = new List <ArchiveTableSummary <TKey, TValue> >();
                    List <Guid> listIds = new List <Guid>();

                    for (int x = 0; x < resource.Tables.Length; x++)
                    {
                        ArchiveTableSummary <TKey, TValue> table = resource.Tables[x];

                        if (table.SortedTreeTable.BaseFile.Snapshot.Header.Flags.Contains(m_settings.MatchFlag) && table.SortedTreeTable.BaseFile.Snapshot.Header.Flags.Contains(FileFlags.IntermediateFile))
                        {
                            list.Add(table);
                            listIds.Add(table.FileId);
                        }
                        else
                        {
                            resource.Tables[x] = null;
                        }
                    }

                    bool shouldRollover = list.Count >= m_settings.CombineOnFileCount;

                    long size = 0;

                    for (int x = 0; x < list.Count; x++)
                    {
                        size += list[x].SortedTreeTable.BaseFile.ArchiveSize;
                        if (size > m_settings.CombineOnFileSize)
                        {
                            if (x != list.Count - 1)//If not the last entry
                            {
                                list.RemoveRange(x + 1, list.Count - x - 1);
                            }
                            break;
                        }
                    }
                    if (size > m_settings.CombineOnFileSize)
                    {
                        shouldRollover = true;
                    }

                    if (shouldRollover)
                    {
                        TKey startKey = new TKey();
                        TKey endKey   = new TKey();
                        startKey.SetMax();
                        endKey.SetMin();

                        foreach (Guid fileId in listIds)
                        {
                            ArchiveTableSummary <TKey, TValue> table = resource.TryGetFile(fileId);
                            if (table is null)
                            {
                                throw new Exception("File not found");
                            }

                            if (!table.IsEmpty)
                            {
                                if (startKey.IsGreaterThan(table.FirstKey))
                                {
                                    table.FirstKey.CopyTo(startKey);
                                }
                                if (endKey.IsLessThan(table.LastKey))
                                {
                                    table.LastKey.CopyTo(endKey);
                                }
                            }
                        }

                        RolloverLogFile logFile = null;

                        Action <Guid> createLog = (x) =>
                        {
                            logFile = m_rolloverLog.Create(listIds, x);
                        };

                        using (UnionReader <TKey, TValue> reader = new UnionReader <TKey, TValue>(list))
                        {
                            SortedTreeTable <TKey, TValue> dest = m_createNextStageFile.CreateArchiveFile(startKey, endKey, size, reader, createLog);

                            resource.Dispose();

                            using (ArchiveListEditor <TKey, TValue> edit = m_archiveList.AcquireEditLock())
                            {
                                //Add the newly created file.
                                edit.Add(dest);

                                foreach (ArchiveTableSummary <TKey, TValue> table in list)
                                {
                                    edit.TryRemoveAndDelete(table.FileId);
                                }
                            }
                        }

                        if (logFile != null)
                        {
                            logFile.Delete();
                        }
                    }

                    resource.Dispose();
                }

                m_rolloverComplete.Set();
            }
        }
Example #30
        private void CollectionRunning(object sender, EventArgs <ScheduledTaskRunningReason> e)
        {
            m_collection.Start(1000 * m_lifeCycleInSeconds);

            StringBuilder sb = new StringBuilder(1000);

            int allocations = 0;

            lock (m_syncRoot)
            {
                //Determines how many items in this list have expired based on time.
                int itemsProcessedPerCollectionCycle = Math.Max(m_enqueueCount, m_dequeueCount);
                int error             = Math.Max(4, itemsProcessedPerCollectionCycle >> 2); //Allow 25% error.
                int expiredItemsCount = m_queue.Count - itemsProcessedPerCollectionCycle - error;

                //Determine how many items in this list are never accessed.
                //The minimum size of the queue during this window, minus 25% of the range of the queue.
                int listSizeExcess = m_minQueueSize - ((m_maxQueueSize - m_minQueueSize) >> 2);

                int retireItems = Math.Max(listSizeExcess, expiredItemsCount);

                //Don't shrink the list based on the number of allocations.
                for (int x = m_allocations; x < retireItems; x++)
                {
                    (m_queue.Dequeue() as IDisposable)?.Dispose();
                }

                sb.Append("Allocations: ");
                sb.Append(m_allocations);
                sb.Append(" Enqueue Count: ");
                sb.Append(m_enqueueCount);
                sb.Append(" Dequeue Count: ");
                sb.Append(m_dequeueCount);

                sb.Append(" Max Queue Size: ");
                sb.Append(m_maxQueueSize);

                sb.Append(" Min Queue Size: ");
                sb.Append(m_minQueueSize);

                sb.Append(" Expired Items Count: ");
                sb.Append(expiredItemsCount);

                sb.Append(" List Size Excess: ");
                sb.Append(listSizeExcess);

                allocations    = m_allocations;
                m_allocations  = 0;
                m_enqueueCount = 0;
                m_dequeueCount = 0;
                m_maxQueueSize = m_queue.Count;
                m_minQueueSize = m_queue.Count;
            }

            if (allocations > 250)
            {
                m_log.Publish(MessageLevel.Error, MessageFlags.PerformanceIssue | MessageFlags.UsageIssue, "Object Pool is likely not optimized", sb.ToString());
            }
            else if (allocations > 50)
            {
                m_log.Publish(MessageLevel.Warning, "Items Created since last collection cycle.", sb.ToString());
            }
            else if (allocations > 0)
            {
                m_log.Publish(MessageLevel.Info, "Items Created since last collection cycle.", sb.ToString());
            }
        }
Example #31
        void m_task_Running(object sender, EventArgs <ScheduledTaskRunningReason> e)
        {
            if (e.Argument == ScheduledTaskRunningReason.Disposing)
            {
                return;
            }

            m_task.Start(m_routeLatency);

            m_routeOperations++;

            if (m_lastStatusUpdate.ElapsedSeconds() > 15)
            {
                m_lastStatusUpdate = ShortTime.Now;
                Log.Publish(MessageLevel.Info, MessageFlags.None, "Routing Update",
                            string.Format("Route Operations: {0}, Input Frames: {1}, Input Measurements: {2}, Output Measurements: {3}",
                                          m_routeOperations, m_measurementsRoutedInputFrames,
                                          m_measurementsRoutedInputMeasurements,
                                          m_measurementsRoutedOutput));
            }

            var map = m_globalCache;

            try
            {
                int measurementsRouted = 0;

                List <IMeasurement> measurements;
                while (m_inboundQueue.TryDequeue(out measurements))
                {
                    measurementsRouted += measurements.Count;
                    Interlocked.Add(ref m_pendingMeasurements, -measurements.Count);

                    //For loops are faster than ForEach for List<T>
                    //Process Optimized Consumers
                    for (int x = 0; x < map.RoutingPassthroughAdapters.Count; x++)
                    {
                        map.RoutingPassthroughAdapters[x].ProcessMeasurementList(measurements);
                    }

                    //Process Broadcast Consumers
                    for (int x = 0; x < map.BroadcastConsumers.Count; x++)
                    {
                        m_measurementsRoutedOutput += measurements.Count;
                        map.BroadcastConsumers[x].MeasurementsToRoute.AddRange(measurements);
                    }

                    m_measurementsRoutedInputFrames++;
                    m_measurementsRoutedInputMeasurements += measurements.Count;
                    for (int x = 0; x < measurements.Count; x++)
                    {
                        var             measurement = measurements[x];
                        List <Consumer> consumers   = map.GlobalSignalLookup[measurement.Key.RuntimeID];
                        if (consumers != null)
                        {
                            for (int i = 0; i < consumers.Count; i++)
                            {
                                m_measurementsRoutedOutput++;
                                consumers[i].MeasurementsToRoute.Add(measurement);
                            }
                        }
                    }

                    //If any adapter has too many measurements in its batch,
                    //route every adapter's measurements.
                    if (measurementsRouted > m_batchSize)
                    {
                        measurementsRouted = 0;
                        foreach (var consumer in map.NormalDestinationAdapters)
                        {
                            measurementsRouted = Math.Max(measurementsRouted, consumer.MeasurementsToRoute.Count);
                            if (consumer.MeasurementsToRoute.Count > m_batchSize)
                            {
                                foreach (var c2 in map.NormalDestinationAdapters)
                                {
                                    c2.RoutingComplete();
                                }
                                measurementsRouted = 0;
                                break;
                            }
                        }
                    }
                }
            }
            finally
            {
                foreach (var consumer in map.NormalDestinationAdapters)
                {
                    consumer.RoutingComplete();
                }
            }
        }
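A recurring detail in the routing handlers above: measurements accumulate per consumer, and as soon as any one consumer's pending list crosses the batch size every consumer is flushed, with a final flush in the `finally` block so nothing is left behind when the drain ends. A reduced sketch of that flush discipline, with invented types and a hypothetical `Action<int, List<T>>` delivery sink:

using System;
using System.Collections.Generic;

// Reduced sketch of the batch-flush rule: route into per-consumer lists, flush everyone
// when any list crosses the batch size, and always flush once more when the drain ends.
public sealed class BatchRouter<T>
{
    private readonly List<List<T>> m_consumers;
    private readonly Action<int, List<T>> m_deliver; // consumer index + its batch
    private readonly int m_batchSize;

    public BatchRouter(int consumerCount, int batchSize, Action<int, List<T>> deliver)
    {
        m_consumers = new List<List<T>>();
        for (int i = 0; i < consumerCount; i++)
            m_consumers.Add(new List<T>());
        m_batchSize = batchSize;
        m_deliver = deliver;
    }

    public void Drain(IEnumerable<(T item, int consumerIndex)> inbound)
    {
        try
        {
            foreach (var (item, consumerIndex) in inbound)
            {
                List<T> batch = m_consumers[consumerIndex];
                batch.Add(item);

                if (batch.Count > m_batchSize)
                    FlushAll();             // one full consumer flushes everybody
            }
        }
        finally
        {
            FlushAll();                     // never leave a partial batch behind
        }
    }

    private void FlushAll()
    {
        for (int i = 0; i < m_consumers.Count; i++)
        {
            if (m_consumers[i].Count > 0)
            {
                m_deliver(i, m_consumers[i]);
                m_consumers[i] = new List<T>();
            }
        }
    }
}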
Example #32
        void m_task_Running(object sender, EventArgs <ScheduledTaskRunningReason> e)
        {
            if (e.Argument == ScheduledTaskRunningReason.Disposing)
            {
                return;
            }

            m_task.Start(m_routeLatency);

            m_routeOperations++;

            if (m_routeOperations % 1000 == 0)
            {
                m_onStatusMessage(string.Format(
                                      "Route Operations: {0}, Input Frames: {1}, Input Measurements: {2}, Output Measurements: {3}",
                                      m_routeOperations, m_measurementsRoutedInputFrames,
                                      m_measurementsRoutedInputMeasurements,
                                      m_measurementsRoutedOutput));
            }

            var map = m_globalCache;

            try
            {
                IMeasurement[] measurements;
                while (m_list.TryDequeue(out measurements))
                {
                    Interlocked.Add(ref m_pendingMeasurements, -measurements.Length);

                    m_measurementsRoutedInputFrames++;
                    m_measurementsRoutedInputMeasurements += measurements.Length;
                    foreach (var measurement in measurements)
                    {
                        List <Consumer> consumers;
                        if (!map.GlobalSignalLookup.TryGetValue(measurement.ID, out consumers))
                        {
                            consumers = map.BroadcastConsumers;
                        }

                        // Add this measurement to each consumer's routing list
                        for (int index = 0; index < consumers.Count; index++)
                        {
                            var consumer = consumers[index];
                            m_measurementsRoutedOutput++;
                            consumer.MeasurementsToRoute.Add(measurement);
                        }
                    }

                    //Limit routing to no more than 1000 measurements per sub-route.
                    foreach (var consumer in map.GlobalDestinationList)
                    {
                        if (consumer.MeasurementsToRoute.Count > 1000)
                        {
                            foreach (var c2 in map.GlobalDestinationLookup.Values)
                            {
                                c2.RoutingComplete();
                            }
                            break;
                        }
                    }
                }
            }
            finally
            {
                foreach (var consumer in map.GlobalDestinationList)
                {
                    consumer.RoutingComplete();
                }
            }
        }
Example #33
        /// <summary>
        /// Appends this data to this stage. Also queues up for deletion if necessary.
        /// </summary>
        /// <param name="args">arguments handed to this class from either the
        /// PrestageWriter or another StageWriter of a previous generation</param>
        /// <remarks>
        /// This method must be called in a single threaded manner.
        /// </remarks>
        public void AppendData(PrebufferRolloverArgs <TKey, TValue> args)
        {
            if (m_stopped)
            {
                Log.Publish(MessageLevel.Info, "No new points can be added. Point queue has been stopped. Data in rollover will be lost");
                return;
            }
            if (m_disposed)
            {
                Log.Publish(MessageLevel.Info, "First stage writer has been disposed. Data in rollover will be lost");
                return;
            }

            SortedTreeFile file = SortedTreeFile.CreateInMemory(4096);
            SortedTreeTable <TKey, TValue> table = file.OpenOrCreateTable <TKey, TValue>(m_settings.EncodingMethod);

            using (SortedTreeTableEditor <TKey, TValue> edit = table.BeginEdit())
            {
                edit.AddPoints(args.Stream);
                edit.Commit();
            }

            bool shouldWait = false;

            //If there is data to write then write it to the current archive.
            lock (m_syncRoot)
            {
                if (m_stopped)
                {
                    Log.Publish(MessageLevel.Info, "No new points can be added. Point queue has been stopped. Data in rollover will be lost");
                    table.Dispose();
                    return;
                }
                if (m_disposed)
                {
                    Log.Publish(MessageLevel.Info, "First stage writer has been disposed. Data in rollover will be lost");
                    table.Dispose();
                    return;
                }

                using (ArchiveListEditor <TKey, TValue> edit = m_list.AcquireEditLock())
                {
                    edit.Add(table);
                }
                m_pendingTables1.Add(table);

                if (m_pendingTables1.Count == 10)
                {
                    using (UnionTreeStream <TKey, TValue> reader = new UnionTreeStream <TKey, TValue>(m_pendingTables1.Select(x => new ArchiveTreeStreamWrapper <TKey, TValue>(x)), true))
                    {
                        SortedTreeFile file1 = SortedTreeFile.CreateInMemory(4096);
                        SortedTreeTable <TKey, TValue> table1 = file1.OpenOrCreateTable <TKey, TValue>(m_settings.EncodingMethod);
                        using (SortedTreeTableEditor <TKey, TValue> edit = table1.BeginEdit())
                        {
                            edit.AddPoints(reader);
                            edit.Commit();
                        }

                        using (ArchiveListEditor <TKey, TValue> edit = m_list.AcquireEditLock())
                        {
                            //Add the newly created file.
                            edit.Add(table1);

                            foreach (SortedTreeTable <TKey, TValue> table2 in m_pendingTables1)
                            {
                                edit.TryRemoveAndDelete(table2.ArchiveId);
                            }
                        }

                        m_pendingTables2.Add(table1);
                        m_pendingTables1.Clear();
                    }
                }

                if (m_pendingTables2.Count == 10)
                {
                    using (UnionTreeStream <TKey, TValue> reader = new UnionTreeStream <TKey, TValue>(m_pendingTables2.Select(x => new ArchiveTreeStreamWrapper <TKey, TValue>(x)), true))
                    {
                        SortedTreeFile file1 = SortedTreeFile.CreateInMemory(4096);
                        SortedTreeTable <TKey, TValue> table1 = file1.OpenOrCreateTable <TKey, TValue>(m_settings.EncodingMethod);
                        using (SortedTreeTableEditor <TKey, TValue> edit = table1.BeginEdit())
                        {
                            edit.AddPoints(reader);
                            edit.Commit();
                        }

                        using (ArchiveListEditor <TKey, TValue> edit = m_list.AcquireEditLock())
                        {
                            //Add the newly created file.
                            edit.Add(table1);

                            foreach (SortedTreeTable <TKey, TValue> table2 in m_pendingTables2)
                            {
                                edit.TryRemoveAndDelete(table2.ArchiveId);
                            }
                        }

                        m_pendingTables3.Add(table1);
                        m_pendingTables2.Clear();
                    }
                }

                m_lastCommitedSequenceNumber.Value = args.TransactionId;

                long currentSizeMb = (m_pendingTables1.Sum(x => x.BaseFile.ArchiveSize) + m_pendingTables2.Sum(x => x.BaseFile.ArchiveSize)) >> 20;
                if (currentSizeMb > m_settings.MaximumAllowedMb)
                {
                    shouldWait = true;
                    m_rolloverTask.Start();
                    m_rolloverComplete.Reset();
                }
                else if (currentSizeMb > m_settings.RolloverSizeMb)
                {
                    m_rolloverTask.Start();
                }
                else
                {
                    m_rolloverTask.Start(m_settings.RolloverInterval);
                }
            }

            if (SequenceNumberCommitted != null)
            {
                SequenceNumberCommitted(args.TransactionId);
            }

            if (shouldWait)
            {
                Log.Publish(MessageLevel.NA, MessageFlags.PerformanceIssue, "Queue is full", "Rollover task is taking a long time. A long pause on the inputs is about to occur.");
                m_rolloverComplete.WaitOne();
            }
        }
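Example #33 applies backpressure: each commit re-arms the rollover task, but once the pending size crosses `MaximumAllowedMb` the writer also resets a `ManualResetEvent` and blocks on it until the rollover signals completion. A stripped-down sketch of that handshake, with invented names and a hypothetical `Action` standing in for starting the rollover task:

using System;
using System.Threading;

// Sketch of the backpressure handshake: the producer normally just nudges the rollover,
// but past a hard limit it blocks until the rollover thread signals that space was reclaimed.
public sealed class BackpressureWriter
{
    private readonly object m_syncRoot = new object();
    private readonly ManualResetEvent m_rolloverComplete = new ManualResetEvent(true);
    private readonly Action m_startRollover;   // hypothetical: kicks the rollover task
    private readonly long m_maximumAllowedMb;
    private long m_pendingMb;

    public BackpressureWriter(Action startRollover, long maximumAllowedMb)
    {
        m_startRollover = startRollover;
        m_maximumAllowedMb = maximumAllowedMb;
    }

    public void Append(long sizeMb)
    {
        bool shouldWait = false;

        lock (m_syncRoot)
        {
            m_pendingMb += sizeMb;
            if (m_pendingMb > m_maximumAllowedMb)
            {
                shouldWait = true;
                m_rolloverComplete.Reset();    // arm the gate before starting the rollover
            }
            m_startRollover();
        }

        if (shouldWait)
            m_rolloverComplete.WaitOne();      // block the producer until RolloverFinished()
    }

    // Called by the rollover worker once pending data has been moved out.
    public void RolloverFinished(long reclaimedMb)
    {
        lock (m_syncRoot)
            m_pendingMb -= reclaimedMb;
        m_rolloverComplete.Set();
    }
}

The key design point the sketch preserves is ordering: the event is reset inside the lock before the rollover is started, so the producer cannot miss the completion signal that follows.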