public void BenchmarkTwoFiles()
{
    MemoryPoolTest.TestMemoryLeak();
    const int Max = 1000000;

    ArchiveList<HistorianKey, HistorianValue> list = new ArchiveList<HistorianKey, HistorianValue>(null);
    HistorianKey key = new HistorianKey();
    HistorianValue value = new HistorianValue();

    SortedTreeTable<HistorianKey, HistorianValue> table1 = CreateTable();
    SortedTreeTable<HistorianKey, HistorianValue> table2 = CreateTable();
    AddData(table1, 100, 100, Max / 2);
    AddData(table2, 101, 100, Max / 2);

    using (ArchiveListEditor<HistorianKey, HistorianValue> editor = list.AcquireEditLock())
    {
        editor.Add(table1);
        editor.Add(table2);
    }

    SequentialReaderStream<HistorianKey, HistorianValue> sequencer = new SequentialReaderStream<HistorianKey, HistorianValue>(list);

    DebugStopwatch sw = new DebugStopwatch();
    double sec = sw.TimeEvent(() =>
    {
        SequentialReaderStream<HistorianKey, HistorianValue> scanner = sequencer;
        while (scanner.Read(key, value))
        {
        }
    });

    System.Console.WriteLine(Max / sec / 1000000);

    list.Dispose();
    MemoryPoolTest.TestMemoryLeak();
}
public void ConsoleTest3()
{
    MemoryPoolTest.TestMemoryLeak();

    ArchiveList<HistorianKey, HistorianValue> list = new ArchiveList<HistorianKey, HistorianValue>(null);
    DateTime start = DateTime.Now.Date;

    for (int x = 0; x < 3; x++)
    {
        SortedTreeTable<HistorianKey, HistorianValue> table1 = CreateTable();
        AddDataTerminal(table1, (ulong)x, start, new TimeSpan(TimeSpan.TicksPerSecond), (ulong)(1000 * x), 1, 60 * 60);
        using (ArchiveListEditor<HistorianKey, HistorianValue> editor = list.AcquireEditLock())
        {
            editor.Add(table1);
        }
    }

    for (int x = 0; x < 3; x++)
    {
        SortedTreeTable<HistorianKey, HistorianValue> table1 = CreateTable();
        AddDataTerminal(table1, (ulong)x, start, new TimeSpan(TimeSpan.TicksPerSecond), (ulong)(1000 * x), 1, 60 * 60);
        using (ArchiveListEditor<HistorianKey, HistorianValue> editor = list.AcquireEditLock())
        {
            editor.Add(table1);
        }
    }

    SeekFilterBase<HistorianKey> filter = TimestampSeekFilter.CreateFromIntervalData<HistorianKey>(start, start.AddMinutes(10), new TimeSpan(TimeSpan.TicksPerSecond * 60), new TimeSpan(TimeSpan.TicksPerSecond));
    SequentialReaderStream<HistorianKey, HistorianValue> sequencer = new SequentialReaderStream<HistorianKey, HistorianValue>(list, null, filter);

    SortedList<DateTime, FrameData> frames = sequencer.GetFrames();
    WriteToConsole(frames);

    list.Dispose();
    MemoryPoolTest.TestMemoryLeak();
}
public void TestOneFile()
{
    HistorianKey key1 = new HistorianKey();
    HistorianKey key2 = new HistorianKey();
    HistorianValue value1 = new HistorianValue();
    HistorianValue value2 = new HistorianValue();

    Logger.Console.Verbose = VerboseLevel.All;
    MemoryPoolTest.TestMemoryLeak();

    ArchiveList<HistorianKey, HistorianValue> list = new ArchiveList<HistorianKey, HistorianValue>(null);
    SortedTreeTable<HistorianKey, HistorianValue> master = CreateTable();
    SortedTreeTable<HistorianKey, HistorianValue> table1 = CreateTable();
    AddData(master, 100, 100, 100);
    AddData(table1, 100, 100, 100);

    using (ArchiveListEditor<HistorianKey, HistorianValue> editor = list.AcquireEditLock())
    {
        editor.Add(table1);
    }

    using (SortedTreeTableReadSnapshot<HistorianKey, HistorianValue> masterRead = master.BeginRead())
    {
        SortedTreeScannerBase<HistorianKey, HistorianValue> masterScan = masterRead.GetTreeScanner();
        masterScan.SeekToStart();
        TreeStreamSequential<HistorianKey, HistorianValue> masterScanSequential = masterScan.TestSequential();

        using (SequentialReaderStream<HistorianKey, HistorianValue> sequencer = new SequentialReaderStream<HistorianKey, HistorianValue>(list))
        {
            TreeStreamSequential<HistorianKey, HistorianValue> scanner = sequencer.TestSequential();
            int count = 0;

            //Every point read through the sequencer must match the master copy exactly.
            while (scanner.Read(key1, value1))
            {
                count++;
                if (!masterScanSequential.Read(key2, value2))
                {
                    throw new Exception();
                }
                if (!key1.IsEqualTo(key2))
                {
                    throw new Exception();
                }
                if (!value1.IsEqualTo(value2))
                {
                    throw new Exception();
                }
            }

            //Both streams must be exhausted at the same time.
            if (masterScan.Read(key2, value2))
            {
                throw new Exception();
            }
        }
    }

    list.Dispose();
    master.Dispose();
    MemoryPoolTest.TestMemoryLeak();
}
public void BenchmarkRealisticSamples()
{
    MemoryPoolTest.TestMemoryLeak();
    const int Max = 1000000;
    const int FileCount = 1000;

    ArchiveList<HistorianKey, HistorianValue> list = new ArchiveList<HistorianKey, HistorianValue>(null);
    DateTime start = DateTime.Now.Date;
    HistorianKey key = new HistorianKey();
    HistorianValue value = new HistorianValue();

    for (int x = 0; x < FileCount; x++)
    {
        SortedTreeTable<HistorianKey, HistorianValue> table1 = CreateTable();
        AddData(table1, start.AddMinutes(2 * x), new TimeSpan(TimeSpan.TicksPerSecond), 60, 100, 1, Max / 60 / FileCount);
        using (ArchiveListEditor<HistorianKey, HistorianValue> editor = list.AcquireEditLock())
        {
            editor.Add(table1);
        }
    }

    SeekFilterBase<HistorianKey> filter = TimestampSeekFilter.CreateFromIntervalData<HistorianKey>(start, start.AddMinutes(2 * FileCount), new TimeSpan(TimeSpan.TicksPerSecond * 2), new TimeSpan(TimeSpan.TicksPerMillisecond));
    SequentialReaderStream<HistorianKey, HistorianValue> sequencer = new SequentialReaderStream<HistorianKey, HistorianValue>(list, null, filter);

    DebugStopwatch sw = new DebugStopwatch();
    int xi = 0;
    double sec = sw.TimeEvent(() =>
    {
        SequentialReaderStream<HistorianKey, HistorianValue> scanner = sequencer;
        while (scanner.Read(key, value))
        {
            xi++;
        }
    });

    System.Console.WriteLine(Max / sec / 1000000);

    //TreeKeyMethodsBase<HistorianKey>.WriteToConsole();
    //TreeValueMethodsBase<HistorianValue>.WriteToConsole();

    //Console.WriteLine("KeyMethodsBase calls");
    //for (int x = 0; x < 23; x++)
    //{
    //    Console.WriteLine(TreeKeyMethodsBase<HistorianKey>.CallMethods[x] + "\t" + ((TreeKeyMethodsBase<HistorianKey>.Method)(x)).ToString());
    //}
    //Console.WriteLine("ValueMethodsBase calls");
    //for (int x = 0; x < 5; x++)
    //{
    //    Console.WriteLine(TreeValueMethodsBase<HistorianValue>.CallMethods[x] + "\t" + ((TreeValueMethodsBase<HistorianValue>.Method)(x)).ToString());
    //}

    list.Dispose();
    MemoryPoolTest.TestMemoryLeak();
}
/// <summary>
/// Recovers this rollover after an application crash.
/// </summary>
/// <param name="list">the archive list to check for the destination and source files</param>
public void Recover(ArchiveList list)
{
    using (ArchiveListEditor edit = list.AcquireEditLock())
    {
        //If the destination file exists, the rollover is complete. Therefore remove any source file.
        if (edit.Contains(DestinationFile))
        {
            foreach (Guid source in SourceFiles)
            {
                if (edit.Contains(source))
                {
                    edit.TryRemoveAndDelete(source);
                }
            }
        }
        //Otherwise, delete the destination file (which will allow the ~d2 cleanup to occur).
    }
    Delete();
}
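//The recovery rule above is easiest to see in isolation. The following is a minimal,
//self-contained sketch of that decision (a hypothetical helper, not part of the library):
//if the destination file survived the crash, the rollover finished and the leftover
//source files are redundant copies; if it did not, the partially written destination is
//discarded and the untouched source files remain authoritative.
private static void RecoverSketch(HashSet<Guid> existingFiles, Guid destinationFile, IEnumerable<Guid> sourceFiles, Action<Guid> delete)
{
    if (existingFiles.Contains(destinationFile))
    {
        //Rollover completed before the crash; remove the now-duplicate source files.
        foreach (Guid source in sourceFiles)
        {
            if (existingFiles.Contains(source))
            {
                delete(source);
            }
        }
    }
    else
    {
        //Rollover never completed; discard the partial destination and keep the sources.
        delete(destinationFile);
    }
}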
private void OnExecute(object sender, EventArgs<ScheduledTaskRunningReason> e)
{
    //The worker can be disposed either via the Stop() method or
    //the Dispose() method. If via the dispose method, then
    //don't do any cleanup.
    if (m_disposed && e.Argument == ScheduledTaskRunningReason.Disposing)
    {
        return;
    }

    //Go ahead and schedule the next rollover since nothing
    //will happen until this function exits anyway.
    //If the task is disposing, the following line does nothing.
    m_rolloverTask.Start(m_settings.ExecuteTimer);

    lock (m_syncRoot)
    {
        if (m_disposed)
        {
            return;
        }

        using (ArchiveListSnapshot<TKey, TValue> resource = m_archiveList.CreateNewClientResources())
        {
            resource.UpdateSnapshot();

            List<ArchiveTableSummary<TKey, TValue>> list = new List<ArchiveTableSummary<TKey, TValue>>();
            List<Guid> listIds = new List<Guid>();

            //Collect the intermediate files that match this stage's flag.
            for (int x = 0; x < resource.Tables.Length; x++)
            {
                ArchiveTableSummary<TKey, TValue> table = resource.Tables[x];
                if (table.SortedTreeTable.BaseFile.Snapshot.Header.Flags.Contains(m_settings.MatchFlag) &&
                    table.SortedTreeTable.BaseFile.Snapshot.Header.Flags.Contains(FileFlags.IntermediateFile))
                {
                    list.Add(table);
                    listIds.Add(table.FileId);
                }
                else
                {
                    resource.Tables[x] = null;
                }
            }

            bool shouldRollover = list.Count >= m_settings.CombineOnFileCount;

            //Accumulate file sizes; once the limit is crossed, drop every file after the one that crossed it.
            long size = 0;
            for (int x = 0; x < list.Count; x++)
            {
                size += list[x].SortedTreeTable.BaseFile.ArchiveSize;
                if (size > m_settings.CombineOnFileSize)
                {
                    if (x != list.Count - 1) //If not the last entry
                    {
                        list.RemoveRange(x + 1, list.Count - x - 1);
                    }
                    break;
                }
            }

            if (size > m_settings.CombineOnFileSize)
            {
                shouldRollover = true;
            }

            if (shouldRollover)
            {
                //Determine the combined key range of all files being rolled over.
                TKey startKey = new TKey();
                TKey endKey = new TKey();
                startKey.SetMax();
                endKey.SetMin();

                foreach (Guid fileId in listIds)
                {
                    ArchiveTableSummary<TKey, TValue> table = resource.TryGetFile(fileId);
                    if (table is null)
                    {
                        throw new Exception("File not found");
                    }

                    if (!table.IsEmpty)
                    {
                        if (startKey.IsGreaterThan(table.FirstKey))
                        {
                            table.FirstKey.CopyTo(startKey);
                        }
                        if (endKey.IsLessThan(table.LastKey))
                        {
                            table.LastKey.CopyTo(endKey);
                        }
                    }
                }

                RolloverLogFile logFile = null;

                Action<Guid> createLog = (x) =>
                {
                    logFile = m_rolloverLog.Create(listIds, x);
                };

                using (UnionReader<TKey, TValue> reader = new UnionReader<TKey, TValue>(list))
                {
                    SortedTreeTable<TKey, TValue> dest = m_createNextStageFile.CreateArchiveFile(startKey, endKey, size, reader, createLog);

                    resource.Dispose();

                    using (ArchiveListEditor<TKey, TValue> edit = m_archiveList.AcquireEditLock())
                    {
                        //Add the newly created file.
                        edit.Add(dest);

                        foreach (ArchiveTableSummary<TKey, TValue> table in list)
                        {
                            edit.TryRemoveAndDelete(table.FileId);
                        }
                    }
                }

                if (logFile != null)
                {
                    logFile.Delete();
                }
            }

            resource.Dispose();
        }

        m_rolloverComplete.Set();
    }
}
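//A minimal sketch of the file-selection rule used above, with plain longs standing in for
//BaseFile.ArchiveSize (a hypothetical helper; the real limit comes from
//m_settings.CombineOnFileSize). Files are taken in order until the running total first
//exceeds the limit; the file that crosses the limit is kept, and everything after it is
//left for a later rollover pass.
private static List<long> SelectSizesForRollover(IReadOnlyList<long> fileSizes, long combineOnFileSize)
{
    List<long> selected = new List<long>();
    long total = 0;

    foreach (long size in fileSizes)
    {
        selected.Add(size);
        total += size;

        if (total > combineOnFileSize)
        {
            break; //Stop after the file that crossed the limit.
        }
    }

    return selected;
}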
private void RolloverTask_Running(object sender, EventArgs<ScheduledTaskRunningReason> e)
{
    //The worker can be disposed either via the Stop() method or
    //the Dispose() method. If via the dispose method, then
    //don't do any cleanup.
    if (m_disposed && e.Argument == ScheduledTaskRunningReason.Disposing)
    {
        Log.Publish(MessageLevel.Info, "Rollover thread is Disposing");
        m_rolloverComplete.Dispose();
        return;
    }

    List<SortedTreeTable<TKey, TValue>> pendingTables1;
    List<SortedTreeTable<TKey, TValue>> pendingTables2;
    List<SortedTreeTable<TKey, TValue>> pendingTables3;
    long sequenceNumber;

    //Swap out the pending table lists so the writer can keep queuing new tables.
    lock (m_syncRoot)
    {
        pendingTables1 = m_pendingTables1;
        pendingTables2 = m_pendingTables2;
        pendingTables3 = m_pendingTables3;
        sequenceNumber = m_lastCommitedSequenceNumber;
        m_pendingTables1 = new List<SortedTreeTable<TKey, TValue>>();
        m_pendingTables2 = new List<SortedTreeTable<TKey, TValue>>();
        m_pendingTables3 = new List<SortedTreeTable<TKey, TValue>>();
        m_rolloverComplete.Set();
    }

    TKey startKey = new TKey();
    TKey endKey = new TKey();
    startKey.SetMax();
    endKey.SetMin();

    Log.Publish(MessageLevel.Info, "Pending Tables Report",
        "Pending Tables V1: " + pendingTables1.Count + " V2: " + pendingTables2.Count + " V3: " + pendingTables3.Count);

    //Summarize each non-empty pending table and widen the combined key range to include it.
    List<ArchiveTableSummary<TKey, TValue>> summaryTables = new List<ArchiveTableSummary<TKey, TValue>>();

    foreach (SortedTreeTable<TKey, TValue> table in pendingTables1)
    {
        ArchiveTableSummary<TKey, TValue> summary = new ArchiveTableSummary<TKey, TValue>(table);
        if (!summary.IsEmpty)
        {
            summaryTables.Add(summary);
            if (startKey.IsGreaterThan(summary.FirstKey))
            {
                summary.FirstKey.CopyTo(startKey);
            }
            if (endKey.IsLessThan(summary.LastKey))
            {
                summary.LastKey.CopyTo(endKey);
            }
        }
    }

    foreach (SortedTreeTable<TKey, TValue> table in pendingTables2)
    {
        ArchiveTableSummary<TKey, TValue> summary = new ArchiveTableSummary<TKey, TValue>(table);
        if (!summary.IsEmpty)
        {
            summaryTables.Add(summary);
            if (startKey.IsGreaterThan(summary.FirstKey))
            {
                summary.FirstKey.CopyTo(startKey);
            }
            if (endKey.IsLessThan(summary.LastKey))
            {
                summary.LastKey.CopyTo(endKey);
            }
        }
    }

    foreach (SortedTreeTable<TKey, TValue> table in pendingTables3)
    {
        ArchiveTableSummary<TKey, TValue> summary = new ArchiveTableSummary<TKey, TValue>(table);
        if (!summary.IsEmpty)
        {
            summaryTables.Add(summary);
            if (startKey.IsGreaterThan(summary.FirstKey))
            {
                summary.FirstKey.CopyTo(startKey);
            }
            if (endKey.IsLessThan(summary.LastKey))
            {
                summary.LastKey.CopyTo(endKey);
            }
        }
    }

    long size = summaryTables.Sum(x => x.SortedTreeTable.BaseFile.ArchiveSize);

    if (summaryTables.Count > 0)
    {
        //Merge all pending tables into a single next-stage archive file, then swap it in.
        using (UnionTreeStream<TKey, TValue> reader = new UnionTreeStream<TKey, TValue>(summaryTables.Select(x => new ArchiveTreeStreamWrapper<TKey, TValue>(x)), true))
        {
            SortedTreeTable<TKey, TValue> newTable = m_createNextStageFile.CreateArchiveFile(startKey, endKey, size, reader, null);

            using (ArchiveListEditor<TKey, TValue> edit = m_list.AcquireEditLock())
            {
                //Add the newly created file.
                edit.Add(newTable);

                foreach (SortedTreeTable<TKey, TValue> table in pendingTables1)
                {
                    edit.TryRemoveAndDelete(table.ArchiveId);
                }
                foreach (SortedTreeTable<TKey, TValue> table in pendingTables2)
                {
                    edit.TryRemoveAndDelete(table.ArchiveId);
                }
                foreach (SortedTreeTable<TKey, TValue> table in pendingTables3)
                {
                    edit.TryRemoveAndDelete(table.ArchiveId);
                }
            }
        }
    }

    m_lastRolledOverSequenceNumber.Value = sequenceNumber;

    if (RolloverComplete != null)
    {
        RolloverComplete(sequenceNumber);
    }
}
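//The same start/end key tracking appears in both rollover paths above. The following is a
//minimal sketch of that pattern (a hypothetical helper, not part of the library), using
//ulong timestamps in place of TKey: the range starts inverted (start at max, end at min)
//and widens to cover every table it is given, so an empty input yields an empty range.
//Each pair holds a table's first key (Key) and last key (Value).
private static void CombinedKeyRange(IEnumerable<KeyValuePair<ulong, ulong>> tableRanges, out ulong startKey, out ulong endKey)
{
    startKey = ulong.MaxValue;
    endKey = ulong.MinValue;

    foreach (KeyValuePair<ulong, ulong> range in tableRanges)
    {
        if (range.Key < startKey)
        {
            startKey = range.Key; //Widen the lower bound.
        }
        if (range.Value > endKey)
        {
            endKey = range.Value; //Widen the upper bound.
        }
    }
}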
/// <summary>
/// Appends this data to this stage. Also queues up for deletion if necessary.
/// </summary>
/// <param name="args">arguments handed to this class from either the
/// PrestageWriter or another StageWriter of a previous generation</param>
/// <remarks>
/// This method must be called in a single-threaded manner.
/// </remarks>
public void AppendData(PrebufferRolloverArgs<TKey, TValue> args)
{
    if (m_stopped)
    {
        Log.Publish(MessageLevel.Info, "No new points can be added. Point queue has been stopped. Data in rollover will be lost");
        return;
    }
    if (m_disposed)
    {
        Log.Publish(MessageLevel.Info, "First stage writer has been disposed. Data in rollover will be lost");
        return;
    }

    //Write the incoming stream to a new in-memory archive file.
    SortedTreeFile file = SortedTreeFile.CreateInMemory(4096);
    SortedTreeTable<TKey, TValue> table = file.OpenOrCreateTable<TKey, TValue>(m_settings.EncodingMethod);
    using (SortedTreeTableEditor<TKey, TValue> edit = table.BeginEdit())
    {
        edit.AddPoints(args.Stream);
        edit.Commit();
    }

    bool shouldWait = false;

    //If there is data to write then write it to the current archive.
    lock (m_syncRoot)
    {
        if (m_stopped)
        {
            Log.Publish(MessageLevel.Info, "No new points can be added. Point queue has been stopped. Data in rollover will be lost");
            table.Dispose();
            return;
        }
        if (m_disposed)
        {
            Log.Publish(MessageLevel.Info, "First stage writer has been disposed. Data in rollover will be lost");
            table.Dispose();
            return;
        }

        using (ArchiveListEditor<TKey, TValue> edit = m_list.AcquireEditLock())
        {
            edit.Add(table);
        }
        m_pendingTables1.Add(table);

        //Once 10 generation-1 tables accumulate, combine them into a single generation-2 table.
        if (m_pendingTables1.Count == 10)
        {
            using (UnionTreeStream<TKey, TValue> reader = new UnionTreeStream<TKey, TValue>(m_pendingTables1.Select(x => new ArchiveTreeStreamWrapper<TKey, TValue>(x)), true))
            {
                SortedTreeFile file1 = SortedTreeFile.CreateInMemory(4096);
                SortedTreeTable<TKey, TValue> table1 = file1.OpenOrCreateTable<TKey, TValue>(m_settings.EncodingMethod);
                using (SortedTreeTableEditor<TKey, TValue> edit = table1.BeginEdit())
                {
                    edit.AddPoints(reader);
                    edit.Commit();
                }

                using (ArchiveListEditor<TKey, TValue> edit = m_list.AcquireEditLock())
                {
                    //Add the newly created file.
                    edit.Add(table1);
                    foreach (SortedTreeTable<TKey, TValue> table2 in m_pendingTables1)
                    {
                        edit.TryRemoveAndDelete(table2.ArchiveId);
                    }
                }

                m_pendingTables2.Add(table1);
                m_pendingTables1.Clear();
            }
        }

        //Once 10 generation-2 tables accumulate, combine them into a single generation-3 table.
        if (m_pendingTables2.Count == 10)
        {
            using (UnionTreeStream<TKey, TValue> reader = new UnionTreeStream<TKey, TValue>(m_pendingTables2.Select(x => new ArchiveTreeStreamWrapper<TKey, TValue>(x)), true))
            {
                SortedTreeFile file1 = SortedTreeFile.CreateInMemory(4096);
                SortedTreeTable<TKey, TValue> table1 = file1.OpenOrCreateTable<TKey, TValue>(m_settings.EncodingMethod);
                using (SortedTreeTableEditor<TKey, TValue> edit = table1.BeginEdit())
                {
                    edit.AddPoints(reader);
                    edit.Commit();
                }

                using (ArchiveListEditor<TKey, TValue> edit = m_list.AcquireEditLock())
                {
                    //Add the newly created file.
                    edit.Add(table1);
                    foreach (SortedTreeTable<TKey, TValue> table2 in m_pendingTables2)
                    {
                        edit.TryRemoveAndDelete(table2.ArchiveId);
                    }
                }

                m_pendingTables3.Add(table1);
                m_pendingTables2.Clear();
            }
        }

        m_lastCommitedSequenceNumber.Value = args.TransactionId;

        //Decide whether a rollover to the next stage should start now and whether the writer must block.
        long currentSizeMb = (m_pendingTables1.Sum(x => x.BaseFile.ArchiveSize) + m_pendingTables2.Sum(x => x.BaseFile.ArchiveSize)) >> 20;
        if (currentSizeMb > m_settings.MaximumAllowedMb)
        {
            shouldWait = true;
            m_rolloverTask.Start();
            m_rolloverComplete.Reset();
        }
        else if (currentSizeMb > m_settings.RolloverSizeMb)
        {
            m_rolloverTask.Start();
        }
        else
        {
            m_rolloverTask.Start(m_settings.RolloverInterval);
        }
    }

    if (SequenceNumberCommitted != null)
    {
        SequenceNumberCommitted(args.TransactionId);
    }

    if (shouldWait)
    {
        Log.Publish(MessageLevel.NA, MessageFlags.PerformanceIssue, "Queue is full",
            "Rollover task is taking a long time. A long pause on the inputs is about to occur.");
        m_rolloverComplete.WaitOne();
    }
}
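//A minimal sketch of the size-based trigger at the end of AppendData, with the two
//thresholds passed in as plain values standing in for m_settings.RolloverSizeMb and
//m_settings.MaximumAllowedMb (a hypothetical helper, not part of the library). Pending
//bytes are converted to megabytes with a right shift by 20 (2^20 bytes per MB); crossing
//the hard maximum starts the rollover and forces the writer to block until it completes,
//while crossing the soft limit only starts the rollover early.
private static string DescribeRolloverAction(long pendingBytes, long rolloverSizeMb, long maximumAllowedMb)
{
    long pendingMb = pendingBytes >> 20; //Bytes to megabytes.

    if (pendingMb > maximumAllowedMb)
    {
        return "start rollover immediately and block the writer until it completes";
    }
    if (pendingMb > rolloverSizeMb)
    {
        return "start rollover immediately without blocking";
    }
    return "schedule rollover on the normal interval";
}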