public void TestWritingNotInOrder()
{
	const string subFolderName = "TestWritingNotInOrder";
	const int desiredFileSize = 100;
	const int itemCount = 100000;
	const int intervalMinutes = 1;

	// Route data across a 2x2 folder subtree and cap the file size so that
	// multiple data files get created per folder.
	Mock.MultiLevelDataRouter router = new Mock.MultiLevelDataRouter(2, 2);

	IRepositoryFolder targetFolder = Repository.RootFolder
		.GetDescendant(FixtureRootRepoFolder.LogicalPath, false)
		.CreateSubfolder(subFolderName);
	targetFolder.Properties.DesiredItemsPerFile = desiredFileSize;

	IDataItem[] data = GetTestData(itemCount, DateTime.Now, intervalMinutes);

	// Write the data set in two passes — even-indexed items first, then
	// odd-indexed ones — so the second pass inserts items whose timestamps
	// fall between already-written ones (i.e. out of chronological order).
	for (int parity = 0; parity < 2; ++parity)
	{
		using (IRepositoryWriter writer = GetWriter(targetFolder, router))
		{
			for (int index = parity; index < itemCount; index += 2)
			{
				writer.Write(dataItem: data[index]);
			}
		}
	}

	// The following commented-out call would be necessary if writing had been
	// done in a standalone instance, because the target folder would not know
	// that subfolders were created during writing; a standalone writer is not
	// used here, so no refresh is needed.
	//targetFolder.Refresh(true, true);

	CheckAllDataInFolder(targetFolder, data);
}
public void TestUnsavedItemsWithInterruption()
{
	const string subFolderName = "TestUnsavedItemsWithInterruption";
	const int desiredFileSize = 100;
	const int itemCount = 100000;
	const int intervalMinutes = 1;

	// 3-level, 2-way router: items are spread round-robin style over
	// router.SubtreeFolderCount leaf folders under the target folder.
	Mock.MultiLevelDataRouter router = new Mock.MultiLevelDataRouter(3, 2);
	Repository.RootFolder.GetDescendant(FixtureRootRepoFolder.LogicalPath, false).CreateSubfolder(subFolderName);
	IRepositoryWriter writer = GetStandaloneWriter(subFolderName, desiredFileSize, router);

	// With items routed to SubtreeFolderCount folders in turn, successive
	// items landing in the SAME folder are this many minutes apart.
	int intervalSameFolderMinutes = intervalMinutes * router.SubtreeFolderCount;
	IDataItem[] data = GetTestData(itemCount, DateTime.Now, intervalMinutes);

	// Check unsaved items roughly every 57 writes (jittered below); the base
	// must exceed the folder count so every folder receives data between checks.
	const int checkIntervalItemCountBase = 57;
	Assume.That(checkIntervalItemCountBase > router.SubtreeFolderCount);
	var random = new Random();
	int nextCheckCount = checkIntervalItemCountBase;
	// Somewhere near the middle of the run the writer is abandoned without
	// Close() to simulate an interruption/crash.
	int stopAndRestartCounter = itemCount / 2 + random.Next(-itemCount / 5, itemCount / 5);

	for (int n = 0; n < itemCount; ++n)
	{
		writer.Write(dataItem: data[n]);

		if (n == nextCheckCount)
		{
			// Jitter the next checkpoint by +/-10 items.
			nextCheckCount = nextCheckCount + checkIntervalItemCountBase + random.Next(20) - 10;

			// Unsaved (not yet flushed) items, keyed by target folder path;
			// every leaf folder must be represented.
			IDictionary<string, IList<IDataItem>> unsavedItemsDict = writer.GetUnsavedItems();
			Assert.AreEqual(router.SubtreeFolderCount, unsavedItemsDict.Count);

			// Last known (saved or pending) timestamp per leaf folder.
			Dictionary<string, DateTime> lastSavedTimestamps = new Dictionary<string, DateTime>();
			foreach (KeyValuePair<string, IList<IDataItem>> pair in unsavedItemsDict)
			{
				string relativePath = pair.Key.Substring(writer.Folder.LogicalPath.Length);
				IRepositoryFolder sourceFolder = writer.Folder.GetDescendant(relativePath, false);
				Assert.IsNotNull(sourceFolder);

				DateTime lastFlushedTimestamp = sourceFolder.LastTimestamp;
				if (pair.Value.Count > 0)
				{
					if (lastFlushedTimestamp > DateTime.MinValue)
					{
						// The first unsaved item must directly follow the last
						// flushed item for this folder (no gap, no overlap).
						Assert.AreEqual(lastFlushedTimestamp.AddMinutes(intervalSameFolderMinutes), pair.Value[0].DateTime);
					}
					else
					{
						// Nothing flushed yet: only possible early in the run,
						// before the desired file size forces a flush.
						Assert.Less(n, desiredFileSize * (router.SubtreeFolderCount + 1), "Data must have been flushed by now due to desired file size");
					}
					lastSavedTimestamps[relativePath] = pair.Value[pair.Value.Count - 1].DateTime;
				}
				else
				{
					// No pending items: the folder must have flushed something.
					Assert.AreNotEqual(DateTime.MinValue, lastFlushedTimestamp);
					lastSavedTimestamps[relativePath] = lastFlushedTimestamp;
				}
			}

			// Per-folder last timestamps, sorted, should be consecutive at
			// intervalMinutes spacing, ending at the item just written.
			List<DateTime> lastSavedPerFolder = lastSavedTimestamps.Values.ToList();
			lastSavedPerFolder.Sort();
			// NOTE(review): this bound skips the final adjacent pair
			// (Count - 2, Count - 1); if all folders receive items in strict
			// rotation the bound should arguably be SubtreeFolderCount - 1 —
			// confirm against the router's distribution semantics.
			for (int j = 0; j < router.SubtreeFolderCount - 2; ++j)
			{
				Assert.AreEqual(lastSavedPerFolder[j].AddMinutes(intervalMinutes), lastSavedPerFolder[j + 1]);
			}
			Assert.AreEqual(lastSavedPerFolder[router.SubtreeFolderCount - 1], data[n].DateTime);
		} // if (n == nextCheckCount)

		if (n == stopAndRestartCounter)
		{
			// Simulate an interruption: grab whatever is unsaved, drop the old
			// writer without closing it, and replay the unsaved items into a
			// fresh standalone writer.
			var unsavedItemsDict = writer.GetUnsavedItems();
			writer = GetStandaloneWriter(subFolderName, desiredFileSize, router);
			var unsavedList = MergeUnsavedItems(unsavedItemsDict);
			foreach (var dataItem in unsavedList)
			{
				writer.Write(dataItem);
			}
		}
	}

	IRepositoryFolder targetFolder = writer.Folder;
	writer.Close();

	// Despite the mid-run interruption and replay, every item must be present.
	CheckAllDataInFolder(targetFolder, data);
}