static void Main(string[] args)
{
    // Test 1: merge three pre-sorted int arrays.
    int[] temp1 = { 1, 3, 4, 7, 9, 13, 15, 17 };
    int[] temp2 = { 3, 6, 9, 12, 45, 78 };
    int[] temp3 = { 4, 6, 8, 13, 46, 57 };

    //foreach (var el in Merge.MergeEnumerators<int>(new ComparaEnteros(), ((IEnumerable<int>)temp1).GetEnumerator(), ((IEnumerable<int>)temp2).GetEnumerator(),
    //    ((IEnumerable<int>)temp3).GetEnumerator()))
    //    Console.WriteLine(el);

    IEnumerator<int> my = new MergeEnumerator<int>(
        new ComparaEnteros(),
        ((IEnumerable<int>) temp1).GetEnumerator(),
        ((IEnumerable<int>) temp2).GetEnumerator(),
        ((IEnumerable<int>) temp3).GetEnumerator());

    // Enumerate the merged sequence once.
    while (my.MoveNext())
    {
        Console.WriteLine(my.Current);
    }

    // Reset and enumerate a second time to verify that Reset() restarts the merge.
    my.Reset();
    Console.WriteLine();
    while (my.MoveNext())
    {
        Console.WriteLine(my.Current);
    }
}
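// The ComparaEnteros comparer used above is not defined in this excerpt. A minimal sketch of what it is
// assumed to look like: a plain IComparer<int> that orders integers ascending, which is what the merge
// needs in order to pick the smallest head element across the input enumerators.
public class ComparaEnteros : IComparer<int>
{
    // Ascending order; negative if x < y, zero if equal, positive if x > y.
    public int Compare(int x, int y) => x.CompareTo(y);
}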
private FileMetadata WriteMergedTable(Version version, MergeEnumerator mergeEnumerator, ref int count)
{
    var newFileMeta = new FileMetadata {FileNumber = version.GetNewFileNumber()};
    var newFileInfo = new FileInfo(GetTableFileName(newFileMeta.FileNumber));
    using FileStream newFileStream = newFileInfo.Create();
    var creator = new TableCreator(newFileStream);

    byte[] smallestKey = null;
    byte[] largestKey = null;
    ReadOnlyMemory<byte> prevKey = null;

    while (mergeEnumerator.MoveNext())
    {
        BlockEntry entry = mergeEnumerator.Current;
        count++;

        ReadOnlyMemory<byte> key = entry.Key;
        if (prevKey.Length != 0)
        {
            // Skip entries whose user key equals the previous one; only the first (newest) version is kept.
            if (prevKey.Span.UserKey().SequenceEqual(key.Span.UserKey()))
            {
                Log.Warn($"Duplicate keys - Prev Key: {prevKey.ToHexString()}, Key: {key.ToHexString()}");
                continue;
            }
        }
        prevKey = key;

        creator.Add(key.Span, entry.Data.Span);

        // Track the key range covered by the new table file.
        smallestKey ??= key.ToArray();
        largestKey = key.ToArray();

        // Stop when the new table reaches the configured size limit.
        if (creator.CurrentSize > Options.MaxTableFileSize) break;
    }

    creator.Finish();

    newFileMeta.SmallestKey = smallestKey;
    newFileMeta.LargestKey = largestKey;
    newFileInfo.Refresh();
    newFileMeta.FileSize = (ulong) newFileInfo.Length;

    return newFileMeta;
}
public void CompactNumeric()
{
    var keys = new List<byte[]>();

    DirectoryInfo dir = TestUtils.GetTestDirectory(false);

    // Set up a new database and generate enough values to create 2 level 0 tables with overlapping keys.
    // We use this when we run the real test.
    ulong idx = 0;
    var options = new Options() {LevelSizeBaseFactor = 10, RetainAllFiles = true};
    List<FileMetadata> level0Files;
    Version version = null;
    using (var db = new Database(dir, true, options))
    {
        db.Open();
        for (int j = 0; j < 4; j++)
        {
            for (int i = 0; i < 8000; i++)
            {
                byte[] key = BitConverter.GetBytes(idx++);
                byte[] data = TestUtils.FillArrayWithRandomBytes(1000, 128);
                db.Put(key, data);
                keys.Add(key);
            }
        }

        level0Files = new List<FileMetadata>(db.Level0Tables);
        version = db.Version;
        db.Close();
    }

    ((Hierarchy) LogManager.GetRepository(Assembly.GetEntryAssembly())).Root.Level = Level.Warn;

    {
        Log.Warn($"Reading {keys.Count} values using regular db.get()");
        using (var db = new Database(dir, false, options))
        {
            db.Open();
            ulong count = 0;
            ulong countMissing = 0;
            foreach (byte[] key in keys)
            {
                byte[] value = db.Get(key);
                if (value == null)
                {
                    Log.Error($"Missing key {key.ToHexString()} at idx:{count}, {countMissing++}");
                }
                count++;
            }
            db.Close();
        }
    }

    return; // NOTE: the remainder of the test is currently disabled by this early return.

    //{
    //    Log.Warn($"Reading {keys.Count} values, from log files");
    //    List<byte[]> keysToRemove = new List<byte[]>(keys);
    //    FileInfo[] logFiles = dir.GetFiles("*.log");
    //    foreach (FileInfo fileInfo in logFiles)
    //    {
    //        Log.Warn($"Reading from {fileInfo.Name}. Have {keysToRemove.Count} keys left");
    //        using var reader = new LogReader(fileInfo.Open(FileMode.Open));
    //        var cache = new MemCache();
    //        cache.Load(reader);
    //        foreach (byte[] key in keysToRemove.Take(5000).ToArray())
    //        {
    //            if (cache.Get(key).State == ResultState.Exist)
    //            {
    //                keysToRemove.Remove(key);
    //            }
    //        }
    //    }
    //    Assert.AreEqual(0, keysToRemove.Count);
    //}

    int keysInLevel0 = 0;
    var keysInCurrentLog = new List<byte[]>();
    {
        Log.Warn($"Reading {keys.Count} values, from level0 files");
        List<byte[]> keysToRemove = new List<byte[]>(keys);
        var enumerators = new List<TableEnumerator>();
        foreach (FileMetadata fileMeta in level0Files.OrderBy(f => f.FileNumber))
        {
            string filePath = Path.Combine(dir.FullName, $"{fileMeta.FileNumber:000000}.ldb");
            var fileInfo = new FileInfo(filePath);
            Log.Warn($"Reading from {fileInfo.Name}. Have {keysToRemove.Count} keys left");
            var table = new Table(fileInfo);
            foreach (byte[] key in keysToRemove.ToArray())
            {
                if (table.Get(key).State == ResultState.Exist)
                {
                    keysInLevel0++;
                    keysToRemove.Remove(key);
                }
            }

            enumerators.Add((TableEnumerator) table.GetEnumerator());
        }
        Assert.Less(0, keysInLevel0);

        // Read the remaining keys from the current log file.
        {
            string filePath = Path.Combine(dir.FullName, $"{version.LogNumber:000000}.log");
            var fileInfo = new FileInfo(filePath);
            Log.Warn($"Reading remaining {keysToRemove.Count} values from current log {fileInfo.Name}");
            using var reader = new LogReader(fileInfo.Open(FileMode.Open));
            var cache = new MemCache();
            cache.Load(reader);
            foreach (byte[] key in keysToRemove.ToArray())
            {
                if (cache.Get(key).State == ResultState.Exist)
                {
                    keysInCurrentLog.Add(key);
                    keysToRemove.Remove(key);
                }
            }
            Assert.AreEqual(0, keysToRemove.Count);
        }

        {
            Log.Warn($"Reading {keysInLevel0} values, based on merge enumerator of all level0 table files");
            var enumerator = new MergeEnumerator(enumerators);
            int enumCount = 0;
            while (enumerator.MoveNext())
            {
                enumCount++;
            }
            Assert.AreEqual(keysInLevel0, enumCount);

            // Close the tables.
            foreach (TableEnumerator tableEnumerator in enumerators)
            {
                tableEnumerator.TEST_Close();
            }
        }
    }

    {
        var keysLeftToRemove = new List<byte[]>(keys).Except(keysInCurrentLog).ToList();
        Log.Warn($"Reading {keysLeftToRemove.Count} values, from all level+1 files + current level0");
        var level1Enumerators = new List<TableEnumerator>();
        FileInfo[] tableFiles = dir.GetFiles("*.ldb");
        foreach (var fileInfo in tableFiles.OrderBy(f => f.Name))
        {
            if (level0Files.Any(f => $"{f.FileNumber:000000}.ldb" == fileInfo.Name))
            {
                if (version.GetFiles(0).All(f => $"{f.FileNumber:000000}.ldb" != fileInfo.Name))
                {
                    continue;
                }

                Log.Warn($"Reading current level0 file {fileInfo.Name}");
            }

            Log.Warn($"Reading from {fileInfo.Name}. Have {keysLeftToRemove.Count} keys left");
            var table = new Table(fileInfo);
            table.Initialize();
            level1Enumerators.Add((TableEnumerator) table.GetEnumerator());
            foreach (byte[] key in keysLeftToRemove.ToArray())
            {
                if (table.Get(key).State == ResultState.Exist)
                {
                    keysLeftToRemove.Remove(key);
                }
            }
        }
        //Assert.AreEqual(0, keysLeftToRemove.Count); // FAIL

        {
            keysLeftToRemove = new List<byte[]>(keys).Except(keysInCurrentLog).ToList();
            Log.Warn($"Reading {keysLeftToRemove.Count} values, from all level+1 files + current level0 using merge enumerator");
            var enumerator = new MergeEnumerator(level1Enumerators);
            int enumCount = 0;
            while (enumerator.MoveNext())
            {
                enumCount++;
                if (enumerator.Current != null)
                {
                    byte[] key = enumerator.Current.Key.Span.UserKey().ToArray();
                    keysLeftToRemove.RemoveAll(bytes => new BytewiseComparator().Compare(bytes, key) == 0);
                }
                else
                {
                    Log.Warn($"Current in enumerator is null");
                }
            }
            Assert.AreEqual(keys.Count - keysInCurrentLog.Count, enumCount, "Expected to have count of all keys");
            Assert.AreEqual(0, keysLeftToRemove.Count, "Expected to have found all keys");

            foreach (TableEnumerator tableEnumerator in level1Enumerators)
            {
                tableEnumerator.TEST_Close();
            }
        }
    }

    Log.Warn($"Done!");
}