public void LevelDbSearchLogTest()
{
    // Log format reference: https://github.com/google/leveldb/blob/master/doc/log_format.md
    // Load a write-ahead log into a fresh mem cache and look up one known key.
    var reader = new LogReader(new FileInfo(@"TestWorld\000047.log"));
    reader.Open();

    var cache = new MemCache();
    cache.Load(reader);

    var lookup = cache.Get(new byte[] { 0xeb, 0xff, 0xff, 0xff, 0xf3, 0xff, 0xff, 0xff, 0x31 });

    // The key must resolve to a non-empty value whose first five bytes are known.
    Assert.IsTrue(lookup.Data != ReadOnlySpan<byte>.Empty);
    Assert.AreEqual(new byte[] { 0xA, 0x00, 0x00, 0x02, 0x05 }, lookup.Data.Slice(0, 5).ToArray());
}
public void WriteLevel0TableTest()
{
    // Build a mem cache from a known log file, flush it into a fresh level-0
    // table file, then verify point lookups against the written table.
    using var logReader = new LogReader(new FileInfo(Path.Combine(TestUtils.GetTestDirectory().FullName, "000047.log")));
    var cache = new MemCache();
    cache.Load(logReader);

    var targetFile = new FileInfo(Path.Combine(Path.GetTempPath(), Guid.NewGuid() + ".ldb"));
    var db = new Database(null);
    FileMetadata metadata = db.WriteLevel0Table(cache, targetFile);
    Assert.IsNotNull(metadata.Table);
    Table table = metadata.Table;

    // Key fe ff ff ff f1 ff ff ff 76 — present in the source log.
    ResultStatus status = table.Get(new byte[] { 0xfe, 0xff, 0xff, 0xff, 0xf1, 0xff, 0xff, 0xff, 0x76 });
    Assert.AreEqual(ResultState.Exist, status.State);

    // Key fa 40 ab 14 4d 96 ec 7b 62 38 f7 63 — never written.
    status = table.Get(new byte[] { 0xfa, 0x40, 0xab, 0x14, 0x4d, 0x96, 0xec, 0x7b, 0x62, 0x38, 0xf7, 0x63 });
    Assert.AreEqual(ResultState.NotFound, status.State);

    // Key fd ff ff ff f1 ff ff ff 39 (28036, False, size:0) — recorded as a deletion.
    status = table.Get(new byte[] { 0xfd, 0xff, 0xff, 0xff, 0xf1, 0xff, 0xff, 0xff, 0x39 });
    Assert.AreEqual(ResultState.Deleted, status.State);

    // Every live entry in the cache must round-trip through the table.
    foreach (KeyValuePair<byte[], MemCache.ResultCacheEntry> entry in cache._resultCache.OrderBy(kvp => kvp.Key, new BytewiseComparator()))
    {
        if (entry.Value.ResultState != ResultState.Exist)
        {
            continue;
        }

        status = table.Get(entry.Key);
        Assert.AreEqual(ResultState.Exist, status.State);
        Assert.AreEqual(entry.Value.Data, status.Data.ToArray());
    }
}
/// <summary>
/// Writes 32,000 sequential 8-byte keys (with ~1KB random values) through a fresh
/// database to force multiple level-0 tables with overlapping key ranges, then
/// verifies the keys can be read back. NOTE: the early <c>return;</c> below means
/// only the plain db.Get() verification currently runs; the level-0/merge-enumerator
/// verification after it is disabled (presumably left in for manual debugging).
/// </summary>
public void CompactNumeric()
{
    var keys = new List<byte[]>();
    DirectoryInfo dir = TestUtils.GetTestDirectory(false);

    // Setup new database and generate values enough to create 2 level 0 tables with
    // overlapping keys. We use this when we run the real test.
    ulong idx = 0;
    // RetainAllFiles keeps obsolete tables/logs on disk so the later (disabled)
    // phases can re-read them directly.
    var options = new Options() { LevelSizeBaseFactor = 10, RetainAllFiles = true };
    List<FileMetadata> level0Files;
    Version version = null;
    using (var db = new Database(dir, true, options))
    {
        db.Open();
        // 4 waves of 8000 puts; keys are the little-endian bytes of a running counter.
        for (int j = 0; j < 4; j++)
        {
            for (int i = 0; i < 8000; i++)
            {
                byte[] key = BitConverter.GetBytes(idx++);
                byte[] data = TestUtils.FillArrayWithRandomBytes(1000, 128);
                db.Put(key, data);
                keys.Add(key);
            }
        }
        // Snapshot the level-0 file list and version before closing, for the later phases.
        level0Files = new List<FileMetadata>(db.Level0Tables);
        version = db.Version;
        db.Close();
    }

    // Quiet log4net down to warnings for the read-back phase.
    ((Hierarchy) LogManager.GetRepository(Assembly.GetEntryAssembly())).Root.Level = Level.Warn;

    {
        // Phase 1: read every key back through the public Get() API, logging misses.
        Log.Warn($"Reading {keys.Count} values using regular db.get()");
        using (var db = new Database(dir, false, options))
        {
            db.Open();
            ulong count = 0;
            ulong countMissing = 0;
            foreach (byte[] key in keys)
            {
                byte[] value = db.Get(key);
                if (value == null)
                {
                    Log.Error($"Missing key {key.ToHexString()} at idx:{count}, {countMissing++}");
                }
                count++;
            }
            db.Close();
        }
    }

    // Everything below is intentionally disabled; remove this return to run the
    // low-level file-by-file verification phases.
    return;

    //{
    //    Log.Warn($"Reading {keys.Count} values, from log files");
    //    List<byte[]> keysToRemove = new List<byte[]>(keys);
    //    FileInfo[] logFiles = dir.GetFiles("*.log");
    //    foreach (FileInfo fileInfo in logFiles)
    //    {
    //        Log.Warn($"Reading from {fileInfo.Name}. Have {keysToRemove.Count} keys left");
    //        using var reader = new LogReader(fileInfo.Open(FileMode.Open));
    //        var cache = new MemCache();
    //        cache.Load(reader);
    //        foreach (byte[] key in keysToRemove.Take(5000).ToArray())
    //        {
    //            if (cache.Get(key).State == ResultState.Exist)
    //            {
    //                keysToRemove.Remove(key);
    //            }
    //        }
    //    }
    //    Assert.AreEqual(0, keysToRemove.Count);
    //}

    int keysInLevel0 = 0;
    var keysInCurrentLog = new List<byte[]>();
    {
        // Phase 2: account for every key by probing the level-0 table files directly.
        Log.Warn($"Reading {keys.Count} values, from level0 files");
        List<byte[]> keysToRemove = new List<byte[]>(keys);
        var enumerators = new List<TableEnumerator>();
        foreach (FileMetadata fileMeta in level0Files.OrderBy(f => f.FileNumber))
        {
            string filePath = Path.Combine(dir.FullName, $"{fileMeta.FileNumber:000000}.ldb");
            var fileInfo = new FileInfo(filePath);
            Log.Warn($"Reading from {fileInfo.Name}. Have {keysToRemove.Count} keys left");
            var table = new Table(fileInfo);
            foreach (byte[] key in keysToRemove.ToArray())
            {
                if (table.Get(key).State == ResultState.Exist)
                {
                    keysInLevel0++;
                    keysToRemove.Remove(key);
                }
            }
            // Keep an open enumerator per table for the merge-enumerator check below.
            enumerators.Add((TableEnumerator) table.GetEnumerator());
        }
        Assert.Less(0, keysInLevel0);

        // Read the remaining from current log file
        {
            string filePath = Path.Combine(dir.FullName, $"{version.LogNumber:000000}.log");
            var fileInfo = new FileInfo(filePath);
            Log.Warn($"Reading remaining {keysToRemove.Count} values from current log {fileInfo.Name}");
            using var reader = new LogReader(fileInfo.Open(FileMode.Open));
            var cache = new MemCache();
            cache.Load(reader);
            foreach (byte[] key in keysToRemove.ToArray())
            {
                if (cache.Get(key).State == ResultState.Exist)
                {
                    keysInCurrentLog.Add(key);
                    keysToRemove.Remove(key);
                }
            }
            // Every key must be found either in level-0 tables or the current log.
            Assert.AreEqual(0, keysToRemove.Count);
        }

        {
            // The merge enumerator over all level-0 tables must yield exactly the
            // number of distinct keys found by the direct probes above.
            Log.Warn($"Reading {keysInLevel0} values, based on merge enumerator of all level0 table files");
            var enumerator = new MergeEnumerator(enumerators);
            int enumCount = 0;
            while (enumerator.MoveNext())
            {
                enumCount++;
            }
            Assert.AreEqual(keysInLevel0, enumCount);

            // Close the tables
            foreach (TableEnumerator tableEnumerator in enumerators)
            {
                tableEnumerator.TEST_Close();
            }
        }
    }

    {
        // Phase 3: verify the same coverage using all table files on disk
        // (levels >= 1 plus the live level-0 files), skipping retained/obsolete
        // level-0 files that are no longer part of the current version.
        var keysLeftToRemove = new List<byte[]>(keys).Except(keysInCurrentLog).ToList();
        Log.Warn($"Reading {keysLeftToRemove.Count} values, from all level+1 files + current level0");
        var level1Enumerators = new List<TableEnumerator>();
        FileInfo[] tableFiles = dir.GetFiles("*.ldb");
        foreach (var fileInfo in tableFiles.OrderBy(f => f.Name))
        {
            if (level0Files.Any(f => $"{f.FileNumber:000000}.ldb" == fileInfo.Name))
            {
                // Was a level-0 file at close time; only include it if the current
                // version still lists it at level 0.
                if (version.GetFiles(0).All(f => $"{f.FileNumber:000000}.ldb" != fileInfo.Name))
                {
                    continue;
                }
                Log.Warn($"Reading current level0 file {fileInfo.Name}");
            }
            Log.Warn($"Reading from {fileInfo.Name}. Have {keysLeftToRemove.Count} keys left");
            var table = new Table(fileInfo);
            table.Initialize();
            level1Enumerators.Add((TableEnumerator) table.GetEnumerator());
            foreach (byte[] key in keysLeftToRemove.ToArray())
            {
                if (table.Get(key).State == ResultState.Exist)
                {
                    keysLeftToRemove.Remove(key);
                }
            }
        }
        //Assert.AreEqual(0, keysLeftToRemove.Count); // FAIL

        {
            // Same coverage check, but via a single merge enumerator over every
            // table opened above; compares user keys with the bytewise comparator.
            keysLeftToRemove = new List<byte[]>(keys).Except(keysInCurrentLog).ToList();
            Log.Warn($"Reading {keysLeftToRemove.Count} values, from all level+1 files + current level0 using merge enumerator");
            var enumerator = new MergeEnumerator(level1Enumerators);
            int enumCount = 0;
            while (enumerator.MoveNext())
            {
                enumCount++;
                if (enumerator.Current != null)
                {
                    byte[] key = enumerator.Current.Key.Span.UserKey().ToArray();
                    keysLeftToRemove.RemoveAll(bytes => new BytewiseComparator().Compare(bytes, key) == 0);
                }
                else
                {
                    Log.Warn($"Current in enumerator is null");
                }
            }
            Assert.AreEqual(keys.Count - keysInCurrentLog.Count, enumCount, "Expected to have count of all keys");
            Assert.AreEqual(0, keysLeftToRemove.Count, "Expected to have found all keys");

            foreach (TableEnumerator tableEnumerator in level1Enumerators)
            {
                tableEnumerator.TEST_Close();
            }
        }
    }

    Log.Warn($"Done!");
}
public void LevelDbWriteUserDataTest()
{
    // Plan: build a batch of three PUT operations with random keys and values.
    var batch = new KeyValuePair<byte[], MemCache.ResultCacheEntry>[3];
    for (int i = 0; i < 3; i++)
    {
        byte[] key = TestUtils.FillArrayWithRandomBytes(20);
        var entry = new MemCache.ResultCacheEntry();
        entry.ResultState = ResultState.Exist;
        entry.Sequence = 10;
        // 32KB is the max size for a block, not that it matters for this.
        entry.Data = TestUtils.FillArrayWithRandomBytes(32768);
        batch[i] = new KeyValuePair<byte[], MemCache.ResultCacheEntry>(key, entry);
    }

    var memCache = new MemCache();

    // Do: encode the batch into the write-batch wire format.
    ReadOnlySpan<byte> encoded = memCache.EncodeBatch(batch);

    // Check: header is a 64-bit sequence number followed by a 32-bit op count,
    // then per operation: type byte, varint key length + key, varint data length + data.
    var spanReader = new SpanReader(encoded);
    Assert.AreEqual(10, spanReader.ReadInt64(), "Sequence number");
    Assert.AreEqual(3, spanReader.ReadInt32(), "Operations count");
    for (int i = 0; i < 3; i++)
    {
        var expectedKey = batch[i].Key;
        var expectedData = batch[i].Value.Data;

        Assert.AreEqual(1, spanReader.ReadByte(), "Operations type PUT");
        var keyLen = spanReader.ReadVarLong();
        Assert.AreEqual(expectedKey.Length, keyLen, "Key len");
        Assert.AreEqual(expectedKey, spanReader.Read(keyLen).ToArray(), "Key");
        var dataLen = spanReader.ReadVarLong();
        Assert.AreEqual(expectedData.Length, dataLen, "Data len");
        Assert.AreEqual(expectedData, spanReader.Read(dataLen).ToArray(), "Data");
    }

    // Test encoding complete blocks through the log writer.
    var stream = new MemoryStream();
    var writer = new LogWriter(stream);
    writer.WriteData(encoded);
    Assert.Less(0, stream.Length);
    stream.Position = 0;

    // Roundtrip test by making sure we can read blocks we encoded ourselves.
    var logReader = new LogReader(stream);
    logReader.Open();

    var roundtripped = new MemCache();
    roundtripped.Load(logReader);

    var cache = roundtripped._resultCache;
    Assert.AreEqual(3, cache.Count);

    int j = 0;
    foreach (var entry in cache)
    {
        var expectedKey = batch[j].Key;
        var expectedData = batch[j].Value.Data;

        Assert.AreEqual(ResultState.Exist, entry.Value.ResultState, "Value exists");
        Assert.AreEqual(expectedKey.Length, entry.Key.Length, "Key len");
        Assert.AreEqual(expectedKey, entry.Key, "Key");
        Assert.AreEqual(expectedData.Length, entry.Value.Data.Length, "Data len");
        Assert.AreEqual(expectedData, entry.Value.Data, "Data");
        j++;
    }
}