public ObjectVersionStore(Config config, ObjectMetadataStore metadata) {
    _metadata = metadata;
    _store = new KeyValueStore(Path.Combine(config.BaseDataPath, "ObjectVersionStore"));
    _versions = new Dictionary<string, uint>(StringComparer.OrdinalIgnoreCase);
    _lock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
}
public void BasicPersistentGetAndSet() {
    string path = Path.GetFullPath("TestData\\BasicPersistentGetAndSet");

    using (var db = new KeyValueStore(path)) {
        db.Truncate();
        for (int i = 0; i < 10; i++) {
            byte[] key = BitConverter.GetBytes(i);
            byte[] value = Encoding.UTF8.GetBytes("Number " + i.ToString());
            db.Set(key, value);
        }
    }

    using (var db = new KeyValueStore(path)) {
        for (int j = 0; j < 15; j++) {
            byte[] key = BitConverter.GetBytes(j);
            byte[] value = db.Get(key);
            if (j < 10) {
                Assert.AreEqual(Encoding.UTF8.GetBytes("Number " + j.ToString()), value);
            } else {
                Assert.IsNull(value);
            }
        }
    }
}
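// The test above exercises the core KeyValueStore pattern: open, Truncate, Set, then reopen and Get.
// A minimal standalone sketch of that pattern follows; the path and string payload are illustrative,
// not part of the test suite.
using System;
using System.IO;
using System.Text;
using RazorDB;

class QuickStartSketch {
    static void Main() {
        string path = Path.GetFullPath("TestData\\QuickStartSketch"); // illustrative location
        using (var db = new KeyValueStore(path)) {
            db.Truncate(); // start from an empty store
            db.Set(BitConverter.GetBytes(42), Encoding.UTF8.GetBytes("forty-two"));
        }
        // Reopening the store reads the value back from disk; Get returns null for missing keys.
        using (var db = new KeyValueStore(path)) {
            byte[] value = db.Get(BitConverter.GetBytes(42));
            Console.WriteLine(value == null ? "<missing>" : Encoding.UTF8.GetString(value));
        }
    }
}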
public void CrashTestOnMerge() { string path = Path.GetFullPath("TestData\\CrashTestOnMerge"); using (var db = new KeyValueStore(path)) { db.Truncate(); } var doneSetting = new EventWaitHandle(false, EventResetMode.ManualReset, "CrashTestOnMerge"); doneSetting.Reset(); string testPath = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().GetName().CodeBase), "RazorTest.exe"); var process = Process.Start(testPath, "CrashTestOnMerge"); doneSetting.WaitOne(30000); process.Kill(); process.WaitForExit(); // Open the database created by the other program using (var db = new KeyValueStore(path)) { db.Manifest.Logger = (msg) => Console.WriteLine(msg); Console.WriteLine("Begin enumeration."); ByteArray lastKey = new ByteArray(); int ct = 0; foreach (var pair in db.Enumerate()) { ByteArray k = new ByteArray(pair.Key); Assert.True(lastKey.CompareTo(k) < 0); lastKey = k; ct++; } Assert.AreEqual(50000, ct); Console.WriteLine("Found {0} items in the crashed database.", ct); } }
public static void CrashTestOnMerge() { string path = Path.GetFullPath("TestData\\CrashTestOnMerge"); int num_items = 50000; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); for (int i = 0; i < num_items; i++) { byte[] keyBytes = new byte[40]; Array.Copy(BitConverter.GetBytes(i).Reverse().ToArray(), keyBytes, 4); Array.Copy(ByteArray.Random(36).InternalBytes, 0, keyBytes, 4, 36); var randomKey = new ByteArray(keyBytes); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); } // Signal our test to fall through try { ManualResetEvent.OpenExisting("CrashTestOnMerge").Set(); } catch (WaitHandleCannotBeOpenedException e) { Console.WriteLine("{0}", e); } } }
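// Both CrashTestOnMerge halves coordinate through a named EventWaitHandle: the test process creates
// the handle and waits on it while the writer process runs, and the writer signals it once all keys
// are written so the test knows when to kill the process. A stripped-down sketch of that signaling
// pattern follows; the handle name and timeout are illustrative.
using System.Threading;

static class CrashSignalSketch {
    const string HandleName = "CrashSignalSketch"; // shared by both processes (illustrative name)

    // Test/parent side: create the named event before launching the worker, then block until it signals.
    public static void WaitForWorker(int timeoutMs) {
        var done = new EventWaitHandle(false, EventResetMode.ManualReset, HandleName);
        done.Reset();
        // ... start the worker process here ...
        done.WaitOne(timeoutMs); // returns as soon as the worker calls Signal(), or after the timeout
    }

    // Worker side: signal the event once the writes are complete.
    public static void Signal() {
        try {
            EventWaitHandle.OpenExisting(HandleName).Set();
        } catch (WaitHandleCannotBeOpenedException) {
            // no listener created the handle; nothing to signal
        }
    }
}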
public static void RunTableMergePass(KeyValueStore kvStore) { try { Interlocked.Increment(ref kvStore.mergeCount); lock (kvStore.mergeLock) { RazorCache cache = kvStore.Cache; Manifest manifest = kvStore.Manifest; while (true) { bool mergedDuringLastPass = false; using (var manifestInst = kvStore.Manifest.GetLatestManifest()) { // Handle level 0 (merge all pages) if (manifestInst.GetNumPagesAtLevel(0) >= Config.MaxPagesOnLevel(0)) { mergedDuringLastPass = true; var inputPageRecords = manifestInst.GetPagesAtLevel(0).OrderBy(p => p.Version).ToList(); var startKey = inputPageRecords.Min(p => p.FirstKey); var endKey = inputPageRecords.Max(p => p.LastKey); var mergePages = manifestInst.FindPagesForKeyRange(1, startKey, endKey).AsPageRefs().ToList(); var allInputPages = inputPageRecords.AsPageRefs().Concat(mergePages).ToList(); var outputPages = SortedBlockTable.MergeTables(cache, manifest, 1, allInputPages, ExceptionHandling.ThrowAll, null).ToList(); manifest.ModifyPages(outputPages, allInputPages); manifest.LogMessage("Merge Level 0 => InputPages: {0} OutputPages:{1}", string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()), string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()) ); } // handle the rest of the levels (merge only one page upwards) for (int level = 1; level < manifestInst.NumLevels - 1; level++) { if (manifestInst.GetNumPagesAtLevel(level) >= Config.MaxPagesOnLevel(level)) { mergedDuringLastPass = true; var inputPage = manifest.NextMergePage(level); var mergePages = manifestInst.FindPagesForKeyRange(level + 1, inputPage.FirstKey, inputPage.LastKey).ToList(); var inputPageRecords = mergePages.Concat(new PageRecord[] { inputPage }); var allInputPages = inputPageRecords.AsPageRefs().ToList(); var outputPages = SortedBlockTable.MergeTables(cache, manifest, level + 1, allInputPages, ExceptionHandling.ThrowAll, null); // Notify if a merge happened, implemented for testing primarily if (kvStore.MergeCallback != null) kvStore.MergeCallback(level, inputPageRecords, outputPages); manifest.ModifyPages(outputPages, allInputPages); manifest.LogMessage("Merge Level >0 => InputPages: {0} OutputPages:{1}", string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()), string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()) ); } } } // No more merging is needed, we are finished with this pass if (!mergedDuringLastPass) return; } } } finally { Interlocked.Decrement(ref kvStore.mergeCount); } }
public void LargeDataSetGetTest() {
    string path = Path.GetFullPath("TestData\\LargeDataSetGetTest");
    int totalSize = 0;
    int num_items = 500;
    var timer = new Stopwatch();

    using (var db = new KeyValueStore(path)) {
        db.Truncate();

        // Generate a data value that is larger than the block size.
        var value = ByteArray.Random(Config.SortedBlockSize + 256);

        // Do it enough times to ensure a roll-over
        for (int i = 0; i < num_items; i++) {
            var key = BitConverter.GetBytes(i);
            db.Set(key, value.InternalBytes);
            totalSize += value.InternalBytes.Length;
        }

        timer.Start();
        for (int i = 0; i < num_items; i++) {
            var key = BitConverter.GetBytes(i);
            Assert.AreEqual(value.InternalBytes, db.Get(key));
        }
        timer.Stop();

        Console.WriteLine("Randomized read throughput of {0} MB/s (avg {1} ms per lookup)",
            (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0),
            timer.Elapsed.TotalMilliseconds / (double)num_items);
    }
}
static void CheckDatabase(string baseDir) {
    Console.WriteLine("Checking Key Value Store '{0}'", baseDir);
    RazorCache cache = new RazorCache();
    var kv = new KeyValueStore(baseDir, cache);
    try {
        kv.ScanCheck();
    } finally {
        kv.Close();
    }
}
public DBController(string journalFile, IEnumerable<IDataVizFactory> vizFactories) {
    string path = Path.GetDirectoryName(journalFile);
    _db = new KeyValueStore(path);

    foreach (var vf in vizFactories) {
        var v = vf.GetVisualizer(_db);
        if (v != null) {
            _viz = v;
            break;
        }
    }
    if (_viz == null)
        _viz = new ByteViz();
}
public ObjectMetadataStore(Config config) {
    if (null == config) {
        throw new ArgumentNullException("config");
    }
    _config = config;
    _store = new KeyValueStore(Path.Combine(config.BaseDataPath, "ObjectMetadataStore"));
    _nsStore = new KeyValueStore(Path.Combine(config.BaseDataPath, "ObjectNameSpaceStore"));
    _nsCache = new Dictionary<string, ObjectNameSpaceConfig>(StringComparer.OrdinalIgnoreCase);
    _mdCache = new Dictionary<string, ObjectMetadata>(StringComparer.OrdinalIgnoreCase);
    _InitCache();
}
public void AddObjectsAndLookup() {
    string path = Path.GetFullPath("TestData\\AddObjectsAndLookup");

    using (var db = new KeyValueStore(path)) {
        db.Truncate();

        var indexed = new SortedDictionary<string, byte[]>();
        indexed["NumberType"] = Encoding.UTF8.GetBytes("Fib");
        db.Set(BitConverter.GetBytes(112), Encoding.UTF8.GetBytes("112"), indexed);
        db.Set(BitConverter.GetBytes(1123), Encoding.UTF8.GetBytes("1123"), indexed);
        db.Set(BitConverter.GetBytes(11235), Encoding.UTF8.GetBytes("11235"), indexed);
        db.Set(BitConverter.GetBytes(112358), Encoding.UTF8.GetBytes("112358"), indexed);

        indexed["NumberType"] = Encoding.UTF8.GetBytes("Seq");
        db.Set(BitConverter.GetBytes(1), Encoding.UTF8.GetBytes("1"), indexed);
        db.Set(BitConverter.GetBytes(2), Encoding.UTF8.GetBytes("2"), indexed);
        db.Set(BitConverter.GetBytes(3), Encoding.UTF8.GetBytes("3"), indexed);
        db.Set(BitConverter.GetBytes(4), Encoding.UTF8.GetBytes("4"), indexed);

        indexed["NumberType"] = Encoding.UTF8.GetBytes("Zero");
        db.Set(BitConverter.GetBytes(0), Encoding.UTF8.GetBytes("0"), indexed);
    }

    using (var db = new KeyValueStore(path)) {
        var zeros = db.Find("NumberType", Encoding.UTF8.GetBytes("Zero")).ToList();
        Assert.AreEqual(1, zeros.Count());
        Assert.AreEqual("0", Encoding.UTF8.GetString(zeros[0].Value));

        var seqs = db.Find("NumberType", Encoding.UTF8.GetBytes("Seq")).ToList();
        Assert.AreEqual(4, seqs.Count());
        Assert.AreEqual("1", Encoding.UTF8.GetString(seqs[0].Value));
        Assert.AreEqual("2", Encoding.UTF8.GetString(seqs[1].Value));
        Assert.AreEqual("3", Encoding.UTF8.GetString(seqs[2].Value));
        Assert.AreEqual("4", Encoding.UTF8.GetString(seqs[3].Value));

        var fib = db.Find("NumberType", Encoding.UTF8.GetBytes("Fib")).ToList();
        Assert.AreEqual(4, fib.Count());
        Assert.AreEqual("1123", Encoding.UTF8.GetString(fib[0].Value));
        Assert.AreEqual("112", Encoding.UTF8.GetString(fib[1].Value));
        Assert.AreEqual("11235", Encoding.UTF8.GetString(fib[2].Value));
        Assert.AreEqual("112358", Encoding.UTF8.GetString(fib[3].Value));

        var non = db.Find("NoIndex", new byte[] { 23 }).ToList();
        Assert.AreEqual(0, non.Count());

        non = db.Find("NumberType", Encoding.UTF8.GetBytes("Unfound")).ToList();
        Assert.AreEqual(0, non.Count());
    }
}
public void CreateLargeObjectDataStore() {
    string path = Path.GetFullPath("TestData\\LargeObjectV1");
    if (!Directory.Exists(path))
        Directory.CreateDirectory(path);

    using (var db = new KeyValueStore(path)) {
        db.Truncate();
        for (int i = 0; i < 6; i++) {
            var k0 = KeyEx.Random(40);
            var v0 = Value.Random(Config.MaxSmallValueSize * 100);
            db.Set(k0.InternalBytes, v0.InternalBytes);
        }
    }
}
public KeyValueStore GetSecondaryIndex(string IndexName) { KeyValueStore indexStore = null; lock (_secondaryIndexes) { if (!_secondaryIndexes.TryGetValue(IndexName, out indexStore)) { indexStore = new KeyValueStore(Config.IndexBaseName(Manifest.BaseFileName, IndexName), _cache); if (Manifest.Logger != null) { indexStore.Manifest.Logger = msg => Manifest.Logger(string.Format("{0}: {1}", IndexName, msg)); } _secondaryIndexes.Add(IndexName, indexStore); } } return(indexStore); }
public IEnumerable <KeyValuePair <byte[], byte[]> > FindStartsWith(string indexName, byte[] lookupValue) { KeyValueStore indexStore = GetSecondaryIndex(indexName); // Loop over the values foreach (var pair in indexStore.EnumerateFromKey(lookupValue)) { var key = pair.Key; var value = pair.Value; // construct our index key pattern (lookupvalue | key) if (ByteArray.CompareMemCmp(key, 0, lookupValue, 0, lookupValue.Length) == 0) { int offset = 0; byte[] objectKey = null; if (Manifest.RazorFormatVersion < 2) { if (ByteArray.CompareMemCmp(key, key.Length - value.Length, value, 0, value.Length) == 0) { objectKey = pair.Value; } } else { int indexKeyLen = Helper.Decode7BitInt(pair.Value, ref offset); if (lookupValue.Length <= indexKeyLen) { objectKey = ItemKeyFromIndex(pair, indexKeyLen); } } if (objectKey != null) { var primaryValue = Get(objectKey); if (primaryValue != null) { yield return(new KeyValuePair <byte[], byte[]>(objectKey, primaryValue)); } } } else { // if the above condition was not met then we must have enumerated past the end of the indexed value yield break; } } }
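// FindStartsWith walks the secondary index from the lookup prefix, stops as soon as an entry no
// longer starts with that prefix, and resolves each hit back to the primary record with Get.
// A usage sketch follows; the "Name" index, keys, and payloads are illustrative.
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using RazorDB;

class PrefixLookupSketch {
    static void Main() {
        string path = Path.GetFullPath("TestData\\PrefixLookupSketch"); // illustrative location
        using (var db = new KeyValueStore(path)) {
            db.Truncate();

            var idx = new SortedDictionary<string, byte[]>();
            idx["Name"] = Encoding.UTF8.GetBytes("alice");
            db.Set(BitConverter.GetBytes(1), Encoding.UTF8.GetBytes("record 1"), idx);
            idx["Name"] = Encoding.UTF8.GetBytes("albert");
            db.Set(BitConverter.GetBytes(2), Encoding.UTF8.GetBytes("record 2"), idx);
            idx["Name"] = Encoding.UTF8.GetBytes("bob");
            db.Set(BitConverter.GetBytes(3), Encoding.UTF8.GetBytes("record 3"), idx);

            // Only the two "al..." entries match; "bob" is never enumerated.
            foreach (var pair in db.FindStartsWith("Name", Encoding.UTF8.GetBytes("al")))
                Console.WriteLine(Encoding.UTF8.GetString(pair.Value));
        }
    }
}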
public void MarkKeyValueStoreAsModified(KeyValueStore kvStore) { // Only schedule a merge run if no merging is happening if (kvStore.mergeCount == 0) { // determine if we've reached the next time threshold for another update long ticks = Stopwatch.GetTimestamp(); long ticksTillNext = kvStore.ticksTillNextMerge; if (ticks > ticksTillNext) { // Schedule a tablemerge run on the threadpool ThreadPool.QueueUserWorkItem((o) => { RunTableMergePass(kvStore); }); } kvStore.ticksTillNextMerge = ticks + pauseTime; } }
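// MarkKeyValueStoreAsModified throttles merge scheduling: a pass is queued only when no merge is
// already running and the Stopwatch timestamp has passed ticksTillNextMerge, which is then pushed
// out by pauseTime. Stopwatch timestamps count Stopwatch.Frequency ticks per second, so a wall-clock
// pause has to be converted into that unit before it can be compared against GetTimestamp().
// The 500 ms interval below is an illustrative assumption, not RazorDB's actual pauseTime.
using System.Diagnostics;

static class MergeThrottleSketch {
    // ~500 ms expressed in Stopwatch ticks (illustrative interval)
    public static readonly long PauseTicks = Stopwatch.Frequency / 2;

    // Deadline for the next merge pass, in the same units Stopwatch.GetTimestamp() returns.
    public static long NextDeadline() {
        return Stopwatch.GetTimestamp() + PauseTicks;
    }
}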
public void RemoveIndexRangeForValue(string indexName, byte[] startAt, byte[] value) { KeyValueStore indexStore = GetSecondaryIndex(indexName); var pairs = indexStore.EnumerateFromKey(startAt); foreach (var pair in pairs) { var itemKey = KeyValueStore.ItemKeyFromIndex(pair); if (ByteArray.CompareMemCmp(itemKey, value) == 0) { indexStore.Delete(pair.Key); } if (ByteArray.CompareMemCmp(startAt, 0, pair.Key, 0, startAt.Length) == 0) { continue; } break; } }
public void AddToIndex(byte[] itemKey, IEnumerable <KeyValuePair <string, byte[]> > indexValues) { foreach (var pair in indexValues) { var IndexName = pair.Key; // Construct Index key by concatenating the indexed value and the target key byte[] indexValue = pair.Value; byte[] indexKey = new byte[itemKey.Length + indexValue.Length]; indexValue.CopyTo(indexKey, 0); itemKey.CopyTo(indexKey, indexValue.Length); KeyValueStore indexStore = GetSecondaryIndex(IndexName); // get indexkey length encoding var lenBytes = new byte[8]; var indexValueLen = Helper.Encode7BitInt(lenBytes, indexValue.Length); var indexValueLenBytes = new byte[indexValueLen]; Helper.BlockCopy(lenBytes, 0, indexValueLenBytes, 0, indexValueLen); indexStore.Set(indexKey, indexValueLenBytes); // we know the key length } }
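// AddToIndex lays each secondary-index entry out as indexKey = indexValue | itemKey, and stores the
// 7-bit-encoded length of indexValue as the entry's value; that length is what later lets the store
// split the composite key back apart (the real code does this via Helper.Decode7BitInt and
// ItemKeyFromIndex). A sketch of the reverse operation under that layout assumption:
using System;

static class IndexLayoutSketch {
    // Recover the primary item key from a composite index key once the index-value length is known.
    public static byte[] RecoverItemKey(byte[] indexKey, int indexValueLength) {
        var itemKey = new byte[indexKey.Length - indexValueLength];
        Array.Copy(indexKey, indexValueLength, itemKey, 0, itemKey.Length);
        return itemKey;
    }
}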
public static void CrashTestBeforeMerge() { string path = Path.GetFullPath("TestData\\CrashTestBeforeMerge"); int num_items = 10000; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); for (int i = 0; i < num_items; i++) { var randomKey = ByteArray.Random(4); var randomValue = ByteArray.Random(5); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); } // Signal our test to fall through try { ManualResetEvent.OpenExisting("CrashTestBeforeMerge").Set(); } catch (WaitHandleCannotBeOpenedException e) { Console.WriteLine("{0}", e); } } }
public static void RunTableMergePass(KeyValueStore kvStore) { try { Interlocked.Increment(ref kvStore.mergeCount); lock (kvStore.mergeLock) { RazorCache cache = kvStore.Cache; Manifest manifest = kvStore.Manifest; while (true) { bool mergedDuringLastPass = false; using (var manifestInst = kvStore.Manifest.GetLatestManifest()) { // Handle level 0 (merge all pages) if (manifestInst.GetNumPagesAtLevel(0) >= Config.MaxPagesOnLevel(0)) { mergedDuringLastPass = true; var inputPageRecords = manifestInst.GetPagesAtLevel(0).OrderBy(p => p.Version).ToList(); var startKey = inputPageRecords.Min(p => p.FirstKey); var endKey = inputPageRecords.Max(p => p.LastKey); var mergePages = manifestInst.FindPagesForKeyRange(1, startKey, endKey).AsPageRefs().ToList(); var allInputPages = inputPageRecords.AsPageRefs().Concat(mergePages).ToList(); var outputPages = SortedBlockTable.MergeTables(cache, manifest, 1, allInputPages, ExceptionHandling.ThrowAll, null).ToList(); manifest.ModifyPages(outputPages, allInputPages); manifest.LogMessage("Merge Level 0 => InputPages: {0} OutputPages:{1}", string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()), string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()) ); } // handle the rest of the levels (merge only one page upwards) for (int level = 1; level < manifestInst.NumLevels - 1; level++) { if (manifestInst.GetNumPagesAtLevel(level) >= Config.MaxPagesOnLevel(level)) { mergedDuringLastPass = true; var inputPage = manifest.NextMergePage(level); var mergePages = manifestInst.FindPagesForKeyRange(level + 1, inputPage.FirstKey, inputPage.LastKey).ToList(); var inputPageRecords = mergePages.Concat(new PageRecord[] { inputPage }); var allInputPages = inputPageRecords.AsPageRefs().ToList(); var outputPages = SortedBlockTable.MergeTables(cache, manifest, level + 1, allInputPages, ExceptionHandling.ThrowAll, null); // Notify if a merge happened, implemented for testing primarily if (kvStore.MergeCallback != null) { kvStore.MergeCallback(level, inputPageRecords, outputPages); } manifest.ModifyPages(outputPages, allInputPages); manifest.LogMessage("Merge Level >0 => InputPages: {0} OutputPages:{1}", string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()), string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()) ); } } } // No more merging is needed, we are finished with this pass if (!mergedDuringLastPass) { return; } } } } finally { Interlocked.Decrement(ref kvStore.mergeCount); } }
public void BulkSetEnumerateAll3() { string path = Path.GetFullPath("TestData\\BulkSetEnumerateAll3"); var timer = new Stopwatch(); using (var db = new KeyValueStore(path)) { db.Truncate(); int totalSize = 0; db.Manifest.Logger = msg => Console.WriteLine(msg); int num_items = 1000000; timer.Start(); for (int i = 0; i < num_items; i++) { byte[] key = new byte[8]; BitConverter.GetBytes(i % 100).CopyTo(key,0); BitConverter.GetBytes(i).CopyTo(key,4); byte[] value = BitConverter.GetBytes(i); db.Set(key, value); totalSize += 8 + 4; } timer.Stop(); Console.WriteLine("Wrote data (with indexing) at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); timer.Start(); var ctModZeros = db.EnumerateFromKey(BitConverter.GetBytes(0)).Count(); timer.Stop(); Console.WriteLine("Scanned index at a throughput of {0} items/s", (double) ctModZeros / timer.Elapsed.TotalSeconds); } }
public void KeyDensityMaximumPageOverlapTest() { Action<KeyValueStore, int,int, int> InsertDenseBlock = (KeyValueStore db, int key, int density, int count) => { byte[] value = ByteArray.Random(Config.MaxSmallValueSize - 12).InternalBytes; for (int i = 0; i < count; i++) { byte[] keyBytes = BitConverter.GetBytes(key + density * i); Array.Reverse(keyBytes); // make sure they are in lexicographical order so they sort closely together. db.Set(keyBytes, value); } }; // Make sure that when we have high key density, pages don't start to overlap with more than 10 pages at the level higher than the current one. string path = Path.GetFullPath("TestData\\KeyDensityMaximumPageOverlapTest"); using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); InsertDenseBlock(db, 100, 1, 10000); } Console.WriteLine("Database is densely seeded."); // Close out the db to sync up all pending merge operations using (var db = new KeyValueStore(path)) { db.Manifest.Logger = (msg) => Console.WriteLine(msg); // Insert a spanning block that will cover all of the area already covered InsertDenseBlock(db, 0, 10000, 2); } Console.WriteLine("Spanning block inserted."); // Close out the db to sync up all pending merge operations using (var db = new KeyValueStore(path)) { db.Manifest.Logger = (msg) => Console.WriteLine(msg); db.MergeCallback = (level, input, output) => { // We should not have more than 12 pages on the input side or else our page overlap throttle isn't working properly. Assert.LessOrEqual(input.Count(), 12); }; // Now insert a bunch of data into a non-overlapping portion of the space in order to force the spanning block to rise through the levels. InsertDenseBlock(db, 100000, 1, 1000); } }
static void RemoveOrphanedTables(string baseDir) {
    Console.WriteLine("Removing Orphaned Tables '{0}'", baseDir);
    RazorCache cache = new RazorCache();
    var kv = new KeyValueStore(baseDir, cache);
    kv.Manifest.Logger = (msg) => Console.WriteLine(msg);
    try {
        kv.RemoveOrphanedPages();
    } finally {
        kv.Close();
    }
}
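// CheckDatabase and RemoveOrphanedTables read like subcommands of a maintenance console tool.
// A hypothetical dispatcher that sits alongside those helpers might look like this; the command
// names and usage text are illustrative.
static int Main(string[] args) {
    if (args.Length != 2) {
        Console.WriteLine("usage: <check|remove-orphans> <database-dir>");
        return 1;
    }
    switch (args[0]) {
        case "check":
            CheckDatabase(args[1]);
            break;
        case "remove-orphans":
            RemoveOrphanedTables(args[1]);
            break;
        default:
            Console.WriteLine("unknown command: {0}", args[0]);
            return 1;
    }
    return 0;
}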
public void BulkSetEnumerateAll2() { string path = Path.GetFullPath("TestData\\BulkSetEnumerateAll2"); var timer = new Stopwatch(); int totalSize = 0; int readSize = 0; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); timer.Start(); for (int i = 0; i < 105000; i++) { var randomKey = BitConverter.GetBytes(i); var randomValue = BitConverter.GetBytes(i); db.Set(randomKey, randomValue); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); Console.WriteLine("Begin enumeration."); timer.Start(); ByteArray lastKey = ByteArray.Empty; int ct = 0; foreach (var pair in db.Enumerate()) { try { ByteArray k = new ByteArray(pair.Key); ByteArray v = new ByteArray(pair.Value); Assert.AreEqual(k, v); Assert.True(lastKey.CompareTo(k) < 0); lastKey = k; ct++; } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}",insertedItem.Key,e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } timer.Stop(); Assert.AreEqual(105000, ct, "105000 items should be enumerated."); Console.WriteLine("Enumerated read throughput of {0} MB/s (avg {1} ms per 1000 items)", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)105); } }
public void BulkSet() { string path = Path.GetFullPath("TestData\\BulkSet"); var timer = new Stopwatch(); int totalSize = 0; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => { Console.WriteLine(msg); }; timer.Start(); for (int i = 0; i < 105000; i++) { var randomKey = ByteArray.Random(40); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } }
public void TestTooLargeData() {
    Assert.Throws<InvalidDataException>(() => {
        string path = Path.GetFullPath("TestData\\TestTooLargeData");
        using (var db = new KeyValueStore(path)) {
            db.Set(Key.Random(10).KeyBytes, ByteArray.Random(Config.MaxLargeValueSize).InternalBytes);
        }
    });
}
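// The test above shows that a value of Config.MaxLargeValueSize bytes is already rejected
// (Set throws InvalidDataException), so callers that might produce oversized payloads can guard
// before writing. A hedged sketch of such a guard, meant to sit alongside the test code; the
// threshold is inferred from the test and the exception message is illustrative.
static void SetChecked(KeyValueStore db, byte[] key, byte[] value) {
    if (value.Length >= Config.MaxLargeValueSize)
        throw new ArgumentException(string.Format(
            "value is {0} bytes; values must be smaller than Config.MaxLargeValueSize ({1})",
            value.Length, Config.MaxLargeValueSize));
    db.Set(key, value);
}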
public void BulkSetEnumerateAllWithMissingSBT_AttemptRecovery() { try { RazorDB.Config.ExceptionHandling = ExceptionHandling.AttemptRecovery; string path = Path.GetFullPath("TestData\\BulkSetEnumerateAllWithMissingSBT_AttemptRecovery"); var timer = new Stopwatch(); int totalSize = 0; int readSize = 0; Action<string> logger = (msg) => { Console.WriteLine(msg); }; using (var db = new KeyValueStore(path)) { db.Truncate(); timer.Start(); for (int i = 0; i < 500000; i++) { var randomKey = BitConverter.GetBytes(i); var randomValue = BitConverter.GetBytes(i); db.Set(randomKey, randomValue); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } // delete the sbt files var files = Directory.GetFiles(path, "*.sbt"); foreach (var fname in files) File.Delete(fname); // Close and re-open the database to force all the sstable merging to complete. Console.WriteLine("Begin enumeration."); using (var db = new KeyValueStore(path)) { timer.Reset(); timer.Start(); ByteArray lastKey = ByteArray.Empty; int ct = 0; foreach (var pair in db.Enumerate()) { try { ByteArray k = new ByteArray(pair.Key); ByteArray v = new ByteArray(pair.Value); Assert.AreEqual(k, v); Assert.True(lastKey.CompareTo(k) < 0); lastKey = k; ct++; } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}",insertedItem.Key,e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } timer.Stop(); Assert.AreEqual(80568, ct); Console.WriteLine("Enumerated read throughput of {0} MB/s (avg {1} ms per 1000 items)", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)105); } // add some more records after deleting files using (var db = new KeyValueStore(path)) { timer.Start(); // add 1,000,000 new keys for (int i = 1000000; i < 3000000; i++) { var randomKey = BitConverter.GetBytes(i); var randomValue = BitConverter.GetBytes(i); db.Set(randomKey, randomValue); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } // Close and re-open the database to force all the sstable merging to complete. Console.WriteLine("Begin enumeration."); using (var db = new KeyValueStore(path)) { timer.Reset(); timer.Start(); ByteArray lastKey = ByteArray.Empty; int ct = 0; foreach (var pair in db.Enumerate()) { try { ByteArray k = new ByteArray(pair.Key); ByteArray v = new ByteArray(pair.Value); Assert.AreEqual(k, v); Assert.True(lastKey.CompareTo(k) < 0); lastKey = k; ct++; } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}",insertedItem.Key,e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } timer.Stop(); Assert.AreEqual(2080568, ct); Console.WriteLine("Enumerated read throughput of {0} MB/s (avg {1} ms per 1000 items)", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)105); } } finally { RazorDB.Config.ExceptionHandling = ExceptionHandling.ThrowAll; } }
public void BulkSetEnumerateAllWithMissingSBT_ThrowAll() { string path = Path.GetFullPath("TestData\\BulkSetEnumerateAllWithMissingSBT_ThrowAll"+DateTime.Now.Ticks.ToString()); var timer = new Stopwatch(); int totalSize = 0; int readSize = 0; Action<string> logger = (msg) => { Console.WriteLine(msg); }; using (var db = new KeyValueStore(path)) { db.Truncate(); timer.Start(); for (int i = 0; i < 500000; i++) { var randomKey = BitConverter.GetBytes(i); var randomValue = BitConverter.GetBytes(i); db.Set(randomKey, randomValue); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } // delete the sbt files var files = Directory.GetFiles(path, "*.sbt"); foreach(var fname in files) File.Delete(fname); // Close and re-open the database to force all the sstable merging to complete. Console.WriteLine("Begin enumeration."); RazorDB.Config.ExceptionHandling = ExceptionHandling.ThrowAll; Assert.Throws(typeof(FileNotFoundException), () => { using (var db = new KeyValueStore(path)) { foreach (var pair in db.Enumerate()); } }); }
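// The two missing-SBT tests differ only in RazorDB.Config.ExceptionHandling: AttemptRecovery skips
// past unreadable tables and enumerates what survives, while ThrowAll surfaces the
// FileNotFoundException. The setting is process-wide, so the AttemptRecovery test flips it and
// restores it in a finally block. A sketch of that pattern (assumes the surrounding test context):
static void WithRecoveryEnabled(Action body) {
    var previous = RazorDB.Config.ExceptionHandling;
    RazorDB.Config.ExceptionHandling = ExceptionHandling.AttemptRecovery;
    try {
        body(); // e.g. enumerate a store that may have damaged or missing .sbt files
    } finally {
        RazorDB.Config.ExceptionHandling = previous;
    }
}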
public void BulkSetEnumerateFromKey() { string path = Path.GetFullPath("TestData\\BulkSetEnumerateFromKey"); var timer = new Stopwatch(); int totalSize = 0; int readSize = 0; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); timer.Start(); for (int i = 0; i < 105000; i++) { var randomKey = BitConverter.GetBytes(i).Reverse().ToArray(); var randomValue = BitConverter.GetBytes(i); db.Set(randomKey, randomValue); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); Console.WriteLine("Begin enumeration."); timer.Start(); int lastKeyNum = 0; int ct = 0; int sum = 0; var searchKey = BitConverter.GetBytes(50000).Reverse().ToArray(); foreach (var pair in db.EnumerateFromKey( searchKey )) { try { int num = BitConverter.ToInt32(pair.Key.Reverse().ToArray(),0); Assert.GreaterOrEqual(num, 50000); sum += num; Assert.Less(lastKeyNum, num); lastKeyNum = num; ct++; } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}",insertedItem.Key,e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } timer.Stop(); Assert.AreEqual(55000, ct, "55000 items should be enumerated."); Console.WriteLine("Enumerated read throughput of {0} MB/s (avg {1} ms per 1000 items)", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)105); } }
public void BulkSetGetWhileReMerging() { string path = Path.GetFullPath("TestData\\BulkSetGetWhileReMerging"); var timer = new Stopwatch(); int totalSize = 0; var items = new Dictionary<ByteArray, ByteArray>(); using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); timer.Start(); for (int i = 0; i < 105000; i++) { var randomKey = ByteArray.Random(40); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); items[randomKey] = randomValue; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); Console.WriteLine("Begin randomized read back."); timer.Start(); foreach (var insertedItem in items) { try { byte[] value = db.Get(insertedItem.Key.InternalBytes); Assert.AreEqual(insertedItem.Value, new ByteArray(value)); } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}", insertedItem.Key, e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } timer.Stop(); Console.WriteLine("Randomized read throughput of {0} MB/s (avg {1} ms per lookup)", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)items.Count); } }
public int CountIndex(string indexName) {
    KeyValueStore indexStore = GetSecondaryIndex(indexName);
    return indexStore.Enumerate().Count();
}
public void DumpKeySpaceUsed() {
    double valueBytes = 0L;
    double keyBytes = 0L;
    double dupBytes = 0L;
    double totalRecords = 0L;

    Action<string> dumpFolderBytes = (folder) => {
        double tableValBytes = 0L;
        double tableKeyBytes = 0L;
        double tableDupBytes = 0L;
        double tableRecords = 0L;
        byte[] lastkey = null;

        using (var kvs = new KeyValueStore(folder)) {
            foreach (var pair in kvs.Enumerate()) {
                tableRecords++;
                tableValBytes += pair.Value.Length;
                tableKeyBytes += pair.Key.Length;
                if (lastkey != null) {
                    // Count how many leading bytes this key shares with the previous (sorted) key.
                    int i = 0;
                    for (i = 0; i < lastkey.Length && i < pair.Key.Length; i++)
                        if (lastkey[i] != pair.Key[i])
                            break;
                    tableDupBytes += i;
                }
                lastkey = pair.Key;
            }
        }

        valueBytes += tableValBytes;
        keyBytes += tableKeyBytes;
        dupBytes += tableDupBytes;
        totalRecords += tableRecords;

        Console.WriteLine("{0} Total Bytes: {1}", folder, tableValBytes + tableKeyBytes);
        Console.WriteLine(" #Records: {0}", tableRecords);
        Console.WriteLine(" Key Bytes: {0}", tableKeyBytes);
        Console.WriteLine(" Value Bytes: {0}", tableValBytes);
        Console.WriteLine(" Dupl. Bytes: {0}", tableDupBytes);
        Console.WriteLine(" %Savings in keys: {0}%", tableDupBytes / tableKeyBytes * 100);
        Console.WriteLine(" %Savings overall: {0}%", tableDupBytes / (tableValBytes + tableKeyBytes) * 100);
        Console.WriteLine();
    };

    var baseFolder = @"d:\ncoverdata\ncover";
    foreach (var folder in Directory.GetDirectories(baseFolder, "*", SearchOption.AllDirectories))
        dumpFolderBytes(folder);

    Console.WriteLine("Total KeyValueStore Bytes: {0}", valueBytes + keyBytes);
    Console.WriteLine(" #Records: {0}", totalRecords);
    Console.WriteLine(" Key Bytes: {0}", keyBytes);
    Console.WriteLine(" Value Bytes: {0}", valueBytes);
    Console.WriteLine(" Dupl. Bytes: {0}", dupBytes);
    Console.WriteLine(" %Savings in keys: {0}%", dupBytes / keyBytes * 100);
    Console.WriteLine(" %Savings overall: {0}%", dupBytes / (valueBytes + keyBytes) * 100);
}
public void TestLargeAndSmallOddWrites() { string path = Path.GetFullPath("TestData\\TestLargeAndSmallInterlacedWrites"); using (var db = new KeyValueStore(path)) { db.Truncate(); // Create a random set of keybytes List<byte[]> keys = new List<byte[]>(); for (int i = 0; i < 10; i++) { keys.Add(Key.Random(10).KeyBytes); } // Set Odds to large for (int i = 0; i < keys.Count; i++) { var k = keys[i]; var v = ((i & 1) == 1) ? GenerateBlock(Config.MaxLargeValueSize - 100) : GenerateBlock(10); db.Set(k, v); } // Now check the results for (int i = 0; i < keys.Count; i++) { var k = keys[i]; var v = db.Get(k); CheckBlock(v); if ((i & 1) == 0) { Assert.Less(v.Length, 100, " i = {0} should be small, but size={1}", i, v.Length); } else { Assert.Greater(v.Length, 100, " i = {0} should be large, but size={1}", i, v.Length); } } } }
public void GetAndSetWithDelete() { string path = Path.GetFullPath("TestData\\GetAndSetWithDelete"); using (var db = new KeyValueStore(path)) { db.Truncate(); for (int i = 0; i < 10; i++) { byte[] key = BitConverter.GetBytes(i); byte[] value = Encoding.UTF8.GetBytes("Number " + i.ToString()); db.Set(key, value); } db.Delete(BitConverter.GetBytes(3)); db.Delete(BitConverter.GetBytes(30)); db.Delete(BitConverter.GetBytes(7)); db.Delete(BitConverter.GetBytes(1)); db.Delete(BitConverter.GetBytes(3)); } using (var db = new KeyValueStore(path)) { for (int j = 0; j < 15; j++) { byte[] key = BitConverter.GetBytes(j); byte[] value = db.Get(key); if (j == 3 || j == 1 || j == 7) { Assert.IsNull(value); } else if (j < 10) { Assert.AreEqual(Encoding.UTF8.GetBytes("Number " + j.ToString()), value); } else { Assert.IsNull(value); } } } }
public void BulkSetThreadedGetWhileReMerging() { string path = Path.GetFullPath("TestData\\BulkSetThreadedGetWhileReMerging"); var timer = new Stopwatch(); int totalSize = 0; var items = new Dictionary<ByteArray, ByteArray>(); using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); timer.Start(); int totalItems = 105000; for (int i = 0; i < totalItems; i++) { var randomKey = ByteArray.Random(40); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); items[randomKey] = randomValue; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); List<KeyValuePair<ByteArray,ByteArray>> itemsList = items.ToList(); int numThreads = 10; List<Thread> threads = new List<Thread>(); for (int j = 0; j < numThreads; j++) { threads.Add(new Thread((num) => { int itemsPerThread = totalItems / numThreads; for (int i = 0; i < itemsPerThread; i++) { try { int idx = i * (int)num; byte[] value = db.Get(itemsList[idx].Key.InternalBytes); Assert.AreEqual(itemsList[idx].Value, new ByteArray(value)); } catch (Exception /*e*/) { //Console.WriteLine("Key: {0}\n{1}", insertedItem.Key, e); //Debugger.Launch(); //db.Get(insertedItem.Key.InternalBytes); //db.Manifest.LogContents(); throw; } } })); } timer.Reset(); Console.WriteLine("Begin randomized read back."); timer.Start(); for (int k=0; k < numThreads; k++) { threads[k].Start(k); } threads.ForEach(t => t.Join()); timer.Stop(); Console.WriteLine("Randomized read throughput of {0} MB/s (avg {1} ms per lookup)", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0), (double)timer.Elapsed.TotalSeconds / (double)items.Count); } }
public void BulkSetBulkEnumerateWithCache() { string path = Path.GetFullPath("TestData\\BulkSetBulkEnumerateWithCache"); var timer = new Stopwatch(); int totalSize = 0; int readSize = 0; int num_items = 100000; using (var db = new KeyValueStore(path)) { db.Truncate(); db.Manifest.Logger = (msg) => Console.WriteLine(msg); timer.Start(); for (int i = 0; i < num_items; i++) { var randomKey = ByteArray.Random(40); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); readSize += randomKey.Length + randomValue.Length; totalSize += randomKey.Length + randomValue.Length; } timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); timer.Start(); Assert.AreEqual(num_items, db.Enumerate().Count()); timer.Stop(); Console.WriteLine("Enumerated read throughput of {0} MB/s", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); timer.Reset(); timer.Start(); Assert.AreEqual(num_items, db.Enumerate().Count()); timer.Stop(); Console.WriteLine("Enumerated (second pass) read throughput of {0} MB/s", (double)readSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } }
public void Close(KeyValueStore kvStore) {
    RunTableMergePass(kvStore);
}
public void BulkSetWithDelete() { int numItems = 100000; string path = Path.GetFullPath("TestData\\BulkSetWithDelete"); if (Directory.Exists(path)) Directory.Delete(path, true); Directory.CreateDirectory(path); using (var db = new KeyValueStore(path)) { db.Manifest.Logger = msg => Console.WriteLine(msg); db.Truncate(); Stopwatch timer = new Stopwatch(); timer.Start(); for (int i = 0; i < numItems; i++) { byte[] key = BitConverter.GetBytes(i); byte[] value = Encoding.UTF8.GetBytes("Number " + i.ToString()); db.Set(key, value); } timer.Stop(); Console.WriteLine("Wrote {0} items in {1}s", numItems, timer.Elapsed.TotalSeconds); int skip = 1000; timer.Reset(); timer.Start(); // Delete every skip-th item in reverse order, for (int j = numItems; j >= 0; j--) { if (j % skip == 0) { byte[] key = BitConverter.GetBytes(j); db.Delete(key); } } timer.Stop(); Console.WriteLine("Deleted every {0}-th item in {1}s", skip, timer.Elapsed.TotalSeconds); // Now check all the results timer.Reset(); timer.Start(); for (int k = 0; k < numItems; k++) { byte[] key = BitConverter.GetBytes(k); byte[] value = db.Get(key); if (k % skip == 0) { Assert.IsNull(value); } else { Assert.AreEqual(Encoding.UTF8.GetBytes("Number " + k.ToString()), value, string.Format("{0}", k)); } } timer.Stop(); Console.WriteLine("Read and check every item in {0}s", timer.Elapsed.TotalSeconds); } }
public KeyValueStore GetSecondaryIndex(string IndexName) { KeyValueStore indexStore = null; lock (_secondaryIndexes) { if (!_secondaryIndexes.TryGetValue(IndexName, out indexStore)) { indexStore = new KeyValueStore(Config.IndexBaseName(Manifest.BaseFileName, IndexName), _cache); if (Manifest.Logger != null) { indexStore.Manifest.Logger = msg => Manifest.Logger(string.Format("{0}: {1}", IndexName, msg)); } _secondaryIndexes.Add(IndexName, indexStore); } } return indexStore; }
public void BulkThreadedSet() { int numThreads = 10; int totalItems = 100000; int totalSize = 0; string path = Path.GetFullPath("TestData\\BulkThreadedSet"); List<Thread> threads = new List<Thread>(); using (var db = new KeyValueStore(path)) { db.Truncate(); for (int j = 0; j < numThreads; j++) { threads.Add(new Thread( (num) => { int itemsPerThread = totalItems / numThreads; for (int i = 0; i < itemsPerThread; i++) { var randomKey = new ByteArray( BitConverter.GetBytes( ((int)num * itemsPerThread) + i ) ); var randomValue = ByteArray.Random(256); db.Set(randomKey.InternalBytes, randomValue.InternalBytes); Interlocked.Add(ref totalSize, randomKey.Length + randomValue.Length); } })); } var timer = new Stopwatch(); timer.Start(); // Start all the threads int tnum = 0; threads.ForEach((t) => t.Start(tnum++)); // Wait on all the threads to complete threads.ForEach((t) => t.Join(300000)); timer.Stop(); Console.WriteLine("Wrote sorted table at a throughput of {0} MB/s", (double)totalSize / timer.Elapsed.TotalSeconds / (1024.0 * 1024.0)); } }