// Truncates the current database: closes the journaled memtable, detaches this
// store from the table manager, closes every secondary index, wipes all files
// and directories under the database path, and then re-creates a fresh
// manifest, journal, cache, and (empty) secondary-index map.
public void Truncate() {
    _currentJournaledMemTable.Close();
    TableManager.Default.Close(this);
    foreach (var pair in _secondaryIndexes) {
        pair.Value.Close(FastClose);
    }
    string basePath = Path.GetFullPath(Manifest.BaseFileName);
    foreach (string file in Directory.GetFiles(basePath, "*.*", SearchOption.AllDirectories)) {
        File.Delete(file);
    }
    // Only enumerate the TOP-LEVEL directories and delete each one recursively.
    // The previous AllDirectories enumeration returned nested directories as
    // well, so after a parent was deleted with recursive:true the later
    // Directory.Delete on an already-removed child threw
    // DirectoryNotFoundException. (GetDirectories with no pattern also avoids
    // any "*.*" pattern-matching quirks for extension-less directory names.)
    foreach (string dir in Directory.GetDirectories(basePath)) {
        Directory.Delete(dir, true);
    }
    _manifest = new Manifest(basePath);
    _currentJournaledMemTable = new JournaledMemTable(_manifest.BaseFileName, _manifest.CurrentVersion(0));
    _cache = new RazorCache();
    _secondaryIndexes = new Dictionary<string, KeyValueStore>(StringComparer.OrdinalIgnoreCase);
    Manifest.LogMessage("Database Truncated.");
}
// Opens the sorted block table identified by (baseFileName, level, version),
// resolves its on-disk path, and eagerly loads the table metadata.
public SortedBlockTable(RazorCache cache, string baseFileName, int level, int version) {
    // Record identity and collaborators first, then derive the file path.
    _cache = cache;
    _baseFileName = baseFileName;
    _level = level;
    _version = version;
    _path = Config.SortedBlockTableFile(baseFileName, level, version);
    // Metadata (block counts, index location, etc.) is read up front.
    ReadMetadata();
}
// Returns the index of the data block whose key range could contain 'key',
// or a negative value when the key sorts before the table's first block.
static int FindBlockForKey(string baseFileName, int level, int version, RazorCache indexCache, Key key) {
    Key[] blockIndex = indexCache.GetBlockTableIndex(baseFileName, level, version);
    int pos = Array.BinarySearch(blockIndex, key);
    // Exact hit: that block itself. Miss: BinarySearch yields the bitwise
    // complement of the insertion point, so the candidate block is the one
    // immediately preceding that insertion point.
    return pos >= 0 ? pos : ~pos - 1;
}
// Initializes a new instance of the KeyValueStore rooted at baseFileName.
// Creates the directory if it does not exist, wires up the manifest logger,
// recovers from any previously aborted journal rotation, opens the journaled
// memtable for the current version (reloading persisted data when present),
// and uses the supplied cache — or a private one when the caller passes null.
public KeyValueStore(string baseFileName, RazorCache cache) {
    if (!Directory.Exists(baseFileName)) {
        Directory.CreateDirectory(baseFileName);
    }
    _manifest = new Manifest(baseFileName);
    _manifest.Logger = RazorDBx.C5.Logger.Log;
    int memTableVersion = _manifest.CurrentVersion(0);
    // Check for a previously aborted journal rotation before opening the journal.
    CheckForIncompleteJournalRotation(baseFileName, memTableVersion);
    // Create a new journal for this run (and potentially load from disk, if there was data loaded previously).
    _currentJournaledMemTable = new JournaledMemTable(_manifest.BaseFileName, memTableVersion);
    // Idiomatic null-coalescing instead of the equivalent ternary.
    _cache = cache ?? new RazorCache();
}
// Looks up 'key' in the sorted block table for (baseFileName, level, version).
// Returns true and the found value when the key exists in the candidate block;
// otherwise returns false with value set to Value.Empty. The table is always
// closed before returning, even when the block search throws.
public static bool Lookup(string baseFileName, int level, int version, RazorCache cache, Key key, out Value value, ExceptionHandling exceptionHandling, Action<string> logger) {
    var table = new SortedBlockTable(cache, baseFileName, level, version);
    try {
        int blockNum = FindBlockForKey(baseFileName, level, version, cache, key);
        // A negative index means the key precedes every block; an index past
        // the end means the table has no block that could hold it.
        if (blockNum >= 0 && blockNum < table._dataBlocks) {
            byte[] blockData = table.ReadBlock(LocalThreadAllocatedBlock(), blockNum);
            return SearchBlockForKey(blockData, key, out value);
        }
    } finally {
        table.Close();
    }
    value = Value.Empty;
    return false;
}
// Merges the given source tables into sorted block tables at destinationLevel,
// returning the page records of the output tables. Pages are rolled over when
// they reach the maximum table size, or when the next key would make the page
// span more than the allowed number of pages in the level below (maxKey).
public static IEnumerable<PageRecord> MergeTables(RazorCache cache, Manifest mf, int destinationLevel, IEnumerable<PageRef> tableSpecs, ExceptionHandling exceptionHandling, Action<string> logger) {
    var orderedTableSpecs = tableSpecs.OrderByPagePriority();
    var outputTables = new List<PageRecord>();
    SortedBlockTableWriter writer = null;
    Key firstKey = new Key();
    Key lastKey = new Key();
    Key maxKey = new Key(); // Maximum key we can span with this table to avoid covering more than 10 pages in the destination

    // Starts a new output page beginning at the given pair's key.
    Action<KeyValuePair<Key, Value>> OpenPage = (pair) => {
        writer = new SortedBlockTableWriter(mf.BaseFileName, destinationLevel, mf.NextVersion(destinationLevel));
        firstKey = pair.Key;
        using (var m = mf.GetLatestManifest())
            maxKey = m.FindSpanningLimit(destinationLevel + 1, firstKey);
    };
    // Finishes the current output page and records it.
    Action ClosePage = () => {
        writer.Close();
        outputTables.Add(new PageRecord(destinationLevel, writer.Version, firstKey, lastKey));
        writer = null;
    };

    foreach (var pair in EnumerateMergedTablesPreCached(cache, mf.BaseFileName, orderedTableSpecs, exceptionHandling, logger)) {
        if (writer == null) {
            OpenPage(pair);
        }
        if (writer.WrittenSize >= Config.MaxSortedBlockTableSize || (!maxKey.IsEmpty && pair.Key.CompareTo(maxKey) >= 0)) {
            ClosePage();
            // BUG FIX: ClosePage() nulls out 'writer', so a fresh page must be
            // opened before the WritePair call below; the previous code threw
            // a NullReferenceException as soon as a page rolled over.
            OpenPage(pair);
        }
        writer.WritePair(pair.Key, pair.Value);
        lastKey = pair.Key;
    }
    if (writer != null) {
        ClosePage();
    }
    return outputTables;
}
// Opens every table named in tableSpecs, pre-caches each table's contents in
// memory (Enumerate().ToList()), and yields all pairs merged in key order.
// All opened tables are closed when enumeration completes or is abandoned.
public static IEnumerable<KeyValuePair<Key, Value>> EnumerateMergedTablesPreCached(RazorCache cache, string baseFileName, IEnumerable<PageRef> tableSpecs, ExceptionHandling exceptionHandling, Action<string> logger) {
    var openTables = tableSpecs
        .Select(spec => new SortedBlockTable(cache, baseFileName, spec.Level, spec.Version))
        .ToList();
    try {
        // Each table is materialized to a list so the merge reads from memory
        // rather than issuing interleaved disk reads across all tables.
        var cachedStreams = openTables.Select(t => t.Enumerate().ToList().AsEnumerable());
        foreach (var pair in MergeEnumerator.Merge(cachedStreams, p => p.Key)) {
            yield return pair;
        }
    } finally {
        foreach (var table in openTables) {
            table.Close();
        }
    }
}
// Enumerates all key/value pairs in this table starting at the first key that
// is >= 'key' (or from the beginning when 'key' is empty), using double-buffered
// asynchronous block reads so the next block is fetched while the current one
// is being parsed.
public IEnumerable<KeyValuePair<Key, Value>> EnumerateFromKey(RazorCache indexCache, Key key) {
    if (!FileExists) {
        yield break;
    }
    int startingBlock;
    if (key.Length == 0) {
        startingBlock = 0;
    } else {
        startingBlock = FindBlockForKey(_baseFileName, _level, _version, indexCache, key);
        // A negative index means the key precedes the first block; start at 0.
        if (startingBlock < 0) {
            startingBlock = 0;
        }
    }
    if (startingBlock < _dataBlocks) {
        byte[] allocBlockA = new byte[Config.SortedBlockSize];
        byte[] allocBlockB = new byte[Config.SortedBlockSize];
        byte[] currentBlock = allocBlockA;
        var asyncResult = BeginReadBlock(currentBlock, startingBlock);
        try {
            for (int i = startingBlock; i < _dataBlocks; i++) {
                // Wait on the last block read to complete so we can start processing the data.
                byte[] block = EndReadBlock(asyncResult);
                asyncResult = null;
                // Kick off the next block read asynchronously while we parse this one.
                // BUG FIX: the guard was "i < _dataBlocks", which is always true
                // inside this loop, so the final iteration issued a read for
                // block i + 1 == _dataBlocks — one past the last data block.
                if (i < _dataBlocks - 1) {
                    SwapBlocks(allocBlockA, allocBlockB, ref currentBlock); // swap the blocks so we can issue another disk i/o
                    asyncResult = BeginReadBlock(currentBlock, i + 1);
                }
                int offset = 2; // reset offset, start after tree root pointer
                // On the first block, seek forward to the requested key (if one was given).
                if (i == startingBlock && key.Length != 0) {
                    while (offset >= 0) {
                        var pair = ReadPair(block, ref offset);
                        if (pair.Key.CompareTo(key) >= 0) {
                            yield return pair;
                            break;
                        }
                    }
                }
                // Yield the remainder of the block in order.
                while (offset >= 0) {
                    yield return ReadPair(block, ref offset);
                }
            }
        } finally {
            // Drain any outstanding read so the buffer/handle is not abandoned.
            if (asyncResult != null) {
                EndReadBlock(asyncResult);
            }
        }
    }
}
// Runs merge passes over the store's levels until no level exceeds its page
// budget. Level 0 is merged wholesale into level 1; each higher level pushes
// one page at a time into the level above it. The whole pass is serialized
// via kvStore.mergeLock, and kvStore.mergeCount tracks in-flight passes.
public static void RunTableMergePass(KeyValueStore kvStore) {
    try {
        Interlocked.Increment(ref kvStore.mergeCount);
        lock (kvStore.mergeLock) {
            RazorCache cache = kvStore.Cache;
            Manifest manifest = kvStore.Manifest;
            while (true) {
                bool mergedDuringLastPass = false;
                using (var manifestInst = kvStore.Manifest.GetLatestManifest()) {
                    // Handle level 0 (merge all pages)
                    if (manifestInst.GetNumPagesAtLevel(0) >= Config.MaxPagesOnLevel(0)) {
                        mergedDuringLastPass = true;
                        int Level0PagesToTake = Config.MaxPagesOnLevel(0) * 2; // Grab more pages if they are available (this happens during heavy write pressure)
                        // Oldest pages first, so journal replay order is preserved.
                        var inputPageRecords = manifestInst.GetPagesAtLevel(0).OrderBy(p => p.Version).Take(Level0PagesToTake).ToList();
                        var startKey = inputPageRecords.Min(p => p.FirstKey);
                        var endKey = inputPageRecords.Max(p => p.LastKey);
                        // All level-1 pages overlapping the level-0 key range must participate in the merge.
                        var mergePages = manifestInst.FindPagesForKeyRange(1, startKey, endKey).AsPageRefs().ToList();
                        var allInputPages = inputPageRecords.AsPageRefs().Concat(mergePages).ToList();
                        var outputPages = SortedBlockTable.MergeTables(cache, manifest, 1, allInputPages, ExceptionHandling.ThrowAll, null).ToList();
                        // Atomically swap the merged output pages in and the consumed input pages out.
                        manifest.ModifyPages(outputPages, allInputPages);
                        manifest.LogMessage("Merge Level 0 => InputPages: {0} OutputPages:{1}",
                            string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()),
                            string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray())
                        );
                    }
                    // handle the rest of the levels (merge only one page upwards)
                    for (int level = 1; level < manifestInst.NumLevels - 1; level++) {
                        if (manifestInst.GetNumPagesAtLevel(level) >= Config.MaxPagesOnLevel(level)) {
                            mergedDuringLastPass = true;
                            // Round-robin choice of which page to push up from this level.
                            var inputPage = manifest.NextMergePage(level);
                            var mergePages = manifestInst.FindPagesForKeyRange(level + 1, inputPage.FirstKey, inputPage.LastKey).ToList();
                            var inputPageRecords = mergePages.Concat(new PageRecord[] { inputPage });
                            var allInputPages = inputPageRecords.AsPageRefs().ToList();
                            var outputPages = SortedBlockTable.MergeTables(cache, manifest, level + 1, allInputPages, ExceptionHandling.ThrowAll, null);
                            // Notify if a merge happened, implemented for testing primarily
                            if (kvStore.MergeCallback != null) {
                                kvStore.MergeCallback(level, inputPageRecords, outputPages);
                            }
                            manifest.ModifyPages(outputPages, allInputPages);
                            manifest.LogMessage("Merge Level >0 => InputPages: {0} OutputPages:{1}",
                                string.Join(",", allInputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray()),
                                string.Join(",", outputPages.Select(p => string.Format("{0}-{1}", p.Level, p.Version)).ToArray())
                            );
                        }
                    }
                }
                if (!mergedDuringLastPass) {
                    return; // No more merging is needed, we are finished with this pass
                }
            }
        }
    } finally {
        Interlocked.Decrement(ref kvStore.mergeCount);
    }
}