// Reads the filter block named by filterHandleValue (an encoded BlockHandle)
// and installs a FilterBlockReader on rep_. Silently does nothing if the
// handle cannot be decoded or the block cannot be read (filters are optional).
void ReadFilter(Slice filterHandleValue)
{
    // Decode the handle that locates the filter block within the file.
    Slice handleSlice = filterHandleValue;
    BlockHandle filterHandle = new BlockHandle();
    if (!filterHandle.DecodeFrom(ref handleSlice).IsOk)
    {
        return;
    }

    // We might want to unify with ReadBlock() if we start
    // requiring checksum verification in Table::Open.
    BlockContents block;
    if (!FormatHelper.ReadBlock(rep_.File, ReadOptions.Default, filterHandle, out block).IsOk)
    {
        return;
    }

    // Remember heap-allocated contents so they can be released later.
    if (block.HeapAllocated)
    {
        rep_.FilterData = block.Data.Data; // Will need to delete later
    }
    rep_.Filter = new FilterBlockReader(rep_.Options.FilterPolicy, block.Data);
}
/// <summary>
/// Looks up key <paramref name="k"/> in this table. Seeks the index block to
/// find the candidate data block; if a filter is present and reports the key
/// cannot be in that block, the block read is skipped entirely. Otherwise the
/// data block is loaded (possibly from the block cache via BlockReader) and,
/// if an entry at or after <paramref name="k"/> exists, <paramref name="saver"/>
/// is invoked with that entry's key and value.
/// </summary>
/// <param name="options">Read options forwarded to the block read.</param>
/// <param name="k">Internal key to look up.</param>
/// <param name="arg">Opaque argument passed through to <paramref name="saver"/>.</param>
/// <param name="saver">Callback receiving the located key/value pair.</param>
/// <returns>The status of the block iterator, or of the index iterator if the former is OK.</returns>
internal Status InternalGet(ReadOptions options, Slice k, object arg, HandleResultDelegate saver)
{
    Status s = new Status();
    Iterator iiter = rep_.IndexBlock.NewIterator(rep_.Options.Comparator);
    iiter.Seek(k);
    if (iiter.Valid)
    {
        // DecodeFrom advances handleValue, so work on a copy of iiter.Value.
        Slice handleValue = iiter.Value;
        FilterBlockReader filter = rep_.Filter;
        BlockHandle handle = new BlockHandle();
        if (filter != null
            && handle.DecodeFrom(ref handleValue).IsOk
            && !filter.KeyMayMatch(handle.Offset, k))
        {
            // Not found: the bloom filter proves the key is absent from this block,
            // so we avoid reading the data block at all.
        }
        else
        {
            // Was: an unused local `tempHandle` copied iiter.Value here; removed.
            Iterator blockIter = BlockReader(this, options, iiter.Value);
            blockIter.Seek(k);
            if (blockIter.Valid)
            {
                saver(arg, blockIter.Key, blockIter.Value);
            }
            s = blockIter.Status;
        }
    }
    // Prefer the block iterator's status; fall back to the index iterator's.
    if (s.IsOk)
    {
        s = iiter.Status;
    }
    return s;
}
// Converts an index-block entry (an encoded BlockHandle in indexValue) into an
// iterator over the corresponding data block. Consults the optional block cache
// first; on a miss (or with no cache) the block is read from the file. On any
// failure an error iterator carrying the status is returned.
// arg is the owning Table (passed as object so this can serve as a callback).
private static Iterator BlockReader(object arg, ReadOptions options, Slice indexValue)
{
    Table table = (Table)arg;
    Cache blockCache = table.rep_.Options.BlockCache;
    Block block = null;
    Cache.Handle cacheHandle = null;
    BlockHandle handle = new BlockHandle();
    // DecodeFrom advances `input`; keep indexValue itself untouched.
    Slice input = indexValue;
    Status s = handle.DecodeFrom(ref input);
    // We intentionally allow extra stuff in index_value so that we
    // can add more features in the future.
    if (s.IsOk)
    {
        BlockContents contents;
        if (blockCache != null)
        {
            // Cache key = 8-byte cache id followed by the block's file offset.
            ByteArrayPointer cacheKeyBuffer = new ByteArrayPointer(16);
            Coding.EncodeFixed64(cacheKeyBuffer, table.rep_.CacheId);
            Coding.EncodeFixed64(cacheKeyBuffer + 8, handle.Offset);
            Slice key = new Slice(cacheKeyBuffer, cacheKeyBuffer.Length);
            cacheHandle = blockCache.Lookup(key);
            if (cacheHandle != null)
            {
                // Cache hit: reuse the cached Block.
                block = (Block)(blockCache.Value(cacheHandle));
            }
            else
            {
                // Cache miss: read from the file and (when allowed) populate the cache.
                s = FormatHelper.ReadBlock(table.rep_.File, options, handle, out contents);
                if (s.IsOk)
                {
                    block = new Block(contents);
                    if (contents.Cachable && options.FillCache)
                    {
                        cacheHandle = blockCache.Insert(key, block, block.Size);
                    }
                }
            }
        }
        else
        {
            // No block cache configured: always read directly from the file.
            s = FormatHelper.ReadBlock(table.rep_.File, options, handle, out contents);
            if (s.IsOk)
            {
                block = new Block(contents);
            }
        }
    }
    Iterator iter;
    if (block != null)
    {
        iter = block.NewIterator(table.rep_.Options.Comparator);
        if (cacheHandle != null)
        {
            // Pin the cache entry for the iterator's lifetime; released on cleanup.
            iter.RegisterCleanup(ReleaseBlock, blockCache, cacheHandle);
        }
    }
    else
    {
        // Decode or read failed: surface the status through an error iterator.
        iter = Iterator.NewErrorIterator(s);
    }
    return iter;
}
/// <summary>
/// Given a key, return an approximate byte offset in the file where
/// the data for that key begins (or would begin if the key were
/// present in the file). The returned value is in terms of file
/// bytes, and so includes effects like compression of the underlying data.
/// E.g., the approximate offset of the last key in the table will
/// be close to the file length.
/// </summary>
public UInt64 ApproximateOffsetOf(Slice key)
{
    Iterator indexIter = rep_.IndexBlock.NewIterator(rep_.Options.Comparator);
    indexIter.Seek(key);

    if (!indexIter.Valid)
    {
        // key is past the last key in the file. Approximate the offset
        // by returning the offset of the metaindex block (which is
        // right near the end of the file).
        return rep_.MetaindexHandle.Offset;
    }

    BlockHandle handle = new BlockHandle();
    Slice input = indexIter.Value;
    if (handle.DecodeFrom(ref input).IsOk)
    {
        return handle.Offset;
    }

    // Strange: we can't decode the block handle in the index block.
    // We'll just return the offset of the metaindex block, which is
    // close to the whole file size for this case.
    return rep_.MetaindexHandle.Offset;
}