Example #1
		public Block(StorageOptions storageOptions, ReadOptions readOptions, BlockHandle handle, FileData fileData)
		{
			try
			{
				_handle = handle;
				_storageOptions = storageOptions;
				_fileData = fileData;
				if (handle.Position > fileData.Size || (handle.Position + handle.Count + BlockTrailerSize) > fileData.Size)
					throw new CorruptedDataException("The specified accessor is beyond the bounds of the provided mappedFile");

				_accessor = _fileData.File.CreateAccessor(handle.Position, handle.Count + BlockTrailerSize);

				if (readOptions.VerifyChecksums)
				{
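					// The block trailer is BlockTrailerSize bytes after the data: a 1-byte
					// type tag followed by a 4-byte masked CRC, so the stored CRC sits at
					// offset handle.Count + 1 and is computed over data + tag.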
					var crc = Crc.Unmask(_accessor.ReadInt32(handle.Count + 1));
					var actualCrc = CalculateActualCrc(handle.Count + 1); // data + tag
					if (crc != actualCrc)
						throw new CorruptedDataException("block checksum mismatch");
				}
				RestartsCount = _accessor.ReadInt32(handle.Count - sizeof(int));
				RestartsOffset = handle.Count - (RestartsCount * sizeof(int)) - sizeof(int);
				if (RestartsOffset < 0 || RestartsOffset > handle.Count)
					throw new CorruptedDataException("restart offset wrapped around");
			}
			catch (Exception)
			{
				Dispose();
				throw;
			}
		}
		public DbIterator NewIterator(ReadOptions options)
		{
			var result = state.NewInternalIterator(options);
			var internalIterator = result.Item1;
			var latestSnapshot = result.Item2;

			return new DbIterator(state, internalIterator, options.Snapshot != null ? options.Snapshot.Sequence : latestSnapshot);
		}
		public TwoLevelIterator(
			IIterator indexIterator,
			Func<ReadOptions, BlockHandle, IIterator> getIterator,
			ReadOptions readOptions
		)
		{
			_indexIterator = indexIterator;
			_readOptions = readOptions;
			this.getIterator = getIterator;
		}
		public Stream Read(Slice key, ReadOptions options = null)
		{
			if (options == null)
			{
				options = new ReadOptions();
			}

			var mem = state.MemTable;
			var imm = state.ImmutableMemTable;
			var currentVersion = state.VersionSet.Current;

			var snapshot = options.Snapshot != null ? options.Snapshot.Sequence : state.VersionSet.LastSequence;

			var reference = new Reference<Slice> { Value = key };

			state.PerfCounters.Read();

			Stream stream;
			GetStats stats;

			if (mem.TryGet(reference.Value, snapshot, out stream))
			{
				state.PerfCounters.BytesRead(stream.Length);
				return stream;
			}

			if (imm != null && imm.TryGet(reference.Value, snapshot, out stream))
			{
				state.PerfCounters.BytesRead(stream.Length); 
				return stream;
			}

			if (currentVersion.TryGet(reference.Value, snapshot, options, out stream, out stats))
			{
				if (currentVersion.UpdateStats(stats))
				{
					Background.Work(MaybeScheduleCompactionAsync());
				}

				state.PerfCounters.BytesRead(stream.Length); 
				return stream;
			}

			return null;
		}
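A minimal usage sketch for the Read path above (not part of the original code): the `storage` handle and its `Reader` property are hypothetical names, and the implicit string-to-Slice conversion is assumed here, in the same way the Table constructor in Example #14 seeks with a plain string. `System.IO` is assumed to be imported.

		// Usage sketch only: `storage` and `storage.Reader` are hypothetical names,
		// and the implicit string -> Slice conversion is assumed.
		using (Stream value = storage.Reader.Read("users/1"))
		{
			if (value == null)
			{
				// the key is missing (or was deleted) at the snapshot the read ran against
			}
			else
			{
				// the caller owns the returned stream and is responsible for disposing it
				using (var destination = new MemoryStream())
				{
					value.CopyTo(destination);
				}
			}
		}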
		public IIterator NewIterator(ReadOptions options, ulong fileNumber, long fileSize)
		{
			try
			{
				var table = FindTable(fileNumber, fileSize);

				return table.CreateIterator(options);
			}
			catch (Exception e)
			{
				log.InfoException("Could not open iterator for " + fileNumber + ", will return empty iterator", e);

				if (state.Options.ParanoidChecks)
					throw;

				return new EmptyIterator();
			}
		}
		internal IIterator GetFileIterator(ReadOptions readOptions, BlockHandle handle)
		{
			var fileNumber = (ulong)handle.Position;
			var fileSize = handle.Count;

			return storageContext.TableCache.NewIterator(readOptions, fileNumber, fileSize);
		}
		public IIterator MakeInputIterator(Compaction compaction)
		{
			var readOptions = new ReadOptions
								  {
									  VerifyChecksums = storageContext.Options.ParanoidChecks,
									  FillCache = false
								  };

			// Level-0 files have to be merged together.  For other levels,
			// we will make a concatenating iterator per level.
			// TODO(opt): use concatenating iterator for level-0 if there is no overlap
			var list = new List<IIterator>();
			for (int which = 0; which < 2; which++)
			{
				if (compaction.Inputs[which].Count != 0)
				{
					if (compaction.Level + which == 0)
					{
						var files = new List<FileMetadata>(compaction.Inputs[which]);
						list.AddRange(files.Select(file => storageContext.TableCache.NewIterator(readOptions, file.FileNumber, file.FileSize)));
					}
					else
					{
						// Create concatenating iterator for the files from this level
						list.Add(new TwoLevelIterator(
							         new LevelFileNumIterator(storageContext.InternalKeyComparator, compaction.Inputs[which]),
							         GetFileIterator, readOptions));
					}
				}
			}

			return NewMergingIterator(storageContext.InternalKeyComparator, list);
		}
		public ItemState Get(InternalKey key, ulong fileNumber, long fileSize, ReadOptions readOptions, IComparator comparator,
							 out Stream stream)
		{
			Table table = FindTable(fileNumber, fileSize);

			Tuple<Slice, Stream> result = table.InternalGet(readOptions, key);

			stream = null;

			if (result == null)
			{
				return ItemState.NotFound;
			}

			bool shouldDispose = true;
			try
			{
				InternalKey internalKey;
				if (!InternalKey.TryParse(result.Item1, out internalKey))
				{
					return ItemState.Corrupt;
				}

				if (comparator.Compare(internalKey.UserKey, key.UserKey) == 0)
				{
					bool isFound = internalKey.Type == ItemType.Value;
					if (!isFound)
					{
						return ItemState.Deleted;
					}

					stream = result.Item2;
					shouldDispose = false;
					return ItemState.Found;
				}

				return ItemState.NotFound;
			}
			finally
			{
				if (shouldDispose && result.Item2 != null)
					result.Item2.Dispose();
			}
		}
Example #9
		public bool TryGet(Slice key, ulong seq, ReadOptions readOptions, out Stream stream, out GetStats stats)
		{
			stats = new GetStats
						{
							SeekFile = null,
							SeekFileLevel = -1
						};

			FileMetadata lastFileRead = null;
			int lastFileReadLevel = -1;

			var internalKey = new InternalKey(key, seq, ItemType.ValueForSeek);

			// We can search level-by-level since entries never hop across
			// levels.  Therefore we are guaranteed that if we find data
			// in a smaller level, later levels are irrelevant.
			for (var level = 0; level < Config.NumberOfLevels; level++)
			{
				if (Files[level].Count == 0)
				{
					continue;
				}

				// Get the list of files to search in this level
				var files = GetRelevantFilesForLevel(level, internalKey);
				if (files == null || files.Count == 0)
					continue;

				foreach (var f in files)
				{
					if (lastFileRead != null && stats.SeekFile == null)
					{
						// We have had more than one seek for this read.  Charge the 1st file.
						stats.SeekFile = lastFileRead;
						stats.SeekFileLevel = lastFileReadLevel;
					}

					lastFileRead = f;
					lastFileReadLevel = level;

					var state = storageContext.TableCache.Get(
						internalKey, 
						f.FileNumber, 
						f.FileSize, 
						readOptions,
						storageContext.InternalKeyComparator.UserComparator,
						out stream);

					switch (state)
					{
						case ItemState.Found:
							return true;
						case ItemState.NotFound:
							break;
						case ItemState.Deleted:
							return false;
						case ItemState.Corrupt:
							return false;
						default:
							throw new NotSupportedException(state.ToString());
					}
				}
			}

			stream = null;
			return false;
		}
Example #10
		private IIterator CreateFileIterator(FileMetadata file)
		{
			var readOptions = new ReadOptions
			{
				VerifyChecksums = state.Options.ParanoidChecks,
				FillCache = false
			};

			return state.TableCache.NewIterator(readOptions, file.FileNumber, file.FileSize);
		}
Example #11
		private IIterator CreateInputIterator(CompactionState compactionState)
		{
			var readOptions = new ReadOptions
			{
				VerifyChecksums = state.Options.ParanoidChecks,
				FillCache = false
			};

			return new MergingIterator(state.InternalKeyComparator, compactionState.Compaction.Inputs[0].Select(x => state.TableCache.NewIterator(readOptions, x.FileNumber, x.FileSize)).ToList());
		}
		public Tuple<IIterator, ulong> NewInternalIterator(ReadOptions options)
		{
			var mem = MemTable;
			var imm = ImmutableMemTable;
			var currentVersion = VersionSet.Current;

			var snapshot = options.Snapshot != null ? options.Snapshot.Sequence : VersionSet.LastSequence;

			var iterators = new List<IIterator>
				                {
					                mem.NewIterator()
				                };

			if (imm != null)
				iterators.Add(imm.NewIterator());

			// Merge all level zero files together since they may overlap
			iterators.AddRange(currentVersion.Files[0].Select(file => TableCache.NewIterator(options, file.FileNumber, file.FileSize)));

			// For levels > 0, we can use a concatenating iterator that sequentially
			// walks through the non-overlapping files in the level, opening them
			// lazily.
			for (var level = 1; level < Config.NumberOfLevels; level++)
			{
				if (currentVersion.Files[level].Count > 0)
					iterators.Add(new TwoLevelIterator(new LevelFileNumIterator(InternalKeyComparator, currentVersion.Files[level]), VersionSet.GetFileIterator, options));
			}

			var internalIterator = new MergingIterator(InternalKeyComparator, iterators);

			return new Tuple<IIterator, ulong>(internalIterator, snapshot);
		}
Example #13
		/// <summary>
		/// Returns a new iterator over the table contents.
		/// The result of NewIterator() is initially invalid (caller must
		/// call one of the Seek methods on the iterator before using it).
		/// </summary>
		public IIterator CreateIterator(ReadOptions readOptions)
		{
			return new TwoLevelIterator(_indexBlock.CreateIterator(_storageState.Options.Comparator), CreateBlockIterator, readOptions);
		}
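A minimal usage sketch for the iterator contract described in the comment above (the iterator starts out invalid and must be positioned before use). The `table` variable is a hypothetical open Table instance, and `SeekToFirst()`/`Next()` are assumed to exist on IIterator alongside the `Seek`, `IsValid`, `Key`, and `CreateValueStream` members used elsewhere in these examples.

		// Usage sketch only. Assumes `table` is an open Table and that IIterator
		// exposes SeekToFirst()/Next() in addition to the members shown above.
		using (var iterator = table.CreateIterator(new ReadOptions()))
		{
			iterator.SeekToFirst(); // the iterator is invalid until a Seek* call positions it
			while (iterator.IsValid)
			{
				Slice currentKey = iterator.Key;
				using (var valueStream = iterator.CreateValueStream())
				{
					// consume the value stream for currentKey here
				}
				iterator.Next();
			}
		}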
Example #14
		public Table(StorageState storageState, FileData fileData)
		{
			_storageState = storageState;
			try
			{
				_fileData = fileData;

				if (_storageState.Options.MaxBlockCacheSizePerTableFile > 0)
				{
					_blockCache = new LruCache<BlockHandle, Block>(_storageState.Options.MaxBlockCacheSizePerTableFile);
				}

				if (fileData.Size < Footer.EncodedLength)
					throw new CorruptedDataException("File is too short to be an sstable");

				var footer = new Footer();
				using (var accessor = fileData.File.CreateAccessor(fileData.Size - Footer.EncodedLength, Footer.EncodedLength))
				{
					footer.DecodeFrom(accessor);
				}

				var readOptions = new ReadOptions
					{
						VerifyChecksums = _storageState.Options.ParanoidChecks
					};
				_indexBlock = new Block(_storageState.Options, readOptions, footer.IndexHandle, fileData);
				_indexBlock.IncrementUsage();
				if (_storageState.Options.FilterPolicy == null)
					return; // we don't need any metadata

				using (var metaBlock = new Block(_storageState.Options, readOptions, footer.MetaIndexHandle, fileData))
				using (var iterator = metaBlock.CreateIterator(CaseInsensitiveComparator.Default))
				{
					var filterName = ("filter." + _storageState.Options.FilterPolicy.Name);
					iterator.Seek(filterName);
					if (iterator.IsValid && CaseInsensitiveComparator.Default.Compare(filterName, iterator.Key) == 0)
					{
						var handle = new BlockHandle();
						using (var stream = iterator.CreateValueStream())
						{
							handle.DecodeFrom(stream);
						}
						var filterAccessor = _fileData.File.CreateAccessor(handle.Position, handle.Count);
						try
						{
							_filter = _storageState.Options.FilterPolicy.CreateFilter(filterAccessor);
						}
						catch (Exception)
						{
							if (_filter == null)
								filterAccessor.Dispose();
							else
								_filter.Dispose();
							throw;
						}

					}
				}
			}
			catch (Exception)
			{
				Dispose();
				throw;
			}
		}
Example #15
		internal IIterator CreateBlockIterator(ReadOptions readOptions, BlockHandle handle)
		{
			if (_blockCache == null)
			{
				Block uncachedBlock = null;
				IIterator blockIterator = null;
				try
				{
					uncachedBlock = new Block(_storageState.Options, readOptions, handle, _fileData);
					// uncachedBlock.IncrementUsage(); - intentionally not calling this, will be disposed when the iterator is disposed
					blockIterator = uncachedBlock.CreateIterator(_storageState.InternalKeyComparator);
					return blockIterator;
				}
				catch (Exception)
				{
					if (uncachedBlock != null)
						uncachedBlock.Dispose();
					if (blockIterator != null)
						blockIterator.Dispose();
					throw;
				}
			}

			Block value;
			if (_blockCache.TryGet(handle, out value))
			{
				return value.CreateIterator(_storageState.InternalKeyComparator);
			}
			var block = new Block(_storageState.Options, readOptions, handle, _fileData);
			block.IncrementUsage(); // the cache is using this, so avoid having it disposed by the cache while in use
			_blockCache.Set(handle, block);
			return block.CreateIterator(_storageState.InternalKeyComparator);
		}
Example #16
		internal Tuple<Slice, Stream> InternalGet(ReadOptions readOptions, InternalKey key)
		{
			using (var iterator = _indexBlock.CreateIterator(_storageState.InternalKeyComparator))
			{
				iterator.Seek(key.TheInternalKey);
				if (iterator.IsValid == false)
					return null;
				var handle = new BlockHandle();
				using (var stream = iterator.CreateValueStream())
				{
					handle.DecodeFrom(stream);
				}
				if (_filter != null && _filter.KeyMayMatch(handle.Position, key.UserKey) == false)
				{
					return null; // optimized: the filter rules the key out, so there is no need to read the actual block
				}
				using (var blockIterator = CreateBlockIterator(readOptions, handle))
				{
					blockIterator.Seek(key.TheInternalKey);
					if (blockIterator.IsValid == false)
						return null;
					return Tuple.Create(blockIterator.Key, blockIterator.CreateValueStream());
				}
			}
		}