/// <summary>
/// Runs a manual compaction of the user-key range [begin, end] at the given
/// level and waits until the scheduled background compaction work picks it up.
/// </summary>
/// <exception cref="InvalidOperationException">A manual compaction is already running.</exception>
public async Task CompactAsync(int level, Slice begin, Slice end)
		{
			if (InProgress)
				throw new InvalidOperationException("Manual compaction is already in progress.");

			try
			{
				InProgress = true;
				Done = false;

				Level = level;
				// MaxSequenceNumber/ValueForSeek make the bounds compare as the
				// newest possible entries for the given user keys.
				Begin = new InternalKey(begin, Format.MaxSequenceNumber, ItemType.ValueForSeek);
				End = new InternalKey(end, Format.MaxSequenceNumber, ItemType.ValueForSeek);

				Task task = null;

				while (task == null)
				{
					state.CancellationToken.ThrowIfCancellationRequested();

					// Let any automatic background compaction finish before taking the lock.
					if (state.BackgroundCompactionScheduled)
					{
						await Task.Delay(100).ConfigureAwait(false);
						continue;
					}

					using (AsyncLock.LockScope locker = await state.Lock.LockAsync().ConfigureAwait(false))
					{
						while (Done == false)
						{
							state.BackgroundCompactionScheduled = true;
							await RunCompactionAsync(locker).ConfigureAwait(false);

							var manualEnd = new InternalKey();

							if (compaction != null)
							{
								manualEnd = compaction.GetInput(0, compaction.GetNumberOfInputFiles(0) - 1).LargestKey;
							}

							if (Done == false)
							{
								// We only compacted part of the requested range. Update to the range that is left to be compacted.
								Begin = manualEnd;
							}
						}

						task = state.BackgroundTask;
					}
				}
			}
			finally
			{
				Done = true;
				InProgress = false;
			}
		}
		/// <summary>
		/// Attempts to decode <paramref name="input"/> as an internal key
		/// (user key followed by an 8-byte trailer of sequence and item type).
		/// Returns true only when the decoded item type is a recognized one.
		/// </summary>
		public static bool TryParse(Slice input, out InternalKey internalKey)
		{
			internalKey = new InternalKey();

			// Every internal key carries an 8-byte trailer; anything shorter is invalid.
			if (input.Count < 8)
				return false;

			var trailer = BitConverter.ToUInt64(input.Array, input.Offset + input.Count - 8);
			var sequence = trailer >> 8;
			var itemType = (ItemType)trailer;

			var userKey = new Slice(input.Array, input.Offset, input.Count - 8);
			internalKey = new InternalKey(userKey, sequence, itemType);

			return itemType <= ItemType.Value;
		}
		/// <summary>
		/// Returns all files in the given level whose user-key range overlaps
		/// [begin, end]. An empty user key on either bound leaves that side of
		/// the range unbounded.
		/// </summary>
		internal List<FileMetadata> GetOverlappingInputs(int level, InternalKey begin, InternalKey end)
		{
			var inputs = new List<FileMetadata>();
			var userComparator = storageContext.InternalKeyComparator.UserComparator;

			var userBegin = begin.UserKey;
			var userEnd = end.UserKey;

			// i is advanced inside the body and may be reset to 0 when a
			// level-0 file expands the search range (see below).
			for (int i = 0; i < Files[level].Count; )
			{
				var f = Files[level][i++];
				var fileStart = f.SmallestKey.UserKey;
				var fileLimit = f.LargestKey.UserKey;

				if (userBegin.IsEmpty() == false && userComparator.Compare(fileLimit, userBegin) < 0)
				{
					// "f" is completely before specified range; skip it
				}
				else if (userEnd.IsEmpty() == false && userComparator.Compare(fileStart, userEnd) > 0)
				{
					// "f" is completely after specified range; skip it
				}
				else
				{
					inputs.Add(f);
					if (level == 0)
					{
						// Level-0 files may overlap each other.  So check if the newly
						// added file has expanded the range.  If so, restart search.
						if (userBegin.IsEmpty() == false && userComparator.Compare(fileStart, userBegin) < 0)
						{
							userBegin = fileStart;
							inputs.Clear();
							i = 0;
						}
						else if (userEnd.IsEmpty() == false && userComparator.Compare(fileLimit, userEnd) > 0)
						{
							userEnd = fileLimit;
							inputs.Clear();
							i = 0;
						}
					}
				}
			}

			return inputs;
		}
		/// <summary>
		/// Picks the level into which a new mem-table output file covering
		/// [smallestKey, largestKey] should be placed: level 0 if it overlaps
		/// level 0, otherwise the deepest level (up to MaxMemCompactLevel) that
		/// neither overlaps the next level nor over-commits the grandparent level.
		/// </summary>
		public int PickLevelForMemTableOutput(Slice smallestKey, Slice largestKey)
		{
			if (OverlapInLevel(0, smallestKey, largestKey))
				return 0;

			var start = new InternalKey(smallestKey, Format.MaxSequenceNumber, ItemType.ValueForSeek);
			var limit = new InternalKey(largestKey, 0, 0);

			var level = 0;
			while (level < Config.MaxMemCompactLevel)
			{
				// Stop as soon as the next level overlaps the new file's range.
				if (OverlapInLevel(level + 1, smallestKey, largestKey))
					break;

				// Also stop when the "grandparent" level overlaps by too many bytes.
				var grandParentBytes = GetOverlappingInputs(level + 2, start, limit).Sum(x => x.FileSize);
				if (grandParentBytes > Config.MaxGrandParentOverlapBytes)
					break;

				level++;
			}

			return level;
		}
		/// <summary>
		/// Inserts an entry into the underlying table, keyed by the internal
		/// key built from the user key, sequence number and item type.
		/// </summary>
		public void Add(ulong seq, ItemType type, Slice key, UnamangedMemoryAccessor.MemoryHandle memoryHandle)
		{
			_table.Insert(new InternalKey(key, seq, type), memoryHandle);
		}
		/// <summary>
		/// Computes the minimal internal-key range covering every entry in
		/// <paramref name="inputs"/> and stores it in
		/// <paramref name="smallestKey"/> / <paramref name="largestKey"/>.
		/// REQUIRES: inputs is not empty.
		/// </summary>
		/// <param name="inputs">Files whose combined range is wanted.</param>
		/// <param name="smallestKey">Receives the smallest key of any input.</param>
		/// <param name="largestKey">Receives the largest key of any input.</param>
		private void GetRange(IReadOnlyList<FileMetadata> inputs, out InternalKey smallestKey, out InternalKey largestKey)
		{
			Debug.Assert(inputs.Count > 0);

			smallestKey = new InternalKey();
			largestKey = new InternalKey();

			for (var i = 0; i < inputs.Count; i++)
			{
				var file = inputs[i];

				// The first file seeds the range; every later file can only widen it.
				if (i == 0 || storageContext.InternalKeyComparator.Compare(file.SmallestKey, smallestKey) < 0)
					smallestKey = file.SmallestKey;

				if (i == 0 || storageContext.InternalKeyComparator.Compare(file.LargestKey, largestKey) > 0)
					largestKey = file.LargestKey;
			}
		}
 /// <summary>
 /// Records the compaction pointer (the key compaction last stopped at) for the given level.
 /// NOTE(review): this appends to CompactionPointers[level] rather than replacing the
 /// previous pointer - confirm the container is intended to keep the full history.
 /// </summary>
 public void SetCompactionPointer(int level, InternalKey key)
 {
     CompactionPointers[level].Add(key);
 }
		/// <summary>
		/// Returns the files in <paramref name="level"/> that may contain
		/// <paramref name="internalKey"/> (newest first for level 0, in key order
		/// otherwise), or null when no file in the level can contain the key.
		/// </summary>
		private List<FileMetadata> GetRelevantFilesForLevel(int level, InternalKey internalKey)
		{
			var files = Files[level];
			var comparator = storageContext.InternalKeyComparator.UserComparator;
			if (level == 0)
			{
				// Level-0 files may overlap each other.  Find all files that
				// overlap user_key and process them in order from newest to oldest.
				// (The redundant second ToList() copy has been removed.)
				return files.Where(
							f => comparator.Compare(internalKey.UserKey, f.SmallestKey.UserKey) >= 0 &&
								 comparator.Compare(internalKey.UserKey, f.LargestKey.UserKey) <= 0)
					     .OrderByDescending(x => x.FileNumber)
						 .ToList();
			}

			// Binary search to find earliest index whose largest key >= ikey.
			int index;
			if (files.TryFindFile(internalKey.TheInternalKey, storageContext.InternalKeyComparator, out index) == false)
			{
				return null;
			}

			// If the key precedes the candidate file's smallest key, no file contains it.
			int compare = comparator.Compare(internalKey.UserKey,
			                                 files[index].SmallestKey.UserKey);
			return compare < 0 ? null : files.Skip(index).ToList();
		}
		/// <summary>
		/// Removes from <paramref name="destinationFiles"/> every file whose largest
		/// user key lies strictly before <paramref name="key"/> (the compaction has
		/// moved past it), and flushes the current output if any removed file was
		/// still flagged as an unmerged compaction input.
		/// NOTE(review): the early break assumes destinationFiles enumerates in key
		/// order - confirm the concrete dictionary preserves insertion/key ordering.
		/// </summary>
		private void CleanupIfNecessary(InternalKey key, CompactionState compactionState, IIterator iterator, IDictionary<FileMetadata, bool> destinationFiles)
		{
			var filesToRemove = new List<KeyValuePair<FileMetadata, bool>>();
			foreach (var pair in destinationFiles)
			{
				var r = state.InternalKeyComparator.UserComparator.Compare(key.UserKey, pair.Key.LargestKey.UserKey);
				if (r > 0)
				{
					// key is past the file - it can be dropped
					filesToRemove.Add(pair);
				}
				else if (r < 0)
				{
					// key is before the file - no need to check further
					break;
				}
			}

			if (filesToRemove.Count == 0)
				return;

			foreach (var pair in filesToRemove)
			{
				destinationFiles.Remove(pair);

				// A 'true' value presumably marks a file not yet picked for merging
				// (see PickOverlapingFile) - TODO confirm.
				if (pair.Value)
					compactionState.Compaction.Inputs[1].Remove(pair.Key);
			}

			// Flush the in-progress output if any removed file was an actual input.
			if (filesToRemove.Any(x => x.Value))
				FinishCompactionOutputFileIfNecessary(compactionState, iterator, true);
		}
		/// <summary>
		/// Merges the compaction's source iterator with the overlapping level+1
		/// input files: entries from both sides are emitted in key order through
		/// MaybeAddFile, then the compaction results are installed.
		/// </summary>
		private async Task PerformRapidCompactionAsync(CompactionState compactionState, Stopwatch watch, AsyncLock.LockScope locker)
		{
			using (var source = CreateInputIterator(compactionState))
			{
				InternalKey sourceInternalKey = new InternalKey();
				InternalKey fileInternalKey;
				FileMetadata file = null;
				IIterator fileIterator = null;

				Slice lastKey = null;
				ulong lastSequence = 0;
				bool addSource = true;

				try
				{
					// Value 'true' marks a level+1 file as not yet picked for merging.
					var destinationFiles = compactionState.Compaction.Inputs[1].ToDictionary(x => x, x => true);

					source.SeekToFirst();
					while (source.IsValid)
					{
						// Give priority to flushing the immutable mem table.
						if (state.ImmutableMemTable != null)
							await CompactMemTableAsync(locker).ConfigureAwait(false);

						state.CancellationToken.ThrowIfCancellationRequested();

						// Re-parse the source key only after the previous one was consumed.
						if (addSource && !InternalKey.TryParse(source.Key, out sourceInternalKey))
							throw new InvalidOperationException("Source key must be a valid internal key.");

						addSource = false;

						if (destinationFiles.Count > 0 && file == null)
						{
							file = PickOverlapingFile(sourceInternalKey, destinationFiles);
							CleanupIfNecessary(sourceInternalKey, compactionState, null, destinationFiles);
						}

						if (file != null && fileIterator == null)
						{
							fileIterator = CreateFileIterator(file);
							fileIterator.SeekToFirst();
						}

						if (fileIterator != null)
						{
							// Emit file entries that sort before the current source entry.
							while (fileIterator.IsValid)
							{
								if (!InternalKey.TryParse(fileIterator.Key, out fileInternalKey))
									throw new InvalidOperationException("File key must be a valid internal key.");

								if (state.InternalKeyComparator.Compare(sourceInternalKey, fileInternalKey) <= 0)
								{
									addSource = true;
									break;
								}

								// NOTE(review): the key comes from fileIterator but the value
								// stream is taken from 'source' here, while the drain loop below
								// passes fileIterator - verify the key/value pairing is intended.
								MaybeAddFile(fileInternalKey, source, compactionState, locker, ref lastKey, ref lastSequence);
								fileIterator.Next();
							}

							// The overlapping file is exhausted; move on to the next one.
							if (!fileIterator.IsValid)
							{
								file = null;
								fileIterator.Dispose();
								fileIterator = null;
							}
						}
						else
						{
							addSource = true;
						}

						if (!addSource)
							continue;

						MaybeAddFile(sourceInternalKey, source, compactionState, locker, ref lastKey, ref lastSequence);
						source.Next();
					}

					// The source is exhausted: drain whatever is left of the current file.
					if (fileIterator != null)
					{
						while (fileIterator.IsValid)
						{
							if (!InternalKey.TryParse(fileIterator.Key, out fileInternalKey))
								throw new InvalidOperationException("File key must be a valid internal key.");

							MaybeAddFile(fileInternalKey, fileIterator, compactionState, locker, ref lastKey, ref lastSequence);

							fileIterator.Next();
						}
					}

					if (sourceInternalKey.UserKey.IsEmpty() == false)
						CleanupIfNecessary(sourceInternalKey, compactionState, null, destinationFiles);
				}
				finally
				{
					if (fileIterator != null)
						fileIterator.Dispose();
				}

				FinishCompactionOutputFileIfNecessary(compactionState, source, force: true);

				CreateCompactionStats(compactionState, watch);

				await InstallCompactionResultsAsync(compactionState, locker).ConfigureAwait(false);
			}
		}
		/// <summary>
		/// Writes the fully encoded internal key to the log, prefixed with its length.
		/// </summary>
		public static int WriteLengthPrefixedInternalKey(this LogWriter stream, InternalKey internalKey)
		{
			var encodedKey = internalKey.TheInternalKey;

			return WriteLengthPrefixedSlice(stream, encodedKey);
		}
		/// <summary>
		/// Asynchronously writes the fully encoded internal key to the log,
		/// prefixed with its length.
		/// </summary>
		public static Task<int> WriteLengthPrefixedInternalKeyAsync(this LogWriter stream, InternalKey internalKey)
		{
			var encodedKey = internalKey.TheInternalKey;

			return WriteLengthPrefixedSliceAsync(stream, encodedKey);
		}
		/// <summary>
		/// Returns whether the user key is present in this mem table at (or
		/// before) the given sequence number. Note that it is possible for the
		/// key to be found while the stream is null: that means the value has
		/// been deleted.
		/// </summary>
		public bool TryGet(Slice userKey, ulong sequence, out Stream stream)
		{
			stream = null;

			var lookupKey = new InternalKey(userKey, sequence, ItemType.ValueForSeek);
			var iterator = _table.NewIterator();
			iterator.Seek(lookupKey);

			// The key is absent when the seek lands nowhere or on a different key.
			if (iterator.IsValid == false)
				return false;

			if (_internalKeyComparator.EqualKeys(lookupKey.TheInternalKey, iterator.Key.TheInternalKey) == false)
				return false;

			var itemType = iterator.Key.Type;

			if (itemType == ItemType.Deletion)
			{
				// Found, but deleted: report presence with no value stream.
				return true;
			}

			if (itemType == ItemType.Value)
			{
				stream = _memoryAccessor.Read(iterator.Val);
				return true;
			}

			throw new ArgumentOutOfRangeException();
		}
		/// <summary>
		/// Removes the entry identified by the internal key built from the
		/// user key, sequence number and item type.
		/// </summary>
		public void Remove(ulong seq, ItemType type, Slice key)
		{
			_table.Remove(new InternalKey(key, seq, type));
		}
		/// <summary>
		/// Returns true when some file in <paramref name="files"/> overlaps the
		/// user-key range [smallestKey, largestKey]; an empty key leaves that side
		/// of the range unbounded. When disjointSortedFiles is true the files are
		/// assumed sorted and non-overlapping, allowing a binary search.
		/// </summary>
		private bool SomeFileOverlapsRange(bool disjointSortedFiles, IList<FileMetadata> files, Slice smallestKey, Slice largestKey)
		{
			var userComparator = storageContext.InternalKeyComparator.UserComparator;

			if (!disjointSortedFiles)
			{
				// Need to check against all files
				return files.Any(file => !AfterFile(userComparator, smallestKey, file) && !BeforeFile(userComparator, largestKey, file));
			}

			// Binary search: find the first file whose largest key >= smallestKey.
			var index = 0;
			if (smallestKey.IsEmpty() == false)
			{
				var smallInternalKey = new InternalKey(smallestKey, Format.MaxSequenceNumber, ItemType.ValueForSeek);
				files.TryFindFile(smallInternalKey.TheInternalKey, storageContext.InternalKeyComparator, out index);
			}

			if (index >= files.Count)
			{
				// beginning of range is after all files, so no overlap.
				return false;
			}

			// Overlap unless the whole range ends before this candidate file starts.
			return !BeforeFile(userComparator, largestKey, files[index]);
		}
		/// <summary>
		/// Searches the levels, smallest level first, for the user key at or
		/// below the given sequence. Returns true with the value stream when
		/// found; false when absent, deleted or corrupt. The first extra file
		/// probed during the read is reported through <paramref name="stats"/>.
		/// </summary>
		public bool TryGet(Slice key, ulong seq, ReadOptions readOptions, out Stream stream, out GetStats stats)
		{
			stats = new GetStats
						{
							SeekFile = null,
							SeekFileLevel = -1
						};

			FileMetadata lastFileRead = null;
			int lastFileReadLevel = -1;

			var internalKey = new InternalKey(key, seq, ItemType.ValueForSeek);

			// We can search level-by-level since entries never hop across
			// levels.  Therefore we are guaranteed that if we find data
			// in an smaller level, later levels are irrelevant.
			for (var level = 0; level < Config.NumberOfLevels; level++)
			{
				if (Files[level].Count == 0)
				{
					continue;
				}

				// Get the list of files to search in this level
				var files = GetRelevantFilesForLevel(level, internalKey);
				if (files == null || files.Count == 0)
					continue;

				foreach (var f in files)
				{
					if (lastFileRead != null && stats.SeekFile == null)
					{
						// We have had more than one seek for this read.  Charge the 1st file.
						stats.SeekFile = lastFileRead;
						stats.SeekFileLevel = lastFileReadLevel;
					}

					lastFileRead = f;
					lastFileReadLevel = level;

					var state = storageContext.TableCache.Get(
						internalKey, 
						f.FileNumber, 
						f.FileSize, 
						readOptions,
						storageContext.InternalKeyComparator.UserComparator,
						out stream);

					switch (state)
					{
						case ItemState.Found:
							return true;
						case ItemState.NotFound:
							// Not in this file; keep probing the remaining candidates.
							break;
						case ItemState.Deleted:
							return false;
						case ItemState.Corrupt:
							return false;
						default:
							throw new NotSupportedException(state.ToString());
					}
				}
			}

			stream = null;
			return false;
		}
		/// <summary>
		/// Decides whether the entry identified by <paramref name="key"/> should
		/// survive the compaction and, if so, appends it to the current output
		/// table. The last seen user key and sequence are threaded across calls
		/// via the ref parameters so shadowed/obsolete entries can be dropped.
		/// </summary>
		public void MaybeAddFile(InternalKey key, IIterator iterator, CompactionState compactionState, AsyncLock.LockScope locker, ref Slice lastKey, ref ulong lastSequence)
		{
			var drop = false;
			if (lastKey.IsEmpty()
				|| state.InternalKeyComparator.UserComparator.Compare(key.UserKey, lastKey) != 0)
			{
				// First occurrence of this user key
				lastKey = key.UserKey.Clone();
				lastSequence = Format.MaxSequenceNumber;
			}

			if (lastSequence <= compactionState.SmallestSnapshot)
			{
				// Hidden by an newer entry for same user key
				drop = true;
			}
			else if (key.Type == ItemType.Deletion && key.Sequence <= compactionState.SmallestSnapshot
					 && compactionState.Compaction.IsBaseLevelForKey(key.UserKey))
			{
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger sequence numbers
				// (3) data in layers that are being compacted here and have
				//     smaller sequence numbers will be dropped in the next
				//     few iterations of this loop (by rule (A) above).
				// Therefore this deletion marker is obsolete and can be dropped.

				drop = true;
			}

			lastSequence = key.Sequence;

			if (!drop)
			{
				if (compactionState.Builder == null)
				{
					// NOTE(review): blocking on LockAsync().Result is sync-over-async
					// and risks deadlock/thread-pool starvation - confirm intentional.
					using (locker.LockAsync().Result)
						OpenCompactionOutputFileIfNecessary(compactionState, locker);
				}

				Debug.Assert(compactionState.Builder != null);

				// The first entry of a new output file defines its smallest key.
				if (compactionState.Builder.NumEntries == 0)
					compactionState.CurrentOutput.SmallestKey = new InternalKey(key.TheInternalKey.Clone());

				compactionState.CurrentOutput.LargestKey = new InternalKey(key.TheInternalKey.Clone());

				//Console.WriteLine("Adding " + compactionState.CurrentOutput.LargestKey);

				using (var stream = iterator.CreateValueStream())
					compactionState.Builder.Add(key.TheInternalKey, stream);

				// Roll over to a new output file when the current one is large enough.
				FinishCompactionOutputFileIfNecessary(compactionState, iterator);
			}
		}
		/// <summary>
		/// Looks the internal key up in the table backing the given file number.
		/// Returns Found (handing the value stream to the caller), NotFound,
		/// Deleted for a deletion marker, or Corrupt when the stored key cannot
		/// be parsed.
		/// </summary>
		public ItemState Get(InternalKey key, ulong fileNumber, long fileSize, ReadOptions readOptions, IComparator comparator,
							 out Stream stream)
		{
			Table table = FindTable(fileNumber, fileSize);

			Tuple<Slice, Stream> result = table.InternalGet(readOptions, key);

			stream = null;

			if (result == null)
			{
				return ItemState.NotFound;
			}

			// Ownership of the value stream transfers to the caller only on
			// Found; on every other path it must be disposed here.
			bool shouldDispose = true;
			try
			{
				InternalKey internalKey;
				if (!InternalKey.TryParse(result.Item1, out internalKey))
				{
					return ItemState.Corrupt;
				}

				if (comparator.Compare(internalKey.UserKey, key.UserKey) == 0)
				{
					// A matching user key with a non-Value type is a deletion marker.
					bool isFound = internalKey.Type == ItemType.Value;
					if (!isFound)
					{
						return ItemState.Deleted;
					}

					stream = result.Item2;
					shouldDispose = false;
					return ItemState.Found;
				}

				return ItemState.NotFound;
			}
			finally
			{
				if (shouldDispose && result.Item2 != null)
					result.Item2.Dispose();
			}
		}
		/// <summary>
		/// Finds the first destination file whose user-key range contains
		/// <paramref name="sourceKey"/>, marks it as picked (value = false) and
		/// returns it; returns null when no file overlaps the key.
		/// </summary>
		private FileMetadata PickOverlapingFile(InternalKey sourceKey, IDictionary<FileMetadata, bool> destinationFiles)
		{
			if (destinationFiles.Count == 0)
				return null;

			var userComparator = state.InternalKeyComparator.UserComparator;

			foreach (var pair in destinationFiles)
			{
				var candidate = pair.Key;

				var notBeforeStart = userComparator.Compare(sourceKey.UserKey, candidate.SmallestKey.UserKey) >= 0;
				var notAfterEnd = userComparator.Compare(sourceKey.UserKey, candidate.LargestKey.UserKey) <= 0;

				// The source key falls inside this file's [smallest, largest] range.
				if (notBeforeStart && notAfterEnd)
				{
					destinationFiles[candidate] = false;
					return candidate;
				}
			}

			return null;
		}
		/// <summary>
		/// Builds a compaction covering every file in <paramref name="level"/>
		/// that overlaps [begin, end]; returns null when nothing overlaps.
		/// </summary>
		public Compaction CompactRange(int level, InternalKey begin, InternalKey end)
		{
			var overlapping = Current.GetOverlappingInputs(level, begin, end);

			// Nothing to compact in this range.
			if (overlapping.Count == 0)
				return null;

			var compaction = new Compaction(storageContext, level, Current);
			compaction.Inputs[0] = overlapping;

			SetupOtherInputs(compaction);

			return compaction;
		}
	    /// <summary>
	    /// Value equality: user key, sequence number and item type must all match.
	    /// </summary>
	    public bool Equals(InternalKey other)
	    {
	        if (!UserKey.Equals(other.UserKey))
	            return false;

	        return Sequence == other.Sequence && Type == other.Type;
	    }
		/// <summary>
		/// Computes the minimal internal-key range covering every entry in
		/// inputs1 and inputs2, storing it in smallestKey / largestKey.
		/// REQUIRES: the combined inputs are not empty.
		/// </summary>
		/// <param name="inputs1">First set of files.</param>
		/// <param name="inputs2">Second set of files.</param>
		/// <param name="smallestKey">Receives the smallest key across both sets.</param>
		/// <param name="largestKey">Receives the largest key across both sets.</param>
		private void GetRange2(IEnumerable<FileMetadata> inputs1, IEnumerable<FileMetadata> inputs2, out InternalKey smallestKey, out InternalKey largestKey)
		{
			// Delegate to GetRange over the concatenation of both input sets.
			GetRange(inputs1.Concat(inputs2).ToList(), out smallestKey, out largestKey);
		}
		// Example #23
		/// <summary>
		/// Looks up <paramref name="key"/> in this table: seeks the index block to
		/// locate the candidate data block, consults the optional filter, then seeks
		/// inside the data block. Returns the stored key and a stream over its
		/// value, or null when the key cannot be present.
		/// </summary>
		internal Tuple<Slice, Stream> InternalGet(ReadOptions readOptions, InternalKey key)
		{
			using (var iterator = _indexBlock.CreateIterator(_storageState.InternalKeyComparator))
			{
				iterator.Seek(key.TheInternalKey);
				if (iterator.IsValid == false)
					return null;
				// The index entry's value encodes the handle of the data block.
				var handle = new BlockHandle();
				using (var stream = iterator.CreateValueStream())
				{
					handle.DecodeFrom(stream);
				}
				if (_filter != null && _filter.KeyMayMatch(handle.Position, key.UserKey) == false)
				{
					return null; // optimized not found by filter, no need to read the actual block
				}
				using (var blockIterator = CreateBlockIterator(readOptions, handle))
				{
					blockIterator.Seek(key.TheInternalKey);
					if (blockIterator.IsValid == false)
						return null;
					return Tuple.Create(blockIterator.Key, blockIterator.CreateValueStream());
				}
			}
		}