		// Holds the state of an in-flight compaction: the compaction being executed,
		// the smallest snapshot it must preserve, and the output files produced so far.
		public CompactionState(Compaction compaction)
		{
			Compaction = compaction;
			SmallestSnapshot = -1;

			outputs = new List<FileMetadata>();
		}
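
		// Picks the compaction to run for an explicit range compaction: asks the
		// version set for a compaction covering [Begin, End] at Level and marks
		// the operation as done when nothing in that level overlaps the range.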
		protected override Compaction CompactionToProcess()
		{
			compaction = state.VersionSet.CompactRange(Level, Begin, End);

			Done = (compaction == null);

			return compaction;
		}
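
		// Creates a single iterator over all of the compaction's input files.
		// Level-0 inputs get one table iterator per file (they may overlap each other);
		// every other level gets a concatenating iterator, and the result is
		// wrapped in a merging iterator.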
		public IIterator MakeInputIterator(Compaction compaction)
		{
			var readOptions = new ReadOptions
								  {
									  VerifyChecksums = storageContext.Options.ParanoidChecks,
									  FillCache = false
								  };

			// Level-0 files have to be merged together.  For other levels,
			// we will make a concatenating iterator per level.
			// TODO(opt): use concatenating iterator for level-0 if there is no overlap
			var list = new List<IIterator>();
			for (int which = 0; which < 2; which++)
			{
				if (compaction.Inputs[which].Count != 0)
				{
					if (compaction.Level + which == 0)
					{
						var files = new List<FileMetadata>(compaction.Inputs[which]);
						list.AddRange(files.Select(file => storageContext.TableCache.NewIterator(readOptions, file.FileNumber, file.FileSize)));
					}
					else
					{
						// Create concatenating iterator for the files from this level
						list.Add(new TwoLevelIterator(
							         new LevelFileNumIterator(storageContext.InternalKeyComparator, compaction.Inputs[which]),
							         GetFileIterator, readOptions));
					}
				}
			}

			return NewMergingIterator(storageContext.InternalKeyComparator, list);
		}
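
		// Given the level-N files in Inputs[0], fills in the overlapping level-N+1
		// files (Inputs[1]), optionally grows Inputs[0] when that does not pull in
		// extra level-N+1 files, records the overlapping grandparent files, and
		// advances the compaction pointer for the level.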
		private void SetupOtherInputs(Compaction compaction)
		{
			var level = compaction.Level;
			InternalKey smallestKey, largestKey;

			GetRange(compaction.Inputs[0], out smallestKey, out largestKey);

			compaction.Inputs[1] = Current.GetOverlappingInputs(level + 1, smallestKey, largestKey);

			// Compute the entire key range covered by both sets of compaction inputs.
			InternalKey allStart, allLimit;
			GetRange2(compaction.Inputs[0], compaction.Inputs[1], out allStart, out allLimit);

			// See if we can grow the number of inputs in this level without
			// changing the number of level+1 files we pick up.
			if (compaction.Inputs[1].Count > 0)
			{
				var expanded0 = Current.GetOverlappingInputs(level, allStart, allLimit);
				var inputs0Size = compaction.Inputs[0].Sum(x => x.FileSize);
				var inputs1Size = compaction.Inputs[1].Sum(x => x.FileSize);
				var expanded0Size = expanded0.Sum(x => x.FileSize);

				if (expanded0.Count > compaction.Inputs[0].Count
					&& inputs1Size + expanded0Size < Config.ExpandedCompactionByteSizeLimit)
				{
					InternalKey newStart, newLimit;
					GetRange(expanded0, out newStart, out newLimit);
					var expanded1 = Current.GetOverlappingInputs(level + 1, newStart, newLimit);

					if (expanded1.Count == compaction.Inputs[1].Count)
					{
						log.Info("Expanding@{0} {1}+{2} ({3}+{4} bytes) to {5}+{6} ({7}+{8} bytes).",
							level,
							compaction.Inputs[0].Count, compaction.Inputs[1].Count,
							inputs0Size, inputs1Size,
							expanded0.Count, expanded1.Count,
							expanded0Size, inputs1Size);

						largestKey = newLimit;

						compaction.Inputs[0] = expanded0;
						compaction.Inputs[1] = expanded1;

						GetRange2(compaction.Inputs[0], compaction.Inputs[1], out allStart, out allLimit);
					}
				}
			}

			// Compute the set of grandparent files that overlap this compaction
			// (parent == level + 1; grandparent == level + 2).
			if (level + 2 < Config.NumberOfLevels)
			{
				compaction.Grandparents = Current.GetOverlappingInputs(level + 2, allStart, allLimit);
			}

			// Update the place where we will do the next compaction for this level.
			// We update this immediately instead of waiting for the VersionEdit
			// to be applied so that if the compaction fails, we will try a different
			// key range next time.
			CompactionPointers[level] = largestKey.TheInternalKey;
			compaction.Edit.SetCompactionPointer(level, largestKey);
		}
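
		// Returns a compaction for the files in the given level that overlap
		// [begin, end], or null if nothing in that level overlaps the range.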
		public Compaction CompactRange(int level, InternalKey begin, InternalKey end)
		{
			var inputs = Current.GetOverlappingInputs(level, begin, end);
			if (inputs.Count == 0)
				return null;

			var compaction = new Compaction(storageContext, level, Current);
			compaction.Inputs[0] = inputs;

			SetupOtherInputs(compaction);

			return compaction;
		}
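
		// Picks the level and inputs for a new compaction, preferring size-triggered
		// compactions over seek-triggered ones. Returns null if there is nothing to do.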
		public Compaction PickCompaction()
		{
			int level;
			Compaction compaction;

			bool sizeCompaction = Current.CompactionScore >= 1;
			bool seekCompaction = Current.FileToCompact != null;

			// We prefer compactions triggered by too much data in a level over
			// the compactions triggered by seeks.
			if (sizeCompaction)
			{
				level = Current.CompactionLevel;
				Debug.Assert(level >= 0);
				Debug.Assert(level + 1 < Config.NumberOfLevels);

				compaction = new Compaction(storageContext, level, Current);

				// Pick the first file that comes after the compaction pointer for this level
				for (var i = 0; i < Current.Files[level].Count; i++)
				{
					var file = Current.Files[level][i];
					if (CompactionPointers[level].IsEmpty()
						|| storageContext.InternalKeyComparator.Compare(file.LargestKey.TheInternalKey, CompactionPointers[level]) > 0)
					{
						compaction.Inputs[0].Add(file);
						break;
					}
				}

				if (compaction.Inputs[0].Count == 0)
				{
					// Wrap-around to the beginning of the key space
					compaction.Inputs[0].Add(Current.Files[level][0]);
				}
			}
			else if (seekCompaction)
			{
				level = Current.FileToCompactLevel;
				compaction = new Compaction(storageContext, level, Current);
				compaction.Inputs[0].Add(Current.FileToCompact);
			}
			else
			{
				return null;
			}

			// Files in level 0 may overlap each other, so pick up all overlapping ones
			if (level == 0)
			{
				InternalKey smallestKey, largestKey;
				GetRange(compaction.Inputs[0], out smallestKey, out largestKey);

				// Note that the next call will discard the file we placed in
				// c->inputs_[0] earlier and replace it with an overlapping set
				// which will include the picked file.
				compaction.Inputs[0] = Current.GetOverlappingInputs(0, smallestKey, largestKey);
				Debug.Assert(compaction.Inputs[0].Count > 0);
			}

			SetupOtherInputs(compaction);

			return compaction;
		}