Example #1
		internal int[][] docMaps; // per-segment map from old docID to new docID (null if the segment had no deletions)
		internal int minDocID; // first old docID covered by this merge
		internal int maxDocID; // 1 + last old docID covered by this merge
		internal int[] starts; // old segment starts
		internal int[] newStarts; // new segment starts, accounting for compaction
		internal int docShift; // total # deleted docs that were compacted by this merge
		
		public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
		{
			this.docMaps = docMaps;
			SegmentInfo firstSegment = merge.segments.Info(0);
			int i = 0;
			while (true)
			{
				SegmentInfo info = infos.Info(i);
				if (info.Equals(firstSegment))
					break;
				minDocID += info.docCount;
				i++;
			}
			
			int numDocs = 0;
			for (int j = 0; j < docMaps.Length; i++, j++) // note: i carries on from the loop above
			{
				numDocs += infos.Info(i).docCount;
				System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
			}
			maxDocID = minDocID + numDocs;
			
			starts = new int[docMaps.Length];
			newStarts = new int[docMaps.Length];
			
			starts[0] = minDocID;
			newStarts[0] = minDocID;
			for (i = 1; i < docMaps.Length; i++)
			{
				int lastDocCount = merge.segments.Info(i - 1).docCount;
				starts[i] = starts[i - 1] + lastDocCount;
				newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
			}
			docShift = numDocs - mergedDocCount;
			
			// There are rare cases when docShift is 0.  It happens
			// if you try to delete a docID that's out of bounds,
			// because the SegmentReader still allocates deletedDocs
			// and pretends it has deletions ... so we can't make
			// this assert here
			// assert docShift > 0;
			
			// Make sure it all adds up:
			System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
		}
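The constructor only precomputes the old and new segment starts; the payoff comes when a buffered delete's old docID is translated after the merge commits. A minimal sketch of how a companion Remap method can consult these arrays, using a linear scan for brevity (the real implementation may binary-search; this is not the verbatim Lucene.NET source):

		public int Remap(int oldDocID)
		{
			if (oldDocID < minDocID)
				return oldDocID; // before the merged range: unchanged
			if (oldDocID >= maxDocID)
				return oldDocID - docShift; // after the merged range: simple downward shift
			
			// Inside the merged range: find the source segment whose old start covers oldDocID
			int seg = docMaps.Length - 1;
			while (seg > 0 && oldDocID < starts[seg])
				seg--;
			
			if (docMaps[seg] != null)
				return newStarts[seg] + docMaps[seg][oldDocID - starts[seg]]; // segment had deletions
			return newStarts[seg] + oldDocID - starts[seg]; // no deletions: plain offset change
		}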
Example #2
		public void Merge_ForNUnit(MergePolicy.OneMerge merge)
		{
			Merge(merge);
		}
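This shim merely exposes the internal Merge(OneMerge) entry point to tests. A hypothetical NUnit usage (BuildOneMerge is an assumed test helper that wraps a range of the writer's segments in a MergePolicy.OneMerge, not part of the API):

		[NUnit.Framework.Test]
		public void MergesFirstTwoSegments()
		{
			MergePolicy.OneMerge merge = BuildOneMerge(writer, 0, 2); // hypothetical helper
			writer.Merge_ForNUnit(merge);
		}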
Example #3
		private void  HandleMergeException(System.Exception t, MergePolicy.OneMerge merge)
		{
			
			if (infoStream != null)
			{
				Message("handleMergeException: merge=" + merge.SegString(directory) + " exc=" + t);
			}
			
			// Set the exception on the merge, so if
			// optimize() is waiting on us it sees the root
			// cause exception:
			merge.SetException(t);
			AddMergeException(merge);
			
			if (t is MergePolicy.MergeAbortedException)
			{
				// We can ignore this exception (it happens when
				// close(false) or rollback is called), unless the
				// merge involves segments from external directories,
				// in which case we must throw it so, for example, the
				// rollbackTransaction code in addIndexes* is
				// executed.
				if (merge.isExternal)
					throw t;
			}
            else if (t is System.IO.IOException || t is System.SystemException || t is System.ApplicationException)
            {
                throw t;
            }
            else
            {
                // Should not get here
                System.Diagnostics.Debug.Fail("Exception is not expected type!");
                throw new System.SystemException(null, t);
            }
		}
Example #4
		/// <summary>Does the actual merge, by calling <see cref="IndexWriter.Merge" /> </summary>
		protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
		{
			writer.Merge(merge);
		}
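Because DoMerge is virtual, a scheduler subclass can observe every merge before delegating to the writer. A sketch (the counting subclass is illustrative, not part of Lucene.NET):

		internal class CountingMergeScheduler : ConcurrentMergeScheduler
		{
			public int mergeCount; // illustrative: number of merges executed
			
			protected internal override void DoMerge(MergePolicy.OneMerge merge)
			{
				System.Threading.Interlocked.Increment(ref mergeCount);
				base.DoMerge(merge); // still ends up in IndexWriter.Merge
			}
		}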
Example #5
		/// <summary>Does the actual (time-consuming) work of the merge,
		/// but without holding synchronized lock on IndexWriter
		/// instance 
		/// </summary>
		private int MergeMiddle(MergePolicy.OneMerge merge)
		{
			
			merge.CheckAborted(directory);
			
			System.String mergedName = merge.info.name;
			
			SegmentMerger merger = null;
			
			int mergedDocCount = 0;
			
			SegmentInfos sourceSegments = merge.segments;
			int numSegments = sourceSegments.Count;
			
			if (infoStream != null)
				Message("merging " + merge.SegString(directory));
			
			merger = new SegmentMerger(this, mergedName, merge);
			
			merge.readers = new SegmentReader[numSegments];
			merge.readersClone = new SegmentReader[numSegments];
			
			bool mergeDocStores = false;

            String currentDocStoreSegment;
            lock(this) {
                currentDocStoreSegment = docWriter.DocStoreSegment;
            }
            bool currentDSSMerged = false;

			// This is try/finally to make sure merger's readers are
			// closed:
			bool success = false;
            try
            {
                int totDocCount = 0;

                for (int i = 0; i < numSegments; i++)
                {

                    SegmentInfo info = sourceSegments.Info(i);

                    // Hold onto the "live" reader; we will use this to
                    // commit merged deletes
                    SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, -1);

                    // We clone the segment readers because other
                    // deletes may come in while we're merging so we
                    // need readers that will not change
                    SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(true);
                    merger.Add(clone);

                    if (clone.HasDeletions)
                    {
                        mergeDocStores = true;
                    }

                    if (info.DocStoreOffset != -1 && currentDocStoreSegment != null)
                    {
                        currentDSSMerged |= currentDocStoreSegment.Equals(info.DocStoreSegment);
                    }

                    totDocCount += clone.NumDocs();
                }

                if (infoStream != null)
                {
                    Message("merge: total " + totDocCount + " docs");
                }

                merge.CheckAborted(directory);

                // If deletions have arrived and it has now become
                // necessary to merge doc stores, go and open them:
                if (mergeDocStores && !merge.mergeDocStores)
                {
                    merge.mergeDocStores = true;
                    lock (this)
                    {
                        if (currentDSSMerged)
                        {
                            if (infoStream != null)
                            {
                                Message("now flush at mergeMiddle");
                            }
                            DoFlush(true, false);
                        }
                    }

                    for (int i = 0; i < numSegments; i++)
                    {
                        merge.readersClone[i].OpenDocStores();
                    }

                    // Clear DSS
                    merge.info.SetDocStore(-1, null, false);

                }

                // This is where all the work happens:
                mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);

                System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);

                if (merge.useCompoundFile)
                {

                    success = false;
                    string compoundFileName = IndexFileNames.SegmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);

                    try
                    {
                        if (infoStream != null)
                        {
                            Message("create compound file " + compoundFileName);
                        }
                        merger.CreateCompoundFile(compoundFileName);
                        success = true;
                    }
                    catch (System.IO.IOException ioe)
                    {
                        lock (this)
                        {
                            if (merge.IsAborted())
                            {
                                // This can happen if rollback or close(false)
                                // is called -- fall through to logic below to
                                // remove the partially created CFS:
                            }
                            else
                            {
                                HandleMergeException(ioe, merge);
                            }
                        }
                    }
                    catch (Exception t)
                    {
                        HandleMergeException(t, merge);
                    }
                    finally
                    {
                        if (!success)
                        {
                            if (infoStream != null)
                            {
                                Message("hit exception creating compound file during merge");
                            }

                            lock (this)
                            {
                                deleter.DeleteFile(compoundFileName);
                                deleter.DeleteNewFiles(merger.GetMergedFiles());
                            }
                        }
                    }

                    success = false;

                    lock (this)
                    {

                        // delete new non cfs files directly: they were never
                        // registered with IFD
                        deleter.DeleteNewFiles(merger.GetMergedFiles());

                        if (merge.IsAborted())
                        {
                            if (infoStream != null)
                            {
                                Message("abort merge after building CFS");
                            }
                            deleter.DeleteFile(compoundFileName);
                            return 0;
                        }
                    }

                    merge.info.SetUseCompoundFile(true);
                }

                int termsIndexDivisor;
                bool loadDocStores;

                // if the merged segment warmer was not installed when
                // this merge was started, causing us to not force
                // the docStores to close, we can't warm it now
                bool canWarm = merge.info.DocStoreSegment == null || currentDocStoreSegment == null || !merge.info.DocStoreSegment.Equals(currentDocStoreSegment);

                if (poolReaders && mergedSegmentWarmer != null && canWarm)
                {
                    // Load terms index & doc stores so the segment
                    // warmer can run searches, load documents/term
                    // vectors
                    termsIndexDivisor = readerTermsIndexDivisor;
                    loadDocStores = true;
                }
                else
                {
                    termsIndexDivisor = -1;
                    loadDocStores = false;
                }

                // TODO: in the non-realtime case, we may want to only
                // keep deletes (it's costly to open entire reader
                // when we just need deletes)

                SegmentReader mergedReader = readerPool.Get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
                try
                {
                    if (poolReaders && mergedSegmentWarmer != null)
                    {
                        mergedSegmentWarmer.Warm(mergedReader);
                    }
                    if (!CommitMerge(merge, merger, mergedDocCount, mergedReader))
                    {
                        // commitMerge will return false if this merge was aborted
                        return 0;
                    }
                }
                finally
                {
                    lock (this)
                    {
                        readerPool.Release(mergedReader);
                    }
                }

                success = true;
            }
            finally
            {
                // Readers are already closed in commitMerge if we didn't hit
                // an exc:
                if (!success)
                {
                    CloseMergeReaders(merge, true);
                }
            }

			return mergedDocCount;
		}
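The key concurrency idea above is that the pooled "live" reader keeps accumulating deletes while its frozen clone feeds the merge. A small compilable sketch of the same clone-vs-live diffing that CommitMergedDeletes performs later (hypothetical helper, illustrative only):

		private static int CountLateDeletes(SegmentReader frozen, SegmentReader live)
		{
			int late = 0;
			for (int doc = 0; doc < frozen.MaxDoc; doc++)
			{
				if (!frozen.IsDeleted(doc) && live.IsDeleted(doc))
					late++; // delete arrived after the clone was taken
			}
			return late;
		}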
Example #6
 private void SetMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge)
 {
     lock (this)
     {
         string mergeDocStoreSegment = merge.info.DocStoreSegment;
         if (mergeDocStoreSegment != null && !merge.info.DocStoreIsCompoundFile)
         {
             int size = segmentInfos.Count;
             for (int i = 0; i < size; i++)
             {
                 SegmentInfo info = segmentInfos.Info(i);
                 string docStoreSegment = info.DocStoreSegment;
                 if (docStoreSegment != null &&
                     docStoreSegment.Equals(mergeDocStoreSegment) &&
                     info.DocStoreIsCompoundFile)
                 {
                     merge.info.DocStoreIsCompoundFile = true;
                     break;
                 }
             }
         }
     }
 }
Example #7
		private void  _MergeInit(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				
				System.Diagnostics.Debug.Assert(TestPoint("startMergeInit"));
				
				System.Diagnostics.Debug.Assert(merge.registerDone);
				System.Diagnostics.Debug.Assert(!merge.optimize || merge.maxNumSegmentsOptimize > 0);
				
				if (hitOOM)
				{
					throw new System.SystemException("this writer hit an OutOfMemoryError; cannot merge");
				}
				
				if (merge.info != null)
					return; // mergeInit already done
				
				if (merge.IsAborted())
					return;
				
				ApplyDeletes();
				
				SegmentInfos sourceSegments = merge.segments;
				int end = sourceSegments.Count;
				
				// Check whether this merge will allow us to skip
				// merging the doc stores (stored field & vectors).
				// This is a very substantial optimization (saves tons
				// of IO).
				
				Directory lastDir = directory;
				System.String lastDocStoreSegment = null;
				int next = -1;
				
				bool mergeDocStores = false;
				bool doFlushDocStore = false;
				System.String currentDocStoreSegment = docWriter.DocStoreSegment;
				
				// Test each segment to be merged: check if we need to
				// flush/merge doc stores
				for (int i = 0; i < end; i++)
				{
					SegmentInfo si = sourceSegments.Info(i);
					
					// If it has deletions we must merge the doc stores
					if (si.HasDeletions())
						mergeDocStores = true;
					
					// If it has its own (private) doc stores we must
					// merge the doc stores
					if (-1 == si.DocStoreOffset)
						mergeDocStores = true;
					
					// If it has a different doc store segment than
					// previous segments, we must merge the doc stores
					System.String docStoreSegment = si.DocStoreSegment;
					if (docStoreSegment == null)
						mergeDocStores = true;
					else if (lastDocStoreSegment == null)
						lastDocStoreSegment = docStoreSegment;
					else if (!lastDocStoreSegment.Equals(docStoreSegment))
						mergeDocStores = true;
					
					// Segments' DocStoreOffsets must be in-order,
					// contiguous.  For the default merge policy now
					// this will always be the case but for an arbitrary
					// merge policy this may not be the case
					if (-1 == next)
						next = si.DocStoreOffset + si.docCount;
					else if (next != si.DocStoreOffset)
						mergeDocStores = true;
					else
						next = si.DocStoreOffset + si.docCount;
					
					// If the segment comes from a different directory
					// we must merge
					if (lastDir != si.dir)
						mergeDocStores = true;
					
					// If the segment is referencing the current "live"
					// doc store outputs then we must merge
					if (si.DocStoreOffset != -1 && currentDocStoreSegment != null && si.DocStoreSegment.Equals(currentDocStoreSegment))
					{
						doFlushDocStore = true;
					}
				}

                // if a mergedSegmentWarmer is installed, we must merge
                // the doc stores because we will open a full
                // SegmentReader on the merged segment:
                if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.Equals(currentDocStoreSegment))
                {
                    mergeDocStores = true;
                }

				int docStoreOffset;
				System.String docStoreSegment2;
				bool docStoreIsCompoundFile;
				
				if (mergeDocStores)
				{
					docStoreOffset = -1;
					docStoreSegment2 = null;
					docStoreIsCompoundFile = false;
				}
				else
				{
					SegmentInfo si = sourceSegments.Info(0);
					docStoreOffset = si.DocStoreOffset;
					docStoreSegment2 = si.DocStoreSegment;
					docStoreIsCompoundFile = si.DocStoreIsCompoundFile;
				}
				
				if (mergeDocStores && doFlushDocStore)
				{
					// SegmentMerger intends to merge the doc stores
					// (stored fields, vectors), and at least one of the
					// segments to be merged refers to the currently
					// live doc stores.
					
					// TODO: if we know we are about to merge away these
					// newly flushed doc store files then we should not
					// make compound file out of them...
					if (infoStream != null)
						Message("now flush at merge");
					DoFlush(true, false);
				}
				
				merge.mergeDocStores = mergeDocStores;
				
				// Bind a new segment name here so even with
				// ConcurrentMergePolicy we keep deterministic segment
				// names.
				merge.info = new SegmentInfo(NewSegmentName(), 0, directory, false, true, docStoreOffset, docStoreSegment2, docStoreIsCompoundFile, false);


                IDictionary<string, string> details = new Dictionary<string, string>();
				details["optimize"] = merge.optimize + "";
				details["mergeFactor"] = end + "";
				details["mergeDocStores"] = mergeDocStores + "";
				SetDiagnostics(merge.info, "merge", details);
				
				// Also enroll the merged segment into mergingSegments;
				// this prevents it from getting selected for a merge
				// after our merge is done but while we are building the
				// CFS:
                mergingSegments.Add(merge.info);
			}
		}
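To make the DocStoreOffset contiguity test concrete, here is a worked trace with illustrative numbers: three segments share one doc store and have docCounts 10, 20 and 30:

    si[0]: DocStoreOffset =  0  ->  next = 0 + 10 = 10
    si[1]: DocStoreOffset = 10  ->  equals next; next = 10 + 20 = 30
    si[2]: DocStoreOffset = 30  ->  equals next; next = 30 + 30 = 60

The offsets are in-order and contiguous, so as long as no segment has deletions, a private doc store, a different store name, or a foreign directory, mergeDocStores stays false and the shared stored-fields and term-vectors files are reused rather than rewritten.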
Example #8
		/// <summary>Checks whether this merge involves any segments
		/// already participating in a merge.  If not, this merge
		/// is "registered", meaning we record that its segments
		/// are now participating in a merge, and true is
		/// returned.  Else (the merge conflicts) false is
		/// returned. 
		/// </summary>
		internal bool RegisterMerge(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				
				if (merge.registerDone)
					return true;
				
				if (stopMerges)
				{
					merge.Abort();
					throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.SegString(directory));
				}
				
				int count = merge.segments.Count;
				bool isExternal = false;
				for (int i = 0; i < count; i++)
				{
					SegmentInfo info = merge.segments.Info(i);
                    if (mergingSegments.Contains(info))
                    {
                        return false;
                    }
                    if (segmentInfos.IndexOf(info) == -1)
                    {
                        return false;
                    }
                    if (info.dir != directory)
                    {
                        isExternal = true;
                    }
                    if (segmentsToOptimize.Contains(info))
                    {
                        merge.optimize = true;
                        merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
                    }
				}
				
				EnsureContiguousMerge(merge);
				
				pendingMerges.AddLast(merge);
				
				if (infoStream != null)
					Message("add merge to pendingMerges: " + merge.SegString(directory) + " [total " + pendingMerges.Count + " pending]");
				
				merge.mergeGen = mergeGen;
				merge.isExternal = isExternal;
				
				// OK it does not conflict; now record that this merge
				// is running (while synchronized) to avoid a race
				// condition where two conflicting merges from
				// different threads could start at the same time
                for (int i = 0; i < count; i++)
                {
                    SegmentInfo si = merge.segments.Info(i);
                    mergingSegments.Add(si);
                }
				
				// Merge is now registered
				merge.registerDone = true;
				return true;
			}
		}
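RegisterMerge is the gate between a MergePolicy's proposals and the pending-merge queue. A sketch of a typical call site (plumbing simplified; assumes the writer's usual mergePolicy and segmentInfos fields):

			MergePolicy.MergeSpecification spec = mergePolicy.FindMerges(segmentInfos);
			if (spec != null)
			{
				for (int i = 0; i < spec.merges.Count; i++)
				{
					MergePolicy.OneMerge candidate = spec.merges[i];
					if (RegisterMerge(candidate))
					{
						// now in pendingMerges; a merge thread will pick it up
					}
					// a false return means a conflicting merge owns one of the
					// segments (or a segment vanished), so the proposal is dropped
				}
			}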
Example #9
		// Remaps all buffered deletes based on a completed
		// merge
		internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
		{
			lock (this)
			{
				IDictionary<Term, Num> newDeleteTerms;
				
				// Remap delete-by-term
				if (terms.Count > 0)
				{
                    if (doTermSort)
                    {
                        newDeleteTerms = new SortedDictionary<Term, Num>();
                    }
                    else
                    {
                        newDeleteTerms = new HashMap<Term, Num>();
                    }
					foreach(var entry in terms)
					{
						Num num = entry.Value;
						newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
					}
				}
				else
					newDeleteTerms = null;
				
				// Remap delete-by-docID
				List<int> newDeleteDocIDs;
				
				if (docIDs.Count > 0)
				{
					newDeleteDocIDs = new List<int>(docIDs.Count);
					foreach(int num in docIDs)
					{
						newDeleteDocIDs.Add(mapper.Remap(num));
					}
				}
				else
					newDeleteDocIDs = null;
				
				// Remap delete-by-query
				HashMap<Query, int> newDeleteQueries;
				
				if (queries.Count > 0)
				{
                    newDeleteQueries = new HashMap<Query, int>(queries.Count);
					foreach(var entry in queries)
					{
						int num = entry.Value;
						newDeleteQueries[entry.Key] = mapper.Remap(num);
					}
				}
				else
					newDeleteQueries = null;
				
				if (newDeleteTerms != null)
					terms = newDeleteTerms;
				if (newDeleteDocIDs != null)
					docIDs = newDeleteDocIDs;
				if (newDeleteQueries != null)
					queries = newDeleteQueries;
			}
		}
Example #10
			public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
			{
				this.merge = merge;
				this.dir = dir;
			}
Example #11
		internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
		{
			InitBlock();
			directory = writer.Directory;
			segment = name;
			if (merge != null)
			{
				checkAbort = new CheckAbort(merge, directory);
			}
			else
			{
				checkAbort = new AnonymousClassCheckAbort1(this, null, null);
			}
			termIndexInterval = writer.TermIndexInterval;
		}
Example #12
			public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
			{
				lock (this)
				{
					runningMerge = merge;
				}
			}
Example #13
			public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
			{
				InitBlock(enclosingInstance);
				this.writer = writer;
				this.startMerge = startMerge;
			}
Example #14
		/// <summary>Create and return a new MergeThread </summary>
		protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				var thread = new MergeThread(this, writer, merge);
				thread.SetThreadPriority(mergeThreadPriority);
				thread.IsBackground = true;
				thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
				return thread;
			}
		}
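GetMergeThread only builds and configures the daemon thread; launching and tracking it is up to the caller. A sketch (the mergeThreads list is illustrative, standing in for whatever bookkeeping lets Close() join outstanding threads):

			lock (this)
			{
				MergeThread merger = GetMergeThread(writer, merge);
				mergeThreads.Add(merger); // illustrative bookkeeping
				merger.Start();
			}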
Example #15
		/// <summary> Merges the indicated segments, replacing them in the stack with a
		/// single segment.
		/// </summary>
		internal void  Merge(MergePolicy.OneMerge merge)
		{
			
			bool success = false;
			
			try
			{
				try
				{
					try
					{
						MergeInit(merge);
						
						if (infoStream != null)
						{
							Message("now merge\n  merge=" + merge.SegString(directory) + "\n  merge=" + merge + "\n  index=" + SegString());
						}
						
						MergeMiddle(merge);
						MergeSuccess(merge);
						success = true;
					}
					catch (System.Exception t)
					{
						HandleMergeException(t, merge);
					}
				}
				finally
				{
					lock (this)
					{
						MergeFinish(merge);
						
						if (!success)
						{
							if (infoStream != null)
								Message("hit exception during merge");
							if (merge.info != null && !segmentInfos.Contains(merge.info))
								deleter.Refresh(merge.info.name);
						}
						
						// This merge (and, generally, any change to the
						// segments) may now enable new merges, so we call
						// merge policy & update pending merges.
						if (success && !merge.IsAborted() && !closed && !closing)
							UpdatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
					}
				}
			}
			catch (System.OutOfMemoryException oom)
			{
				HandleOOM(oom, "merge");
			}
		}
Example #16
		/// <summary>Hook that's called when the specified merge is complete. </summary>
		internal virtual void  MergeSuccess(MergePolicy.OneMerge merge)
		{
		}
Example #17
		/// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
		internal void  RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
		{
			lock (this)
			{
				if (docMaps == null)
					// The merged segments had no deletes, so docIDs did not change and there is nothing to do
					return;
				MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
				deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
				deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
				flushedDocCount -= mapper.docShift;
			}
		}
Example #18
		/// <summary>Does initial setup for a merge, which is fast but holds
		/// the synchronized lock on IndexWriter instance.  
		/// </summary>
		internal void  MergeInit(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				bool success = false;
				try
				{
					_MergeInit(merge);
					success = true;
				}
				finally
				{
					if (!success)
					{
						MergeFinish(merge);
					}
				}
			}
		}
Example #19
		/// <summary> Expert: set the merge policy used by this writer.</summary>
		public virtual void  SetMergePolicy(MergePolicy mp)
		{
			EnsureOpen();
			if (mp == null)
				throw new System.NullReferenceException("MergePolicy must be non-null");
			
			if (mergePolicy != mp)
				mergePolicy.Close();
			mergePolicy = mp;
			PushMaxBufferedDocs();
			if (infoStream != null)
			{
				Message("setMergePolicy " + mp);
			}
		}
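A usage sketch (constructor and property names as in Lucene.NET 3.x; adjust for other versions):

			var policy = new LogByteSizeMergePolicy(writer);
			policy.MergeFactor = 4; // merge more eagerly than the default of 10
			writer.SetMergePolicy(policy); // the previous policy is closed first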
Example #20
		/// <summary>Does the finishing work for a merge, which is fast but holds
		/// the synchronized lock on IndexWriter instance. 
		/// </summary>
		internal void  MergeFinish(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				
				// Optimize, addIndexes or finishMerges may be waiting
				// on merges to finish.
				System.Threading.Monitor.PulseAll(this);
				
				// It's possible we are called twice, eg if there was an
				// exception inside mergeInit
				if (merge.registerDone)
				{
					SegmentInfos sourceSegments = merge.segments;
					int end = sourceSegments.Count;
					for (int i = 0; i < end; i++)
						mergingSegments.Remove(sourceSegments.Info(i));
                    if(merge.info != null)
					    mergingSegments.Remove(merge.info);
					merge.registerDone = false;
				}
				
				runningMerges.Remove(merge);
			}
		}
Example #21
		private int EnsureContiguousMerge(MergePolicy.OneMerge merge)
		{
			
			int first = segmentInfos.IndexOf(merge.segments.Info(0));
			if (first == -1)
				throw new MergePolicy.MergeException("could not find segment " + merge.segments.Info(0).name + " in current index " + SegString(), directory);
			
			int numSegments = segmentInfos.Count;
			
			int numSegmentsToMerge = merge.segments.Count;
			for (int i = 0; i < numSegmentsToMerge; i++)
			{
				SegmentInfo info = merge.segments.Info(i);
				
				if (first + i >= numSegments || !segmentInfos.Info(first + i).Equals(info))
				{
					if (segmentInfos.IndexOf(info) == -1)
						throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + SegString(), directory);
					else
						throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.SegString(directory) + " vs " + SegString() + "), which IndexWriter (currently) cannot handle", directory);
				}
			}
			
			return first;
		}
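Concretely: with the index at [_0, _1, _2, _3], a merge over [_1, _2] yields first == 1 and the loop passes, while a merge over [_1, _3] fails at i == 1, because segmentInfos.Info(2) is _2 rather than the expected _3; since _3 does exist elsewhere in the index, the second, "non-contiguous" MergeException is thrown.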
Example #22
        private void CloseMergeReaders(MergePolicy.OneMerge merge, bool suppressExceptions)
        {
            lock (this)
            {
                int numSegments = merge.segments.Count;
                if (suppressExceptions)
                {
                    // Suppress any new exceptions so we throw the
                    // original cause
                    for (int i = 0; i < numSegments; i++)
                    {
                        if (merge.readers[i] != null)
                        {
                            try
                            {
                                readerPool.Release(merge.readers[i], false);
                            }
                            catch (Exception)
                            {
                            }
                            merge.readers[i] = null;
                        }

                        if (merge.readersClone[i] != null)
                        {
                            try
                            {
                                merge.readersClone[i].Close();
                            }
                            catch (Exception)
                            {
                            }
                            // This was a private clone and we had the
                            // only reference
                            System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0, "refCount should be 0 but is " + merge.readersClone[i].RefCount);
                            merge.readersClone[i] = null;
                        }
                    }
                }
                else
                {
                    for (int i = 0; i < numSegments; i++)
                    {
                        if (merge.readers[i] != null)
                        {
                            readerPool.Release(merge.readers[i], true);
                            merge.readers[i] = null;
                        }

                        if (merge.readersClone[i] != null)
                        {
                            merge.readersClone[i].Close();
                            // This was a private clone and we had the only reference
                            System.Diagnostics.Debug.Assert(merge.readersClone[i].RefCount == 0);
                            merge.readersClone[i] = null;
                        }
                    }
                }
            }
        }
Example #23
		/// <summary>Carefully merges deletes for the segments we just
		/// merged.  This is tricky because, although merging will
		/// clear all deletes (compacts the documents), new
		/// deletes may have been flushed to the segments since
		/// the merge was started.  This method "carries over"
		/// such new deletes onto the newly merged segment, and
		/// saves the resulting deletes file (incrementing the
		/// delete generation for merge.info).  If no deletes were
		/// flushed, no new deletes file is saved. 
		/// </summary>
		private void  CommitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader)
		{
			lock (this)
			{
				
				System.Diagnostics.Debug.Assert(TestPoint("startCommitMergeDeletes"));
				
				SegmentInfos sourceSegments = merge.segments;
				
				if (infoStream != null)
					Message("commitMergeDeletes " + merge.SegString(directory));
				
				// Carefully merge deletes that occurred after we
				// started merging:
				int docUpto = 0;
				int delCount = 0;
				
				for (int i = 0; i < sourceSegments.Count; i++)
				{
					SegmentInfo info = sourceSegments.Info(i);
					int docCount = info.docCount;
					SegmentReader previousReader = merge.readersClone[i];
					SegmentReader currentReader = merge.readers[i];
					if (previousReader.HasDeletions)
					{
						
						// There were deletes on this segment when the merge
						// started.  The merge has collapsed away those
						// deletes, but, if new deletes were flushed since
						// the merge started, we must now carefully keep any
						// newly flushed deletes but mapping them to the new
						// docIDs.
						
						if (currentReader.NumDeletedDocs > previousReader.NumDeletedDocs)
						{
							// This means this segment has had new deletes
							// committed since we started the merge, so we
							// must merge them:
							for (int j = 0; j < docCount; j++)
							{
								if (previousReader.IsDeleted(j))
								{
									System.Diagnostics.Debug.Assert(currentReader.IsDeleted(j));
                                }
								else
								{
									if (currentReader.IsDeleted(j))
									{
										mergeReader.DoDelete(docUpto);
										delCount++;
									}
									docUpto++;
								}
							}
						}
						else
						{
							docUpto += docCount - previousReader.NumDeletedDocs;
						}
					}
					else if (currentReader.HasDeletions)
					{
						// This segment had no deletes before but now it
						// does:
						for (int j = 0; j < docCount; j++)
						{
							if (currentReader.IsDeleted(j))
							{
								mergeReader.DoDelete(docUpto);
								delCount++;
							}
							docUpto++;
						}
					}
					// No deletes before or after
					else
						docUpto += info.docCount;
				}
				
				System.Diagnostics.Debug.Assert(mergeReader.NumDeletedDocs == delCount);
				
				mergeReader.hasChanges = delCount > 0;
			}
		}
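A worked trace of the carry-over (illustrative numbers): one source segment with docCount = 5, where doc 1 was deleted before the merge started (visible in the clone) and doc 3 was deleted while the merge ran (visible only in the live reader):

    j = 0: live in both                 ->  docUpto: 0 -> 1
    j = 1: deleted in clone             ->  compacted by the merge; skipped
    j = 2: live in both                 ->  docUpto: 1 -> 2
    j = 3: deleted only in live reader  ->  DoDelete(2), delCount = 1; docUpto: 2 -> 3
    j = 4: live in both                 ->  docUpto: 3 -> 4

The late delete of old doc 3 lands on merged docID 2 because doc 1 was compacted away by the merge.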
Example #24
		internal virtual void  AddMergeException(MergePolicy.OneMerge merge)
		{
			lock (this)
			{
				System.Diagnostics.Debug.Assert(merge.GetException() != null);
				if (!mergeExceptions.Contains(merge) && mergeGen == merge.mergeGen)
					mergeExceptions.Add(merge);
			}
		}
Example #25
		/* FIXME if we want to support non-contiguous segment merges */
		private bool CommitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader)
		{
			lock (this)
			{
				
				System.Diagnostics.Debug.Assert(TestPoint("startCommitMerge"));
				
				if (hitOOM)
				{
					throw new System.SystemException("this writer hit an OutOfMemoryError; cannot complete merge");
				}
				
				if (infoStream != null)
					Message("commitMerge: " + merge.SegString(directory) + " index=" + SegString());
				
				System.Diagnostics.Debug.Assert(merge.registerDone);
				
				// If merge was explicitly aborted, or, if rollback() or
				// rollbackTransaction() had been called since our merge
				// started (which results in an unqualified
				// deleter.refresh() call that will remove any index
				// file that current segments does not reference), we
				// abort this merge
				if (merge.IsAborted())
				{
					if (infoStream != null)
						Message("commitMerge: skipping merge " + merge.SegString(directory) + ": it was aborted");
					
					return false;
				}
				
				int start = EnsureContiguousMerge(merge);
				
				CommitMergedDeletes(merge, mergedReader);
				docWriter.RemapDeletes(segmentInfos, merger.GetDocMaps(), merger.GetDelCounts(), merge, mergedDocCount);

                // If the doc store we are using has been closed and
                // is now in compound format (but wasn't when we
                // started), then we will switch to the compound
                // format as well:
                SetMergeDocStoreIsCompoundFile(merge);
				
				merge.info.HasProx = merger.HasProx();
				
				segmentInfos.RemoveRange(start, merge.segments.Count);
				System.Diagnostics.Debug.Assert(!segmentInfos.Contains(merge.info));
				segmentInfos.Insert(start, merge.info);

                CloseMergeReaders(merge, false);
				
				// Must note the change to segmentInfos so any commits
				// in-flight don't lose it:
				Checkpoint();
				
				// If the merged segments had pending changes, clear
				// them so that they don't bother writing them to
				// disk, updating SegmentInfo, etc.:
				readerPool.Clear(merge.segments);

                if (merge.optimize)
                {
                    // cascade the optimize:
                    segmentsToOptimize.Add(merge.info);
                }
				return true;
			}
		}