Example #1
 public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
 {
     lock (this)
     {
         runningMerge = merge;
     }
 }
Example #2
 /// <summary>Create and return a new MergeThread </summary>
 protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
 {
     lock (this)
     {
         var thread = new MergeThread(this, writer, merge);
         thread.SetThreadPriority(mergeThreadPriority);
         thread.IsBackground = true;
         thread.Name         = "Lucene Merge Thread #" + mergeThreadCount++;
         return thread;
     }
 }
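Because GetMergeThread is virtual, a derived scheduler can post-process the thread that the base class creates. The following is a minimal sketch, not library code: the class name and the "[myapp]" prefix are illustrative, and it assumes the subclass is compiled in a separate assembly from Lucene.Net (which is why the override is declared protected rather than protected internal).

using Lucene.Net.Index;

// Hypothetical subclass (illustrative only): let the base ConcurrentMergeScheduler
// build and configure the merge thread, then tag its name for diagnostics.
public class NamedMergeScheduler : ConcurrentMergeScheduler
{
    // Declared protected because the overridden member is protected internal
    // and this subclass is assumed to live outside the Lucene.Net assembly.
    protected override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
    {
        lock (this)
        {
            MergeThread thread = base.GetMergeThread(writer, merge);
            thread.Name = "[myapp] " + thread.Name;
            return thread;
        }
    }
}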
Example #3
 internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
 {
     InitBlock();
     directory = writer.Directory;
     segment   = name;
     if (merge != null)
     {
         checkAbort = new CheckAbort(merge, directory);
     }
     else
     {
         checkAbort = new AnonymousClassCheckAbort1(this, null, null);
     }
     termIndexInterval = writer.TermIndexInterval;
 }
Example #4
		/* If any of our segments are using a directory != ours
		* then we have to either copy them over one by one, merge
		* them (if merge policy has chosen to) or wait until
		* currently running merges (in the background) complete.
		* We don't return until the SegmentInfos has no more
		* external segments.  Currently this is only used by
		* addIndexesNoOptimize(). */
		private void  ResolveExternalSegments()
		{
			
			bool any = false;
			
			bool done = false;
			
			while (!done)
			{
				SegmentInfo info = null;
				MergePolicy.OneMerge merge = null;
				lock (this)
				{
					
					if (stopMerges)
						throw new MergePolicy.MergeAbortedException("rollback() was called or addIndexes* hit an unhandled exception");
					
					int numSegments = segmentInfos.Count;
					
					done = true;
					for (int i = 0; i < numSegments; i++)
					{
						info = segmentInfos.Info(i);
						if (info.dir != directory)
						{
							done = false;
							MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && UseCompoundFile);
							
							// Returns true if no running merge conflicts
							// with this one (and, records this merge as
							// pending), ie, this segment is not currently
							// being merged:
							if (RegisterMerge(newMerge))
							{
								merge = newMerge;
								
								// If this segment is not currently being
								// merged, then advance it to running & run
								// the merge ourself (below):
                                pendingMerges.Remove(merge);    // {{Aroush-2.9}} From Mike Garski: this is an O(n) op... is that an issue?
								runningMerges.Add(merge);
								break;
							}
						}
					}
					
					if (!done && merge == null)
					// We are not yet done (external segments still
					// exist in segmentInfos), yet, all such segments
					// are currently "covered" by a pending or running
					// merge.  We now try to grab any pending merge
					// that involves external segments:
						merge = GetNextExternalMerge();
					
					if (!done && merge == null)
					// We are not yet done, and, all external segments
					// fall under merges that the merge scheduler is
					// currently running.  So, we now wait and check
					// back to see if the merge has completed.
						DoWait();
				}
				
				if (merge != null)
				{
					any = true;
					Merge(merge);
				}
			}
			
			if (any)
			// Sometimes, on copying an external segment over,
			// more merges may become necessary:
				mergeScheduler.Merge(this);
		}
Example #5
 public override System.String ToString()
 {
     MergePolicy.OneMerge merge = RunningMerge ?? startMerge;
     return "merge thread: " + merge.SegString(Enclosing_Instance.dir);
 }
Example #6
            override public void  Run()
            {
                // First time through the while loop we do the merge
                // that we were started with:
                MergePolicy.OneMerge merge = this.startMerge;

                try
                {
                    if (Enclosing_Instance.Verbose())
                    {
                        Enclosing_Instance.Message("  merge thread: start");
                    }

                    while (true)
                    {
                        SetRunningMerge(merge);
                        Enclosing_Instance.DoMerge(merge);

                        // Subsequent times through the loop we do any new
                        // merge that writer says is necessary:
                        merge = writer.GetNextMerge();
                        if (merge != null)
                        {
                            writer.MergeInit(merge);
                            if (Enclosing_Instance.Verbose())
                            {
                                Enclosing_Instance.Message("  merge thread: do another merge " + merge.SegString(Enclosing_Instance.dir));
                            }
                        }
                        else
                        {
                            break;
                        }
                    }

                    if (Enclosing_Instance.Verbose())
                    {
                        Enclosing_Instance.Message("  merge thread: done");
                    }
                }
                catch (System.Exception exc)
                {
                    // Ignore the exception if it was due to abort:
                    if (!(exc is MergePolicy.MergeAbortedException))
                    {
                        if (!Enclosing_Instance.suppressExceptions)
                        {
                            // suppressExceptions is normally only set during
                            // testing.
                            Lucene.Net.Index.ConcurrentMergeScheduler.anyExceptions = true;
                            Enclosing_Instance.HandleMergeException(exc);
                        }
                    }
                }
                finally
                {
                    lock (Enclosing_Instance)
                    {
                        System.Threading.Monitor.PulseAll(Enclosing_Instance);
                        Enclosing_Instance.mergeThreads.Remove(this);
                        bool removed = !Enclosing_Instance.mergeThreads.Contains(this);
                        System.Diagnostics.Debug.Assert(removed);
                    }
                }
            }
Example #7
 public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
 {
     InitBlock(enclosingInstance);
     this.writer     = writer;
     this.startMerge = startMerge;
 }
Example #8
 /// <summary>Does the actual merge, by calling <see cref="IndexWriter.Merge" /> </summary>
 protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
 {
     writer.Merge(merge);
 }
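DoMerge simply delegates to IndexWriter.Merge, which makes it a convenient hook for instrumentation. The sketch below is a hypothetical subclass (the class name and log text are assumptions, not part of Lucene.Net) that times each merge; as above, it assumes the subclass sits in a separate assembly, so the override is declared protected.

using System;
using System.Diagnostics;
using Lucene.Net.Index;

// Hypothetical subclass (illustrative only): measure how long each merge takes.
public class TimingMergeScheduler : ConcurrentMergeScheduler
{
    protected override void DoMerge(MergePolicy.OneMerge merge)
    {
        Stopwatch watch = Stopwatch.StartNew();
        try
        {
            // The base implementation calls writer.Merge(merge), as shown above.
            base.DoMerge(merge);
        }
        finally
        {
            Console.WriteLine("merge completed in " + watch.ElapsedMilliseconds + " ms");
        }
    }
}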
Example #9
        public override void  Merge(IndexWriter writer)
        {
            // TODO: .NET doesn't support this
            // assert !Thread.holdsLock(writer);

            this.writer = writer;

            InitMergeThreadPriority();

            dir = writer.Directory;

            // First, quickly run through the newly proposed merges
            // and add any orthogonal merges (ie a merge not
            // involving segments already pending to be merged) to
            // the queue.  If we are way behind on merging, many of
            // these newly proposed merges will likely already be
            // registered.

            if (Verbose())
            {
                Message("now merge");
                Message("  index: " + writer.SegString());
            }

            // Iterate, pulling from the IndexWriter's queue of
            // pending merges, until it's empty:
            while (true)
            {
                // TODO: we could be careful about which merges to do in
                // the BG (eg maybe the "biggest" ones) vs FG, which
                // merges to do first (the easiest ones?), etc.

                MergePolicy.OneMerge merge = writer.GetNextMerge();
                if (merge == null)
                {
                    if (Verbose())
                    {
                        Message("  no more merges pending; now return");
                    }
                    return;
                }

                // We do this w/ the primary thread to keep
                // deterministic assignment of segment names
                writer.MergeInit(merge);

                bool success = false;
                try
                {
                    lock (this)
                    {
                        while (MergeThreadCount() >= _maxThreadCount)
                        {
                            if (Verbose())
                            {
                                Message("    too many merge threads running; stalling...");
                            }

                            System.Threading.Monitor.Wait(this);
                        }

                        if (Verbose())
                        {
                            Message("  consider merge " + merge.SegString(dir));
                        }

                        System.Diagnostics.Debug.Assert(MergeThreadCount() < _maxThreadCount);

                        // OK to spawn a new merge thread to handle this
                        // merge:
                        MergeThread merger = GetMergeThread(writer, merge);
                        mergeThreads.Add(merger);
                        if (Verbose())
                        {
                            Message("    launch new thread [" + merger.Name + "]");
                        }

                        merger.Start();
                        success = true;
                    }
                }
                finally
                {
                    if (!success)
                    {
                        writer.MergeFinish(merge);
                    }
                }
            }
        }
Example #10
			public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
			{
				this.merge = merge;
				this.dir = dir;
			}
Example #11
        // Remaps all buffered deletes based on a completed
        // merge
        internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
        {
            lock (this)
            {
                IDictionary<Term, Num> newDeleteTerms;

                // Remap delete-by-term
                if (terms.Count > 0)
                {
                    if (doTermSort)
                    {
                        newDeleteTerms = new SortedDictionary<Term, Num>();
                    }
                    else
                    {
                        newDeleteTerms = new HashMap<Term, Num>();
                    }
                    foreach (var entry in terms)
                    {
                        Num num = entry.Value;
                        newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
                    }
                }
                else
                {
                    newDeleteTerms = null;
                }

                // Remap delete-by-docID
                List<int> newDeleteDocIDs;

                if (docIDs.Count > 0)
                {
                    newDeleteDocIDs = new List<int>(docIDs.Count);
                    foreach (int num in docIDs)
                    {
                        newDeleteDocIDs.Add(mapper.Remap(num));
                    }
                }
                else
                {
                    newDeleteDocIDs = null;
                }

                // Remap delete-by-query
                HashMap<Query, int> newDeleteQueries;

                if (queries.Count > 0)
                {
                    newDeleteQueries = new HashMap<Query, int>(queries.Count);
                    foreach (var entry in queries)
                    {
                        int num = entry.Value;
                        newDeleteQueries[entry.Key] = mapper.Remap(num);
                    }
                }
                else
                {
                    newDeleteQueries = null;
                }

                if (newDeleteTerms != null)
                {
                    terms = newDeleteTerms;
                }
                if (newDeleteDocIDs != null)
                {
                    docIDs = newDeleteDocIDs;
                }
                if (newDeleteQueries != null)
                {
                    queries = newDeleteQueries;
                }
            }
        }
Example #12
			public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
			{
				lock (this)
				{
					runningMerge = merge;
				}
			}
Example #13
			public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
			{
				InitBlock(enclosingInstance);
				this.writer = writer;
				this.startMerge = startMerge;
			}
Example #14
        internal int docShift;         // total # deleted docs that were compacted by this merge

        public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
        {
            this.docMaps = docMaps;
            SegmentInfo firstSegment = merge.segments.Info(0);
            int         i            = 0;

            while (true)
            {
                SegmentInfo info = infos.Info(i);
                if (info.Equals(firstSegment))
                {
                    break;
                }
                minDocID += info.docCount;
                i++;
            }

            int numDocs = 0;

            for (int j = 0; j < docMaps.Length; i++, j++)
            {
                numDocs += infos.Info(i).docCount;
                System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
            }
            maxDocID = minDocID + numDocs;

            starts    = new int[docMaps.Length];
            newStarts = new int[docMaps.Length];

            starts[0]    = minDocID;
            newStarts[0] = minDocID;
            for (i = 1; i < docMaps.Length; i++)
            {
                int lastDocCount = merge.segments.Info(i - 1).docCount;
                starts[i]    = starts[i - 1] + lastDocCount;
                newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
            }
            docShift = numDocs - mergedDocCount;

            // There are rare cases when docShift is 0.  It happens
            // if you try to delete a docID that's out of bounds,
            // because the SegmentReader still allocates deletedDocs
            // and pretends it has deletions ... so we can't make
            // this assert here
            // assert docShift > 0;

            // Make sure it all adds up:
            System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
        }
Example #15
 public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
 {
     this.merge = merge;
     this.dir   = dir;
 }