Example #1
 /// <summary>Record the merge this thread is currently running.</summary>
 public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
 {
     lock (this)
     {
         runningMerge = merge;
     }
 }
Example #2
 public override System.String ToString()
 {
     MergePolicy.OneMerge merge = GetRunningMerge();
     if (merge == null)
     {
         merge = startMerge;
     }
     return("merge thread: " + merge.SegString(Enclosing_Instance.dir));
 }
Example #3
 /// <summary>Create and return a new MergeThread </summary>
 protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
 {
     lock (this)
     {
         MergeThread thread = new MergeThread(this, writer, merge);
         thread.SetThreadPriority(mergeThreadPriority);
         thread.IsBackground = true;
         thread.Name         = "Lucene Merge Thread #" + mergeThreadCount++;
         return(thread);
     }
 }
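
For context, the number of threads created through this factory is capped by the scheduler's maxThreadCount (see the stall loop in Merge() further below). A minimal configuration sketch, assuming the Mono.Lucene.Net 2.9 API surface used in these examples; the method name is mine:

    using Mono.Lucene.Net.Index;

    // Sketch only, assuming the 2.9-era ConcurrentMergeScheduler/IndexWriter API.
    static void ConfigureConcurrentMerging(IndexWriter writer)
    {
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        cms.SetMaxThreadCount(3);       // allow at most three live merge threads
        writer.SetMergeScheduler(cms);  // threads are named "Lucene Merge Thread #N"
    }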
Example #4
 /// <summary>Just do the merges in sequence. We do this
 /// "synchronized" so that even if the application is using
 /// multiple threads, only one merge may run at a time.
 /// </summary>
 public override void  Merge(IndexWriter writer)
 {
     lock (this)
     {
         while (true)
         {
             MergePolicy.OneMerge merge = writer.GetNextMerge();
             if (merge == null)
             {
                 break;
             }
             writer.Merge(merge);
         }
     }
 }
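
This is the serial scheduler's entry point (SerialMergeScheduler in Lucene): every pending merge runs on the calling thread. A minimal sketch of installing it on a writer, assuming the Mono.Lucene.Net 2.9 API; the index path and analyzer choice are illustrative only:

    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Store;

    // Sketch only: open a writer whose merges run serially on the indexing
    // thread instead of on the default background ConcurrentMergeScheduler.
    static IndexWriter OpenSeriallyMergingWriter()
    {
        Directory indexDir = FSDirectory.Open(new System.IO.DirectoryInfo("/tmp/index"));
        IndexWriter writer = new IndexWriter(indexDir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
        writer.SetMergeScheduler(new SerialMergeScheduler());
        return writer;
    }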
Example #5
 internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
 {
     InitBlock();
     directory = writer.GetDirectory();
     segment   = name;
     if (merge != null)
     {
         checkAbort = new CheckAbort(merge, directory);
     }
     else
     {
         checkAbort = new AnonymousClassCheckAbort1(this, null, null);
     }
     termIndexInterval = writer.GetTermIndexInterval();
 }
Example #6
 /// <summary>Returns the number of live merge threads; when excludeDone is true, only threads still working on an unfinished merge are counted.</summary>
 private int MergeThreadCount(bool excludeDone)
 {
     lock (this)
     {
         int count      = 0;
         int numThreads = mergeThreads.Count;
         for (int i = 0; i < numThreads; i++)
         {
             MergeThread t = (MergeThread)mergeThreads[i];
             if (t.IsAlive)
             {
                 MergePolicy.OneMerge runningMerge = t.GetRunningMerge();
                 if (!excludeDone || (runningMerge != null && !runningMerge.mergeDone))
                 {
                     count++;
                 }
             }
         }
         return(count);
     }
 }
Example #7
            override public void  Run()
            {
                // First time through the while loop we do the merge
                // that we were started with:
                MergePolicy.OneMerge merge = this.startMerge;

                try
                {
                    if (Enclosing_Instance.Verbose())
                    {
                        Enclosing_Instance.Message("  merge thread: start");
                    }

                    while (true)
                    {
                        SetRunningMerge(merge);
                        Enclosing_Instance.DoMerge(merge);

                        // Subsequent times through the loop we do any new
                        // merge that writer says is necessary:
                        merge = writer.GetNextMerge();
                        if (merge != null)
                        {
                            writer.MergeInit(merge);
                            if (Enclosing_Instance.Verbose())
                            {
                                Enclosing_Instance.Message("  merge thread: do another merge " + merge.SegString(Enclosing_Instance.dir));
                            }
                        }
                        else
                        {
                            break;
                        }
                    }

                    if (Enclosing_Instance.Verbose())
                    {
                        Enclosing_Instance.Message("  merge thread: done");
                    }
                }
                catch (System.Exception exc)
                {
                    // Ignore the exception if it was due to abort:
                    if (!(exc is MergePolicy.MergeAbortedException))
                    {
                        if (!Enclosing_Instance.suppressExceptions)
                        {
                            // suppressExceptions is normally only set during
                            // testing.
                            Mono.Lucene.Net.Index.ConcurrentMergeScheduler.anyExceptions = true;
                            Enclosing_Instance.HandleMergeException(exc);
                        }
                    }
                }
                finally
                {
                    lock (Enclosing_Instance)
                    {
                        System.Threading.Monitor.PulseAll(Enclosing_Instance);
                        Enclosing_Instance.mergeThreads.Remove(this);
                        bool removed = !Enclosing_Instance.mergeThreads.Contains(this);
                        System.Diagnostics.Debug.Assert(removed);
                    }
                }
            }
Example #8
			public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
			{
				this.merge = merge;
				this.dir = dir;
			}
Example #9
 public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
 {
     InitBlock(enclosingInstance);
     this.writer     = writer;
     this.startMerge = startMerge;
 }
Example #10
        internal int docShift;         // total # deleted docs that were compacted by this merge

        /// <summary>Computes the old and new starting docIDs of the merged segments and the docShift used to remap buffered deletes after the merge.</summary>
        public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
        {
            this.docMaps = docMaps;
            SegmentInfo firstSegment = merge.segments.Info(0);
            int         i            = 0;

            while (true)
            {
                SegmentInfo info = infos.Info(i);
                if (info.Equals(firstSegment))
                {
                    break;
                }
                minDocID += info.docCount;
                i++;
            }

            int numDocs = 0;

            for (int j = 0; j < docMaps.Length; i++, j++)
            {
                numDocs += infos.Info(i).docCount;
                System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
            }
            maxDocID = minDocID + numDocs;

            starts    = new int[docMaps.Length];
            newStarts = new int[docMaps.Length];

            starts[0]    = minDocID;
            newStarts[0] = minDocID;
            for (i = 1; i < docMaps.Length; i++)
            {
                int lastDocCount = merge.segments.Info(i - 1).docCount;
                starts[i]    = starts[i - 1] + lastDocCount;
                newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
            }
            docShift = numDocs - mergedDocCount;

            // There are rare cases when docShift is 0.  It happens
            // if you try to delete a docID that's out of bounds,
            // because the SegmentReader still allocates deletedDocs
            // and pretends it has deletions ... so we can't make
            // this assert here
            // assert docShift > 0;

            // Make sure it all adds up:
            System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts [docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts [docMaps.Length - 1]));
        }
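
As a rough worked illustration of the arithmetic in this constructor (the numbers below are invented, not taken from the source): suppose the merge covers two segments of 100 and 50 documents carrying 10 and 5 deletions respectively, and the first merged segment starts at docID 200.

    // Hypothetical trace of the constructor's arithmetic; nothing here is
    // taken from the source, it just plugs in small numbers.
    static void TraceRemapArithmetic()
    {
        int minDocID = 200;                    // docs in segments before the merged range
        int numDocs = 100 + 50;                // 150 docs fed into the merge
        int maxDocID = minDocID + numDocs;     // 350: first docID past the merged range
        int mergedDocCount = 150 - 10 - 5;     // 135 docs survive the merge

        int[] starts    = { 200, 200 + 100 };        // old segment starts: 200, 300
        int[] newStarts = { 200, 200 + 100 - 10 };   // new starts: 200, 290
        int docShift = numDocs - mergedDocCount;     // 15 deleted docs compacted out

        // A buffered delete against a docID >= maxDocID would presumably just be
        // shifted down by docShift; one inside [minDocID, maxDocID) is remapped
        // through the per-segment docMaps using starts/newStarts.
        System.Console.WriteLine("docShift=" + docShift + " maxDocID=" + maxDocID
            + " start[1]=" + starts[1] + " newStart[1]=" + newStarts[1]);
    }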
Example #11
 /// <summary>Does the actual merge, by calling {@link IndexWriter#merge} </summary>
 protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
 {
     writer.Merge(merge);
 }
Example #12
        /// <summary>Drains the writer's pending merges, launching a background MergeThread for each and stalling once maxThreadCount threads are running.</summary>
        public override void  Merge(IndexWriter writer)
        {
            // TODO: enable this once we are on JRE 1.5
            // assert !Thread.holdsLock(writer);

            this.writer = writer;

            InitMergeThreadPriority();

            dir = writer.GetDirectory();

            // First, quickly run through the newly proposed merges
            // and add any orthogonal merges (ie a merge not
            // involving segments already pending to be merged) to
            // the queue.  If we are way behind on merging, many of
            // these newly proposed merges will likely already be
            // registered.

            if (Verbose())
            {
                Message("now merge");
                Message("  index: " + writer.SegString());
            }

            // Iterate, pulling from the IndexWriter's queue of
            // pending merges, until it's empty:
            while (true)
            {
                // TODO: we could be careful about which merges to do in
                // the BG (eg maybe the "biggest" ones) vs FG, which
                // merges to do first (the easiest ones?), etc.

                MergePolicy.OneMerge merge = writer.GetNextMerge();
                if (merge == null)
                {
                    if (Verbose())
                    {
                        Message("  no more merges pending; now return");
                    }
                    return;
                }

                // We do this w/ the primary thread to keep
                // deterministic assignment of segment names
                writer.MergeInit(merge);

                bool success = false;
                try
                {
                    lock (this)
                    {
                        MergeThread merger;
                        while (MergeThreadCount(true) >= maxThreadCount)
                        {
                            if (Verbose())
                            {
                                Message("    too many merge threads running; stalling...");
                            }
                            try
                            {
                                System.Threading.Monitor.Wait(this);
                            }
                            catch (System.Threading.ThreadInterruptedException ie)
                            {
                                // In 3.0 we will change this to throw
                                // InterruptedException instead
                                SupportClass.ThreadClass.Current().Interrupt();
                                throw new System.SystemException(ie.Message, ie);
                            }
                        }

                        if (Verbose())
                        {
                            Message("  consider merge " + merge.SegString(dir));
                        }


                        // OK to spawn a new merge thread to handle this
                        // merge:
                        merger = GetMergeThread(writer, merge);
                        mergeThreads.Add(merger);
                        if (Verbose())
                        {
                            Message("    launch new thread [" + merger.Name + "]");
                        }

                        merger.Start();
                        success = true;
                    }
                }
                finally
                {
                    if (!success)
                    {
                        writer.MergeFinish(merge);
                    }
                }
            }
        }
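
The stall loop above, where Merge() calls Monitor.Wait on the scheduler and each merge thread's finally block calls Monitor.PulseAll, is a generic bounded-concurrency pattern. A stand-alone sketch of just that mechanism, with names and structure that are mine rather than Lucene's:

    using System;
    using System.Collections.Generic;
    using System.Threading;

    // Stand-alone sketch of the stall/notify pattern used by Merge() above.
    class BoundedRunner
    {
        private readonly object sync = new object();
        private readonly List<Thread> workers = new List<Thread>();
        private readonly int maxThreads;

        public BoundedRunner(int maxThreads)
        {
            this.maxThreads = maxThreads;
        }

        public void Run(Action job)
        {
            lock (sync)
            {
                // Stall the submitting thread until a worker slot frees up,
                // just as Merge() waits while MergeThreadCount(true) >= maxThreadCount.
                while (CountAlive() >= maxThreads)
                {
                    Monitor.Wait(sync);
                }

                Thread worker = new Thread(() =>
                {
                    try
                    {
                        job();
                    }
                    finally
                    {
                        lock (sync)
                        {
                            // Mirror MergeThread.Run()'s finally block: deregister
                            // this thread and wake anyone stalled in Run().
                            workers.Remove(Thread.CurrentThread);
                            Monitor.PulseAll(sync);
                        }
                    }
                });
                worker.IsBackground = true;
                workers.Add(worker);
                worker.Start();
            }
        }

        private int CountAlive()
        {
            int count = 0;
            foreach (Thread t in workers)
            {
                if (t.IsAlive)
                {
                    count++;
                }
            }
            return count;
        }
    }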
Example #13
        // Remaps all buffered deletes based on a completed
        // merge
        internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
        {
            lock (this)
            {
                System.Collections.IDictionary newDeleteTerms;

                // Remap delete-by-term
                if (terms.Count > 0)
                {
                    if (doTermSort)
                    {
                        newDeleteTerms = new System.Collections.Generic.SortedDictionary <object, object>();
                    }
                    else
                    {
                        newDeleteTerms = new System.Collections.Hashtable();
                    }
                    System.Collections.IEnumerator iter = new System.Collections.Hashtable(terms).GetEnumerator();
                    while (iter.MoveNext())
                    {
                        System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry)iter.Current;
                        Num num = (Num)entry.Value;
                        newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
                    }
                }
                else
                {
                    newDeleteTerms = null;
                }

                // Remap delete-by-docID
                System.Collections.ArrayList newDeleteDocIDs;

                if (docIDs.Count > 0)
                {
                    newDeleteDocIDs = new System.Collections.ArrayList(docIDs.Count);
                    System.Collections.IEnumerator iter = docIDs.GetEnumerator();
                    while (iter.MoveNext())
                    {
                        System.Int32 num = (System.Int32)iter.Current;
                        newDeleteDocIDs.Add((System.Int32)mapper.Remap(num));
                    }
                }
                else
                {
                    newDeleteDocIDs = null;
                }

                // Remap delete-by-query
                System.Collections.Hashtable newDeleteQueries;

                if (queries.Count > 0)
                {
                    newDeleteQueries = new System.Collections.Hashtable(queries.Count);
                    System.Collections.IEnumerator iter = new System.Collections.Hashtable(queries).GetEnumerator();
                    while (iter.MoveNext())
                    {
                        System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry)iter.Current;
                        System.Int32 num = (System.Int32)entry.Value;
                        newDeleteQueries[entry.Key] = (System.Int32)mapper.Remap(num);
                    }
                }
                else
                {
                    newDeleteQueries = null;
                }

                if (newDeleteTerms != null)
                {
                    terms = newDeleteTerms;
                }
                if (newDeleteDocIDs != null)
                {
                    docIDs = newDeleteDocIDs;
                }
                if (newDeleteQueries != null)
                {
                    queries = newDeleteQueries;
                }
            }
        }
Example #14
		/* If any of our segments are using a directory != ours
		* then we have to either copy them over one by one, merge
		* them (if merge policy has chosen to) or wait until
		* currently running merges (in the background) complete.
		* We don't return until the SegmentInfos has no more
		* external segments.  Currently this is only used by
		* addIndexesNoOptimize(). */
		private void  ResolveExternalSegments()
		{
			
			bool any = false;
			
			bool done = false;
			
			while (!done)
			{
				SegmentInfo info = null;
				MergePolicy.OneMerge merge = null;
				lock (this)
				{
					
					if (stopMerges)
						throw new MergePolicy.MergeAbortedException("rollback() was called or addIndexes* hit an unhandled exception");
					
					int numSegments = segmentInfos.Count;
					
					done = true;
					for (int i = 0; i < numSegments; i++)
					{
						info = segmentInfos.Info(i);
						if (info.dir != directory)
						{
							done = false;
							MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && GetUseCompoundFile());
							
							// Returns true if no running merge conflicts
							// with this one (and, records this merge as
							// pending), ie, this segment is not currently
							// being merged:
							if (RegisterMerge(newMerge))
							{
								merge = newMerge;
								
								// If this segment is not currently being
								// merged, then advance it to running & run
								// the merge ourself (below):
                                pendingMerges.Remove(merge);    // {{Aroush-2.9}} From Mike Garski: this is an O(n) op... is that an issue?
								runningMerges.Add(merge);
								break;
							}
						}
					}
					
					if (!done && merge == null)
					// We are not yet done (external segments still
					// exist in segmentInfos), yet, all such segments
					// are currently "covered" by a pending or running
					// merge.  We now try to grab any pending merge
					// that involves external segments:
						merge = GetNextExternalMerge();
					
					if (!done && merge == null)
					// We are not yet done, and, all external segments
					// fall under merges that the merge scheduler is
					// currently running.  So, we now wait and check
					// back to see if the merge has completed.
						DoWait();
				}
				
				if (merge != null)
				{
					any = true;
					Merge(merge);
				}
			}
			
			if (any)
			// Sometimes, on copying an external segment over,
			// more merges may become necessary:
				mergeScheduler.Merge(this);
		}
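
The "external" segments this method resolves appear when another index's segments are imported wholesale. A hypothetical caller-side sketch, assuming the 2.9-era AddIndexesNoOptimize API; the path and method name are invented:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Store;

    // Sketch only: importing another index leaves segments that still point at
    // "other", which ResolveExternalSegments then copies or merges locally.
    static void AddExternalIndex(IndexWriter writer)
    {
        Directory other = FSDirectory.Open(new System.IO.DirectoryInfo("/tmp/other-index"));
        writer.AddIndexesNoOptimize(new Directory[] { other });
    }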