Esempio n. 1
0
		/// <summary>Merges the provided indexes into this index.
		/// <p>After this completes, the index is optimized.</p>
		/// <p>The provided IndexReaders are not closed.</p>
		/// 
		/// <p><b>NOTE:</b> the index in each Directory must not be
		/// changed (opened by a writer) while this method is
		/// running.  This method does not acquire a write lock in
		/// each input Directory, so it is up to the caller to
		/// enforce this.
		/// </p>
		/// 
		/// <p><b>NOTE:</b> while this is running, any attempts to
		/// add or delete documents (with another thread) will be 
		/// paused until this method completes.</p>
		/// 
		/// <p>See {@link #AddIndexes(Directory[])} for
		/// details on transactional semantics, temporary free
		/// space required in the Directory, and non-CFS segments
		/// on an Exception.</p>
		/// </summary>
		/// <throws>  CorruptIndexException if the index is corrupt </throws>
		/// <throws>  IOException if there is a low-level IO error </throws>
		public virtual void  AddIndexes(IndexReader[] readers)
		{
		
            EnsureOpen();

            // Do not allow add docs or deletes while we are running:
            docWriter.PauseAllThreads();

            try
            {
                Optimize(); // start with zero or 1 seg

                System.String mergedName = NewSegmentName();
                SegmentMerger merger = new SegmentMerger(this, mergedName, null);

                SegmentInfo info;

                IndexReader sReader = null;
                try
                {
                    lock (this)
                    {
                        if (segmentInfos.Count == 1)
                        {
                            // add existing index, if any
                            sReader = SegmentReader.Get(segmentInfos.Info(0));
                            merger.Add(sReader);
                        }
                    }

                    // add new indexes
                    for (int i = 0; i < readers.Length; i++)
                        merger.Add(readers[i]);

                    bool success = false;

                    StartTransaction();

                    try
                    {
                        int docCount = merger.Merge(); // merge 'em

                        if (sReader != null)
                        {
                            sReader.Close();
                            // Null out so the outer finally does not close it twice.
                            sReader = null;
                        }

                        lock (this)
                        {
                            segmentInfos.RemoveRange(0, segmentInfos.Count); // pop old infos & add new
                            info = new SegmentInfo(mergedName, docCount, directory, false, true, -1, null, false);
                            segmentInfos.Add(info);
                        }
                        success = true;
                    }
                    finally
                    {
                        if (!success)
                        {
                            if (infoStream != null)
                                Message("hit exception in addIndexes during merge");

                            RollbackTransaction();
                        }
                        else
                        {
                            CommitTransaction();
                        }
                    }
                }
                finally
                {
                    // Close the reader of the pre-existing segment if the merge
                    // aborted before the normal close above happened.
                    if (sReader != null)
                    {
                        sReader.Close();
                    }
                }

                if (mergePolicy is LogMergePolicy && GetUseCompoundFile())
                {

                    bool success = false;

                    StartTransaction();

                    try
                    {
                        merger.CreateCompoundFile(mergedName + ".cfs");
                        lock (this)
                        {
                            info.SetUseCompoundFile(true);
                        }
                        // BUGFIX: success was never set to true here, so the
                        // finally block below unconditionally rolled back the
                        // compound-file transaction and the CommitTransaction()
                        // branch was dead code.
                        success = true;
                    }
                    finally
                    {
                        if (!success)
                        {
                            if (infoStream != null)
                                Message("hit exception building compound file in addIndexes during merge");

                            RollbackTransaction();
                        }
                        else
                        {
                            CommitTransaction();
                        }
                    }
                }
            }
            catch (OutOfMemoryException)
            {
                hitOOM = true;
                // Rethrow with `throw;` to preserve the original stack trace
                // (the previous `throw oom;` reset it).
                throw;
            }
            finally
            {
                docWriter.ResumeAllThreads();
            }
        }
Esempio n. 2
0
		/// <summary>Merges the provided indexes into this index.
		/// <p/>After this completes, the index is optimized.
		/// <p/>The provided IndexReaders are not closed.
		/// 
		/// <p/><b>NOTE:</b> while this is running, any attempts to
		/// add or delete documents (with another thread) will be
		/// paused until this method completes.
		/// 
		/// <p/>See {@link #AddIndexesNoOptimize(Directory[])} for
		/// details on transactional semantics, temporary free
		/// space required in the Directory, and non-CFS segments
		/// on an Exception.
		/// 
		/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
		/// you should immediately close the writer.  See <a
		/// href="#OOME">above</a> for details.
		/// 
		/// </summary>
		/// <throws>  CorruptIndexException if the index is corrupt </throws>
		/// <throws>  IOException if there is a low-level IO error </throws>
		public virtual void  AddIndexes(IndexReader[] readers)
		{
			
			EnsureOpen();
			
			// Do not allow add docs or deletes while we are running:
			docWriter.PauseAllThreads();
			
			// We must pre-acquire a read lock here (and upgrade to
			// write lock in startTransaction below) so that no
			// other addIndexes is allowed to start up after we have
			// flushed & optimized but before we then start our
			// transaction.  This is because the merging below
			// requires that only one segment is present in the
			// index:
			AcquireRead();
			
			try
			{
				
				SegmentInfo info = null;
				System.String mergedName = null;
				SegmentMerger merger = null;
				
				bool success = false;
				
				try
				{
					Flush(true, false, true);
					Optimize(); // start with zero or 1 seg
					success = true;
				}
				finally
				{
					// Take care to release the read lock if we hit an
					// exception before starting the transaction
					if (!success)
						ReleaseRead();
				}
				
				// true means we already have a read lock; if this
				// call hits an exception it will release the write
				// lock:
				StartTransaction(true);
				
				try
				{
					mergedName = NewSegmentName();
					merger = new SegmentMerger(this, mergedName, null);
					
					SegmentReader sReader = null;
					lock (this)
					{
						if (segmentInfos.Count == 1)
						{
							// add existing index, if any
							sReader = readerPool.Get(segmentInfos.Info(0), true, BufferedIndexInput.BUFFER_SIZE, - 1);
						}
					}
					
					success = false;
					
					try
					{
						if (sReader != null)
							merger.Add(sReader);
						
						for (int i = 0; i < readers.Length; i++)
						// add new indexes
							merger.Add(readers[i]);
						
						int docCount = merger.Merge(); // merge 'em
						
						lock (this)
						{
							segmentInfos.Clear(); // pop old infos & add new
							info = new SegmentInfo(mergedName, docCount, directory, false, true, - 1, null, false, merger.HasProx());
							SetDiagnostics(info, "addIndexes(IndexReader[])");
							segmentInfos.Add(info);
						}
						
						// Notify DocumentsWriter that the flushed count just increased
						docWriter.UpdateFlushedDocCount(docCount);
						
						success = true;
					}
					finally
					{
						// Return the pooled reader regardless of the merge outcome.
						if (sReader != null)
						{
							readerPool.Release(sReader);
						}
					}
				}
				finally
				{
					// Commit or roll back the whole merge transaction as a unit.
					if (!success)
					{
						if (infoStream != null)
							Message("hit exception in addIndexes during merge");
						RollbackTransaction();
					}
					else
					{
						CommitTransaction();
					}
				}
				
				if (mergePolicy is LogMergePolicy && GetUseCompoundFile())
				{
					
					System.Collections.Generic.IList<string> files = null;
					
					lock (this)
					{
						// Must incRef our files so that if another thread
						// is running merge/optimize, it doesn't delete our
						// segment's files before we have a chance to
						// finish making the compound file.
						if (segmentInfos.Contains(info))
						{
							files = info.Files();
							deleter.IncRef(files);
						}
					}
					
					// files == null means the merged segment was already removed
					// (e.g. by a concurrent merge); skip building the CFS then.
					if (files != null)
					{
						
						success = false;
						
						StartTransaction(false);
						
						try
						{
							merger.CreateCompoundFile(mergedName + ".cfs");
							lock (this)
							{
								info.SetUseCompoundFile(true);
							}
							
							success = true;
						}
						finally
						{
                            // Drop our extra reference before deciding the
                            // transaction outcome, so the deleter can reclaim
                            // the non-CFS files once they are unused.
                            lock (this)
                            {
                                deleter.DecRef(files);
                            }
														
							if (!success)
							{
								if (infoStream != null)
									Message("hit exception building compound file in addIndexes during merge");
								
								RollbackTransaction();
							}
							else
							{
								CommitTransaction();
							}
						}
					}
				}
			}
			catch (System.OutOfMemoryException oom)
			{
				HandleOOM(oom, "addIndexes(IndexReader[])");
			}
			finally
			{
				if (docWriter != null)
				{
					docWriter.ResumeAllThreads();
				}
			}
		}
Esempio n. 3
0
		/// <summary> Merges the provided indexes into this index.
		/// <p>
		/// After this completes, the index is optimized.
		/// </p>
		/// <p>
		/// The provided IndexReaders are not closed.
		/// </p>
		/// 
		/// <p>
		/// See {@link #AddIndexes(Directory[])} for details on transactional
		/// semantics, temporary free space required in the Directory, and non-CFS
		/// segments on an Exception.
		/// </p>
		/// </summary>
		public virtual void  AddIndexes(IndexReader[] readers)
		{
			lock (this)
			{
				
				Optimize(); // start with zero or 1 seg
				
				System.String mergedName = NewSegmentName();
				SegmentMerger merger = new SegmentMerger(this, mergedName);
				
				System.Collections.ArrayList segmentsToDelete = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
				IndexReader sReader = null;
				try
				{
					if (segmentInfos.Count == 1)
					{
						// add existing index, if any
						sReader = SegmentReader.Get(segmentInfos.Info(0));
						merger.Add(sReader);
						segmentsToDelete.Add(sReader); // queue segment for deletion
					}
					
					// add new indexes
					for (int i = 0; i < readers.Length; i++)
						merger.Add(readers[i]);
					
					SegmentInfo info;
					
					System.String segmentsInfosFileName = segmentInfos.GetCurrentSegmentFileName();
					
					bool success = false;
					
					StartTransaction();
					
					try
					{
						int docCount = merger.Merge(); // merge 'em
						
						segmentInfos.RemoveRange(0, segmentInfos.Count); // pop old infos & add new
						info = new SegmentInfo(mergedName, docCount, directory, false, true);
						segmentInfos.Add(info);
						commitPending = true;
						
						if (sReader != null)
						{
							sReader.Close();
							// Null out so the outer finally does not close it twice.
							sReader = null;
						}
						
						success = true;
					}
					finally
					{
						if (!success)
						{
							RollbackTransaction();
						}
						else
						{
							CommitTransaction();
						}
					}
					
					deleter.DeleteFile(segmentsInfosFileName); // delete old segments_N file
					deleter.DeleteSegments(segmentsToDelete); // delete now-unused segments
					
					if (useCompoundFile)
					{
						success = false;
						
						segmentsInfosFileName = segmentInfos.GetCurrentSegmentFileName();
						System.Collections.ArrayList filesToDelete;
						
						StartTransaction();
						
						try
						{
							
							filesToDelete = merger.CreateCompoundFile(mergedName + ".cfs");
							
							info.SetUseCompoundFile(true);
							commitPending = true;
							success = true;
						}
						finally
						{
							if (!success)
							{
								RollbackTransaction();
							}
							else
							{
								CommitTransaction();
							}
						}
						
						deleter.DeleteFile(segmentsInfosFileName); // delete old segments_N file
						deleter.DeleteFiles(filesToDelete); // delete now unused files of segment
					}
				}
				finally
				{
					// BUGFIX: previously sReader was closed only on the success
					// path, so an exception in Merge() (or anywhere before the
					// in-transaction close) leaked the SegmentReader.
					if (sReader != null)
					{
						sReader.Close();
					}
				}
			}
		}