Example #1: CompoundFileWriter constructor
		internal CompoundFileWriter(Directory dir, System.String name, SegmentMerger.CheckAbort checkAbort)
		{
			if (dir == null)
				throw new System.ArgumentNullException("dir", "directory cannot be null");
			if (name == null)
				throw new System.ArgumentNullException("name", "name cannot be null");
			this.checkAbort = checkAbort;
			directory = dir;
			fileName = name;
			ids = new System.Collections.Hashtable();
			entries = new System.Collections.ArrayList();
		}
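A minimal usage sketch of the writer above, assuming the Lucene.Net 2.9-era API: a public two-argument constructor that forwards a null CheckAbort, plus AddFile and Close (in some ports the class is internal to the assembly). The file names are hypothetical.

using Mono.Lucene.Net.Index;
using Mono.Lucene.Net.Store;

public static class CompoundFileDemo
{
    public static void Main()
    {
        // Write a small file into the directory so there is something to pack:
        Directory dir = new RAMDirectory();
        IndexOutput output = dir.CreateOutput("_1.fdt");
        output.WriteInt(42);
        output.Close();

        // Pack the file into a single compound file:
        CompoundFileWriter cfsWriter = new CompoundFileWriter(dir, "_1.cfs");
        cfsWriter.AddFile("_1.fdt"); // source files must already exist in dir
        cfsWriter.Close();           // copies all registered files into _1.cfs
    }
}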
Example #2: IndexWriter.CommitMerge
		/* FIXME if we want to support non-contiguous segment merges */
		private bool CommitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader)
		{
			lock (this)
			{
				
				System.Diagnostics.Debug.Assert(TestPoint("startCommitMerge"));
				
				if (hitOOM)
				{
					throw new System.SystemException("this writer hit an OutOfMemoryException; cannot complete merge");
				}
				
				if (infoStream != null)
					Message("commitMerge: " + merge.SegString(directory) + " index=" + SegString());
				
				System.Diagnostics.Debug.Assert(merge.registerDone);
				
				// If the merge was explicitly aborted, or if rollback() or
				// rollbackTransaction() has been called since our merge
				// started (which results in an unqualified
				// deleter.refresh() call that will remove any index
				// file the current segments do not reference), we
				// abort this merge:
				if (merge.IsAborted())
				{
					if (infoStream != null)
						Message("commitMerge: skipping merge " + merge.SegString(directory) + ": it was aborted");
					
					return false;
				}
				
				int start = EnsureContiguousMerge(merge);
				
				CommitMergedDeletes(merge, mergedReader);
				docWriter.RemapDeletes(segmentInfos, merger.GetDocMaps(), merger.GetDelCounts(), merge, mergedDocCount);

                // If the doc store we are using has been closed and
                // is now in compound format (but wasn't when we
                // started), then we will switch to the compound
                // format as well:
                SetMergeDocStoreIsCompoundFile(merge);
				
				merge.info.SetHasProx(merger.HasProx());
				
				((System.Collections.IList) ((System.Collections.ArrayList) segmentInfos).GetRange(start, merge.segments.Count)).Clear();
				System.Diagnostics.Debug.Assert(!segmentInfos.Contains(merge.info));
				segmentInfos.Insert(start, merge.info);

                CloseMergeReaders(merge, false);
				
				// Must note the change to segmentInfos so any commits
				// in-flight don't lose it:
				Checkpoint();
				
				// If the merged segments had pending changes, clear
				// them so that they don't bother writing them to
				// disk, updating SegmentInfo, etc.:
				readerPool.Clear(merge.segments);

                if (merge.optimize)
                {
                    // cascade the optimize:
                    segmentsToOptimize[merge.info] = merge.info;
                }
				return true;
			}
		}
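The GetRange(...).Clear() splice above relies on a subtle property of System.Collections.ArrayList: unlike List<T>.GetRange, which returns a copy, ArrayList.GetRange returns a view onto the underlying list, so clearing the view removes the merged source segments from segmentInfos in place. A self-contained sketch of the splice, with hypothetical segment names standing in for SegmentInfo objects:

using System.Collections;

public static class SegmentSpliceDemo
{
    public static void Main()
    {
        ArrayList infos = new ArrayList { "_0", "_1", "_2", "_3" };
        int start = 1; // index of the first merged source segment
        int count = 2; // merge.segments.Count

        // Clearing the view removes "_1" and "_2" from infos itself:
        infos.GetRange(start, count).Clear();

        // The single merged segment takes their place:
        infos.Insert(start, "_4");
        // infos is now ["_0", "_4", "_3"]
    }
}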
Example #3: IndexWriter.MergeMiddle
		/// <summary>Does the actual (time-consuming) work of the merge,
		/// but without holding the synchronized lock on the
		/// IndexWriter instance.
		/// </summary>
		private int MergeMiddle(MergePolicy.OneMerge merge)
		{
			
			merge.CheckAborted(directory);
			
			System.String mergedName = merge.info.name;
			
			SegmentMerger merger = null;
			
			int mergedDocCount = 0;
			
			SegmentInfos sourceSegments = merge.segments;
			int numSegments = sourceSegments.Count;
			
			if (infoStream != null)
				Message("merging " + merge.SegString(directory));
			
			merger = new SegmentMerger(this, mergedName, merge);
			
			merge.readers = new SegmentReader[numSegments];
			merge.readersClone = new SegmentReader[numSegments];
			
			bool mergeDocStores = false;

            System.Collections.Hashtable dss = new System.Collections.Hashtable();
			
            String currentDocStoreSegment;
            lock(this) {
                currentDocStoreSegment = docWriter.GetDocStoreSegment();
            }
            bool currentDSSMerged = false;

			// This is try/finally to make sure merger's readers are
			// closed:
			bool success = false;
            try
            {
                int totDocCount = 0;

                for (int i = 0; i < numSegments; i++)
                {

                    SegmentInfo info = sourceSegments.Info(i);

                    // Hold onto the "live" reader; we will use this to
                    // commit merged deletes
                    SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, -1);

                    // We clone the segment readers because other
                    // deletes may come in while we're merging so we
                    // need readers that will not change
                    SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(true);
                    merger.Add(clone);

                    if (clone.HasDeletions())
                    {
                        mergeDocStores = true;
                    }

                    if (info.GetDocStoreOffset() != -1 && currentDocStoreSegment != null)
                    {
                        currentDSSMerged |= currentDocStoreSegment.Equals(info.GetDocStoreSegment());
                    }

                    totDocCount += clone.NumDocs();
                }

                if (infoStream != null)
                {
                    Message("merge: total " + totDocCount + " docs");
                }

                merge.CheckAborted(directory);

                // If deletions have arrived and it has now become
                // necessary to merge doc stores, go and open them:
                if (mergeDocStores && !merge.mergeDocStores)
                {
                    merge.mergeDocStores = true;
                    lock (this)
                    {
                        if (currentDSSMerged)
                        {
                            if (infoStream != null)
                            {
                                Message("now flush at mergeMiddle");
                            }
                            DoFlush(true, false);
                        }
                    }

                    for (int i = 0; i < numSegments; i++)
                    {
                        merge.readersClone[i].OpenDocStores();
                    }

                    // Clear the doc store segment (DSS) on the merged segment:
                    merge.info.SetDocStore(-1, null, false);

                }

                // This is where all the work happens:
                mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);

                System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);

                if (merge.useCompoundFile)
                {

                    success = false;
                    string compoundFileName = IndexFileNames.SegmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);

                    try
                    {
                        if (infoStream != null)
                        {
                            Message("create compound file " + compoundFileName);
                        }
                        merger.CreateCompoundFile(compoundFileName);
                        success = true;
                    }
                    catch (System.IO.IOException ioe)
                    {
                        lock (this)
                        {
                            if (merge.IsAborted())
                            {
                                // This can happen if rollback or close(false)
                                // is called -- fall through to logic below to
                                // remove the partially created CFS:
                            }
                            else
                            {
                                HandleMergeException(ioe, merge);
                            }
                        }
                    }
                    catch (Exception t)
                    {
                        HandleMergeException(t, merge);
                    }
                    finally
                    {
                        if (!success)
                        {
                            if (infoStream != null)
                            {
                                Message("hit exception creating compound file during merge");
                            }

                            lock (this)
                            {
                                deleter.DeleteFile(compoundFileName);
                                deleter.DeleteNewFiles(merger.GetMergedFiles());
                            }
                        }
                    }

                    success = false;

                    lock (this)
                    {

                        // delete new non cfs files directly: they were never
                        // registered with IFD
                        deleter.DeleteNewFiles(merger.GetMergedFiles());

                        if (merge.IsAborted())
                        {
                            if (infoStream != null)
                            {
                                Message("abort merge after building CFS");
                            }
                            deleter.DeleteFile(compoundFileName);
                            return 0;
                        }
                    }

                    merge.info.SetUseCompoundFile(true);
                }

                int termsIndexDivisor;
                bool loadDocStores;

                // If the merged segment warmer was not installed when
                // this merge started, we did not force the doc stores
                // to close, so we cannot warm the merged reader now:
                bool canWarm = merge.info.GetDocStoreSegment() == null || currentDocStoreSegment == null || !merge.info.GetDocStoreSegment().Equals(currentDocStoreSegment);

                if (poolReaders && mergedSegmentWarmer != null && canWarm)
                {
                    // Load terms index & doc stores so the segment
                    // warmer can run searches, load documents/term
                    // vectors
                    termsIndexDivisor = readerTermsIndexDivisor;
                    loadDocStores = true;
                }
                else
                {
                    termsIndexDivisor = -1;
                    loadDocStores = false;
                }

                // TODO: in the non-realtime case, we may want to only
                // keep deletes (it's costly to open entire reader
                // when we just need deletes)

                SegmentReader mergedReader = readerPool.Get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
                try
                {
                    if (poolReaders && mergedSegmentWarmer != null)
                    {
                        mergedSegmentWarmer.Warm(mergedReader);
                    }
                    if (!CommitMerge(merge, merger, mergedDocCount, mergedReader))
                    {
                        // commitMerge will return false if this merge was aborted
                        return 0;
                    }
                }
                finally
                {
                    lock (this)
                    {
                        readerPool.Release(mergedReader);
                    }
                }

                success = true;
            }
            finally
            {
                // Readers are already closed in commitMerge if we didn't hit
                // an exception:
                if (!success)
                {
                    CloseMergeReaders(merge, true);
                }
            }

            merge.mergeDone = true;

            lock (mergeScheduler)
            {
                System.Threading.Monitor.PulseAll(mergeScheduler); 
            }

			// Force a sync after committing the merge.  Once this
			// sync completes then all index files referenced by the
			// current segmentInfos are on stable storage so if the
			// OS/machine crashes, or power cord is yanked, the
			// index will be intact.  Note that this is just one
			// (somewhat arbitrary) policy; we could try other
			// policies like only sync if it's been > X minutes or
			// more than Y bytes have been written, etc.
			if (autoCommit)
			{
				long size;
				lock (this)
				{
					size = merge.info.SizeInBytes();
				}
				Commit(size);
			}
			
			return mergedDocCount;
		}
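The lock/PulseAll on mergeScheduler near the end is one half of a standard monitor hand-off: MergeMiddle sets merge.mergeDone and wakes every thread blocked waiting for this merge to finish. A self-contained sketch of both sides of that pattern; the class and field names below are local stand-ins, not the writer's actual members:

using System.Threading;

public class MergeSignal
{
    private readonly object gate = new object();
    private bool mergeDone;

    // Waiting side: block until the merge thread signals completion.
    public void WaitForMerge()
    {
        lock (gate)
        {
            while (!mergeDone) // loop guards against spurious wakeups
                Monitor.Wait(gate);
        }
    }

    // Merge side: flip the flag and wake every waiter, as MergeMiddle does.
    public void FinishMerge()
    {
        lock (gate)
        {
            mergeDone = true;
            Monitor.PulseAll(gate);
        }
    }
}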
Example #4: AnonymousClassFieldSelector constructor
			public AnonymousClassFieldSelector(SegmentMerger enclosingInstance)
			{
				InitBlock(enclosingInstance);
			}
Example #5: IndexWriter.AddIndexes(IndexReader[])
		/// <summary>Merges the provided indexes into this index.
		/// <p/>After this completes, the index is optimized. <p/>
		/// <p/>The provided IndexReaders are not closed.<p/>
		/// 
		/// <p/><b>NOTE:</b> while this is running, any attempts to
		/// add or delete documents (with another thread) will be
		/// paused until this method completes.
		/// 
		/// <p/>See {@link #AddIndexesNoOptimize(Directory[])} for
		/// details on transactional semantics, temporary free
		/// space required in the Directory, and non-CFS segments
		/// on an Exception.<p/>
		/// 
		/// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
		/// you should immediately close the writer.  See <a
		/// href="#OOME">above</a> for details.<p/>
		/// 
		/// </summary>
		/// <throws>  CorruptIndexException if the index is corrupt </throws>
		/// <throws>  IOException if there is a low-level IO error </throws>
		public virtual void  AddIndexes(IndexReader[] readers)
		{
			
			EnsureOpen();
			
			// Do not allow add docs or deletes while we are running:
			docWriter.PauseAllThreads();
			
			// We must pre-acquire a read lock here (and upgrade to
			// write lock in startTransaction below) so that no
			// other addIndexes is allowed to start up after we have
			// flushed & optimized but before we then start our
			// transaction.  This is because the merging below
			// requires that only one segment is present in the
			// index:
			AcquireRead();
			
			try
			{
				
				SegmentInfo info = null;
				System.String mergedName = null;
				SegmentMerger merger = null;
				
				bool success = false;
				
				try
				{
					Flush(true, false, true);
					Optimize(); // start with zero or 1 seg
					success = true;
				}
				finally
				{
					// Take care to release the read lock if we hit an
					// exception before starting the transaction
					if (!success)
						ReleaseRead();
				}
				
				// true means we already have a read lock; if this
				// call hits an exception it will release the write
				// lock:
				StartTransaction(true);
				
				try
				{
					mergedName = NewSegmentName();
					merger = new SegmentMerger(this, mergedName, null);
					
					SegmentReader sReader = null;
					lock (this)
					{
						if (segmentInfos.Count == 1)
						{
							// add existing index, if any
							sReader = readerPool.Get(segmentInfos.Info(0), true, BufferedIndexInput.BUFFER_SIZE, - 1);
						}
					}
					
					success = false;
					
					try
					{
						if (sReader != null)
							merger.Add(sReader);
						
						// add the new external indexes:
						for (int i = 0; i < readers.Length; i++)
							merger.Add(readers[i]);
						
						int docCount = merger.Merge(); // merge 'em
						
						lock (this)
						{
							segmentInfos.Clear(); // pop old infos & add new
							info = new SegmentInfo(mergedName, docCount, directory, false, true, - 1, null, false, merger.HasProx());
							SetDiagnostics(info, "addIndexes(IndexReader[])");
							segmentInfos.Add(info);
						}
						
						// Notify DocumentsWriter that the flushed count just increased
						docWriter.UpdateFlushedDocCount(docCount);
						
						success = true;
					}
					finally
					{
						if (sReader != null)
						{
							readerPool.Release(sReader);
						}
					}
				}
				finally
				{
					if (!success)
					{
						if (infoStream != null)
							Message("hit exception in addIndexes during merge");
						RollbackTransaction();
					}
					else
					{
						CommitTransaction();
					}
				}
				
				if (mergePolicy is LogMergePolicy && GetUseCompoundFile())
				{
					
					System.Collections.Generic.IList<string> files = null;
					
					lock (this)
					{
						// Must incRef our files so that if another thread
						// is running merge/optimize, it doesn't delete our
						// segment's files before we have a chance to
						// finish making the compound file.
						if (segmentInfos.Contains(info))
						{
							files = info.Files();
							deleter.IncRef(files);
						}
					}
					
					if (files != null)
					{
						
						success = false;
						
						StartTransaction(false);
						
						try
						{
							merger.CreateCompoundFile(mergedName + ".cfs");
							lock (this)
							{
								info.SetUseCompoundFile(true);
							}
							
							success = true;
						}
						finally
						{
                            lock (this)
                            {
                                deleter.DecRef(files);
                            }
							if (!success)
							{
								if (infoStream != null)
									Message("hit exception building compound file in addIndexes during merge");
								
								RollbackTransaction();
							}
							else
							{
								CommitTransaction();
							}
						}
					}
				}
			}
			catch (System.OutOfMemoryException oom)
			{
				HandleOOM(oom, "addIndexes(IndexReader[])");
			}
			finally
			{
				if (docWriter != null)
				{
					docWriter.ResumeAllThreads();
				}
			}
		}
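A minimal caller-side sketch for AddIndexes, assuming the Lucene.Net 2.9-era public API (IndexWriter, StandardAnalyzer, read-only IndexReader.Open) and that the source directory already contains an index:

using Mono.Lucene.Net.Analysis.Standard;
using Mono.Lucene.Net.Index;
using Mono.Lucene.Net.Store;
using Version = Mono.Lucene.Net.Util.Version;

public static class AddIndexesDemo
{
    public static void Main()
    {
        Directory dest = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dest,
            new StandardAnalyzer(Version.LUCENE_29),
            true, IndexWriter.MaxFieldLength.UNLIMITED);

        Directory src = new RAMDirectory(); // assumed to already hold an index
        IndexReader reader = IndexReader.Open(src, true); // read-only reader

        writer.AddIndexes(new IndexReader[] { reader }); // merge + optimize into dest

        reader.Close(); // AddIndexes does not close the provided readers
        writer.Close();
    }
}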
Example #6: AnonymousClassCheckAbort1 constructor
			internal AnonymousClassCheckAbort1(SegmentMerger enclosingInstance, Mono.Lucene.Net.Index.MergePolicy.OneMerge Param1, Mono.Lucene.Net.Store.Directory Param2) : base(Param1, Param2)
			{
				InitBlock(enclosingInstance);
			}
Example #7: InitBlock helper
			private void InitBlock(SegmentMerger enclosingInstance)
			{
				this.enclosingInstance = enclosingInstance;
			}
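Examples #4, #6, and #7 are all instances of the same mechanical pattern: the Java-to-C# conversion turns each Java anonymous inner class into a named nested class that captures its enclosing instance explicitly through InitBlock. Assembled, the shape looks roughly like this (AnonymousClassDemo is a hypothetical stand-in):

internal class AnonymousClassDemo
{
    private SegmentMerger enclosingInstance;

    public SegmentMerger Enclosing_Instance
    {
        get { return enclosingInstance; }
    }

    // Captures the outer "this" that a Java anonymous class receives implicitly:
    private void InitBlock(SegmentMerger enclosingInstance)
    {
        this.enclosingInstance = enclosingInstance;
    }

    public AnonymousClassDemo(SegmentMerger enclosingInstance)
    {
        InitBlock(enclosingInstance);
    }
}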