Example #1
internal void DecRef(SegmentInfos segmentInfos)
{
    // DecRef every file referenced by this SegmentInfos:
    foreach (string file in segmentInfos.Files(directory, false))
    {
        DecRef(file);
    }
}
Example #2
internal void IncRef(SegmentInfos segmentInfos, bool isCommit)
{
    // If this is a commit point, also incRef the
    // segments_N file:
    foreach (string file in segmentInfos.Files(directory, isCommit))
    {
        IncRef(file);
    }
}
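Both methods delegate to per-file IncRef(string)/DecRef(string) overloads that maintain a reference count for each index file. Below is a minimal sketch of that bookkeeping, assuming a refCounts dictionary, a RefCount holder class, and a DeleteFile helper; these names and shapes are illustrative, not the actual IndexFileDeleter internals.

private readonly System.Collections.Generic.Dictionary<string, RefCount> refCounts =
    new System.Collections.Generic.Dictionary<string, RefCount>();

internal void IncRef(string fileName)
{
    // First reference to this file: start tracking it.
    RefCount rc;
    if (!refCounts.TryGetValue(fileName, out rc))
    {
        rc = new RefCount();
        refCounts[fileName] = rc;
    }
    rc.count++;
}

internal void DecRef(string fileName)
{
    RefCount rc = refCounts[fileName];
    if (--rc.count == 0)
    {
        // No commit point or checkpoint references the file any more,
        // so it is safe to physically delete it.
        refCounts.Remove(fileName);
        DeleteFile(fileName); // hypothetical helper
    }
}

private sealed class RefCount
{
    internal int count;
}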
Example #3
            public CommitPoint(IndexFileDeleter enclosingInstance, System.Collections.ICollection commitsToDelete, Directory directory, SegmentInfos segmentInfos)
            {
                InitBlock(enclosingInstance);
                this.directory       = directory;
                this.commitsToDelete = commitsToDelete;
                userData             = segmentInfos.GetUserData();
                segmentsFileName     = segmentInfos.GetCurrentSegmentFileName();
                version     = segmentInfos.GetVersion();
                generation  = segmentInfos.GetGeneration();
                files       = segmentInfos.Files(directory, true);
                gen         = segmentInfos.GetGeneration();
                isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();

                System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
            }
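These CommitPoint snapshots are what IndexFileDeleter hands to the IndexDeletionPolicy (see Checkpoint in Example #4 below): the policy may mark commits for deletion, and the deleter then decrefs their files. A minimal policy sketch in the spirit of KeepOnlyLastCommitDeletionPolicy follows; the non-generic OnInit/OnCommit signatures are assumed from the 2.9-era Lucene.NET API.

public class KeepLastCommitOnlyPolicy : IndexDeletionPolicy
{
    public void OnInit(System.Collections.IList commits)
    {
        // Called once when the deleter starts up; apply the same rule.
        OnCommit(commits);
    }

    public void OnCommit(System.Collections.IList commits)
    {
        // Mark every commit point except the newest for deletion;
        // IndexFileDeleter will decref (and eventually delete) their files.
        int size = commits.Count;
        for (int i = 0; i < size - 1; i++)
        {
            ((IndexCommit) commits[i]).Delete();
        }
    }
}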
Example #4
		/// <summary> For definition of "check point" see IndexWriter comments:
		/// "Clarification: Check Points (and commits)".
		/// 
		/// Writer calls this when it has made a "consistent
		/// change" to the index, meaning new files are written to
		/// the index and the in-memory SegmentInfos have been
		/// modified to point to those files.
		/// 
		/// This may or may not be a commit (segments_N may or may
		/// not have been written).
		/// 
		/// We simply incref the files referenced by the new
		/// SegmentInfos and decref the files we had previously
		/// seen (if any).
		/// 
		/// If this is a commit, we also call the policy to give it
		/// a chance to remove other commits.  If any commits are
		/// removed, we decref their files as well.
		/// </summary>
		public void Checkpoint(SegmentInfos segmentInfos, bool isCommit)
		{
			if (infoStream != null)
			{
				Message("now checkpoint \"" + segmentInfos.GetCurrentSegmentFileName() + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
			}

			// Try again now to delete any previously un-deletable
			// files (because they were in use, on Windows):
			DeletePendingFiles();

			// Incref the files:
			IncRef(segmentInfos, isCommit);

			if (isCommit)
			{
				// Append to our commits list:
				commits.Add(new CommitPoint(this, commitsToDelete, directory, segmentInfos));

				// Tell policy so it can remove commits:
				policy.OnCommit(commits);

				// Decref files for commits that were deleted by the policy:
				DeleteCommits();
			}
			else
			{
				System.Collections.Generic.IList<string> docWriterFiles;
				if (docWriter != null)
				{
					docWriterFiles = docWriter.OpenFiles();
					if (docWriterFiles != null)
					{
						// We must incRef these files before decRef'ing
						// last files to make sure we don't accidentally
						// delete them:
						IncRef(docWriterFiles);
					}
				}
				else
				{
					docWriterFiles = null;
				}

				// DecRef old files from the last checkpoint, if any:
				int size = lastFiles.Count;
				if (size > 0)
				{
					for (int i = 0; i < size; i++)
					{
						DecRef(lastFiles[i]);
					}
					lastFiles.Clear();
				}

				// Save files so we can decref on the next checkpoint/commit:
				foreach (string fname in segmentInfos.Files(directory, false))
				{
					lastFiles.Add(fname);
				}

				if (docWriterFiles != null)
				{
					foreach (string fname in docWriterFiles)
					{
						lastFiles.Add(fname);
					}
				}
			}
		}
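The non-commit branch is what keeps freshly flushed files alive between commits: they are incref'd into lastFiles now and decref'd at the next checkpoint. A hypothetical trace of the refcount movement across a flush followed by a commit (segment and file names invented):

// Checkpoint(infos, isCommit: false)        -- after a flush of _1
//   IncRef all files in infos:  _0.cfs rc 2 -> 3,  _1.cfs rc 0 -> 1
//   DecRef old lastFiles:       _0.cfs rc 3 -> 2
//   lastFiles = { _0.cfs, _1.cfs }
//
// Checkpoint(infos, isCommit: true)         -- a commit
//   IncRef all files + segments_N:  _0.cfs rc 2 -> 3, _1.cfs rc 1 -> 2,
//                                   segments_2 rc 0 -> 1
//   policy.OnCommit(commits)        -- suppose it deletes the previous commit
//   DeleteCommits(): DecRef that commit's files; segments_1 rc 1 -> 0,
//                    so segments_1 is physically deleted.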
Example #5
		/// <summary>This constructor is only used for <see cref="Reopen()"/>.</summary>
		internal DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.IDictionary oldNormsCache, bool readOnly, bool doClone, int termInfosIndexDivisor)
		{
			this.directory = directory;
			this.readOnly = readOnly;
			this.segmentInfos = infos;
			this.termInfosIndexDivisor = termInfosIndexDivisor;
			if (!readOnly)
			{
				// We assume that this segments_N was previously
				// properly sync'd:
				SupportClass.CollectionsHelper.AddAllIfNotContains(synced, infos.Files(directory, true));
			}
			
			// we put the old SegmentReaders in a map that allows us
			// to look up a reader by its segment name
			System.Collections.IDictionary segmentReaders = new System.Collections.Hashtable();
			
			if (oldReaders != null)
			{
				// create a Map SegmentName->SegmentReader
				for (int i = 0; i < oldReaders.Length; i++)
				{
					segmentReaders[oldReaders[i].GetSegmentName()] = (System.Int32) i;
				}
			}
			
			SegmentReader[] newReaders = new SegmentReader[infos.Count];
			
			// remember which readers are shared between the old and the re-opened
			// DirectoryReader - we have to incRef those readers
			bool[] readerShared = new bool[infos.Count];
			
			for (int i = infos.Count - 1; i >= 0; i--)
			{
				// find SegmentReader for this segment
                int? oldReaderIndex = (int?)segmentReaders[infos.Info(i).name];
                if (!oldReaderIndex.HasValue)
                {
                    // this is a new segment, no old SegmentReader can be reused
                    newReaders[i] = null;
                }
                else
                {
                    // there is an old reader for this segment - we'll try to reopen it
                    newReaders[i] = oldReaders[oldReaderIndex.Value];
                }
				
				bool success = false;
				try
				{
					SegmentReader newReader;
					if (newReaders[i] == null || infos.Info(i).GetUseCompoundFile() != newReaders[i].GetSegmentInfo().GetUseCompoundFile())
					{
						
						// We should never see a totally new segment during cloning
						System.Diagnostics.Debug.Assert(!doClone);
						
						// this is a new reader; in case we hit an exception we can close it safely
						newReader = SegmentReader.Get(readOnly, infos.Info(i), termInfosIndexDivisor);
					}
					else
					{
						newReader = newReaders[i].ReopenSegment(infos.Info(i), doClone, readOnly);
					}
					if (newReader == newReaders[i])
					{
						// this reader will be shared between the old and the new one,
						// so we must incRef it
						readerShared[i] = true;
						newReader.IncRef();
					}
					else
					{
						readerShared[i] = false;
						newReaders[i] = newReader;
					}
					success = true;
				}
				finally
				{
					if (!success)
					{
						for (i++; i < infos.Count; i++)
						{
							if (newReaders[i] != null)
							{
								try
								{
									if (!readerShared[i])
									{
										// this is a new subReader that is not used by the old one,
										// we can close it
										newReaders[i].Close();
									}
									else
									{
										// this subReader is also used by the old reader, so instead
										// closing we must decRef it
										newReaders[i].DecRef();
									}
								}
								catch (System.IO.IOException ignore)
								{
									// keep going - we want to clean up as much as possible
								}
							}
						}
					}
				}
			}
			
			// initialize the readers to calculate maxDoc before we try to reuse the old normsCache
			Initialize(newReaders);
			
			// try to copy unchanged norms from the old normsCache to the new one
			if (oldNormsCache != null)
			{
				System.Collections.IEnumerator it = new System.Collections.Hashtable(oldNormsCache).GetEnumerator();
				while (it.MoveNext())
				{
					System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
					System.String field = (System.String) entry.Key;
					if (!HasNorms(field))
					{
						continue;
					}
					
					byte[] oldBytes = (byte[]) entry.Value;
					
					byte[] bytes = new byte[MaxDoc()];
					
					for (int i = 0; i < subReaders.Length; i++)
					{
                        int? oldReaderIndex = (int?)segmentReaders[subReaders[i].GetSegmentName()];

                        // this SegmentReader was not re-opened, we can copy all of its norms 
                        if (oldReaderIndex.HasValue &&
                             (oldReaders[oldReaderIndex.Value] == subReaders[i]
                               || oldReaders[oldReaderIndex.Value].norms[field] == subReaders[i].norms[field]))
                        {
                            // we don't have to synchronize here: either this constructor is called from a SegmentReader,
                            // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
                            // which is synchronized
                            Array.Copy(oldBytes, oldStarts[oldReaderIndex.Value], bytes, starts[i], starts[i + 1] - starts[i]);
                        }
                        else
                        {
                            subReaders[i].Norms(field, bytes, starts[i]);
                        }
					}
					
					normsCache[field] = bytes; // update cache
				}
			}
		}
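All of this machinery is what makes Reopen() cheap: segments that did not change are shared with the old reader (and incref'd) instead of being reloaded. The caller-side contract, sketched against the 2.9-era API (directory is assumed to be an open Directory):

IndexReader reader = IndexReader.Open(directory, true); // read-only
// ... later, after the index may have changed:
IndexReader newReader = reader.Reopen();
if (newReader != reader)
{
    // Reopen handed back a new instance; unchanged segments inside it are
    // shared with (and incref'd from) the old reader. The caller still
    // owns the old reader and must close it.
    reader.Close();
    reader = newReader;
}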
Example #6
		// Used by near real-time search
		internal DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor)
		{
			this.directory = writer.GetDirectory();
			this.readOnly = true;
			segmentInfos = infos;
			segmentInfosStart = (SegmentInfos) infos.Clone();
			this.termInfosIndexDivisor = termInfosIndexDivisor;
			if (!readOnly)
			{
				// We assume that this segments_N was previously
				// properly sync'd:
				SupportClass.CollectionsHelper.AddAllIfNotContains(synced, infos.Files(directory, true));
			}
			
			// IndexWriter synchronizes externally before calling
			// us, which ensures infos will not change; so there's
			// no need to process segments in reverse order
			int numSegments = infos.Count;
			SegmentReader[] readers = new SegmentReader[numSegments];
			Directory dir = writer.GetDirectory();
			int upto = 0;
			
			for (int i = 0; i < numSegments; i++)
			{
				bool success = false;
				try
				{
					SegmentInfo info = infos.Info(i);
					if (info.dir == dir)
					{
						readers[upto++] = writer.readerPool.GetReadOnlyClone(info, true, termInfosIndexDivisor);
					}
					success = true;
				}
				finally
				{
					if (!success)
					{
						// Close all readers we had opened:
						for (upto--; upto >= 0; upto--)
						{
							try
							{
								readers[upto].Close();
							}
							catch (System.Exception ignore)
							{
								// keep going - we want to clean up as much as possible
							}
						}
					}
				}
			}
			
			this.writer = writer;
			
			if (upto < readers.Length)
			{
				// This means some segments were in a foreign Directory
				SegmentReader[] newReaders = new SegmentReader[upto];
				Array.Copy(readers, 0, newReaders, 0, upto);
				readers = newReaders;
			}
			
			Initialize(readers);
		}
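This constructor backs near-real-time search: the reader pulls SegmentReaders straight from the writer's readerPool, so documents become searchable before a commit. A usage sketch via IndexWriter.GetReader(), with the setup assumed from the 2.9-era Lucene.NET API:

Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/path/to/index"));
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(),
                                     IndexWriter.MaxFieldLength.LIMITED);

Document doc = new Document();
doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.AddDocument(doc);

// The NRT reader sees the document above without an intervening Commit():
IndexReader nrtReader = writer.GetReader();
try
{
    // ... run searches against nrtReader ...
}
finally
{
    nrtReader.Close();
}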
Example #7
		/// <summary>Construct reading the named set of readers. </summary>
		internal DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
		{
			this.directory = directory;
			this.readOnly = readOnly;
			this.segmentInfos = sis;
			this.deletionPolicy = deletionPolicy;
			this.termInfosIndexDivisor = termInfosIndexDivisor;
			
			if (!readOnly)
			{
				// We assume that this segments_N was previously
				// properly sync'd:
				SupportClass.CollectionsHelper.AddAllIfNotContains(synced, sis.Files(directory, true));
			}
			
			// To reduce the chance of hitting FileNotFound
			// (and having to retry), we open segments in
			// reverse because IndexWriter merges & deletes
			// the newest segments first.
			
			SegmentReader[] readers = new SegmentReader[sis.Count];
			for (int i = sis.Count - 1; i >= 0; i--)
			{
				bool success = false;
				try
				{
					readers[i] = SegmentReader.Get(readOnly, sis.Info(i), termInfosIndexDivisor);
					success = true;
				}
				finally
				{
					if (!success)
					{
						// Close all readers we had opened:
						for (i++; i < sis.Count; i++)
						{
							try
							{
								readers[i].Close();
							}
							catch (System.Exception ignore)
							{
								// keep going - we want to clean up as much as possible
							}
						}
					}
				}
			}
			
			Initialize(readers);
		}
Example #8
			internal ReaderCommit(SegmentInfos infos, Directory dir)
			{
				segmentsFileName = infos.GetCurrentSegmentFileName();
				this.dir = dir;
				userData = infos.GetUserData();
                files = infos.Files(dir, true);
				version = infos.GetVersion();
				generation = infos.GetGeneration();
				isOptimized = infos.Count == 1 && !infos.Info(0).HasDeletions();
			}
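ReaderCommit is the IndexCommit implementation a reader exposes for the commit point it was opened on. To inspect these snapshots from the outside, IndexReader.ListCommits is the usual entry point (2.9-era API; exact member names are assumed):

foreach (IndexCommit commit in IndexReader.ListCommits(dir))
{
    System.Console.WriteLine(commit.GetSegmentsFileName()
        + " version=" + commit.GetVersion()
        + " gen=" + commit.GetGeneration()
        + " optimized=" + commit.IsOptimized());
}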