internal DirectoryReader DoOpenIfChanged(SegmentInfos infos)
 {
     return StandardDirectoryReader.Open(m_directory, infos, GetSequentialSubReaders().OfType<AtomicReader>().ToList(), termInfosIndexDivisor);
 }
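This internal overload sits behind the public DirectoryReader.OpenIfChanged(DirectoryReader) refresh path. As a minimal caller-side sketch (assuming Lucene.NET 4.8; "indexPath" is a placeholder, not a name from the code above):

using Lucene.Net.Index;
using Lucene.Net.Store;

// Sketch only: open a reader, then refresh it if the index changed on disk.
using (Directory dir = FSDirectory.Open("indexPath")) // placeholder path
{
    DirectoryReader reader = DirectoryReader.Open(dir);

    // ... the index may be modified and committed by another writer here ...

    DirectoryReader changed = DirectoryReader.OpenIfChanged(reader);
    if (changed != null)
    {
        reader.Dispose(); // the caller still owns (and must dispose) the old reader
        reader = changed;
    }

    reader.Dispose();
}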
        /// <summary>
        /// Used by near real-time search </summary>
        internal static DirectoryReader Open(IndexWriter writer, SegmentInfos infos, bool applyAllDeletes)
        {
            // IndexWriter synchronizes externally before calling
            // us, which ensures infos will not change; so there's
            // no need to process segments in reverse order
            int numSegments = infos.Count;

            IList<SegmentReader> readers = new List<SegmentReader>();
            Directory dir = writer.Directory;

            SegmentInfos segmentInfos = (SegmentInfos)infos.Clone();
            int infosUpto = 0;
            bool success = false;

            try
            {
                for (int i = 0; i < numSegments; i++)
                {
                    // NOTE: important that we use infos not
                    // segmentInfos here, so that we are passing the
                    // actual instance of SegmentInfoPerCommit in
                    // IndexWriter's segmentInfos:
                    SegmentCommitInfo info = infos.Info(i);
                    Debug.Assert(info.Info.Dir == dir);
                    ReadersAndUpdates rld = writer.readerPool.Get(info, true);
                    try
                    {
                        SegmentReader reader = rld.GetReadOnlyClone(IOContext.READ);
                        if (reader.NumDocs > 0 || writer.KeepFullyDeletedSegments)
                        {
                            // Steal the ref:
                            readers.Add(reader);
                            infosUpto++;
                        }
                        else
                        {
                            reader.DecRef();
                            segmentInfos.Remove(infosUpto);
                        }
                    }
                    finally
                    {
                        writer.readerPool.Release(rld);
                    }
                }

                writer.IncRefDeleter(segmentInfos);

                StandardDirectoryReader result = new StandardDirectoryReader(dir, readers.ToArray(), writer, segmentInfos, writer.Config.ReaderTermsIndexDivisor, applyAllDeletes);
                success = true;
                return result;
            }
            finally
            {
                if (!success)
                {
                    foreach (SegmentReader r in readers)
                    {
                        try
                        {
                            r.DecRef();
                        }
#pragma warning disable 168
                        catch (Exception th)
#pragma warning restore 168
                        {
                            // ignore any exception that is thrown here to not mask any original
                            // exception.
                        }
                    }
                }
            }
        }
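The public entry point for this near-real-time path is DirectoryReader.Open(IndexWriter, bool). A minimal sketch of how it is used (assuming Lucene.NET 4.8; the in-memory directory and the "id" field are illustrative only):

using System;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

using (var dir = new RAMDirectory())
using (var writer = new IndexWriter(dir,
           new IndexWriterConfig(LuceneVersion.LUCENE_48,
                                 new StandardAnalyzer(LuceneVersion.LUCENE_48))))
{
    var doc = new Document();
    doc.Add(new StringField("id", "1", Field.Store.YES));
    writer.AddDocument(doc);

    // No Commit() yet: an NRT reader taken from the writer (which funnels into
    // the internal Open above) already sees the pending document.
    using (DirectoryReader nrtReader = DirectoryReader.Open(writer, true))
    {
        Console.WriteLine(nrtReader.NumDocs); // 1
    }
}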
Example #3
 /// <summary>
 /// Expert: Returns an <see cref="IndexReader"/> reading the index in the given
 /// <see cref="Store.Directory"/> with the given termInfosIndexDivisor. </summary>
 /// <param name="directory"> the index directory </param>
 /// <param name="termInfosIndexDivisor"> Subsamples which indexed
 /// terms are loaded into RAM. This has the same effect as setting
 /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
 /// must be done at indexing time while this setting can be
 /// set per reader.  When set to N, then one in every
 /// N*termIndexInterval terms in the index is loaded into
 /// memory.  By setting this to a value &gt; 1 you can reduce
 /// memory usage, at the expense of higher latency when
 /// loading a TermInfo.  The default value is 1.  Set this
 /// to -1 to skip loading the terms index entirely.
 /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
 /// implementations, including the default one in this release. It only makes
 /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
 /// <exception cref="IOException"> if there is a low-level IO error </exception>
 new public static DirectoryReader Open(Directory directory, int termInfosIndexDivisor)
 {
     return StandardDirectoryReader.Open(directory, null, termInfosIndexDivisor);
 }
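A usage sketch for the divisor overload (Lucene.NET 4.8; "indexPath" is a placeholder): a divisor of 4 loads only one in every 4 * termIndexInterval indexed terms, trading term-lookup latency for lower RAM use.

using System;
using Lucene.Net.Index;
using Lucene.Net.Store;

using (Directory dir = FSDirectory.Open("indexPath")) // placeholder path
using (DirectoryReader reader = DirectoryReader.Open(dir, 4)) // termInfosIndexDivisor = 4
{
    Console.WriteLine($"{reader.NumDocs} docs across {reader.Leaves.Count} segment(s)");
}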
 public FindSegmentsFileAnonymousInnerClassHelper2(StandardDirectoryReader outerInstance, Directory directory)
     : base(directory)
 {
     this.outerInstance = outerInstance;
 }
Example #5
 /// <summary>
 /// Returns an <see cref="IndexReader"/> reading the index in the given
 /// <see cref="Store.Directory"/> </summary>
 /// <param name="directory"> the index directory </param>
 /// <exception cref="IOException"> if there is a low-level IO error </exception>
 new public static DirectoryReader Open(Directory directory)
 {
     return StandardDirectoryReader.Open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
 }
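The common read path is simply open, search, dispose. A minimal sketch (Lucene.NET 4.8; the index path and the "title" field are placeholders):

using System;
using Lucene.Net.Index;
using Lucene.Net.Search;
using Lucene.Net.Store;

using (Directory dir = FSDirectory.Open("indexPath")) // placeholder path
using (DirectoryReader reader = DirectoryReader.Open(dir))
{
    var searcher = new IndexSearcher(reader);
    TopDocs hits = searcher.Search(new TermQuery(new Term("title", "lucene")), 10);
    Console.WriteLine($"{hits.TotalHits} matching documents");
}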
Example #6
 /// <summary>
 /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
 /// <see cref="Index.IndexCommit"/> with the given <paramref name="termInfosIndexDivisor"/>. </summary>
 /// <param name="commit"> the commit point to open </param>
 /// <param name="termInfosIndexDivisor"> Subsamples which indexed
 /// terms are loaded into RAM. This has the same effect as setting
 /// <see cref="LiveIndexWriterConfig.TermIndexInterval"/> (on <see cref="IndexWriterConfig"/>) except that setting
 /// must be done at indexing time while this setting can be
 /// set per reader.  When set to N, then one in every
 /// N*termIndexInterval terms in the index is loaded into
 /// memory.  By setting this to a value &gt; 1 you can reduce
 /// memory usage, at the expense of higher latency when
 /// loading a TermInfo.  The default value is 1.  Set this
 /// to -1 to skip loading the terms index entirely.
 /// <b>NOTE:</b> divisor settings &gt; 1 do not apply to all <see cref="Codecs.PostingsFormat"/>
 /// implementations, including the default one in this release. It only makes
 /// sense for terms indexes that can efficiently re-sample terms at load time. </param>
 /// <exception cref="IOException"> if there is a low-level IO error </exception>
 new public static DirectoryReader Open(IndexCommit commit, int termInfosIndexDivisor)
 {
     return StandardDirectoryReader.Open(commit.Directory, commit, termInfosIndexDivisor);
 }
Example #7
 /// <summary>
 /// Expert: returns an <see cref="IndexReader"/> reading the index in the given
 /// <see cref="Index.IndexCommit"/>. </summary>
 /// <param name="commit"> the commit point to open </param>
 /// <exception cref="IOException"> if there is a low-level IO error </exception>
 new public static DirectoryReader Open(IndexCommit commit)
 {
     return StandardDirectoryReader.Open(commit.Directory, commit, DEFAULT_TERMS_INDEX_DIVISOR);
 }
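Both commit-based overloads pair naturally with DirectoryReader.ListCommits. A sketch (Lucene.NET 4.8; "indexPath" is a placeholder) that opens a point-in-time view of the oldest commit still on disk:

using System;
using System.Collections.Generic;
using Lucene.Net.Index;
using Lucene.Net.Store;

using (Directory dir = FSDirectory.Open("indexPath")) // placeholder path
{
    // Unless the writer was configured with an IndexDeletionPolicy that retains
    // old commits, only the most recent commit will be listed here.
    IList<IndexCommit> commits = DirectoryReader.ListCommits(dir);
    IndexCommit oldest = commits[0]; // commits are sorted oldest to newest

    using (DirectoryReader reader = DirectoryReader.Open(oldest))
    {
        Console.WriteLine($"Point-in-time view: {reader.NumDocs} docs");
    }
}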