Example #1
 /// <summary>
 /// Creates a new config that handles the live <see cref="IndexWriter"/>
 /// settings.
 /// </summary>
 internal LiveIndexWriterConfig(IndexWriterConfig config)
 {
     maxBufferedDeleteTerms  = config.MaxBufferedDeleteTerms;
     maxBufferedDocs         = config.MaxBufferedDocs;
     mergedSegmentWarmer     = config.MergedSegmentWarmer;
     ramBufferSizeMB         = config.RAMBufferSizeMB;
     readerTermsIndexDivisor = config.ReaderTermsIndexDivisor;
     termIndexInterval       = config.TermIndexInterval;
     matchVersion            = config.matchVersion;
     analyzer              = config.Analyzer;
     delPolicy             = config.IndexDeletionPolicy;
     commit                = config.IndexCommit;
     openMode              = config.OpenMode;
     similarity            = config.Similarity;
     mergeScheduler        = config.MergeScheduler;
     writeLockTimeout      = config.WriteLockTimeout;
     indexingChain         = config.IndexingChain;
     codec                 = config.Codec;
     infoStream            = config.InfoStream;
     mergePolicy           = config.MergePolicy;
     indexerThreadPool     = config.IndexerThreadPool;
     readerPooling         = config.UseReaderPooling;
     flushPolicy           = config.FlushPolicy;
     perThreadHardLimitMB  = config.RAMPerThreadHardLimitMB;
     useCompoundFile       = config.UseCompoundFile;
     checkIntegrityAtMerge = config.CheckIntegrityAtMerge;
 }
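
For orientation, a minimal sketch of the path that exercises this copy constructor; the snapshot happens inside the IndexWriter constructor shown in Example #10. StandardAnalyzer, LuceneVersion.LUCENE_48, and the using directives are illustrative assumptions, not part of the snippets on this page.

    using Lucene.Net.Analysis.Standard; // assumed namespaces
    using Lucene.Net.Index;
    using Lucene.Net.Store;
    using Lucene.Net.Util;

    var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48); // hypothetical analyzer choice
    var conf = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);
    var dir = new RAMDirectory();
    // The IndexWriter constructor copies conf into a LiveIndexWriterConfig
    // (Example #10: new LiveIndexWriterConfig(conf)), so the writer keeps
    // its own snapshot of the settings:
    var writer = new IndexWriter(dir, conf);
    var live = writer.Config; // the snapshot built by this constructor
    writer.Dispose();
    dir.Dispose();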
Example #2
        // used by IndexWriterConfig
        internal LiveIndexWriterConfig(Analyzer analyzer, LuceneVersion matchVersion)
        {
            this.analyzer           = analyzer;
            this.matchVersion       = matchVersion;
            ramBufferSizeMB         = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
            maxBufferedDocs         = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
            maxBufferedDeleteTerms  = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
            readerTermsIndexDivisor = IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR;
            mergedSegmentWarmer     = null;
            termIndexInterval       = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
            delPolicy       = new KeepOnlyLastCommitDeletionPolicy();
            commit          = null;
            useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
            openMode        = Index.OpenMode.CREATE_OR_APPEND;
            similarity      = IndexSearcher.DefaultSimilarity;
#if !FEATURE_CONCURRENTMERGESCHEDULER
            mergeScheduler = new TaskMergeScheduler();
#else
            mergeScheduler = new ConcurrentMergeScheduler();
#endif
            writeLockTimeout = IndexWriterConfig.WRITE_LOCK_TIMEOUT;
            indexingChain    = DocumentsWriterPerThread.DefaultIndexingChain;
            codec            = Codec.Default;
            if (codec == null)
            {
                throw new NullReferenceException();
            }
            infoStream           = Util.InfoStream.Default;
            mergePolicy          = new TieredMergePolicy();
            flushPolicy          = new FlushByRamOrCountsPolicy();
            readerPooling        = IndexWriterConfig.DEFAULT_READER_POOLING;
            indexerThreadPool    = new DocumentsWriterPerThreadPool(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
            perThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
        }
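
A quick way to observe these defaults (a sketch under the same assumptions as the earlier one; StandardAnalyzer and LUCENE_48 are stand-ins):

    var conf = new IndexWriterConfig(LuceneVersion.LUCENE_48,
                                     new StandardAnalyzer(LuceneVersion.LUCENE_48));
    Console.WriteLine(conf.RAMBufferSizeMB); // IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB
    Console.WriteLine(conf.MaxBufferedDocs); // IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS
    Console.WriteLine(conf.OpenMode);        // CREATE_OR_APPEND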
Example #3
        public virtual void TestClone()
        {
            IndexWriterConfig conf  = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
            IndexWriterConfig clone = (IndexWriterConfig)conf.Clone();

            // Make sure parameters that can't be reused are cloned
            IndexDeletionPolicy delPolicy      = conf.DelPolicy;
            IndexDeletionPolicy delPolicyClone = clone.DelPolicy;

            Assert.IsTrue(delPolicy.GetType() == delPolicyClone.GetType() && (delPolicy != delPolicyClone || delPolicy.Clone() == delPolicyClone.Clone()));

            FlushPolicy flushPolicy      = conf.FlushPolicy;
            FlushPolicy flushPolicyClone = clone.FlushPolicy;

            Assert.IsTrue(flushPolicy.GetType() == flushPolicyClone.GetType() && (flushPolicy != flushPolicyClone || flushPolicy.Clone() == flushPolicyClone.Clone()));

            DocumentsWriterPerThreadPool pool      = conf.IndexerThreadPool;
            DocumentsWriterPerThreadPool poolClone = clone.IndexerThreadPool;

            Assert.IsTrue(pool.GetType() == poolClone.GetType() && (pool != poolClone || pool.Clone() == poolClone.Clone()));

            MergePolicy mergePolicy      = conf.MergePolicy;
            MergePolicy mergePolicyClone = clone.MergePolicy;

            Assert.IsTrue(mergePolicy.GetType() == mergePolicyClone.GetType() && (mergePolicy != mergePolicyClone || mergePolicy.Clone() == mergePolicyClone.Clone()));

            IMergeScheduler mergeSched      = conf.MergeScheduler;
            IMergeScheduler mergeSchedClone = clone.MergeScheduler;

            Assert.IsTrue(mergeSched.GetType() == mergeSchedClone.GetType() && (mergeSched != mergeSchedClone || mergeSched.Clone() == mergeSchedClone.Clone()));

            conf.SetMergeScheduler(new SerialMergeScheduler());
            Assert.AreEqual(typeof(ConcurrentMergeScheduler), clone.MergeScheduler.GetType());
        }
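
The practical reason for Clone(): a config instance is bound to the first writer constructed with it (Example #10 calls conf.SetIndexWriter(this) to prevent reuse), so a second writer needs a copy. A sketch, with the analyzer/dir1/dir2 setup assumed as in the earlier sketches:

    var conf = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer);
    var confCopy = (IndexWriterConfig)conf.Clone(); // clone before first use
    var w1 = new IndexWriter(dir1, conf);
    var w2 = new IndexWriter(dir2, confCopy); // passing conf itself is rejected
    w1.Dispose();
    w2.Dispose();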
Example #4
 /// <summary>
 /// Expert: sets the merge scheduler used by this writer. The default is
 /// <see cref="ConcurrentMergeScheduler"/>.
 /// <para/>
 /// <b>NOTE:</b> the merge scheduler cannot be null.
 /// <para/>
 /// Only takes effect when IndexWriter is first created.
 /// </summary>
 public IndexWriterConfig SetMergeScheduler(IMergeScheduler mergeScheduler)
 {
     if (mergeScheduler == null)
     {
         throw new System.ArgumentException("mergeScheduler must not be null");
     }
     this.mergeScheduler = mergeScheduler;
      return this;
 }
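
A hedged usage sketch (dir and analyzer as in the earlier sketches): installing a SerialMergeScheduler, which runs each merge sequentially on the calling thread, before the writer is created:

    var conf = new IndexWriterConfig(LuceneVersion.LUCENE_48, analyzer)
        .SetMergeScheduler(new SerialMergeScheduler());
    var writer = new IndexWriter(dir, conf); // the scheduler choice is fixed from here on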
Example #5
        /// <summary>
        /// Just tries to configure things to keep the open file
        /// count lowish.
        /// </summary>
        public static void ReduceOpenFiles(IndexWriter w)
        {
            // keep number of open files lowish
            MergePolicy mp = w.Config.MergePolicy;

            if (mp is LogMergePolicy lmp)
            {
                lmp.MergeFactor = Math.Min(5, lmp.MergeFactor);
                lmp.NoCFSRatio  = 1.0;
            }
            else if (mp is TieredMergePolicy tmp)
            {
                tmp.MaxMergeAtOnce  = Math.Min(5, tmp.MaxMergeAtOnce);
                tmp.SegmentsPerTier = Math.Min(5, tmp.SegmentsPerTier);
                tmp.NoCFSRatio      = 1.0;
            }
            IMergeScheduler ms = w.Config.MergeScheduler;

            if (ms is IConcurrentMergeScheduler concurrentMergeScheduler)
            {
                // shouldn't this be even lower, since the default is 1?
                concurrentMergeScheduler.SetMaxMergesAndThreads(3, 2);
            }
        }
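
An illustrative call site (hypothetical test code, using the test-framework helpers seen elsewhere on this page):

    var writer = new IndexWriter(dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    ReduceOpenFiles(writer); // caps MergeFactor/MaxMergeAtOnce at 5 and forces compound files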
Example #6
 /// <summary>
 /// Builder method for <see cref="IndexWriterConfig.MergeScheduler"/>.
 /// </summary>
 /// <param name="config">this <see cref="IndexWriterConfig"/> instance</param>
 /// <param name="mergeScheduler"></param>
 /// <returns>this <see cref="IndexWriterConfig"/> instance</returns>
 public static IndexWriterConfig SetMergeScheduler(this IndexWriterConfig config, IMergeScheduler mergeScheduler)
 {
     config.MergeScheduler = mergeScheduler;
      return config;
 }
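
A sketch of the fluent style this extension enables; it is equivalent to assigning config.MergeScheduler directly, and it chains with the other builder setters used on this page (test helpers assumed in scope):

    var conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
        .SetMergeScheduler(new SerialMergeScheduler()) // via the extension above
        .SetMergePolicy(NewLogMergePolicy(false));     // chains with other builder setters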
Example #7
        public virtual void TestAddDocumentOnDiskFull()
        {
            for (int pass = 0; pass < 2; pass++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: pass=" + pass);
                }
                bool doAbort = pass == 1;
                long diskFree = TestUtil.NextInt(Random(), 100, 300);
                while (true)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: cycle: diskFree=" + diskFree);
                    }
                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
                    dir.MaxSizeInBytes = diskFree;
                    IndexWriter     writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                    IMergeScheduler ms     = writer.Config.MergeScheduler;
                    if (ms is IConcurrentMergeScheduler)
                    {
                        // this test intentionally produces exceptions
                        // in the threads that CMS launches; we don't
                        // want to pollute test output with these.
                        ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
                    }

                    bool hitError = false;
                    try
                    {
                        for (int i = 0; i < 200; i++)
                        {
                            AddDoc(writer);
                        }
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: done adding docs; now commit");
                        }
                        writer.Commit();
                    }
                    catch (IOException e)
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: exception on addDoc");
                            Console.WriteLine(e.StackTrace);
                        }
                        hitError = true;
                    }

                    if (hitError)
                    {
                        if (doAbort)
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: now rollback");
                            }
                            writer.Rollback();
                        }
                        else
                        {
                            try
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now close");
                                }
                                writer.Dispose();
                            }
                            catch (IOException e)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: exception on close; retry w/ no disk space limit");
                                    Console.WriteLine(e.StackTrace);
                                }
                                dir.MaxSizeInBytes = 0;
                                writer.Dispose();
                            }
                        }

                        //TestUtil.SyncConcurrentMerges(ms);

                        if (TestUtil.AnyFilesExceptWriteLock(dir))
                        {
                            TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument");

                            // Make sure reader can open the index:
                            DirectoryReader.Open(dir).Dispose();
                        }

                        dir.Dispose();
                        // Now try again w/ more space:

                        diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000);
                    }
                    else
                    {
                        //TestUtil.SyncConcurrentMerges(writer);
                        dir.MaxSizeInBytes = 0;
                        writer.Dispose();
                        dir.Dispose();
                        break;
                    }
                }
            }
        }
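
The mechanism the test leans on, in isolation (test-framework types; the 200-byte cap is illustrative): MockDirectoryWrapper.MaxSizeInBytes limits the simulated disk, after which writes throw IOException and the writer can roll back to the last consistent commit:

    var dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    dir.MaxSizeInBytes = 200; // pretend the disk has ~200 bytes left
    var writer = new IndexWriter(dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    try
    {
        AddDoc(writer); // helper from the surrounding test class
        writer.Commit();
    }
    catch (IOException)
    {
        writer.Rollback(); // index stays at the last successful commit
    }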
Example #8
        public virtual void TestAddIndexOnDiskFull()
        {
            // MemoryCodec, since it uses FST, is not necessarily
            // "additive", ie if you add up N small FSTs, then merge
            // them, the merged result can easily be larger than the
            // sum because the merged FST may use array encoding for
            // some arcs (which uses more space):

            string idFormat      = TestUtil.GetPostingsFormat("id");
            string contentFormat = TestUtil.GetPostingsFormat("content");

            AssumeFalse("this test cannot run with Memory codec", idFormat.Equals("Memory") || contentFormat.Equals("Memory"));

            int START_COUNT = 57;
            int NUM_DIR     = TEST_NIGHTLY ? 50 : 5;
            int END_COUNT   = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);

            // Build up a bunch of dirs that have indexes which we
            // will then merge together by calling addIndexes(*):
            Directory[] dirs           = new Directory[NUM_DIR];
            long        inputDiskUsage = 0;

            for (int i = 0; i < NUM_DIR; i++)
            {
                dirs[i] = NewDirectory();
                IndexWriter writer = new IndexWriter(dirs[i], NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                for (int j = 0; j < 25; j++)
                {
                    AddDocWithIndex(writer, 25 * i + j);
                }
                writer.Dispose();
                string[] files = dirs[i].ListAll();
                for (int j = 0; j < files.Length; j++)
                {
                    inputDiskUsage += dirs[i].FileLength(files[j]);
                }
            }

            // Now, build a starting index that has START_COUNT docs.  We
            // will then try to addIndexes into a copy of this:
            MockDirectoryWrapper startDir  = NewMockDirectory();
            IndexWriter          indWriter = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));

            for (int j = 0; j < START_COUNT; j++)
            {
                AddDocWithIndex(indWriter, j);
            }
            indWriter.Dispose();

            // Make sure starting index seems to be working properly:
            Term        searchTerm = new Term("content", "aaa");
            IndexReader reader     = DirectoryReader.Open(startDir);

            Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq");

            IndexSearcher searcher = NewSearcher(reader);

            ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
            Assert.AreEqual(57, hits.Length, "first number of hits");
            reader.Dispose();

            // Iterate with larger and larger amounts of free
            // disk space.  With little free disk space,
            // addIndexes will certainly run out of space &
            // fail.  Verify that when this happens, index is
            // not corrupt and index in fact has added no
            // documents.  Then, we increase disk space by 2000
            // bytes each iteration.  At some point there is
            // enough free disk space and addIndexes should
            // succeed and index should show all documents were
            // added.

            // String[] files = startDir.ListAll();
            long diskUsage = startDir.GetSizeInBytes();

            long startDiskUsage = 0;

            string[] files_ = startDir.ListAll();
            for (int i = 0; i < files_.Length; i++)
            {
                startDiskUsage += startDir.FileLength(files_[i]);
            }

            for (int iter = 0; iter < 3; iter++)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter);
                }

                // Start with 100 bytes more than we are currently using:
                long diskFree = diskUsage + TestUtil.NextInt(Random(), 50, 200);

                int method = iter;

                bool success = false;
                bool done    = false;

                string methodName;
                if (0 == method)
                {
                    methodName = "addIndexes(Directory[]) + forceMerge(1)";
                }
                else if (1 == method)
                {
                    methodName = "addIndexes(IndexReader[])";
                }
                else
                {
                    methodName = "addIndexes(Directory[])";
                }

                while (!done)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: cycle...");
                    }

                    // Make a new dir that will enforce disk usage:
                    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
                    indWriter = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMergePolicy(NewLogMergePolicy(false)));
                    IOException err = null;

                    IMergeScheduler ms = indWriter.Config.MergeScheduler;
                    for (int x = 0; x < 2; x++)
                    {
                        if (ms is IConcurrentMergeScheduler)
                        {
                            // this test intentionally produces exceptions
                            // in the threads that CMS launches; we don't
                            // want to pollute test output with these.
                            if (0 == x)
                            {
                                ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
                            }
                            else
                            {
                                ((IConcurrentMergeScheduler)ms).ClearSuppressExceptions();
                            }
                        }

                        // Two loops: first time, limit disk space &
                        // throw random IOExceptions; second time, no
                        // disk space limit:

                        double rate      = 0.05;
                        double diskRatio = ((double)diskFree) / diskUsage;
                        long   thisDiskFree;

                        string testName = null;

                        if (0 == x)
                        {
                            dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
                            thisDiskFree = diskFree;
                            if (diskRatio >= 2.0)
                            {
                                rate /= 2;
                            }
                            if (diskRatio >= 4.0)
                            {
                                rate /= 2;
                            }
                            if (diskRatio >= 6.0)
                            {
                                rate = 0.0;
                            }
                            if (VERBOSE)
                            {
                                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
                            }
                        }
                        else
                        {
                            dir.RandomIOExceptionRateOnOpen = 0.0;
                            thisDiskFree = 0;
                            rate         = 0.0;
                            if (VERBOSE)
                            {
                                testName = "disk full test " + methodName + " with unlimited disk space";
                            }
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("\ncycle: " + testName);
                        }

                        dir.TrackDiskUsage        = true;
                        dir.MaxSizeInBytes        = thisDiskFree;
                        dir.RandomIOExceptionRate = rate;

                        try
                        {
                            if (0 == method)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now addIndexes count=" + dirs.Length);
                                }
                                indWriter.AddIndexes(dirs);
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: now forceMerge");
                                }
                                indWriter.ForceMerge(1);
                            }
                            else if (1 == method)
                            {
                                IndexReader[] readers = new IndexReader[dirs.Length];
                                for (int i = 0; i < dirs.Length; i++)
                                {
                                    readers[i] = DirectoryReader.Open(dirs[i]);
                                }
                                try
                                {
                                    indWriter.AddIndexes(readers);
                                }
                                finally
                                {
                                    for (int i = 0; i < dirs.Length; i++)
                                    {
                                        readers[i].Dispose();
                                    }
                                }
                            }
                            else
                            {
                                indWriter.AddIndexes(dirs);
                            }

                            success = true;
                            if (VERBOSE)
                            {
                                Console.WriteLine("  success!");
                            }

                            if (0 == x)
                            {
                                done = true;
                            }
                        }
                        catch (IOException e)
                        {
                            success = false;
                            err     = e;
                            if (VERBOSE)
                            {
                                Console.WriteLine("  hit IOException: " + e);
                                Console.WriteLine(e.StackTrace);
                            }

                            if (1 == x)
                            {
                                Console.WriteLine(e.StackTrace);
                                Assert.Fail(methodName + " hit IOException after disk space was freed up");
                            }
                        }

                        // Make sure all threads from
                        // ConcurrentMergeScheduler are done
                        TestUtil.SyncConcurrentMerges(indWriter);

                        if (VERBOSE)
                        {
                            Console.WriteLine("  now test readers");
                        }

                        // Finally, verify index is not corrupt, and, if
                        // we succeeded, we see all docs added, and if we
                        // failed, we see either all docs or no docs added
                        // (transactional semantics):
                        dir.RandomIOExceptionRateOnOpen = 0.0;
                        try
                        {
                            reader = DirectoryReader.Open(dir);
                        }
                        catch (IOException e)
                        {
                            Console.WriteLine(e.StackTrace);
                            Assert.Fail(testName + ": exception when creating IndexReader: " + e);
                        }
                        int result = reader.DocFreq(searchTerm);
                        if (success)
                        {
                            if (result != START_COUNT)
                            {
                                Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
                            }
                        }
                        else
                        {
                            // On hitting exception we still may have added
                            // all docs:
                            if (result != START_COUNT && result != END_COUNT)
                            {
                                Console.WriteLine(err.StackTrace);
                                Assert.Fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
                            }
                        }

                        searcher = NewSearcher(reader);
                        try
                        {
                            hits = searcher.Search(new TermQuery(searchTerm), null, END_COUNT).ScoreDocs;
                        }
                        catch (IOException e)
                        {
                            Console.WriteLine(e.StackTrace);
                            Assert.Fail(testName + ": exception when searching: " + e);
                        }
                        int result2 = hits.Length;
                        if (success)
                        {
                            if (result2 != result)
                            {
                                Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
                            }
                        }
                        else
                        {
                            // On hitting exception we still may have added
                            // all docs:
                            if (result2 != result)
                            {
                                Console.WriteLine(err.StackTrace);
                                Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + result);
                            }
                        }

                        reader.Dispose();
                        if (VERBOSE)
                        {
                            Console.WriteLine("  count is " + result);
                        }

                        if (done || result == END_COUNT)
                        {
                            break;
                        }
                    }

                    if (VERBOSE)
                    {
                        Console.WriteLine("  start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.MaxUsedSizeInBytes);
                    }

                    if (done)
                    {
                        // Javadocs state that temp free Directory space
                        // required is at most 2X total input size of
                        // indices so let's make sure:
                        Assert.IsTrue((dir.MaxUsedSizeInBytes - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage), "max free Directory space required exceeded 2X the total input index sizes during " + methodName + ": max temp usage = " + (dir.MaxUsedSizeInBytes - startDiskUsage) + " bytes vs limit=" + (2 * (startDiskUsage + inputDiskUsage)) + "; starting disk usage = " + startDiskUsage + " bytes; " + "input index disk usage = " + inputDiskUsage + " bytes");
                    }

                    // Make sure we don't hit disk full during close below:
                    dir.MaxSizeInBytes              = 0;
                    dir.RandomIOExceptionRate       = 0.0;
                    dir.RandomIOExceptionRateOnOpen = 0.0;

                    indWriter.Dispose();

                    // Wait for all BG threads to finish else
                    // dir.Dispose() will throw IOException because
                    // there are still open files
                    TestUtil.SyncConcurrentMerges(ms);

                    dir.Dispose();

                    // Try again with more free space:
                    diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 4000, 8000) : TestUtil.NextInt(Random(), 40000, 80000);
                }
            }

            startDir.Dispose();
            foreach (Directory dir in dirs)
            {
                dir.Dispose();
            }
        }
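
Stripped of the failure injection, the API under test reduces to this shape (destDir and sourceDirs are hypothetical and assumed to hold compatible indexes):

    using (var writer = new IndexWriter(destDir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetOpenMode(OpenMode.APPEND)))
    {
        writer.AddIndexes(sourceDirs); // transactional: on failure, no documents are added
    }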
Example #10
        /// <summary>
        /// Constructs a new IndexWriter per the settings given in <code>conf</code>.
        /// If you want to make "live" changes to this writer instance, use
        /// <see cref="Config"/>.
        ///
        /// <para/>
        /// <b>NOTE:</b> after this writer is created, the given configuration instance
        /// cannot be passed to another writer. If you intend to do so, you should
        /// clone it beforehand via <see cref="IndexWriterConfig.Clone()"/>.
        /// </summary>
        /// <param name="d">
        ///          the index directory. The index is either created or appended
        ///          according to <code>conf.OpenMode</code>. </param>
        /// <param name="conf">
        ///          the configuration settings according to which IndexWriter should
        ///          be initialized. </param>
        /// <exception cref="IOException">
        ///           if the directory cannot be read/written to, or if it does not
        ///           exist and <code>conf.OpenMode</code> is
        ///           <code>OpenMode.APPEND</code>, or if there is any other low-level
        ///           IO error </exception>
        public IndexWriter(Directory d, IndexWriterConfig conf)
        {
            readerPool = new ReaderPool(this);
            conf.SetIndexWriter(this); // prevent reuse by other instances
            Config_Renamed = new LiveIndexWriterConfig(conf);
            directory = d;
            analyzer = Config_Renamed.Analyzer;
            infoStream = Config_Renamed.InfoStream;
            mergePolicy = Config_Renamed.MergePolicy;
            mergePolicy.IndexWriter = this;
            mergeScheduler = Config_Renamed.MergeScheduler;
            Codec = Config_Renamed.Codec;

            BufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
            PoolReaders = Config_Renamed.ReaderPooling;

            WriteLock = directory.MakeLock(WRITE_LOCK_NAME);

            if (!WriteLock.Obtain(Config_Renamed.WriteLockTimeout)) // obtain write lock
            {
                throw new LockObtainFailedException("Index locked for write: " + WriteLock);
            }

            bool success = false;
            try
            {
                OpenMode_e? mode = Config_Renamed.OpenMode;
                bool create;
                if (mode == OpenMode_e.CREATE)
                {
                    create = true;
                }
                else if (mode == OpenMode_e.APPEND)
                {
                    create = false;
                }
                else
                {
                    // CREATE_OR_APPEND - create only if an index does not exist
                    create = !DirectoryReader.IndexExists(directory);
                }

                // If index is too old, reading the segments will throw
                // IndexFormatTooOldException.
                segmentInfos = new SegmentInfos();

                bool initialIndexExists = true;

                if (create)
                {
                    // Try to read first.  this is to allow create
                    // against an index that's currently open for
                    // searching.  In this case we write the next
                    // segments_N file with no segments:
                    try
                    {
                        segmentInfos.Read(directory);
                        segmentInfos.Clear();
                    }
                    catch (IOException)
                    {
                        // Likely this means it's a fresh directory
                        initialIndexExists = false;
                    }

                    // Record that we have a change (zero out all
                    // segments) pending:
                    Changed();
                }
                else
                {
                    segmentInfos.Read(directory);

                    IndexCommit commit = Config_Renamed.IndexCommit;
                    if (commit != null)
                    {
                        // Swap out all segments, but, keep metadata in
                        // SegmentInfos, like version & generation, to
                        // preserve write-once.  this is important if
                        // readers are open against the future commit
                        // points.
                        if (commit.Directory != directory)
                        {
                            throw new ArgumentException(string.Format("IndexCommit's directory doesn't match my directory (mine: {0}, commit's: {1})", directory, commit.Directory));
                        }
                        SegmentInfos oldInfos = new SegmentInfos();
                        oldInfos.Read(directory, commit.SegmentsFileName);
                        segmentInfos.Replace(oldInfos);
                        Changed();
                        if (infoStream.IsEnabled("IW"))
                        {
                            infoStream.Message("IW", "init: loaded commit \"" + commit.SegmentsFileName + "\"");
                        }
                    }
                }

                RollbackSegments = segmentInfos.CreateBackupSegmentInfos();

                // start with previous field numbers, but new FieldInfos
                GlobalFieldNumberMap = FieldNumberMap;
                Config_Renamed.FlushPolicy.Init(Config_Renamed);
                DocWriter = new DocumentsWriter(this, Config_Renamed, directory);
                eventQueue = DocWriter.EventQueue();

                // Default deleter (for backwards compatibility) is
                // KeepOnlyLastCommitDeleter:
                lock (this)
                {
                    Deleter = new IndexFileDeleter(directory, Config_Renamed.DelPolicy, segmentInfos, infoStream, this, initialIndexExists);
                }

                if (Deleter.StartingCommitDeleted)
                {
                    // Deletion policy deleted the "head" commit point.
                    // We have to mark ourself as changed so that if we
                    // are closed w/o any further changes we write a new
                    // segments_N file.
                    Changed();
                }

                if (infoStream.IsEnabled("IW"))
                {
                    infoStream.Message("IW", "init: create=" + create);
                    MessageState();
                }

                success = true;
            }
            finally
            {
                if (!success)
                {
                    if (infoStream.IsEnabled("IW"))
                    {
                        infoStream.Message("IW", "init: hit exception on init; releasing write lock");
                    }
                    WriteLock.Release();
                    IOUtils.CloseWhileHandlingException(WriteLock);
                    WriteLock = null;
                }
            }
        }
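
The create/append decision at the top of this constructor condenses to the following equivalent expression (same names and logic as the code above, shown only as a reading aid):

    bool create = mode == OpenMode_e.CREATE
                  || (mode == OpenMode_e.CREATE_OR_APPEND && !DirectoryReader.IndexExists(directory));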
 /// <summary>
 /// Creates a new config that handles the live <see cref="IndexWriter"/>
 /// settings.
 /// </summary>
 internal LiveIndexWriterConfig(IndexWriterConfig config)
 {
     maxBufferedDeleteTerms = config.MaxBufferedDeleteTerms;
     maxBufferedDocs = config.MaxBufferedDocs;
     mergedSegmentWarmer = config.MergedSegmentWarmer;
     RamBufferSizeMB = config.RAMBufferSizeMB;
     readerTermsIndexDivisor = config.ReaderTermsIndexDivisor;
     termIndexInterval = config.TermIndexInterval;
     MatchVersion = config.MatchVersion;
     analyzer = config.Analyzer;
     delPolicy = config.DelPolicy;
     Commit = config.IndexCommit;
     openMode = config.OpenMode;
     similarity = config.Similarity;
     mergeScheduler = config.MergeScheduler;
     writeLockTimeout = config.WriteLockTimeout;
     indexingChain = config.IndexingChain;
     codec = config.Codec;
     infoStream = config.InfoStream;
     mergePolicy = config.MergePolicy;
     indexerThreadPool = config.IndexerThreadPool;
     readerPooling = config.ReaderPooling;
     flushPolicy = config.FlushPolicy;
     PerThreadHardLimitMB = config.RAMPerThreadHardLimitMB;
     useCompoundFile = config.UseCompoundFile;
     checkIntegrityAtMerge = config.CheckIntegrityAtMerge;
 }
        private volatile int termIndexInterval; // TODO: this should be private to the codec, not settable here

        #endregion Fields

        #region Constructors

        // used by IndexWriterConfig
        internal LiveIndexWriterConfig(Analyzer analyzer, LuceneVersion matchVersion)
        {
            this.analyzer = analyzer;
            this.MatchVersion = matchVersion;
            RamBufferSizeMB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
            maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
            maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
            readerTermsIndexDivisor = IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR;
            mergedSegmentWarmer = null;
            termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
            delPolicy = new KeepOnlyLastCommitDeletionPolicy();
            Commit = null;
            useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
            openMode = OpenMode_e.CREATE_OR_APPEND;
            similarity = IndexSearcher.DefaultSimilarity;
            mergeScheduler = new ConcurrentMergeScheduler();
            writeLockTimeout = IndexWriterConfig.WRITE_LOCK_TIMEOUT;
            indexingChain = DocumentsWriterPerThread.defaultIndexingChain;
            codec = Codec.Default;
            if (codec == null)
            {
                throw new System.NullReferenceException();
            }
            infoStream = Util.InfoStream.Default;
            mergePolicy = new TieredMergePolicy();
            flushPolicy = new FlushByRamOrCountsPolicy();
            readerPooling = IndexWriterConfig.DEFAULT_READER_POOLING;
            indexerThreadPool = new ThreadAffinityDocumentsWriterThreadPool(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
            PerThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
        }