A {@link MergeScheduler} that runs each merge using a separate thread, up until a maximum number of threads ({@link #setMaxThreadCount}), at which point merges are run in the foreground, serially. This is a simple way to use concurrency in the indexing process without having to create and manage application-level threads.
Inheritance: Lucene.Net.Index.MergeScheduler
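
Before the examples, a minimal usage sketch: create the scheduler, optionally cap its thread count, and install it on the writer before indexing. This assumes the classic setter-style Lucene.Net API that most examples below use (e.g. SetMergeScheduler); the SetMaxThreadCount call mirrors the setMaxThreadCount mentioned above, though some versions expose it as a MaxThreadCount property instead.

    using Lucene.Net.Analysis;
    using Lucene.Net.Documents;
    using Lucene.Net.Index;
    using Lucene.Net.Store;

    // Index into an in-memory directory, letting merges run on background threads.
    Directory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(),
                                         true, IndexWriter.MaxFieldLength.UNLIMITED);

    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    cms.SetMaxThreadCount(2);      // past this many concurrent merges, further merges run serially in the calling thread
    writer.SetMergeScheduler(cms); // install the scheduler before adding documents

    for (int i = 0; i < 1000; i++)
    {
        Document doc = new Document();
        doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(doc);   // any merges this triggers are picked up by the scheduler's threads
    }

    writer.Close();                // by default waits for outstanding merges to complete
    directory.Close();

The tests below exercise the same pattern under stress: they install the scheduler, force many small segments with SetMaxBufferedDocs(2), and then verify flush, crash, and close/abort behavior.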
Example #1
 public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge, IState state)
 {
     InitBlock(enclosingInstance);
     this.writer     = writer;
     this.startMerge = startMerge;
     this.state      = state;
 }
Example #2
        public virtual void  TestNoExtraFiles()
        {
            RAMDirectory directory = new MockRAMDirectory();

            for (int pass = 0; pass < 2; pass++)
            {
                bool        autoCommit = pass == 0;
                IndexWriter writer     = new IndexWriter(directory, autoCommit, ANALYZER, true);

                for (int iter = 0; iter < 7; iter++)
                {
                    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                    writer.SetMergeScheduler(cms);
                    writer.SetMaxBufferedDocs(2);

                    for (int j = 0; j < 21; j++)
                    {
                        Document doc = new Document();
                        doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
                        writer.AddDocument(doc);
                    }

                    writer.Close();
                    TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit);

                    // Reopen
                    writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
                }

                writer.Close();
            }

            directory.Close();
        }
Example #3
        public virtual void TestNoExtraFiles()
        {
            RAMDirectory directory = new MockRAMDirectory();
            IndexWriter  writer    = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

            for (int iter = 0; iter < 7; iter++)
            {
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                writer.SetMergeScheduler(cms);
                writer.SetMaxBufferedDocs(2);

                for (int j = 0; j < 21; j++)
                {
                    Document doc = new Document();
                    doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
                    writer.AddDocument(doc);
                }

                writer.Close();
                TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles");
                // Reopen
                writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
            }
            writer.Close();
            directory.Close();
        }
Example #4
        public void TestExceptionOnBackgroundThreadIsPropagatedToCallingThread()
        {
            using (MockDirectoryWrapper dir = NewMockDirectory())
            {
                dir.FailOn(new FailOnlyOnMerge());

                Document doc     = new Document();
                Field    idField = NewStringField("id", "", Field.Store.YES);
                doc.Add(idField);

                var mergeScheduler = new ConcurrentMergeScheduler();
                using (IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(mergeScheduler).SetMaxBufferedDocs(2).SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).SetMergePolicy(NewLogMergePolicy())))
                {
                    LogMergePolicy logMP = (LogMergePolicy)writer.Config.MergePolicy;
                    logMP.MergeFactor = 10;
                    for (int i = 0; i < 20; i++)
                    {
                        writer.AddDocument(doc);
                    }

                    bool exceptionHit = false;
                    try
                    {
                        mergeScheduler.Sync();
                    }
                    catch (MergePolicy.MergeException)
                    {
                        exceptionHit = true;
                    }

                    assertTrue(exceptionHit);
                }
            }
        }
Example #5
        public override IMergeScheduler Clone()
        {
            ConcurrentMergeScheduler clone = (ConcurrentMergeScheduler)base.Clone();

            // Detach per-instance state: the clone starts with no writer, directory, or merge threads.
            clone.Writer       = null;
            clone.Dir          = null;
            clone.MergeThreads = new List<MergeThread>();
            return clone;
        }
Example #6
        private void  Crash(IndexWriter writer)
        {
            MockRAMDirectory         dir = (MockRAMDirectory)writer.Directory;
            ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler)writer.MergeScheduler;

            dir.Crash();
            cms.Sync();
            dir.ClearCrash();
        }
Example #7
        public override object Clone()
        {
            ConcurrentMergeScheduler clone = (ConcurrentMergeScheduler)base.Clone();

            clone.m_writer       = null;
            clone.m_dir          = null;
            clone.m_mergeThreads = new List<MergeThread>();
            return clone;
        }
Example #8
        public virtual void TestFlushExceptions()
        {
            MockRAMDirectory directory = new MockRAMDirectory();
            FailOnlyOnFlush  failure   = new FailOnlyOnFlush();

            directory.FailOn(failure);

            IndexWriter writer           = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

            writer.SetMergeScheduler(cms);
            writer.SetMaxBufferedDocs(2);
            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);
            int extraCount = 0;

            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 20; j++)
                {
                    idField.SetValue(System.Convert.ToString(i * 20 + j));
                    writer.AddDocument(doc);
                }

                while (true)
                {
                    // must cycle here because sometimes the merge flushes
                    // the doc we just added and so there's nothing to
                    // flush, and we don't hit the exception
                    writer.AddDocument(doc);
                    failure.SetDoFail();
                    try
                    {
                        writer.Flush(true, false, true);
                        if (failure.hitExc)
                        {
                            Assert.Fail("failed to hit IOException");
                        }
                        extraCount++;
                    }
                    catch (System.IO.IOException ioe)
                    {
                        failure.ClearDoFail();
                        break;
                    }
                }
            }

            writer.Close();
            IndexReader reader = IndexReader.Open(directory, true);

            Assert.AreEqual(200 + extraCount, reader.NumDocs());
            reader.Close();
            directory.Close();
        }
Example #9
        private void Crash(IndexWriter writer)
        {
            MockDirectoryWrapper     dir = (MockDirectoryWrapper)writer.Directory;
            ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler)writer.Config.MergeScheduler;

            cms.Sync();
            dir.Crash();
            cms.Sync();
            dir.ClearCrash();
        }
Example #10
        public virtual void  TestNoWaitClose()
        {
            RAMDirectory directory = new MockRAMDirectory();

            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);

            for (int pass = 0; pass < 2; pass++)
            {
                bool        autoCommit = pass == 0;
                IndexWriter writer     = new IndexWriter(directory, autoCommit, ANALYZER, true);

                for (int iter = 0; iter < 10; iter++)
                {
                    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                    writer.SetMergeScheduler(cms);
                    writer.SetMaxBufferedDocs(2);
                    writer.SetMergeFactor(100);

                    for (int j = 0; j < 201; j++)
                    {
                        idField.SetValue(System.Convert.ToString(iter * 201 + j));
                        writer.AddDocument(doc);
                    }

                    int delID = iter * 201;
                    for (int j = 0; j < 20; j++)
                    {
                        writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID)));
                        delID += 5;
                    }

                    // Force a bunch of merge threads to kick off so we
                    // stress out aborting them on close:
                    writer.SetMergeFactor(3);
                    writer.AddDocument(doc);
                    writer.Flush();

                    writer.Close(false);

                    IndexReader reader = IndexReader.Open(directory);
                    Assert.AreEqual((1 + iter) * 182, reader.NumDocs());
                    reader.Close();

                    // Reopen
                    writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
                }
                writer.Close();
            }

            directory.Close();
        }
Example #11
        public virtual void TestFlushExceptions()
        {
            MockRAMDirectory directory = new MockRAMDirectory();
            FailOnlyOnFlush failure = new FailOnlyOnFlush();
            directory.FailOn(failure);

            IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
            writer.SetMergeScheduler(cms);
            writer.SetMaxBufferedDocs(2);
            Document doc = new Document();
            Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
            doc.Add(idField);
            int extraCount = 0;

            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 20; j++)
                {
                    idField.SetValue(System.Convert.ToString(i*20 + j));
                    writer.AddDocument(doc);
                }

                while (true)
                {
                    // must cycle here because sometimes the merge flushes
                    // the doc we just added and so there's nothing to
                    // flush, and we don't hit the exception
                    writer.AddDocument(doc);
                    failure.SetDoFail();
                    try
                    {
                        writer.Flush(true, false, true);
                        if (failure.hitExc)
                            Assert.Fail("failed to hit IOException");
                        extraCount++;
                    }
                    catch (System.IO.IOException ioe)
                    {
                        failure.ClearDoFail();
                        break;
                    }
                }
            }

            writer.Close();
            IndexReader reader = IndexReader.Open(directory, true);
            Assert.AreEqual(200 + extraCount, reader.NumDocs());
            reader.Close();
            directory.Close();
        }
Example #12
        public virtual void TestNoWaitClose()
        {
            RAMDirectory directory = new MockRAMDirectory();

            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);

            IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED, null);

            for (int iter = 0; iter < 10; iter++)
            {
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                writer.SetMergeScheduler(cms, null);
                writer.SetMaxBufferedDocs(2);
                writer.MergeFactor = 100;

                for (int j = 0; j < 201; j++)
                {
                    idField.SetValue(System.Convert.ToString(iter * 201 + j));
                    writer.AddDocument(doc, null);
                }

                int delID = iter * 201;
                for (int j = 0; j < 20; j++)
                {
                    writer.DeleteDocuments(null, new Term("id", delID.ToString()));
                    delID += 5;
                }

                // Force a bunch of merge threads to kick off so we
                // stress out aborting them on close:
                writer.MergeFactor = 3;
                writer.AddDocument(doc, null);
                writer.Commit(null);

                writer.Close(false);

                IndexReader reader = IndexReader.Open((Directory)directory, true, null);
                Assert.AreEqual((1 + iter) * 182, reader.NumDocs());
                reader.Close();

                // Reopen
                writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED, null);
            }
            writer.Close();

            directory.Close();
        }
Example #13
        public virtual void  TestDeleteMerging()
        {
            RAMDirectory directory = new MockRAMDirectory();

            IndexWriter writer           = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

            writer.SetMergeScheduler(cms);

            LogDocMergePolicy mp = new LogDocMergePolicy(writer);

            writer.SetMergePolicy(mp);

            // Force degenerate merging so we can get a mix of
            // merging of segments with and without deletes at the
            // start:
            mp.MinMergeDocs = 1000;

            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 100; j++)
                {
                    idField.SetValue(System.Convert.ToString(i * 100 + j));
                    writer.AddDocument(doc);
                }

                int delID = i;
                while (delID < 100 * (1 + i))
                {
                    writer.DeleteDocuments(new Term("id", "" + delID));
                    delID += 10;
                }

                writer.Commit();
            }

            writer.Close();
            IndexReader reader = IndexReader.Open(directory, true);

            // Verify that we did not lose any deletes...
            Assert.AreEqual(450, reader.NumDocs());
            reader.Close();
            directory.Close();
        }
Example #14
        public virtual void  TestFlushExceptions()
        {
            MockRAMDirectory directory = new MockRAMDirectory();
            FailOnlyOnFlush  failure   = new FailOnlyOnFlush();

            directory.FailOn(failure);

            IndexWriter writer           = new IndexWriter(directory, true, ANALYZER, true);
            ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

            writer.SetMergeScheduler(cms);
            writer.SetMaxBufferedDocs(2);
            Document doc     = new Document();
            Field    idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);

            doc.Add(idField);
            for (int i = 0; i < 10; i++)
            {
                for (int j = 0; j < 20; j++)
                {
                    idField.SetValue(System.Convert.ToString(i * 20 + j));
                    writer.AddDocument(doc);
                }

                writer.AddDocument(doc);

                failure.SetDoFail();
                try
                {
                    writer.Flush();
                    Assert.Fail("failed to hit IOException");
                }
                catch (System.IO.IOException ioe)
                {
                    failure.ClearDoFail();
                }
            }

            writer.Close();
            IndexReader reader = IndexReader.Open(directory);

            Assert.AreEqual(200, reader.NumDocs());
            reader.Close();
            directory.Close();
        }
Example #15
		public virtual void  TestFlushExceptions()
		{
			
			MockRAMDirectory directory = new MockRAMDirectory();
			FailOnlyOnFlush failure = new FailOnlyOnFlush();
			directory.FailOn(failure);
			
			IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
			ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
			writer.SetMergeScheduler(cms);
			writer.SetMaxBufferedDocs(2);
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			for (int i = 0; i < 10; i++)
			{
				for (int j = 0; j < 20; j++)
				{
					idField.SetValue(System.Convert.ToString(i * 20 + j));
					writer.AddDocument(doc);
				}
				
				writer.AddDocument(doc);
				
				failure.SetDoFail();
				try
				{
					writer.Flush();
					Assert.Fail("failed to hit IOException");
				}
				catch (System.IO.IOException ioe)
				{
					failure.ClearDoFail();
				}
			}
			
			writer.Close();
			IndexReader reader = IndexReader.Open(directory);
			Assert.AreEqual(200, reader.NumDocs());
			reader.Close();
			directory.Close();
		}
Example #16
 /// <summary>Used for testing </summary>
 private void  AddMyself()
 {
     lock (allInstances)
     {
         int size = allInstances.Count;
         int upto = 0;
         for (int i = 0; i < size; i++)
         {
             ConcurrentMergeScheduler other = allInstances[i];
             if (!(other.closed && 0 == other.MergeThreadCount()))
             {
                 // Keep this one for now: it still has threads or
                 // may spawn new threads
                 allInstances[upto++] = other;
             }
         }
         allInstances.RemoveRange(upto, allInstances.Count - upto);
         allInstances.Add(this);
     }
 }
Example #17
 /// <summary>Used for testing </summary>
 private void  AddMyself()
 {
     lock (allInstances.SyncRoot)
     {
         int size = allInstances.Count;
         int upto = 0;
         for (int i = 0; i < size; i++)
         {
             ConcurrentMergeScheduler other = (ConcurrentMergeScheduler)allInstances[i];
             if (!(other.closed && 0 == other.MergeThreadCount()))
             {
                 // Keep this one for now: it still has threads or
                 // may spawn new threads
                 allInstances[upto++] = other;
             }
         }
         // GetRange returns a live view over the underlying ArrayList, so clearing it removes that tail from allInstances:
         ((System.Collections.IList)((System.Collections.ArrayList)allInstances).GetRange(upto, allInstances.Count - upto)).Clear();
         allInstances.Add(this);
     }
 }
Example #18
 private void  InitBlock(ConcurrentMergeScheduler enclosingInstance)
 {
     this.enclosingInstance = enclosingInstance;
 }
Example #19
		public virtual void  TestCloseWithThreads()
		{
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 20; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				
				// We expect AlreadyClosedException
				cms.SetSuppressExceptions();
				
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(10);
				writer.SetMergeFactor(4);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, false);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				bool done = false;
				while (!done)
				{
					System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 100));
					for (int i = 0; i < NUM_THREADS; i++)
					// only stop when at least one thread has added a doc
						if (threads[i].addCount > 0)
						{
							done = true;
							break;
						}
				}
				
				writer.Close(false);
				
				// Make sure threads that are adding docs are not hung:
				for (int i = 0; i < NUM_THREADS; i++)
				{
					// Without fix for LUCENE-1130: one of the
					// threads will hang
					threads[i].Join();
					if (threads[i].IsAlive)
						Assert.Fail("thread seems to be hung");
				}
				
				// Quick test to make sure index is not corrupt:
				IndexReader reader = IndexReader.Open(dir);
				TermDocs tdocs = reader.TermDocs(new Term("field", "aaa"));
				int count = 0;
				while (tdocs.Next())
				{
					count++;
				}
				Assert.IsTrue(count > 0);
				reader.Close();
				
				dir.Close();
			}
		}
Example #20
		public virtual void  TestImmediateDiskFullWithThreads()
		{
			
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 10; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				// We expect disk full exceptions in the merge threads
				cms.SetSuppressExceptions();
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(2);
				writer.SetMergeFactor(4);
				dir.SetMaxSizeInBytes(4 * 1024 + 20 * iter);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, true);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				for (int i = 0; i < NUM_THREADS; i++)
				{
					// Without fix for LUCENE-1130: one of the
					// threads will hang
					threads[i].Join();
					Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
				}
				
				try
				{
					writer.Close(false);
				}
				catch (System.IO.IOException ioe)
				{
				}
				
				dir.Close();
			}
		}
Example #21
		// Runs test, with multiple threads, using the specific
		// failure to trigger an IOException
		public virtual void  _testMultipleThreadsFailure(MockRAMDirectory.Failure failure)
		{
			
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 5; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				// We expect disk full exceptions in the merge threads
				cms.SetSuppressExceptions();
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(2);
				writer.SetMergeFactor(4);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, true);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 10));
				
				dir.FailOn(failure);
				failure.SetDoFail();
				
				for (int i = 0; i < NUM_THREADS; i++)
				{
					threads[i].Join();
					Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
				}
				
				bool success = false;
				try
				{
					writer.Close(false);
					success = true;
				}
				catch (System.IO.IOException ioe)
				{
					failure.ClearDoFail();
					writer.Close(false);
				}
				
				if (success)
				{
					IndexReader reader = IndexReader.Open(dir);
					for (int j = 0; j < reader.MaxDoc(); j++)
					{
						if (!reader.IsDeleted(j))
						{
							reader.Document(j);
							reader.GetTermFreqVectors(j);
						}
					}
					reader.Close();
				}
				
				dir.Close();
			}
		}
Example #22
		public virtual void  TestNoWaitClose()
		{
			RAMDirectory directory = new MockRAMDirectory();
			
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			
			for (int pass = 0; pass < 2; pass++)
			{
				bool autoCommit = pass == 0;
				IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
				
				for (int iter = 0; iter < 10; iter++)
				{
					ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
					writer.SetMergeScheduler(cms);
					writer.SetMaxBufferedDocs(2);
					writer.SetMergeFactor(100);
					
					for (int j = 0; j < 201; j++)
					{
						idField.SetValue(System.Convert.ToString(iter * 201 + j));
						writer.AddDocument(doc);
					}
					
					int delID = iter * 201;
					for (int j = 0; j < 20; j++)
					{
						writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID)));
						delID += 5;
					}
					
					// Force a bunch of merge threads to kick off so we
					// stress out aborting them on close:
					writer.SetMergeFactor(3);
					writer.AddDocument(doc);
					writer.Flush();
					
					writer.Close(false);
					
					IndexReader reader = IndexReader.Open(directory);
					Assert.AreEqual((1 + iter) * 182, reader.NumDocs());
					reader.Close();
					
					// Reopen
					writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
				}
				writer.Close();
			}
			
			directory.Close();
		}
Example #23
			public RunAddIndexesThreads(int numCopy)
			{
				NUM_COPY = numCopy;
				dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
				writer.SetMaxBufferedDocs(2);
				for (int i = 0; i < NUM_INIT_DOCS; i++)
					AddDoc(writer);
				writer.Close();
				
				dir2 = new MockRAMDirectory();
				writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
				cms = (ConcurrentMergeScheduler) writer2.GetMergeScheduler();
				
				readers = new IndexReader[NUM_COPY];
				for (int i = 0; i < NUM_COPY; i++)
					readers[i] = IndexReader.Open(dir);
			}
Example #24
		public virtual void  TestNoWaitClose()
		{
			RAMDirectory directory = new MockRAMDirectory();
			
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			
			for (int pass = 0; pass < 3; pass++)
			{
				bool autoCommit = pass % 2 == 0;
				IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);
				
				//System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
				for (int iter = 0; iter < 10; iter++)
				{
					//System.out.println("TEST: iter=" + iter);
					MergeScheduler ms;
					if (pass >= 2)
						ms = new ConcurrentMergeScheduler();
					else
						ms = new SerialMergeScheduler();
					
					writer.SetMergeScheduler(ms);
					writer.SetMaxBufferedDocs(2);
					writer.SetMergeFactor(100);
					
					for (int j = 0; j < 199; j++)
					{
						idField.SetValue(System.Convert.ToString(iter * 201 + j));
						writer.AddDocument(doc);
					}
					
					int delID = iter * 199;
					for (int j = 0; j < 20; j++)
					{
						writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID)));
						delID += 5;
					}
					
					// Force a bunch of merge threads to kick off so we
					// stress out aborting them on close:
					writer.SetMergeFactor(2);
					
					IndexWriter finalWriter = writer;
					System.Collections.ArrayList failure = new System.Collections.ArrayList();
					SupportClass.ThreadClass t1 = new AnonymousClassThread1(finalWriter, doc, failure, this);
					
					if (failure.Count > 0)
					{
						throw (System.Exception) failure[0];
					}
					
					t1.Start();
					
					writer.Close(false);
					t1.Join();
					
					// Make sure reader can read
					IndexReader reader = IndexReader.Open(directory);
					reader.Close();
					
					// Reopen
					writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
				}
				writer.Close();
			}
			
			directory.Close();
		}
Example #25
			public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
			{
				InitBlock(enclosingInstance);
				this.writer = writer;
				this.startMerge = startMerge;
			}
Example #26
			private void  InitBlock(ConcurrentMergeScheduler enclosingInstance)
			{
				this.enclosingInstance = enclosingInstance;
			}
Example #27
		// Runs test, with multiple threads, using the specific
		// failure to trigger an IOException
		public virtual void  _testMultipleThreadsFailure(MockRAMDirectory.Failure failure)
		{
			
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 5; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				// We expect disk full exceptions in the merge threads
				cms.SetSuppressExceptions_ForNUnitTest();
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(2);
				writer.SetMergeFactor(4);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				//bool diskFull = false;
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, true);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				try
				{
					System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 10));
				}
				catch (System.Threading.ThreadInterruptedException)
				{
					SupportClass.ThreadClass.Current().Interrupt();
				}
				
				dir.FailOn(failure);
				failure.SetDoFail();
				
				for (int i = 0; i < NUM_THREADS; i++)
				{
					while (true)
					{
						try
						{
							threads[i].Join();
							break;
						}
						catch (System.Threading.ThreadInterruptedException)
						{
							SupportClass.ThreadClass.Current().Interrupt();
						}
					}
					if (threads[i].IsAlive)
						Assert.Fail("thread seems to be hung");
					else
						Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
				}
				
				bool success = false;
				try
				{
					writer.Close(false);
					success = true;
				}
				catch (System.IO.IOException)
				{
				}
				
				if (success)
				{
					IndexReader reader = IndexReader.Open(dir);
					for (int j = 0; j < reader.MaxDoc(); j++)
					{
						if (!reader.IsDeleted(j))
						{
							reader.Document(j);
							reader.GetTermFreqVectors(j);
						}
					}
					reader.Close();
				}
				
				dir.Close();
			}
		}
Example #28
		public virtual void  TestImmediateDiskFullWithThreads()
		{
			
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 10; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				// We expect disk full exceptions in the merge threads
				cms.SetSuppressExceptions_ForNUnitTest();
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(2);
				writer.SetMergeFactor(4);
				dir.SetMaxSizeInBytes(4 * 1024 + 20 * iter);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				//bool diskFull = false;
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, true);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				for (int i = 0; i < NUM_THREADS; i++)
				{
					while (true)
					{
						try
						{
							// Without fix for LUCENE-1130: one of the
							// threads will hang
							threads[i].Join();
							break;
						}
						catch (System.Threading.ThreadInterruptedException)
						{
							SupportClass.ThreadClass.Current().Interrupt();
						}
					}
					if (threads[i].IsAlive)
						Assert.Fail("thread seems to be hung");
					else
						Assert.IsTrue(threads[i].error == null, "hit unexpected Throwable");
				}
				
				try
				{
					writer.Close(false);
				}
				catch (System.IO.IOException)
				{
				}
				
				dir.Close();
			}
		}
Example #29
		public virtual void  TestCloseWithThreads()
		{
			int NUM_THREADS = 3;
			
			for (int iter = 0; iter < 50; iter++)
			{
				MockRAMDirectory dir = new MockRAMDirectory();
				IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
				ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
				
				writer.SetMergeScheduler(cms);
				writer.SetMaxBufferedDocs(10);
				writer.SetMergeFactor(4);
				
				IndexerThread[] threads = new IndexerThread[NUM_THREADS];
				//bool diskFull = false;
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i] = new IndexerThread(this, writer, false);
				
				for (int i = 0; i < NUM_THREADS; i++)
					threads[i].Start();
				
				try
				{
					System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 50));
				}
				catch (System.Threading.ThreadInterruptedException)
				{
					SupportClass.ThreadClass.Current().Interrupt();
				}
				
				writer.Close(false);
				
				// Make sure threads that are adding docs are not hung:
				for (int i = 0; i < NUM_THREADS; i++)
				{
					while (true)
					{
						try
						{
							// Without fix for LUCENE-1130: one of the
							// threads will hang
							threads[i].Join();
							break;
						}
						catch (System.Threading.ThreadInterruptedException)
						{
							SupportClass.ThreadClass.Current().Interrupt();
						}
					}
					if (threads[i].IsAlive)
						Assert.Fail("thread seems to be hung");
				}
				
				// Quick test to make sure index is not corrupt:
				IndexReader reader = IndexReader.Open(dir);
				TermDocs tdocs = reader.TermDocs(new Term("field", "aaa"));
				int count = 0;
				while (tdocs.Next())
				{
					count++;
				}
				Assert.IsTrue(count > 0);
				reader.Close();
				
				dir.Close();
			}
		}
Example #30
		public virtual void  TestExceptionDuringSync()
		{
			MockRAMDirectory dir = new MockRAMDirectory();
			FailOnlyInSync failure = new FailOnlyInSync();
			dir.FailOn(failure);
			
			IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
			failure.SetDoFail();
			
			ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
			// We expect sync exceptions in the merge threads
			cms.SetSuppressExceptions();
			writer.SetMergeScheduler(cms);
			writer.SetMaxBufferedDocs(2);
			writer.SetMergeFactor(5);
			
			for (int i = 0; i < 23; i++)
				AddDoc(writer);
			
			cms.Sync();
			Assert.IsTrue(failure.didFail);
			failure.ClearDoFail();
			writer.Close();
			
			IndexReader reader = IndexReader.Open(dir);
			Assert.AreEqual(23, reader.NumDocs());
			reader.Close();
			dir.Close();
		}
Example #31
		public virtual void  TestDeleteMerging()
		{
			
			RAMDirectory directory = new MockRAMDirectory();
			
			IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
			ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
			writer.SetMergeScheduler(cms);
			
			LogDocMergePolicy mp = new LogDocMergePolicy(writer);
			writer.SetMergePolicy(mp);
			
			// Force degenerate merging so we can get a mix of
			// merging of segments with and without deletes at the
			// start:
			mp.SetMinMergeDocs(1000);
			
			Document doc = new Document();
			Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
			doc.Add(idField);
			for (int i = 0; i < 10; i++)
			{
				for (int j = 0; j < 100; j++)
				{
					idField.SetValue(System.Convert.ToString(i * 100 + j));
					writer.AddDocument(doc);
				}
				
				int delID = i;
				while (delID < 100 * (1 + i))
				{
					writer.DeleteDocuments(new Term("id", "" + delID));
					delID += 10;
				}
				
				writer.Flush();
			}
			
			writer.Close();
			IndexReader reader = IndexReader.Open(directory);
			// Verify that we did not lose any deletes...
			Assert.AreEqual(450, reader.NumDocs());
			reader.Close();
			directory.Close();
		}
Example #32
 /// <summary>
 /// Sole constructor. </summary>
 public MergeThread(ConcurrentMergeScheduler outerInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
 {
     this.outerInstance = outerInstance;
     this.tWriter       = writer;
     this.startMerge    = startMerge;
 }
Example #33
		public virtual void  TestNoExtraFiles()
		{
			
			RAMDirectory directory = new MockRAMDirectory();
			
			for (int pass = 0; pass < 2; pass++)
			{
				
				bool autoCommit = pass == 0;
				IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
				
				for (int iter = 0; iter < 7; iter++)
				{
					ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
					writer.SetMergeScheduler(cms);
					writer.SetMaxBufferedDocs(2);
					
					for (int j = 0; j < 21; j++)
					{
						Document doc = new Document();
						doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
						writer.AddDocument(doc);
					}
					
					writer.Close();
					TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit);
					
					// Reopen
					writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
				}
				
				writer.Close();
			}
			
			directory.Close();
		}
Example #34
        /// <summary>
        /// create a new index writer config with random defaults using the specified random </summary>
        public static IndexWriterConfig NewIndexWriterConfig(Random r, LuceneVersion v, Analyzer a)
        {
            IndexWriterConfig c = new IndexWriterConfig(v, a);
            c.SetSimilarity(ClassEnvRule.Similarity);
            if (VERBOSE)
            {
                // Even though TestRuleSetupAndRestoreClassEnv calls
                // InfoStream.setDefault, we do it again here so that
                // the PrintStreamInfoStream.messageID increments so
                // that when there are separate instances of
                // IndexWriter created we see "IW 0", "IW 1", "IW 2",
                // ... instead of just always "IW 0":
                c.InfoStream = new TestRuleSetupAndRestoreClassEnv.ThreadNameFixingPrintStreamInfoStream(Console.Out);
            }

            if (r.NextBoolean())
            {
                c.SetMergeScheduler(new SerialMergeScheduler());
            }
            else if (Rarely(r))
            {
                int maxThreadCount = TestUtil.NextInt(Random(), 1, 4);
                int maxMergeCount = TestUtil.NextInt(Random(), maxThreadCount, maxThreadCount + 4);
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                cms.SetMaxMergesAndThreads(maxMergeCount, maxThreadCount);
                c.SetMergeScheduler(cms);
            }
            if (r.NextBoolean())
            {
                if (Rarely(r))
                {
                    // crazy value
                    c.SetMaxBufferedDocs(TestUtil.NextInt(r, 2, 15));
                }
                else
                {
                    // reasonable value
                    c.SetMaxBufferedDocs(TestUtil.NextInt(r, 16, 1000));
                }
            }
            if (r.NextBoolean())
            {
                if (Rarely(r))
                {
                    // crazy value
                    c.SetTermIndexInterval(r.NextBoolean() ? TestUtil.NextInt(r, 1, 31) : TestUtil.NextInt(r, 129, 1000));
                }
                else
                {
                    // reasonable value
                    c.SetTermIndexInterval(TestUtil.NextInt(r, 32, 128));
                }
            }
            if (r.NextBoolean())
            {
                int maxNumThreadStates = Rarely(r) ? TestUtil.NextInt(r, 5, 20) : TestUtil.NextInt(r, 1, 4); // reasonable value -  crazy value

                if (Rarely(r))
                {
                    // Retrieve the package-private setIndexerThreadPool
                    // method:
                    MethodInfo setIndexerThreadPoolMethod = typeof(IndexWriterConfig).GetMethod("SetIndexerThreadPool", new Type[] { typeof(DocumentsWriterPerThreadPool) });
                    //setIndexerThreadPoolMethod.setAccessible(true);
                    Type clazz = typeof(RandomDocumentsWriterPerThreadPool);
                    ConstructorInfo ctor = clazz.GetConstructor(new[] { typeof(int), typeof(Random) });
                    //ctor.Accessible = true;
                    // random thread pool
                    setIndexerThreadPoolMethod.Invoke(c, new[] { ctor.Invoke(new object[] { maxNumThreadStates, r }) });
                }
                else
                {
                    // random thread pool
                    c.SetMaxThreadStates(maxNumThreadStates);
                }
            }

            c.SetMergePolicy(NewMergePolicy(r));

            if (Rarely(r))
            {
                c.SetMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.InfoStream));
            }
            c.SetUseCompoundFile(r.NextBoolean());
            c.SetReaderPooling(r.NextBoolean());
            c.SetReaderTermsIndexDivisor(TestUtil.NextInt(r, 1, 4));
            c.SetCheckIntegrityAtMerge(r.NextBoolean());
            return c;
        }
Example #35
        public virtual void TestNoExtraFiles()
        {
            RAMDirectory directory = new MockRAMDirectory();
            IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

            for (int iter = 0; iter < 7; iter++)
            {
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                writer.SetMergeScheduler(cms);
                writer.SetMaxBufferedDocs(2);

                for (int j = 0; j < 21; j++)
                {
                    Document doc = new Document();
                    doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
                    writer.AddDocument(doc);
                }

                writer.Close();
                TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles");
                // Reopen
                writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED);
            }
            writer.Close();
            directory.Close();
        }