public virtual void TestAddIndexes2()
{
    bool optimize = false;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create a 2nd index
    Directory dir2 = new MockRAMDirectory();
    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer2.SetInfoStream(infoStream);
    CreateIndexNoClose(!optimize, "index2", writer2);
    writer2.Close();

    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });

    IndexReader r1 = writer.GetReader();
    Assert.AreEqual(500, r1.MaxDoc());

    r1.Close();
    writer.Close();
    dir1.Close();
}
public virtual void TestAddIndexesAndDoDeletesThreads()
{
    int numIter = 5;
    int numDirs = 3;

    Directory mainDir = new MockRAMDirectory();
    IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    mainWriter.SetInfoStream(infoStream);

    AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(this, numIter, mainWriter);
    addDirThreads.LaunchThreads(numDirs);
    addDirThreads.JoinThreads();

    //Assert.AreEqual(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
    //    * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
    Assert.AreEqual(addDirThreads.count.IntValue(), addDirThreads.mainWriter.NumDocs());

    addDirThreads.Close(true);

    Assert.IsTrue(addDirThreads.failures.Count == 0);

    _TestUtil.CheckIndex(mainDir);

    IndexReader reader = IndexReader.Open(mainDir);
    Assert.AreEqual(addDirThreads.count.IntValue(), reader.NumDocs());
    //Assert.AreEqual(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
    //    * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
    reader.Close();

    addDirThreads.CloseDir();
    mainDir.Close();
}
public virtual void TestAfterClose()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    IndexReader r = writer.GetReader();
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    // reader should remain usable even after IndexWriter is closed:
    Assert.AreEqual(100, r.NumDocs());
    Query q = new TermQuery(new Term("indexname", "test"));
    Assert.AreEqual(100, new IndexSearcher(r).Search(q, 10).totalHits);

    try
    {
        r.Reopen();
        Assert.Fail("failed to hit AlreadyClosedException");
    }
    catch (AlreadyClosedException ace)
    {
        // expected
    }

    r.Close();
    dir1.Close();
}
public IndexWriter GetIndexWriter(bool createNewIndex, IndexWriter.MaxFieldLength maxFieldLength)
{
    var indexWriter = new IndexWriter(IndexDirectory, Analyzer, createNewIndex, maxFieldLength);
    if (Debug)
    {
        indexWriter.SetInfoStream(new StreamWriter(Console.OpenStandardOutput()));
    }
    return indexWriter;
}
/// <summary> Tests creating a segment, then checks to ensure the segment can be
/// seen via IndexWriter.GetReader.
/// </summary>
public virtual void DoTestIndexWriterReopenSegment(bool optimize)
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    IndexReader r1 = writer.GetReader();
    Assert.AreEqual(0, r1.MaxDoc());
    CreateIndexNoClose(false, "index1", writer);
    writer.Flush(!optimize, true, true);

    IndexReader iwr1 = writer.GetReader();
    Assert.AreEqual(100, iwr1.MaxDoc());

    IndexReader r2 = writer.GetReader();
    Assert.AreEqual(r2.MaxDoc(), 100);

    // add 100 documents
    for (int x = 10000; x < 10000 + 100; x++)
    {
        Document d = CreateDocument(x, "index1", 5);
        writer.AddDocument(d);
    }
    writer.Flush(false, true, true);

    // verify the reader was reopened internally
    IndexReader iwr2 = writer.GetReader();
    Assert.IsTrue(iwr2 != r1);
    Assert.AreEqual(200, iwr2.MaxDoc());

    // should have flushed out a segment
    IndexReader r3 = writer.GetReader();
    Assert.IsTrue(r2 != r3);
    Assert.AreEqual(200, r3.MaxDoc());

    // dec ref the readers rather than close them because
    // closing flushes changes to the writer
    r1.Close();
    iwr1.Close();
    r2.Close();
    r3.Close();
    iwr2.Close();
    writer.Close();

    // test whether the changes made it to the directory
    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    IndexReader w2r1 = writer.GetReader();
    // ensure the deletes were actually flushed to the directory
    Assert.AreEqual(200, w2r1.MaxDoc());
    w2r1.Close();
    writer.Close();

    dir1.Close();
}
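The behaviour the test above exercises can be boiled down to a short sketch. The following is illustrative only: it assumes the same Lucene.Net 2.9-era API as the surrounding tests, and the directory, analyzer, and field name are made up for the example.

// Hedged sketch (not from the original source): a near-real-time reader
// obtained from IndexWriter.GetReader() sees documents before they are committed.
public static void NearRealTimeReaderSketch()
{
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);

    Document doc = new Document();
    doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.AddDocument(doc);

    // GetReader() returns a reader that already sees the uncommitted document.
    IndexReader nrtReader = writer.GetReader();
    // nrtReader.MaxDoc() is 1 here, even though nothing has been committed yet.

    nrtReader.Close();
    writer.Close();
    dir.Close();
}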
public virtual void TestDeleteFromIndexWriter()
{
    bool optimize = true;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.ReaderTermsIndexDivisor = 2;
    writer.SetInfoStream(infoStream, null);

    // create the index
    CreateIndexNoClose(!optimize, "index1", writer);
    writer.Flush(false, true, true, null);

    // get a reader
    IndexReader r1 = writer.GetReader(null);

    System.String id10 = r1.Document(10, null).GetField("id").StringValue(null);

    // deleted IW docs should not show up in the next getReader
    writer.DeleteDocuments(null, new Term("id", id10));
    IndexReader r2 = writer.GetReader(null);
    Assert.AreEqual(1, Count(new Term("id", id10), r1));
    Assert.AreEqual(0, Count(new Term("id", id10), r2));

    System.String id50 = r1.Document(50, null).GetField("id").StringValue(null);
    Assert.AreEqual(1, Count(new Term("id", id50), r1));

    writer.DeleteDocuments(null, new Term("id", id50));

    IndexReader r3 = writer.GetReader(null);
    Assert.AreEqual(0, Count(new Term("id", id10), r3));
    Assert.AreEqual(0, Count(new Term("id", id50), r3));

    System.String id75 = r1.Document(75, null).GetField("id").StringValue(null);
    writer.DeleteDocuments(null, new TermQuery(new Term("id", id75)));
    IndexReader r4 = writer.GetReader(null);
    Assert.AreEqual(1, Count(new Term("id", id75), r3));
    Assert.AreEqual(0, Count(new Term("id", id75), r4));

    r1.Close();
    r2.Close();
    r3.Close();
    r4.Close();
    writer.Close();

    // reopen the writer to verify the delete made it to the directory
    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    IndexReader w2r1 = writer.GetReader(null);
    Assert.AreEqual(0, Count(new Term("id", id10), w2r1));
    w2r1.Close();
    writer.Close();
    dir1.Close();
}
public virtual void TestDuringAddDelete()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.SetMergeFactor(2);

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    SupportClass.ThreadClass[] threads = new SupportClass.ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread1(endTime, writer, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int sum = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen();
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        sum += new IndexSearcher(r).Search(q, 10).totalHits;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.IsTrue(sum > 0);
    Assert.AreEqual(0, excs.Count);

    writer.Close();

    _TestUtil.CheckIndex(dir1);
    r.Close();
    dir1.Close();
}
/// <summary> If non-null, information about merges and a message when
/// <see cref="GetMaxFieldLength()"/> is reached will be printed to this stream.
/// <para>Example: <c>index.SetInfoStream(new StreamWriter(Console.OpenStandardError()));</c></para>
/// </summary>
/// <seealso cref="IndexWriter.SetInfoStream(System.IO.StreamWriter)"/>
/// <throws> IllegalStateException if the index is closed </throws>
public virtual void SetInfoStream(System.IO.StreamWriter infoStream)
{
    lock (directory)
    {
        AssureOpen();
        if (indexWriter != null)
        {
            indexWriter.SetInfoStream(infoStream);
        }
        this.infoStream = infoStream;
    }
}
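For orientation, this is roughly what enabling an info stream looks like from the caller's side. It is a hedged sketch, not part of the class above: it assumes the SetInfoStream(StreamWriter) overload used throughout these examples, and the directory and analyzer parameters are placeholders.

// Illustrative only: route IndexWriter diagnostics (merge, flush, and
// max-field-length messages) to standard output while indexing.
public static void InfoStreamToConsoleSketch(Directory dir, Analyzer analyzer)
{
    IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);

    // AutoFlush so the diagnostic messages appear as they are written.
    var infoStream = new StreamWriter(Console.OpenStandardOutput()) { AutoFlush = true };
    writer.SetInfoStream(infoStream);

    Document doc = new Document();
    doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.AddDocument(doc);

    writer.Close();   // flush/merge activity is reported on the console
}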
public virtual void TestAddIndexes()
{
    bool optimize = false;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    // create the index
    CreateIndexNoClose(!optimize, "index1", writer);
    writer.Flush(false, true, true);

    // create a 2nd index
    Directory dir2 = new MockRAMDirectory();
    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer2.SetInfoStream(infoStream);
    CreateIndexNoClose(!optimize, "index2", writer2);
    writer2.Close();

    IndexReader r0 = writer.GetReader();
    Assert.IsTrue(r0.IsCurrent());
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    Assert.IsFalse(r0.IsCurrent());
    r0.Close();

    IndexReader r1 = writer.GetReader();
    Assert.IsTrue(r1.IsCurrent());

    writer.Commit();
    Assert.IsTrue(r1.IsCurrent());

    Assert.AreEqual(200, r1.MaxDoc());

    int index2df = r1.DocFreq(new Term("indexname", "index2"));

    Assert.AreEqual(100, index2df);

    // verify the docs are from different indexes
    Document doc5 = r1.Document(5);
    Assert.AreEqual("index1", doc5.Get("indexname"));
    Document doc150 = r1.Document(150);
    Assert.AreEqual("index2", doc150.Get("indexname"));

    r1.Close();
    writer.Close();
    dir1.Close();
}
/// <summary> Close the IndexReader and open an IndexWriter.</summary>
/// <throws> IOException </throws>
protected internal virtual void CreateIndexWriter()
{
    if (indexWriter == null)
    {
        if (indexReader != null)
        {
            indexReader.Close();
            indexReader = null;
        }
        indexWriter = new IndexWriter(directory, analyzer, false);
        indexWriter.SetInfoStream(infoStream);
        indexWriter.SetUseCompoundFile(useCompoundFile);
        indexWriter.SetMaxBufferedDocs(maxBufferedDocs);
        indexWriter.SetMaxFieldLength(maxFieldLength);
        indexWriter.SetMergeFactor(mergeFactor);
    }
}
/// <summary> Close the IndexReader and open an IndexWriter.</summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
/// <throws> LockObtainFailedException if another writer has this index open
/// (<code>write.lock</code> could not be obtained) </throws>
/// <throws> IOException if there is a low-level IO error </throws>
protected internal virtual void CreateIndexWriter()
{
    if (indexWriter == null)
    {
        if (indexReader != null)
        {
            indexReader.Close();
            indexReader = null;
        }
        indexWriter = new IndexWriter(directory, analyzer, false, new IndexWriter.MaxFieldLength(maxFieldLength));
        // IndexModifier cannot use ConcurrentMergeScheduler
        // because it synchronizes on the directory which can
        // cause deadlock
        indexWriter.SetMergeScheduler(new SerialMergeScheduler());
        indexWriter.SetInfoStream(infoStream);
        indexWriter.SetUseCompoundFile(useCompoundFile);
        if (maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH)
        {
            indexWriter.SetMaxBufferedDocs(maxBufferedDocs);
        }
        indexWriter.SetMergeFactor(mergeFactor);
    }
}
public virtual void TestMergeWarmer()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();

    // Enroll warmer
    MyWarmer warmer = new MyWarmer();
    writer.SetMergedSegmentWarmer(warmer);
    writer.SetMergeFactor(2);
    writer.SetMaxBufferedDocs(2);

    for (int i = 0; i < 10; i++)
    {
        writer.AddDocument(CreateDocument(i, "test", 4));
    }
    ((ConcurrentMergeScheduler)writer.GetMergeScheduler()).Sync();

    Assert.IsTrue(warmer.warmCount > 0);
    int count = warmer.warmCount;

    writer.AddDocument(CreateDocument(17, "test", 4));
    writer.Optimize();
    Assert.IsTrue(warmer.warmCount > count);

    writer.Close();
    r1.Close();
    dir1.Close();
}
public virtual void TestAfterCommit()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();
    _TestUtil.CheckIndex(dir1);
    writer.Commit();
    _TestUtil.CheckIndex(dir1);
    Assert.AreEqual(100, r1.NumDocs());

    for (int i = 0; i < 10; i++)
    {
        writer.AddDocument(CreateDocument(i, "test", 4));
    }
    ((ConcurrentMergeScheduler)writer.GetMergeScheduler()).Sync();

    IndexReader r2 = r1.Reopen();
    if (r2 != r1)
    {
        r1.Close();
        r1 = r2;
    }
    Assert.AreEqual(110, r1.NumDocs());
    writer.Close();
    r1.Close();
    dir1.Close();
}
/// <summary> Close the IndexReader and open an IndexWriter.</summary>
/// <throws> CorruptIndexException if the index is corrupt </throws>
/// <throws> LockObtainFailedException if another writer has this index open
/// (<c>write.lock</c> could not be obtained) </throws>
/// <throws> IOException if there is a low-level IO error </throws>
protected internal virtual void CreateIndexWriter()
{
    if (indexWriter == null)
    {
        if (indexReader != null)
        {
            indexReader.Close();
            indexReader = null;
        }
        indexWriter = new IndexWriter(directory, analyzer, false, new IndexWriter.MaxFieldLength(maxFieldLength));
        // IndexModifier cannot use ConcurrentMergeScheduler
        // because it synchronizes on the directory which can
        // cause deadlock
        indexWriter.SetMergeScheduler(new SerialMergeScheduler());
        indexWriter.SetInfoStream(infoStream);
        indexWriter.SetUseCompoundFile(useCompoundFile);
        if (maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH)
            indexWriter.SetMaxBufferedDocs(maxBufferedDocs);
        indexWriter.SetMergeFactor(mergeFactor);
    }
}
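Pulled out of the surrounding class, the configuration performed by CreateIndexWriter() looks roughly like the sketch below. The values are illustrative and the helper name is made up; it assumes the same 2.9-era setter-based IndexWriter API shown above.

// Hedged sketch of the writer setup described above (not the original code).
public static IndexWriter CreateSerialMergeWriterSketch(Directory directory, Analyzer analyzer,
                                                        System.IO.StreamWriter infoStream)
{
    // false = append to an existing index, as in the snippet above.
    IndexWriter writer = new IndexWriter(directory, analyzer, false, new IndexWriter.MaxFieldLength(10000));

    // Serial merges sidestep the deadlock noted above when the caller also
    // synchronizes on the directory.
    writer.SetMergeScheduler(new SerialMergeScheduler());
    writer.SetInfoStream(infoStream);
    writer.SetUseCompoundFile(true);
    writer.SetMaxBufferedDocs(IndexWriter.DEFAULT_MAX_BUFFERED_DOCS);
    writer.SetMergeFactor(10);
    return writer;
}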
public virtual void TestAddIndexes()
{
    bool optimize = false;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    // create the index
    CreateIndexNoClose(!optimize, "index1", writer);
    writer.Flush(false, true, true);

    // create a 2nd index
    Directory dir2 = new MockRAMDirectory();
    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer2.SetInfoStream(infoStream);
    CreateIndexNoClose(!optimize, "index2", writer2);
    writer2.Close();

    IndexReader r0 = writer.GetReader();
    Assert.IsTrue(r0.IsCurrent());
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    Assert.IsFalse(r0.IsCurrent());
    r0.Close();

    IndexReader r1 = writer.GetReader();
    Assert.IsTrue(r1.IsCurrent());

    writer.Commit();
    Assert.IsFalse(r1.IsCurrent());

    Assert.AreEqual(200, r1.MaxDoc);

    int index2df = r1.DocFreq(new Term("indexname", "index2"));

    Assert.AreEqual(100, index2df);

    // verify the docs are from different indexes
    Document doc5 = r1.Document(5);
    Assert.AreEqual("index1", doc5.Get("indexname"));
    Document doc150 = r1.Document(150);
    Assert.AreEqual("index2", doc150.Get("indexname"));

    r1.Close();
    writer.Close();
    dir1.Close();
}
static void Do()
{
    //var directory = new SimpleFSDirectory(new DirectoryInfo(@"c:\temp\lucene"));
    using (var connection = new SqlConnection(@"MultipleActiveResultSets=True;Data Source=(localdb)\v11.0;Initial Catalog=TestLucene;Integrated Security=True;Connect Timeout=30;Encrypt=False;TrustServerCertificate=False"))
    {
        connection.Open();
        var directory = new SqlServerDirectory(connection, new Options() { SchemaName = "[search]" });

        for (int outer = 0; outer < 1000; outer++)
        {
            IndexWriter indexWriter = null;
            while (indexWriter == null)
            {
                try
                {
                    indexWriter = new IndexWriter(directory,
                        new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30),
                        !IndexReader.IndexExists(directory),
                        new Lucene.Net.Index.IndexWriter.MaxFieldLength(IndexWriter.DEFAULT_MAX_FIELD_LENGTH));
                }
                catch (LockObtainFailedException)
                {
                    Console.WriteLine("Lock is taken, waiting for timeout...");
                    Thread.Sleep(1000);
                }
            }

            Console.WriteLine("IndexWriter lock obtained, this process has exclusive write access to index");
            indexWriter.SetRAMBufferSizeMB(100.0);
            indexWriter.SetInfoStream(new StreamWriter(Console.OpenStandardOutput()));
            indexWriter.UseCompoundFile = false;

            for (int iDoc = 0; iDoc < 1000; iDoc++)
            {
                if (iDoc % 10 == 0)
                    Console.WriteLine(iDoc);
                Document doc = new Document();
                doc.Add(new Field("id", DateTime.Now.ToFileTimeUtc().ToString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
                doc.Add(new Field("Title", "dog " + GeneratePhrase(50), Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
                doc.Add(new Field("Body", "dog " + GeneratePhrase(50), Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
                indexWriter.AddDocument(doc);
            }

            Console.WriteLine("Total docs is {0}", indexWriter.NumDocs());
            Console.Write("Flushing and disposing writer...");
            indexWriter.Flush(true, true, true);
            indexWriter.Dispose();
        }

        IndexSearcher searcher;
        using (new AutoStopWatch("Creating searcher"))
        {
            searcher = new IndexSearcher(directory);
        }
        using (new AutoStopWatch("Count"))
            Console.WriteLine("Number of docs: {0}", searcher.IndexReader.NumDocs());

        while (true)
        {
            SearchForPhrase(searcher, "microsoft");
            Thread.Sleep(1000);
            //Console.WriteLine("Press a key to search again");
            //Console.ReadKey();
        }
    }
}
public virtual void TestAddIndexes2()
{
    bool optimize = false;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create a 2nd index
    Directory dir2 = new MockRAMDirectory();
    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer2.SetInfoStream(infoStream);
    CreateIndexNoClose(!optimize, "index2", writer2);
    writer2.Close();

    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });
    writer.AddIndexesNoOptimize(new Directory[] { dir2 });

    IndexReader r1 = writer.GetReader();
    Assert.AreEqual(500, r1.MaxDoc);

    r1.Close();
    writer.Close();
    dir1.Close();
}
public virtual void TestDeleteFromIndexWriter()
{
    bool optimize = true;

    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.ReaderTermsIndexDivisor = 2;
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(!optimize, "index1", writer);
    writer.Flush(false, true, true);

    // get a reader
    IndexReader r1 = writer.GetReader();

    System.String id10 = r1.Document(10).GetField("id").StringValue;

    // deleted IW docs should not show up in the next getReader
    writer.DeleteDocuments(new Term("id", id10));
    IndexReader r2 = writer.GetReader();
    Assert.AreEqual(1, Count(new Term("id", id10), r1));
    Assert.AreEqual(0, Count(new Term("id", id10), r2));

    System.String id50 = r1.Document(50).GetField("id").StringValue;
    Assert.AreEqual(1, Count(new Term("id", id50), r1));

    writer.DeleteDocuments(new Term("id", id50));

    IndexReader r3 = writer.GetReader();
    Assert.AreEqual(0, Count(new Term("id", id10), r3));
    Assert.AreEqual(0, Count(new Term("id", id50), r3));

    System.String id75 = r1.Document(75).GetField("id").StringValue;
    writer.DeleteDocuments(new TermQuery(new Term("id", id75)));
    IndexReader r4 = writer.GetReader();
    Assert.AreEqual(1, Count(new Term("id", id75), r3));
    Assert.AreEqual(0, Count(new Term("id", id75), r4));

    r1.Close();
    r2.Close();
    r3.Close();
    r4.Close();
    writer.Close();

    // reopen the writer to verify the delete made it to the directory
    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    IndexReader w2r1 = writer.GetReader();
    Assert.AreEqual(0, Count(new Term("id", id10), w2r1));
    w2r1.Close();
    writer.Close();
    dir1.Close();
}
public virtual void TestAddIndexesAndDoDeletesThreads()
{
    int numIter = 5;
    int numDirs = 3;

    Directory mainDir = new MockRAMDirectory();
    IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    mainWriter.SetInfoStream(infoStream);

    AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(this, numIter, mainWriter);
    addDirThreads.LaunchThreads(numDirs);
    addDirThreads.JoinThreads();

    //Assert.AreEqual(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
    //    * addDirThreads.NUM_INIT_DOCS, addDirThreads.mainWriter.numDocs());
    Assert.AreEqual(addDirThreads.count.IntValue(), addDirThreads.mainWriter.NumDocs());

    addDirThreads.Close(true);

    Assert.IsTrue(addDirThreads.failures.Count == 0);

    _TestUtil.CheckIndex(mainDir);

    IndexReader reader = IndexReader.Open(mainDir, true);
    Assert.AreEqual(addDirThreads.count.IntValue(), reader.NumDocs());
    //Assert.AreEqual(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
    //    * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
    reader.Close();

    addDirThreads.CloseDir();
    mainDir.Close();
}
/// <summary> Tests creating a segment, then checks to ensure the segment can be
/// seen via IndexWriter.GetReader.
/// </summary>
public virtual void DoTestIndexWriterReopenSegment(bool optimize)
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    IndexReader r1 = writer.GetReader();
    Assert.AreEqual(0, r1.MaxDoc);
    CreateIndexNoClose(false, "index1", writer);
    writer.Flush(!optimize, true, true);

    IndexReader iwr1 = writer.GetReader();
    Assert.AreEqual(100, iwr1.MaxDoc);

    IndexReader r2 = writer.GetReader();
    Assert.AreEqual(r2.MaxDoc, 100);

    // add 100 documents
    for (int x = 10000; x < 10000 + 100; x++)
    {
        Document d = CreateDocument(x, "index1", 5);
        writer.AddDocument(d);
    }
    writer.Flush(false, true, true);

    // verify the reader was reopened internally
    IndexReader iwr2 = writer.GetReader();
    Assert.IsTrue(iwr2 != r1);
    Assert.AreEqual(200, iwr2.MaxDoc);

    // should have flushed out a segment
    IndexReader r3 = writer.GetReader();
    Assert.IsTrue(r2 != r3);
    Assert.AreEqual(200, r3.MaxDoc);

    // dec ref the readers rather than close them because
    // closing flushes changes to the writer
    r1.Close();
    iwr1.Close();
    r2.Close();
    r3.Close();
    iwr2.Close();
    writer.Close();

    // test whether the changes made it to the directory
    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    IndexReader w2r1 = writer.GetReader();
    // ensure the deletes were actually flushed to the directory
    Assert.AreEqual(200, w2r1.MaxDoc);
    w2r1.Close();
    writer.Close();

    dir1.Close();
}
public virtual void TestDuringAddIndexes()
{
    Assert.Ignore("Known issue");

    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit(null);

    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader(null);

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen(null);
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        int count = new IndexSearcher(r).Search(q, 10, null).TotalHits;
        Assert.IsTrue(count >= lastCount);
        lastCount = count;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.AreEqual(0, excs.Count);
    r.Close();
    try
    {
        Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    }
    catch
    {
        //DIGY:
        //I think this is an expected behaviour.
        //There aren't any pending files to be deleted after "writer.Close()".
        //But, since lucene.java's test case is designed that way
        //and I might be wrong, I will add a warning

        // Assert only in debug mode, so that CheckIndex is called during release.
#if DEBUG
        Assert.Inconclusive("", 0, dir1.GetOpenDeletedFiles().Count);
#endif
    }
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}
public virtual void TestMergeWarmer()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();

    // Enroll warmer
    MyWarmer warmer = new MyWarmer();
    writer.MergedSegmentWarmer = warmer;
    writer.MergeFactor = 2;
    writer.SetMaxBufferedDocs(2);

    for (int i = 0; i < 100; i++)
    {
        writer.AddDocument(CreateDocument(i, "test", 4));
    }
    ((ConcurrentMergeScheduler)writer.MergeScheduler).Sync();

    Assert.IsTrue(warmer.warmCount > 0);
    int count = warmer.warmCount;

    writer.AddDocument(CreateDocument(17, "test", 4));
    writer.Optimize();
    Assert.IsTrue(warmer.warmCount > count);

    writer.Close();
    r1.Close();
    dir1.Close();
}
public virtual void TestDuringAddDelete()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread1(endTime, writer, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int sum = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen();
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        sum += new IndexSearcher(r).Search(q, 10).TotalHits;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.IsTrue(sum > 0);
    Assert.AreEqual(0, excs.Count);

    writer.Close();

    _TestUtil.CheckIndex(dir1);
    r.Close();
    dir1.Close();
}
public virtual void TestDuringAddDelete()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.SetMergeFactor(2);

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    System.Threading.Thread[] threads = new System.Threading.Thread[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new System.Threading.Thread(() =>
        {
            int count = 0;
            System.Random rnd = new System.Random();
            while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
            {
                try
                {
                    for (int docUpto = 0; docUpto < 10; docUpto++)
                    {
                        writer.AddDocument(Lucene.Net.Index.TestIndexWriterReader.CreateDocument(10 * count + docUpto, "test", 4));
                    }
                    count++;
                    int limit = count * 10;
                    for (int delUpto = 0; delUpto < 5; delUpto++)
                    {
                        int x = rnd.Next(limit);
                        writer.DeleteDocuments(new Term("field3", "b" + x));
                    }
                }
                catch (System.Exception t)
                {
                    excs.Add(t);
                    throw new System.SystemException("", t);
                }
            }
        });
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int sum = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen();
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        sum += new IndexSearcher(r).Search(q, 10).TotalHits;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.IsTrue(sum > 0);
    Assert.AreEqual(0, excs.Count);

    writer.Close();

    _TestUtil.CheckIndex(dir1);
    r.Close();
    dir1.Close();
}
public virtual void TestDuringAddIndexes()
{
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen();
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        int count = new IndexSearcher(r).Search(q, 10).TotalHits;
        Assert.IsTrue(count >= lastCount);
        lastCount = count;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.AreEqual(0, excs.Count);
    r.Close();
    try
    {
        Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    }
    catch
    {
        //DIGY:
        //I think this is an expected behaviour.
        //There aren't any pending files to be deleted after "writer.Close()".
        //But, since lucene.java's test case is designed that way
        //and I might be wrong, I will add a warning

        // Assert only in debug mode, so that CheckIndex is called during release.
#if DEBUG
        Assert.Inconclusive("", 0, dir1.GetOpenDeletedFiles().Count);
#endif
    }
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}
public virtual void TestAfterClose()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    IndexReader r = writer.GetReader();
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    // reader should remain usable even after IndexWriter is closed:
    Assert.AreEqual(100, r.NumDocs());
    Query q = new TermQuery(new Term("indexname", "test"));
    Assert.AreEqual(100, new IndexSearcher(r).Search(q, 10).TotalHits);

    Assert.Throws<AlreadyClosedException>(() => r.Reopen(), "failed to hit AlreadyClosedException");

    r.Close();
    dir1.Close();
}
public virtual void TestAfterCommit()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();
    _TestUtil.CheckIndex(dir1);
    writer.Commit();
    _TestUtil.CheckIndex(dir1);
    Assert.AreEqual(100, r1.NumDocs());

    for (int i = 0; i < 10; i++)
    {
        writer.AddDocument(CreateDocument(i, "test", 4));
    }
    ((ConcurrentMergeScheduler)writer.MergeScheduler).Sync();

    IndexReader r2 = r1.Reopen();
    if (r2 != r1)
    {
        r1.Close();
        r1 = r2;
    }
    Assert.AreEqual(110, r1.NumDocs());
    writer.Close();
    r1.Close();
    dir1.Close();
}
public virtual void TestDuringAddIndexes_LuceneNet()
{
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit(null);

    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader(null);

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        using (IndexReader r2 = writer.GetReader(null))
        {
            Query q = new TermQuery(new Term("indexname", "test"));
            int count = new IndexSearcher(r2).Search(q, 10, null).TotalHits;
            Assert.IsTrue(count >= lastCount);
            lastCount = count;
        }
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.AreEqual(0, excs.Count);
    r.Close();
    Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}
public virtual void TestDuringAddIndexes_LuceneNet()
{
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        using (IndexReader r2 = writer.GetReader())
        {
            Query q = new TermQuery(new Term("indexname", "test"));
            int count = new IndexSearcher(r2).Search(q, 10).TotalHits;
            Assert.IsTrue(count >= lastCount);
            lastCount = count;
        }
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.AreEqual(0, excs.Count);
    r.Close();
    Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}