NOTE: if this method hits an OutOfMemoryError you should immediately close the writer. See above for details.
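A minimal sketch of how a caller might honor this note, assuming the Lucene.NET 4.8-style API used in several of the examples below (in .NET the condition surfaces as OutOfMemoryException); the index path, analyzer choice, document field, and class/method names here are illustrative assumptions, not taken from the examples that follow:

// Hedged sketch: reacting to the out-of-memory note above around a two-phase commit.
using System;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static class PrepareCommitSketch
{
    public static void TwoPhaseCommit(string indexPath)
    {
        // indexPath, analyzer, and field contents are assumptions for illustration only.
        FSDirectory dir = FSDirectory.Open(indexPath);
        var config = new IndexWriterConfig(LuceneVersion.LUCENE_48, new StandardAnalyzer(LuceneVersion.LUCENE_48));
        IndexWriter writer = new IndexWriter(dir, config);
        try
        {
            var doc = new Document();
            doc.Add(new TextField("content", "example text", Field.Store.YES));
            writer.AddDocument(doc);

            writer.PrepareCommit();   // first phase: flush and sync, but keep the commit invisible to readers
            writer.Commit();          // second phase: publish the prepared commit
        }
        catch (OutOfMemoryException)
        {
            // Per the note above (OutOfMemoryError in the original Java docs): the writer's
            // internal state may be inconsistent, so close it immediately instead of
            // attempting to commit or roll back.
            writer.Dispose();
            throw;
        }
        catch (Exception)
        {
            // Any other failure between PrepareCommit and Commit: discard the pending commit.
            writer.Rollback();
            throw;
        }
        writer.Dispose();
        dir.Dispose();
    }
}

Rolling back on a failed PrepareCommit is the same pattern the two-writer DoWork examples below use to keep a pair of indexes consistent.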
public virtual void TestPrepareCommit()
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);

    writer.PrepareCommit();

    IndexReader reader2 = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs);

    writer.Commit();

    IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNotNull(reader3);
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(0, reader2.NumDocs);
    Assert.AreEqual(23, reader3.NumDocs);
    reader.Dispose();
    reader2.Dispose();

    for (int i = 0; i < 17; i++)
    {
        AddDoc(writer);
    }

    Assert.AreEqual(23, reader3.NumDocs);
    reader3.Dispose();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs);
    reader.Dispose();

    writer.PrepareCommit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs);
    reader.Dispose();

    writer.Commit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(40, reader.NumDocs);
    reader.Dispose();

    writer.Dispose();
    dir.Dispose();
}
public override void DoWork()
{
    IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer1.SetMaxBufferedDocs(3);
    writer1.MergeFactor = 2;
    ((ConcurrentMergeScheduler)writer1.MergeScheduler).SetSuppressExceptions();

    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    // Intentionally use different params so flush/merge
    // happen @ different times
    writer2.SetMaxBufferedDocs(2);
    writer2.MergeFactor = 3;
    ((ConcurrentMergeScheduler)writer2.MergeScheduler).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    TestTransactions.doFail = true;
    try
    {
        lock (lock_Renamed)
        {
            try
            {
                writer1.PrepareCommit(null);
            }
            catch (System.Exception t)
            {
                writer1.Rollback(null);
                writer2.Rollback(null);
                return;
            }

            try
            {
                writer2.PrepareCommit(null);
            }
            catch (System.Exception t)
            {
                writer1.Rollback(null);
                writer2.Rollback(null);
                return;
            }

            writer1.Commit(null);
            writer2.Commit(null);
        }
    }
    finally
    {
        TestTransactions.doFail = false;
    }

    writer1.Close();
    writer2.Close();
}
public override void DoWork()
{
    IndexWriter writer1 = new IndexWriter(Dir1, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(3)).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(2)));
    ((ConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions();

    // Intentionally use different params so flush/merge
    // happen @ different times
    IndexWriter writer2 = new IndexWriter(Dir2, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2)).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(3)));
    ((ConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    DoFail = true;
    try
    {
        lock (@lock)
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (Exception t)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            try
            {
                writer2.PrepareCommit();
            }
            catch (Exception t)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            writer1.Commit();
            writer2.Commit();
        }
    }
    finally
    {
        DoFail = false;
    }

    writer1.Dispose();
    writer2.Dispose();
}
public virtual void TestPrepareCommitNoChanges()
{
    Directory dir = NewDirectory();

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    writer.PrepareCommit();
    writer.Commit();
    writer.Dispose();

    IndexReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();
    dir.Dispose();
}
public override void DoWork()
{
    IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer1.SetMaxBufferedDocs(3);
    writer1.MergeFactor = 2;
    ((ConcurrentMergeScheduler) writer1.MergeScheduler).SetSuppressExceptions();

    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    // Intentionally use different params so flush/merge
    // happen @ different times
    writer2.SetMaxBufferedDocs(2);
    writer2.MergeFactor = 3;
    ((ConcurrentMergeScheduler) writer2.MergeScheduler).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    TestTransactions.doFail = true;
    try
    {
        lock (lock_Renamed)
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (System.Exception t)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            try
            {
                writer2.PrepareCommit();
            }
            catch (System.Exception t)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            writer1.Commit();
            writer2.Commit();
        }
    }
    finally
    {
        TestTransactions.doFail = false;
    }

    writer1.Close();
    writer2.Close();
}
public virtual void TestPrepareCommitRollback()
{
    Directory dir = NewDirectory();
    if (dir is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
    }

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);

    writer.PrepareCommit();

    IndexReader reader2 = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs);

    writer.Rollback();

    IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNull(reader3);
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(0, reader2.NumDocs);
    reader.Dispose();
    reader2.Dispose();

    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    for (int i = 0; i < 17; i++)
    {
        AddDoc(writer);
    }

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.PrepareCommit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.Commit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(17, reader.NumDocs);
    reader.Dispose();
    writer.Dispose();
    dir.Dispose();
}
public virtual void TestPrepareCommitRollback()
{
    Directory dir = NewDirectory();
    if (dir is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
    }

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);

    writer.PrepareCommit();

    IndexReader reader2 = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs);

    writer.Rollback();

    IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNull(reader3);
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(0, reader2.NumDocs);
    reader.Dispose();
    reader2.Dispose();

    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    for (int i = 0; i < 17; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.PrepareCommit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.Commit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(17, reader.NumDocs);
    reader.Dispose();
    writer.Dispose();
    dir.Dispose();
}
public virtual void TestPrepareCommit()
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);

    writer.PrepareCommit();

    IndexReader reader2 = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs);

    writer.Commit();

    IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNotNull(reader3);
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(0, reader2.NumDocs);
    Assert.AreEqual(23, reader3.NumDocs);
    reader.Dispose();
    reader2.Dispose();

    for (int i = 0; i < 17; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    Assert.AreEqual(23, reader3.NumDocs);
    reader3.Dispose();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs);
    reader.Dispose();

    writer.PrepareCommit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs);
    reader.Dispose();

    writer.Commit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(40, reader.NumDocs);
    reader.Dispose();

    writer.Dispose();
    dir.Dispose();
}
public virtual void TestPrepareCommitIsCurrent()
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    writer.Commit();

    Document doc = new Document();
    writer.AddDocument(doc);

    DirectoryReader r = DirectoryReader.Open(dir);
    Assert.IsTrue(r.Current);

    writer.AddDocument(doc);
    writer.PrepareCommit();
    Assert.IsTrue(r.Current);

    DirectoryReader r2 = DirectoryReader.OpenIfChanged(r);
    Assert.IsNull(r2);

    writer.Commit();
    Assert.IsFalse(r.Current);

    writer.Dispose();
    r.Dispose();
    dir.Dispose();
}
public void TestPrepareCommitIsCurrent()
{
    Directory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);

    Document doc = new Document();
    writer.AddDocument(doc);

    IndexReader r = IndexReader.Open(dir, true);
    Assert.IsTrue(r.IsCurrent());

    writer.AddDocument(doc);
    writer.PrepareCommit();
    Assert.IsTrue(r.IsCurrent());

    IndexReader r2 = r.Reopen();
    Assert.IsTrue(r == r2);

    writer.Commit();
    Assert.IsFalse(r.IsCurrent());

    writer.Close();
    r.Close();
    dir.Close();
}
public override void DoWork()
{
    var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))
        .SetMaxBufferedDocs(3)
        .SetMergeScheduler(new ConcurrentMergeScheduler())
        .SetMergePolicy(NewLogMergePolicy(2));
    IndexWriter writer1 = new IndexWriter(dir1, config);
    ((IConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions();

    // Intentionally use different params so flush/merge
    // happen @ different times
    var config2 = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))
        .SetMaxBufferedDocs(2)
        .SetMergeScheduler(new ConcurrentMergeScheduler())
        .SetMergePolicy(NewLogMergePolicy(3));
    IndexWriter writer2 = new IndexWriter(dir2, config2);
    ((IConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    doFail = true;
    try
    {
        UninterruptableMonitor.Enter(@lock);
        try
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (Exception t) when (t.IsThrowable())
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            try
            {
                writer2.PrepareCommit();
            }
            catch (Exception t) when (t.IsThrowable())
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            writer1.Commit();
            writer2.Commit();
        }
        finally
        {
            UninterruptableMonitor.Exit(@lock);
        }
    }
    finally
    {
        doFail = false;
    }

    writer1.Dispose();
    writer2.Dispose();
}
public virtual void TestPrepareCommitNoChanges()
{
    MockRAMDirectory dir = new MockRAMDirectory();

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.PrepareCommit();
    writer.Commit();
    writer.Close();

    IndexReader reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();
    dir.Close();
}
public virtual void TestPrepareCommitRollback()
{
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.SetPreventDoubleWrite(false);

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(5);

    for (int i = 0; i < 23; i++)
        AddDoc(writer);

    IndexReader reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());

    writer.PrepareCommit();

    IndexReader reader2 = IndexReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs());

    writer.Rollback();

    IndexReader reader3 = reader.Reopen();
    Assert.AreEqual(0, reader.NumDocs());
    Assert.AreEqual(0, reader2.NumDocs());
    Assert.AreEqual(0, reader3.NumDocs());
    reader.Close();
    reader2.Close();

    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < 17; i++)
        AddDoc(writer);

    Assert.AreEqual(0, reader3.NumDocs());
    reader3.Close();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();

    writer.PrepareCommit();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();

    writer.Commit();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(17, reader.NumDocs());
    reader.Close();
    writer.Close();
    dir.Close();
}
public override void DoWork()
{
    var config = NewIndexWriterConfig(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        outerInstance,
#endif
        TEST_VERSION_CURRENT, new MockAnalyzer(Random))
        .SetMaxBufferedDocs(3)
        .SetMergeScheduler(new ConcurrentMergeScheduler())
        .SetMergePolicy(NewLogMergePolicy(2));
    IndexWriter writer1 = new IndexWriter(dir1, config);
    ((IConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions();

    // Intentionally use different params so flush/merge
    // happen @ different times
    var config2 = NewIndexWriterConfig(
#if FEATURE_INSTANCE_TESTDATA_INITIALIZATION
        outerInstance,
#endif
        TEST_VERSION_CURRENT, new MockAnalyzer(Random))
        .SetMaxBufferedDocs(2)
        .SetMergeScheduler(new ConcurrentMergeScheduler())
        .SetMergePolicy(NewLogMergePolicy(3));
    IndexWriter writer2 = new IndexWriter(dir2, config2);
    ((IConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    doFail = true;
    try
    {
        lock (@lock)
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (Exception)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            try
            {
                writer2.PrepareCommit();
            }
            catch (Exception)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }

            writer1.Commit();
            writer2.Commit();
        }
    }
    finally
    {
        doFail = false;
    }

    writer1.Dispose();
    writer2.Dispose();
}