/// <summary>
/// Verifies that assigning <see cref="NoLockFactory"/> to a RAMDirectory really disables
/// locking, so a second IndexWriter can be opened on the same directory.
/// </summary>
public virtual void TestRAMDirectoryNoLocking()
{
    MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    dir.LockFactory = NoLockFactory.DoNoLockFactory;
    dir.WrapLockFactory = false; // we explicitly want the raw factory back below
    Assert.IsTrue(dir.LockFactory is NoLockFactory, "RAMDirectory.setLockFactory did not take");

    IndexWriter firstWriter = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    firstWriter.Commit(); // required so the second open succeeds

    // Opening a second IndexWriter is normally forbidden, but with locking
    // disabled it should go through without error.
    IndexWriter secondWriter = null;
    try
    {
        secondWriter = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(IndexWriterConfig.OpenMode_e.APPEND));
    }
    catch (Exception e)
    {
        Console.Out.Write(e.StackTrace);
        Assert.Fail("Should not have hit an IOException with no locking");
    }

    firstWriter.Dispose();
    if (secondWriter != null)
    {
        secondWriter.Dispose();
    }
}
/// <summary>
/// Copies an on-disk index into a RAMDirectory and verifies its size bookkeeping,
/// its document count, and that every stored document is retrievable.
/// </summary>
public virtual void TestRAMDirectoryMem()
{
    Directory fsDir = NewFSDirectory(IndexDir);
    MockDirectoryWrapper ramDir = new MockDirectoryWrapper(Random(), new RAMDirectory(fsDir, NewIOContext(Random())));

    // The copy into RAM is complete; the underlying directory is no longer needed.
    fsDir.Dispose();

    // Reported size must agree with a recomputation from the file list.
    Assert.AreEqual(ramDir.SizeInBytes(), ramDir.RecomputedSizeInBytes);

    // Open a reader to verify the document count survived the copy.
    IndexReader reader = DirectoryReader.Open(ramDir);
    Assert.AreEqual(DocsToAdd, reader.NumDocs);

    // Search to confirm every document still has its stored "content" field.
    IndexSearcher searcher = NewSearcher(reader);
    for (int docId = 0; docId < DocsToAdd; docId++)
    {
        Document doc = searcher.Doc(docId);
        Assert.IsTrue(doc.GetField("content") != null);
    }

    // cleanup
    reader.Dispose();
}
/// <summary>
/// Installs a <see cref="MockLockFactory"/> on a directory and verifies the directory
/// sets the lock prefix, creates exactly one unique lock, and actually attempts to
/// obtain it while an IndexWriter is in use.
/// </summary>
public virtual void TestCustomLockFactory()
{
    Directory dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    MockLockFactory lockFactory = new MockLockFactory(this);
    dir.LockFactory = lockFactory;

    // Lock prefix should have been set by the directory:
    Assert.IsTrue(lockFactory.LockPrefixSet, "lock prefix was not set by the RAMDirectory");

    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));

    // Add 100 documents so that the commit lock gets exercised.
    for (int docNum = 0; docNum < 100; docNum++)
    {
        AddDoc(writer);
    }

    // Both write lock and commit lock should have been created and obtained:
    Assert.AreEqual(1, lockFactory.LocksCreated.Count, "# of unique locks created (after instantiating IndexWriter)");
    Assert.IsTrue(lockFactory.MakeLockCount >= 1, "# calls to makeLock is 0 (after instantiating IndexWriter)");
    foreach (String lockName in lockFactory.LocksCreated.Keys)
    {
        var mockLock = (MockLockFactory.MockLock)lockFactory.LocksCreated[lockName];
        Assert.IsTrue(mockLock.LockAttempts > 0, "# calls to Lock.obtain is 0 (after instantiating IndexWriter)");
    }

    writer.Dispose();
}
/// <summary>
/// Forces a failure inside a background merge (via <see cref="FailOnlyOnMerge"/>) and
/// verifies the resulting <see cref="MergePolicy.MergeException"/> is surfaced to the
/// calling thread when it syncs with the merge scheduler.
/// </summary>
public void TestExceptionOnBackgroundThreadIsPropagatedToCallingThread()
{
    using (MockDirectoryWrapper dir = NewMockDirectory())
    {
        dir.FailOn(new FailOnlyOnMerge());

        Document doc = new Document();
        Field idField = NewStringField("id", "", Field.Store.YES);
        doc.Add(idField);

        var mergeScheduler = new ConcurrentMergeScheduler();
        var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))
            .SetMergeScheduler(mergeScheduler)
            .SetMaxBufferedDocs(2)
            .SetRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
            .SetMergePolicy(NewLogMergePolicy());
        using (IndexWriter writer = new IndexWriter(dir, config))
        {
            ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 10;

            // Enough documents to trigger background merges with maxBufferedDocs=2.
            for (int docNum = 0; docNum < 20; docNum++)
            {
                writer.AddDocument(doc);
            }

            bool exceptionHit;
            try
            {
                mergeScheduler.Sync();
                exceptionHit = false;
            }
            catch (MergePolicy.MergeException)
            {
                exceptionHit = true;
            }
            assertTrue(exceptionHit);
        }
    }
}
/// <summary>
/// Creates a wrapper around the given <see cref="IndexInput"/>, recording the input's
/// name, the owning <see cref="MockDirectoryWrapper"/>, and the delegate input.
/// (NOTE(review): the previous summary said "output buffer", which was wrong for an
/// input wrapper.)
/// </summary>
public MockIndexInputWrapper(MockDirectoryWrapper dir, string name, IndexInput @delegate)
    : base("MockIndexInputWrapper(name=" + name + " delegate=" + @delegate + ")")
{
    this.Name = name;
    this.Dir = dir;
    this.@delegate = @delegate;
}
/// <summary>
/// Throws an <see cref="IOException"/> whenever the current stack trace contains a
/// frame named "DoMerge", i.e. simulates an I/O failure that happens mid-merge.
/// </summary>
public override void Eval(MockDirectoryWrapper dir)
{
    // LUCENENET specific: for these to work in release mode, we have added [MethodImpl(MethodImplOptions.NoInlining)]
    // to each possible target of the StackTraceHelper. If these change, so must the attribute on the target methods.
    if (StackTraceHelper.DoesStackTraceContainMethod("DoMerge"))
    {
        throw new IOException("now failing during merge");
    }
}
/// <summary>
/// Verifies that <see cref="FileSwitchDirectory"/> routes files by extension:
/// fdx/fdt files must land only in the primary directory and everything else only
/// in the secondary directory, while the combined listing stays consistent.
/// </summary>
public virtual void TestBasic()
{
    HashSet<string> fileExtensions = new HashSet<string>();
    fileExtensions.Add(Lucene40StoredFieldsWriter.FIELDS_EXTENSION);
    fileExtensions.Add(Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);

    MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    primaryDir.CheckIndexOnClose = false; // only part of an index
    MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(Random(), new RAMDirectory());
    secondaryDir.CheckIndexOnClose = false; // only part of an index

    FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true);

    // for now we wire Lucene40Codec because we rely upon its specific impl
    bool oldValue = OLD_FORMAT_IMPERSONATION_IS_ACTIVE;
    OLD_FORMAT_IMPERSONATION_IS_ACTIVE = true;
    try
    {
        IndexWriter writer = new IndexWriter(fsd, (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NewLogMergePolicy(false)).SetCodec(Codec.ForName("Lucene40")).SetUseCompoundFile(false));
        TestIndexWriterReader.CreateIndexNoClose(true, "ram", writer);
        IndexReader reader = DirectoryReader.Open(writer, true);
        Assert.AreEqual(100, reader.MaxDoc);
        writer.Commit();

        // we should see only fdx,fdt files here
        string[] files = primaryDir.ListAll();
        Assert.IsTrue(files.Length > 0);
        for (int x = 0; x < files.Length; x++)
        {
            string ext = FileSwitchDirectory.GetExtension(files[x]);
            Assert.IsTrue(fileExtensions.Contains(ext));
        }

        // we should not see fdx,fdt files here
        files = secondaryDir.ListAll();
        Assert.IsTrue(files.Length > 0);
        for (int x = 0; x < files.Length; x++)
        {
            string ext = FileSwitchDirectory.GetExtension(files[x]);
            Assert.IsFalse(fileExtensions.Contains(ext));
        }

        reader.Dispose();
        writer.Dispose();

        files = fsd.ListAll();
        for (int i = 0; i < files.Length; i++)
        {
            Assert.IsNotNull(files[i]);
        }
        fsd.Dispose();
    }
    finally
    {
        // BUGFIX: restore the global impersonation flag even when an assertion or
        // I/O call above throws, so a failure here cannot corrupt later tests.
        OLD_FORMAT_IMPERSONATION_IS_ACTIVE = oldValue;
    }
}
/// <summary>
/// Randomly throws an <see cref="IOException"/> while a flush (but not a close/dispose)
/// is on the stack, provided failing is enabled and we are on the test thread.
/// Sets <c>hitExc</c> before throwing so the test can verify the failure fired.
/// </summary>
public override void Eval(MockDirectoryWrapper dir)
{
    if (!doFail || !IsTestThread)
    {
        return;
    }

    // LUCENENET specific: for these to work in release mode, we have added [MethodImpl(MethodImplOptions.NoInlining)]
    // to each possible target of the StackTraceHelper. If these change, so must the attribute on the target methods.
    bool inFlush = Util.StackTraceHelper.DoesStackTraceContainMethod("Flush");
    bool inClose = Util.StackTraceHelper.DoesStackTraceContainMethod("Close")
        || Util.StackTraceHelper.DoesStackTraceContainMethod("Dispose");

    if (inFlush && !inClose && Random.NextBoolean())
    {
        hitExc = true;
        throw new IOException(Thread.CurrentThread.Name + ": now failing during flush");
    }
}
/// <summary>
/// Simulates a slow file open by sleeping 50ms before this input becomes usable.
/// If the sleep is interrupted, the partially-opened input is disposed (best-effort)
/// and the interruption is rethrown wrapped in a new <see cref="ThreadInterruptedException"/>.
/// </summary>
public SlowOpeningMockIndexInputWrapper(MockDirectoryWrapper dir, string name, IndexInput @delegate)
    : base(dir, name, @delegate)
{
    try
    {
        Thread.Sleep(50);
    }
    catch (ThreadInterruptedException ie)
    {
        try
        {
            base.Dispose();
        } // we didn't open successfully
        catch (Exception)
        {
            // best-effort cleanup: ignore secondary failures while closing
        }
        throw new ThreadInterruptedException("Thread Interrupted Exception", ie);
    }
}
/// <summary>
/// Runs the atomic-update test twice: first against an in-memory directory,
/// then against an FSDirectory in a temp folder, which is removed afterwards.
/// </summary>
public virtual void TestAtomicUpdates()
{
    // First pass: RAM-backed directory.
    using (Directory ramDirectory = new MockDirectoryWrapper(Random(), new RAMDirectory()))
    {
        RunTest(ramDirectory);
    }

    // Second pass: filesystem-backed directory.
    DirectoryInfo tempPath = CreateTempDir("lucene.test.atomic");
    using (Directory fsDirectory = NewFSDirectory(tempPath))
    {
        RunTest(fsDirectory);
    }
    System.IO.Directory.Delete(tempPath.FullName, true);
}
/// <summary>
/// Builds an index of 100 small documents in an <see cref="MMapDirectory"/> using the
/// given chunk size, then randomly re-reads stored docids to verify correctness.
/// </summary>
private void AssertChunking(Random random, int chunkSize)
{
    DirectoryInfo path = CreateTempDir("mmap" + chunkSize);
    MMapDirectory mmapDir = new MMapDirectory(path, null, chunkSize);
    // we will map a lot, try to turn on the unmap hack
    if (MMapDirectory.UNMAP_SUPPORTED)
    {
        mmapDir.UseUnmap = true;
    }
    MockDirectoryWrapper dir = new MockDirectoryWrapper(random, mmapDir);
    RandomIndexWriter writer = new RandomIndexWriter(random, dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetMergePolicy(NewLogMergePolicy()));

    // One reusable document with a docid field and a random "junk" payload.
    Document doc = new Document();
    Field idField = NewStringField("docid", "0", Field.Store.YES);
    Field junkField = NewStringField("junk", "", Field.Store.YES);
    doc.Add(idField);
    doc.Add(junkField);

    int numDocs = 100;
    for (int docNum = 0; docNum < numDocs; docNum++)
    {
        idField.StringValue = "" + docNum;
        junkField.StringValue = TestUtil.RandomUnicodeString(random);
        writer.AddDocument(doc);
    }

    IndexReader reader = writer.Reader;
    writer.Dispose();

    // Spot-check random documents: the stored docid must round-trip.
    int numAsserts = AtLeast(100);
    for (int assertNum = 0; assertNum < numAsserts; assertNum++)
    {
        int docID = random.Next(numDocs);
        Assert.AreEqual("" + docID, reader.Document(docID).Get("docid"));
    }

    reader.Dispose();
    dir.Dispose();
}
/// <summary>
/// Captures the owning <see cref="MockDirectoryWrapper"/>, the file name, and the
/// underlying <see cref="IndexInputSlicer"/> handle that this wrapper holds onto.
/// </summary>
public IndexInputSlicerAnonymousInnerClassHelper(MockDirectoryWrapper outerInstance, string name, IndexInputSlicer delegateHandle)
    : base(outerInstance)
{
    this.OuterInstance = outerInstance;
    this.Name = name;
    this.DelegateHandle = delegateHandle;
}
/// <summary>
/// Checks that RAMDirectory size bookkeeping stays consistent with a recomputed
/// size both before and after concurrent writer threads modify the index.
/// </summary>
public virtual void TestRAMDirectorySize()
{
    Directory fsDir = NewFSDirectory(IndexDir);
    MockDirectoryWrapper ramDir = new MockDirectoryWrapper(Random(), new RAMDirectory(fsDir, NewIOContext(Random())));
    fsDir.Dispose();

    IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(IndexWriterConfig.OpenMode_e.APPEND));
    writer.ForceMerge(1);

    Assert.AreEqual(ramDir.SizeInBytes(), ramDir.RecomputedSizeInBytes);

    // Hammer the writer from several threads, then re-check the size accounting.
    ThreadClass[] threads = new ThreadClass[NumThreads];
    for (int threadNum = 0; threadNum < NumThreads; threadNum++)
    {
        threads[threadNum] = new ThreadAnonymousInnerClassHelper(this, writer, threadNum);
    }
    foreach (ThreadClass thread in threads)
    {
        thread.Start();
    }
    foreach (ThreadClass thread in threads)
    {
        thread.Join();
    }

    writer.ForceMerge(1);
    Assert.AreEqual(ramDir.SizeInBytes(), ramDir.RecomputedSizeInBytes);

    writer.Dispose();
}
/// <summary>
/// Creates a wrapper around the given <see cref="IndexOutput"/>, recording the
/// owning <see cref="MockDirectoryWrapper"/>, the file name, and the delegate output.
/// </summary>
public MockIndexOutputWrapper(MockDirectoryWrapper dir, IndexOutput @delegate, string name)
{
    this.Dir = dir;
    this.Name = name;
    this.@delegate = @delegate;
}
/// <summary>
/// Pass-through constructor; all construction work is done by the base
/// <see cref="MockIndexInputWrapper"/>. The slow-close behavior presumably lives in
/// this class's Dispose override (not visible here — confirm in the full class).
/// </summary>
public SlowClosingMockIndexInputWrapper(MockDirectoryWrapper dir, string name, IndexInput @delegate)
    : base(dir, name, @delegate)
{
}
/// <summary>
/// Creates a buffered wrapper with the given buffer size around the given
/// <see cref="IndexOutput"/>, recording the owning <see cref="MockDirectoryWrapper"/>.
/// </summary>
public BufferedIndexOutputWrapper(MockDirectoryWrapper outerInstance, int bufferSize, IndexOutput io)
    : base(bufferSize)
{
    this.OuterInstance = outerInstance;
    this.Io = io;
}
/// <summary>
/// Repeatedly injects flush failures via <see cref="FailOnlyOnFlush"/> and verifies
/// the IndexWriter survives them: after each injected IOException is cleared, the
/// writer's document count (including extras added while provoking failures) must
/// be exact, and the final index must contain all 200 + extra documents.
/// </summary>
public virtual void TestFlushExceptions()
{
#if NETCOREAPP2_0
    fail("LUCENENET TODO: Causing fatal crashes intermittently on NETCOREAPP2.0");
#endif
    MockDirectoryWrapper directory = NewMockDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush(this);
    directory.FailOn(failure);

    IndexWriter writer = new IndexWriter(directory, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetMaxBufferedDocs(2));
    Document doc = new Document();
    Field idField = NewStringField("id", "", Field.Store.YES);
    doc.Add(idField);

    // Counts documents added beyond the planned 200 while provoking failures,
    // so the doc-count assertions below can account for them.
    int extraCount = 0;

    for (int i = 0; i < 10; i++)
    {
        if (VERBOSE)
        {
            Console.WriteLine("TEST: iter=" + i);
        }
        for (int j = 0; j < 20; j++)
        {
            idField.SetStringValue(Convert.ToString(i * 20 + j));
            writer.AddDocument(doc);
        }

        // must cycle here because sometimes the merge flushes
        // the doc we just added and so there's nothing to
        // flush, and we don't hit the exception
        while (true)
        {
            writer.AddDocument(doc);
            failure.SetDoFail();
            try
            {
                writer.Flush(true, true);
                // Flush succeeded with failing enabled: either the injected failure
                // never fired (a test bug) or nothing needed flushing — try again.
                if (failure.hitExc)
                {
                    Assert.Fail("failed to hit IOException");
                }
                extraCount++;
            }
            catch (IOException ioe)
            {
                // The injected failure fired as intended; disable it and move on.
                if (VERBOSE)
                {
                    Console.WriteLine(ioe.StackTrace);
                }
                failure.ClearDoFail();
                break;
            }
        }
        Assert.AreEqual(20 * (i + 1) + extraCount, writer.NumDocs);
    }

    writer.Dispose();
    IndexReader reader = DirectoryReader.Open(directory);
    Assert.AreEqual(200 + extraCount, reader.NumDocs);
    reader.Dispose();
    directory.Dispose();
}
/// <summary>
/// Evaluated on the first write of every new file; the base implementation is a
/// no-op. Subclasses override this and throw to inject failures into directory
/// operations (see the FailOnlyOnMerge / FailOnlyOnFlush overrides).
/// </summary>
public virtual void Eval(MockDirectoryWrapper dir)
{
}