/// <summary>
/// Verifies that a RAMDirectory copied from an FSDirectory reports a size in
/// bytes that matches a full recomputation, both before and after concurrent
/// indexing by <c>numThreads</c> worker threads.
/// </summary>
public virtual void TestRAMDirectorySize()
{
    // Copy the pre-built index from disk into a RAMDirectory, then close the FS dir.
    Directory dir = NewFSDirectory(indexDir);
    MockDirectoryWrapper ramDir = new MockDirectoryWrapper(Random, new RAMDirectory(dir, NewIOContext(Random)));
    dir.Dispose();

    IndexWriter writer = new IndexWriter(ramDir, (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))).SetOpenMode(OpenMode.APPEND));
    writer.ForceMerge(1);

    // Cached size must agree with a from-scratch recomputation.
    Assert.AreEqual(ramDir.GetSizeInBytes(), ramDir.GetRecomputedSizeInBytes());

    ThreadJob[] threads = new ThreadJob[numThreads];
    for (int i = 0; i < numThreads; i++)
    {
        int num = i; // capture loop variable for the thread closure
        threads[i] = new ThreadAnonymousClass(this, writer, num);
    }
    for (int i = 0; i < numThreads; i++)
    {
        threads[i].Start();
    }
    for (int i = 0; i < numThreads; i++)
    {
        threads[i].Join();
    }

    writer.ForceMerge(1);
    // After concurrent work and a merge, the sizes must still agree.
    Assert.AreEqual(ramDir.GetSizeInBytes(), ramDir.GetRecomputedSizeInBytes());

    writer.Dispose();
}
/// <summary>
/// Hammers the FieldCache from several threads to check thread safety; a
/// Barrier's post-phase action (RunnableAnonymousClass) runs between cycles
/// once all threads arrive.
/// </summary>
public virtual void TestGetDocsWithFieldThreadSafety()
{
    IFieldCache cache = FieldCache.DEFAULT;
    cache.PurgeAllCaches(); // start from a clean cache

    int NUM_THREADS = 3;
    ThreadJob[] threads = new ThreadJob[NUM_THREADS];
    AtomicBoolean failed = new AtomicBoolean();
    AtomicInt32 iters = new AtomicInt32();
    int NUM_ITER = 200 * RandomMultiplier;
    // Post-phase action executes once per barrier cycle, after every thread arrives.
    Barrier restart = new Barrier(NUM_THREADS, (barrier) => new RunnableAnonymousClass(this, cache, iters).Run());
    for (int threadIDX = 0; threadIDX < NUM_THREADS; threadIDX++)
    {
        threads[threadIDX] = new ThreadAnonymousClass(this, cache, failed, iters, NUM_ITER, restart);
        threads[threadIDX].Start();
    }

    for (int threadIDX = 0; threadIDX < NUM_THREADS; threadIDX++)
    {
        threads[threadIDX].Join();
    }
    Assert.IsFalse(failed); // any worker flags failure via this shared atomic
}
/// <summary>
/// Runs several threads that work against the same RandomIndexWriter for
/// ~0.5 seconds, verifying that commit is thread-safe (no thread flags a
/// failure).
/// </summary>
public virtual void TestCommitThreadSafety()
{
    const int NUM_THREADS = 5;
    const double RUN_SEC = 0.5;
    var dir = NewDirectory();
    var w = new RandomIndexWriter(Random, dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetMergePolicy(NewLogMergePolicy()));
    TestUtil.ReduceOpenFiles(w.IndexWriter);
    w.Commit();
    var failed = new AtomicBoolean();
    var threads = new ThreadJob[NUM_THREADS];
    // NOTE(review): Environment.TickCount is a 32-bit value that wraps (and goes
    // negative) after ~24.9 days of uptime, which can corrupt this deadline.
    // The companion version of this test uses J2N.Time.NanoTime(); switching here
    // requires ThreadAnonymousClass to use the same clock — verify before changing.
    long endTime = Environment.TickCount + ((long)(RUN_SEC * 1000));
    for (int i = 0; i < NUM_THREADS; i++)
    {
        int finalI = i; // capture loop variable for the thread closure
        threads[i] = new ThreadAnonymousClass(dir, w, failed, endTime, finalI, NewStringField);
        threads[i].Start();
    }
    for (int i = 0; i < NUM_THREADS; i++)
    {
        threads[i].Join();
    }
    Assert.IsFalse(failed);
    w.Dispose();
    dir.Dispose();
}
/// <summary>
/// Runs several threads that work against the same RandomIndexWriter for
/// ~0.5 seconds, verifying that commit is thread-safe (no thread flags a
/// failure).
/// </summary>
public virtual void TestCommitThreadSafety()
{
    const int NUM_THREADS = 5;
    const double RUN_SEC = 0.5;
    var dir = NewDirectory();
    var w = new RandomIndexWriter(Random, dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetMergePolicy(NewLogMergePolicy()));
    TestUtil.ReduceOpenFiles(w.IndexWriter);
    w.Commit();
    var failed = new AtomicBoolean();
    var threads = new ThreadJob[NUM_THREADS];
    long endTime = (J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) + ((long)(RUN_SEC * 1000)); // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
    for (int i = 0; i < NUM_THREADS; i++)
    {
        int finalI = i; // capture loop variable for the thread closure
        threads[i] = new ThreadAnonymousClass(dir, w, failed, endTime, finalI, NewStringField);
        threads[i].Start();
    }
    for (int i = 0; i < NUM_THREADS; i++)
    {
        threads[i].Join();
    }
    Assert.IsFalse(failed);
    w.Dispose();
    dir.Dispose();
}
/// <summary>
/// Randomly toggles stalling off on a DocumentsWriterStallControl while
/// several worker threads exercise it; a 100-second wall-clock cap keeps the
/// loop from spinning forever (Join would then fail).
/// </summary>
public virtual void TestRandom()
{
    DocumentsWriterStallControl ctrl = new DocumentsWriterStallControl();
    ctrl.UpdateStalled(false);

    ThreadJob[] stallThreads = new ThreadJob[AtLeast(3)];
    for (int i = 0; i < stallThreads.Length; i++)
    {
        int stallProbability = 1 + Random.Next(10); // 1..10
        stallThreads[i] = new ThreadAnonymousClass(ctrl, stallProbability);
    }
    Start(stallThreads);
    long time = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond; // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
    /*
     * use a 100 sec timeout to make sure we not hang forever. join will fail in
     * that case
     */
    while (((J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - time) < 100 * 1000 && !Terminated(stallThreads)) // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results
    {
        ctrl.UpdateStalled(false);
        if (Random.NextBoolean())
        {
            Thread.Sleep(0); // just yield
        }
        else
        {
            Thread.Sleep(1);
        }
    }
    Join(stallThreads);
}
/// <summary>
/// Randomly toggles stalling off on a DocumentsWriterStallControl while
/// several worker threads exercise it; a 100-second wall-clock cap keeps the
/// loop from spinning forever (Join would then fail).
/// </summary>
public virtual void TestRandom()
{
    DocumentsWriterStallControl ctrl = new DocumentsWriterStallControl();
    ctrl.UpdateStalled(false);

    ThreadJob[] stallThreads = new ThreadJob[AtLeast(3)];
    for (int i = 0; i < stallThreads.Length; i++)
    {
        int stallProbability = 1 + Random.Next(10); // 1..10
        stallThreads[i] = new ThreadAnonymousClass(ctrl, stallProbability);
    }
    Start(stallThreads);
    // LUCENENET: Use NanoTime() rather than Environment.TickCount for more
    // accurate/reliable results. TickCount is a 32-bit value that wraps (and
    // goes negative) after ~24.9 days of uptime, which could corrupt this
    // deadline. The value is only used locally in this method, so the clock
    // change is self-contained.
    long time = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond;
    /*
     * use a 100 sec timeout to make sure we not hang forever. join will fail in
     * that case
     */
    while (((J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - time) < 100 * 1000 && !Terminated(stallThreads))
    {
        ctrl.UpdateStalled(false);
        if (Random.NextBoolean())
        {
            Thread.Sleep(0); // just yield
        }
        else
        {
            Thread.Sleep(1);
        }
    }
    Join(stallThreads);
}
/// <summary>
/// Repeatedly seeds 200 docs single-threaded, then runs NUM_THREADS concurrent
/// indexing threads, asserting the exact expected doc count after each
/// iteration and that the index reopens as a single segment.
/// </summary>
public virtual void RunTest(Random random, Directory directory)
{
    IndexWriter writer = new IndexWriter(directory, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).SetOpenMode(OpenMode.CREATE).SetMaxBufferedDocs(2)).SetMergePolicy(NewLogMergePolicy()));

    for (int iter = 0; iter < NUM_ITER; iter++)
    {
        int iterFinal = iter; // capture for the thread closures

        // Effectively disable merging while seeding the first 200 docs.
        ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 1000;

        FieldType customType = new FieldType(StringField.TYPE_STORED);
        customType.OmitNorms = true;

        for (int i = 0; i < 200; i++)
        {
            Document d = new Document();
            d.Add(NewField("id", Convert.ToString(i), customType));
            d.Add(NewField("contents", English.Int32ToEnglish(i), customType));
            writer.AddDocument(d);
        }

        // Re-enable aggressive merging for the concurrent phase.
        ((LogMergePolicy)writer.Config.MergePolicy).MergeFactor = 4;

        ThreadJob[] threads = new ThreadJob[NUM_THREADS];

        for (int i = 0; i < NUM_THREADS; i++)
        {
            int iFinal = i;
            IndexWriter writerFinal = writer;
            threads[i] = new ThreadAnonymousClass(this, iterFinal, customType, iFinal, writerFinal);
        }

        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Start();
        }
        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Join();
        }

        Assert.IsTrue(!failed);

        // Expected: per completed iteration, the 200 seeded docs plus the docs
        // added by the worker threads.
        int expectedDocCount = (int)((1 + iter) * (200 + 8 * NUM_ITER2 * (NUM_THREADS / 2.0) * (1 + NUM_THREADS)));

        Assert.AreEqual(expectedDocCount, writer.NumDocs, "index=" + writer.SegString() + " numDocs=" + writer.NumDocs + " maxDoc=" + writer.MaxDoc + " config=" + writer.Config);
        Assert.AreEqual(expectedDocCount, writer.MaxDoc, "index=" + writer.SegString() + " numDocs=" + writer.NumDocs + " maxDoc=" + writer.MaxDoc + " config=" + writer.Config);

        writer.Dispose();
        writer = new IndexWriter(directory, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(2));

        DirectoryReader reader = DirectoryReader.Open(directory);
        Assert.AreEqual(1, reader.Leaves.Count, "reader=" + reader); // merged down to one segment
        Assert.AreEqual(expectedDocCount, reader.NumDocs);
        reader.Dispose();
    }
    writer.Dispose();
}
/// <summary>
/// Verifies a waiter on waitForGeneration is not starved: while an update is
/// stalled inside the latched writer, a waiter thread must still reach the
/// target generation within one second or the test fails as deadlocked.
/// </summary>
public virtual void TestThreadStarvationNoDeleteNRTReader()
{
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
    conf.SetMergePolicy(Random.NextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES);
    Directory d = NewDirectory();
    CountdownEvent latch = new CountdownEvent(1);
    CountdownEvent signal = new CountdownEvent(1);

    LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
    TrackingIndexWriter writer = new TrackingIndexWriter(_writer);
    SearcherManager manager = new SearcherManager(_writer, false, null);
    Document doc = new Document();
    doc.Add(NewTextField("test", "test", Field.Store.YES));
    writer.AddDocument(doc);
    manager.MaybeRefresh();
    var t = new ThreadAnonymousClass(this, latch, signal, writer, manager);
    t.Start();
    _writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through
    long lastGen = writer.UpdateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen

    assertFalse(manager.IsSearcherCurrent()); // false since there is a delete in the queue

    IndexSearcher searcher = manager.Acquire();
    try
    {
        assertEquals(2, searcher.IndexReader.NumDocs);
    }
    finally
    {
        manager.Release(searcher);
    }
    ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<IndexSearcher>(writer, manager, 0.01, 0.01);
    thread.Start(); // start reopening
    if (Verbose)
    {
        Console.WriteLine("waiting now for generation " + lastGen);
    }

    AtomicBoolean finished = new AtomicBoolean(false);
    var waiter = new ThreadAnonymousClass2(this, lastGen, thread, finished);
    waiter.Start();
    manager.MaybeRefresh();
    waiter.Join(1000); // the waiter must reach lastGen within 1 second
    if (!finished)
    {
        waiter.Interrupt();
        fail("thread deadlocked on waitForGeneration");
    }
    thread.Dispose();
    thread.Join();
    IOUtils.Dispose(manager, _writer, d);
}
/// <summary>
/// Entry point of the lock-verification server. Usage: <c>bindToIp clients</c>.
/// Binds to an OS-assigned port, publishes it via the
/// <c>lockverifyserver.port</c> environment variable, accepts exactly
/// <c>clients</c> connections (one handler thread each), fires a starting gun,
/// and waits for all handlers to finish.
/// </summary>
public static void Main(string[] args)
{
    if (args.Length != 2)
    {
        // LUCENENET specific - our wrapper console shows the correct usage
        throw new ArgumentException();
        //Console.WriteLine("Usage: java Lucene.Net.Store.LockVerifyServer bindToIp clients\n");
        //Environment.FailFast("1");
    }

    int arg = 0;
    string hostname = args[arg++];
    int maxClients = Convert.ToInt32(args[arg++], CultureInfo.InvariantCulture);

    IPAddress ipAddress = IPAddress.Parse(hostname);

    using Socket s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
    s.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReuseAddress, 1);
    s.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReceiveTimeout, 30000);// SoTimeout = 30000; // initially 30 secs to give clients enough time to startup
    s.Bind(new IPEndPoint(ipAddress, 0)); // port 0 = let the OS pick a free port
    s.Listen(maxClients);
    Console.WriteLine("Listening on " + ((IPEndPoint)s.LocalEndPoint).Port.ToString() + "...");

    // we set the port as a sysprop, so the ANT task can read it. For that to work, this server must run in-process:
    Environment.SetEnvironmentVariable("lockverifyserver.port", ((IPEndPoint)s.LocalEndPoint).Port.ToString(CultureInfo.InvariantCulture));

    object localLock = new object();
    int[] lockedID = new int[1];
    lockedID[0] = -1; // -1 = no client currently holds the lock
    CountdownEvent startingGun = new CountdownEvent(1);
    ThreadJob[] threads = new ThreadJob[maxClients];

    for (int count = 0; count < maxClients; count++)
    {
        Socket cs = s.Accept(); // blocks until the next client connects
        threads[count] = new ThreadAnonymousClass(localLock, lockedID, startingGun, cs);
        threads[count].Start();
    }

    // start
    Console.WriteLine("All clients started, fire gun...");
    startingGun.Signal();

    // wait for all threads to finish
    foreach (ThreadJob t in threads)
    {
        t.Join();
    }

    // cleanup sysprop
    Environment.SetEnvironmentVariable("lockverifyserver.port", null);
    Console.WriteLine("Server terminated.");
}
/// <summary>
/// Indexes numeric, binary, and sorted doc values for many documents,
/// force-merges to one segment, then has several threads concurrently read
/// the values back (comparing against the expected lists in the workers).
/// </summary>
public virtual void Test()
{
    Directory dir = NewDirectory();
    IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetMergePolicy(NewLogMergePolicy()));
    // Expected values, recorded in insertion order, for the three DV fields.
    IList<long?> numbers = new List<long?>();
    IList<BytesRef> binary = new List<BytesRef>();
    IList<BytesRef> sorted = new List<BytesRef>();
    int numDocs = AtLeast(100);
    for (int i = 0; i < numDocs; i++)
    {
        Document d = new Document();
        long number = Random.NextInt64();
        d.Add(new NumericDocValuesField("number", number));
        BytesRef bytes = new BytesRef(TestUtil.RandomRealisticUnicodeString(Random));
        d.Add(new BinaryDocValuesField("bytes", bytes));
        binary.Add(bytes);
        bytes = new BytesRef(TestUtil.RandomRealisticUnicodeString(Random));
        d.Add(new SortedDocValuesField("sorted", bytes));
        sorted.Add(bytes);
        w.AddDocument(d);
        numbers.Add(number);
    }

    w.ForceMerge(1);
    IndexReader r = w.GetReader();
    w.Dispose();

    Assert.AreEqual(1, r.Leaves.Count);
    AtomicReader ar = (AtomicReader)r.Leaves[0].Reader;

    int numThreads = TestUtil.NextInt32(Random, 2, 5);
    IList<ThreadJob> threads = new List<ThreadJob>();
    CountdownEvent startingGun = new CountdownEvent(1);
    for (int t = 0; t < numThreads; t++)
    {
        Random threadRandom = new Random(Random.Next()); // per-thread RNG, seeded from the shared one
        ThreadJob thread = new ThreadAnonymousClass(this, numbers, binary, sorted, numDocs, ar, startingGun, threadRandom);
        thread.Start();
        threads.Add(thread);
    }

    startingGun.Signal(); // release all reader threads at once

    foreach (ThreadJob thread in threads)
    {
        thread.Join();
    }

    r.Dispose();
    dir.Dispose();
}
/// <summary>
/// Runs the search threads until <paramref name="stopTime"/> while a
/// background thread concurrently reopens the searcher, then waits for the
/// reopener to finish.
/// </summary>
protected override void DoSearching(TaskScheduler es, long stopTime)
{
    var reopener = new ThreadAnonymousClass(this, stopTime)
    {
        IsBackground = true
    };
    reopener.Start();

    RunSearchThreads(stopTime);

    reopener.Join();
}
/// <summary>
/// Runs a background indexing thread for ~1 second while repeatedly taking
/// snapshot-based backups, verifying that premature Snapshot() misuse throws
/// and that no unreferenced files remain afterwards.
/// </summary>
private void RunTest(Random random, Directory dir)
{
    // Run for ~1 seconds
    // NOTE(review): Environment.TickCount is a 32-bit value that wraps (goes
    // negative) after ~24.9 days of uptime. stopTime is consumed by
    // ThreadAnonymousClass, so both sides must switch clocks together —
    // left as-is here; verify the thread class before changing.
    long stopTime = Environment.TickCount + 1000;

    SnapshotDeletionPolicy dp = DeletionPolicy;
    IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetIndexDeletionPolicy(dp).SetMaxBufferedDocs(2));

    // Verify we catch misuse: snapshotting before the first commit must throw.
    try
    {
        dp.Snapshot();
        Assert.Fail("did not hit exception");
    }
    catch (InvalidOperationException) // LUCENENET: exception variable removed (was unused, suppressed by pragma 168)
    {
        // expected
    }
    dp = (SnapshotDeletionPolicy)writer.Config.IndexDeletionPolicy;
    writer.Commit();

    ThreadJob t = new ThreadAnonymousClass(stopTime, writer, NewField);

    t.Start();

    // While the above indexing thread is running, take many backups:
    do
    {
        BackupIndex(dir, dp);
        Thread.Sleep(20);
    } while (t.IsAlive);

    t.Join();

    // Add one more document to force writer to commit a final segment, so
    // deletion policy has a chance to delete again:
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.StoreTermVectors = true;
    customType.StoreTermVectorPositions = true;
    customType.StoreTermVectorOffsets = true;
    doc.Add(NewField("content", "aaa", customType));
    writer.AddDocument(doc);

    // Make sure we don't have any leftover files in the directory:
    writer.Dispose();
    TestIndexWriter.AssertNoUnreferencedFiles(dir, "some files were not deleted but should have been");
}
/// <summary>
/// Stress-tests LiveFieldValues: multiple worker threads run randomized
/// add/delete/reopen operations against a shared writer and searcher manager;
/// at the end a refresh must leave no pending live values.
/// </summary>
public virtual void Test()
{
    Directory dir = NewFSDirectory(CreateTempDir("livefieldupdates"));
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));

    IndexWriter w = new IndexWriter(dir, iwc);

    SearcherManager mgr = new SearcherManager(w, true, new SearcherFactoryAnonymousClass());

    const int missing = -1; // sentinel for ids with no live value

    LiveFieldValues<IndexSearcher, int?> rt = new LiveFieldValuesAnonymousClass(mgr, missing);

    int numThreads = TestUtil.NextInt32(Random, 2, 5);
    if (Verbose)
    {
        Console.WriteLine(numThreads + " threads");
    }

    CountdownEvent startingGun = new CountdownEvent(1);

    IList<ThreadJob> threads = new JCG.List<ThreadJob>();

    int iters = AtLeast(1000);
    int idCount = TestUtil.NextInt32(Random, 100, 10000);

    // Randomized per-operation probabilities passed to the workers.
    double reopenChance = Random.NextDouble() * 0.01;
    double deleteChance = Random.NextDouble() * 0.25;
    double addChance = Random.NextDouble() * 0.5;

    for (int t = 0; t < numThreads; t++)
    {
        int threadID = t;
        Random threadRandom = new Random(Random.Next()); // per-thread RNG
        ThreadJob thread = new ThreadAnonymousClass(w, mgr, missing, rt, startingGun, iters, idCount, reopenChance, deleteChance, addChance, t, threadID, threadRandom);
        threads.Add(thread);
        thread.Start();
    }

    startingGun.Signal(); // release all workers at once

    foreach (ThreadJob thread in threads)
    {
        thread.Join();
    }
    mgr.MaybeRefresh();
    Assert.AreEqual(0, rt.Count); // the refresh must have drained every pending value

    rt.Dispose();
    mgr.Dispose();
    w.Dispose();
    dir.Dispose();
}
/// <summary>
/// Starts a thread and deliberately abandons it so that it outlives the test,
/// exercising thread-leak detection.
/// </summary>
public virtual void TestThreadLeak()
{
    var leaked = new ThreadAnonymousClass(this);
    leaked.Start();

    // Spin until the thread has actually started running.
    while (!leaked.IsAlive)
    {
        Thread.Yield();
    }

    // once alive, leave it to run outside of the test scope.
}
/// <summary>
/// Verifies the given analyzer is thread-safe: builds a term -> sort-key map
/// serially, then starts multiple threads that regenerate keys from the same
/// map (the workers compare against the serial results).
/// </summary>
public virtual void AssertThreadSafe(Analyzer analyzer)
{
    int numTestPoints = 100;
    int numThreads = TestUtil.NextInt32(Random, 3, 5);
    Dictionary<string, BytesRef> map = new Dictionary<string, BytesRef>();

    // create a map<String,SortKey> up front.
    // then with multiple threads, generate sort keys for all the keys in the map
    // and ensure they are the same as the ones we produced in serial fashion.

    for (int i = 0; i < numTestPoints; i++)
    {
        string term = TestUtil.RandomSimpleString(Random);
        Exception priorException = null; // LUCENENET: No need to cast to IOException
        TokenStream ts = analyzer.GetTokenStream("fake", new StringReader(term));
        try
        {
            ITermToBytesRefAttribute termAtt = ts.AddAttribute<ITermToBytesRefAttribute>();
            BytesRef bytes = termAtt.BytesRef;
            ts.Reset();
            Assert.IsTrue(ts.IncrementToken()); // each term must yield exactly one token
            termAtt.FillBytesRef();
            // ensure we make a copy of the actual bytes too
            map[term] = BytesRef.DeepCopyOf(bytes);
            Assert.IsFalse(ts.IncrementToken());
            ts.End();
        }
        catch (Exception e) when (e.IsIOException())
        {
            priorException = e;
        }
        finally
        {
            // Dispose the stream without masking any exception captured above.
            IOUtils.DisposeWhileHandlingException(priorException, ts);
        }
    }

    ThreadJob[] threads = new ThreadJob[numThreads];
    for (int i = 0; i < numThreads; i++)
    {
        threads[i] = new ThreadAnonymousClass(analyzer, map);
    }
    for (int i = 0; i < numThreads; i++)
    {
        threads[i].Start();
    }
    for (int i = 0; i < numThreads; i++)
    {
        threads[i].Join();
    }
}
/// <summary>
/// Runs WriteLineDocTask from 10 threads against one output file, then
/// verifies: one header line, one data line per thread, consistent thread
/// names within each line, and no extra lines.
/// </summary>
public void TestMultiThreaded()
{
    FileInfo file = new FileInfo(Path.Combine(getWorkDir().FullName, "one-line"));
    PerfRunData runData = createPerfRunData(file, false, typeof(ThreadingDocMaker).AssemblyQualifiedName);
    ThreadJob[] threads = new ThreadJob[10];
    using (WriteLineDocTask wldt = new WriteLineDocTask(runData))
    {
        for (int i = 0; i < threads.Length; i++)
        {
            threads[i] = new ThreadAnonymousClass("t" + i, wldt);
        }

        foreach (ThreadJob t in threads)
        {
            t.Start();
        }
        foreach (ThreadJob t in threads)
        {
            t.Join();
        }
    } // disposing the task flushes/closes the output file

    ISet<String> ids = new JCG.HashSet<string>();
    // LUCENENET: using statement replaces the original try/finally dispose.
    using (TextReader br = new StreamReader(new FileStream(file.FullName, FileMode.Open, FileAccess.Read, FileShare.None), Encoding.UTF8))
    {
        String line = br.ReadLine();
        assertHeaderLine(line); // header line is written once, no matter how many threads there are
        for (int i = 0; i < threads.Length; i++)
        {
            line = br.ReadLine();
            assertNotNull($"line for index {i} is missing", line); // LUCENENET specific - ensure the line is there before splitting
            String[] parts = line.Split(WriteLineDocTask.SEP).TrimEnd();
            assertEquals(line, 3, parts.Length);
            // check that all thread names written are the same in the same line
            String tname = parts[0].Substring(parts[0].IndexOf('_'));
            ids.add(tname);
            assertEquals(tname, parts[1].Substring(parts[1].IndexOf('_')));
            assertEquals(tname, parts[2].Substring(parts[2].IndexOf('_')));
        }
        // only threads.length lines should exist
        assertNull(br.ReadLine());
        assertEquals(threads.Length, ids.size());
    }
}
/// <summary>
/// Calls ForceMerge(1) while a background thread keeps modifying the index,
/// and verifies the writer does not cascade into repeated merges
/// (mergeCount must stay &lt;= 1).
/// </summary>
public virtual void Test()
{
    Directory d = NewDirectory();
    MockAnalyzer analyzer = new MockAnalyzer(Random);
    analyzer.MaxTokenLength = TestUtil.NextInt32(Random, 1, IndexWriter.MAX_TERM_LENGTH);
    MyIndexWriter w = new MyIndexWriter(d, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));

    // Try to make an index that requires merging:
    w.Config.SetMaxBufferedDocs(TestUtil.NextInt32(Random, 2, 11));
    int numStartDocs = AtLeast(20);
    LineFileDocs docs = new LineFileDocs(Random, DefaultCodecSupportsDocValues);
    for (int docIDX = 0; docIDX < numStartDocs; docIDX++)
    {
        w.AddDocument(docs.NextDoc());
    }
    MergePolicy mp = w.Config.MergePolicy;
    // Make one merge wide enough to cover every current segment.
    int mergeAtOnce = 1 + w.segmentInfos.Count;
    if (mp is TieredMergePolicy)
    {
        ((TieredMergePolicy)mp).MaxMergeAtOnce = mergeAtOnce;
    }
    else if (mp is LogMergePolicy)
    {
        ((LogMergePolicy)mp).MergeFactor = mergeAtOnce;
    }
    else
    {
        // skip test: no merge-width knob to configure on this policy
        w.Dispose();
        d.Dispose();
        return;
    }

    AtomicBoolean doStop = new AtomicBoolean();
    w.Config.SetMaxBufferedDocs(2);
    ThreadJob t = new ThreadAnonymousClass(this, w, numStartDocs, docs, doStop);
    t.Start();
    w.ForceMerge(1);
    doStop.Value = true; // signal the background thread to stop
    t.Join();
    Assert.IsTrue(w.mergeCount <= 1, "merge count is " + w.mergeCount);
    w.Dispose();
    d.Dispose();
    docs.Dispose();
}
/// <summary>
/// Multiple threads concurrently exercise rollback and commit against a
/// shared writer handle (an AtomicReference, so a thread can replace the
/// writer), coordinated by two locks; no thread may flag a failure.
/// </summary>
public virtual void TestRollbackAndCommitWithThreads()
{
    BaseDirectoryWrapper d = NewDirectory();
    if (d is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)d).PreventDoubleWrite = false;
    }

    int threadCount = TestUtil.NextInt32(Random, 2, 6);

    MockAnalyzer analyzer = new MockAnalyzer(Random);
    analyzer.MaxTokenLength = TestUtil.NextInt32(Random, 1, IndexWriter.MAX_TERM_LENGTH);

    // Shared writer handle — threads may swap in a new writer (see ThreadAnonymousClass).
    AtomicReference<IndexWriter> writerRef = new AtomicReference<IndexWriter>(new IndexWriter(d, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)));
    LineFileDocs docs = new LineFileDocs(Random);
    ThreadJob[] threads = new ThreadJob[threadCount];
    int iters = AtLeast(100);
    AtomicBoolean failed = new AtomicBoolean();
    ReentrantLock rollbackLock = new ReentrantLock();
    ReentrantLock commitLock = new ReentrantLock();
    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        threads[threadID] = new ThreadAnonymousClass(this, d, writerRef, docs, iters, failed, rollbackLock, commitLock);
        threads[threadID].Start();
    }

    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        try
        {
            threads[threadID].Join();
        }
        catch (Exception e)
        {
            // Log and keep joining the remaining threads.
            Console.WriteLine("EXCEPTION in ThreadAnonymousClass: " + Environment.NewLine + e);
        }
    }

    Assert.IsTrue(!failed.Value);
    writerRef.Value.Dispose();
    d.Dispose();
}
/// <summary>
/// Multiple ingester threads index docs using a shared ByteArrayPool; then
/// every indexed payload must decode to the term it was stored under, and
/// all pooled buffers must have been returned to the pool.
/// </summary>
public virtual void TestThreadSafety()
{
    const int numThreads = 5;
    int numDocs = AtLeast(50);
    ByteArrayPool pool = new ByteArrayPool(numThreads, 5);

    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
    const string field = "test";

    ThreadJob[] ingesters = new ThreadJob[numThreads];
    for (int i = 0; i < numThreads; i++)
    {
        ingesters[i] = new ThreadAnonymousClass(this, numDocs, pool, writer, field);
        ingesters[i].Start();
    }

    for (int i = 0; i < numThreads; i++)
    {
        ingesters[i].Join();
    }

    writer.Dispose();
    IndexReader reader = DirectoryReader.Open(dir);
    TermsEnum terms = MultiFields.GetFields(reader).GetTerms(field).GetEnumerator();
    IBits liveDocs = MultiFields.GetLiveDocs(reader);
    DocsAndPositionsEnum tp = null;
    while (terms.MoveNext())
    {
        string termText = terms.Term.Utf8ToString();
        tp = terms.DocsAndPositions(liveDocs, tp); // reuse the enum across terms
        while (tp.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
        {
            int freq = tp.Freq;
            for (int i = 0; i < freq; i++)
            {
                tp.NextPosition();
                BytesRef payload = tp.GetPayload();
                // Each payload must round-trip to the term it was stored under.
                Assert.AreEqual(termText, payload.Utf8ToString());
            }
        }
    }
    reader.Dispose();
    dir.Dispose();
    // Every pooled byte array must have been returned.
    Assert.AreEqual(pool.Count, numThreads);
}
/// <summary>
/// Creates and starts <paramref name="numThreads"/> background indexing
/// threads, returning them so the caller can join later.
/// </summary>
private ThreadJob[] LaunchIndexingThreads(LineFileDocs docs, int numThreads, long stopTime, ISet<string> delIDs, ISet<string> delPackIDs, ConcurrentQueue<SubDocs> allSubDocs)
{
    var launched = new ThreadJob[numThreads];
    for (int slot = 0; slot < numThreads; slot++)
    {
        var worker = new ThreadAnonymousClass(this, docs, stopTime, delIDs, delPackIDs, allSubDocs);
        worker.IsBackground = true;
        worker.Start();
        launched[slot] = worker;
    }
    return launched;
}
/// <summary>
/// LUCENE-5303 regression test: CachedOrdinalsReader's RAM usage must stay
/// constant across threads (ordinals cached per reader core, not per thread).
/// </summary>
public virtual void TestWithThreads()
{
    // LUCENE-5303: OrdinalsCache used the ThreadLocal BinaryDV instead of reader.getCoreCacheKey().
    Store.Directory indexDir = NewDirectory();
    Store.Directory taxoDir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
    IndexWriter writer = new IndexWriter(indexDir, conf);
    var taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
    FacetsConfig config = new FacetsConfig();

    Document doc = new Document();
    doc.Add(new FacetField("A", "1"));
    writer.AddDocument(config.Build(taxoWriter, doc));
    doc = new Document();
    doc.Add(new FacetField("A", "2"));
    writer.AddDocument(config.Build(taxoWriter, doc));

    var reader = DirectoryReader.Open(writer, true);
    CachedOrdinalsReader ordsReader = new CachedOrdinalsReader(new DocValuesOrdinalsReader(FacetsConfig.DEFAULT_INDEX_FIELD_NAME));
    ThreadJob[] threads = new ThreadJob[3];
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i] = new ThreadAnonymousClass(this, "CachedOrdsThread-" + i, reader, ordsReader);
    }

    long ramBytesUsed = 0;
    // NOTE: Start() is immediately followed by Join(), so the threads run one
    // at a time; the check is that RAM usage does not grow per thread.
    foreach (ThreadJob t in threads)
    {
        t.Start();
        t.Join();
        if (ramBytesUsed == 0)
        {
            ramBytesUsed = ordsReader.RamBytesUsed(); // baseline from the first thread
        }
        else
        {
            Assert.AreEqual(ramBytesUsed, ordsReader.RamBytesUsed());
        }
    }

    IOUtils.Dispose(writer, taxoWriter, reader, indexDir, taxoDir);
}
/// <summary>
/// Runs 8 worker threads released simultaneously by a starting gun to flush
/// out ICU concurrency problems.
/// </summary>
public void TestICUConcurrency()
{
    int workerCount = 8;
    CountdownEvent startingGun = new CountdownEvent(1);
    var workers = new ThreadAnonymousClass[workerCount];
    for (int slot = 0; slot < workers.Length; slot++)
    {
        workers[slot] = new ThreadAnonymousClass(startingGun);
        workers[slot].Start();
    }

    // Release every worker at the same moment.
    startingGun.Signal();

    foreach (var worker in workers)
    {
        worker.Join();
    }
}
// run random tests from different threads to make sure the per-thread clones
// don't share mutable data
/// <summary>
/// For every term-vector option set: indexes random documents, verifies them
/// single-threaded, then reads the same term vectors from two concurrent
/// threads; any exception recorded by a worker fails the test.
/// </summary>
public virtual void TestClone()
{
    RandomDocumentFactory docFactory = new RandomDocumentFactory(this, 5, 20);
    int numDocs = AtLeast(100);
    foreach (Options options in ValidOptions())
    {
        RandomDocument[] docs = new RandomDocument[numDocs];
        for (int i = 0; i < numDocs; ++i)
        {
            docs[i] = docFactory.NewDocument(TestUtil.NextInt32(Random, 1, 3), AtLeast(10), options);
        }
        AtomicReference<Exception> exception = new AtomicReference<Exception>();
        using (Directory dir = NewDirectory())
        using (RandomIndexWriter writer = new RandomIndexWriter(Random, dir))
        {
            for (int i = 0; i < numDocs; ++i)
            {
                writer.AddDocument(AddId(docs[i].ToDocument(), "" + i));
            }
            using IndexReader reader = writer.GetReader();
            // Sanity-check each doc single-threaded before going concurrent.
            for (int i = 0; i < numDocs; ++i)
            {
                int docID = DocID(reader, "" + i);
                AssertEquals(docs[i], reader.GetTermVectors(docID));
            }

            ThreadJob[] threads = new ThreadJob[2];
            for (int i = 0; i < threads.Length; ++i)
            {
                threads[i] = new ThreadAnonymousClass(this, numDocs, docs, reader, exception);
            }
            foreach (ThreadJob thread in threads)
            {
                thread.Start();
            }
            foreach (ThreadJob thread in threads)
            {
                thread.Join();
            }
        } // writer.Dispose();, dir.Dispose();
        Assert.IsNull(exception.Value, "One thread threw an exception");
    }
}
/// <summary>
/// Runs N_THREADS workers (optionally with a timeout) and verifies that every
/// worker recorded success in the shared bit set.
/// </summary>
private void DoTestMultiThreads(bool withTimeout)
{
    var workers = new ThreadJob[N_THREADS];
    var success = new OpenBitSet(N_THREADS);
    for (int slot = 0; slot < workers.Length; ++slot)
    {
        int num = slot; // capture for the worker
        workers[num] = new ThreadAnonymousClass(this, success, withTimeout, num);
    }

    foreach (ThreadJob worker in workers)
    {
        worker.Start();
    }
    foreach (ThreadJob worker in workers)
    {
        worker.Join();
    }

    // Each worker sets its own bit on success, so cardinality == thread count.
    assertEquals("some threads failed!", N_THREADS, success.Cardinality());
}
/// <summary>
/// Three concurrent threads each add a doc-values field of a different type
/// under the same field name ("foo"); the type conflict must be detected
/// (hitExc set) by at least one thread.
/// </summary>
public virtual void TestMixedTypesDifferentThreads()
{
    Directory dir = NewDirectory();
    IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));

    CountdownEvent startingGun = new CountdownEvent(1);
    AtomicBoolean hitExc = new AtomicBoolean();
    ThreadJob[] threads = new ThreadJob[3];
    for (int i = 0; i < 3; i++)
    {
        Field field;
        // One conflicting doc-values type per thread, same field name.
        if (i == 0)
        {
            field = new SortedDocValuesField("foo", new BytesRef("hello"));
        }
        else if (i == 1)
        {
            field = new NumericDocValuesField("foo", 0);
        }
        else
        {
            field = new BinaryDocValuesField("foo", new BytesRef("bazz"));
        }
        Document doc = new Document();
        doc.Add(field);

        threads[i] = new ThreadAnonymousClass(this, w, startingGun, hitExc, doc);
        threads[i].Start();
    }

    startingGun.Signal(); // release all writers at once

    foreach (ThreadJob t in threads)
    {
        t.Join();
    }
    Assert.IsTrue(hitExc); // the type conflict must have been observed
    w.Dispose();
    dir.Dispose();
}
/// <summary>
/// Builds 1000 random AutomatonQuery instances and hashes them concurrently
/// from several threads released by a starting gun.
/// </summary>
public virtual void TestHashCodeWithThreads()
{
    var queries = new AutomatonQuery[1000];
    for (int q = 0; q < queries.Length; q++)
    {
        queries[q] = new AutomatonQuery(new Term("bogus", "bogus"), AutomatonTestUtil.RandomAutomaton(Random));
    }

    CountdownEvent startingGun = new CountdownEvent(1);
    int numThreads = TestUtil.NextInt32(Random, 2, 5);
    var threads = new ThreadJob[numThreads];
    for (int slot = 0; slot < numThreads; slot++)
    {
        var worker = new ThreadAnonymousClass(this, queries, startingGun);
        threads[slot] = worker;
        worker.Start();
    }

    // Release every worker at the same moment.
    startingGun.Signal();

    foreach (ThreadJob worker in threads)
    {
        worker.Join();
    }
}
/// <summary>
/// Holds the delete queue's private globalBufferLock (obtained via
/// reflection) while a helper thread runs against the queue, so the global
/// slice cannot be updated; then verifies the partially-applied slice is
/// reconciled correctly by TryApplyGlobalSlice/FreezeGlobalBuffer.
/// </summary>
public virtual void TestPartiallyAppliedGlobalSlice()
{
    DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
    // Reach into the private lock so we can block global-slice updates.
    System.Reflection.FieldInfo field = typeof(DocumentsWriterDeleteQueue).GetField("globalBufferLock", BindingFlags.NonPublic | BindingFlags.GetField | BindingFlags.Instance);
    ReentrantLock @lock = (ReentrantLock)field.GetValue(queue);
    @lock.Lock();
    var t = new ThreadAnonymousClass(this, queue);
    t.Start();
    t.Join(); // the helper thread runs to completion while we still hold the lock
    // NOTE(review): Unlock is not in a finally; if Start/Join threw, the lock
    // would leak. Acceptable in a test, but worth knowing.
    @lock.Unlock();
    Assert.IsTrue(queue.AnyChanges(), "changes in del queue but not in slice yet");
    queue.TryApplyGlobalSlice();
    Assert.IsTrue(queue.AnyChanges(), "changes in global buffer");
    FrozenBufferedUpdates freezeGlobalBuffer = queue.FreezeGlobalBuffer(null);
    Assert.IsTrue(freezeGlobalBuffer.Any());
    Assert.AreEqual(1, freezeGlobalBuffer.termCount);
    Assert.IsFalse(queue.AnyChanges(), "all changes applied");
}
/// <summary>
/// Builds two taxonomies, each populated concurrently by 4 adder threads,
/// then merges the second into the first via AddTaxonomy with a random
/// ordinal map and validates the result.
/// </summary>
private void Dotest(int ncats, int range)
{
    AtomicInt32 numCats = new AtomicInt32(ncats); // shared budget across all adder threads
    Directory[] dirs = new Directory[2];
    for (int i = 0; i < dirs.Length; i++)
    {
        dirs[i] = NewDirectory();
        var tw = new DirectoryTaxonomyWriter(dirs[i]);
        ThreadJob[] addThreads = new ThreadJob[4];
        for (int j = 0; j < addThreads.Length; j++)
        {
            addThreads[j] = new ThreadAnonymousClass(this, range, numCats, tw);
        }

        foreach (ThreadJob t in addThreads)
        {
            t.Start();
        }
        foreach (ThreadJob t in addThreads)
        {
            t.Join();
        }
        tw.Dispose();
    }

    var tw1 = new DirectoryTaxonomyWriter(dirs[0]);
    IOrdinalMap map = randomOrdinalMap();
    tw1.AddTaxonomy(dirs[1], map); // merge dirs[1]'s taxonomy into dirs[0]
    tw1.Dispose();

    validate(dirs[0], dirs[1], map);
    IOUtils.Dispose(dirs);
}
/// <summary>
/// Stress test: one updater thread per field concurrently applies binary
/// doc-values updates ("f{i}") alongside a numeric control field ("cf{i}")
/// that is always set to twice the value; after all threads finish, every
/// live doc must satisfy cf == 2 * f and the docs-with-field bits must agree.
/// </summary>
public virtual void TestStressMultiThreading()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
    IndexWriter writer = new IndexWriter(dir, conf);

    // create index
    int numThreads = TestUtil.NextInt32(Random, 3, 6);
    int numDocs = AtLeast(2000);
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(new StringField("id", "doc" + i, Store.NO));
        // Bucket each doc into one of four update groups with skewed weights.
        double group = Random.NextDouble();
        string g;
        if (group < 0.1)
        {
            g = "g0";
        }
        else if (group < 0.5)
        {
            g = "g1";
        }
        else if (group < 0.8)
        {
            g = "g2";
        }
        else
        {
            g = "g3";
        }
        doc.Add(new StringField("updKey", g, Store.NO));
        for (int j = 0; j < numThreads; j++)
        {
            long value = Random.Next();
            doc.Add(new BinaryDocValuesField("f" + j, TestBinaryDocValuesUpdates.ToBytes(value)));
            doc.Add(new NumericDocValuesField("cf" + j, value * 2)); // control, always updated to f * 2
        }
        writer.AddDocument(doc);
    }

    CountdownEvent done = new CountdownEvent(numThreads);
    AtomicInt32 numUpdates = new AtomicInt32(AtLeast(100));

    // same thread updates a field as well as reopens
    ThreadJob[] threads = new ThreadJob[numThreads];
    for (int i = 0; i < threads.Length; i++)
    {
        string f = "f" + i;
        string cf = "cf" + i;
        threads[i] = new ThreadAnonymousClass(this, "UpdateThread-" + i, writer, numDocs, done, numUpdates, f, cf);
    }

    foreach (ThreadJob t in threads)
    {
        t.Start();
    }
    done.Wait(); // every updater thread signals completion on the countdown
    writer.Dispose();

    // Verify the invariant for every live doc in every segment and field pair.
    DirectoryReader reader = DirectoryReader.Open(dir);
    BytesRef scratch = new BytesRef();
    foreach (AtomicReaderContext context in reader.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        for (int i = 0; i < numThreads; i++)
        {
            BinaryDocValues bdv = r.GetBinaryDocValues("f" + i);
            NumericDocValues control = r.GetNumericDocValues("cf" + i);
            IBits docsWithBdv = r.GetDocsWithField("f" + i);
            IBits docsWithControl = r.GetDocsWithField("cf" + i);
            IBits liveDocs = r.LiveDocs;
            for (int j = 0; j < r.MaxDoc; j++)
            {
                if (liveDocs is null || liveDocs.Get(j))
                {
                    // The pair must be present/absent together.
                    Assert.AreEqual(docsWithBdv.Get(j), docsWithControl.Get(j));
                    if (docsWithBdv.Get(j))
                    {
                        long ctrlValue = control.Get(j);
                        long bdvValue = TestBinaryDocValuesUpdates.GetValue(bdv, j, scratch) * 2;
                        // if (ctrlValue != bdvValue) {
                        //   System.out.println("seg=" + r + ", f=f" + i + ", doc=" + j + ", group=" + r.Document(j).Get("updKey") + ", ctrlValue=" + ctrlValue + ", bdvBytes=" + scratch);
                        // }
                        Assert.AreEqual(ctrlValue, bdvValue);
                    }
                }
            }
        }
    }

    reader.Dispose();
    dir.Dispose();
}
/// <summary>
/// Builds a corpus where term "i" appears exactly i times, indexes it from
/// multiple concurrent threads pulling from a shared queue, then verifies
/// each term's totalTermFreq equals its numeric value.
/// </summary>
public virtual void Test()
{
    IList<string> postingsList = new JCG.List<string>();
    int numTerms = AtLeast(300);
    int maxTermsPerDoc = TestUtil.NextInt32(Random, 10, 20);

    bool isSimpleText = "SimpleText".Equals(TestUtil.GetPostingsFormat("field"), StringComparison.Ordinal);

    IndexWriterConfig iwc = NewIndexWriterConfig(Random, TEST_VERSION_CURRENT, new MockAnalyzer(Random));

    if ((isSimpleText || iwc.MergePolicy is MockRandomMergePolicy) && (TestNightly || RandomMultiplier > 1))
    {
        // Otherwise test can take way too long (> 2 hours)
        //numTerms /= 2;

        // LUCENENET specific - To keep this under the 1 hour free limit
        // of Azure DevOps, this was reduced from /2 to /6.
        numTerms /= 6;
    }

    if (Verbose)
    {
        Console.WriteLine("maxTermsPerDoc=" + maxTermsPerDoc);
        Console.WriteLine("numTerms=" + numTerms);
    }

    // Term "i" is added i times, so its total term frequency is exactly i.
    for (int i = 0; i < numTerms; i++)
    {
        string term = Convert.ToString(i, CultureInfo.InvariantCulture);
        for (int j = 0; j < i; j++)
        {
            postingsList.Add(term);
        }
    }
    postingsList.Shuffle(Random);

    ConcurrentQueue<string> postings = new ConcurrentQueue<string>(postingsList);

    Directory dir = NewFSDirectory(CreateTempDir(GetFullMethodName()));

    RandomIndexWriter iw = new RandomIndexWriter(Random, dir, iwc);

    int threadCount = TestUtil.NextInt32(Random, 1, 5);
    if (Verbose)
    {
        Console.WriteLine("config: " + iw.IndexWriter.Config);
        Console.WriteLine("threadCount=" + threadCount);
    }

    Field prototype = NewTextField("field", "", Field.Store.NO);
    FieldType fieldType = new FieldType(prototype.FieldType);
    if (Random.NextBoolean())
    {
        fieldType.OmitNorms = true;
    }
    // Randomly vary the index options to cover more codec paths.
    int options = Random.Next(3);
    if (options == 0)
    {
        fieldType.IndexOptions = IndexOptions.DOCS_AND_FREQS; // we dont actually need positions
        fieldType.StoreTermVectors = true; // but enforce term vectors when we do this so we check SOMETHING
    }
    else if (options == 1 && !DoesntSupportOffsets.Contains(TestUtil.GetPostingsFormat("field")))
    {
        fieldType.IndexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    }
    // else just positions

    ThreadJob[] threads = new ThreadJob[threadCount];
    CountdownEvent startingGun = new CountdownEvent(1);

    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        Random threadRandom = new Random(Random.Next()); // per-thread RNG
        Document document = new Document();
        Field field = new Field("field", "", fieldType);
        document.Add(field);
        threads[threadID] = new ThreadAnonymousClass(this, numTerms, maxTermsPerDoc, postings, iw, startingGun, threadRandom, document, field);
        threads[threadID].Start();
    }
    startingGun.Signal(); // release all indexing threads at once

    foreach (ThreadJob t in threads)
    {
        t.Join();
    }

    iw.ForceMerge(1);
    DirectoryReader ir = iw.GetReader();
    Assert.AreEqual(1, ir.Leaves.Count);
    AtomicReader air = (AtomicReader)ir.Leaves[0].Reader;
    Terms terms = air.GetTerms("field");
    // numTerms-1 because there cannot be a term 0 with 0 postings:
    Assert.AreEqual(numTerms - 1, terms.Count);
    TermsEnum termsEnum = terms.GetEnumerator();
    while (termsEnum.MoveNext())
    {
        int value = Convert.ToInt32(termsEnum.Term.Utf8ToString(), CultureInfo.InvariantCulture);
        Assert.AreEqual(value, termsEnum.TotalTermFreq);
        // don't really need to check more than this, as CheckIndex
        // will verify that totalTermFreq == total number of positions seen
        // from a docsAndPositionsEnum.
    }
    ir.Dispose();
    iw.Dispose();
    dir.Dispose();
}