public void SetUp()
{
    this.complete.Reset();
    this.waitingTaskCompleted = false;
    this.doneLatch = new CountDownLatch(JOB_COUNT);
    this.count = 0;
}
public override void SetUp()
{
    base.SetUp();
    this.doneLatch = new CountDownLatch(1);
    this.counter = 0;
    this.errorMessage = null;
}
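// The snippets below all coordinate threads through a CountDownLatch. What follows
// is a minimal sketch of the support class they assume -- NOT the actual
// Lucene.Net.Support or Apache.NMS implementation, which may differ -- showing only
// the members used here: countDown(), @await(), a timed @await(TimeSpan), and the
// Remaining property.
using System;
using System.Threading;

public class CountDownLatch
{
    private readonly object syncRoot = new object();
    private int remaining;

    public CountDownLatch(int count)
    {
        this.remaining = count;
    }

    // How many countDown() calls are still outstanding.
    public int Remaining
    {
        get { lock (syncRoot) { return remaining; } }
    }

    // Decrement the count; once it reaches zero, release every waiter.
    public void countDown()
    {
        lock (syncRoot)
        {
            if (remaining > 0 && --remaining == 0)
            {
                Monitor.PulseAll(syncRoot);
            }
        }
    }

    // Block until the count reaches zero.
    public void @await()
    {
        lock (syncRoot)
        {
            while (remaining > 0)
            {
                Monitor.Wait(syncRoot);
            }
        }
    }

    // Block until the count reaches zero or the timeout elapses;
    // returns true if the latch opened, false on timeout.
    public bool @await(TimeSpan timeout)
    {
        DateTime deadline = DateTime.UtcNow + timeout;
        lock (syncRoot)
        {
            while (remaining > 0)
            {
                TimeSpan left = deadline - DateTime.UtcNow;
                if (left <= TimeSpan.Zero || !Monitor.Wait(syncRoot, left))
                {
                    return remaining == 0;
                }
            }
            return true;
        }
    }
}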
public virtual void Test()
{
    Directory dir = NewDirectory();
    IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NewLogMergePolicy()));
    IList<long?> numbers = new List<long?>();
    IList<BytesRef> binary = new List<BytesRef>();
    IList<BytesRef> sorted = new List<BytesRef>();
    int numDocs = AtLeast(100);
    for (int i = 0; i < numDocs; i++)
    {
        Document d = new Document();
        long number = Random().NextLong();
        d.Add(new NumericDocValuesField("number", number));
        BytesRef bytes = new BytesRef(TestUtil.RandomRealisticUnicodeString(Random()));
        d.Add(new BinaryDocValuesField("bytes", bytes));
        binary.Add(bytes);
        bytes = new BytesRef(TestUtil.RandomRealisticUnicodeString(Random()));
        d.Add(new SortedDocValuesField("sorted", bytes));
        sorted.Add(bytes);
        w.AddDocument(d);
        numbers.Add(number);
    }
    w.ForceMerge(1);
    IndexReader r = w.Reader;
    w.Dispose();
    Assert.AreEqual(1, r.Leaves.Count);
    AtomicReader ar = (AtomicReader)r.Leaves[0].Reader;
    int numThreads = TestUtil.NextInt(Random(), 2, 5);
    IList<ThreadClass> threads = new List<ThreadClass>();
    CountDownLatch startingGun = new CountDownLatch(1);
    for (int t = 0; t < numThreads; t++)
    {
        Random threadRandom = new Random(Random().Next());
        ThreadClass thread = new ThreadAnonymousInnerClassHelper(this, numbers, binary, sorted, numDocs, ar, startingGun, threadRandom);
        thread.Start();
        threads.Add(thread);
    }
    startingGun.countDown();
    foreach (ThreadClass thread in threads)
    {
        thread.Join();
    }
    r.Dispose();
    dir.Dispose();
}
public virtual void TestIsCurrentWithThreads()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    ReaderHolder holder = new ReaderHolder();
    ReaderThread[] threads = new ReaderThread[AtLeast(3)];
    CountDownLatch latch = new CountDownLatch(1);
    WriterThread writerThread = new WriterThread(holder, writer, AtLeast(500), Random(), latch);
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i] = new ReaderThread(holder, latch);
        threads[i].Start();
    }
    writerThread.Start();
    writerThread.Join();
    bool failed = writerThread.Failed != null;
    if (failed)
    {
        Console.WriteLine(writerThread.Failed.ToString());
        Console.Write(writerThread.Failed.StackTrace);
    }
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i].Join();
        if (threads[i].Failed != null)
        {
            Console.WriteLine(threads[i].Failed.ToString());
            Console.Write(threads[i].Failed.StackTrace);
            failed = true;
        }
    }
    Assert.IsFalse(failed);
    writer.Dispose();
    dir.Dispose();
}
private void SignalInterruptionProcessingComplete()
{
    CountDownLatch cdl = this.transportInterruptionProcessingComplete;
    if(cdl.Remaining == 0)
    {
        if(Tracer.IsDebugEnabled)
        {
            Tracer.Debug("transportInterruptionProcessingComplete for: " + this.info.ConnectionId);
        }
        this.transportInterruptionProcessingComplete = null;

        FailoverTransport failoverTransport = transport.Narrow(typeof(FailoverTransport)) as FailoverTransport;
        if(failoverTransport != null)
        {
            failoverTransport.ConnectionInterruptProcessingComplete(this.info.ConnectionId);
            if(Tracer.IsDebugEnabled)
            {
                Tracer.Debug("notified failover transport (" + failoverTransport + ") of interruption completion for: " + this.info.ConnectionId);
            }
        }
    }
}
public virtual void TestMixedTypesDifferentThreads()
{
    Directory dir = NewDirectory();
    IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    CountDownLatch startingGun = new CountDownLatch(1);
    AtomicBoolean hitExc = new AtomicBoolean();
    ThreadClass[] threads = new ThreadClass[3];
    for (int i = 0; i < 3; i++)
    {
        Field field;
        if (i == 0)
        {
            field = new SortedDocValuesField("foo", new BytesRef("hello"));
        }
        else if (i == 1)
        {
            field = new NumericDocValuesField("foo", 0);
        }
        else
        {
            field = new BinaryDocValuesField("foo", new BytesRef("bazz"));
        }
        Document doc = new Document();
        doc.Add(field);
        threads[i] = new ThreadAnonymousInnerClassHelper(this, w, startingGun, hitExc, doc);
        threads[i].Start();
    }
    startingGun.countDown();
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }
    Assert.IsTrue(hitExc.Get());
    w.Dispose();
    dir.Dispose();
}
public virtual void TestHashCodeWithThreads()
{
    AutomatonQuery[] queries = new AutomatonQuery[1000];
    for (int i = 0; i < queries.Length; i++)
    {
        queries[i] = new AutomatonQuery(new Term("bogus", "bogus"), AutomatonTestUtil.RandomAutomaton(Random()));
    }
    CountDownLatch startingGun = new CountDownLatch(1);
    int numThreads = TestUtil.NextInt(Random(), 2, 5);
    ThreadClass[] threads = new ThreadClass[numThreads];
    for (int threadID = 0; threadID < numThreads; threadID++)
    {
        ThreadClass thread = new ThreadAnonymousInnerClassHelper(this, queries, startingGun);
        threads[threadID] = thread;
        thread.Start();
    }
    startingGun.countDown();
    foreach (ThreadClass thread in threads)
    {
        thread.Join();
    }
}
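// Hedged sketch of the "starting gun" idiom the tests above use: every worker
// parks on a one-count latch so they all begin their concurrent work at the same
// instant when the coordinator fires countDown(). Uses the CountDownLatch sketch
// above; names here are illustrative, not taken from the test suite.
public static void StartingGunDemo(int numThreads)
{
    CountDownLatch startingGun = new CountDownLatch(1);
    Thread[] workers = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++)
    {
        workers[i] = new Thread(() =>
        {
            startingGun.@await();   // park until the gun fires
            // ... the concurrent work under test would run here ...
        });
        workers[i].Start();
    }
    startingGun.countDown();        // release all workers at once
    foreach (Thread worker in workers)
    {
        worker.Join();              // wait for every worker to finish
    }
}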
public ThreadAnonymousInnerClassHelper(TestNumericDocValuesUpdates outerInstance, string str, IndexWriter writer, int numDocs, CountDownLatch done, AtomicInteger numUpdates, string f, string cf)
    : base(str)
{
    this.OuterInstance = outerInstance;
    this.Writer = writer;
    this.NumDocs = numDocs;
    this.Done = done;
    this.NumUpdates = numUpdates;
    this.f = f;
    this.Cf = cf;
}
public virtual void TestDeleteAllNoDeadLock()
{
    Directory dir = NewDirectory();
    RandomIndexWriter modifier = new RandomIndexWriter(Random(), dir);
    int numThreads = AtLeast(2);
    ThreadClass[] threads = new ThreadClass[numThreads];
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch doneLatch = new CountDownLatch(numThreads);
    for (int i = 0; i < numThreads; i++)
    {
        int offset = i;
        threads[i] = new ThreadAnonymousInnerClassHelper(this, modifier, latch, doneLatch, offset);
        threads[i].Start();
    }
    latch.countDown();
    // wait up to 1 millisecond per iteration for the workers to finish
    while (!doneLatch.@await(new TimeSpan(0, 0, 0, 0, 1)))
    {
        modifier.DeleteAll();
        if (VERBOSE)
        {
            Console.WriteLine("del all");
        }
    }
    modifier.DeleteAll();
    foreach (ThreadClass thread in threads)
    {
        thread.Join();
    }
    modifier.Dispose();
    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(reader.MaxDoc, 0);
    Assert.AreEqual(reader.NumDocs, 0);
    Assert.AreEqual(reader.NumDeletedDocs, 0);
    reader.Dispose();
    dir.Dispose();
}
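// Distilled version of the pattern in TestDeleteAllNoDeadLock above, as a hedged
// sketch: a start latch releases N workers, a done latch (count N) tells the
// coordinator when they have all finished, and the timed @await lets the
// coordinator interleave its own work (here just a counter) between polls.
// All names are illustrative.
public static int CoordinateWorkers(int numWorkers)
{
    CountDownLatch start = new CountDownLatch(1);
    CountDownLatch done = new CountDownLatch(numWorkers);
    for (int i = 0; i < numWorkers; i++)
    {
        new Thread(() =>
        {
            start.@await();     // wait for the go signal
            // ... per-worker work would run here ...
            done.countDown();   // report completion (real tests do this in a finally block)
        }).Start();
    }
    start.countDown();          // release everyone
    int busyIterations = 0;
    while (!done.@await(TimeSpan.FromMilliseconds(1)))
    {
        busyIterations++;       // coordinator-side work between polls
    }
    return busyIterations;
}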
public virtual void TestStressDeleteQueue()
{
    DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
    HashSet<Term> uniqueValues = new HashSet<Term>();
    int size = 10000 + Random().Next(500) * RANDOM_MULTIPLIER;
    int?[] ids = new int?[size];
    for (int i = 0; i < ids.Length; i++)
    {
        ids[i] = Random().Next();
        uniqueValues.Add(new Term("id", ids[i].ToString()));
    }
    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger index = new AtomicInteger(0);
    int numThreads = 2 + Random().Next(5);
    UpdateThread[] threads = new UpdateThread[numThreads];
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i] = new UpdateThread(queue, index, ids, latch);
        threads[i].Start();
    }
    latch.countDown();
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i].Join();
    }
    foreach (UpdateThread updateThread in threads)
    {
        DeleteSlice slice = updateThread.Slice;
        queue.UpdateSlice(slice);
        BufferedUpdates deletes = updateThread.Deletes;
        slice.Apply(deletes, BufferedUpdates.MAX_INT);
        Assert.AreEqual(uniqueValues, deletes.Terms_Nunit().Keys);
    }
    queue.TryApplyGlobalSlice();
    HashSet<Term> frozenSet = new HashSet<Term>();
    foreach (Term t in queue.FreezeGlobalBuffer(null).TermsIterable())
    {
        BytesRef bytesRef = new BytesRef();
        bytesRef.CopyBytes(t.Bytes());
        frozenSet.Add(new Term(t.Field(), bytesRef));
    }
    Assert.AreEqual(0, queue.NumGlobalTermDeletes(), "num deletes must be 0 after freeze");
    Assert.AreEqual(uniqueValues.Count, frozenSet.Count);
    Assert.AreEqual(uniqueValues, frozenSet);
}
public virtual void Test()
{
    IList<string> postingsList = new List<string>();
    int numTerms = AtLeast(300);
    int maxTermsPerDoc = TestUtil.NextInt(Random(), 10, 20);
    bool isSimpleText = "SimpleText".Equals(TestUtil.GetPostingsFormat("field"));
    IndexWriterConfig iwc = NewIndexWriterConfig(Random(), TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    if ((isSimpleText || iwc.MergePolicy is MockRandomMergePolicy) && (TEST_NIGHTLY || RANDOM_MULTIPLIER > 1))
    {
        // Otherwise test can take way too long (> 2 hours)
        numTerms /= 2;
    }
    if (VERBOSE)
    {
        Console.WriteLine("maxTermsPerDoc=" + maxTermsPerDoc);
        Console.WriteLine("numTerms=" + numTerms);
    }
    for (int i = 0; i < numTerms; i++)
    {
        string term = Convert.ToString(i);
        for (int j = 0; j < i; j++)
        {
            postingsList.Add(term);
        }
    }
    postingsList = CollectionsHelper.Shuffle(postingsList);
    ConcurrentQueue<string> postings = new ConcurrentQueue<string>(postingsList);
    Directory dir = NewFSDirectory(CreateTempDir("bagofpostings"));
    RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwc);
    int threadCount = TestUtil.NextInt(Random(), 1, 5);
    if (VERBOSE)
    {
        Console.WriteLine("config: " + iw.w.Config);
        Console.WriteLine("threadCount=" + threadCount);
    }
    ThreadClass[] threads = new ThreadClass[threadCount];
    CountDownLatch startingGun = new CountDownLatch(1);
    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        threads[threadID] = new ThreadAnonymousInnerClassHelper(this, maxTermsPerDoc, postings, iw, startingGun);
        threads[threadID].Start();
    }
    startingGun.countDown();
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }
    iw.ForceMerge(1);
    DirectoryReader ir = iw.Reader;
    Assert.AreEqual(1, ir.Leaves.Count);
    AtomicReader air = (AtomicReader)ir.Leaves[0].Reader;
    Terms terms = air.Terms("field");
    // numTerms-1 because there cannot be a term 0 with 0 postings:
    Assert.AreEqual(numTerms - 1, air.Fields.UniqueTermCount);
    if (!(iwc.Codec is Lucene3xCodec))
    {
        Assert.AreEqual(numTerms - 1, terms.Size());
    }
    TermsEnum termsEnum = terms.Iterator(null);
    BytesRef term_;
    while ((term_ = termsEnum.Next()) != null)
    {
        int value = Convert.ToInt32(term_.Utf8ToString());
        Assert.AreEqual(value, termsEnum.DocFreq());
        // don't really need to check more than this, as CheckIndex
        // will verify that docFreq == actual number of documents seen
        // from a docsAndPositionsEnum.
    }
    ir.Dispose();
    iw.Dispose();
    dir.Dispose();
}
public ThreadAnonymousInnerClassHelper(TestBagOfPostings outerInstance, int maxTermsPerDoc, ConcurrentQueue<string> postings, RandomIndexWriter iw, CountDownLatch startingGun)
{
    this.OuterInstance = outerInstance;
    this.MaxTermsPerDoc = maxTermsPerDoc;
    this.Postings = postings;
    this.Iw = iw;
    this.StartingGun = startingGun;
}
public SearcherFactoryAnonymousInnerClassHelper2(TestSearcherManager outerInstance, CountDownLatch awaitEnterWarm, CountDownLatch awaitClose, AtomicBoolean triedReopen, TaskScheduler es)
{
    this.OuterInstance = outerInstance;
    this.AwaitEnterWarm = awaitEnterWarm;
    this.AwaitClose = awaitClose;
    this.TriedReopen = triedReopen;
    this.Es = es;
}
public ConcurrentMergeSchedulerAnonymousInnerClassHelper(TestConcurrentMergeScheduler outerInstance, int maxMergeCount, CountDownLatch enoughMergesWaiting, AtomicInteger runningMergeCount, AtomicBoolean failed)
{
    this.OuterInstance = outerInstance;
    this.MaxMergeCount = maxMergeCount;
    this.EnoughMergesWaiting = enoughMergesWaiting;
    this.RunningMergeCount = runningMergeCount;
    this.Failed = failed;
}
public virtual void TestMaxMergeCount()
{
    Directory dir = NewDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    int maxMergeCount = TestUtil.NextInt(Random(), 1, 5);
    int maxMergeThreads = TestUtil.NextInt(Random(), 1, maxMergeCount);
    CountDownLatch enoughMergesWaiting = new CountDownLatch(maxMergeCount);
    AtomicInteger runningMergeCount = new AtomicInteger(0);
    AtomicBoolean failed = new AtomicBoolean();
    if (VERBOSE)
    {
        Console.WriteLine("TEST: maxMergeCount=" + maxMergeCount + " maxMergeThreads=" + maxMergeThreads);
    }
    ConcurrentMergeScheduler cms = new ConcurrentMergeSchedulerAnonymousInnerClassHelper(this, maxMergeCount, enoughMergesWaiting, runningMergeCount, failed);
    cms.SetMaxMergesAndThreads(maxMergeCount, maxMergeThreads);
    iwc.SetMergeScheduler(cms);
    iwc.SetMaxBufferedDocs(2);
    TieredMergePolicy tmp = new TieredMergePolicy();
    iwc.SetMergePolicy(tmp);
    tmp.MaxMergeAtOnce = 2;
    tmp.SegmentsPerTier = 2;
    IndexWriter w = new IndexWriter(dir, iwc);
    Document doc = new Document();
    doc.Add(NewField("field", "field", TextField.TYPE_NOT_STORED));
    while (enoughMergesWaiting.Remaining != 0 && !failed.Get())
    {
        for (int i = 0; i < 10; i++)
        {
            w.AddDocument(doc);
        }
    }
    w.Dispose(false);
    dir.Dispose();
}
public virtual void Test()
{
    Directory dir = NewDirectory();
    MockAnalyzer analyzer = new MockAnalyzer(Random());
    analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH);
    RandomIndexWriter w = new RandomIndexWriter(Random(), dir, analyzer);
    LineFileDocs docs = new LineFileDocs(Random(), DefaultCodecSupportsDocValues());
    int charsToIndex = AtLeast(100000);
    int charsIndexed = 0;
    //Console.WriteLine("charsToIndex=" + charsToIndex);
    while (charsIndexed < charsToIndex)
    {
        Document doc = docs.NextDoc();
        charsIndexed += doc.Get("body").Length;
        w.AddDocument(doc);
        //Console.WriteLine("  chars=" + charsIndexed + " add: " + doc);
    }
    IndexReader r = w.Reader;
    //Console.WriteLine("numDocs=" + r.NumDocs);
    w.Dispose();
    IndexSearcher s = NewSearcher(r);
    Terms terms = MultiFields.GetFields(r).Terms("body");
    int termCount = 0;
    TermsEnum termsEnum = terms.Iterator(null);
    while (termsEnum.Next() != null)
    {
        termCount++;
    }
    Assert.IsTrue(termCount > 0);
    // Target ~10 terms to search:
    double chance = 10.0 / termCount;
    termsEnum = terms.Iterator(termsEnum);
    IDictionary<BytesRef, TopDocs> answers = new Dictionary<BytesRef, TopDocs>();
    while (termsEnum.Next() != null)
    {
        if (Random().NextDouble() <= chance)
        {
            BytesRef term = BytesRef.DeepCopyOf(termsEnum.Term());
            answers[term] = s.Search(new TermQuery(new Term("body", term)), 100);
        }
    }
    if (answers.Count > 0)
    {
        CountDownLatch startingGun = new CountDownLatch(1);
        int numThreads = TestUtil.NextInt(Random(), 2, 5);
        ThreadClass[] threads = new ThreadClass[numThreads];
        for (int threadID = 0; threadID < numThreads; threadID++)
        {
            ThreadClass thread = new ThreadAnonymousInnerClassHelper(this, s, answers, startingGun);
            threads[threadID] = thread;
            thread.Start();
        }
        startingGun.countDown();
        foreach (ThreadClass thread in threads)
        {
            thread.Join();
        }
    }
    r.Dispose();
    dir.Dispose();
}
public ThreadAnonymousInnerClassHelper(TestDocValuesWithThreads outerInstance, IList<long?> numbers, IList<BytesRef> binary, IList<BytesRef> sorted, int numDocs, AtomicReader ar, CountDownLatch startingGun, Random threadRandom)
{
    this.OuterInstance = outerInstance;
    this.Numbers = numbers;
    this.Binary = binary;
    this.Sorted = sorted;
    this.NumDocs = numDocs;
    this.Ar = ar;
    this.StartingGun = startingGun;
    this.ThreadRandom = threadRandom;
}
protected internal UpdateThread(DocumentsWriterDeleteQueue queue, AtomicInteger index, int?[] ids, CountDownLatch latch)
{
    this.Queue = queue;
    this.Index = index;
    this.Ids = ids;
    this.Slice = queue.NewSlice();
    this.Deletes = new BufferedUpdates();
    this.Latch = latch;
}
public void Reset(int numUpdaters, int numThreads)
{
    this.Waiter = new CountDownLatch(1);
    this.UpdateJoin = new CountDownLatch(numUpdaters);
    this.LeftCheckpoint = new CountDownLatch(numUpdaters);
}
/*
 * LUCENE-3528 - NRTManager hangs in certain situations
 */
public virtual void TestThreadStarvationNoDeleteNRTReader()
{
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    conf.SetMergePolicy(Random().NextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES);
    Directory d = NewDirectory();
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch signal = new CountDownLatch(1);
    LatchedIndexWriter _writer = new LatchedIndexWriter(d, conf, latch, signal);
    TrackingIndexWriter writer = new TrackingIndexWriter(_writer);
    SearcherManager manager = new SearcherManager(_writer, false, null);
    Document doc = new Document();
    doc.Add(NewTextField("test", "test", Field.Store.YES));
    writer.AddDocument(doc);
    manager.MaybeRefresh();
    ThreadClass t = new ThreadAnonymousInnerClassHelper(this, latch, signal, writer, manager);
    t.Start();
    _writer.WaitAfterUpdate = true; // wait in addDocument to let some reopens go through
    // once this returns, the doc is already reflected in the last reopen
    long lastGen = writer.UpdateDocument(new Term("foo", "bar"), doc);
    Assert.IsFalse(manager.SearcherCurrent); // false since there is a delete in the queue
    IndexSearcher searcher = manager.Acquire();
    try
    {
        Assert.AreEqual(2, searcher.IndexReader.NumDocs);
    }
    finally
    {
        manager.Release(searcher);
    }
    ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<IndexSearcher>(writer, manager, 0.01, 0.01);
    thread.Start(); // start reopening
    if (VERBOSE)
    {
        Console.WriteLine("waiting now for generation " + lastGen);
    }
    AtomicBoolean finished = new AtomicBoolean(false);
    ThreadClass waiter = new ThreadAnonymousInnerClassHelper2(this, lastGen, thread, finished);
    waiter.Start();
    manager.MaybeRefresh();
    waiter.Join(1000);
    if (!finished.Get())
    {
        waiter.Interrupt();
        Assert.Fail("thread deadlocked on waitForGeneration");
    }
    thread.Dispose();
    thread.Join();
    IOUtils.Close(manager, _writer, d);
}
public ThreadAnonymousInnerClassHelper(TestIndexWriterDelete outerInstance, RandomIndexWriter modifier, CountDownLatch latch, CountDownLatch doneLatch, int offset)
{
    this.OuterInstance = outerInstance;
    this.Modifier = modifier;
    this.Latch = latch;
    this.DoneLatch = doneLatch;
    this.Offset = offset;
}
public LatchedIndexWriter(Directory d, IndexWriterConfig conf, CountDownLatch latch, CountDownLatch signal)
    : base(d, conf)
{
    this.Latch = latch;
    this.Signal = signal;
}
public virtual void TestStressMultiThreading()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    // create index
    int numThreads = TestUtil.NextInt(Random(), 3, 6);
    int numDocs = AtLeast(2000);
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(new StringField("id", "doc" + i, Store.NO));
        double group = Random().NextDouble();
        string g;
        if (group < 0.1)
        {
            g = "g0";
        }
        else if (group < 0.5)
        {
            g = "g1";
        }
        else if (group < 0.8)
        {
            g = "g2";
        }
        else
        {
            g = "g3";
        }
        doc.Add(new StringField("updKey", g, Store.NO));
        for (int j = 0; j < numThreads; j++)
        {
            long value = Random().Next();
            doc.Add(new NumericDocValuesField("f" + j, value));
            doc.Add(new NumericDocValuesField("cf" + j, value * 2)); // control, always updated to f * 2
        }
        writer.AddDocument(doc);
    }
    CountDownLatch done = new CountDownLatch(numThreads);
    AtomicInteger numUpdates = new AtomicInteger(AtLeast(100));
    // same thread updates a field as well as reopens
    ThreadClass[] threads = new ThreadClass[numThreads];
    for (int i = 0; i < threads.Length; i++)
    {
        string f = "f" + i;
        string cf = "cf" + i;
        threads[i] = new ThreadAnonymousInnerClassHelper(this, "UpdateThread-" + i, writer, numDocs, done, numUpdates, f, cf);
    }
    foreach (ThreadClass t in threads)
    {
        t.Start();
    }
    done.@await();
    writer.Dispose();
    DirectoryReader reader = DirectoryReader.Open(dir);
    foreach (AtomicReaderContext context in reader.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        for (int i = 0; i < numThreads; i++)
        {
            NumericDocValues ndv = r.GetNumericDocValues("f" + i);
            NumericDocValues control = r.GetNumericDocValues("cf" + i);
            Bits docsWithNdv = r.GetDocsWithField("f" + i);
            Bits docsWithControl = r.GetDocsWithField("cf" + i);
            Bits liveDocs = r.LiveDocs;
            for (int j = 0; j < r.MaxDoc; j++)
            {
                if (liveDocs == null || liveDocs.Get(j))
                {
                    Assert.AreEqual(docsWithNdv.Get(j), docsWithControl.Get(j));
                    if (docsWithNdv.Get(j))
                    {
                        Assert.AreEqual(control.Get(j), ndv.Get(j) * 2);
                    }
                }
            }
        }
    }
    reader.Dispose();
    dir.Dispose();
}
public ThreadAnonymousInnerClassHelper(TestControlledRealTimeReopenThread outerInstance, CountDownLatch latch, CountDownLatch signal, TrackingIndexWriter writer, SearcherManager manager)
{
    this.OuterInstance = outerInstance;
    this.Latch = latch;
    this.Signal = signal;
    this.Writer = writer;
    this.Manager = manager;
}
public ThreadAnonymousInnerClassHelper(TestAutomatonQuery outerInstance, AutomatonQuery[] queries, CountDownLatch startingGun)
{
    this.OuterInstance = outerInstance;
    this.Queries = queries;
    this.StartingGun = startingGun;
}
public virtual void TestOpenTwoIndexWritersOnDifferentThreads()
{
    Directory dir = NewDirectory();
    CountDownLatch oneIWConstructed = new CountDownLatch(1);
    DelayedIndexAndCloseRunnable thread1 = new DelayedIndexAndCloseRunnable(dir, oneIWConstructed);
    DelayedIndexAndCloseRunnable thread2 = new DelayedIndexAndCloseRunnable(dir, oneIWConstructed);
    thread1.Start();
    thread2.Start();
    oneIWConstructed.@await();
    thread1.StartIndexing();
    thread2.StartIndexing();
    thread1.Join();
    thread2.Join();
    // ensure the directory is closed if we hit the timeout and throw assume
    // TODO: can we improve this in LuceneTestCase? I don't know what the logic would be...
    try
    {
        AssumeFalse("aborting test: timeout obtaining lock", thread1.Failure is LockObtainFailedException);
        AssumeFalse("aborting test: timeout obtaining lock", thread2.Failure is LockObtainFailedException);
        Assert.IsFalse(thread1.Failed, "Failed due to: " + thread1.Failure);
        Assert.IsFalse(thread2.Failed, "Failed due to: " + thread2.Failure);
        // now verify that we have two documents in the index
        IndexReader reader = DirectoryReader.Open(dir);
        Assert.AreEqual(2, reader.NumDocs, "IndexReader should have one document per thread running");
        reader.Dispose();
    }
    finally
    {
        dir.Dispose();
    }
}
public ThreadAnonymousInnerClassHelper(TestDocValuesIndexing outerInstance, IndexWriter w, CountDownLatch startingGun, AtomicBoolean hitExc, Document doc)
{
    this.OuterInstance = outerInstance;
    this.w = w;
    this.StartingGun = startingGun;
    this.HitExc = hitExc;
    this.Doc = doc;
}
public DelayedIndexAndCloseRunnable(Directory dir, CountDownLatch iwConstructed)
{
    this.Dir = dir;
    this.IwConstructed = iwConstructed;
}
protected void OnTransportInterrupted(ITransport sender)
{
    Tracer.Debug("Connection: Transport has been Interrupted.");

    this.transportInterruptionProcessingComplete = new CountDownLatch(dispatchers.Count);
    if(Tracer.IsDebugEnabled)
    {
        Tracer.Debug("transport interrupted, dispatchers: " + dispatchers.Count);
    }
    SignalInterruptionProcessingNeeded();

    foreach(Session session in this.sessions)
    {
        try
        {
            session.ClearMessagesInProgress();
        }
        catch(Exception ex)
        {
            Tracer.Warn("Exception while clearing messages: " + ex.Message);
            Tracer.Warn(ex.StackTrace);
        }
    }

    if(this.ConnectionInterruptedListener != null && !this.closing.Value)
    {
        try
        {
            this.ConnectionInterruptedListener();
        }
        catch
        {
        }
    }
}
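// Hedged sketch of the piece that connects the two NMS snippets above: each
// dispatcher, once it finishes its own interruption processing, counts the latch
// down and re-runs the completion check, so the last one to finish (Remaining == 0)
// is the one that triggers SignalInterruptionProcessingComplete(). The method name
// and structure here are illustrative, not copied from Apache.NMS.
private void OnDispatcherInterruptionProcessed()
{
    CountDownLatch cdl = this.transportInterruptionProcessingComplete;
    if(cdl != null)
    {
        cdl.countDown();                         // this dispatcher is done
        SignalInterruptionProcessingComplete();  // fires only once Remaining == 0
    }
}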
public ThreadAnonymousInnerClassHelper(TestSameScoresWithThreads outerInstance, IndexSearcher s, IDictionary<BytesRef, TopDocs> answers, CountDownLatch startingGun)
{
    this.OuterInstance = outerInstance;
    this.s = s;
    this.Answers = answers;
    this.StartingGun = startingGun;
}