public Directory GetDirectory(string sessionId, string source)
{
    Directory dir = @in.GetDirectory(sessionId, source);
    if (Random().nextBoolean() && failures.Get() > 0)
    {
        // client should fail, return wrapped dir
        MockDirectoryWrapper mdw = new MockDirectoryWrapper(Random(), dir);
        mdw.RandomIOExceptionRateOnOpen = clientExRate;
        mdw.MaxSizeInBytes = clientMaxSize;
        mdw.RandomIOExceptionRate = clientExRate;
        mdw.CheckIndexOnClose = false;
        clientMaxSize *= 2;
        clientExRate /= 2;
        return mdw;
    }

    if (failures.Get() > 0 && Random().nextBoolean())
    {
        // handler should fail
        test.handlerDir.MaxSizeInBytes = handlerMaxSize;
        test.handlerDir.RandomIOExceptionRateOnOpen = handlerExRate;
        test.handlerDir.RandomIOExceptionRate = handlerExRate;
        handlerMaxSize *= 2;
        handlerExRate /= 2;
    }
    else
    {
        // disable errors
        test.handlerDir.MaxSizeInBytes = 0;
        test.handlerDir.RandomIOExceptionRate = 0.0;
        test.handlerDir.RandomIOExceptionRateOnOpen = 0.0;
    }
    return dir;
}
protected override void HandleUpdateException(Exception exception)
{
    if (exception is IOException)
    {
        try
        {
            if (VERBOSE)
            {
                Console.WriteLine("hit exception during update: " + exception);
            }

            // test that the index can be read and also some basic statistics
            DirectoryReader reader = DirectoryReader.Open(test.handlerIndexDir.Delegate);
            try
            {
                int numDocs = reader.NumDocs;
                int version = int.Parse(reader.IndexCommit.UserData[VERSION_ID], NumberStyles.HexNumber);
                assertEquals(numDocs, version);
            }
            finally
            {
                reader.Dispose();
            }

            // verify index consistency
            TestUtil.CheckIndex(test.handlerIndexDir.Delegate);

            // verify the taxonomy index is fully consistent (since we only add one
            // category to all documents, there's not much more to validate)
            TestUtil.CheckIndex(test.handlerTaxoDir.Delegate);
        }
        finally
        {
            // count down the number of expected failures
            failures.DecrementAndGet();
            Debug.Assert(failures.Get() >= 0, "handler failed too many times: " + failures.Get());
            if (VERBOSE)
            {
                if (failures.Get() == 0)
                {
                    Console.WriteLine("no more failures expected");
                }
                else
                {
                    Console.WriteLine("num failures left: " + failures.Get());
                }
            }
        }
    }
    else
    {
        throw exception;
    }
}
/// <summary>
/// Expert: increments the <see cref="RefCount"/> of this <see cref="IndexReader"/>
/// instance only if the <see cref="IndexReader"/> has not been disposed yet
/// and returns <c>true</c> iff the <see cref="RefCount"/> was
/// successfully incremented, otherwise <c>false</c>.
/// If this method returns <c>false</c> the reader is either
/// already disposed or is currently being disposed. Either way this
/// reader instance shouldn't be used by an application unless
/// <c>true</c> is returned.
/// <para/>
/// <see cref="RefCount"/>s are used to determine when a
/// reader can be disposed safely, i.e. as soon as there are
/// no more references. Be sure to always call a
/// corresponding <see cref="DecRef"/>, in a finally clause;
/// otherwise the reader may never be disposed. Note that
/// <see cref="Dispose(bool)"/> simply calls <see cref="DecRef()"/>, which means that
/// the <see cref="IndexReader"/> will not really be disposed until
/// <see cref="DecRef()"/> has been called for all outstanding
/// references.
/// </summary>
/// <seealso cref="DecRef"/>
/// <seealso cref="IncRef"/>
public bool TryIncRef()
{
    int count;
    while ((count = refCount.Get()) > 0)
    {
        if (refCount.CompareAndSet(count, count + 1))
        {
            return true;
        }
    }
    return false;
}
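// A minimal caller-side sketch of the contract the summary above describes: a
// successful TryIncRef must always be balanced by a DecRef in a finally clause.
// How 'reader' is obtained and what is done with it are hypothetical here.
if (reader.TryIncRef())
{
    try
    {
        int numDocs = reader.NumDocs; // safe: our reference keeps the reader alive
    }
    finally
    {
        reader.DecRef(); // balance the successful TryIncRef
    }
}
else
{
    // reader is already disposed (or disposing); acquire a fresh instance instead
}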
public Directory GetDirectory(string sessionId, string source)
{
    Directory dir = @in.GetDirectory(sessionId, source);
    if (Random.nextBoolean() && failures.Get() > 0)
    {
        // client should fail, return wrapped dir
        MockDirectoryWrapper mdw = new MockDirectoryWrapper(Random, dir);
        mdw.RandomIOExceptionRateOnOpen = clientExRate;
        mdw.MaxSizeInBytes = clientMaxSize;
        mdw.RandomIOExceptionRate = clientExRate;
        mdw.CheckIndexOnDispose = false;
        clientMaxSize *= 2;
        clientExRate /= 2;
        return mdw;
    }

    if (failures.Get() > 0 && Random.nextBoolean())
    {
        // handler should fail
        if (Random.nextBoolean())
        {
            // index dir fail
            test.handlerIndexDir.MaxSizeInBytes = handlerIndexMaxSize;
            test.handlerIndexDir.RandomIOExceptionRate = handlerIndexExRate;
            test.handlerIndexDir.RandomIOExceptionRateOnOpen = handlerIndexExRate;
            handlerIndexMaxSize *= 2;
            handlerIndexExRate /= 2;
        }
        else
        {
            // taxo dir fail
            test.handlerTaxoDir.MaxSizeInBytes = handlerTaxoMaxSize;
            test.handlerTaxoDir.RandomIOExceptionRate = handlerTaxoExRate;
            test.handlerTaxoDir.RandomIOExceptionRateOnOpen = handlerTaxoExRate;
            test.handlerTaxoDir.CheckIndexOnDispose = false;
            handlerTaxoMaxSize *= 2;
            handlerTaxoExRate /= 2;
        }
    }
    else
    {
        // disable all errors
        test.handlerIndexDir.MaxSizeInBytes = 0;
        test.handlerIndexDir.RandomIOExceptionRate = 0.0;
        test.handlerIndexDir.RandomIOExceptionRateOnOpen = 0.0;
        test.handlerTaxoDir.MaxSizeInBytes = 0;
        test.handlerTaxoDir.RandomIOExceptionRate = 0.0;
        test.handlerTaxoDir.RandomIOExceptionRateOnOpen = 0.0;
    }
    return dir;
}
/// <summary>
/// Decrements the reference count of this revision. When the count reaches
/// zero, the underlying <see cref="Revision"/> is released.
/// </summary>
/// <exception cref="InvalidOperationException"></exception>
public virtual void DecRef()
{
    if (refCount.Get() <= 0)
    {
        throw new InvalidOperationException("this revision is already released");
    }

    var rc = refCount.DecrementAndGet();
    if (rc == 0)
    {
        bool success = false;
        try
        {
            Revision.Release();
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Put reference back on failure
                refCount.IncrementAndGet();
            }
        }
    }
    else if (rc < 0)
    {
        throw new InvalidOperationException(string.Format("too many decRef calls: refCount is {0} after decrement", rc));
    }
}
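// Hedged usage sketch for the pair above. 'IncRef' is the assumed counterpart of
// 'DecRef', and the surrounding method is hypothetical; the point is that every
// IncRef is balanced by a DecRef in a finally clause, so the revision is released
// exactly once even if reading it throws.
public virtual void ReadRevisionSafely()
{
    IncRef();
    try
    {
        // access the revision's files here; the reference keeps them alive
    }
    finally
    {
        DecRef(); // may trigger Revision.Release() if this was the last reference
    }
}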
public virtual int RefCount()
{
    int rc = refCount.Get();
    Debug.Assert(rc >= 0);
    return rc;
}
internal bool AnyChanges()
{
    if (infoStream.IsEnabled("DW"))
    {
        infoStream.Message("DW", "anyChanges? numDocsInRam=" + numDocsInRAM.Get()
            + " deletes=" + AnyDeletions()
            + " hasTickets:" + ticketQueue.HasTickets
            + " pendingChangesInFullFlush: " + pendingChangesInCurrentFullFlush);
    }

    /*
     * Changes are either in a DWPT or in the deleteQueue.
     * Yet if we are currently flushing deletes and/or DWPTs, there
     * could be a window where all changes are in the ticket queue
     * before they are published to the IW, i.e. we need to check if the
     * ticket queue has any tickets.
     */
    return numDocsInRAM.Get() != 0 || AnyDeletions() || ticketQueue.HasTickets || pendingChangesInCurrentFullFlush;
}
public virtual void TestBooleanScorerMax()
{
    Directory dir = NewDirectory();
    RandomIndexWriter riw = new RandomIndexWriter(Random(), dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));

    int docCount = AtLeast(10000);
    for (int i = 0; i < docCount; i++)
    {
        Document doc = new Document();
        doc.Add(NewField("field", "a", TextField.TYPE_NOT_STORED));
        riw.AddDocument(doc);
    }

    riw.ForceMerge(1);
    IndexReader r = riw.Reader;
    riw.Dispose();

    IndexSearcher s = NewSearcher(r);
    BooleanQuery bq = new BooleanQuery();
    bq.Add(new TermQuery(new Term("field", "a")), Occur.SHOULD);
    bq.Add(new TermQuery(new Term("field", "a")), Occur.SHOULD);

    Weight w = s.CreateNormalizedWeight(bq);
    Assert.AreEqual(1, s.IndexReader.Leaves.Count);
    BulkScorer scorer = w.GetBulkScorer(s.IndexReader.Leaves[0], false, null);

    FixedBitSet hits = new FixedBitSet(docCount);
    AtomicInt32 end = new AtomicInt32();
    ICollector c = new CollectorAnonymousInnerClassHelper(this, scorer, hits, end);

    while (end.Get() < docCount)
    {
        int inc = TestUtil.NextInt(Random(), 1, 1000);
        end.AddAndGet(inc);
        scorer.Score(c, end.Get());
    }

    Assert.AreEqual(docCount, hits.Cardinality());

    r.Dispose();
    dir.Dispose();
}
public void TestConsistencyOnExceptions()
{
    // so the handler's index isn't empty
    replicator.Publish(CreateRevision(1));
    client.UpdateNow();
    client.Dispose();
    callback.Dispose();

    // Replicator violates write-once policy. It may be that the
    // handler copies files to the index dir, then fails to copy a
    // file and reverts the copy operation. On the next attempt, it
    // will copy the same file again. There is nothing wrong with this
    // in a real system, but it does violate write-once, and MDW
    // doesn't like it. Disabling it means that we won't catch cases
    // where the handler overwrites an existing index file, but
    // there's nothing currently we can do about it, unless we don't
    // use MDW.
    handlerIndexDir.PreventDoubleWrite = false;
    handlerTaxoDir.PreventDoubleWrite = false;

    // wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors
    ISourceDirectoryFactory @in = sourceDirFactory;
    AtomicInt32 failures = new AtomicInt32(AtLeast(10));

    sourceDirFactory = new SourceDirectoryFactoryAnonymousInnerClass(this, @in, failures);
    handler = new IndexAndTaxonomyReplicationHandler(handlerIndexDir, handlerTaxoDir, () =>
    {
        if (Random().NextDouble() < 0.2 && failures.Get() > 0)
        {
            throw new Exception("random exception from callback");
        }
        return null;
    });
    client = new ReplicationClientAnonymousInnerClass(this, replicator, handler, @in, failures);
    client.StartUpdateThread(10, "indexAndTaxo");

    Directory baseHandlerIndexDir = handlerIndexDir.Delegate;
    int numRevisions = AtLeast(20) + 2;
    for (int i = 2; i < numRevisions; i++)
    {
        replicator.Publish(CreateRevision(i));
        AssertHandlerRevision(i, baseHandlerIndexDir);
    }

    // disable errors -- maybe randomness didn't exhaust all allowed failures,
    // and we don't want e.g. CheckIndex to hit false errors.
    handlerIndexDir.MaxSizeInBytes = 0;
    handlerIndexDir.RandomIOExceptionRate = 0.0;
    handlerIndexDir.RandomIOExceptionRateOnOpen = 0.0;
    handlerTaxoDir.MaxSizeInBytes = 0;
    handlerTaxoDir.RandomIOExceptionRate = 0.0;
    handlerTaxoDir.RandomIOExceptionRateOnOpen = 0.0;
}
public override string ToString()
{
    if (VERBOSE_DELETES)
    {
        return "gen=" + gen + " numTerms=" + numTermDeletes
            + ", terms=" + Arrays.ToString(terms)
            + ", queries=" + Arrays.ToString(queries)
            + ", docIDs=" + Arrays.ToString(docIDs)
            + ", numericUpdates=" + Arrays.ToString(numericUpdates)
            + ", binaryUpdates=" + Arrays.ToString(binaryUpdates)
            + ", bytesUsed=" + bytesUsed;
    }
    else
    {
        string s = "gen=" + gen;
        if (numTermDeletes.Get() != 0)
        {
            s += " " + numTermDeletes.Get() + " deleted terms (unique count=" + terms.Count + ")";
        }
        if (queries.Count != 0)
        {
            s += " " + queries.Count + " deleted queries";
        }
        if (docIDs.Count != 0)
        {
            s += " " + docIDs.Count + " deleted docIDs";
        }
        if (numNumericUpdates.Get() != 0)
        {
            s += " " + numNumericUpdates.Get() + " numeric updates (unique count=" + numericUpdates.Count + ")";
        }
        if (numBinaryUpdates.Get() != 0)
        {
            s += " " + numBinaryUpdates.Get() + " binary updates (unique count=" + binaryUpdates.Count + ")";
        }
        if (bytesUsed.Get() != 0)
        {
            s += " bytesUsed=" + bytesUsed.Get();
        }
        return s;
    }
}
private void Prune(int count)
{
    lock (this)
    {
        if (count > 0)
        {
            if (infoStream.IsEnabled("BD"))
            {
                infoStream.Message("BD", "pruneDeletes: prune " + count + " packets; " + (updates.Count - count) + " packets remain");
            }
            for (int delIDX = 0; delIDX < count; delIDX++)
            {
                FrozenBufferedUpdates packet = updates[delIDX];
                numTerms.AddAndGet(-packet.numTermDeletes);
                Debug.Assert(numTerms.Get() >= 0);
                bytesUsed.AddAndGet(-packet.bytesUsed);
                Debug.Assert(bytesUsed.Get() >= 0);
            }
            updates.SubList(0, count).Clear();
        }
    }
}
internal void IncRef()
{
    int count;
    while ((count = @ref.Get()) > 0)
    {
        if (@ref.CompareAndSet(count, count + 1))
        {
            return;
        }
    }
    throw new ObjectDisposedException(this.GetType().GetTypeInfo().FullName, "SegmentCoreReaders is already closed");
}
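// Caller-side sketch (hypothetical): unlike TryIncRef above, this IncRef either
// secures a reference or throws, so callers need not check a return value as long
// as every successful IncRef is balanced by the assumed DecRef counterpart.
core.IncRef(); // throws ObjectDisposedException if the core is already closed
try
{
    // use the shared SegmentCoreReaders state here
}
finally
{
    core.DecRef(); // assumed counterpart that releases the reference
}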
public override void Run()
{
    try
    {
        while (!Failed.Get())
        {
            int op = Random().Next(3);
            if (op == 0)
            {
                // Purge all caches & resume, once all
                // threads get here:
                Restart.SignalAndWait();
                if (Iters.Get() >= NUM_ITER)
                {
                    break;
                }
            }
            else if (op == 1)
            {
                IBits docsWithField = Cache.GetDocsWithField(Reader, "sparse");
                for (int i = 0; i < docsWithField.Length; i++)
                {
                    Assert.AreEqual(i % 2 == 0, docsWithField.Get(i));
                }
            }
            else
            {
                Int32s ints = Cache.GetInt32s(Reader, "sparse", true);
                IBits docsWithField = Cache.GetDocsWithField(Reader, "sparse");
                for (int i = 0; i < docsWithField.Length; i++)
                {
                    if (i % 2 == 0)
                    {
                        Assert.IsTrue(docsWithField.Get(i));
                        Assert.AreEqual(i, ints.Get(i));
                    }
                    else
                    {
                        Assert.IsFalse(docsWithField.Get(i));
                    }
                }
            }
        }
    }
    catch (Exception t)
    {
        Failed.Set(true);
        throw new Exception(t.Message, t);
    }
}
/// <summary>
/// Returns the current reference count.
/// </summary>
public int GetRefCount() // LUCENENET NOTE: although this would be a good candidate for a property, doing so would cause a naming conflict
{
    return refCount.Get();
}
public virtual void TestConcurrency()
{
    AtomicInt32 numDocs = new AtomicInt32(AtLeast(10000));
    Directory indexDir = NewDirectory();
    Directory taxoDir = NewDirectory();
    ConcurrentDictionary<string, string> values = new ConcurrentDictionary<string, string>();
    IndexWriter iw = new IndexWriter(indexDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, null));
    var tw = new DirectoryTaxonomyWriter(taxoDir, OpenMode.CREATE, NewTaxoWriterCache(numDocs.Get()));
    ThreadClass[] indexThreads = new ThreadClass[AtLeast(4)];
    FacetsConfig config = new FacetsConfig();
    for (int i = 0; i < 10; i++)
    {
        config.SetHierarchical("l1." + i, true);
        config.SetMultiValued("l1." + i, true);
    }

    for (int i = 0; i < indexThreads.Length; i++)
    {
        indexThreads[i] = new ThreadAnonymousInnerClassHelper(this, numDocs, values, iw, tw, config);
    }

    foreach (ThreadClass t in indexThreads)
    {
        t.Start();
    }
    foreach (ThreadClass t in indexThreads)
    {
        t.Join();
    }

    var tr = new DirectoryTaxonomyReader(tw);
    // +1 for root category
    if (values.Count + 1 != tr.Count)
    {
        foreach (string value in values.Keys)
        {
            FacetLabel label = new FacetLabel(FacetsConfig.StringToPath(value));
            if (tr.GetOrdinal(label) == -1)
            {
                Console.WriteLine("FAIL: path=" + label + " not recognized");
            }
        }
        Fail("mismatch number of categories");
    }

    int[] parents = tr.ParallelTaxonomyArrays.Parents;
    foreach (string cat in values.Keys)
    {
        FacetLabel cp = new FacetLabel(FacetsConfig.StringToPath(cat));
        Assert.True(tr.GetOrdinal(cp) > 0, "category not found " + cp);
        int level = cp.Length;
        int parentOrd = 0; // for root, parent is always virtual ROOT (ord=0)
        FacetLabel path = null;
        for (int i = 0; i < level; i++)
        {
            path = cp.Subpath(i + 1);
            int ord = tr.GetOrdinal(path);
            Assert.AreEqual(parentOrd, parents[ord], "invalid parent for cp=" + path);
            parentOrd = ord; // next level should have this parent
        }
    }

    IOUtils.Dispose(tw, iw, tr, taxoDir, indexDir);
}
// We need to guarantee that if several threads call this concurrently, only
// one executes it, and after it returns, the cache is updated and is either
// complete or not.
private void PerhapsFillCache()
{
    lock (this)
    {
        if (cacheMisses.Get() < cacheMissesUntilFill)
        {
            return;
        }

        if (!shouldFillCache)
        {
            // we already filled the cache once, there's no need to re-fill it
            return;
        }
        shouldFillCache = false;

        InitReaderManager();

        bool aborted = false;
        DirectoryReader reader = readerManager.Acquire();
        try
        {
            TermsEnum termsEnum = null;
            DocsEnum docsEnum = null;
            foreach (AtomicReaderContext ctx in reader.Leaves)
            {
                Terms terms = ctx.AtomicReader.GetTerms(Consts.FULL);
                if (terms != null) // cannot really happen, but be on the safe side
                {
                    termsEnum = terms.GetIterator(termsEnum);
                    while (termsEnum.Next() != null)
                    {
                        if (!cache.IsFull)
                        {
                            BytesRef t = termsEnum.Term;
                            // Since we guarantee uniqueness of categories, each term has exactly
                            // one document. Also, since we do not allow removing categories (and
                            // hence documents), there are no deletions in the index. Therefore, it
                            // is sufficient to call next(), and then doc(), exactly once with no
                            // 'validation' checks.
                            FacetLabel cp = new FacetLabel(FacetsConfig.StringToPath(t.Utf8ToString()));
                            docsEnum = termsEnum.Docs(null, docsEnum, DocsFlags.NONE);
                            bool res = cache.Put(cp, docsEnum.NextDoc() + ctx.DocBase);
                            Debug.Assert(!res, "entries should not have been evicted from the cache");
                        }
                        else
                        {
                            // the cache is full and the next put() will evict entries from it,
                            // therefore abort the iteration.
                            aborted = true;
                            break;
                        }
                    }
                }
                if (aborted)
                {
                    break;
                }
            }
        }
        finally
        {
            readerManager.Release(reader);
        }

        cacheIsComplete = !aborted;
        if (cacheIsComplete)
        {
            lock (this)
            {
                // everything is in the cache, so no need to keep readerManager open.
                // this block is executed in a sync block so that it works well with
                // initReaderManager called in parallel.
                readerManager.Dispose();
                readerManager = null;
                initializedReaderManager = false;
            }
        }
    }
}
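// The method above relies on a simple run-once guard: the work is claimed under
// lock(this) by flipping shouldFillCache before performing it, so concurrent
// callers either do the fill exactly once or return immediately. A stripped-down
// sketch of that guard, with hypothetical names:
private readonly object fillLock = new object();
private bool shouldRunFill = true; // mirrors shouldFillCache above

private void PerhapsRunOnce()
{
    lock (fillLock)
    {
        if (!shouldRunFill)
        {
            return; // another caller already claimed (and ran) the fill
        }
        shouldRunFill = false; // claim the work before performing it
        FillCacheOnce();       // hypothetical expensive operation; runs at most once
    }
}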
public virtual void TestCloseUnderException()
{
    int iters = 1000 + 1 + Random.nextInt(20);
    for (int j = 0; j < iters; j++)
    {
        Directory dir = NewDirectory();
        IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(Random, TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
        writer.Commit();
        writer.Dispose();
        DirectoryReader open = DirectoryReader.Open(dir);
        bool throwOnClose = !Rarely();
        AtomicReader wrap = SlowCompositeReaderWrapper.Wrap(open);
        FilterAtomicReader reader = new FilterAtomicReaderAnonymousInnerClassHelper(this, wrap, throwOnClose);
        IList<IndexReader.IReaderClosedListener> listeners = new List<IndexReader.IReaderClosedListener>();
        int listenerCount = Random.Next(20);
        AtomicInt32 count = new AtomicInt32();
        bool faultySet = false;
        for (int i = 0; i < listenerCount; i++)
        {
            if (Rarely())
            {
                faultySet = true;
                reader.AddReaderClosedListener(new FaultyListener());
            }
            else
            {
                count.IncrementAndGet();
                reader.AddReaderClosedListener(new CountListener(count));
            }
        }
        if (!faultySet && !throwOnClose)
        {
            reader.AddReaderClosedListener(new FaultyListener());
        }

        try
        {
            reader.Dispose();
            Assert.Fail("expected Exception");
        }
        catch (InvalidOperationException ex)
        {
            if (throwOnClose)
            {
                Assert.AreEqual("BOOM!", ex.Message);
            }
            else
            {
                Assert.AreEqual("GRRRRRRRRRRRR!", ex.Message);
            }
        }

        try
        {
            var aaa = reader.Fields;
            Assert.Fail("we are closed");
        }
#pragma warning disable 168
        catch (ObjectDisposedException ex)
#pragma warning restore 168
        {
        }

        if (Random.NextBoolean())
        {
            reader.Dispose(); // call it again
        }

        Assert.AreEqual(0, count.Get());
        wrap.Dispose();
        dir.Dispose();
    }
}
private readonly IDictionary<SegmentCoreReaders, bool?> warmed = new WeakDictionary<SegmentCoreReaders, bool?>(); //new ConcurrentHashMapWrapper<SegmentCoreReaders, bool?>(new HashMap<SegmentCoreReaders, bool?>()); // Collections.synchronizedMap(new WeakHashMap<SegmentCoreReaders, bool?>());

public virtual void RunTest(string testName)
{
    failed.Set(false);
    addCount.Set(0);
    delCount.Set(0);
    packCount.Set(0);

    long t0 = Environment.TickCount;

    Random random = new Random(Random().Next());
    LineFileDocs docs = new LineFileDocs(random, DefaultCodecSupportsDocValues());
    DirectoryInfo tempDir = CreateTempDir(testName);
    dir = GetDirectory(NewMockFSDirectory(tempDir)); // some subclasses rely on this being MDW
    if (dir is BaseDirectoryWrapper)
    {
        ((BaseDirectoryWrapper)dir).CheckIndexOnClose = false; // don't double-checkIndex, we do it ourselves.
    }
    MockAnalyzer analyzer = new MockAnalyzer(Random());
    analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH);
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetInfoStream(new FailOnNonBulkMergesInfoStream());

    if (LuceneTestCase.TEST_NIGHTLY)
    {
        // newIWConfig makes smallish max seg size, which
        // results in tons and tons of segments for this test
        // when run nightly:
        MergePolicy mp = conf.MergePolicy;
        if (mp is TieredMergePolicy)
        {
            ((TieredMergePolicy)mp).MaxMergedSegmentMB = 5000.0;
        }
        else if (mp is LogByteSizeMergePolicy)
        {
            ((LogByteSizeMergePolicy)mp).MaxMergeMB = 1000.0;
        }
        else if (mp is LogMergePolicy)
        {
            ((LogMergePolicy)mp).MaxMergeDocs = 100000;
        }
    }

    conf.SetMergedSegmentWarmer(new IndexReaderWarmerAnonymousInnerClassHelper(this));

    if (VERBOSE)
    {
        conf.SetInfoStream(new PrintStreamInfoStreamAnonymousInnerClassHelper(this, Console.Out));
    }

    writer = new IndexWriter(dir, conf);
    TestUtil.ReduceOpenFiles(writer);

    TaskScheduler es = Random().NextBoolean() ? null : TaskScheduler.Default;

    DoAfterWriter(es);

    int NUM_INDEX_THREADS = TestUtil.NextInt(Random(), 2, 4);

    int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;

    ISet<string> delIDs = new ConcurrentHashSet<string>(new HashSet<string>());
    ISet<string> delPackIDs = new ConcurrentHashSet<string>(new HashSet<string>());
    ConcurrentQueue<SubDocs> allSubDocs = new ConcurrentQueue<SubDocs>();

    long stopTime = Environment.TickCount + (RUN_TIME_SEC * 1000);

    ThreadClass[] indexThreads = LaunchIndexingThreads(docs, NUM_INDEX_THREADS, stopTime, delIDs, delPackIDs, allSubDocs.ToList());

    if (VERBOSE)
    {
        Console.WriteLine("TEST: DONE start " + NUM_INDEX_THREADS + " indexing threads [" + (Environment.TickCount - t0) + " ms]");
    }

    // Let index build up a bit
    Thread.Sleep(100);

    DoSearching(es, stopTime);

    if (VERBOSE)
    {
        Console.WriteLine("TEST: all searching done [" + (Environment.TickCount - t0) + " ms]");
    }

    for (int thread = 0; thread < indexThreads.Length; thread++)
    {
        indexThreads[thread].Join();
    }

    if (VERBOSE)
    {
        Console.WriteLine("TEST: done join indexing threads [" + (Environment.TickCount - t0) + " ms]; addCount=" + addCount + " delCount=" + delCount);
    }

    IndexSearcher s = FinalSearcher;
    if (VERBOSE)
    {
        Console.WriteLine("TEST: finalSearcher=" + s);
    }

    assertFalse(failed.Get());

    bool doFail = false;

    // Verify: make sure delIDs are in fact deleted:
    foreach (string id in delIDs)
    {
        TopDocs hits = s.Search(new TermQuery(new Term("docid", id)), 1);
        if (hits.TotalHits != 0)
        {
            Console.WriteLine("doc id=" + id + " is supposed to be deleted, but got " + hits.TotalHits + " hits; first docID=" + hits.ScoreDocs[0].Doc);
            doFail = true;
        }
    }

    // Verify: make sure delPackIDs are in fact deleted:
    foreach (string id in delPackIDs)
    {
        TopDocs hits = s.Search(new TermQuery(new Term("packID", id)), 1);
        if (hits.TotalHits != 0)
        {
            Console.WriteLine("packID=" + id + " is supposed to be deleted, but got " + hits.TotalHits + " matches");
            doFail = true;
        }
    }

    // Verify: make sure each group of sub-docs are still in docID order:
    foreach (SubDocs subDocs in allSubDocs.ToList())
    {
        TopDocs hits = s.Search(new TermQuery(new Term("packID", subDocs.packID)), 20);
        if (!subDocs.deleted)
        {
            // We sort by relevance but the scores should be identical so sort falls back to by docID:
            if (hits.TotalHits != subDocs.subIDs.Count)
            {
                Console.WriteLine("packID=" + subDocs.packID + ": expected " + subDocs.subIDs.Count + " hits but got " + hits.TotalHits);
                doFail = true;
            }
            else
            {
                int lastDocID = -1;
                int startDocID = -1;
                foreach (ScoreDoc scoreDoc in hits.ScoreDocs)
                {
                    int docID = scoreDoc.Doc;
                    if (lastDocID != -1)
                    {
                        assertEquals(1 + lastDocID, docID);
                    }
                    else
                    {
                        startDocID = docID;
                    }
                    lastDocID = docID;
                    Document doc = s.Doc(docID);
                    assertEquals(subDocs.packID, doc.Get("packID"));
                }

                lastDocID = startDocID - 1;
                foreach (string subID in subDocs.subIDs)
                {
                    hits = s.Search(new TermQuery(new Term("docid", subID)), 1);
                    assertEquals(1, hits.TotalHits);
                    int docID = hits.ScoreDocs[0].Doc;
                    if (lastDocID != -1)
                    {
                        assertEquals(1 + lastDocID, docID);
                    }
                    lastDocID = docID;
                }
            }
        }
        else
        {
            // Pack was deleted -- make sure its docs are deleted.
            // We can't verify packID is deleted because we can re-use packID for update:
            foreach (string subID in subDocs.subIDs)
            {
                assertEquals(0, s.Search(new TermQuery(new Term("docid", subID)), 1).TotalHits);
            }
        }
    }

    // Verify: make sure all not-deleted docs are in fact not deleted:
    int endID = Convert.ToInt32(docs.NextDoc().Get("docid"), CultureInfo.InvariantCulture);
    docs.Dispose();

    for (int id = 0; id < endID; id++)
    {
        string stringID = id.ToString(CultureInfo.InvariantCulture);
        if (!delIDs.Contains(stringID))
        {
            TopDocs hits = s.Search(new TermQuery(new Term("docid", stringID)), 1);
            if (hits.TotalHits != 1)
            {
                Console.WriteLine("doc id=" + stringID + " is not supposed to be deleted, but got hitCount=" + hits.TotalHits + "; delIDs=" + string.Join(",", delIDs.ToArray()));
                doFail = true;
            }
        }
    }
    assertFalse(doFail);

    assertEquals("index=" + writer.SegString() + " addCount=" + addCount + " delCount=" + delCount, addCount.Get() - delCount.Get(), s.IndexReader.NumDocs);
    ReleaseSearcher(s);

    writer.Commit();

    assertEquals("index=" + writer.SegString() + " addCount=" + addCount + " delCount=" + delCount, addCount.Get() - delCount.Get(), writer.NumDocs);

    DoClose();
    writer.Dispose(false);

    // Cannot shutdown until after writer is closed because
    // writer has merged segment warmer that uses IS to run
    // searches, and that IS may be using this es!
    /*if (es != null)
    {
        es.shutdown();
        es.awaitTermination(1, TimeUnit.SECONDS);
    }*/

    TestUtil.CheckIndex(dir);
    dir.Dispose();
    //System.IO.Directory.Delete(tempDir.FullName, true);
    TestUtil.Rm(tempDir);

    if (VERBOSE)
    {
        Console.WriteLine("TEST: done [" + (Environment.TickCount - t0) + " ms]");
    }
}
public override void Run()
{
    if (VERBOSE)
    {
        Console.WriteLine(Thread.CurrentThread.Name + ": launch search thread");
    }
    while (Environment.TickCount < stopTimeMS)
    {
        try
        {
            IndexSearcher s = outerInstance.GetCurrentSearcher();
            try
            {
                // Verify 1) IW is correctly setting
                // diagnostics, and 2) segment warming for
                // merged segments is actually happening:
                foreach (AtomicReaderContext sub in s.IndexReader.Leaves)
                {
                    SegmentReader segReader = (SegmentReader)sub.Reader;
                    IDictionary<string, string> diagnostics = segReader.SegmentInfo.Info.Diagnostics;
                    assertNotNull(diagnostics);
                    string source;
                    diagnostics.TryGetValue("source", out source);
                    assertNotNull(source);
                    if (source.Equals("merge", StringComparison.Ordinal))
                    {
                        assertTrue("sub reader " + sub + " wasn't warmed: warmed=" + outerInstance.warmed + " diagnostics=" + diagnostics + " si=" + segReader.SegmentInfo,
                            !outerInstance.m_assertMergedSegmentsWarmed || outerInstance.warmed.ContainsKey(segReader.core));
                    }
                }

                if (s.IndexReader.NumDocs > 0)
                {
                    outerInstance.SmokeTestSearcher(s);
                    Fields fields = MultiFields.GetFields(s.IndexReader);
                    if (fields == null)
                    {
                        continue;
                    }
                    Terms terms = fields.GetTerms("body");
                    if (terms == null)
                    {
                        continue;
                    }

                    TermsEnum termsEnum = terms.GetIterator(null);
                    int seenTermCount = 0;
                    int shift;
                    int trigger;
                    if (totTermCount.Get() < 30)
                    {
                        shift = 0;
                        trigger = 1;
                    }
                    else
                    {
                        trigger = totTermCount.Get() / 30;
                        shift = Random.Next(trigger);
                    }

                    while (Environment.TickCount < stopTimeMS)
                    {
                        BytesRef term = termsEnum.Next();
                        if (term == null)
                        {
                            totTermCount.Set(seenTermCount);
                            break;
                        }
                        seenTermCount++;
                        // search 30 terms
                        if ((seenTermCount + shift) % trigger == 0)
                        {
                            //if (VERBOSE) {
                            //  System.out.println(Thread.currentThread().getName() + " now search body:" + term.Utf8ToString());
                            //}
                            totHits.AddAndGet(outerInstance.RunQuery(s, new TermQuery(new Term("body", term))));
                        }
                    }
                    //if (VERBOSE) {
                    //  System.out.println(Thread.currentThread().getName() + ": search done");
                    //}
                }
            }
            finally
            {
                outerInstance.ReleaseSearcher(s);
            }
        }
        catch (Exception t)
        {
            Console.WriteLine(Thread.CurrentThread.Name + ": hit exc");
            outerInstance.m_failed.Set(true);
            Console.WriteLine(t.ToString());
            throw new Exception(t.ToString(), t);
        }
    }
}
public virtual void Collect(int doc)
{
    Assert.IsTrue(doc < End.Get(), "collected doc=" + doc + " beyond max=" + End);
    Hits.Set(doc);
}