public void Run()
{
    // Flush every cache, then bump the shared iteration counter so the
    // coordinating code can observe that another purge pass has completed.
    Cache.PurgeAllCaches();
    Iters.IncrementAndGet();
}
public override void Run()
{
    // Worker-thread body for a stress test: repeatedly performs a randomized
    // operation (commit+reopen, delete-by-term, delete-by-query, or update)
    // against the shared IndexWriter until the shared operation budget is
    // exhausted. The shared "Model" dictionary mirrors what should be in the
    // index: positive value = live doc, negative value = deleted (tombstoned).
    try
    {
        while (Operations.Get() > 0)
        {
            int oper = rand.Next(100);

            if (oper < CommitPercent)
            {
                // Commit path. NumCommitting bounds how many threads may be
                // inside the commit/reopen section at once; threads over the
                // limit skip the work but still decrement below.
                if (NumCommitting.IncrementAndGet() <= MaxConcurrentCommits)
                {
                    IDictionary<int, long> newCommittedModel;
                    long version;
                    DirectoryReader oldReader;

                    lock (OuterInstance)
                    {
                        newCommittedModel = new Dictionary<int, long>(OuterInstance.Model); // take a snapshot
                        version = OuterInstance.SnapshotCount++;
                        oldReader = OuterInstance.Reader;
                        oldReader.IncRef(); // increment the reference since we will use this for reopening
                    }

                    DirectoryReader newReader;
                    if (rand.Next(100) < SoftCommitPercent)
                    {
                        // Soft commit: reopen an NRT reader without a hard commit.
                        // assertU(h.Commit("softCommit","true"));
                        if (Random.NextBoolean())
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": call writer.getReader");
                            }
                            newReader = Writer.GetReader(true);
                        }
                        else
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": reopen reader=" + oldReader + " version=" + version);
                            }
                            newReader = DirectoryReader.OpenIfChanged(oldReader, Writer.IndexWriter, true);
                        }
                    }
                    else
                    {
                        // Hard commit, then reopen against the committed state.
                        // assertU(commit());
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": commit+reopen reader=" + oldReader + " version=" + version);
                        }
                        Writer.Commit();
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": now reopen after commit");
                        }
                        newReader = DirectoryReader.OpenIfChanged(oldReader);
                    }

                    // Code below assumes newReader comes w/
                    // extra ref: OpenIfChanged returns null when nothing
                    // changed, so fall back to the old reader with an extra ref.
                    if (newReader == null)
                    {
                        oldReader.IncRef();
                        newReader = oldReader;
                    }

                    // Release the ref we took under the lock above.
                    oldReader.DecRef();

                    lock (OuterInstance)
                    {
                        // install the new reader if it's newest (and check the current version since another reader may have already been installed)
                        //System.out.println(Thread.currentThread().getName() + ": newVersion=" + newReader.getVersion());
                        Debug.Assert(newReader.RefCount > 0);
                        Debug.Assert(OuterInstance.Reader.RefCount > 0);
                        if (newReader.Version > OuterInstance.Reader.Version)
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new reader=" + newReader);
                            }
                            OuterInstance.Reader.DecRef();
                            OuterInstance.Reader = newReader;

                            // Silly: forces fieldInfos to be
                            // loaded so we don't hit IOE on later
                            // reader.toString
                            newReader.ToString();

                            // install this snapshot only if it's newer than the current one
                            if (version >= OuterInstance.CommittedModelClock)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new model version=" + version);
                                }
                                OuterInstance.CommittedModel = newCommittedModel;
                                OuterInstance.CommittedModelClock = version;
                            }
                            else
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new model version=" + version);
                                }
                            }
                        }
                        else
                        {
                            // if the same reader, don't decRef.
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new reader=" + newReader);
                            }
                            newReader.DecRef();
                        }
                    }
                }
                // NOTE(review): decremented even when the MaxConcurrentCommits
                // gate above was not passed — pairs with the unconditional
                // IncrementAndGet in the gate condition.
                NumCommitting.DecrementAndGet();
            }
            else
            {
                // Mutation path: pick a random doc id and delete/update it.
                int id = rand.Next(Ndocs);
                object sync = OuterInstance.SyncArr[id];

                // set the lastId before we actually change it sometimes to try and
                // uncover more race conditions between writing and reading
                bool before = Random.NextBoolean();
                if (before)
                {
                    OuterInstance.LastId = id;
                }

                // We can't concurrently update the same document and retain our invariants of increasing values
                // since we can't guarantee what order the updates will be executed.
                lock (sync)
                {
                    long val = OuterInstance.Model[id];
                    long nextVal = Math.Abs(val) + 1; // Abs: a deleted doc's model value is negative

                    if (oper < CommitPercent + DeletePercent)
                    {
                        // Delete by term.
                        // assertU("<delete><id>" + id + "</id></delete>");

                        // add tombstone first
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": term delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
                        OuterInstance.Model[id] = -nextVal; // negative = deleted in the model
                    }
                    else if (oper < CommitPercent + DeletePercent + DeleteByQueryPercent)
                    {
                        // Delete by query.
                        //assertU("<delete><query>id:" + id + "</query></delete>");

                        // add tombstone first
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": query delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new TermQuery(new Term("id", Convert.ToString(id))));
                        OuterInstance.Model[id] = -nextVal; // negative = deleted in the model
                    }
                    else
                    {
                        // Update (add/replace) the document with the next value.
                        // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                        Document d = new Document();
                        d.Add(NewStringField("id", Convert.ToString(id), Documents.Field.Store.YES));
                        d.Add(NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": u id:" + id + " val=" + nextVal);
                        }
                        Writer.UpdateDocument(new Term("id", Convert.ToString(id)), d);
                        if (Tombstones)
                        {
                            // remove tombstone after new addition (this should be optional?)
                            Writer.DeleteDocuments(new Term("id", "-" + Convert.ToString(id)));
                        }
                        OuterInstance.Model[id] = nextVal;
                    }
                }

                if (!before)
                {
                    OuterInstance.LastId = id;
                }
            }
        }
    }
    catch (Exception e)
    {
        // Surface the failure on the console (thread exceptions can otherwise
        // vanish) and rethrow so the test harness sees the thread failed.
        Console.WriteLine(Thread.CurrentThread.Name + ": FAILED: unexpected exception");
        Console.WriteLine(e.StackTrace);
        throw new Exception(e.Message, e);
    }
}
public virtual void IncRef()
{
    // Take one additional reference. The caller must already hold a
    // reference, so the post-increment count is expected to exceed 1.
    int updated = refCount.IncrementAndGet();
    Debug.Assert(updated > 1);
}
public virtual void TestCloseUnderException()
{
    // Verifies that disposing a reader runs all registered closed-listeners
    // and propagates a listener/close failure, while still leaving the reader
    // in the closed state (subsequent access must throw AlreadyClosed and a
    // second Dispose must be a no-op).
    // FIX: was "Random.nextInt(20)" (Java-ism); System.Random exposes Next(),
    // which is what the rest of this method already uses.
    int iters = 1000 + 1 + Random.Next(20);
    for (int j = 0; j < iters; j++)
    {
        Directory dir = NewDirectory();
        IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(Random, TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
        writer.Commit();
        writer.Dispose();
        DirectoryReader open = DirectoryReader.Open(dir);
        bool throwOnClose = !Rarely();
        AtomicReader wrap = SlowCompositeReaderWrapper.Wrap(open);
        FilterAtomicReader reader = new FilterAtomicReaderAnonymousClass(this, wrap, throwOnClose);
        IList<IndexReader.IReaderClosedListener> listeners = new JCG.List<IndexReader.IReaderClosedListener>();
        int listenerCount = Random.Next(20);
        AtomicInt32 count = new AtomicInt32();
        bool faultySet = false;
        for (int i = 0; i < listenerCount; i++)
        {
            if (Rarely())
            {
                faultySet = true;
                reader.AddReaderClosedListener(new FaultyListener());
            }
            else
            {
                count.IncrementAndGet();
                reader.AddReaderClosedListener(new CountListener(count));
            }
        }

        // Ensure at least one failure source exists: either the reader itself
        // throws on close, or we force a faulty listener in.
        if (!faultySet && !throwOnClose)
        {
            reader.AddReaderClosedListener(new FaultyListener());
        }

        try
        {
            reader.Dispose();
            Assert.Fail("expected Exception");
        }
        catch (Exception ex) when (ex.IsIllegalStateException())
        {
            // "BOOM!" comes from the throwing reader, "GRRRRRRRRRRRR!" from
            // the faulty listener — whichever failure source was active.
            if (throwOnClose)
            {
                Assert.AreEqual("BOOM!", ex.Message);
            }
            else
            {
                Assert.AreEqual("GRRRRRRRRRRRR!", ex.Message);
            }
        }

        try
        {
            var aaa = reader.Fields;
            Assert.Fail("we are closed");
        }
        catch (Exception ex) when (ex.IsAlreadyClosedException())
        {
            // expected: the reader must be closed despite the close-time exception
        }

        if (Random.NextBoolean())
        {
            reader.Dispose(); // call it again — double-dispose must be harmless
        }

        // Every CountListener decrements on close; all must have fired.
        Assert.AreEqual(0, count);
        wrap.Dispose();
        dir.Dispose();
    }
}
/// <summary>
/// Look up the given category in the cache and/or the on-disk storage,
/// returning the category's ordinal, or a negative number in case the
/// category does not yet exist in the taxonomy.
/// </summary>
/// <remarks>
/// Synchronized on <c>this</c>: check the cache, possibly bulk-fill it from
/// disk after enough misses, and finally fall back to a term lookup against
/// the taxonomy index via the reader manager.
/// </remarks>
protected virtual int FindCategory(FacetLabel categoryPath)
{
    lock (this)
    {
        // If we can find the category in the cache, or we know the cache is
        // complete, we can return the response directly from it
        int res = cache.Get(categoryPath);
        if (res >= 0 || cacheIsComplete)
        {
            return (res);
        }

        cacheMisses.IncrementAndGet();

        // After a few cache misses, it makes sense to read all the categories
        // from disk and into the cache. The reason not to do this on the first
        // cache miss (or even when opening the writer) is that it will
        // significantly slow down the case when a taxonomy is opened just to
        // add one category. The idea only spending a long time on reading
        // after enough time was spent on cache misses is known as an "online
        // algorithm".
        PerhapsFillCache();
        res = cache.Get(categoryPath);
        if (res >= 0 || cacheIsComplete)
        {
            // if after filling the cache from the info on disk, the category is in it
            // or the cache is complete, return whatever cache.get returned.
            return (res);
        }

        // if we get here, it means the category is not in the cache, and it is not
        // complete, and therefore we must look for the category on disk.

        // We need to get an answer from the on-disk index.
        InitReaderManager();

        int doc = -1;
        DirectoryReader reader = readerManager.Acquire();
        try
        {
            // The category path is stored as a single term in the FULL field.
            BytesRef catTerm = new BytesRef(FacetsConfig.PathToString(categoryPath.Components, categoryPath.Length));
            TermsEnum termsEnum = null; // reuse
            DocsEnum docs = null; // reuse
            foreach (AtomicReaderContext ctx in reader.Leaves)
            {
                Terms terms = ctx.AtomicReader.GetTerms(Consts.FULL);
                if (terms != null)
                {
                    termsEnum = terms.GetIterator(termsEnum);
                    if (termsEnum.SeekExact(catTerm))
                    {
                        // liveDocs=null because the taxonomy has no deletes
                        docs = termsEnum.Docs(null, docs, 0); // freqs not required
                        // if the term was found, we know it has exactly one document.
                        doc = docs.NextDoc() + ctx.DocBase;
                        break;
                    }
                }
            }
        }
        finally
        {
            // Always return the reader to the manager, even on exception.
            readerManager.Release(reader);
        }

        // NOTE(review): ordinal 0 is presumably the root category and is never
        // cached here (doc > 0, not >= 0) — confirm against the cache's seeding.
        if (doc > 0)
        {
            AddToCache(categoryPath, doc);
        }
        return (doc);
    }
}