/// <summary>
/// Appends a new packet of buffered deletes to the stream,
/// setting its generation:
/// </summary>
public virtual long Push(FrozenBufferedUpdates packet)
{
    lock (this)
    {
        /*
         * The insert operation must be atomic. If we let threads increment the gen
         * and push the packet afterwards, we risk that packets are out of order.
         * With DWPT this is possible if two or more flushes are racing to push
         * updates. If the pushed packets got out of order, we would lose documents,
         * since deletes would be applied to the wrong segments.
         */
        packet.DelGen = nextGen++;
        Debug.Assert(packet.Any());
        Debug.Assert(CheckDeleteStats());
        Debug.Assert(packet.DelGen < nextGen);
        Debug.Assert(updates.Count == 0 || updates[updates.Count - 1].DelGen < packet.DelGen, "Delete packets must be in order");
        updates.Add(packet);
        numTerms.AddAndGet(packet.numTermDeletes);
        bytesUsed.AddAndGet(packet.bytesUsed);
        if (infoStream.IsEnabled("BD"))
        {
            infoStream.Message("BD", "push deletes " + packet + " delGen=" + packet.DelGen + " packetCount=" + updates.Count + " totBytesUsed=" + bytesUsed.Get());
        }
        Debug.Assert(CheckDeleteStats());
        return packet.DelGen;
    }
}
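// Usage sketch (hypothetical caller; "deleteQueue" and "FreezeGlobalBuffer" are
// assumptions for illustration, not the actual flush code path). The point is
// that generation assignment and enqueueing happen inside a single lock:
//
//   FrozenBufferedUpdates packet = deleteQueue.FreezeGlobalBuffer(null); // hypothetical
//   if (packet != null && packet.Any())
//   {
//       long delGen = bufferedUpdatesStream.Push(packet);
//       // Segments flushed before this call carry a smaller bufferedDeletesGen,
//       // so this packet's deletes will later be applied to them; segments
//       // flushed after already reflect the deletes and are skipped.
//   }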
public override string ToString()
{
    if (VERBOSE_DELETES)
    {
        return "gen=" + gen + " numTerms=" + numTermDeletes
            + ", terms=" + Arrays.ToString(terms)
            + ", queries=" + Arrays.ToString(queries)
            + ", docIDs=" + Arrays.ToString(docIDs)
            + ", numericUpdates=" + Arrays.ToString(numericUpdates)
            + ", binaryUpdates=" + Arrays.ToString(binaryUpdates)
            + ", bytesUsed=" + bytesUsed;
    }
    else
    {
        string s = "gen=" + gen;
        if (numTermDeletes.Get() != 0)
        {
            s += " " + numTermDeletes.Get() + " deleted terms (unique count=" + terms.Count + ")";
        }
        if (queries.Count != 0)
        {
            s += " " + queries.Count + " deleted queries";
        }
        if (docIDs.Count != 0)
        {
            s += " " + docIDs.Count + " deleted docIDs";
        }
        if (numNumericUpdates.Get() != 0)
        {
            s += " " + numNumericUpdates.Get() + " numeric updates (unique count=" + numericUpdates.Count + ")";
        }
        if (numBinaryUpdates.Get() != 0)
        {
            s += " " + numBinaryUpdates.Get() + " binary updates (unique count=" + binaryUpdates.Count + ")";
        }
        if (bytesUsed.Get() != 0)
        {
            s += " bytesUsed=" + bytesUsed.Get();
        }
        return s;
    }
}
public static void Main2(string[] args)
{
    int nThreads = 2;
    int numIters = 200;

    string fname1 = "facet1";
    FacetSpec fspec = new FacetSpec();
    fspec.ExpandSelection = true;
    fspec.MaxCount = 50;
    fspec.MinHitCount = 1;
    fspec.OrderBy = FacetSpec.FacetSortSpec.OrderHitsDesc;

    List<IFacetAccessible> list1 = new List<IFacetAccessible>(numSegs);
    for (int i = 0; i < numSegs; ++i)
    {
        list1.Add(BuildSubAccessible(fname1, i, fspec));
    }

    AtomicInt64 timeCounter = new AtomicInt64();
    Thread[] threads = new Thread[nThreads];
    RunnerThread[] threadStates = new RunnerThread[nThreads];
    for (int i = 0; i < threads.Length; ++i)
    {
        var threadState = new RunnerThread(timeCounter, numIters, fspec, list1);
        threadStates[i] = threadState;
        threads[i] = new Thread(new ThreadStart(threadState.Run));
    }

    // Console.WriteLine("press key to start load test... ");
    // Console.Read();

    foreach (Thread t in threads)
    {
        t.Start();
    }
    foreach (Thread t in threads)
    {
        t.Join();
    }

    Console.WriteLine("average time: " + timeCounter.Get() / numIters / nThreads + " ms");
}
public override void Run()
{
    try
    {
        while (Operations.Get() > 0)
        {
            int oper = rand.Next(100);

            if (oper < CommitPercent)
            {
                if (NumCommitting.IncrementAndGet() <= MaxConcurrentCommits)
                {
                    IDictionary<int, long> newCommittedModel;
                    long version;
                    DirectoryReader oldReader;

                    lock (OuterInstance)
                    {
                        newCommittedModel = new Dictionary<int, long>(OuterInstance.Model); // take a snapshot
                        version = OuterInstance.SnapshotCount++;
                        oldReader = OuterInstance.Reader;
                        oldReader.IncRef(); // increment the reference since we will use this for reopening
                    }

                    DirectoryReader newReader;
                    if (rand.Next(100) < SoftCommitPercent)
                    {
                        // assertU(h.Commit("softCommit","true"));
                        if (Random().NextBoolean())
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": call writer.getReader");
                            }
                            newReader = Writer.GetReader(true);
                        }
                        else
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": reopen reader=" + oldReader + " version=" + version);
                            }
                            newReader = DirectoryReader.OpenIfChanged(oldReader, Writer.w, true);
                        }
                    }
                    else
                    {
                        // assertU(commit());
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": commit+reopen reader=" + oldReader + " version=" + version);
                        }
                        Writer.Commit();
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": now reopen after commit");
                        }
                        newReader = DirectoryReader.OpenIfChanged(oldReader);
                    }

                    // Code below assumes newReader comes with an extra ref:
                    if (newReader == null)
                    {
                        oldReader.IncRef();
                        newReader = oldReader;
                    }
                    oldReader.DecRef();

                    lock (OuterInstance)
                    {
                        // Install the new reader if it's newest (and check the current version,
                        // since another reader may have already been installed).
                        //System.out.println(Thread.currentThread().getName() + ": newVersion=" + newReader.getVersion());
                        Debug.Assert(newReader.RefCount > 0);
                        Debug.Assert(OuterInstance.Reader.RefCount > 0);
                        if (newReader.Version > OuterInstance.Reader.Version)
                        {
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new reader=" + newReader);
                            }
                            OuterInstance.Reader.DecRef();
                            OuterInstance.Reader = newReader;

                            // Silly: forces fieldInfos to be loaded so we don't
                            // hit IOE on later reader.toString
                            newReader.ToString();

                            // Install this snapshot only if it's newer than the current one.
                            if (version >= OuterInstance.CommittedModelClock)
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": install new model version=" + version);
                                }
                                OuterInstance.CommittedModel = newCommittedModel;
                                OuterInstance.CommittedModelClock = version;
                            }
                            else
                            {
                                if (VERBOSE)
                                {
                                    Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new model version=" + version);
                                }
                            }
                        }
                        else
                        {
                            // If it's the same reader, don't decRef.
                            if (VERBOSE)
                            {
                                Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": skip install new reader=" + newReader);
                            }
                            newReader.DecRef();
                        }
                    }
                }
                NumCommitting.DecrementAndGet();
            }
            else
            {
                int id = rand.Next(Ndocs);
                object sync = OuterInstance.SyncArr[id];

                // Sometimes set lastId before we actually change it, to try to
                // uncover more race conditions between writing and reading.
                bool before = Random().NextBoolean();
                if (before)
                {
                    OuterInstance.LastId = id;
                }

                // We can't concurrently update the same document and retain our
                // invariant of increasing values, since we can't guarantee what
                // order the updates will be executed in.
                lock (sync)
                {
                    long val = OuterInstance.Model[id];
                    long nextVal = Math.Abs(val) + 1;

                    if (oper < CommitPercent + DeletePercent)
                    {
                        // assertU("<delete><id>" + id + "</id></delete>");

                        // Add tombstone first.
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(OuterInstance.NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": term delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
                        OuterInstance.Model[id] = -nextVal;
                    }
                    else if (oper < CommitPercent + DeletePercent + DeleteByQueryPercent)
                    {
                        //assertU("<delete><query>id:" + id + "</query></delete>");

                        // Add tombstone first.
                        if (Tombstones)
                        {
                            Document d = new Document();
                            d.Add(OuterInstance.NewStringField("id", "-" + Convert.ToString(id), Documents.Field.Store.YES));
                            d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                            Writer.UpdateDocument(new Term("id", "-" + Convert.ToString(id)), d);
                        }

                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": query delDocs id:" + id + " nextVal=" + nextVal);
                        }
                        Writer.DeleteDocuments(new TermQuery(new Term("id", Convert.ToString(id))));
                        OuterInstance.Model[id] = -nextVal;
                    }
                    else
                    {
                        // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                        Document d = new Document();
                        d.Add(OuterInstance.NewStringField("id", Convert.ToString(id), Documents.Field.Store.YES));
                        d.Add(OuterInstance.NewField(OuterInstance.Field, Convert.ToString(nextVal), StoredOnlyType));
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: " + Thread.CurrentThread.Name + ": u id:" + id + " val=" + nextVal);
                        }
                        Writer.UpdateDocument(new Term("id", Convert.ToString(id)), d);

                        if (Tombstones)
                        {
                            // Remove tombstone after new addition (this should be optional?).
                            Writer.DeleteDocuments(new Term("id", "-" + Convert.ToString(id)));
                        }
                        OuterInstance.Model[id] = nextVal;
                    }
                }

                if (!before)
                {
                    OuterInstance.LastId = id;
                }
            }
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(Thread.CurrentThread.Name + ": FAILED: unexpected exception");
        Console.WriteLine(e.StackTrace);
        throw new Exception(e.Message, e);
    }
}
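// Why the per-id lock matters (an illustrative, hypothetical schedule, not
// output from this test): without "lock (sync)", two threads could both read
// val=3, both compute nextVal=4, and then apply their operations in either
// order, e.g.:
//
//   thread A: reads val=3, updates doc id=5 to val=4, sets Model[5] = 4
//   thread B: reads val=3, deletes doc id=5,          sets Model[5] = -4
//
// The model would then record one outcome while the index reflects the other,
// breaking the invariant that each id's value only ever increases. The per-id
// lock makes the read-modify-write of Model[id] and the matching index
// operation atomic per document.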
public override long RamBytesUsed()
{
    return ramBytesUsed.Get();
}
/// <summary>
/// Return the total size in bytes of all files in this directory. This is
/// currently quantized to <see cref="RAMOutputStream.BUFFER_SIZE"/>.
/// </summary>
public long GetSizeInBytes()
{
    EnsureOpen();
    return m_sizeInBytes.Get();
}
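// Usage sketch (a minimal sketch, assuming Lucene.Net's RAMDirectory; because
// the size is quantized to whole buffers, a directory holding a single 1-byte
// file still reports one full buffer's worth of bytes):
//
//   var dir = new RAMDirectory();
//   using (var output = dir.CreateOutput("test.bin", IOContext.DEFAULT))
//   {
//       output.WriteByte(0x01);
//   }
//   long bytes = dir.GetSizeInBytes(); // a multiple of RAMOutputStream.BUFFER_SIZE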
public virtual void Test()
{
    Directory dir = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(Random(), dir, Similarity, TimeZone);

    long startTime = Environment.TickCount;

    // TODO: replace w/ the @nightly test data; make this
    // into an optional @nightly stress test
    Document doc = new Document();
    Field body = NewTextField("body", "", Field.Store.NO);
    doc.Add(body);
    StringBuilder sb = new StringBuilder();
    for (int docCount = 0; docCount < NUM_DOCS; docCount++)
    {
        int numTerms = Random().Next(10);
        for (int termCount = 0; termCount < numTerms; termCount++)
        {
            sb.Append(Random().NextBoolean() ? "aaa" : "bbb");
            sb.Append(' ');
        }
        body.SetStringValue(sb.ToString());
        w.AddDocument(doc);
        sb.Remove(0, sb.Length);
    }
    IndexReader r = w.Reader;
    w.Dispose();

    long endTime = Environment.TickCount;
    if (VERBOSE)
    {
        Console.WriteLine("BUILD took " + (endTime - startTime));
    }

    IndexSearcher s = NewSearcher(r);

    AtomicBoolean failed = new AtomicBoolean();
    AtomicInt64 netSearch = new AtomicInt64();

    ThreadClass[] threads = new ThreadClass[NUM_SEARCH_THREADS];
    for (int threadID = 0; threadID < NUM_SEARCH_THREADS; threadID++)
    {
        threads[threadID] = new ThreadAnonymousInnerClassHelper(this, s, failed, netSearch);
        threads[threadID].SetDaemon(true);
    }

    foreach (ThreadClass t in threads)
    {
        t.Start();
    }
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }

    if (VERBOSE)
    {
        Console.WriteLine(NUM_SEARCH_THREADS + " threads did " + netSearch.Get() + " searches");
    }

    r.Dispose();
    dir.Dispose();
}
/// <summary>
/// Calls
/// <see cref="IndexWriter.UpdateDocument(Term, IEnumerable{IIndexableField}, Analyzer)"/>
/// and returns the generation that reflects this change.
/// </summary>
public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d, Analyzer a)
{
    writer.UpdateDocument(t, d, a);
    // Return gen as of when indexing finished:
    return indexingGen.Get();
}
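// Usage sketch (a minimal sketch, assuming this wrapper is paired with a
// ControlledRealTimeReopenThread as in Lucene's NRT setup; the variable names
// "trackingWriter", "reopenThread", and "searcherManager" are illustrative):
//
//   long gen = trackingWriter.UpdateDocument(new Term("id", "42"), doc, analyzer);
//   // Block until a searcher that can see this update has been opened:
//   reopenThread.WaitForGeneration(gen);
//   IndexSearcher searcher = searcherManager.Acquire();
//   try
//   {
//       // Searches here are guaranteed to see the updated document.
//   }
//   finally
//   {
//       searcherManager.Release(searcher);
//   }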
public override long Get()
{
    return count.Get();
}