// Spawns a worker thread that reports the state of the request number typed
// into edtNroSolicitud, publishing results into listView1.
private void btnViewState_Click(object sender, System.EventArgs e)
{
    ThreadClass start = new ThreadClass(edtNroSolicitud.Text, listView1);
    Thread t = new Thread(new ThreadStart(start.ThreadFunc));
    // Configure the thread BEFORE Start(): setting IsBackground after the
    // thread is running races with its completion — the setter throws
    // ThreadStateException once the thread has terminated.
    t.IsBackground = false;
    t.Start();
    // Blocks the caller until t finishes (intentionally disabled — the UI
    // thread must stay responsive):
    //t.Join();
}
// Handles MyEvent raised by the worker thread: replays the event's message
// text as simulated keystrokes through the Win32 keybd_event API, one
// character at a time, wrapping shifted characters in SHIFT down/up events.
// Serialized under thisLock so concurrent events don't interleave keystrokes.
void mThread_MyEvent(object sender, ThreadClass.myThread.MyEventArgs e)
{
    System.Diagnostics.Debug.WriteLine("Received event from thread: " + e.Message);
    lock (thisLock)
    {
        //stringQueue.Enqueue(e.Message);
        //better do the following from a separate thread
        byte[] bBuf = Encoding.ASCII.GetBytes(e.Message);
        //only digits and letters are the same in Byte and VK_ values
        byte bVkey = 0x00;
        bool bShift = false;
        for (int i = 0; i < bBuf.Length; i++)
        {
            // System.Diagnostics.Debug.WriteLine("buffer: " + bBuf[i]);
            //so start a translation
            // Map the ASCII byte to a virtual-key code plus a shift flag.
            bVkey = virtual_key_codes.vkTable[bBuf[i]].VKval;
            bShift = virtual_key_codes.vkTable[bBuf[i]].bShift;
            // Characters with no VK mapping are silently skipped.
            if (bVkey != (byte)virtual_key_codes.V_KEY.VK_undef_0xff)
            {
                if (bShift)
                {
                    // Press SHIFT before the key that needs it.
                    keybd_event((byte)virtual_key_codes.V_KEY.VK_SHIFT, 0x00, keyFlag.KEYEVENTF_KEYDOWN, 0);
                    System.Threading.Thread.Sleep(2);
                }
                //send key
                keybd_event(bVkey, 0x00, keyFlag.KEYEVENTF_KEYDOWN, 0);
                System.Threading.Thread.Sleep(2);
                keybd_event(bVkey, 0x00, keyFlag.KEYEVENTF_KEYUP, 0);
                System.Threading.Thread.Sleep(2);
                if (bShift)
                {
                    // Release SHIFT after the shifted key.
                    keybd_event((byte)virtual_key_codes.V_KEY.VK_SHIFT, 0x00, keyFlag.KEYEVENTF_KEYUP, 0);
                    System.Threading.Thread.Sleep(2);
                }
            }
        }
    }
}
// Verifies that a worker thread runs to completion and increments the shared
// counter exactly once, using Global.semaphore as the completion signal.
public void CreateThread()
{
    Global.global_var = 0;
    ThreadClass my_thread = new ThreadClass();
    Thread thread = new Thread(new ThreadStart(my_thread.Run));
    thread.Start();
    // Fail loudly if the worker never signals instead of silently ignoring
    // the timeout (the previous code discarded WaitOne's return value).
    Assert.IsTrue(Global.semaphore.WaitOne(1000), "worker thread did not signal within 1s");
    // NUnit convention is Assert.AreEqual(expected, actual) — the previous
    // code had the arguments swapped, producing a misleading failure message.
    Assert.AreEqual(1, Global.global_var);
}
// Randomized test: indexes up to NUM_DOCS docs with a sorted doc-values
// field (optionally disallowing duplicate values), force-merges to one
// segment, then hammers the segment reader from several concurrent threads
// until END_TIME. Thread bodies live in ThreadAnonymousInnerClassHelper2.
public virtual void Test2()
{
    Random random = Random();
    int NUM_DOCS = AtLeast(100);
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir, Similarity, TimeZone);
    bool allowDups = random.NextBoolean();
    HashSet<string> seen = new HashSet<string>();
    if (VERBOSE)
    {
        Console.WriteLine("TEST: NUM_DOCS=" + NUM_DOCS + " allowDups=" + allowDups);
    }
    int numDocs = 0;
    IList<BytesRef> docValues = new List<BytesRef>();

    // TODO: deletions
    while (numDocs < NUM_DOCS)
    {
        // Mix simple-ASCII and full-unicode values to exercise both paths.
        string s;
        if (random.NextBoolean())
        {
            s = TestUtil.RandomSimpleString(random);
        }
        else
        {
            s = TestUtil.RandomUnicodeString(random);
        }
        BytesRef br = new BytesRef(s);

        if (!allowDups)
        {
            if (seen.Contains(s))
            {
                continue; // retry with a fresh random value
            }
            seen.Add(s);
        }

        if (VERBOSE)
        {
            Console.WriteLine("  " + numDocs + ": s=" + s);
        }

        Document doc = new Document();
        doc.Add(new SortedDocValuesField("stringdv", br));
        doc.Add(new NumericDocValuesField("id", numDocs));
        docValues.Add(br);
        writer.AddDocument(doc);
        numDocs++;

        if (random.Next(40) == 17)
        {
            // force flush
            writer.Reader.Dispose();
        }
    }

    writer.ForceMerge(1);
    DirectoryReader r = writer.Reader;
    writer.Dispose();

    AtomicReader sr = GetOnlySegmentReader(r);

    // Milliseconds budget: longer run during nightly testing.
    long END_TIME = Environment.TickCount + (TEST_NIGHTLY ? 30 : 1);

    int NUM_THREADS = TestUtil.NextInt(Random(), 1, 10);
    ThreadClass[] threads = new ThreadClass[NUM_THREADS];
    for (int thread = 0; thread < NUM_THREADS; thread++)
    {
        threads[thread] = new ThreadAnonymousInnerClassHelper2(random, docValues, sr, END_TIME);
        threads[thread].Start();
    }

    foreach (ThreadClass thread in threads)
    {
        thread.Join();
    }

    r.Dispose();
    dir.Dispose();
}
// Exercises near-real-time reader reopen while background threads call
// AddIndexes concurrently: the visible doc count observed through successive
// Reopen() calls must never decrease. Currently ignored as a known issue.
public virtual void TestDuringAddIndexes()
{
    Assert.Ignore("Known issue");
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit(null);

    // Source directories the background threads will AddIndexes from.
    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader(null);

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    // Thread-safe exception sink shared with the workers.
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        // Reopen only swaps the reader if something actually changed.
        IndexReader r2 = r.Reopen(null);
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        int count = new IndexSearcher(r).Search(q, 10, null).TotalHits;
        // AddIndexes only ever adds docs, so the hit count must be monotonic.
        Assert.IsTrue(count >= lastCount);
        lastCount = count;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }
    Assert.AreEqual(0, excs.Count);
    r.Close();
    try
    {
        Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    }
    catch
    {
        //DIGY:
        //I think this is an expected behaviour.
        //There isn't any pending files to be deleted after "writer.Close()".
        //But, since lucene.java's test case is designed that way
        //and I might be wrong, I will add a warning
        // Assert only in debug mode, so that CheckIndex is called during release.
#if DEBUG
        Assert.Inconclusive("", 0, dir1.GetOpenDeletedFiles().Count);
#endif
    }
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}
// Lucene.NET variant of TestDuringAddIndexes: instead of Reopen()ing one
// long-lived reader, each polling iteration acquires a fresh NRT reader via
// writer.GetReader() inside a using block. Doc count must stay monotonic
// while background threads run AddIndexes.
public virtual void TestDuringAddIndexes_LuceneNet()
{
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit(null);

    // Source directories the background threads will AddIndexes from.
    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader(null);

    int NUM_THREAD = 5;
    float SECONDS = 3;

    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);
    // Thread-safe exception sink shared with the workers.
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        using (IndexReader r2 = writer.GetReader(null))
        {
            Query q = new TermQuery(new Term("indexname", "test"));
            int count = new IndexSearcher(r2).Search(q, 10, null).TotalHits;
            // AddIndexes only ever adds docs, so the count must be monotonic.
            Assert.IsTrue(count >= lastCount);
            lastCount = count;
        }
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }
    Assert.AreEqual(0, excs.Count);
    r.Close();
    Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);

    writer.Close();

    _TestUtil.CheckIndex(dir1);

    dir1.Close();
}
// Concurrency test for DirectoryTaxonomyWriter: several threads add random
// categories through a randomly chosen writer cache (CL2O, LRU, or no-op),
// then the resulting taxonomy is validated — every recorded category must
// resolve to an ordinal and the parent chain of each path must be consistent.
public virtual void TestConcurrency()
{
    int ncats = AtLeast(100000); // add many categories
    int range = ncats * 3; // affects the categories selection
    AtomicInteger numCats = new AtomicInteger(ncats);
    Directory dir = NewDirectory();
    // Categories actually added by the workers, keyed by path string.
    var values = new ConcurrentDictionary<string, string>();
    double d = Random().NextDouble();
    TaxonomyWriterCache cache;
    if (d < 0.7)
    {
        // this is the fastest, yet most memory consuming cache
        cache = new Cl2oTaxonomyWriterCache(1024, 0.15f, 3);
    }
    else if (TEST_NIGHTLY && d > 0.98)
    {
        // this is the slowest, but tests the writer concurrency when no caching is done.
        // only pick it during NIGHTLY tests, and even then, with very low chances.
        cache = NO_OP_CACHE;
    }
    else
    {
        // this is slower than CL2O, but less memory consuming, and exercises finding categories on disk too.
        cache = new LruTaxonomyWriterCache(ncats / 10);
    }
    if (VERBOSE)
    {
        Console.WriteLine("TEST: use cache=" + cache);
    }
    var tw = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE, cache);
    ThreadClass[] addThreads = new ThreadClass[AtLeast(4)];
    for (int z = 0; z < addThreads.Length; z++)
    {
        addThreads[z] = new ThreadAnonymousInnerClassHelper(this, range, numCats, values, tw);
    }

    foreach (var t in addThreads)
    {
        t.Start();
    }
    foreach (var t in addThreads)
    {
        t.Join();
    }
    tw.Dispose();

    DirectoryTaxonomyReader dtr = new DirectoryTaxonomyReader(dir);
    // +1 for root category
    if (values.Count + 1 != dtr.Size)
    {
        // Dump every unresolvable path before failing, to aid debugging.
        foreach (string value in values.Keys)
        {
            FacetLabel label = new FacetLabel(FacetsConfig.StringToPath(value));
            if (dtr.GetOrdinal(label) == -1)
            {
                Console.WriteLine("FAIL: path=" + label + " not recognized");
            }
        }
        Fail("mismatch number of categories");
    }
    int[] parents = dtr.ParallelTaxonomyArrays.Parents();
    foreach (string cat in values.Keys)
    {
        FacetLabel cp = new FacetLabel(FacetsConfig.StringToPath(cat));
        Assert.True(dtr.GetOrdinal(cp) > 0, "category not found " + cp);
        int level = cp.Length;
        int parentOrd = 0; // for root, parent is always virtual ROOT (ord=0)
        FacetLabel path = new FacetLabel();
        // Walk each prefix of the path, checking parent ordinals link up.
        for (int i = 0; i < level; i++)
        {
            path = cp.Subpath(i + 1);
            int ord = dtr.GetOrdinal(path);
            Assert.AreEqual(parentOrd, parents[ord], "invalid parent for cp=" + path);
            parentOrd = ord; // next level should have this parent
        }
    }
    IOUtils.Close(dtr, dir);
}
// Stops the multicast receiver: clearing mcast_receiver ends the receive
// loop, and closing the socket unblocks any receive currently in progress.
internal void stop()
{
    mcast_receiver = null;
    // Guard so a second stop() call is a no-op instead of throwing
    // NullReferenceException on the already-nulled socket.
    if (mcast_recv_sock != null)
    {
        mcast_recv_sock.Close();
        mcast_recv_sock = null;
    }
}
/// <summary>
/// Wraps the thread that is executing this call.
/// </summary>
/// <returns>A <c>ThreadClass</c> whose <c>Instance</c> is the calling thread.</returns>
public static ThreadClass Current()
{
    var wrapper = new ThreadClass();
    wrapper.Instance = System.Threading.Thread.CurrentThread;
    return wrapper;
}
// Worker body for TestIndexWriterExceptions: builds one document covering
// every store/index/term-vector combination, then for ~3 seconds repeatedly
// UpdateDocument()s it while the enclosing test injects failures through
// doFail. After each (possibly failing) update, a second update must succeed
// — proving the writer survives the injected exception. Any unexpected
// exception is captured in 'failure' and ends the loop.
override public void Run()
{
    Document doc = new Document();

    doc.Add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));
    doc.Add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));
    doc.Add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));
    doc.Add(new Field("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));

    doc.Add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));
    doc.Add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));

    doc.Add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random)));

    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED, enclosingInstance.RandomTVSetting(enclosingInstance.random));
    doc.Add(idField);

    long stopTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 3000;

    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < stopTime)
    {
        // Arm the failure injector for this thread.
        Enclosing_Instance.doFail.Value = this;
        System.String id = "" + r.Next(50);
        idField.SetValue(id);
        Term idTerm = new Term("id", id);
        try
        {
            writer.UpdateDocument(idTerm, doc, null);
        }
        catch (System.SystemException re)
        {
            // Expected path: the injected failure surfaces here.
            if (Lucene.Net.Index.TestIndexWriterExceptions.DEBUG)
            {
                // NOTE(review): this site calls ThreadClass.CurrentThread() while
                // every other site in this method uses ThreadClass.Current() —
                // confirm both exist and are equivalent.
                System.Console.Out.WriteLine(ThreadClass.CurrentThread().Name + ": EXC: ");
                System.Console.Out.WriteLine(re.StackTrace);
            }
            // The index must still be intact after the injected failure.
            try
            {
                _TestUtil.CheckIndex(writer.Directory);
            }
            catch (System.IO.IOException ioe)
            {
                System.Console.Out.WriteLine(ThreadClass.Current().Name + ": unexpected exception1");
                System.Console.Out.WriteLine(ioe.StackTrace);
                failure = ioe;
                break;
            }
        }
        catch (System.Exception t)
        {
            // Anything other than SystemException is a genuine test failure.
            System.Console.Out.WriteLine(ThreadClass.Current().Name + ": unexpected exception2");
            System.Console.Out.WriteLine(t.StackTrace);
            failure = t;
            break;
        }

        // Disarm the failure injector.
        Enclosing_Instance.doFail.Value = null;

        // After a possible exception (above) I should be able
        // to add a new document without hitting an
        // exception:
        try
        {
            writer.UpdateDocument(idTerm, doc, null);
        }
        catch (System.Exception t)
        {
            System.Console.Out.WriteLine(ThreadClass.Current().Name + ": unexpected exception3");
            System.Console.Out.WriteLine(t.StackTrace);
            failure = t;
            break;
        }
    }
}
// Spawns 100 worker threads over listView1 and then blocks until one of
// them signals completion through the shared AutoResetEvent.
private void button1_Click(object sender, System.EventArgs e)
{
    Thread[] ts = new Thread[100];
    ThreadClass start;
    for (int i = 0; i < ts.Length; i++)
    {
        start = new ThreadClass(i.ToString(), listView1);
        ts[i] = new Thread(new ThreadStart(start.ThreadFunc2));
        // Configure the thread BEFORE Start(): setting IsBackground after the
        // thread is running races with its completion — the setter throws
        // ThreadStateException once the thread has terminated.
        ts[i].IsBackground = false;
        // COM apartment mode (MTA: multi-threaded, STA: single-threaded):
        //ts[i].ApartmentState = ApartmentState.MTA;
    }

    // Create the completion event BEFORE starting the workers, so a worker
    // cannot call myEvent.Set() against a still-null field (NRE / lost
    // signal, which would leave WaitOne below blocked forever).
    myEvent = new AutoResetEvent(false);

    for (int i = 0; i < ts.Length; i++)
    {
        ts[i].Start();
    }

    // A worker signals completion via myEvent.Set(); block until the first
    // signal arrives.
    myEvent.WaitOne();

    // To wait for several specific handles instead:
    //WaitHandle.WaitAll(new WaitHandle[]{ts[1], ts[3],....});
}
/// <summary>
/// Wraps the thread that is executing this call in a <c>ThreadClass</c>.
/// </summary>
/// <returns>A wrapper around <see cref="Thread.CurrentThread"/></returns>
public static ThreadClass Current()
{
    var wrapper = new ThreadClass();
    wrapper.threadField = Thread.CurrentThread;
    return wrapper;
}
// One iteration of the slave's job-sending loop: under the _jobs lock, grab
// the oldest unsent job (or wait on the monitor if none is pending), then —
// outside the lock — serialize the job header, subpopulation numbers and
// individuals to DataOut. Returns false (after Shutdown) on any I/O failure,
// true to keep looping.
public virtual bool WriteLoop()
{
    IJob job;

    try
    {
        lock (_jobs)
        {
            // check for an unsent job
            if ((job = OldestUnsentJob()) == null) // automatically marks as sent
            {
                // failed -- wait and drop out of the loop and come in again
                Debug("" + ThreadClass.Current().Name + "Waiting for a job to send");
                SlaveMonitor.WaitOnMonitor(_jobs);
            }
        }
        if (job != null) // we got a job inside our synchronized wait
        {
            // send the job
            Debug("" + ThreadClass.Current().Name + "Sending Job");
            if (job.Type == SlaveEvaluationType.Simple)
            {
                // Tell the server we're evaluating a SimpleProblemForm
                DataOut.Write((byte)SlaveEvaluationType.Simple);
            }
            else
            {
                // Tell the server we're evaluating a IGroupedProblem
                DataOut.Write((byte)SlaveEvaluationType.Grouped);

                // Tell the server whether to count victories only or not.
                DataOut.Write(job.CountVictoriesOnly);
            }

            // transmit number of individuals
            DataOut.Write(job.Inds.Length);

            // Transmit the subpops to the slave
            foreach (var t in job.Subpops)
            {
                DataOut.Write(t);
            }

            Debug("Starting to transmit individuals");

            // Transmit the individuals to the server for evaluation...
            for (var i = 0; i < job.Inds.Length; i++)
            {
                job.Inds[i].WriteIndividual(State, DataOut);
                DataOut.Write(job.UpdateFitness[i]);
            }
            DataOut.Flush();
        }
    }
    catch (Exception)
    {
        // Any failure here means the connection is gone: shut down and tell
        // the caller to stop looping.
        Shutdown(State);
        return (false);
    }
    return (true);
}
// Stress-test for postings: term i appears exactly i times across the whole
// corpus. The (shuffled) postings are drained concurrently by several
// indexing threads, the index is force-merged, and then every term's
// TotalTermFreq must equal its numeric value.
public virtual void Test()
{
    IList<string> postingsList = new List<string>();
    int numTerms = AtLeast(300);
    int maxTermsPerDoc = TestUtil.NextInt(Random(), 10, 20);
    bool isSimpleText = "SimpleText".Equals(TestUtil.GetPostingsFormat("field"));

    IndexWriterConfig iwc = NewIndexWriterConfig(Random(), TEST_VERSION_CURRENT, new MockAnalyzer(Random()));

    if ((isSimpleText || iwc.MergePolicy is MockRandomMergePolicy) && (TEST_NIGHTLY || RANDOM_MULTIPLIER > 1))
    {
        // Otherwise test can take way too long (> 2 hours)
        numTerms /= 2;
    }
    if (VERBOSE)
    {
        Console.WriteLine("maxTermsPerDoc=" + maxTermsPerDoc);
        Console.WriteLine("numTerms=" + numTerms);
    }
    // Term "i" occurs exactly i times in the final corpus.
    for (int i = 0; i < numTerms; i++)
    {
        string term = Convert.ToString(i);
        for (int j = 0; j < i; j++)
        {
            postingsList.Add(term);
        }
    }

    postingsList = CollectionsHelper.Shuffle(postingsList);

    // Shared work queue drained concurrently by the indexing threads.
    ConcurrentQueue<string> postings = new ConcurrentQueue<string>(postingsList);

    Directory dir = NewFSDirectory(CreateTempDir(GetFullMethodName()));

    RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwc);

    int threadCount = TestUtil.NextInt(Random(), 1, 5);
    if (VERBOSE)
    {
        Console.WriteLine("config: " + iw.w.Config);
        Console.WriteLine("threadCount=" + threadCount);
    }

    // Randomize field options so the test covers several index formats.
    Field prototype = NewTextField("field", "", Field.Store.NO);
    FieldType fieldType = new FieldType((FieldType)prototype.FieldType);
    if (Random().NextBoolean())
    {
        fieldType.OmitNorms = true;
    }
    int options = Random().Next(3);
    if (options == 0)
    {
        fieldType.IndexOptions = FieldInfo.IndexOptions.DOCS_AND_FREQS; // we dont actually need positions
        fieldType.StoreTermVectors = true; // but enforce term vectors when we do this so we check SOMETHING
    }
    else if (options == 1 && !DoesntSupportOffsets.Contains(TestUtil.GetPostingsFormat("field")))
    {
        fieldType.IndexOptions = FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    }
    // else just positions

    ThreadClass[] threads = new ThreadClass[threadCount];
    // All workers wait on this latch so they start indexing simultaneously.
    CountdownEvent startingGun = new CountdownEvent(1);

    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        Random threadRandom = new Random(Random().Next());
        Document document = new Document();
        Field field = new Field("field", "", fieldType);
        document.Add(field);
        threads[threadID] = new ThreadAnonymousInnerClassHelper(this, numTerms, maxTermsPerDoc, postings, iw, startingGun, threadRandom, document, field);
        threads[threadID].Start();
    }
    startingGun.Signal();
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }

    iw.ForceMerge(1);
    DirectoryReader ir = iw.Reader;
    Assert.AreEqual(1, ir.Leaves.Count);
    AtomicReader air = (AtomicReader)ir.Leaves[0].Reader;
    Terms terms = air.Terms("field");
    // numTerms-1 because there cannot be a term 0 with 0 postings:
    Assert.AreEqual(numTerms - 1, terms.Size());
    TermsEnum termsEnum = terms.Iterator(null);
    BytesRef termBR;
    while ((termBR = termsEnum.Next()) != null)
    {
        int value = Convert.ToInt32(termBR.Utf8ToString());
        Assert.AreEqual(value, termsEnum.TotalTermFreq());
        // don't really need to check more than this, as CheckIndex
        // will verify that totalTermFreq == total number of positions seen
        // from a docsAndPositionsEnum.
    }
    ir.Dispose();
    iw.Dispose();
    dir.Dispose();
}
// SlaveMonitor accept loop: blocks on the server socket until shutdown,
// and for each connecting slave performs the handshake — optional stream
// compression, read slave name, send a random seed plus any problem-specific
// data — then registers the slave. Individual I/O failures with one slave
// are swallowed so the monitor keeps serving others.
public virtual void Run()
{
    ThreadClass.Current().Name = "SlaveMonitor:: ";
    TcpClient slaveSock;

    while (!Enclosing_Instance.ShutdownInProgress)
    {
        slaveSock = null;
        // Retry Accept until a slave connects or shutdown begins.
        while (slaveSock == null && !Enclosing_Instance.ShutdownInProgress)
        {
            try
            {
                slaveSock = Enclosing_Instance.ServSock.AcceptTcpClient();
            }
            catch (IOException)
            {
                slaveSock = null;
            }
        }
        Enclosing_Instance.Debug(ThreadClass.Current().Name + " Slave attempts to connect.");

        if (Enclosing_Instance.ShutdownInProgress)
        {
            break;
        }

        try
        {
            Stream tmpIn = slaveSock.GetStream();
            Stream tmpOut = slaveSock.GetStream();
            if (Enclosing_Instance.UseCompression)
            {
                /*
                 * state.Output.Fatal("JDK 1.5 has broken compression. For now, you must set eval.compression=false");
                 * tmpIn = new CompressingInputStream(tmpIn);
                 * tmpOut = new CompressingOutputStream(tmpOut);
                 */
                tmpIn = Output.MakeCompressingInputStream(tmpIn);
                tmpOut = Output.MakeCompressingOutputStream(tmpOut);
                if (tmpIn == null || tmpOut == null)
                {
                    Output.InitialError(
                        "You do not appear to have JZLib installed on your system, and so must set eval.compression=false. " + "To get JZLib, download from the ECJ website or from http://www.jcraft.com/jzlib/", false);
                    Environment.Exit(1); // This was originally part of the InitialError call in ECJ. But we make SlaveMonitor responsible.
                }
            }

            var dataIn = new BinaryReader(tmpIn);
            var dataOut = new BinaryWriter(tmpOut);
            var slaveName = dataIn.ReadString();

            // Each slave gets its own seed; bump so the next slave differs.
            dataOut.Write(Enclosing_Instance.RandomSeed);
            Enclosing_Instance.RandomSeed += SEED_INCREMENT;

            // Write random state for eval thread to slave
            dataOut.Flush();

            // write out additional data as necessary
            _problemPrototype.SendAdditionalData(_state, dataOut);
            dataOut.Flush();

            // write out additional data as necessary
            // NOTE(review): this SendAdditionalData/Flush pair exactly repeats the
            // two statements above, so the additional data is transmitted twice.
            // Looks like a copy-paste slip — confirm against the slave-side read
            // protocol before removing either copy.
            _problemPrototype.SendAdditionalData(_state, dataOut);
            dataOut.Flush();

            Enclosing_Instance.RegisterSlave(_state, slaveName, slaveSock, dataOut, dataIn);
            _state.Output.SystemMessage("Slave " + slaveName + " connected successfully.");
        }
        catch (IOException)
        {
            // Handshake with this slave failed; keep accepting others.
        }
    }

    Enclosing_Instance.Debug(ThreadClass.Current().Name + " The monitor is shutting down.");
}
// Runs WriteLineDocTask from 10 threads against one output file, then
// verifies the file contains exactly one header line plus one complete,
// untorn line per thread (all three fields of a line carry the same
// thread-name suffix, proving lines were not interleaved).
public void TestMultiThreaded()
{
    FileInfo file = new FileInfo(Path.Combine(getWorkDir().FullName, "one-line"));
    PerfRunData runData = createPerfRunData(file, false, typeof(ThreadingDocMaker).AssemblyQualifiedName);
    WriteLineDocTask wldt = new WriteLineDocTask(runData);
    ThreadClass[] threads = new ThreadClass[10];
    for (int i = 0; i < threads.Length; i++)
    {
        threads[i] = new ThreadAnonymousHelper("t" + i, wldt);
    }

    foreach (ThreadClass t in threads)
    {
        t.Start();
    }
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }

    wldt.Dispose();

    // LUCENENET specific - need to transfer any exception that occurred back to this thread
    foreach (ThreadClass t in threads)
    {
        var thread = t as ThreadAnonymousHelper;
        if (thread?.Exception != null)
        {
            throw thread.Exception;
        }
    }

    ISet<String> ids = new HashSet<string>();
    // FileShare.ReadWrite so the read works even if a handle lingers.
    TextReader br = new StreamReader(new FileStream(file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite), Encoding.UTF8);
    try
    {
        String line = br.ReadLine();
        assertHeaderLine(line); // header line is written once, no matter how many threads there are
        for (int i = 0; i < threads.Length; i++)
        {
            line = br.ReadLine();
            String[] parts = line.Split(WriteLineDocTask.SEP).TrimEnd();
            assertEquals(line, 3, parts.Length);
            // check that all thread names written are the same in the same line
            String tname = parts[0].Substring(parts[0].IndexOf('_'));
            ids.add(tname);
            assertEquals(tname, parts[1].Substring(parts[1].IndexOf('_')));
            assertEquals(tname, parts[2].Substring(parts[2].IndexOf('_')));
        }
        // only threads.length lines should exist
        assertNull(br.ReadLine());
        assertEquals(threads.Length, ids.size());
    }
    finally
    {
        br.Dispose();
    }
}
// Writes an IndexFileDeleter ("IFD") trace line to the info stream, tagged
// with the current time and thread name.
private void Message(System.String message)
{
    // The previous code used "new DateTime()", which is DateTime.MinValue —
    // every log line was stamped 0001-01-01 (a mistranslation of Java's
    // "new Date()"). DateTime.Now gives the actual current timestamp.
    infoStream.WriteLine("IFD [" + DateTime.Now.ToString() + "; " + ThreadClass.Current().Name + "]: " + message);
}
/// <summary>
/// Creates a <c>Randomness</c> for the thread that is currently executing,
/// seeded with <paramref name="seed"/> and decorated by the given
/// <paramref name="decorators"/>. Delegates to the (thread, seed, decorators)
/// constructor.
/// </summary>
public Randomness(int seed, params ISeedDecorator[] decorators)
    : this(ThreadClass.Current(), seed, decorators)
{
}
// Connection reaper loop: every reaper_interval ms, scans the connection
// table under its lock, destroys connections idle longer than
// conn_expire_time, and removes them from the table. Exits when the table
// empties or the reaper thread reference 't' is cleared.
public virtual void Run()
{
    Connection value_Renamed;
    System.Collections.DictionaryEntry entry;
    long curr_time;

    // Addresses of connections destroyed in the current sweep.
    ArrayList temp = new ArrayList();

    if (enclosingInstance.NCacheLog.IsInfoEnabled)
        enclosingInstance.NCacheLog.Info("connection reaper thread was started. Number of connections=" + Enclosing_Instance.conns_NIC_1.Count + ", reaper_interval=" + Enclosing_Instance.reaper_interval + ", conn_expire_time=" + Enclosing_Instance.conn_expire_time);

    while (Enclosing_Instance.conns_NIC_1.Count > 0 && t != null)
    {
        // first sleep
        Util.Util.sleep(Enclosing_Instance.reaper_interval);

        if (enclosingInstance.NCacheLog.IsInfoEnabled)
            enclosingInstance.NCacheLog.Info("ConnectionTable.Reaper", "b4 lock conns.SyncRoot");
        lock (Enclosing_Instance.conns_NIC_1.SyncRoot)
        {
            curr_time = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
            for (System.Collections.IEnumerator it = Enclosing_Instance.conns_NIC_1.GetEnumerator(); it.MoveNext();)
            {
                entry = (System.Collections.DictionaryEntry)it.Current;
                value_Renamed = (Connection)entry.Value;

                if (enclosingInstance.NCacheLog.IsInfoEnabled)
                    enclosingInstance.NCacheLog.Info("connection is " + ((curr_time - value_Renamed.last_access) / 1000) + " seconds old (curr-time=" + curr_time + ", last_access=" + value_Renamed.last_access + ')');
                if (value_Renamed.last_access + Enclosing_Instance.conn_expire_time < curr_time)
                {
                    if (enclosingInstance.NCacheLog.IsInfoEnabled)
                        enclosingInstance.NCacheLog.Info("connection " + value_Renamed + " has been idle for too long (conn_expire_time=" + Enclosing_Instance.conn_expire_time + "), will be removed");
                    value_Renamed.Destroy();
                    // FIX: remember the connection's KEY (its Address). The old
                    // code stored it.Current — a boxed DictionaryEntry — which the
                    // removal loop below then cast to Address, throwing
                    // InvalidCastException the first time a connection expired.
                    temp.Add(entry.Key);
                }
            }

            // Now remove closed connections from the connection hashtable.
            // Hashtable.Remove is a no-op for absent keys, so no Contains
            // pre-check (and its double lookup) is needed.
            for (int i = 0; i < temp.Count; i++)
            {
                Enclosing_Instance.conns_NIC_1.Remove((Address)temp[i]);
            }

            // FIX: reset for the next sweep. Previously entries were only set
            // to null, so stale slots accumulated across sweeps and a later
            // lookup on a null key would throw ArgumentNullException.
            temp.Clear();
        }
        if (enclosingInstance.NCacheLog.IsInfoEnabled)
        {
            enclosingInstance.NCacheLog.Info("ConnectionTable.Reaper", "after lock conns.SyncRoot");
        }
    }
    if (enclosingInstance.NCacheLog.IsInfoEnabled)
    {
        enclosingInstance.NCacheLog.Info("reaper terminated");
    }
    t = null;
}
// Concurrency test for combined index + taxonomy writing: several threads
// add faceted documents through a shared IndexWriter/DirectoryTaxonomyWriter
// pair, then the taxonomy is validated — every recorded category must
// resolve to an ordinal and each path's parent chain must be consistent.
public virtual void TestConcurrency()
{
    AtomicInt32 numDocs = new AtomicInt32(AtLeast(10000));
    Directory indexDir = NewDirectory();
    Directory taxoDir = NewDirectory();
    // Categories actually added by the workers, keyed by path string.
    ConcurrentDictionary<string, string> values = new ConcurrentDictionary<string, string>();
    IndexWriter iw = new IndexWriter(indexDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, null));
    var tw = new DirectoryTaxonomyWriter(taxoDir, OpenMode.CREATE, NewTaxoWriterCache(numDocs.Get()));
    ThreadClass[] indexThreads = new ThreadClass[AtLeast(4)];
    FacetsConfig config = new FacetsConfig();
    // Dimensions l1.0 .. l1.9 are hierarchical and multi-valued.
    for (int i = 0; i < 10; i++)
    {
        config.SetHierarchical("l1." + i, true);
        config.SetMultiValued("l1." + i, true);
    }
    for (int i = 0; i < indexThreads.Length; i++)
    {
        indexThreads[i] = new ThreadAnonymousInnerClassHelper(this, numDocs, values, iw, tw, config);
    }
    foreach (ThreadClass t in indexThreads)
    {
        t.Start();
    }
    foreach (ThreadClass t in indexThreads)
    {
        t.Join();
    }
    var tr = new DirectoryTaxonomyReader(tw);
    // +1 for root category
    if (values.Count + 1 != tr.Count)
    {
        // Dump every unresolvable path before failing, to aid debugging.
        foreach (string value in values.Keys)
        {
            FacetLabel label = new FacetLabel(FacetsConfig.StringToPath(value));
            if (tr.GetOrdinal(label) == -1)
            {
                Console.WriteLine("FAIL: path=" + label + " not recognized");
            }
        }
        Fail("mismatch number of categories");
    }
    int[] parents = tr.ParallelTaxonomyArrays.Parents;
    foreach (string cat in values.Keys)
    {
        FacetLabel cp = new FacetLabel(FacetsConfig.StringToPath(cat));
        Assert.True(tr.GetOrdinal(cp) > 0, "category not found " + cp);
        int level = cp.Length;
        int parentOrd = 0; // for root, parent is always virtual ROOT (ord=0)
        FacetLabel path = null;
        // Walk each prefix of the path, checking parent ordinals link up.
        for (int i = 0; i < level; i++)
        {
            path = cp.Subpath(i + 1);
            int ord = tr.GetOrdinal(path);
            Assert.AreEqual(parentOrd, parents[ord], "invalid parent for cp=" + path);
            parentOrd = ord; // next level should have this parent
        }
    }
    IOUtils.Dispose(tw, iw, tr, taxoDir, indexDir);
}
// Lazily spins up this connection's background handler thread; subsequent
// calls are no-ops once the handler exists.
internal virtual void init()
{
    if (NCacheLog.IsInfoEnabled)
    {
        NCacheLog.Info("connection was created to " + peer_addr);
    }

    // Roland Kurmann 4/7/2003, put in thread_group
    if (handler != null)
    {
        return; // handler already running
    }

    handler = new ThreadClass(new System.Threading.ThreadStart(this.Run), "ConnectionTable.Connection.HandlerThread");
    handler.IsBackground = true;
    handler.Start();
}
/// <summary>
/// Gets a <c>ThreadClass</c> wrapper for the thread executing this call.
/// </summary>
/// <returns>The currently running thread, wrapped</returns>
public static ThreadClass Current()
{
    ThreadClass wrapper = new ThreadClass();
    wrapper.Instance = System.Threading.Thread.CurrentThread;
    return wrapper;
}
// Tears the connection down: closes the socket (which normally unblocks the
// handler's receive and lets it exit), force-aborts the handler if it is
// still alive, then releases the input stream.
internal virtual void Destroy()
{
    closeSocket(); // should terminate handler as well
    if (handler != null && handler.IsAlive)
    {
        try
        {
            // Flush pending log output before the abort can tear the thread down.
            NCacheLog.Flush();
            handler.Abort();
        }
        catch (Exception)
        {
            // best-effort shutdown: a failed abort here is not actionable
        }
    }
    handler = null;
    if (inStream != null)
        inStream.Close();
}
// Captures the owning test fixture and allocates the worker-thread slots.
private void InitBlock(TestIndexWriterReader enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
    this.threads = new ThreadClass[NUM_THREADS];
}
// Receive loop for this connection. Blocks on the socket reading
// length-prefixed batches: [4-byte batch length][message count][per message:
// total length, message length, flags + serialized body, optional payload
// chunks]. Control messages carrying a ConnectionHeader (close/leave/
// secondary-address/initialization-phase) are consumed here; everything else
// is stamped with the peer address and handed up the stack via receive().
// Runs until 'handler' is cleared or an unrecoverable socket error occurs;
// on a graceful LEAVE the connection table is notified at the end.
public virtual void Run()
{
    Message msg = null;
    byte[] buf = null;
    int len = 0;
    while (handler != null)
    {
        Stream stmIn = null;
        BinaryReader msgReader = null;
        try
        {
            if (sock == null)
            {
                NCacheLog.Error("input stream is null !");
                break;
            }
            // First 4 bytes on the wire are the total batch length.
            byte[] lenBuff = new byte[4];
            buf = null;
            Util.Util.ReadInput(sock, lenBuff, 0, lenBuff.Length);
            len = Util.Util.convertToInt32(lenBuff);
            // Reuse the shared receive buffer unless the batch is larger.
            buf = receiveBuffer;
            if (len > receiveBuffer.Length)
                buf = new byte[len];

            HPTimeStats socketReceiveTimeStats = null;
            if (enclosingInstance.enableMonitoring)
            {
                socketReceiveTimeStats = new HPTimeStats();
                socketReceiveTimeStats.BeginSample();
            }
            DateTime dt = DateTime.Now;
            int recLength = Util.Util.ReadInput(sock, buf, 0, len);
            DateTime now = DateTime.Now;
            TimeSpan ts = now - dt;
            // Track the slowest receive observed on this connection.
            if (ts.TotalMilliseconds > _worsRecvTime.TotalMilliseconds)
                _worsRecvTime = ts;

            if (socketReceiveTimeStats != null)
            {
                socketReceiveTimeStats.EndSample();
                enclosingInstance.enclosingInstance.Stack.perfStatsColl.IncrementSocketReceiveTimeStats((long)socketReceiveTimeStats.Current);
                enclosingInstance.enclosingInstance.Stack.perfStatsColl.IncrementSocketReceiveSizeStats((long)len);
            }
            enclosingInstance.publishBytesReceivedStats(len + 4);

            if (recLength == len)
            {
                int noOfMessages = Util.Util.convertToInt32(buf, 0);
                int messageBaseIndex = 4;
                for (int msgCount = 0; msgCount < noOfMessages; msgCount++)
                {
                    int totalMessagelength = Util.Util.convertToInt32(buf, messageBaseIndex);
                    int messageLength = Util.Util.convertToInt32(buf, messageBaseIndex + 4);

                    // Copy the serialized body into its own stream for deserialization.
                    stmIn = new MemoryStream();
                    stmIn.Position = 0;
                    stmIn.Write(buf, messageBaseIndex + 8, messageLength);
                    stmIn.Position = 0;
                    msgReader = new BinaryReader(stmIn, new UTF8Encoding(true));
                    FlagsByte flags = new FlagsByte();
                    flags.DataByte = msgReader.ReadByte();
                    // TRANS flag selects the lightweight local deserializer.
                    if (flags.AnyOn(FlagsByte.Flag.TRANS))
                    {
                        Message tmpMsg = new Message();
                        tmpMsg.DeserializeLocal(msgReader);
                        msg = tmpMsg;
                    }
                    else
                    {
                        msg = (Message)CompactBinaryFormatter.Deserialize(stmIn, null, false, null);
                    }

                    if (msg != null)
                    {
                        // Bytes beyond the serialized message are user payload,
                        // split into chunks of at most LARGE_OBJECT_SIZE bytes.
                        int payLoadLength = totalMessagelength - messageLength - 4;
                        if (payLoadLength > 0)
                        {
                            int noOfChunks = payLoadLength / LARGE_OBJECT_SIZE;
                            noOfChunks += (payLoadLength - (noOfChunks * LARGE_OBJECT_SIZE)) != 0 ? 1 : 0;
                            Array payload = new Array[noOfChunks];

                            int nextChunk = 0;
                            int nextChunkSize = 0;
                            int startIndex = messageBaseIndex + 8 + messageLength;

                            for (int i = 0; i < noOfChunks; i++)
                            {
                                nextChunkSize = payLoadLength - nextChunk;
                                if (nextChunkSize > LARGE_OBJECT_SIZE)
                                    nextChunkSize = LARGE_OBJECT_SIZE;

                                byte[] binaryChunk = new byte[nextChunkSize];
                                Buffer.BlockCopy(buf, startIndex, binaryChunk, 0, nextChunkSize);
                                nextChunk += nextChunkSize;
                                startIndex += nextChunkSize;

                                payload.SetValue(binaryChunk, i);
                            }

                            msg.Payload = payload;
                        }
                        messageBaseIndex += (totalMessagelength + 4);
                        // Control messages are consumed here, never forwarded.
                        ConnectionHeader hdr = msg.getHeader("ConnectionHeader") as ConnectionHeader;
                        if (hdr != null)
                        {
                            switch (hdr.Type)
                            {
                                case ConnectionHeader.CLOSE_SILENT:
                                    if (NCacheLog.IsInfoEnabled)
                                        NCacheLog.Info("Connection.Run", "connection being closed silently");
                                    this.self_close = true;
                                    handler = null; // ends the outer while loop
                                    continue;

                                case ConnectionHeader.LEAVE:
                                    //The node is leaving the cluster gracefully.
                                    leavingGracefully = true;
                                    if (NCacheLog.IsInfoEnabled)
                                        NCacheLog.Info("Connection.Run", peer_addr.ToString() + " is leaving gracefully");
                                    handler = null; // ends the outer while loop
                                    continue;

                                case ConnectionHeader.GET_SECOND_ADDRESS_REQ:
                                    SendSecondaryAddressofPeer();
                                    continue;

                                case ConnectionHeader.GET_SECOND_ADDRESS_RSP:
                                    // Publish the peer's secondary address and wake waiters.
                                    lock (get_addr_sync)
                                    {
                                        secondaryAddress = hdr.MySecondaryAddress;
                                        Monitor.PulseAll(get_addr_sync);
                                    }
                                    continue;

                                case ConnectionHeader.ARE_U_IN_INITIALIZATION_PHASE:
                                    try
                                    {
                                        bool iMinInitializationPhase = !enclosingInstance.enclosingInstance.Stack.IsOperational;
                                        SendInitializationPhaseRsp(iMinInitializationPhase);
                                    }
                                    catch (Exception e)
                                    {
                                        // best-effort reply; failure is ignored
                                    }
                                    break;

                                case ConnectionHeader.INITIALIZATION_PHASE_RSP:
                                    lock (initializationPhase_mutex)
                                    {
                                        inInitializationPhase = hdr.InitializationPhase;
                                        // NOTE(review): this pulses 'inInitializationPhase' (a
                                        // field) rather than 'initializationPhase_mutex', the
                                        // object actually locked — confirm which monitor the
                                        // waiters wait on; pulsing a non-locked object throws
                                        // SynchronizationLockException.
                                        Monitor.PulseAll(inInitializationPhase);
                                    }
                                    break;
                            }
                        }
                    }
                    // Stamp origin and deliver the message up the stack.
                    msg.Src = peer_addr;
                    msg.MarkArrived();
                    Enclosing_Instance.receive(msg); // calls receiver.receiver(msg)
                }
            }
        }
        catch (ObjectDisposedException)
        {
            lock (send_mutex)
            {
                socket_error = true;
                isConnected = false;
            }
            break;
        }
        catch (ThreadAbortException)
        {
            lock (send_mutex)
            {
                socket_error = true;
                isConnected = false;
            }
            break;
        }
        catch (ThreadInterruptedException)
        {
            lock (send_mutex)
            {
                socket_error = true;
                isConnected = false;
            }
            break;
        }
        catch (System.OutOfMemoryException memExc)
        {
            lock (send_mutex)
            {
                isConnected = false;
            }
            NCacheLog.CriticalInfo("Connection.Run()", Enclosing_Instance.local_addr + "-->" + peer_addr.ToString() + " memory exception " + memExc.ToString());
            break; // continue;
        }
        catch (ExtSocketException sock_exp)
        {
            lock (send_mutex)
            {
                socket_error = true;
                isConnected = false;
            }
            // peer closed connection
            NCacheLog.Error("Connection.Run()", Enclosing_Instance.local_addr + "-->" + peer_addr.ToString() + " exception is " + sock_exp.Message);
            break;
        }
        catch (System.IO.EndOfStreamException eof_ex)
        {
            lock (send_mutex)
            {
                isConnected = false;
            }
            // peer closed connection
            NCacheLog.Error("Connection.Run()", "data :" + len + Enclosing_Instance.local_addr + "-->" + peer_addr.ToString() + " exception is " + eof_ex);
            break;
        }
        catch (System.Net.Sockets.SocketException io_ex)
        {
            lock (send_mutex)
            {
                socket_error = true;
                isConnected = false;
            }
            NCacheLog.Error("Connection.Run()", Enclosing_Instance.local_addr + "-->" + peer_addr.ToString() + " exception is " + io_ex.Message);
            break;
        }
        catch (System.ArgumentException ex)
        {
            lock (send_mutex)
            {
                isConnected = false;
            }
            break;
        }
        catch (System.Exception e)
        {
            lock (send_mutex)
            {
                isConnected = false;
            }
            NCacheLog.Error("Connection.Run()", Enclosing_Instance.local_addr + "-->" + peer_addr.ToString() + " exception is " + e);
            break;
        }
        finally
        {
            // Release the per-batch deserialization resources.
            if (stmIn != null)
                stmIn.Close();
            if (msgReader != null)
                msgReader.Close();
        }
    }

    handler = null;
    if (LeavingGracefully)
    {
        // Tell the connection table so it can drop this peer cleanly.
        enclosingInstance.notifyConnectionClosed(peer_addr);
        enclosingInstance.remove(peer_addr, IsPrimary);
    }
}
/// <summary>
/// Verifies that disposing a SearcherManager while a background refresh is in
/// flight causes the refresher to fail cleanly (no searcher obtained, manager
/// rejects further Acquire calls with ObjectDisposedException).
/// </summary>
public virtual void TestIntermediateClose()
{
    Directory dir = NewDirectory();
    // Test can deadlock if we use SMS (SerialMergeScheduler):
    IConcurrentMergeScheduler scheduler;
#if !FEATURE_CONCURRENTMERGESCHEDULER
    scheduler = new TaskMergeScheduler();
#else
    scheduler = new ConcurrentMergeScheduler();
#endif
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(scheduler));
    writer.AddDocument(new Document());
    writer.Commit();
    // Handshake latches between this thread and the warming SearcherFactory:
    // awaitEnterWarm fires once the factory has been entered; awaitClose lets
    // the factory proceed only after we have disposed the manager.
    CountdownEvent awaitEnterWarm = new CountdownEvent(1);
    CountdownEvent awaitClose = new CountdownEvent(1);
    AtomicBoolean triedReopen = new AtomicBoolean(false);
    //TaskScheduler es = Random().NextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory("testIntermediateClose"));
    TaskScheduler es = Random().NextBoolean() ? null : TaskScheduler.Default;
    SearcherFactory factory = new SearcherFactoryAnonymousInnerClassHelper2(this, awaitEnterWarm, awaitClose, triedReopen, es);
    // Randomly exercise both construction paths (directory-based and writer-based).
    SearcherManager searcherManager = Random().NextBoolean() ? new SearcherManager(dir, factory) : new SearcherManager(writer, Random().NextBoolean(), factory);
    if (VERBOSE)
    {
        Console.WriteLine("sm created");
    }
    // Sanity check: the initial searcher sees the first committed document.
    IndexSearcher searcher = searcherManager.Acquire();
    try
    {
        assertEquals(1, searcher.IndexReader.NumDocs);
    }
    finally
    {
        searcherManager.Release(searcher);
    }
    // Second commit gives the background thread something to reopen.
    writer.AddDocument(new Document());
    writer.Commit();
    AtomicBoolean success = new AtomicBoolean(false);
    Exception[] exc = new Exception[1];
    ThreadClass thread = new ThreadClass(() => new RunnableAnonymousInnerClassHelper(this, triedReopen, searcherManager, success, exc).Run());
    thread.Start();
    if (VERBOSE)
    {
        Console.WriteLine("THREAD started");
    }
    // Wait until the refresh thread is inside the warming factory...
    awaitEnterWarm.Wait();
    if (VERBOSE)
    {
        Console.WriteLine("NOW call close");
    }
    // ...then dispose the manager out from under it and release the factory.
    searcherManager.Dispose();
    awaitClose.Signal();
    thread.Join();
    try
    {
        searcherManager.Acquire();
        fail("already closed");
    }
#pragma warning disable 168
    catch (ObjectDisposedException ex)
#pragma warning restore 168
    {
        // expected: the manager was disposed above
    }
    // The refresher must have attempted a reopen but not obtained a searcher,
    // and must not have surfaced any unexpected exception.
    assertFalse(success.Get());
    assertTrue(triedReopen.Get());
    assertNull("" + exc[0], exc[0]);
    writer.Dispose();
    dir.Dispose();
    //if (es != null)
    //{
    //  es.shutdown();
    //  es.awaitTermination(1, TimeUnit.SECONDS);
    //}
}
public virtual void start() { if (Enclosing_Instance.conns_NIC_1.Count == 0) return; if (t != null && !t.IsAlive) t = null; if (t == null) { //RKU 7.4.2003, put in threadgroup t = new ThreadClass(new System.Threading.ThreadStart(this.Run), "ConnectionTable.ReaperThread"); t.IsBackground = true; // will allow us to terminate if all remaining threads are daemons t.Start(); } }
/// <summary>
/// Builds an index where term i appears in exactly i documents (indexed
/// concurrently by several threads pulling from a shared queue), then checks
/// that each term's docFreq equals its numeric value.
/// </summary>
public virtual void Test()
{
    IList <string> postingsList = new List <string>();
    int numTerms = AtLeast(300);
    int maxTermsPerDoc = TestUtil.NextInt(Random(), 10, 20);
    bool isSimpleText = "SimpleText".Equals(TestUtil.GetPostingsFormat("field"));
    IndexWriterConfig iwc = NewIndexWriterConfig(Random(), TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    if ((isSimpleText || iwc.MergePolicy is MockRandomMergePolicy) && (TEST_NIGHTLY || RANDOM_MULTIPLIER > 1))
    {
        // Otherwise test can take way too long (> 2 hours)
        numTerms /= 2;
    }
    if (VERBOSE)
    {
        Console.WriteLine("maxTermsPerDoc=" + maxTermsPerDoc);
        Console.WriteLine("numTerms=" + numTerms);
    }
    // Term "i" is added i times, so its expected docFreq is i.
    for (int i = 0; i < numTerms; i++)
    {
        string term = Convert.ToString(i);
        for (int j = 0; j < i; j++)
        {
            postingsList.Add(term);
        }
    }
    postingsList = CollectionsHelper.Shuffle(postingsList);
    // Thread-safe queue shared by the indexing threads.
    ConcurrentQueue <string> postings = new ConcurrentQueue <string>(postingsList);
    Directory dir = NewFSDirectory(CreateTempDir("bagofpostings"));
    RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwc);
    int threadCount = TestUtil.NextInt(Random(), 1, 5);
    if (VERBOSE)
    {
        Console.WriteLine("config: " + iw.w.Config);
        Console.WriteLine("threadCount=" + threadCount);
    }
    ThreadClass[] threads = new ThreadClass[threadCount];
    // Starting gun keeps all threads waiting until every one is started.
    CountdownEvent startingGun = new CountdownEvent(1);
    for (int threadID = 0; threadID < threadCount; threadID++)
    {
        threads[threadID] = new ThreadAnonymousInnerClassHelper(this, maxTermsPerDoc, postings, iw, startingGun);
        threads[threadID].Start();
    }
    startingGun.Signal();
    foreach (ThreadClass t in threads)
    {
        t.Join();
    }
    iw.ForceMerge(1);
    DirectoryReader ir = iw.Reader;
    Assert.AreEqual(1, ir.Leaves.Count);
    AtomicReader air = (AtomicReader)ir.Leaves[0].Reader;
    Terms terms = air.Terms("field");
    // numTerms-1 because there cannot be a term 0 with 0 postings:
    Assert.AreEqual(numTerms - 1, air.Fields.UniqueTermCount);
    if (iwc.Codec is Lucene3xCodec == false)
    {
        Assert.AreEqual(numTerms - 1, terms.Size());
    }
    // Each term's document frequency must equal its numeric value.
    TermsEnum termsEnum = terms.Iterator(null);
    BytesRef term_;
    while ((term_ = termsEnum.Next()) != null)
    {
        int value = Convert.ToInt32(term_.Utf8ToString());
        Assert.AreEqual(value, termsEnum.DocFreq());
        // don't really need to check more than this, as CheckIndex
        // will verify that docFreq == actual number of documents seen
        // from a docsAndPositionsEnum.
    }
    ir.Dispose();
    iw.Dispose();
    dir.Dispose();
}
public virtual void stop() { if (t != null) t = null; }
/// <summary>
/// Stress-races DocumentsWriterStallControl with concurrent staller, releaser
/// and waiter threads, repeatedly pausing all of them at a "checkpoint" to
/// assert a consistent stalled/healthy state, then releasing them again.
/// </summary>
public virtual void TestAccquireReleaseRace()
{
    DocumentsWriterStallControl ctrl = new DocumentsWriterStallControl();
    ctrl.UpdateStalled(false);
    AtomicBoolean stop = new AtomicBoolean(false);
    // checkPoint == true tells all worker threads to rendezvous and park.
    AtomicBoolean checkPoint = new AtomicBoolean(true);
    int numStallers = AtLeast(1);
    int numReleasers = AtLeast(1);
    int numWaiters = AtLeast(1);
    // Synchronizer latches: updaters (stallers+releasers) vs. all threads.
    var sync = new Synchronizer(numStallers + numReleasers, numStallers + numReleasers + numWaiters);
    var threads = new ThreadClass[numReleasers + numStallers + numWaiters];
    IList <Exception> exceptions = new SynchronizedCollection <Exception>();
    // Thread layout in the array: [releasers | stallers | waiters].
    for (int i = 0; i < numReleasers; i++)
    {
        threads[i] = new Updater(stop, checkPoint, ctrl, sync, true, exceptions);
    }
    for (int i = numReleasers; i < numReleasers + numStallers; i++)
    {
        threads[i] = new Updater(stop, checkPoint, ctrl, sync, false, exceptions);
    }
    for (int i = numReleasers + numStallers; i < numReleasers + numStallers + numWaiters; i++)
    {
        threads[i] = new Waiter(stop, checkPoint, ctrl, sync, exceptions);
    }
    Start(threads);
    int iters = AtLeast(10000);
    // Nightly runs re-arm the checkpoint more aggressively.
    float checkPointProbability = TEST_NIGHTLY ? 0.5f : 0.1f;
    for (int i = 0; i < iters; i++)
    {
        if (checkPoint.Get())
        {
            // All update threads must reach the checkpoint within 10s,
            // otherwise we likely deadlocked.
            Assert.IsTrue(sync.UpdateJoin.@await(new TimeSpan(0, 0, 0, 10)), "timed out waiting for update threads - deadlock?");
            if (exceptions.Count > 0)
            {
                foreach (Exception throwable in exceptions)
                {
                    Console.WriteLine(throwable.ToString());
                    Console.Write(throwable.StackTrace);
                }
                Assert.Fail("got exceptions in threads");
            }
            // Only assert detailed state when the control actually blocked
            // and reports healthy; otherwise the state is transient.
            if (ctrl.HasBlocked() && ctrl.Healthy)
            {
                AssertState(numReleasers, numStallers, numWaiters, threads, ctrl);
            }
            // Release everyone from the checkpoint and wait until they left it.
            checkPoint.Set(false);
            sync.Waiter.countDown();
            sync.LeftCheckpoint.@await();
        }
        Assert.IsFalse(checkPoint.Get());
        Assert.AreEqual(0, sync.Waiter.Remaining);
        // Randomly re-arm the checkpoint for the next iteration.
        if (checkPointProbability >= (float)Random().NextDouble())
        {
            sync.Reset(numStallers + numReleasers, numStallers + numReleasers + numWaiters);
            checkPoint.Set(true);
        }
    }
    // Force one final checkpoint so we can assert the terminal state.
    if (!checkPoint.Get())
    {
        sync.Reset(numStallers + numReleasers, numStallers + numReleasers + numWaiters);
        checkPoint.Set(true);
    }
    Assert.IsTrue(sync.UpdateJoin.@await(new TimeSpan(0, 0, 0, 10)));
    AssertState(numReleasers, numStallers, numWaiters, threads, ctrl);
    // Shut everything down and release the final checkpoint.
    checkPoint.Set(false);
    stop.Set(true);
    sync.Waiter.countDown();
    sync.LeftCheckpoint.@await();
    for (int i = 0; i < threads.Length; i++)
    {
        // Un-stall so no thread stays blocked, then give each 2s to exit.
        ctrl.UpdateStalled(false);
        threads[i].Join(2000);
        if (threads[i].IsAlive && threads[i] is Waiter)
        {
            if (threads[i].State == ThreadState.WaitSleepJoin)
            {
                Assert.Fail("waiter is not released - anyThreadsStalled: " + ctrl.AnyStalledThreads());
            }
        }
    }
}
/// <summary>
/// Reaper loop: periodically scans the connection table and destroys/removes
/// connections idle longer than conn_expire_time. Exits when the table becomes
/// empty or stop() clears the thread reference <c>t</c>.
/// </summary>
public virtual void Run()
{
    Connection value_Renamed;
    System.Collections.DictionaryEntry entry;
    long curr_time;
    // Scratch list holding the keys (Address) of connections expired in the
    // current pass; cleared after each pass (see below).
    ArrayList temp = new ArrayList();
    if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("connection reaper thread was started. Number of connections=" + Enclosing_Instance.conns_NIC_1.Count + ", reaper_interval=" + Enclosing_Instance.reaper_interval + ", conn_expire_time=" + Enclosing_Instance.conn_expire_time);
    while (Enclosing_Instance.conns_NIC_1.Count > 0 && t != null)
    {
        // first sleep
        Util.Util.sleep(Enclosing_Instance.reaper_interval);
        if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("ConnectionTable.Reaper", "b4 lock conns.SyncRoot");
        lock (Enclosing_Instance.conns_NIC_1.SyncRoot)
        {
            // Milliseconds since the Unix-style epoch used by last_access.
            curr_time = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
            for (System.Collections.IEnumerator it = Enclosing_Instance.conns_NIC_1.GetEnumerator(); it.MoveNext(); )
            {
                entry = (System.Collections.DictionaryEntry)it.Current;
                value_Renamed = (Connection)entry.Value;
                if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("connection is " + ((curr_time - value_Renamed.last_access) / 1000) + " seconds old (curr-time=" + curr_time + ", last_access=" + value_Renamed.last_access + ')');
                if (value_Renamed.last_access + Enclosing_Instance.conn_expire_time < curr_time)
                {
                    if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("connection " + value_Renamed + " has been idle for too long (conn_expire_time=" + Enclosing_Instance.conn_expire_time + "), will be removed");
                    value_Renamed.Destroy();
                    // FIX: record the key (Address), not it.Current, which is the
                    // whole DictionaryEntry; the removal loop below casts the
                    // stored items to Address and would throw InvalidCastException.
                    temp.Add(entry.Key);
                }
            }
            // Now remove closed connections from the connection hashtable
            for (int i = 0; i < temp.Count; i++)
            {
                if (Enclosing_Instance.conns_NIC_1.Contains((Address)temp[i]))
                {
                    Enclosing_Instance.conns_NIC_1.Remove((Address)temp[i]);
                }
            }
            // FIX: reset the scratch list so a later pass does not re-process
            // old entries (nulled slots would make Hashtable.Contains(null) throw).
            temp.Clear();
        }
        if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("ConnectionTable.Reaper", "after lock conns.SyncRoot");
    }
    if (enclosingInstance.NCacheLog.IsInfoEnabled) enclosingInstance.NCacheLog.Info("reaper terminated");
    t = null;
}
public virtual void runTest(Directory directory, MergeScheduler merger) { IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED, null); writer.SetMaxBufferedDocs(2); if (merger != null) { writer.SetMergeScheduler(merger, null); } for (int iter = 0; iter < NUM_ITER; iter++) { int iterFinal = iter; writer.MergeFactor = 1000; for (int i = 0; i < 200; i++) { Document d = new Document(); d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.Add(new Field("contents", English.IntToEnglish(i), Field.Store.NO, Field.Index.ANALYZED)); writer.AddDocument(d, null); } writer.MergeFactor = 4; //writer.setInfoStream(System.out); ThreadClass[] threads = new ThreadClass[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { int iFinal = i; IndexWriter writerFinal = writer; threads[i] = new AnonymousClassThread(writerFinal, iFinal, iterFinal, this); } for (int i = 0; i < NUM_THREADS; i++) { threads[i].Start(); } for (int i = 0; i < NUM_THREADS; i++) { threads[i].Join(); } Assert.IsTrue(!failed); int expectedDocCount = (int)((1 + iter) * (200 + 8 * NUM_ITER2 * (NUM_THREADS / 2.0) * (1 + NUM_THREADS))); // System.out.println("TEST: now index=" + writer.segString()); Assert.AreEqual(expectedDocCount, writer.MaxDoc()); writer.Close(); writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED, null); writer.SetMaxBufferedDocs(2); IndexReader reader = IndexReader.Open(directory, true, null); Assert.IsTrue(reader.IsOptimized()); Assert.AreEqual(expectedDocCount, reader.NumDocs()); reader.Close(); } writer.Close(); }
public virtual void TestIndexing() { DirectoryInfo tmpDir = CreateTempDir("TestNeverDelete"); BaseDirectoryWrapper d = NewFSDirectory(tmpDir); // We want to "see" files removed if Lucene removed // them. this is still worth running on Windows since // some files the IR opens and closes. if (d is MockDirectoryWrapper) { ((MockDirectoryWrapper)d).NoDeleteOpenFile = false; } RandomIndexWriter w = new RandomIndexWriter(Random(), d, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); w.w.Config.SetMaxBufferedDocs(TestUtil.NextInt(Random(), 5, 30)); w.Commit(); ThreadClass[] indexThreads = new ThreadClass[Random().Next(4)]; long stopTime = Environment.TickCount + AtLeast(1000); for (int x = 0; x < indexThreads.Length; x++) { indexThreads[x] = new ThreadAnonymousInnerClassHelper(w, stopTime, NewStringField, NewTextField); indexThreads[x].Name = "Thread " + x; indexThreads[x].Start(); } HashSet <string> allFiles = new HashSet <string>(); DirectoryReader r = DirectoryReader.Open(d); while (Environment.TickCount < stopTime) { IndexCommit ic = r.IndexCommit; if (VERBOSE) { Console.WriteLine("TEST: check files: " + ic.FileNames); } allFiles.AddAll(ic.FileNames); // Make sure no old files were removed foreach (string fileName in allFiles) { Assert.IsTrue(SlowFileExists(d, fileName), "file " + fileName + " does not exist"); } DirectoryReader r2 = DirectoryReader.OpenIfChanged(r); if (r2 != null) { r.Dispose(); r = r2; } Thread.Sleep(1); } r.Dispose(); foreach (ThreadClass t in indexThreads) { t.Join(); } w.Dispose(); d.Dispose(); System.IO.Directory.Delete(tmpDir.FullName, true); }
/// <summary>
/// Stress test for binary doc-values updates: each of N threads owns a field
/// pair (f{i}, cf{i}) where cf{i} is a numeric control always kept at twice
/// f{i}'s value; after all update threads finish, verifies the invariant
/// cf == 2 * f for every live document in every segment.
/// </summary>
public virtual void TestStressMultiThreading()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriter writer = new IndexWriter(dir, conf);
    // create index
    int numThreads = TestUtil.NextInt(Random(), 3, 6);
    int numDocs = AtLeast(2000);
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(new StringField("id", "doc" + i, Store.NO));
        // Assign each doc to one of four update groups with skewed probabilities.
        double group = Random().NextDouble();
        string g;
        if (group < 0.1)
        {
            g = "g0";
        }
        else if (group < 0.5)
        {
            g = "g1";
        }
        else if (group < 0.8)
        {
            g = "g2";
        }
        else
        {
            g = "g3";
        }
        doc.Add(new StringField("updKey", g, Store.NO));
        for (int j = 0; j < numThreads; j++)
        {
            long value = Random().Next();
            doc.Add(new BinaryDocValuesField("f" + j, TestBinaryDocValuesUpdates.ToBytes(value)));
            doc.Add(new NumericDocValuesField("cf" + j, value * 2)); // control, always updated to f * 2
        }
        writer.AddDocument(doc);
    }
    CountdownEvent done = new CountdownEvent(numThreads);
    AtomicInt32 numUpdates = new AtomicInt32(AtLeast(100));
    // same thread updates a field as well as reopens
    ThreadClass[] threads = new ThreadClass[numThreads];
    for (int i = 0; i < threads.Length; i++)
    {
        string f = "f" + i;
        string cf = "cf" + i;
        threads[i] = new ThreadAnonymousInnerClassHelper(this, "UpdateThread-" + i, writer, numDocs, done, numUpdates, f, cf);
    }
    foreach (ThreadClass t in threads)
    {
        t.Start();
    }
    done.Wait();
    writer.Dispose();
    // Verification pass: every live doc must satisfy cf{i} == 2 * f{i} and
    // the two fields must agree on which docs have a value.
    DirectoryReader reader = DirectoryReader.Open(dir);
    BytesRef scratch = new BytesRef();
    foreach (AtomicReaderContext context in reader.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        for (int i = 0; i < numThreads; i++)
        {
            BinaryDocValues bdv = r.GetBinaryDocValues("f" + i);
            NumericDocValues control = r.GetNumericDocValues("cf" + i);
            IBits docsWithBdv = r.GetDocsWithField("f" + i);
            IBits docsWithControl = r.GetDocsWithField("cf" + i);
            IBits liveDocs = r.LiveDocs;
            for (int j = 0; j < r.MaxDoc; j++)
            {
                if (liveDocs == null || liveDocs.Get(j))
                {
                    Assert.AreEqual(docsWithBdv.Get(j), docsWithControl.Get(j));
                    if (docsWithBdv.Get(j))
                    {
                        long ctrlValue = control.Get(j);
                        long bdvValue = TestBinaryDocValuesUpdates.GetValue(bdv, j, scratch) * 2;
                        //      if (ctrlValue != bdvValue) {
                        //        System.out.println("seg=" + r + ", f=f" + i + ", doc=" + j + ", group=" + r.Document(j).Get("updKey") + ", ctrlValue=" + ctrlValue + ", bdvBytes=" + scratch);
                        //      }
                        Assert.AreEqual(ctrlValue, bdvValue);
                    }
                }
            }
        }
    }
    reader.Dispose();
    dir.Dispose();
}
public virtual void Test() { Directory dir = NewDirectory(); RandomIndexWriter w = new RandomIndexWriter(Random(), dir, Similarity, TimeZone); long startTime = Environment.TickCount; // TODO: replace w/ the @nightly test data; make this // into an optional @nightly stress test Document doc = new Document(); Field body = NewTextField("body", "", Field.Store.NO); doc.Add(body); StringBuilder sb = new StringBuilder(); for (int docCount = 0; docCount < NUM_DOCS; docCount++) { int numTerms = Random().Next(10); for (int termCount = 0; termCount < numTerms; termCount++) { sb.Append(Random().NextBoolean() ? "aaa" : "bbb"); sb.Append(' '); } body.SetStringValue(sb.ToString()); w.AddDocument(doc); sb.Remove(0, sb.Length); } IndexReader r = w.Reader; w.Dispose(); long endTime = Environment.TickCount; if (VERBOSE) { Console.WriteLine("BUILD took " + (endTime - startTime)); } IndexSearcher s = NewSearcher(r); AtomicBoolean failed = new AtomicBoolean(); AtomicInt64 netSearch = new AtomicInt64(); ThreadClass[] threads = new ThreadClass[NUM_SEARCH_THREADS]; for (int threadID = 0; threadID < NUM_SEARCH_THREADS; threadID++) { threads[threadID] = new ThreadAnonymousInnerClassHelper(this, s, failed, netSearch); threads[threadID].SetDaemon(true); } foreach (ThreadClass t in threads) { t.Start(); } foreach (ThreadClass t in threads) { t.Join(); } if (VERBOSE) { Console.WriteLine(NUM_SEARCH_THREADS + " threads did " + netSearch.Get() + " searches"); } r.Dispose(); dir.Dispose(); }
public Randomness(ThreadClass owner, int seed, params ISeedDecorator[] decorators) : this(owner, seed, decorators.ToList()) { }
public virtual void Test() { Directory dir = NewDirectory(); MockAnalyzer analyzer = new MockAnalyzer(Random()); analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH); RandomIndexWriter w = new RandomIndexWriter(Random(), dir, analyzer); LineFileDocs docs = new LineFileDocs(Random(), DefaultCodecSupportsDocValues()); int charsToIndex = AtLeast(100000); int charsIndexed = 0; //System.out.println("bytesToIndex=" + charsToIndex); while (charsIndexed < charsToIndex) { Document doc = docs.NextDoc(); charsIndexed += doc.Get("body").Length; w.AddDocument(doc); //System.out.println(" bytes=" + charsIndexed + " add: " + doc); } IndexReader r = w.Reader; //System.out.println("numDocs=" + r.NumDocs); w.Dispose(); IndexSearcher s = NewSearcher(r); Terms terms = MultiFields.GetFields(r).Terms("body"); int termCount = 0; TermsEnum termsEnum = terms.Iterator(null); while (termsEnum.Next() != null) { termCount++; } Assert.IsTrue(termCount > 0); // Target ~10 terms to search: double chance = 10.0 / termCount; termsEnum = terms.Iterator(termsEnum); IDictionary <BytesRef, TopDocs> answers = new Dictionary <BytesRef, TopDocs>(); while (termsEnum.Next() != null) { if (Random().NextDouble() <= chance) { BytesRef term = BytesRef.DeepCopyOf(termsEnum.Term()); answers[term] = s.Search(new TermQuery(new Term("body", term)), 100); } } if (answers.Count > 0) { CountdownEvent startingGun = new CountdownEvent(1); int numThreads = TestUtil.NextInt(Random(), 2, 5); ThreadClass[] threads = new ThreadClass[numThreads]; for (int threadID = 0; threadID < numThreads; threadID++) { ThreadClass thread = new ThreadAnonymousInnerClassHelper(this, s, answers, startingGun); threads[threadID] = thread; thread.Start(); } startingGun.Signal(); foreach (ThreadClass thread in threads) { thread.Join(); } } r.Dispose(); dir.Dispose(); }
public Randomness Clone(ThreadClass newOwner) { return(new Randomness(newOwner, this.Seed, this.decorators)); }
private void button2_Click(object sender, System.EventArgs e) { listView1.Items.Clear(); Thread[] t = new Thread[100]; ThreadClass start; for (int i = 0; i < 100; i++) { start = new ThreadClass(i.ToString(), listView1); ThreadStart st = new ThreadStart(start.ThreadFunc); t[i] = new Thread(st); } for (int i = 0; i < 100; i++) { t[i].Start(); t[i].IsBackground = false; //t.Join(); } //WaitHandle.WaitAll(); myEvent = new AutoResetEvent(false); //... myEvent.Set();... //En otro thread myEvent.WaitOne(); }