public static void Shutdown() { if (!_enabled) { return; } _shutdown = true; _cancellationSource.Cancel(); _singleInstanceMutex.ReleaseMutex(); _singleInstanceMutex.Dispose(); CommandLineQueue.Close(); if (!_producerThread.Join(TimeSpan.FromSeconds(2))) { _producerThread.Abort(); } if (!_consumerThread.Join(TimeSpan.FromSeconds(2))) { _consumerThread.Abort(); } _enabled = false; }
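The shutdown above only reaches Thread.Abort as a last resort: Cancel plus CommandLineQueue.Close() are expected to wake the producer and consumer threads so that Join succeeds within the two-second window. A minimal sketch of the consumer side of that contract, assuming the BlockingQueue type used throughout these examples throws InvalidOperationException once it has been closed and drained (ProcessCommandLine and the loop itself are illustrative, not taken from the code above):

private static void ConsumerLoop()
{
    while (!_shutdown)
    {
        string commandLine;
        try
        {
            // Blocks until an item arrives; assumed to throw once the
            // queue has been closed and emptied by Shutdown().
            commandLine = CommandLineQueue.Dequeue();
        }
        catch (InvalidOperationException)
        {
            break; // queue closed: exit so Shutdown()'s Join() succeeds
        }
        ProcessCommandLine(commandLine); // hypothetical handler
    }
}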
public void Queue_on_closed_queue_throws() { BlockingQueue<string> q = new BlockingQueue<string>(); q.Enqueue("foo"); Assert.IsFalse(q.IsClosed); q.Close(); Assert.IsTrue(q.IsClosed); q.Enqueue("bar"); }
public void Dequeue_on_closed_queue_throws() { BlockingQueue<string> q = new BlockingQueue<string>(); q.Enqueue("foo"); Assert.IsFalse(q.IsClosed); q.Close(); Assert.IsTrue(q.IsClosed); string x = q.Dequeue(); Assert.AreEqual("foo", x); x = q.Dequeue(); }
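The two tests above pin down the Close() contract that the rest of these examples rely on: items enqueued before Close() can still be dequeued, enqueueing after Close() fails, and dequeueing past the last item fails instead of blocking forever. A compact sketch of that contract, assuming the failure is reported with an exception (the exact exception type varies between BlockingQueue implementations and is an assumption here):

var q = new BlockingQueue<string>();
q.Enqueue("foo");
q.Close();                        // no more items will be accepted

Console.WriteLine(q.Dequeue());   // "foo" - items enqueued before Close() still drain

try { q.Enqueue("bar"); }         // enqueue after Close() is rejected
catch (InvalidOperationException) { Console.WriteLine("enqueue refused"); }

try { q.Dequeue(); }              // closed and empty: throws instead of blocking forever
catch (InvalidOperationException) { Console.WriteLine("queue drained"); }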
/// <summary> /// Shutdown will disconnect all the sims except for the current sim /// first, and then kill the connection to CurrentSim. This should only /// be called if the logout process times out on <code>RequestLogout</code> /// </summary> public void Shutdown(DisconnectType type) { Client.Log("NetworkManager shutdown initiated", Helpers.LogLevel.Info); // Send a CloseCircuit packet to simulators if we are initiating the disconnect bool sendCloseCircuit = (type == DisconnectType.ClientInitiated || type == DisconnectType.NetworkTimeout); lock (Simulators) { // Disconnect all simulators except the current one for (int i = 0; i < Simulators.Count; i++) { if (Simulators[i] != null && Simulators[i] != CurrentSim) { Simulators[i].Disconnect(sendCloseCircuit); // Fire the SimDisconnected event if a handler is registered if (OnSimDisconnected != null) { try { OnSimDisconnected(Simulators[i], type); } catch (Exception e) { Client.Log(e.ToString(), Helpers.LogLevel.Error); } } } } Simulators.Clear(); } if (CurrentSim != null) { // Kill the connection to the current simulator CurrentSim.Disconnect(sendCloseCircuit); // Fire the SimDisconnected event if a handler is registered if (OnSimDisconnected != null) { try { OnSimDisconnected(CurrentSim, type); } catch (Exception e) { Client.Log(e.ToString(), Helpers.LogLevel.Error); } } } // Clear out all of the packets that never had time to process PacketInbox.Close(); connected = false; // Fire the disconnected callback if (OnDisconnected != null) { try { OnDisconnected(DisconnectType.ClientInitiated, String.Empty); } catch (Exception e) { Client.Log(e.ToString(), Helpers.LogLevel.Error); } } }
private object[] Proxy(ISender sender, int maxResultsToWait, string method, object[] args) { BlockingQueue q = new BlockingQueue(maxResultsToWait); args = AdrXmlRpcConverter.XmlRpc2AdrParams(args); _rpc.Invoke(sender, q, method, args); ArrayList allValues = new ArrayList(); int counter = 0; ISender rsSender = null; try { do { rsSender = null; //Reset it before the following: RpcResult rpcRs = (RpcResult)q.Dequeue(); rsSender = rpcRs.ResultSender; //get it before exception thrown object val = rpcRs.Result; Debug.WriteLine(string.Format("Original Result: {0}", val)); object xmlrpc_val = AdrXmlRpcConverter.Adr2XmlRpc(val); //conversion in here counter++; allValues.Add(xmlrpc_val); } while (maxResultsToWait < 0 ? true : (counter < maxResultsToWait)); } catch (Exception e) { Debug.WriteLine(e); string s = string.Empty; if (e is AdrException) { if (rsSender != null) { s = AdrXmlRpcConverter.Adr2XmlRpc(rsSender) as string; } } if (e is InvalidOperationException) { /* * this is what we expect at the end of Dequeuing, so just return what we've gotten so far * it could be an empty array */ return(allValues.ToArray()); } Exception new_e = AdrXmlRpcConverter.Adr2XmlRpc(e) as Exception; throw new Exception(new_e.Message + (s.Equals(string.Empty) ? string.Empty : string.Format("thrown by: {0}", s))); } finally { if (!q.Closed) { q.Close(); } } return(allValues.ToArray()); }
/// <summary> /// Add a message to the send queue. /// </summary> /// <param name="message">The message.</param> private void EnqueueMessage(LogMessage message) { if (message.IsDispose) { _queue.Close(); return; } _queue.Enqueue(Tuple.Create(GetSubject(message), message.Message)); lock (_queue.SyncRoot) { if (_isThreadStarted) { return; } _isThreadStarted = true; ThreadingHelper.Thread(() => { try { using (var email = CreateClient()) { while (true) { Tuple<string, string> m; if (!_queue.TryDequeue(out m)) { break; } email.Send(From, To, m.Item1, m.Item2); } } lock (_queue.SyncRoot) _isThreadStarted = false; } catch (Exception ex) { Trace.WriteLine(ex); } }).Name("Email log queue").Launch(); } }
private void AddStuffToBQAndClose(object ostate) { object[] state = (object[])ostate; BlockingQueue bq = (BlockingQueue)state[0]; ISender target = (ISender)state[1]; foreach (object o in this.CurrentInvokeState.RetValues) { if (CurrentInvokeState.EnqueueIntervalInMillisec >= 0) { Thread.Sleep(CurrentInvokeState.EnqueueIntervalInMillisec); } RpcResult rs = new RpcResult(target, o); bq.Enqueue(rs); } if (CurrentInvokeState.IntervalBetweenLastEnqueueAndClose >= 0) { Thread.Sleep(CurrentInvokeState.IntervalBetweenLastEnqueueAndClose); } bq.Close(); }
public void Dispose() { SaveCache(new object()); if (client != null) { DeregisterEvents(client); } if (requestTimer != null) { requestTimer.Dispose(); requestTimer = null; } if (cacheTimer != null) { cacheTimer.Dispose(); cacheTimer = null; } try { if (useRequestThread) { PendingLookups.Close(); if (requestThread != null) { if (!requestThread.Join(5 * 1000)) { requestThread.Abort(); } requestThread = null; } } } catch { } }
private static void Crawl() { int count = 0, consistency = 0; NodeMapping nm = (NodeMapping) nodes.GetByIndex(0); Node lnode = nm.Node; Address rem_addr = lnode.Address, prev = null, first_left = null; bool failed = false; try { do { Console.WriteLine("Current address: " + rem_addr); ISender sender = new AHGreedySender(lnode, rem_addr); BlockingQueue q = new BlockingQueue(); lnode.Rpc.Invoke(sender, q, "sys:link.GetNeighbors"); RpcResult res = (RpcResult) q.Dequeue(); Hashtable ht = (Hashtable) res.Result; Address tmp = AddressParser.Parse((String) ht["left"]); Address next = AddressParser.Parse((String) ht["right"]); if(prev != null && tmp.Equals(prev)) { consistency++; } else { first_left = tmp; } if(next == lnode.Address && first_left == rem_addr) { consistency++; } prev = rem_addr; rem_addr = next; q.Close(); count++; } while((rem_addr != lnode.Address) && (count < nodes.Count)); } catch(Exception e) { failed = true; Console.WriteLine("Crawl failed due to exception..."); Console.WriteLine(e); } if(!failed) { if(count != nodes.Count) { Console.WriteLine("Crawl failed due to missing nodes!"); Console.WriteLine("Expected nodes: {0}, found: {1}.", nodes.Count, count); } else if(consistency != count) { Console.WriteLine("Crawl failed due to bad consistency!"); Console.WriteLine("Expected consistency: {0}, actual: {1}.", count, consistency); } else { Console.WriteLine("Crawl succeeded!"); } } }
/// <summary> /// Starts executing the test suite. /// </summary> public void Start() { // Create a ScriptEngine and freeze its state. SaveScriptEngineSnapshot(); // Create a queue to hold the tests. var queue = new BlockingQueue<TestExecutionState>(100); // Create a thread per processor. var threads = new List<Thread>(); for (int i = 0; i < GetThreadCount(); i++) { var thread = new Thread(ThreadStart); thread.Start(queue); threads.Add(thread); } for (int i = 0; i < this.zipFile.Count; i++) { var zipEntry = this.zipFile[i]; if (zipEntry.IsFile && zipEntry.Name.EndsWith(".js")) { // This is a test file. // Read out the contents (assume UTF-8). string fileContents; using (var entryStream = this.zipFile.GetInputStream(zipEntry)) using (var reader = new StreamReader(entryStream)) { fileContents = reader.ReadToEnd(); } // Parse out the test metadata. var test = new Test(this, zipEntry.Name, fileContents); // Check if the test should be skipped. if (this.skippedTestNames.Contains(Path.GetFileNameWithoutExtension(zipEntry.Name))) { this.skippedTestCount++; TestFinished(this, new TestEventArgs(TestRunStatus.Skipped, test, false)); continue; } if (this.IncludedTests.Count > 0 && this.IncludedTests.Contains(Path.GetFileNameWithoutExtension(zipEntry.Name)) == false) { this.skippedTestCount++; TestFinished(this, new TestEventArgs(TestRunStatus.Skipped, test, false)); continue; } // Queue the test. if (test.RunInNonStrictMode) queue.Enqueue(new TestExecutionState(test, runInStrictMode: false)); if (test.RunInStrictMode) queue.Enqueue(new TestExecutionState(test, runInStrictMode: true)); } } // Signal the threads that no more tests will be provided. queue.Close(); // Wait for all threads to exit. foreach (var thread in threads) thread.Join(); }
public void One_producer_one_consumer_loop_with_foreach_and_stop() { int n = 10000; List<string> enqueued = new List<string>(); List<string> dequeued = new List<string>(); BlockingQueue<string> q = new BlockingQueue<string>(); Thread consumer = new Thread(SingleConsumerForeachLoopAndStop); consumer.Start(new Tuplet<IBlockingQueue<string>, List<string>>(q, dequeued)); for(int i = 0; i < n; i++) { string guid = Guid.NewGuid().ToString(); q.Enqueue(guid); enqueued.Add(guid); } q.Close(); Assert.IsTrue(consumer.Join(1000)); Assert.AreEqual(n, enqueued.Count); Assert.AreEqual(n, dequeued.Count); for(int i = 0; i < n; i++) { Assert.AreEqual(enqueued[i], dequeued[i]); } }
public void One_producer_many_consumers_loop_with_foreach() { int n = 500; var enqueued = new List<string>(); var dequeued = new List<string>(); var q = new BlockingQueue<string>(); var c1 = new Thread(MultiConsumerForeachLoop) { IsBackground = true }; var c2 = new Thread(MultiConsumerForeachLoop) { IsBackground = true }; var c3 = new Thread(MultiConsumerForeachLoop) { IsBackground = true }; var v1 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c1.Start(v1); var v2 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c2.Start(v2); var v3 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c3.Start(v3); Thread.Sleep(1000); for(int i = 0; i < n; i++) { string guid = Guid.NewGuid().ToString(); q.Enqueue(guid); enqueued.Add(guid); } q.Close(); Assert.IsTrue(v1.Item4.WaitOne(10000, false), "thread 1 did not finish"); Assert.IsTrue(v2.Item4.WaitOne(10000, false), "thread 2 did not finish"); Assert.IsTrue(v3.Item4.WaitOne(10000, false), "thread 3 did not finish"); _log.DebugFormat("Thread 1 processed {0}", v1.Item3); _log.DebugFormat("Thread 2 processed {0}", v2.Item3); _log.DebugFormat("Thread 3 processed {0}", v3.Item3); Console.WriteLine("Thread 1 processed {0}", v1.Item3); Console.WriteLine("Thread 2 processed {0}", v2.Item3); Console.WriteLine("Thread 3 processed {0}", v3.Item3); Assert.GreaterOrEqual(v1.Item3, n / 4); Assert.GreaterOrEqual(v2.Item3, n / 4); Assert.GreaterOrEqual(v3.Item3, n / 4); Assert.AreEqual(n, dequeued.Count); Assert.AreEqual(dequeued.OrderBy(x => x).ToArray(), enqueued.OrderBy(x => x).ToArray()); }
public void Many_producers_many_consumers_loop_with_foreach() { int n = 200; List<string> enqueued = new List<string>(); List<string> dequeued = new List<string>(); BlockingQueue<string> q = new BlockingQueue<string>(); Thread c1 = new Thread(MultiConsumerForeachLoop); Thread c2 = new Thread(MultiConsumerForeachLoop); Thread c3 = new Thread(MultiConsumerForeachLoop); c1.IsBackground = true; c2.IsBackground = true; c3.IsBackground = true; Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> v1 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c1.Start(v1); Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> v2 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c2.Start(v2); Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> v3 = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, dequeued, 0, new ManualResetEvent(false)); c3.Start(v3); Thread p1 = new Thread(MultiProducer); Thread p2 = new Thread(MultiProducer); Thread p3 = new Thread(MultiProducer); p1.IsBackground = true; p2.IsBackground = true; p3.IsBackground = true; Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> p1v = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, enqueued, n, new ManualResetEvent(false)); p1.Start(p1v); Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> p2v = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, enqueued, n, new ManualResetEvent(false)); p2.Start(p2v); Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent> p3v = new Tuplet<BlockingQueue<string>, List<string>, int, ManualResetEvent>(q, enqueued, n, new ManualResetEvent(false)); p3.Start(p3v); Assert.IsTrue(p1v.Item4.WaitOne(5000, false), "producer 1 did not finish"); Assert.IsTrue(p2v.Item4.WaitOne(5000, false), "producer 2 did not finish"); Assert.IsTrue(p3v.Item4.WaitOne(5000, false), "producer 3 did not finish"); q.Close(); Assert.IsTrue(v1.Item4.WaitOne(15000, false), "consumer 1 did not finish"); Assert.IsTrue(v2.Item4.WaitOne(15000, false), "consumer 2 did not finish"); Assert.IsTrue(v3.Item4.WaitOne(15000, false), "consumer 3 did not finish"); _log.DebugFormat("consumer 1 processed {0}", v1.Item3); _log.DebugFormat("consumer 2 processed {0}", v2.Item3); _log.DebugFormat("consumer 3 processed {0}", v3.Item3); Assert.GreaterOrEqual(v1.Item3, n * 3 / 4); Assert.GreaterOrEqual(v2.Item3, n * 3 / 4); Assert.GreaterOrEqual(v3.Item3, n * 3 / 4); Assert.AreEqual(enqueued.Count, dequeued.Count); for(int i = 0; i < n; i++) { Assert.Contains(dequeued[i], enqueued); } }
public void MultipleWriterTest() { const int WRITERS = 5; const int READERS = 5; const int writes = 10000; ArrayList written_list = new ArrayList(); ArrayList read_list = new ArrayList(); ArrayList write_threads = new ArrayList(); ArrayList read_threads = new ArrayList(); BlockingQueue q = new BlockingQueue(); /* Start the writers */ for( int i = 0; i < WRITERS; i++ ) { WriterState ws = new WriterState(q, written_list, writes); Thread t = new Thread( ws.Start ); write_threads.Add( t ); t.Start(); } /* Start the readers */ for( int i = 0; i < READERS; i++) { ReaderState rs = new ReaderState(q, read_list); Thread t = new Thread( rs.Start ); read_threads.Add( t ); t.Start(); } foreach(Thread t in write_threads) { t.Join(); } //Writing is done, close the queue, and join the readers: q.Close(); foreach(Thread t in read_threads) { t.Join(); } //Check that the reader list is the same as the written list: ArrayList read_copy = new ArrayList(read_list); ArrayList write_copy = new ArrayList(written_list); //Remove all the reads from the written copy: foreach(object o in read_list) { int i = write_copy.IndexOf(o); Assert.IsTrue( i >= 0, "read something not in written"); write_copy.RemoveAt(i); } Assert.IsTrue( write_copy.Count == 0, "More written than read"); //Remove all the writes from the read copy: foreach(object o in written_list) { int i = read_copy.IndexOf(o); Assert.IsTrue( i >= 0, "wrote something not in read"); read_copy.RemoveAt(i); } Assert.IsTrue( read_copy.Count == 0, "More written than read"); }
/// <summary> /// Release resources. /// </summary> protected override void DisposeManaged() { _queue.Close(); base.DisposeManaged(); }
/// <summary> /// Release resources. /// </summary> protected override void DisposeManaged() { _alerts.Close(); base.DisposeManaged(); }
void glControl_Disposed(object sender, EventArgs e) { TextureThreadRunning = false; PendingTextures.Close(); }
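Closing PendingTextures in the handler above is what actually stops the background texture loader: a Dequeue blocked on the queue is woken once the queue is closed, and the running flag covers the non-blocking path. A sketch of the kind of worker loop this shuts down, with illustrative names (TextureLoadItem, LoadTexture) that are not taken from the snippet and the same assumed throw-on-closed Dequeue behaviour:

private void TextureThread()
{
    while (TextureThreadRunning)
    {
        TextureLoadItem item;
        try
        {
            // Blocks until a texture request arrives; closing the queue
            // unblocks this call (assumed to throw once closed and empty).
            item = PendingTextures.Dequeue();
        }
        catch (InvalidOperationException)
        {
            break; // queue closed during disposal
        }
        LoadTexture(item); // hypothetical: decode and upload the texture
    }
}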
public LuaSession(IdGenerator transactionIdGenerator) : base(transactionIdGenerator) { Requests = new BlockingQueue<LuaRequest>(); Requests.Close(); }
public static void StartServices() { Stopwatch stopwatch = new Stopwatch(); stopwatch.Start(); IBlockingQueue<ServiceBE> services = new BlockingQueue<ServiceBE>(); List<ServiceBE> servicesToStart = new List<ServiceBE>(DbUtils.CurrentSession.Services_GetAll()); // extract all auth services and start them synchronously first List<ServiceBE> authServices = servicesToStart.FindAll(service => service.Type == ServiceType.AUTH); servicesToStart.RemoveAll(service => service.Type == ServiceType.AUTH); foreach(ServiceBE authService in authServices) { try { StartService(authService, false, false); } catch { //Services started on deki startup do not get disabled if they fail to start } } // start remaining services in parallel foreach(ServiceBE service in servicesToStart) { if(service.ServiceEnabled) { services.Enqueue(service); } } services.Close(); List<Result> workers = new List<Result>(); for(int i = 0; i < 10; i++) { workers.Add(Async.ForkThread(() => StartServices_Helper(services), new Result())); } workers.Join(new Result()).Wait(); _log.InfoFormat("Services started for instance '{0}' in {1}ms", DekiContext.Current.Instance.Id, stopwatch.ElapsedMilliseconds); }
/// <summary> /// Create <see cref="LuaFixServer"/>. /// </summary> public LuaFixServer() { _requests.Close(); _securityClassInfo.FillDefault(); _fixServer = new FixServerEx((l, p) => { if (Login.IsEmpty() || (l.CompareIgnoreCase(Login) && p == _password)) { _prevLevel1.Clear(); return(Tuple.Create(TimeSpan.FromMilliseconds(100), FixClientRoles.Admin)); } return(null); }); _fixServer.NewOutMessage += message => { _fixServer.AddDebugLog("In. {0}", message); switch (message.Type) { case MessageTypes.CandlePnF: case MessageTypes.CandleRange: case MessageTypes.CandleRenko: case MessageTypes.CandleTick: case MessageTypes.CandleTimeFrame: case MessageTypes.CandleVolume: throw new NotSupportedException(); case MessageTypes.MarketData: { var mdMsg = (MarketDataMessage)message; ProcessMarketDataMessage(mdMsg); break; } case MessageTypes.SecurityLookup: { var secMsg = (SecurityLookupMessage)message; var securityId = new SecurityId { SecurityCode = secMsg.SecurityId.SecurityCode, BoardCode = !secMsg.SecurityId.BoardCode.IsEmpty() ? _securityClassInfo.GetSecurityClass(secMsg.SecurityId) : null }; _requests.Enqueue(new LuaRequest { MessageType = MessageTypes.SecurityLookup, TransactionId = secMsg.TransactionId, SecurityId = securityId, Value = secMsg.UnderlyingSecurityCode }); break; } case MessageTypes.OrderPairReplace: case MessageTypes.Portfolio: case MessageTypes.Position: throw new NotSupportedException(); case MessageTypes.PortfolioLookup: var pfMsg = (PortfolioLookupMessage)message; _requests.Enqueue(new LuaRequest { MessageType = MessageTypes.PortfolioLookup, TransactionId = pfMsg.TransactionId }); break; case MessageTypes.OrderStatus: var statusMsg = (OrderStatusMessage)message; _requests.Enqueue(new LuaRequest { MessageType = MessageTypes.OrderStatus, TransactionId = statusMsg.TransactionId }); break; case MessageTypes.OrderRegister: case MessageTypes.OrderReplace: case MessageTypes.OrderCancel: case MessageTypes.OrderGroupCancel: var orderMsg = (OrderMessage)message; ProcessOrderMessage(orderMsg); break; default: throw new ArgumentOutOfRangeException(); } }; _fixServer.TransactionSession.TimeZone = TimeHelper.Moscow; _fixServer.MarketDataSession.TimeZone = TimeHelper.Moscow; _logManager.Application = new QuikNativeApp(); _logManager.Sources.Add(_fixServer); LogFile = "StockSharp.QuikLua.log"; }
internal void StopExecution() { _toggleActivityQueue.Close(); InterruptExecution(Scheme.ExitActivity); }