public void Execute(Task completedTask)
{
    lock (this)
    {
        if (evt != null)
        {
            evt.CountDown();
        }
    }
}

public void Execute()
{
    lock (this)
    {
        if (evt != null)
        {
            evt.CountDown();
        }
    }
}
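The two Execute overloads above only decrement a latch that some coordinating code is expected to be waiting on. For context, a minimal sketch of that coordinating side, assuming the same CountDownLatch surface (count in the constructor, CountDown, Await) used throughout these examples; the ParallelWork name and worker wiring are hypothetical:

// Hypothetical coordinator: completes once every worker has reported back.
public sealed class ParallelWork
{
    private readonly CountDownLatch evt;

    public ParallelWork(int workerCount)
    {
        // One count per expected Execute() callback.
        evt = new CountDownLatch(workerCount);
    }

    // Handed to each worker as its completion callback, mirroring the Execute() above.
    public void Execute()
    {
        lock (this)
        {
            if (evt != null)
            {
                evt.CountDown();
            }
        }
    }

    public void WaitForAll()
    {
        // Blocks the caller until every worker has invoked Execute().
        evt.Await();
    }
}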
/// <summary>
/// Computes a result, or throws an exception if unable to do so.
/// </summary>
/// <returns>The default value of <typeparamref name="T"/>; shard results are reported through the exit strategy.</returns>
public T Call()
{
    try
    {
        WaitForStartSignal();
        log.DebugFormat("Starting execution of {0} against shard {1}", operation.OperationName, shard);
        // If AddResult() returns true it means there is no more work to be
        // performed. Cancel all the outstanding tasks.
        if (exitStrategy.AddResult(operation.Execute(shard), shard))
        {
            log.DebugFormat("Short-circuiting execution of {0} on other threads after execution against shard {1}", operation.OperationName, shard);
            // It's ok to cancel ourselves because StartAwareFutureTask.Cancel()
            // will return false if a task has already started executing, and we're
            // already executing.
            log.DebugFormat("Checking {0} future tasks to see if they need to be cancelled.", futureTasks.Count);
            foreach (StartAwareFutureTask<T> ft in futureTasks)
            {
                log.DebugFormat("Preparing to cancel future task {0}.", ft.Id);
                // If a task was successfully cancelled that means it had not yet
                // started running. Since the task won't run, the task won't be
                // able to decrement the CountDownLatch. We need to decrement
                // it on behalf of the cancelled task.
                if (ft.Cancel(INTERRUPT_IF_RUNNING))
                {
                    log.Debug("Task cancel returned true, decrementing counter on its behalf.");
                    doneSignal.CountDown();
                }
                else
                {
                    log.Debug("Task cancel returned false, not decrementing counter on its behalf.");
                }
            }
        }
        else
        {
            log.DebugFormat("No need to short-circuit execution of {0} on other threads after execution against shard {1}", operation.OperationName, shard);
        }
    }
    finally
    {
        // counter must get decremented no matter what
        log.DebugFormat("Decrementing counter for operation {0} on shard {1}", operation.OperationName, shard);
        doneSignal.CountDown();
    }
    return default(T);
}
private void RunAssertionNonBlockingCancel(EPServiceProvider epService)
{
    // declare
    epService.EPAdministrator.CreateEPL("create schema SomeType ()");
    epService.EPAdministrator.CreateEPL(
        "create dataflow MyDataFlowOne " +
        "SourceOne -> outstream<SomeType> {}" +
        "OutputOp(outstream) {}");

    // instantiate
    var latchOne = new CountDownLatch(1);
    var ops = new Dictionary<string, object>();
    ops.Put("SourceOne", new DefaultSupportSourceOp(new object[] {latchOne, new object[] {1}}));
    var output = new DefaultSupportCaptureOp(SupportContainer.Instance.LockManager());
    ops.Put("OutputOp", output);

    var options = new EPDataFlowInstantiationOptions().OperatorProvider(new DefaultSupportGraphOpProviderByOpName(ops));
    var dfOne = epService.EPRuntime.DataFlowRuntime.Instantiate("MyDataFlowOne", options);

    dfOne.Start();
    Assert.AreEqual(EPDataFlowState.RUNNING, dfOne.State);

    dfOne.Cancel();
    latchOne.CountDown();
    Thread.Sleep(100);
    Assert.AreEqual(EPDataFlowState.CANCELLED, dfOne.State);
    Assert.AreEqual(0, output.GetAndReset().Count);

    epService.EPAdministrator.DestroyAllStatements();
}
public void AfterPropertiesSet(IBeanContextFactory beanContextFactory)
{
    if (dispatcherThread != null)
    {
        throw new Exception("Module instantiated twice");
    }
    SynchronizationContext syncContext = null;
    CountDownLatch latch = new CountDownLatch(1);
    Log.Info("Create SyncContext...");
    dispatcherThread = new Thread(delegate()
    {
        // Create our context, and install it:
        try
        {
            syncContext = new DispatcherSynchronizationContext(Dispatcher.CurrentDispatcher);
            SynchronizationContext.SetSynchronizationContext(syncContext);
            Log.Info("I am the UI Thread");
        }
        finally
        {
            latch.CountDown();
        }
        // Start the Dispatcher Processing
        System.Windows.Threading.Dispatcher.Run();
    });
    dispatcherThread.Name = "TestDispatcherThread";
    dispatcherThread.IsBackground = true;
    dispatcherThread.Start();
    latch.Await();
    Log.Info("SyncContext created");
}
public void SetValue(Java.Lang.String value)
{
    returnValue = value.ToString();
    try
    {
        latch.CountDown();
    }
    catch (System.Exception ex)
    {
        DLogger.WriteLog(ex);
    }
}
private void RunAssertionNonBlockingJoinSingleRunnable(EPServiceProvider epService)
{
    // declare
    epService.EPAdministrator.CreateEPL("create schema SomeType ()");
    epService.EPAdministrator.CreateEPL(
        "create dataflow MyDataFlowOne " +
        "DefaultSupportSourceOp -> outstream<SomeType> {}" +
        "DefaultSupportCaptureOp(outstream) {}");

    // instantiate
    var latch = new CountDownLatch(1);
    var source = new DefaultSupportSourceOp(new object[] {latch, new object[] {1}});
    var future = new DefaultSupportCaptureOp(1, SupportContainer.Instance.LockManager());
    var options = new EPDataFlowInstantiationOptions().OperatorProvider(
        new DefaultSupportGraphOpProvider(source, future));
    var dfOne = epService.EPRuntime.DataFlowRuntime.Instantiate("MyDataFlowOne", options);
    Assert.AreEqual("MyDataFlowOne", dfOne.DataFlowName);
    Assert.AreEqual(EPDataFlowState.INSTANTIATED, dfOne.State);

    dfOne.Start();
    Thread.Sleep(100);
    Assert.AreEqual(EPDataFlowState.RUNNING, dfOne.State);

    latch.CountDown();
    dfOne.Join();
    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);
    Assert.AreEqual(1, future.GetAndReset()[0].Count);
    Assert.AreEqual(2, source.GetCurrentCount());

    dfOne.Cancel();
    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);

    epService.EPAdministrator.DestroyAllStatements();
}
public virtual void TestKillJob()
{
    CountDownLatch latch = new CountDownLatch(1);
    MRApp app = new TestKill.BlockingMRApp(1, 0, latch);
    // this will start the job but the job won't complete as the task is blocked
    Org.Apache.Hadoop.Mapreduce.V2.App.Job.Job job = app.Submit(new Configuration());
    // wait and validate for Job to become RUNNING
    app.WaitForState(job, JobState.Running);
    // send the kill signal to Job
    app.GetContext().GetEventHandler().Handle(new JobEvent(job.GetID(), JobEventType.JobKill));
    // unblock Task
    latch.CountDown();
    // wait and validate for Job to be KILLED
    app.WaitForState(job, JobState.Killed);
    IDictionary<TaskId, Task> tasks = job.GetTasks();
    NUnit.Framework.Assert.AreEqual("No of tasks is not correct", 1, tasks.Count);
    Task task = tasks.Values.GetEnumerator().Next();
    NUnit.Framework.Assert.AreEqual("Task state not correct", TaskState.Killed, task.GetReport().GetTaskState());
    IDictionary<TaskAttemptId, TaskAttempt> attempts = tasks.Values.GetEnumerator().Next().GetAttempts();
    NUnit.Framework.Assert.AreEqual("No of attempts is not correct", 1, attempts.Count);
    IEnumerator<TaskAttempt> it = attempts.Values.GetEnumerator();
    NUnit.Framework.Assert.AreEqual("Attempt state not correct", TaskAttemptState.Killed, it.Next().GetReport().GetTaskAttemptState());
}
public virtual void PutMetrics(MetricsRecord record)
{
    while (!closed)
    {
        collectingLatch.CountDown();
    }
}
protected void CreateDispatcherThread(IBeanContextFactory beanContextFactory)
{
    SynchronizationContext syncContext = null;
    CountDownLatch latch = new CountDownLatch(1);
    dispatcherThread = new Thread(delegate()
    {
        // Create our context, and install it:
        try
        {
            syncContext = new DispatcherSynchronizationContext(Dispatcher.CurrentDispatcher);
            SynchronizationContext.SetSynchronizationContext(syncContext);
            Log.Info("I am the UI Thread");
        }
        finally
        {
            latch.CountDown();
        }
        // Start the Dispatcher Processing
        System.Windows.Threading.Dispatcher.Run();
    });
    dispatcherThread.Name = "TestDispatcherThread";
    //dispatcherThread.IsBackground = true;
    dispatcherThread.Start();
    latch.Await();
    //SynchronizationContext.SetSynchronizationContext(syncContext);
    beanContextFactory.RegisterExternalBean(syncContext).Autowireable<SynchronizationContext>();
    beanContextFactory.RegisterExternalBean(new UIThreadWrapper(dispatcherThread)).Autowireable<UIThreadWrapper>();
}
public void TestBlockingMultipleRunnable()
{
    // declare
    _epService.EPAdministrator.CreateEPL(
        "create dataflow MyDataFlowOne " +
        "SourceOne -> outstream<SomeType> {}" +
        "SourceTwo -> outstream<SomeType> {}" +
        "Future(outstream) {}");

    // instantiate
    var latchOne = new CountDownLatch(1);
    var latchTwo = new CountDownLatch(1);
    IDictionary<String, Object> ops = new Dictionary<String, Object>();
    ops.Put("SourceOne", new DefaultSupportSourceOp(new Object[] { latchOne, new Object[] { 1 } }));
    ops.Put("SourceTwo", new DefaultSupportSourceOp(new Object[] { latchTwo, new Object[] { 1 } }));
    var future = new DefaultSupportCaptureOp(2);
    ops["Future"] = future;

    EPDataFlowInstantiationOptions options =
        new EPDataFlowInstantiationOptions().OperatorProvider(new DefaultSupportGraphOpProviderByOpName(ops));
    EPDataFlowInstance dfOne = _epService.EPRuntime.DataFlowRuntime.Instantiate("MyDataFlowOne", options);

    try
    {
        dfOne.Run();
        Assert.Fail();
    }
    catch (UnsupportedOperationException ex)
    {
        Assert.AreEqual(
            "The data flow 'MyDataFlowOne' has zero or multiple sources and requires the use of the start method instead",
            ex.Message);
    }

    latchTwo.CountDown();
    dfOne.Start();
    latchOne.CountDown();
    dfOne.Join();

    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);
    Assert.AreEqual(2, future.GetAndReset().Count);
}
/// <summary>
/// Run a set of threads making changes to the deprecations
/// concurrently with another set of threads calling get()
/// and set() on Configuration objects.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestConcurrentDeprecateAndManipulate()
{
    int NumThreadIds = 10;
    int NumKeysPerThread = 1000;
    ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(
        2 * NumThreadIds,
        new ThreadFactoryBuilder().SetDaemon(true)
            .SetNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").Build());
    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger highestModificationThreadId = new AtomicInteger(1);
    IList<Future<Void>> futures = new List<Future<Void>>();
    for (int i = 0; i < NumThreadIds; i++)
    {
        futures.AddItem(executor.Schedule(
            new _Callable_363(latch, highestModificationThreadId, NumKeysPerThread), 0, TimeUnit.Seconds));
    }
    AtomicInteger highestAccessThreadId = new AtomicInteger(1);
    for (int i_1 = 0; i_1 < NumThreadIds; i_1++)
    {
        futures.AddItem(executor.Schedule(
            new _Callable_382(latch, highestAccessThreadId, NumKeysPerThread), 0, TimeUnit.Seconds));
    }
    // allow all threads to proceed
    latch.CountDown();
    foreach (Future<Void> future in futures)
    {
        Uninterruptibles.GetUninterruptibly(future);
    }
}
private CountDownLatch ReplicationWatcherThread(Replication replication)
{
    var doneSignal = new CountDownLatch(2);
    Task.Factory.StartNew(() =>
    {
        var started = false;
        var done = false;
        while (!done)
        {
            started |= replication.IsRunning;
            var statusIsDone = (
                replication.Status == ReplicationStatus.Stopped ||
                replication.Status == ReplicationStatus.Idle);
            if (started && statusIsDone)
            {
                done = true;
            }
            try
            {
                Thread.Sleep(10000);
            }
            catch (Exception e)
            {
                Runtime.PrintStackTrace(e);
            }
        }
        doneSignal.CountDown();
    });
    return doneSignal;
}
public virtual void TestAddResourceConcurrency()
{
    StartEmptyStore();
    string key = "key1";
    int count = 5;
    ExecutorService exec = Executors.NewFixedThreadPool(count);
    IList<Future<string>> futures = new AList<Future<string>>(count);
    CountDownLatch start = new CountDownLatch(1);
    for (int i = 0; i < count; i++)
    {
        string fileName = "foo-" + i + ".jar";
        Callable<string> task = new _Callable_129(this, start, key, fileName);
        futures.AddItem(exec.Submit(task));
    }
    // start them all at the same time
    start.CountDown();
    // check the result; they should all agree with the value
    ICollection<string> results = new HashSet<string>();
    foreach (Future<string> future in futures)
    {
        results.AddItem(future.Get());
    }
    NUnit.Framework.Assert.AreEqual(1, results.Count);
    exec.Shutdown();
}
public virtual void TestAddResourceRefAddResourceConcurrency()
{
    StartEmptyStore();
    string key = "key1";
    string fileName = "foo.jar";
    string user = "******";
    ApplicationId id = CreateAppId(1, 1L);
    // add the resource and add the resource ref at the same time
    ExecutorService exec = Executors.NewFixedThreadPool(2);
    CountDownLatch start = new CountDownLatch(1);
    Callable<string> addKeyTask = new _Callable_240(this, start, key, fileName);
    Callable<string> addAppIdTask = new _Callable_246(this, start, key, id, user);
    Future<string> addAppIdFuture = exec.Submit(addAppIdTask);
    Future<string> addKeyFuture = exec.Submit(addKeyTask);
    // start them at the same time
    start.CountDown();
    // get the results
    string addKeyResult = addKeyFuture.Get();
    string addAppIdResult = addAppIdFuture.Get();
    NUnit.Framework.Assert.AreEqual(fileName, addKeyResult);
    System.Console.Out.WriteLine("addAppId() result: " + addAppIdResult);
    // it may be null or the fileName depending on the timing
    NUnit.Framework.Assert.IsTrue(addAppIdResult == null || addAppIdResult.Equals(fileName));
    exec.Shutdown();
}
public virtual void Changed(Replication.ChangeEvent @event)
{
    Replication replicator = @event.GetSource();
    Log.D(Tag, replicator + " changed. " + replicator.GetCompletedChangesCount() + " / " + replicator.GetChangesCount());
    NUnit.Framework.Assert.IsTrue(replicator.GetCompletedChangesCount() <= replicator.GetChangesCount());
    if (replicator.GetCompletedChangesCount() > replicator.GetChangesCount())
    {
        throw new RuntimeException("replicator.getCompletedChangesCount() > replicator.getChangesCount()");
    }
    if (!replicator.IsRunning())
    {
        replicationFinished = true;
        string msg = string.Format("ReplicationFinishedObserver.changed called, set replicationFinished to: {0}", replicationFinished);
        Log.D(Tag, msg);
        doneSignal.CountDown();
    }
    else
    {
        string msg = "ReplicationFinishedObserver.changed called, but replicator still running, so ignore it";
        Log.D(Tag, msg);
    }
}
public void SimpleCountDown()
{
    var cdl = new CountDownLatch(3);
    cdl.CountDown();
    Assert.AreEqual(2, cdl.Count);
    cdl.CountDown();
    Assert.AreEqual(1, cdl.Count);
    cdl.CountDown();
    Assert.AreEqual(0, cdl.Count);
    cdl.CountDown();
    Assert.AreEqual(0, cdl.Count);
}
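SimpleCountDown only exercises the Count property. As a companion, a sketch of a test for the blocking side, assuming the same CountDownLatch surface (CountDown, Count, Await); the test name, timings, and thresholds are illustrative only:

public void AwaitBlocksUntilZero() // hypothetical companion test
{
    var cdl = new CountDownLatch(2);
    var sw = System.Diagnostics.Stopwatch.StartNew();
    long releasedAt = 0;

    var waiter = new Thread(() =>
    {
        cdl.Await();                         // should block until the count reaches zero
        releasedAt = sw.ElapsedMilliseconds; // record when Await returned
    });
    waiter.Start();

    Thread.Sleep(200);
    cdl.CountDown(); // 2 -> 1, waiter must still be blocked
    Thread.Sleep(200);
    cdl.CountDown(); // 1 -> 0, waiter is released

    waiter.Join();
    Assert.AreEqual(0, cdl.Count);
    // allow some timer slack below the nominal 400 ms
    Assert.IsTrue(releasedAt >= 350, "Await should not return before the second CountDown");
}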
public void WaitingTask2()
{
    var cdl = new CountDownLatch(2);
    var task = new Task(cdl);
    var runner = new Thread(task.DoSomething);
    runner.Start();
    Thread.Sleep(600);
    cdl.CountDown();
    Thread.Sleep(600);
    cdl.CountDown();
    runner.Join();
    Assert.IsTrue(task.Elapsed >= 1100, "The task should cost more than 1100 millis");
    Assert.IsTrue(task.Elapsed <= 1300, "The task should cost less than 1300 millis");
}
/// <summary>
/// Sign in in the background.
/// </summary>
private bool SignInBackend()
{
    Log.Logger.Info(Tag, "signInBackend");
    ClearAccountInfo();
    if (service == null)
    {
        return false;
    }
    CountDownLatch countDownLatch = new CountDownLatch(1);
    Huawei.Hmf.Tasks.Task task = service.SilentSignIn();
    task.AddOnSuccessListener(new OnSuccessListener(delegate(Java.Lang.Object authHuaweiId)
    {
        Log.Logger.Info(Tag, "silentSignIn success");
        DealSignInResult((AuthHuaweiId)authHuaweiId);
        countDownLatch.CountDown();
    }));
    task.AddOnFailureListener(new OnFailureListener(delegate(Java.Lang.Exception authHuaweiId)
    {
        Log.Logger.Info(Tag, "silentSignIn error");
        countDownLatch.CountDown();
    }));
    try
    {
        countDownLatch.Await(15, TimeUnit.Seconds);
    }
    catch (Java.Lang.InterruptedException)
    {
        Log.Logger.Info(Tag, "signInBackend catch InterruptedException");
        countDownLatch.CountDown();
    }
    return !TextUtils.IsEmpty(AccessToken);
}
/// <exception cref="System.IO.IOException" /> private void ProcessBlock(Block m) { Debug.Assert(Thread.CurrentThread == _thread); try { // Was this block requested by getblock? lock (_pendingGetBlockFutures) { for (var i = 0; i < _pendingGetBlockFutures.Count; i++) { var f = _pendingGetBlockFutures[i]; if (f.Item.Hash.SequenceEqual(m.Hash)) { // Yes, it was. So pass it through the future. f.SetResult(m); // Blocks explicitly requested don't get sent to the block chain. _pendingGetBlockFutures.RemoveAt(i); return; } } } // Otherwise it's a block sent to us because the peer thought we needed it, so add it to the block chain. // This call will synchronize on blockChain. if (_blockChain.Add(m)) { // The block was successfully linked into the chain. Notify the user of our progress. if (_chainCompletionLatch != null) { _chainCompletionLatch.CountDown(); if (_chainCompletionLatch.Count == 0) { // All blocks fetched, so we don't need this anymore. _chainCompletionLatch = null; } } } else { // This block is unconnected - we don't know how to get from it back to the genesis block yet. That // must mean that there are blocks we are missing, so do another getblocks with a new block locator // to ask the peer to send them to us. This can happen during the initial block chain download where // the peer will only send us 500 at a time and then sends us the head block expecting us to request // the others. // TODO: Should actually request root of orphan chain here. BlockChainDownload(m.Hash); } } catch (VerificationException e) { // We don't want verification failures to kill the thread. _log.Warn("block verification failed", e); } catch (ScriptException e) { // We don't want script failures to kill the thread. _log.Warn("script exception", e); } }
public void HandleMessage(IMessage message)
{
    _threadName = Thread.CurrentThread.Name;
    if (_latch != null)
    {
        _latch.CountDown();
    }
}
/// <summary>
/// Called by the Peer when the result has arrived. Completes the task.
/// </summary>
internal void SetResult(T result)
{
    Debug.Assert(Thread.CurrentThread == _enclosing._thread); // Called from peer thread.
    _result = result;
    // Now release the thread that is waiting. We don't need to synchronize here as the latch establishes
    // a memory barrier.
    _latch.CountDown();
}
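SetResult stores the value and then releases the latch; the matching read side (not shown in this listing) would simply await that latch before returning the field. A sketch under that assumption, with interrupt handling left to the caller:

/// <summary>
/// Hypothetical counterpart to SetResult: blocks the calling thread until the
/// peer thread has delivered the result, then returns it.
/// </summary>
internal T Get()
{
    // Await() only returns after SetResult has called _latch.CountDown(), so the
    // latch doubles as the memory barrier that makes reading _result safe here.
    _latch.Await();
    return _result;
}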
public void Run(RegressionEnvironment env)
{
    // declare
    var path = new RegressionPath();
    env.CompileDeploy("create schema SomeType ()", path);
    env.CompileDeploy(
        "@Name('flow') create dataflow MyDataFlowOne " +
        "DefaultSupportSourceOp -> outstream<SomeType> { name: 'SourceOne' }" +
        "DefaultSupportSourceOp -> outstream<SomeType> { name: 'SourceTwo' }" +
        "DefaultSupportCaptureOp(outstream) {}",
        path);

    // instantiate
    var latchOne = new CountDownLatch(1);
    var latchTwo = new CountDownLatch(1);
    IDictionary<string, object> ops = new Dictionary<string, object>();
    ops.Put("SourceOne", new DefaultSupportSourceOp(new object[] { latchOne, new object[] {1} }));
    ops.Put("SourceTwo", new DefaultSupportSourceOp(new object[] { latchTwo, new object[] {1} }));
    var future = new DefaultSupportCaptureOp(2, env.Container.LockManager());
    ops.Put("DefaultSupportCaptureOp", future);

    var options = new EPDataFlowInstantiationOptions().WithOperatorProvider(
        new DefaultSupportGraphOpProviderByOpName(ops));
    var dfOne = env.Runtime.DataFlowService.Instantiate(env.DeploymentId("flow"), "MyDataFlowOne", options);

    dfOne.Start();
    Sleep(50);
    Assert.AreEqual(EPDataFlowState.RUNNING, dfOne.State);

    latchOne.CountDown();
    Sleep(200);
    Assert.AreEqual(EPDataFlowState.RUNNING, dfOne.State);

    latchTwo.CountDown();
    try
    {
        dfOne.Join();
    }
    catch (ThreadInterruptedException e)
    {
        throw new EPException(e);
    }
    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);
    Assert.AreEqual(2, future.GetAndReset().Count);

    env.UndeployAll();
}
public virtual void Changed(Replication.ChangeEvent @event)
{
    Replication replicator = @event.GetSource();
    if (replicator.GetStatus() == Replication.ReplicationStatus.ReplicationIdle)
    {
        doneSignal.CountDown();
    }
}

public virtual void Changed(Replication.ChangeEvent @event)
{
    Replication replicator = @event.GetSource();
    if (replicator.IsRunning())
    {
        doneSignal.CountDown();
    }
}
public void Run(RegressionEnvironment env)
{
    // declare
    var path = new RegressionPath();
    env.CompileDeploy("create schema SomeType ()", path);
    env.CompileDeploy(
        "@Name('flow') create dataflow MyDataFlowOne " +
        "DefaultSupportSourceOp -> s<SomeType> {}" +
        "DefaultSupportCaptureOp(s) {}",
        path);

    // instantiate
    var latch = new CountDownLatch(1);
    var source = new DefaultSupportSourceOp(new object[] { latch, new object[] {1} });
    var future = new DefaultSupportCaptureOp(1, env.Container.LockManager());
    var options = new EPDataFlowInstantiationOptions().WithOperatorProvider(
        new DefaultSupportGraphOpProvider(future, source));
    var dfOne = env.Runtime.DataFlowService.Instantiate(env.DeploymentId("flow"), "MyDataFlowOne", options);
    Assert.AreEqual("MyDataFlowOne", dfOne.DataFlowName);
    Assert.AreEqual(EPDataFlowState.INSTANTIATED, dfOne.State);

    var unlatchingThread = new Thread(
        () =>
        {
            try
            {
                while (dfOne.State != EPDataFlowState.RUNNING)
                {
                    Thread.Sleep(0);
                }
                Thread.Sleep(100);
                latch.CountDown();
            }
            catch (Exception e)
            {
                log.Error("Unexpected exception", e);
            }
        });
    unlatchingThread.Name = GetType().Name + "-unlatching";

    // blocking run
    unlatchingThread.Start();
    dfOne.Run();
    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);
    Assert.AreEqual(1, future.GetAndReset()[0].Count);
    Assert.AreEqual(2, source.CurrentCount);

    try
    {
        unlatchingThread.Join();
    }
    catch (ThreadInterruptedException e)
    {
        throw new EPException(e);
    }

    env.UndeployAll();
}
public void Changed(ReplicationChangeEventArgs args)
{
    var replicator = args.Source;
    if (replicator.LastError != null)
    {
        doneSignal.CountDown();
    }
}

public virtual void Changed(Replication.ChangeEvent @event)
{
    Replication replicator = @event.GetSource();
    if (replicator.GetLastError() != null)
    {
        doneSignal.CountDown();
    }
}

public void Changed(ReplicationChangeEventArgs args)
{
    var replicator = args.Source;
    if (replicator.Status == ReplicationStatus.Stopped)
    {
        doneSignal.CountDown();
    }
}
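The Changed handlers above (idle, running, error, stopped) all follow the same shape: test code hands an observer a one-count doneSignal and then parks on it. A sketch of that waiting side, assuming the timed Await overload and Count property used elsewhere in these examples; attaching the observer to the replication is omitted because the event wiring differs between the Java-ported and native C# listings:

// Hypothetical waiting side for the Changed handlers above.
private static void AwaitDoneSignal(CountDownLatch doneSignal)
{
    // Bounded wait so a replication that never reaches the expected state
    // fails the test instead of hanging it.
    doneSignal.Await(30, TimeUnit.Seconds);

    // If the handler never fired, the count is still 1.
    Assert.AreEqual(0, doneSignal.Count, "change handler did not count the latch down in time");
}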
private void RunAssertionBlockingRunJoin(EPServiceProvider epService)
{
    // declare
    epService.EPAdministrator.CreateEPL("create schema SomeType ()");
    epService.EPAdministrator.CreateEPL(
        "create dataflow MyDataFlowOne " +
        "DefaultSupportSourceOp -> s<SomeType> {}" +
        "DefaultSupportCaptureOp(s) {}");

    // instantiate
    var latch = new CountDownLatch(1);
    var source = new DefaultSupportSourceOp(new object[] {latch, new object[] {1}});
    var future = new DefaultSupportCaptureOp(1, SupportContainer.Instance.LockManager());
    var options = new EPDataFlowInstantiationOptions().OperatorProvider(
        new DefaultSupportGraphOpProvider(source, future));
    var dfOne = epService.EPRuntime.DataFlowRuntime.Instantiate("MyDataFlowOne", options);
    Assert.AreEqual("MyDataFlowOne", dfOne.DataFlowName);
    Assert.AreEqual(EPDataFlowState.INSTANTIATED, dfOne.State);

    var joiningRunnable = new MyJoiningRunnable(dfOne);
    var joiningThread = new Thread(joiningRunnable.Run);
    var unlatchingThread = new Thread(
        () =>
        {
            try
            {
                while (dfOne.State != EPDataFlowState.RUNNING)
                {
                    Thread.Sleep(10);
                }
                Thread.Sleep(1000);
                latch.CountDown();
            }
            catch (Exception e)
            {
                Console.Error.WriteLine(e.StackTrace);
            }
        });

    joiningThread.Start();
    unlatchingThread.Start();
    dfOne.Run();

    Assert.AreEqual(EPDataFlowState.COMPLETE, dfOne.State);
    Assert.AreEqual(1, future.GetAndReset()[0].Count);
    Assert.AreEqual(2, source.GetCurrentCount());

    joiningThread.Join();
    unlatchingThread.Join();
    var deltaJoin = joiningRunnable.End - joiningRunnable.Start;
    Assert.IsTrue(deltaJoin >= 500, "deltaJoin=" + deltaJoin);

    epService.EPAdministrator.DestroyAllStatements();
}