public void verify_bahaviour_for_concurrent_access_under_different_keys()
{
    var factoryCalls = new ConcurrentStack<int>();   // one entry per value-factory invocation
    var cache = new ConcurrentStack<TestItem>();     // items handed out by the cache under test
    var keys = new[] { "a", "b" };

    // Starts one worker thread per key and blocks until all have finished.
    void RunAllThreads()
    {
        var workers = MakeThreads(keys);
        foreach (var worker in workers)
        {
            worker.Start(new object[] { cache, factoryCalls });
        }
        foreach (var worker in workers)
        {
            worker.Join();
        }
    }

    // First run: each distinct key triggers its own factory call and yields
    // a distinct item.
    RunAllThreads();
    Assert.Equal(2, factoryCalls.Count);
    Assert.Equal(2, cache.Count);
    Assert.NotSame(cache.First(), cache.Last());
    var a = cache.FirstOrDefault(x => x.Id == "a");
    var b = cache.FirstOrDefault(x => x.Id == "b");

    // Second run: both keys should now be served without any further factory
    // calls, returning the very same instances as before.
    cache.Clear();
    factoryCalls.Clear();
    RunAllThreads();
    Assert.Equal(0, factoryCalls.Count);
    Assert.Equal(2, cache.Count);
    Assert.NotSame(cache.First(), cache.Last());
    var aa = cache.FirstOrDefault(x => x.Id == "a");
    var bb = cache.FirstOrDefault(x => x.Id == "b");
    Assert.Same(a, aa);
    Assert.Same(b, bb);
}
public void verify_bahaviour_for_concurrent_access_under_identical_keys()
{
    var factoryCalls = new ConcurrentStack<int>();   // one entry per value-factory invocation
    var cache = new ConcurrentStack<TestItem>();     // items handed out by the cache under test
    var keys = new[] { "a", "a" };

    // Starts one worker thread per key and waits for all of them to finish.
    void RunAllThreads()
    {
        var workers = MakeThreads(keys);
        foreach (var worker in workers)
        {
            worker.Start(new object[] { cache, factoryCalls });
        }
        foreach (var worker in workers)
        {
            worker.Join();
        }
    }

    // First run: both threads share one key, so the factory fires exactly once
    // even though each thread records the item it received.
    RunAllThreads();
    Assert.Equal(1, factoryCalls.Count);
    Assert.Equal(2, cache.Count);
    var firstRunItem = cache.First();
    Assert.Same(cache.First(), cache.Last());

    // Second run: the same key must be served with no further factory calls,
    // yielding the identical instance from the first run.
    cache.Clear();
    factoryCalls.Clear();
    RunAllThreads();
    Assert.Equal(0, factoryCalls.Count);
    Assert.Equal(2, cache.Count);
    var secondRunItem = cache.First();
    Assert.Same(cache.First(), cache.Last());
    Assert.Same(firstRunItem, secondRunItem);
}
public void SetMainMasterDetailPage(object masterName, object detailName, Dictionary<string, object> navParams = null, bool invokeOnMainThread = false)
{
    // Ignore re-entrant calls while a page swap is already in progress.
    if (_isBusy)
    {
        return;
    }
    if (string.IsNullOrEmpty(masterName?.ToString()))
    {
        throw new ArgumentNullException(nameof(masterName));
    }
    if (string.IsNullOrEmpty(detailName?.ToString()))
    {
        throw new ArgumentNullException(nameof(detailName));
    }

    // Builds the master/detail page, installs it as the app's main page and
    // resets the tracked navigation stack to the new detail page's navigation.
    void ApplyMainPage()
    {
        _isBusy = true;
        var page = new MasterDetailPage
        {
            Master = GetInitializedPage(masterName.ToString(), navParams: navParams, withBackButton: false, toTitle: masterName.ToString()),
            Detail = GetInitializedPage(detailName.ToString(), NavigationMode.Normal, navParams, true, false, false)
        };
        _app.MainPage = page;
        _navigations?.Clear();
        _navigations?.Push(page.Detail.Navigation);
        _isBusy = false;
    }

    if (invokeOnMainThread)
    {
        Device.BeginInvokeOnMainThread(ApplyMainPage);
    }
    else
    {
        ApplyMainPage();
    }
}
public static ActionBlock<StatsdMessage> CreateBlock(ITargetBlock<Bucket> target, IIntervalService intervalService)
{
    // Raw lines buffered between flush intervals.
    var rawLines = new ConcurrentStack<Raw>();
    var incoming = new ActionBlock<StatsdMessage>(p =>
    {
        // NOTE(review): assumes every message routed here is a Raw; any other
        // subtype would push null — confirm upstream routing guarantees this.
        rawLines.Push(p as Raw);
    },
    Utility.UnboundedExecution());

    intervalService.Elapsed += (sender, e) =>
    {
        if (rawLines.Count == 0)
        {
            return;
        }
        // Drain by popping exactly the lines present at this instant.
        // The previous ToArray() + Clear() pair was not atomic: any line pushed
        // between the snapshot and the Clear() was silently discarded.
        // Pops happen only in this handler, so the snapshot count remains a
        // poppable lower bound even while producers keep pushing.
        var lines = new Raw[rawLines.Count];
        rawLines.TryPopRange(lines, 0, lines.Length);
        var bucket = new RawBucket(lines, e.Epoch);
        target.Post(bucket);
    };
    return incoming;
}
/// <summary>
/// Returns the breaker to the Closed state and forgets the exceptions
/// recorded since the last state change.
/// </summary>
public void Reset()
{
    ChangeState(CircuitBreakerStateEnum.Closed);
    exceptionsSinceLastStateChange.Clear();
}
public void Clear()
{
    // Discard every pooled object.
    _objectStack.Clear();
    // Publish the zero count with full-fence semantics so other threads
    // observe it promptly.
    Interlocked.Exchange(ref _count, 0);
    // NOTE(review): the clear and the count reset are not one atomic step;
    // a concurrent push between them could leave _count understated — confirm
    // callers do not rely on exact counts while Clear() runs.
}
/// <summary>
/// Empties the backing pool stack, if one has been created.
/// </summary>
public void Clear() => pool?.Clear();
/// <summary>Removes every entry from the backing store.</summary>
public void Clear() => _data.Clear();
/// <summary>
/// Ensures that the last compiled algorithm will not be re-used. Call this after
/// any change that requires recompiling (but not rebuilding) the model.
/// </summary>
internal void InvalidateCompiledAlgorithms()
{
    // Both caches are dropped; order is irrelevant since they are independent.
    compiledAlgorithms.Clear();
    compiledAlgorithmForVariable.Clear();
}
// Iteratively prunes Solution_List down by Removal_Ratio: each pass re-scores
// candidates on worker threads, keeps the best-scoring solution in Holder,
// and removes it from the candidate list.
private void Validate_Solutions(double Removal_Ratio)
{
    Solution_List_Threading = new ConcurrentStack<Solution>() { };
    List<Solution> Holder = new List<Solution>() { };
    // Number of passes = fraction of solutions to KEEP (1 - Removal_Ratio).
    count = Convert.ToInt32(Solution_List.Count * (1 - Removal_Ratio));
    int Count_Original = count;
    for (int n = 0; n < Count_Original; n++)
    {
        // Fan the scoring work out to the thread pool and block until every
        // worker signals the countdown. NOTE(review): this queues the
        // Validate_Solutions(object) overload — presumably defined elsewhere
        // in this class; confirm it pushes results onto Solution_List_Threading.
        using (e = new CountdownEvent(NumberOfThreads))
        {
            for (int i = 0; i < NumberOfThreads; i++)
            {
                ThreadPool.QueueUserWorkItem(new WaitCallback(Validate_Solutions), i);
            }
            e.Wait();
        }
        // Linear scan for the solution with the largest Unknowns_Sum.
        // NOTE(review): ElementAt on a ConcurrentStack re-enumerates from the
        // top on every call (O(n^2) per pass), and if the stack came back empty
        // mx stays -1 and ElementAt(mx) below throws — confirm the workers
        // always produce at least one result.
        int max = -1;
        int mx = -1;
        for (int i = 0; i < Solution_List_Threading.Count; i++)
        {
            if (max < Solution_List_Threading.ElementAt(i).Unknowns_Sum)
            {
                max = Solution_List_Threading.ElementAt(i).Unknowns_Sum;
                mx = i;
            }
        }
        Holder.Add(new Solution(Solution_List_Threading.ElementAt(mx)));
        Solution_List_Threading.Clear();
        // NOTE(review): mx indexes the threading stack yet removes from
        // Solution_List — the two collections must be index-aligned; verify.
        Solution_List.RemoveAt(mx);
        count = Convert.ToInt32(Solution_List.Count * (1 - Removal_Ratio));
    }
    // Replace the candidate list with the surviving best solutions.
    Solution_List = new List<Solution>(Holder);
    Holder.Clear();
}
public void pulse_AD5360_Reset()
{
    // Drop any queued commands for the DAC before pulsing the hardware reset
    // on the board. NOTE(review): assumes queued entries are meaningless after
    // a reset — confirm against the board protocol.
    ad5360_Stack.Clear();
    ad5360_Board.pulse_AD5360_Reset();
}
/// <summary>
/// Clears the current contents of the log
/// </summary>
public void ClearLog() => _activityLog.Clear();
public void _SetUp()
{
    // Drop disposables left over from a previous test before running the
    // subclass-provided SetUp(); order matters so fixtures start clean.
    _disposables.Clear();
    SetUp();
}
/// <summary>
/// Forgets both directions of history and the cached snapshot used to diff
/// against the next change.
/// </summary>
public void ClearHistory()
{
    m_previousData = null;
    m_redoBuffer.Clear();
    m_undoBuffer.Clear();
}
public UnitTestBase()
{
    // Rewind the fake clock before each test run...
    RebusTimeMachine.Reset();
    // ...and drop any disposables tracked by a previous test.
    _disposables.Clear();
}
/// <summary>
/// Disposes listener making it no longer listening for new messages. <br/>
/// Upon dispose, the received messages are cleared and all pending receive method calls aborted.
/// </summary>
public void Dispose()
{
    // Unsubscribe first so no new messages arrive during teardown.
    _source.OnMessage -= OnHandle;
    _messages.Clear();
    // Cancelling the token aborts any receive calls still waiting on it.
    _listenerDisposedTokenSource.Cancel();
}
// Pops one entry off the shared stack of file-listing lines and downloads the
// referenced file, then chains to itself (directly or from the completion
// callback) to fetch the next entry until the stack is exhausted.
// NOTE(review): the WebClient instance is never disposed, and the
// awaiter.OnCompleted callback never calls GetResult(), so exceptions raised
// by the download task itself go unobserved — the catch below only covers the
// synchronous start of the download. Confirm whether that is intentional.
internal static void DownloadFromStack(ConcurrentStack<String> cs, String userName, String passWord)
{
    var appUserSettings = Helper.UserSettings();
    var url = appUserSettings.SharesUrl; //eg = "http://www.bsb-software.de/rese/";
    string item;
    if (cs.Count > 0 && cs.TryPop(out item))
    {
        //NOTE, there may be a leading 'tick' at the beginning of item
        //make sure to not let it mess up the file name!
        Match m = Regex.Match(item, @"(\d{4}_\d{2}_\d{2}.TXT) (\d+) ");
        if (m.Success)
        {
            string targetFile = m.Groups[1].Value;
            Int64 targetFileReportedSize = Convert.ToInt64(m.Groups[2].Value);
            var webResource = url + $"/{targetFile}";
            var webClient = new WebClient();
            webClient.Credentials = new NetworkCredential(userName, passWord);
            var localFilename = appUserSettings.ExtraFolder + @"\" + targetFile;
            var fileInfo = new FileInfo(localFilename);
            // (Re-)download when the file is missing or smaller than the size
            // reported in the listing (i.e. truncated).
            if (!File.Exists(localFilename) || (fileInfo.Length < targetFileReportedSize))
            {
                try
                {
                    var downloadTask = webClient.DownloadFileTaskAsync(webResource, localFilename);
                    var awaiter = downloadTask.GetAwaiter();
                    awaiter.OnCompleted(() =>
                    {
                        Helper.Log("Info", $"{targetFile} downloaded.");
                        Helper.DecrementProgressCountdown("progressBarDownload", "labelBusyDownload");
                        var totalFiles = 0;
                        if (Helper.MarkListboxItem("listBoxInhalt", item, out totalFiles) == 0)
                        {
                            Helper.Status($"Done. 
{totalFiles} files downloaded.");
                        }
                        //go get another (even if stack is empty, since we want to exit down below)
                        DownloadFromStack(cs, userName, passWord);
                    });
                }
                catch (Exception e)
                {
                    // Abort the whole batch: emptying the stack makes the
                    // sibling download tasks take the exit route below.
                    Helper.LogStatus("Error", $"Exception: {e.Message}");
                    Helper.Log("Error", "Download terminated early.");
                    cs.Clear();
                    MessageBox.Show(e.Message, "An error ocurred - download will end early.", MessageBoxButtons.OK);
                    cs.Clear();
                }
            }
            else
            {
                // Local copy already complete — skip and continue with the next entry.
                Helper.Log("Warn", $"{targetFile} exists, skipping file.");
                Helper.DecrementProgressCountdown("progressBarDownload", "labelBusyDownload");
                var totalFiles = 0;
                Helper.MarkListboxItem("listBoxInhalt", item, out totalFiles);
                //go get another (even if stack is empty, since we want to exit down below)
                DownloadFromStack(cs, userName, passWord);
            }
        }
        else
        {
            // Listing line did not match the expected "date.TXT size" pattern.
            Helper.LogStatus("Warn", $"skipping malformed entry {item}");
            Helper.DecrementProgressCountdown("progressBarDownload", "labelBusyDownload");
            //go get another (even if stack is empty, since we want to exit down below)
            DownloadFromStack(cs, userName, passWord);
        }
    }
    else
    {
        if (cs.Count == 0)
        {
            //exit route... for each concurrent task
            Helper.Log("Info", $"Exiting DownloadFromStack. NumDownloadTasksActive={NumDownloadTasksActive}");
            NumDownloadTasksActive--;
            // Last task out finalizes the UI state.
            if (NumDownloadTasksActive == 0)
            {
                LocalStore.TickOffListboxFileItems("listBoxInhalt", appUserSettings.ExtraFolder);
                Helper.HoldWhileDownloadingDayData(false);
            }
        }
    }
}
public static void Reset(IViewModel viewModel)
{
    // Discard the entire navigation history, then make the supplied view model
    // the sole (root) entry on the stack.
    m_viewModelStack.Clear();
    Push(viewModel);
}
/// <summary>
/// Moves all particles from the stack into the particle list.
/// </summary>
public void DumpStack()
{
    // Pop items one at a time instead of AddRange() + Clear(): that pair was
    // not atomic, so particles pushed concurrently between the snapshot copy
    // and the clear were silently lost. Popping preserves the same top-first
    // order that enumerating the stack produced.
    while (ParticleStack.TryPop(out var particle))
    {
        Particles.Add(particle);
    }
}
public void Clear()
{
    // Drop all tracked client connections and their pooled socket args,
    // then release the shared buffer so it can be garbage-collected.
    _clients.Clear();
    _socketArgs.Clear();
    _bufferForAll = null;
}
// Dispose only empties the pooled buffer collection; the buffers themselves
// are left for the GC (nothing else is torn down here).
public void Dispose() => buffers.Clear();
/// <summary>
/// Clears received event information.
/// </summary>
/// <param name="expectedCount">The expected count of events to be received.</param>
public static void ClearReceived(int expectedCount)
{
    LastNodeIds.Clear();
    ReceivedEvents.Clear();
    // Re-arm the countdown so the next batch of events can be awaited.
    ReceivedEvent.Reset(expectedCount);
}
// Just validates clearing the stack's contents.
private static void Test3_Clear(int count)
{
    var stack = new ConcurrentStack<int>();
    var next = 0;
    while (next < count)
    {
        stack.Push(next++);
    }
    stack.Clear();
    // A cleared stack must report empty via both properties.
    Assert.True(stack.IsEmpty);
    Assert.Equal(0, stack.Count);
}
/// <summary>
/// Function to reset the allocator heap and "free" all previous instances.
/// </summary>
/// <remarks>
/// <para>
/// This method does not actually free any memory in the traditional sense, but merely resets the allocation pointer back to the beginning of the heap
/// to allow re-use of objects.
/// </para>
/// </remarks>
public void Reset()
{
    // Forget any individually returned slots; after a reset the whole heap is free.
    _freeList.Clear();
    // Atomically publish the full capacity as available again.
    // NOTE(review): the clear and the exchange are not one atomic operation —
    // confirm no allocations run concurrently with Reset.
    Interlocked.Exchange(ref _availableSlots, TotalSize);
}
// Just validates clearing the stack's contents.
private static bool RunConcurrentStackTest3_Clear(int count)
{
    TestHarness.TestLog("* RunConcurrentStackTest3_Clear()");
    var stack = new ConcurrentStack<int>();
    var next = 0;
    while (next < count)
    {
        stack.Push(next);
        next++;
    }
    stack.Clear();
    // A cleared stack must report empty via both properties.
    var empty = stack.IsEmpty;
    var remaining = stack.Count;
    TestHarness.TestLog(" > IsEmpty={0}, Count={1}", empty, remaining);
    return empty && remaining == 0;
}
/// <summary>
/// Shows <paramref name="content"/> in the overlay host and asynchronously
/// waits until the overlay stack has been emptied (via user close actions or
/// cancellation). Concurrent calls stack their contents; only the top-most
/// entry is displayed at a time.
/// NOTE(review): the <paramref name="waitSpan"/> parameter is never read in
/// this method — confirm whether a timeout was intended.
/// </summary>
private async Task ShowOverlayWithLockAsync(object content, bool allowClosing, TimeSpan waitSpan, CancellationToken ct)
{
    if (content == null)
    {
        throw new ArgumentNullException(nameof(content));
    }
    try
    {
        // Released when the overlay stack becomes empty; this method awaits it.
        var WAIT_HANDLE = new SemaphoreSlim(0, 1);

        // Pops the top overlay entry and releases its waiter; then either shows
        // the next stacked entry or signals completion when the stack is empty.
        void TryPop()
        {
            var CURRENT_OVERLAY_CONTENT = _OVERLAY_CONTENT_HOST.Content;
            if (CURRENT_OVERLAY_CONTENT == null)
            {
                return;
            }
            if (OVERLAY_CONTENT_STACK.TryPop(out var TOP_CONTENT))
            {
                TOP_CONTENT.WaitHandle?.Release();
            }
            if (OVERLAY_CONTENT_STACK.IsEmpty)
            {
                WAIT_HANDLE.Release();
            }
            else
            {
                OVERLAY_CONTENT_STACK.TryPeek(out var NEW_CONTENT);
                _OVERLAY_CONTENT_HOST.Content = NEW_CONTENT.Content;
            }
        }

        // Mouse clicks outside the overlay content close the top overlay
        // (when closing is allowed).
        void mouseButtonEventHandler(object sender, MouseButtonEventArgs args)
        {
            if (args.Handled)
            {
                return;
            }
            if (!allowClosing)
            {
                return;
            }
            if (args.Source == this)
            {
                return;
            }
            if (args.Source != _OVERLAY_CONTENT_HOST)
            {
                TryPop();
            }
        }

        // Escape closes the top overlay (when closing is allowed).
        void keyEventHandler(object sender, KeyEventArgs args)
        {
            if (args.Handled)
            {
                return;
            }
            if (!allowClosing)
            {
                return;
            }
            if (args.Key != Key.Escape)
            {
                return;
            }
            TryPop();
        }

        OVERLAY_CONTENT_STACK.Push(new ContentLock() { Content = content });
        MouseDown += mouseButtonEventHandler;
        KeyDown += keyEventHandler;
        await Dispatcher.InvokeAsync(() =>
        {
            //make overlay visible
            OVERLAY_HOST.IsHitTestVisible = true;
            //set desired content
            _OVERLAY_CONTENT_HOST.Content = content;
            if (!_OVERLAY_CONTENT_HOST.Focus())
            {
                return;
            }
            _OVERLAY_CONTENT_HOST.MoveFocus(new TraversalRequest(FocusNavigationDirection.First));
        });
        OverlayEvent?.Invoke(this, new OverlayEventArgs(true));
        try
        {
            await WAIT_HANDLE.WaitAsync(ct);
        }
        catch
        {
            throw;
        }
        finally
        {
            // Always detach the input handlers and hide/clear the overlay,
            // whether we finished normally or were cancelled.
            MouseDown -= mouseButtonEventHandler;
            KeyDown -= keyEventHandler;
            await Dispatcher.InvokeAsync(() =>
            {
                //hide the overlay and drop its content
                OVERLAY_HOST.IsHitTestVisible = false;
                _OVERLAY_CONTENT_HOST.Content = null;
            });
        }
    }
    catch (OperationCanceledException)
    {
        // Swallow cancellation only when it was a graceful programmatic close.
        if (IsOverlayGracefullClosed == false)
        {
            throw;
        }
    }
    catch
    {
        throw;
    }
    finally
    {
        //release all locks
        OVERLAY_CONTENT_STACK?.ToList().ForEach(CL => CL.WaitHandle?.Release());
        //content stack is no longer valid
        OVERLAY_CONTENT_STACK?.Clear();
        //reset the graceful close flag
        IsOverlayGracefullClosed = null;
        //raise event
        OverlayEvent?.Invoke(this, new OverlayEventArgs(false));
    }
}
// Splits a leaf quadtree node into four children, distributing its faces by
// their bounding boxes relative to the computed X/Y split lines; faces that
// straddle a split line remain in this node's own list.
private void FaceNodeSpliting(FaceDistTreeNode faceNode)
{
    // check if the face is leaf just in case
    if (faceNode.isLeaf)
    {
        // find the median
#if false
        // sort base on x
        faceNode.FaceList.Sort(delegate(Face fa1, Face fa2) { return fa1.Min_X.CompareTo(fa2.Min_X); });
        // get the x median value
        faceNode.X_mid = faceNode.FaceList[(int)Math.Truncate(faceNode.FaceList.Count / 2.0f)].Max_X;
        // sort base on y
        faceNode.FaceList.Sort(delegate(Face fa1, Face fa2) { return fa1.Min_Y.CompareTo(fa2.Min_Y); });
        // get the y median value
        faceNode.Y_mid = faceNode.FaceList[(int)Math.Truncate(faceNode.FaceList.Count / 2.0f)].Max_Y;
#else
        // Active strategy: split at the midpoint of the node's overall bounding
        // box (the median-by-sort approach above is compiled out).
        float max_X = float.MinValue, min_X = float.MaxValue;
        float max_Y = float.MinValue, min_Y = float.MaxValue;
        foreach (Face fa in faceNode.FaceList)
        {
            // get the max x
            if (max_X < fa.Max_X) max_X = fa.Max_X;
            // get the min x
            if (min_X > fa.Min_X) min_X = fa.Min_X;
            // get the max Y
            if (max_Y < fa.Max_Y) max_Y = fa.Max_Y;
            // get the min Y
            if (min_Y > fa.Min_Y) min_Y = fa.Min_Y;
        }
        // get the mid value
        faceNode.X_mid = (min_X + max_X) / 2;
        faceNode.Y_mid = (min_Y + max_Y) / 2;
#endif
        // create the 4 leaf
        faceNode.UpLeft = new FaceDistTreeNode();
        faceNode.DownLeft = new FaceDistTreeNode();
        faceNode.UpRight = new FaceDistTreeNode();
        faceNode.DownRight = new FaceDistTreeNode();
        // disable the leaf mode of the node
        faceNode.isLeaf = false;
        // the faces that is going to remain to this node
        ConcurrentStack<Face> remain = new ConcurrentStack<Face>();
        // fill the nodes
        foreach (Face fa in faceNode.FaceList)
        {
            if (fa.Min_X > faceNode.X_mid)
            {
                // entirely right of the X split line
                if (fa.Min_Y > faceNode.Y_mid)
                {
                    faceNode.UpRight.FaceList.Add(fa);
                }
                else if (fa.Max_Y < faceNode.Y_mid)
                {
                    faceNode.DownRight.FaceList.Add(fa);
                }
                else
                {
                    // straddles the Y split line: keep it in this node
                    remain.Push(fa);
                }
            }
            else if (fa.Max_X < faceNode.X_mid)
            {
                // entirely left of the X split line
                if (fa.Min_Y > faceNode.Y_mid)
                {
                    faceNode.UpLeft.FaceList.Add(fa);
                }
                else if (fa.Max_Y < faceNode.Y_mid)
                {
                    faceNode.DownLeft.FaceList.Add(fa);
                }
                else
                {
                    // straddles the Y split line: keep it in this node
                    remain.Push(fa);
                }
            }
            else
            {
                // straddles the X split line: keep it in this node
                remain.Push(fa);
            }
        }
        // clean the list
        faceNode.FaceList.Clear();
        // add the remain faces to the node list
        faceNode.FaceList.AddRange(remain);
        // clean the stack
        remain.Clear();
    }
}
// Demonstrates ConcurrentStack: single-threaded push/peek/clear, followed by
// five tasks concurrently pushing and popping the same shared stack.
static async Task Run()
{
    const int itemCount = 10000;
    var stack = new ConcurrentStack<int>();

    // Pushes itemCount sequential integers onto the shared stack.
    void PushAll()
    {
        for (var n = 0; n < itemCount; n++)
        {
            stack.Push(n);
        }
    }

    // Run the pusher once
    PushAll();
    if (stack.TryPeek(out var top))
    {
        Console.WriteLine($"TryPeek() saw {top} on top of the stack.");
    }
    else
    {
        Console.WriteLine("Could not peek most recently added number.");
    }

    // Empty the stack
    stack.Clear();
    if (stack.IsEmpty)
    {
        Console.WriteLine("Cleared the stack.");
    }

    // Pushes then pops itemCount items; pops interleave with the other tasks.
    void PushAndPop()
    {
        Console.WriteLine($"Task started on {Task.CurrentId}");
        for (var n = 0; n < itemCount; n++)
        {
            stack.Push(n);
        }
        for (var n = 0; n < itemCount; n++)
        {
            stack.TryPop(out _);
        }
        Console.WriteLine($"Task ended on {Task.CurrentId}");
    }

    // Spin up five concurrent tasks of the action
    var tasks = new Task[5];
    for (var i = 0; i < tasks.Length; i++)
    {
        tasks[i] = Task.Factory.StartNew(PushAndPop);
    }

    // Wait for all the tasks to finish up
    await Task.WhenAll(tasks);
    if (!stack.IsEmpty)
    {
        Console.WriteLine("Did not take all the items off the stack");
    }
}
/// <summary>
/// Verifies that MailMergeSender raises exactly the expected events: first for
/// the synchronous Send (strict event order), then for SendAsync (order may
/// differ, so only the sorted event sets are compared).
/// </summary>
/// <param name="somePlaceholder">Appended to the subject; a bad placeholder here can force a parse failure.</param>
/// <param name="withParseFailure">When true, the expected sequence contains OnMessageFailure instead of the SMTP send events.</param>
public async Task AllSenderEventsMailMerge(string somePlaceholder, bool withParseFailure)
{
    #region * Sync and Async preparation *
    // Events actually raised (pushed as they occur) vs. the expected set.
    var actualEvents = new ConcurrentStack<string>();
    var expectedEvents = new ConcurrentStack<string>();
    var mms = new MailMergeSender { Config = _settings.SenderConfig };
    // A single SMTP client keeps the synchronous event order deterministic.
    mms.Config.MaxNumOfSmtpClients = 1;
    // Event raising before merging starts
    mms.OnMergeBegin += (mailMergeSender, mergeBeginArgs) => { actualEvents.Push(nameof(mms.OnMergeBegin)); };
    // Event raising when getting the merged MimeMessage of the MailMergeMessage has failed.
    mms.OnMessageFailure += (mailMergeSender, messageFailureArgs) => { actualEvents.Push(nameof(mms.OnMessageFailure)); };
    // Event raising before sending a single mail message starts
    mms.OnBeforeSend += (smtpClient, beforeSendArgs) => { actualEvents.Push(nameof(mms.OnBeforeSend)); };
    // Event raising right after the SmtpClient's connection to the server is up (but not yet authenticated).
    mms.OnSmtpConnected += (smtpClient, smtpClientArgs) => { actualEvents.Push(nameof(mms.OnSmtpConnected)); };
    // Event raising after the SmtpClient has authenticated on the server.
    mms.OnSmtpAuthenticated += (smtpClient, smtpClientArgs) => { actualEvents.Push(nameof(mms.OnSmtpAuthenticated)); };
    // Event raising after the SmtpClient has disconnected from the SMTP mail server.
    mms.OnSmtpDisconnected += (smtpClient, smtpClientArgs) => { actualEvents.Push(nameof(mms.OnSmtpDisconnected)); };
    // Event raising if sending a single mail message fails
    mms.OnSendFailure += (smtpClient, sendFailureArgs) => { actualEvents.Push(nameof(mms.OnSendFailure)); };
    // Event raising before sending a single mail message is finished
    mms.OnAfterSend += (smtpClient, afterSendArgs) => { actualEvents.Push(nameof(mms.OnAfterSend)); };
    // Event raising each time before and after a single message was sent
    mms.OnMergeProgress += (mailMergeSender, progressArgs) => { actualEvents.Push(nameof(mms.OnMergeProgress)); };
    // Event raising after merging is completed
    mms.OnMergeComplete += (mailMergeSender, completedArgs) => { actualEvents.Push(nameof(mms.OnMergeComplete)); };

    // One recipient is enough to trigger every per-message event exactly once.
    var recipients = new List<Recipient>();
    for (var i = 0; i < 1; i++)
    {
        recipients.Add(new Recipient { Email = $"recipient-{i}@example.com", Name = $"Name of {i}" });
    }
    var mmm = new MailMergeMessage("Event tests" + somePlaceholder, "This is the plain text part for {Name} ({Email})") { Config = _settings.MessageConfig };
    mmm.MailMergeAddresses.Add(new MailMergeAddress(MailAddressType.To, "{Name}", "{Email}"));

    var sequenceOfExpectedEvents = new List<string>();
    if (withParseFailure)
    {
        // Failure path: the message itself fails to merge, so no SMTP events.
        sequenceOfExpectedEvents.Clear();
        sequenceOfExpectedEvents.AddRange(new[]
        {
            nameof(mms.OnMergeBegin), nameof(mms.OnMergeProgress), nameof(mms.OnMessageFailure), nameof(mms.OnMergeProgress), nameof(mms.OnMergeComplete)
        });
    }
    else
    {
        // Happy path: full connect/send/disconnect cycle.
        sequenceOfExpectedEvents.Clear();
        sequenceOfExpectedEvents.AddRange(new[]
        {
            nameof(mms.OnMergeBegin), nameof(mms.OnMergeProgress), nameof(mms.OnBeforeSend), nameof(mms.OnSmtpConnected), nameof(mms.OnAfterSend), nameof(mms.OnMergeProgress), nameof(mms.OnSmtpDisconnected), nameof(mms.OnMergeComplete)
        });
    }
    #endregion

    #region * Synchronous send method *
    try
    {
        mms.Send(mmm, recipients);
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
    expectedEvents.Clear();
    expectedEvents.PushRange(sequenceOfExpectedEvents.ToArray());
    Assert.AreEqual(expectedEvents.Count, actualEvents.Count);
    // sequence of sync sending is predefined: both stacks pop in reverse
    // chronological order, so pairwise popping compares the exact order.
    while (actualEvents.Count > 0)
    {
        expectedEvents.TryPop(out string expected);
        actualEvents.TryPop(out string actual);
        Assert.AreEqual(expected, actual);
    }
    #endregion

    #region * Async send method *
    actualEvents.Clear();
    expectedEvents.Clear();
    expectedEvents.PushRange(sequenceOfExpectedEvents.ToArray());
    try
    {
        await mms.SendAsync(mmm, recipients);
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
    Assert.AreEqual(expectedEvents.Count, actualEvents.Count);
    // sequence of async sending may be different from sync, but all events must exists
    var sortedActual = actualEvents.OrderBy(e => e).ToArray();
    var sortedExpected = expectedEvents.OrderBy(e => e).ToArray();
    for (var i = 0; i < sortedActual.Length; i++)
    {
        Assert.AreEqual(sortedExpected[i], sortedActual[i]);
    }
    #endregion
}
/// <summary>
/// Handles a parser-suspension request: acquires the parser write lock, moves
/// the parser to Busy, runs the requested action, then restores parser state
/// and performs any reparse that was requested while suspended.
/// </summary>
public void SuspendRequested(object sender, RubberduckStatusSuspendParserEventArgs e)
{
    // Suspending from a thread that holds a read lock would deadlock on the
    // write lock below, so it is rejected outright.
    if (ParsingSuspendLock.IsReadLockHeld)
    {
        e.Result = SuspensionOutcome.ReadLockAlreadyHeld;
        const string errorMessage = "A suspension action was attempted while a read lock was held. This indicates a bug in the code logic as suspension should not be requested from same thread that has a read lock.";
        Logger.Error(errorMessage);
        Debug.Assert(false, errorMessage);
        return;
    }
    object parseRequestor = null;
    try
    {
        if (!ParsingSuspendLock.TryEnterWriteLock(e.MillisecondsTimeout))
        {
            e.Result = SuspensionOutcome.TimedOut;
            return;
        }
        lock (SuspendStackSyncObject)
        {
            _isSuspended = true;
        }
        var originalStatus = State.Status;
        if (!e.AllowedRunStates.Contains(originalStatus))
        {
            e.Result = SuspensionOutcome.IncompatibleState;
            return;
        }
        _parserStateManager.SetStatusAndFireStateChanged(e.Requestor, ParserState.Busy, CancellationToken.None);
        // Run the caller-supplied action while the parser is held Busy.
        e.BusyAction.Invoke();
    }
    catch (OperationCanceledException ex)
    {
        e.Result = SuspensionOutcome.Canceled;
        e.EncounteredException = ex;
    }
    catch (Exception ex)
    {
        e.Result = SuspensionOutcome.UnexpectedError;
        e.EncounteredException = ex;
    }
    finally
    {
        lock (SuspendStackSyncObject)
        {
            _isSuspended = false;
            // Collapse any reparse requests queued during suspension down to
            // the most recent requestor; one reparse after resume covers all.
            if (_requestorStack.TryPop(out var lastRequestor))
            {
                _requestorStack.Clear();
                parseRequestor = lastRequestor;
            }
            // Though there were no reparse requests, we need to reset the state before we release the
            // write lock to avoid introducing discrepancy in the parser state due to readers being
            // blocked. Any reparse requests must be done outside the write lock; see further below.
            if (parseRequestor == null)
            {
                // We cannot make any assumptions about the original state, nor do we know
                // anything about resuming the previous state, so we must delegate the state
                // evaluation to the state manager.
                _parserStateManager.EvaluateOverallParserState(CancellationToken.None);
            }
        }
        if (ParsingSuspendLock.IsWriteLockHeld)
        {
            ParsingSuspendLock.ExitWriteLock();
        }
        // A still-Pending result means nothing above failed; report success.
        if (e.Result == SuspensionOutcome.Pending)
        {
            e.Result = SuspensionOutcome.Completed;
        }
    }
    // Any reparse requests must be done outside the write lock to avoid deadlocks
    if (parseRequestor != null)
    {
        BeginParse(parseRequestor);
    }
}
/// <summary>Discards all queued tasks.</summary>
public void Clear() => _tasks.Clear();
/// <summary>
/// Empties the stack: clears the pooled items and the buffer manager, when present.
/// </summary>
public void Clear()
{
    pool?.Clear();
    bufferManager?.Clear();
}
/// <summary>Removes every element from the underlying stack.</summary>
public void Clear()
{
    m_Stack.Clear();
}