/// <summary>
/// Re-raises a parser event as a DiscussionEvent notification, filtering out
/// events that should not reach the user.
/// </summary>
private void onDiscussionParserEvent(UserEvents.DiscussionEvent e, DateTime eventTimestamp, DiscussionUpdateType type)
{
   if (type == DiscussionUpdateType.InitialSnapshot)
   {
      // Don't send out any notifications on initial snapshot, e.g. when just connected to host
      // because we don't want to notify about all old events
      return;
   }

   if (type == DiscussionUpdateType.NewMergeRequest)
   {
      // Notify about whatever is found in a new merge request
      DiscussionEvent?.Invoke(e);
      return;
   }

   if (type == DiscussionUpdateType.PeriodicUpdate)
   {
      // Notify only about events newer than the previously cached timestamp
      // for merge requests that are cached already
      bool isNewEventInCachedMergeRequest =
            _cachedDiscussions.TryGetValue(e.MergeRequestKey, out CachedDiscussions cached)
         && cached.PrevTimeStamp.HasValue
         && eventTimestamp > cached.PrevTimeStamp.Value;
      if (isNewEventInCachedMergeRequest)
      {
         DiscussionEvent?.Invoke(e);
      }
      return;
   }

   Debug.Assert(false); // unexpected DiscussionUpdateType value
}
/// <summary>
/// Enqueues an update request and schedules asynchronous processing of the queue head.
/// `keys == null` combined with PeriodicUpdate means a "global" update of all merge requests.
/// Duplicate global periodic updates are collapsed into one.
/// </summary>
private void scheduleUpdate(IEnumerable<MergeRequestKey> keys, DiscussionUpdateType type)
{
   // A "global" periodic update has no explicit key list.
   bool isSchedulingGlobalPeriodicUpdate() => keys == null && type == DiscussionUpdateType.PeriodicUpdate;

   // True when a global periodic update is already queued (or currently being processed,
   // see the Peek/Dequeue ordering below).
   bool isProcessingGlobalPeriodicUpdate() =>
      _scheduledUpdates
      .Any(item => item.MergeRequests == null && item.Type == DiscussionUpdateType.PeriodicUpdate);

   // De-duplicate: no point queuing a second global periodic update.
   if (isSchedulingGlobalPeriodicUpdate() && isProcessingGlobalPeriodicUpdate())
   {
      return;
   }

   _scheduledUpdates.Enqueue(new ScheduledUpdate(keys, type));

   // NOTE(review): presumably BeginInvoke on the timer's SynchronizingObject marshals
   // this continuation onto the synchronizing (UI) thread — confirm against timer setup.
   _timer?.SynchronizingObject.BeginInvoke(new Action(async() =>
   {
      // To avoid re-entrance in updateDiscussionsAsync()
      // ("Competion" is a typo in the awaited method's name, declared elsewhere in this file)
      await waitForUpdateCompetion(null);
      Debug.Assert(!_updating.Any());

      if (_scheduledUpdates.Any())
      {
         // Peek (not Dequeue) so the item stays visible to isProcessingGlobalPeriodicUpdate()
         // while processScheduledUpdate() is awaited — this keeps the de-duplication above
         // effective for the whole duration of processing.
         ScheduledUpdate scheduledUpdate = _scheduledUpdates.Peek();
         await processScheduledUpdate(scheduledUpdate);
         _scheduledUpdates.Dequeue();
      }
   }), null);
}
/// <summary>
/// Loads discussions for the given merge request from the server and caches them.
/// Skips the load when another update for the same merge request is already in progress,
/// or when isCacheUpdateNeeded() reports no server-side change since the last load.
/// May throw OperatorException from _operator calls; callers (e.g. updateDiscussionsSafeAsync())
/// are responsible for handling it.
/// </summary>
async private Task updateDiscussionsAsync(MergeRequestKey mrk, DiscussionUpdateType type)
{
   if (_operator == null)
   {
      return;
   }

   if (_updating.Contains(mrk))
   {
      // Such update can be caused by LoadDiscussions() called while we are looping in processScheduledUpdate()
      traceInformation(String.Format(
         "Update is skipped due to concurrent update request for MR: " +
         "Host={0}, Project={1}, IId={2}",
         mrk.ProjectKey.HostName, mrk.ProjectKey.ProjectName, mrk.IId.ToString()));
      return;
   }

   try
   {
      _updating.Add(mrk);

      Tuple<Note, int> mostRecentNoteAndNoteCount = await _operator.GetMostRecentUpdatedNoteAndCountAsync(mrk);
      if (!isCacheUpdateNeeded(mostRecentNoteAndNoteCount.Item1, mostRecentNoteAndNoteCount.Item2, mrk))
      {
         return; // cache is up-to-date, nothing to load
      }

      // When there is no most recent note, there is nothing to stamp the cache with —
      // pass an empty timestamp instead.
      CachedDiscussionsTimestamp? ts = mostRecentNoteAndNoteCount.Item1 == null
         ? new CachedDiscussionsTimestamp?()
         : new CachedDiscussionsTimestamp(
            mostRecentNoteAndNoteCount.Item1.Updated_At, mostRecentNoteAndNoteCount.Item2);

      IEnumerable<Discussion> discussions;
      try
      {
         _loading.Add(mrk);
         DiscussionsLoading?.Invoke(mrk);
         discussions = await _operator.GetDiscussionsAsync(mrk, ts);
      }
      finally
      {
         // A redundant `catch (OperatorException) { throw; }` was removed here:
         // a bare rethrow with no handling is a no-op, and `finally` runs either way.
         _loading.Remove(mrk);
      }

      cacheDiscussions(mrk, discussions);

      DiscussionEvent?.Invoke is not raised here directly; subscribers react to these:
      DiscussionsLoaded?.Invoke(mrk, discussions);
      DiscussionsLoadedInternal?.Invoke(mrk, discussions, type);
   }
   finally
   {
      _updating.Remove(mrk);
   }
}
/// <summary>
/// Scans freshly loaded discussions and raises DiscussionEvent for notable notes:
/// "resolved all threads", approval status changes, mentions of the current user,
/// and notes starting with one of the configured keywords.
/// </summary>
private void onDiscussionsLoaded(MergeRequestKey mrk, IEnumerable<Discussion> discussions, DiscussionUpdateType type)
{
   // Any() instead of Count() == 0: avoids enumerating the whole sequence just to count it.
   if (!discussions.Any())
   {
      return;
   }

   foreach (Discussion discussion in discussions)
   {
      foreach (DiscussionNote note in discussion.Notes)
      {
         // System note bodies are exact GitLab-generated strings, compared verbatim.
         if (note.System && note.Body == "resolved all threads")
         {
            DiscussionEvent?.Invoke(new UserEvents.DiscussionEvent(
               mrk, UserEvents.DiscussionEvent.Type.ResolvedAllThreads, note.Author),
               note.Updated_At, type);
         }
         else if (note.System && note.Body == "approved this merge request")
         {
            DiscussionEvent?.Invoke(new UserEvents.DiscussionEvent(
               mrk, UserEvents.DiscussionEvent.Type.ApprovalStatusChange,
               new UserEvents.DiscussionEvent.ApprovalStatusChangeDescription(true, note.Author)),
               note.Updated_At, type);
         }
         else if (note.System && note.Body == "unapproved this merge request")
         {
            DiscussionEvent?.Invoke(new UserEvents.DiscussionEvent(
               mrk, UserEvents.DiscussionEvent.Type.ApprovalStatusChange,
               new UserEvents.DiscussionEvent.ApprovalStatusChangeDescription(false, note.Author)),
               note.Updated_At, type);
         }
         else if (Helpers.IsUserMentioned(note.Body, _currentUser))
         {
            DiscussionEvent?.Invoke(new UserEvents.DiscussionEvent(
               mrk, UserEvents.DiscussionEvent.Type.MentionedCurrentUser, note.Author),
               note.Updated_At, type);
         }
         else if (_keywords != null)
         {
            // Hoisted out of the loop: previously Trim() was recomputed per keyword.
            string trimmedBody = note.Body.Trim();
            foreach (string keyword in _keywords)
            {
               // A note may match several keywords; one event is raised per match
               // (no break), preserving the original behavior.
               if (trimmedBody.StartsWith(keyword, StringComparison.CurrentCultureIgnoreCase))
               {
                  DiscussionEvent?.Invoke(new UserEvents.DiscussionEvent(
                     mrk, UserEvents.DiscussionEvent.Type.Keyword,
                     new UserEvents.DiscussionEvent.KeywordDescription(keyword, note.Author)),
                     note.Updated_At, type);
               }
            }
         }
      }
   }
}
/// <summary>
/// Wraps updateDiscussionsAsync() and converts OperatorException into a logged,
/// non-fatal error so a failed server request does not break the caller's loop.
/// </summary>
async Task updateDiscussionsSafeAsync(MergeRequestKey mrk, DiscussionUpdateType type)
{
   try
   {
      await updateDiscussionsAsync(mrk, type);
   }
   catch (OperatorException ex)
   {
      string message = String.Format(
         "Cannot update discussions for MR: Host={0}, Project={1}, IId={2}",
         mrk.ProjectKey.HostName, mrk.ProjectKey.ProjectName, mrk.IId.ToString());
      ExceptionHandlers.Handle(message, ex);
   }
}
/// <summary>
/// Enqueues an update request and schedules asynchronous processing of the queue.
/// An InitialSnapshot request switches the manager into "reconnect" state: all
/// previously queued non-snapshot updates are discarded until the snapshot runs.
/// `keys == null` means a global update covering all merge requests.
/// </summary>
private void scheduleUpdate(IEnumerable<MergeRequestKey> keys, DiscussionUpdateType type)
{
   if (type == DiscussionUpdateType.InitialSnapshot)
   {
      _reconnect = true;
   }

   // Make a copy of `keys`: the caller's sequence may be lazy or mutated before the
   // scheduled update is processed. (The original comment promised a copy but never made one.)
   // `null` is preserved — it denotes a global update.
   _scheduledUpdates.Enqueue(new ScheduledUpdate(keys?.ToList(), type));

   _timer?.SynchronizingObject.BeginInvoke(new Action(async() =>
   {
      // 1. To avoid re-entrance in updateDiscussionsAsync()
      // 2. Make it before resetting _reconnect flag to allow an ongoing update loop to break
      await waitForUpdateCompetion(null);
      Debug.Assert(!_updating.Any());

      if (_scheduledUpdates.Any())
      {
         ScheduledUpdate scheduledUpdate = _scheduledUpdates.Dequeue();
         if (_reconnect)
         {
            // While reconnecting, only the InitialSnapshot update is allowed through;
            // stale pre-reconnect updates are dropped.
            if (scheduledUpdate.Type != DiscussionUpdateType.InitialSnapshot)
            {
               Trace.TraceInformation("[DiscussionManager] update is skipped due to _reconnect state");
               return;
            }
            Debug.Assert(!_cachedDiscussions.Any() && !_closed.Any());
            _reconnect = false;
         }
         await processScheduledUpdate(scheduledUpdate);
      }
   }), null);
}
/// <summary>
/// Creates a scheduled update request.
/// The key sequence is materialized into a list so the stored collection is a
/// stable snapshot — the caller's enumerable may be lazy or change after scheduling.
/// `null` is preserved: it denotes a global update covering all merge requests
/// (see the null checks in scheduleUpdate()).
/// </summary>
internal ScheduledUpdate(IEnumerable<MergeRequestKey> mergeRequests, DiscussionUpdateType type)
{
   MergeRequests = mergeRequests?.ToList();
   Type = type;
}