/// <summary>
/// Save signals for a specific container in a queue. This is done in scenario where the container
/// is not yet ready to accept signals.
/// </summary>
/// <param name="analysis">Container the signal is destined for; used only for its unique identity.</param>
/// <param name="data">Signal to be queued. Must not be null.</param>
/// <returns>Task that completes once the updated queue has been persisted.</returns>
/// <exception cref="SignalAlreadyPresentException">Thrown when <paramref name="data"/> is already queued for this container.</exception>
private Task SaveToQueueAsync(AnalysisContainer analysis, ScenarioData data)
{
    Assert.IsNotNull(data, "data");
    var key = analysis.GetUniqueIdentity();

    // Bug fix: the previous TryGetValue/GetOrAdd split could silently drop 'data' when another
    // thread inserted the queue between the two calls -- GetOrAdd would return the pre-existing
    // queue and the freshly created queue holding 'data' was discarded. Getting (or adding) an
    // empty queue first and then always enqueuing closes that window.
    var queue = this.signalWaitingToBeProcessedMap.GetOrAdd(key, _ => new ConcurrentQueue<ScenarioData>());

    if (queue.Contains(data))
    {
        throw new SignalAlreadyPresentException(string.Format(CultureInfo.InvariantCulture, "Save Queue. Analysis: {0}, Data: {1}", analysis, data));
    }

    queue.Enqueue(data);

    // Persist it.
    return this.signalWaitingToBeProcessedStoreInstance.SetEntityAsync(
        data.GetUniqueIdentity(),
        HandyUtil.Serialize(queue),
        this.CancelToken);
}
/// <summary>
/// Entry point for an Analysis container to begin. This is true for the main analysis as well
/// as the update to an existing analysis container.
/// </summary>
/// <param name="analysis">Container to schedule. Must not be null.</param>
/// <returns>Task tracking the scheduling operation.</returns>
/// <exception cref="InvalidOperationException">Thrown when the dataflow pipeline declines to accept the container.</exception>
private async Task ScheduleAnalysisAsync(AnalysisContainer analysis)
{
    Assert.IsNotNull(analysis, "analysis");
    var key = analysis.GetUniqueIdentity();

    this.Logger.LogMessage("ScheduleAnalysisAsync : Key : {0}", key);

    // Get the Associated Analysis Metadata.
    var analysisMetadata = await this.analysisMetadataObjectStore.GetOrAddTypedObjectAsync(key, new AnalysisMetadata(key)).ConfigureAwait(false);

    if (analysis.InterestFilter != null)
    {
        analysisMetadata.HasRegisteredInterest = true;
        await this.analysisMetadataObjectStore.PersistTypedObjectAsync(analysisMetadata).ConfigureAwait(false);
    }

    this.AssertIfComputePipelineInFaultedState();

    // Push the signal into our dataflow pipeline.
    var result = await this.signalConsumeDataFlow.SendAsync(analysis, this.CancelToken).ConfigureAwait(false);
    if (!result)
    {
        // Build the message once so the log entry and the exception can never drift apart.
        var message = string.Format(CultureInfo.InvariantCulture, "Failed to Process Analysis: '{0}'", analysis);
        this.Logger.LogWarning(message);

        // InvalidOperationException (instead of the bare Exception thrown previously) is the
        // idiomatic type for "the pipeline refused the work"; it remains backward compatible
        // with existing catch (Exception) handlers.
        throw new InvalidOperationException(message);
    }
}
/// <summary>
/// Bookkeeping performed just before an analysis container is handed to its agent: marks the
/// scheduling info as started, stamps the last-invoked time, and persists the metadata.
/// </summary>
/// <param name="analysis">Container about to be analyzed.</param>
/// <returns>Task that completes once the metadata has been persisted.</returns>
private async Task PreAnalysisActionsAsync(AnalysisContainer analysis)
{
    var key = analysis.GetUniqueIdentity();

    // ConfigureAwait(false) added for consistency with every other await in this type.
    var analysisMetadata = await this.analysisMetadataObjectStore.GetTypedObjectAsync(key).ConfigureAwait(false);

    analysisMetadata.SchedulingInfo.MarkStarted();

    //// We don't increment analysis attempted count if this is simply an update.
    ////if (schedulingInfo.GetContinuationType() != ContinuationType.WaitForInterest)
    ////{
    ////    schedulingInfo.IncrementAnalysisAttemptedCount();
    ////}

    analysisMetadata.LastInvokedTime = DateTimeOffset.UtcNow;

    await this.analysisMetadataObjectStore.PersistTypedObjectAsync(analysisMetadata).ConfigureAwait(false);
}
/// <summary>
/// Runs one analysis pass for a container: performs pre-analysis bookkeeping, invokes the
/// owning agent (main analysis, or an update when the container is waiting for interest),
/// then runs post-analysis actions with either the continuation the agent produced or the
/// exception it threw.
/// </summary>
/// <param name="analysis">Container to analyze.</param>
/// <returns>Task tracking the full analysis pass.</returns>
private async Task KickOffAnalysisAsync(AnalysisContainer analysis)
{
    await this.PreAnalysisActionsAsync(analysis).ConfigureAwait(false);

    this.CancelToken.ThrowIfCancellationRequested();

    var key = analysis.GetUniqueIdentity();

    // ConfigureAwait(false) added for consistency with every other await in this type.
    var schedulingInfo = (await this.analysisMetadataObjectStore.GetTypedObjectAsync(key).ConfigureAwait(false)).SchedulingInfo;

    Continuation continuation = null;
    ExceptionDispatchInfo dispatchInfo = null;
    try
    {
        Assert.IsNotNull(AgentDirectory.SingleInstance, "AgentDirectory.SingleInstance != null");
        var agent = AgentDirectory.SingleInstance.GetOrCreateAgentInstance(analysis.Agent);

        // WaitForInterest indicates the main analysis already ran; this pass is an update.
        if (schedulingInfo.GetContinuationType() == ContinuationType.WaitForInterest)
        {
            this.Logger.LogMessage("KickOffAnalysisAsync:: Launching Update. Key : {0}", key);
            continuation = await agent.UpdateAnalysisAsync(analysis).ConfigureAwait(false);
        }
        else
        {
            this.Logger.LogMessage("KickOffAnalysisAsync:: Calling Main Analysis. Key : {0}", key);
            continuation = await agent.DoAnalysisAsync(analysis).ConfigureAwait(false);
        }

        this.Logger.LogMessage("KickOffAnalysisAsync:: Continuation : {0} Analysis {1}, key: {2}", continuation, analysis, key);
    }
    catch (Exception exp)
    {
        // Capture rather than rethrow so post-analysis bookkeeping still runs;
        // PostAnalysisActionsAsync rethrows with the original stack trace preserved.
        this.Logger.LogMessage("KickOffAnalysisAsync:: Exception {0} Encountered while Analysing Container: {1}", exp, analysis);
        dispatchInfo = ExceptionDispatchInfo.Capture(exp);
    }

    await this.PostAnalysisActionsAsync(analysis, continuation, dispatchInfo).ConfigureAwait(false);
}
/// <summary>
/// Bookkeeping performed after an agent has run (or failed to run) an analysis: records the
/// outcome on the scheduling info, persists the container and its metadata, notifies any
/// registered consumers for the agent, and finally rethrows a captured agent exception.
/// </summary>
/// <param name="analysis">Container that was analyzed.</param>
/// <param name="continuation">Continuation returned by the agent; ignored when <paramref name="expDispatchInfo"/> is non-null.</param>
/// <param name="expDispatchInfo">Captured agent exception, or null when the agent succeeded.</param>
/// <returns>Task tracking the post-analysis work.</returns>
private async Task PostAnalysisActionsAsync(AnalysisContainer analysis, Continuation continuation, ExceptionDispatchInfo expDispatchInfo)
{
    var key = analysis.GetUniqueIdentity();
    var analysisMetadata = await this.analysisMetadataObjectStore.GetTypedObjectAsync(key).ConfigureAwait(false);

    analysisMetadata.SchedulingInfo.StartUse();

    // if we have encountered an exception while processing this container, mark as failed.
    if (expDispatchInfo != null)
    {
        analysis.AddExceptionSeen(expDispatchInfo.SourceException);
        analysisMetadata.SchedulingInfo.MarkFailed();
    }
    else
    {
        analysisMetadata.SchedulingInfo.SetContinuation(continuation);
    }

    analysis.SetAnalysisStatus(analysisMetadata.SchedulingInfo.CurrentStatus);

    await this.analysisContainerObjectStore.PersistTypedObjectAsync(analysis).ConfigureAwait(false);

    analysisMetadata.SchedulingInfo.StopUse();

    // We delete the object from persisted store once the analysis is finished (Completed, or Failed)
    if (analysisMetadata.SchedulingInfo.CurrentStatus == AnalysisStatus.Completed || analysisMetadata.SchedulingInfo.CurrentStatus == AnalysisStatus.Failed)
    {
        await this.analysisMetadataObjectStore.DeletedTypedObjectAsync(analysisMetadata).ConfigureAwait(false);
    }
    else
    {
        // Persist it. ConfigureAwait(false) added for consistency with the other awaits here.
        await this.analysisMetadataObjectStore.PersistTypedObjectAsync(analysisMetadata).ConfigureAwait(false);
    }

    // TryGetValue replaces the previous ContainsKey + indexer pair (double dictionary lookup).
    if (this.consumerMap.TryGetValue(analysis.Agent, out var consumers))
    {
        // TODO: Today we expect analysisConsumer to be very light weight. In future, we can potentially
        // post them in a new Task.
        foreach (var oneConsumer in consumers)
        {
            if (analysisMetadata.SchedulingInfo.CurrentStatus == AnalysisStatus.Completed &&
                (oneConsumer.Value == ConsumeOptions.Finished || oneConsumer.Value == ConsumeOptions.OnlySuccessfullyFinished))
            {
                await oneConsumer.Key.ConsumeAsync(analysis.AnalysisEvent, this.CancelToken).ConfigureAwait(false);
                continue;
            }

            if (analysisMetadata.SchedulingInfo.CurrentStatus == AnalysisStatus.Failed &&
                (oneConsumer.Value == ConsumeOptions.Finished || oneConsumer.Value == ConsumeOptions.OnlyFailed))
            {
                await oneConsumer.Key.ConsumeAsync(analysis.AnalysisEvent, this.CancelToken).ConfigureAwait(false);
            }
        }
    }

    // Re-throw the captured exception with its original stack trace.
    if (expDispatchInfo != null)
    {
        expDispatchInfo.Throw();
    }
}
/// <summary>
/// Check if a container is in position to accept updates.
/// </summary>
/// <remarks>
/// Updates can only be accepted once main analysis is finished. This routine checks that by
/// looking at the persisted scheduling info for the container.
/// </remarks>
/// <param name="analysis">Container to inspect. Must not be null.</param>
/// <returns>True when the container is suspended and waiting for interest; otherwise false.</returns>
private async Task<bool> IsAnalysisContainerInPositionToAcceptEventsAsync(AnalysisContainer analysis)
{
    Assert.IsNotNull(analysis, "analysis");

    var metadata = await this.analysisMetadataObjectStore.GetTypedObjectAsync(analysis.GetUniqueIdentity()).ConfigureAwait(false);
    var info = metadata.SchedulingInfo;

    var isSuspended = info.CurrentStatus == AnalysisStatus.Suspended;
    var isWaitingForInterest = info.GetContinuationType() == ContinuationType.WaitForInterest;

    return isSuspended && isWaitingForInterest;
}