/// <summary>
/// <see cref="UnarySequentialDispatcherRemoteNode{TInput}"/>
/// </summary>
/// <param name="persistentCache">Persistent cache to avoid dropped data on system crash</param>
/// <param name="progress">Progress of the current bulk</param>
/// <param name="host"><see cref="Host"/></param>
/// <param name="cts"><see cref="CancellationTokenSource"/></param>
/// <param name="circuitBreakerOptions"><see cref="CircuitBreakerOptions"/></param>
/// <param name="clusterOptions"><see cref="ClusterOptions"/></param>
/// <param name="logger"><see cref="ILogger"/></param>
public UnarySequentialDispatcherRemoteNode(
    IAppCache persistentCache,
    IProgress<double> progress,
    Host host,
    CancellationTokenSource cts,
    CircuitBreakerOptions circuitBreakerOptions,
    ClusterOptions clusterOptions,
    ILogger logger) : base(
    Policy.Handle<Exception>()
        .AdvancedCircuitBreakerAsync(circuitBreakerOptions.CircuitBreakerFailureThreshold,
            circuitBreakerOptions.CircuitBreakerSamplingDuration,
            circuitBreakerOptions.CircuitBreakerMinimumThroughput,
            circuitBreakerOptions.CircuitBreakerDurationOfBreak,
            onBreak: (ex, timespan, context) =>
            {
                logger.LogError(
                    $"Batch processor breaker: Breaking the circuit for {timespan.TotalMilliseconds}ms due to {ex.Message}.");
            },
            onReset: context =>
            {
                logger.LogInformation(
                    "Batch processor breaker: Succeeded, closed the circuit.");
            },
            onHalfOpen: () =>
            {
                logger.LogWarning(
                    "Batch processor breaker: Half-open, next call is a trial.");
            }),
    clusterOptions, progress, cts, logger)
{
    _logger = logger;
    _clusterOptions = clusterOptions;

    // FIX: NodeMetrics must be assigned before the RemoteNodeHealthSubject
    // subscription below — that callback dereferences NodeMetrics, so a health
    // event arriving before assignment would throw a NullReferenceException.
    NodeMetrics = new NodeMetrics(Guid.NewGuid());

    // Funnel dispatched items through a synchronized subject onto a dedicated
    // event-loop thread and persist each one, so data received before
    // processing survives a crash.
    ISubject<PersistentItem<TInput>> dispatcherSubject = new Subject<PersistentItem<TInput>>();
    _synchronizedDispatcherSubject = Subject.Synchronize(dispatcherSubject);
    _synchronizedDispatcherSubjectSubscription = _synchronizedDispatcherSubject
        .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
        .Select(item => Observable.FromAsync(() =>
            persistentCache.AddItemAsync(item.Entity, item.CancellationTokenSource.Token)))
        .Merge()
        .Subscribe();

    // NOTE(review): unlike the other remote-node constructors, the channel is
    // not retained in a field here — confirm it is disposed elsewhere.
    var channel = new Channel(host.MachineName, host.Port, ChannelCredentials.Insecure);
    _remoteContract = MagicOnionClient.Create<IRemoteContract<TInput>>(channel);

    // Mirror the remote node's health reports into this node's metrics.
    IRemoteNodeSubject nodeReceiver = new NodeReceiver(_logger);
    _remoteNodeHealthSubscription =
        nodeReceiver.RemoteNodeHealthSubject.Subscribe(remoteNodeHealth =>
        {
            NodeMetrics.RemoteNodeHealth = remoteNodeHealth;
        });
    _nodeHub = StreamingHubClient.Connect<INodeHub, INodeReceiver>(channel, (INodeReceiver)nodeReceiver);
}
/// <summary>
/// <see cref="AsyncDispatcherRemoteNode{TInput,TOutput}"/>
/// </summary>
/// <param name="host"><see cref="Host"/></param>
/// <param name="cts"><see cref="CancellationTokenSource"/></param>
/// <param name="circuitBreakerOptions"><see cref="CircuitBreakerOptions"/></param>
/// <param name="clusterOptions"><see cref="ClusterOptions"/></param>
/// <param name="logger"><see cref="ILogger"/></param>
public AsyncDispatcherRemoteNode(
    Host host,
    CancellationTokenSource cts,
    CircuitBreakerOptions circuitBreakerOptions,
    ClusterOptions clusterOptions,
    ILogger logger) : base(
    Policy.Handle<Exception>()
        .AdvancedCircuitBreakerAsync(circuitBreakerOptions.CircuitBreakerFailureThreshold,
            circuitBreakerOptions.CircuitBreakerSamplingDuration,
            circuitBreakerOptions.CircuitBreakerMinimumThroughput,
            circuitBreakerOptions.CircuitBreakerDurationOfBreak,
            onBreak: (ex, timespan, context) =>
            {
                logger.LogError(
                    $"Batch processor breaker: Breaking the circuit for {timespan.TotalMilliseconds}ms due to {ex.Message}.");
            },
            onReset: context =>
            {
                logger.LogInformation(
                    "Batch processor breaker: Succeeded, closed the circuit.");
            },
            onHalfOpen: () =>
            {
                logger.LogWarning(
                    "Batch processor breaker: Half-open, next call is a trial.");
            }),
    clusterOptions, cts, logger)
{
    _logger = logger;
    _clusterOptions = clusterOptions;

    // FIX: NodeMetrics must be assigned before the RemoteNodeHealthSubject
    // subscription below — that callback dereferences NodeMetrics, so a health
    // event arriving before assignment would throw a NullReferenceException.
    NodeMetrics = new NodeMetrics(Guid.NewGuid());

    // gRPC channel to the remote host; retained in a field for later disposal.
    _channel = new Channel(host.MachineName, host.Port, ChannelCredentials.Insecure);
    _remoteContract = MagicOnionClient.Create<IOutputRemoteContract<TInput, TOutput>>(_channel);

    // Mirror the remote node's health reports into this node's metrics.
    IRemoteNodeSubject nodeReceiver = new NodeReceiver(_logger);
    _remoteNodeHealthSubscription =
        nodeReceiver.RemoteNodeHealthSubject.Subscribe(remoteNodeHealth =>
        {
            NodeMetrics.RemoteNodeHealth = remoteNodeHealth;
        });
    _nodeHub = StreamingHubClient.Connect<INodeHub, INodeReceiver>(_channel, (INodeReceiver)nodeReceiver);
}
/// <summary>
/// <see cref="DualParallelDispatcherRemoteNode{TInput1,TInput2,TOutput1,TOutput2}"/>
/// </summary>
/// <param name="persistentCache">Persistent cache to avoid dropped data on system crash</param>
/// <param name="progress">Progress of the current bulk</param>
/// <param name="host"><see cref="Host"/></param>
/// <param name="cts"><see cref="CancellationTokenSource"/></param>
/// <param name="circuitBreakerOptions"><see cref="CircuitBreakerOptions"/></param>
/// <param name="clusterOptions"><see cref="ClusterOptions"/></param>
/// <param name="logger"><see cref="ILogger"/></param>
public DualParallelDispatcherRemoteNode(
    IAppCache persistentCache,
    IProgress<double> progress,
    Host host,
    CancellationTokenSource cts,
    CircuitBreakerOptions circuitBreakerOptions,
    ClusterOptions clusterOptions,
    ILogger logger) : base(
    Policy.Handle<Exception>()
        .AdvancedCircuitBreakerAsync(circuitBreakerOptions.CircuitBreakerFailureThreshold,
            circuitBreakerOptions.CircuitBreakerSamplingDuration,
            circuitBreakerOptions.CircuitBreakerMinimumThroughput,
            circuitBreakerOptions.CircuitBreakerDurationOfBreak,
            onBreak: (ex, timespan, context) =>
            {
                logger.LogError(
                    $"Batch processor breaker: Breaking the circuit for {timespan.TotalMilliseconds}ms due to {ex.Message}.");
            },
            onReset: context =>
            {
                logger.LogInformation(
                    "Batch processor breaker: Succeeded, closed the circuit.");
            },
            onHalfOpen: () =>
            {
                logger.LogWarning(
                    "Batch processor breaker: Half-open, next call is a trial.");
            }),
    clusterOptions, progress, cts, logger)
{
    _logger = logger;
    _clusterOptions = clusterOptions;

    // FIX: NodeMetrics must be assigned before the RemoteNodeHealthSubject
    // subscription below — that callback dereferences NodeMetrics, so a health
    // event arriving before assignment would throw a NullReferenceException.
    NodeMetrics = new NodeMetrics(Guid.NewGuid());

    // Persist each item-1 off the caller's thread so data received before
    // processing survives a crash.
    ISubject<LinkedItem<TInput1>> item1DispatcherSubject = new Subject<LinkedItem<TInput1>>();
    _item1SynchronizedDispatcherSubject = Subject.Synchronize(item1DispatcherSubject);
    _item1SynchronizedDispatcherSubjectSubscription = _item1SynchronizedDispatcherSubject
        .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
        .Select(item => Observable.FromAsync(() =>
            persistentCache.AddItem1Async(item.Key.ToString(), item.Entity, item.CancellationTokenSource.Token)))
        .Merge()
        .Subscribe();

    // Same persistence pipeline for item-2.
    ISubject<LinkedItem<TInput2>> item2DispatcherSubject = new Subject<LinkedItem<TInput2>>();
    _item2SynchronizedDispatcherSubject = Subject.Synchronize(item2DispatcherSubject);
    _item2SynchronizedDispatcherSubjectSubscription = _item2SynchronizedDispatcherSubject
        .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
        .Select(item => Observable.FromAsync(() =>
            persistentCache.AddItem2Async(item.Key.ToString(), item.Entity, item.CancellationTokenSource.Token)))
        .Merge()
        .Subscribe();

    // gRPC channel plus the combined and per-item remote contracts.
    _channel = new Channel(host.MachineName, host.Port, ChannelCredentials.Insecure);
    _remoteContract = MagicOnionClient.Create<IRemoteContract<TOutput1, TOutput2>>(_channel);
    _item1RemoteContract = MagicOnionClient.Create<IOutputItem1RemoteContract<TInput1, TOutput1>>(_channel);
    _item2RemoteContract = MagicOnionClient.Create<IOutputItem2RemoteContract<TInput2, TOutput2>>(_channel);

    // Mirror the remote node's health reports into this node's metrics.
    IRemoteNodeSubject nodeReceiver = new NodeReceiver(_logger);
    _remoteNodeHealthSubscription =
        nodeReceiver.RemoteNodeHealthSubject.Subscribe(remoteNodeHealth =>
        {
            NodeMetrics.RemoteNodeHealth = remoteNodeHealth;
        });
    _nodeHub = StreamingHubClient.Connect<INodeHub, INodeReceiver>(_channel, (INodeReceiver)nodeReceiver);

    // Dataflow graph: each transform block buffers its payload keyed by Guid,
    // then forwards (key, cts) pairs into a non-greedy JoinBlock that pairs
    // item-1 with item-2 before the joint remote call.
    var item1ProcessSource = new ConcurrentDictionary<Guid, TOutput1>();
    var item2ProcessSource = new ConcurrentDictionary<Guid, TOutput2>();
    var joinBlock =
        new JoinBlock<KeyValuePair<Guid, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(
            new GroupingDataflowBlockOptions { Greedy = false });

    _item1Source =
        new TransformBlock<Tuple<Guid, TOutput1, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(
            source =>
            {
                if (!item1ProcessSource.ContainsKey(source.Item1) &&
                    !item1ProcessSource.TryAdd(source.Item1, source.Item2))
                {
                    _logger.LogError(
                        $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
                }

                return new KeyValuePair<Guid, CancellationTokenSource>(source.Item1, source.Item3);
            });

    _item2Source =
        new TransformBlock<Tuple<Guid, TOutput2, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(
            source =>
            {
                if (!item2ProcessSource.ContainsKey(source.Item1) &&
                    !item2ProcessSource.TryAdd(source.Item1, source.Item2))
                {
                    _logger.LogError(
                        $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
                }

                return new KeyValuePair<Guid, CancellationTokenSource>(source.Item1, source.Item3);
            });

    var processBlock =
        new ActionBlock<Tuple<KeyValuePair<Guid, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>>(
            async combined =>
            {
                // Exponential back-off retry; cancellations are excluded from
                // the handled exceptions so they abort rather than retry.
                var policy = Policy
                    .Handle<Exception>(ex => !(ex is TaskCanceledException || ex is OperationCanceledException))
                    .WaitAndRetryAsync(_clusterOptions.RetryAttempt,
                        retryAttempt => TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),
                        (exception, sleepDuration, retry, context) =>
                        {
                            if (retry >= _clusterOptions.RetryAttempt)
                            {
                                _logger.LogError(
                                    $"Could not process item after {retry} retry times: {exception.Message}");
                            }
                        });

                var policyResult = await policy.ExecuteAndCaptureAsync(async ct =>
                {
                    try
                    {
                        if (CpuUsage > _clusterOptions.LimitCpuUsage)
                        {
                            // NOTE(review): this yields a percentage but is
                            // passed to Task.Delay as milliseconds — confirm
                            // the intended throttle duration.
                            var suspensionTime = (CpuUsage - _clusterOptions.LimitCpuUsage) / CpuUsage * 100;
                            await Task.Delay((int)suspensionTime, ct);
                        }

                        if (item1ProcessSource.ContainsKey(combined.Item1.Key) &&
                            item2ProcessSource.ContainsKey(combined.Item2.Key) &&
                            item1ProcessSource.TryGetValue(combined.Item1.Key, out var item1) &&
                            item2ProcessSource.TryGetValue(combined.Item2.Key, out var item2))
                        {
                            await _remoteContract.ProcessRemotely(item1, item2, NodeMetrics);
                            // Signal both linked items that the pair is done.
                            combined.Item1.Value.Cancel();
                            combined.Item2.Value.Cancel();
                        }
                    }
                    catch (Exception ex) when (ex is TaskCanceledException || ex is OperationCanceledException)
                    {
                        _logger.LogTrace("The item process has been cancelled.");
                    }
                }, cts.Token).ConfigureAwait(false);

                if (policyResult.Outcome == OutcomeType.Failure)
                {
                    _logger.LogCritical(
                        policyResult.FinalException != null
                            ? $"Could not process item: {policyResult.FinalException.Message}."
                            : "An error has occured while processing the item.");
                }

                // FIX: log the Guid key, not the whole KeyValuePair, to match
                // the "item of key" wording of the message.
                if (!item1ProcessSource.TryRemove(combined.Item1.Key, out _))
                {
                    _logger.LogWarning(
                        $"Could not remove item of key {combined.Item1.Key} from the buffer.");
                }

                if (!item2ProcessSource.TryRemove(combined.Item2.Key, out _))
                {
                    _logger.LogWarning(
                        $"Could not remove item of key {combined.Item2.Key} from the buffer.");
                }
            });

    var options = new DataflowLinkOptions { PropagateCompletion = true };
    _item1Source.LinkTo(joinBlock.Target1, options);
    _item2Source.LinkTo(joinBlock.Target2, options);
    joinBlock.LinkTo(processBlock, options);
}