public void Test_ConfigPollInterval_Default_Is_2_5Seconds()
{
    var options = new ClusterOptions();
    Assert.Equal(TimeSpan.FromSeconds(2.5), options.ConfigPollInterval);
}
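The 2.5-second default asserted above can be overridden when the options are built. A minimal sketch, not from the original source, assuming ConfigPollInterval and EnableConfigPolling expose public setters (the tests only read them); the values are placeholders:

var options = new ClusterOptions
{
    EnableConfigPolling = true,                    // keep the default polling behavior
    ConfigPollInterval = TimeSpan.FromSeconds(10)  // widen the 2.5 s default
};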
public HttpStreamingConfigListenerFactory(ClusterOptions clusterOptions, IServiceProvider serviceProvider,
    ILogger<HttpStreamingConfigListener> logger)
{
    _clusterOptions = clusterOptions ?? throw new ArgumentNullException(nameof(clusterOptions));
    _serviceProvider = serviceProvider ?? throw new ArgumentNullException(nameof(serviceProvider));
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
}
public RedisQueueAdapterFactory(
    string name,
    RedisStreamOptions options,
    IConnectionMultiplexerFactory connectionMultiplexerFactory,
    HashRingStreamQueueMapperOptions queueMapperOptions,
    SimpleQueueCacheOptions cacheOptions,
    IServiceProvider serviceProvider,
    IOptions<ClusterOptions> clusterOptions,
    IRedisDataAdapter dataAdapter,
    ILogger logger,
    ISerializationManager serializationManager)
{
    if (string.IsNullOrEmpty(name)) throw new ArgumentNullException(nameof(name));
    if (options == null) throw new ArgumentNullException(nameof(options));
    if (connectionMultiplexerFactory == null) throw new ArgumentNullException(nameof(connectionMultiplexerFactory));
    if (queueMapperOptions == null) throw new ArgumentNullException(nameof(queueMapperOptions));
    if (cacheOptions == null) throw new ArgumentNullException(nameof(cacheOptions));
    if (serviceProvider == null) throw new ArgumentNullException(nameof(serviceProvider));
    if (clusterOptions == null) throw new ArgumentNullException(nameof(clusterOptions));
    if (dataAdapter == null) throw new ArgumentNullException(nameof(dataAdapter));
    if (serializationManager == null) throw new ArgumentNullException(nameof(serializationManager));

    _providerName = name;
    _options = options;
    _connectionMultiplexerFactory = connectionMultiplexerFactory;
    _clusterOptions = clusterOptions.Value;
    _logger = logger != null ? logger.ForContext<RedisQueueAdapterFactory>() : SilentLogger.Logger;
    _dataAdapter = dataAdapter;
    _streamQueueMapper = new HashRingBasedStreamQueueMapper(queueMapperOptions, _providerName);

    var microsoftLoggerFactory = serviceProvider.GetService<Microsoft.Extensions.Logging.ILoggerFactory>();
    _adapterCache = new SimpleQueueAdapterCache(cacheOptions, _providerName, microsoftLoggerFactory);
}
public async void StartElection()
{
    try
    {
        var lastLogTerm = _nodeStorage.GetLastLogTerm();
        if (_nodeStorage.CurrentTerm > lastLogTerm + 3)
        {
            Logger.LogInformation("Detected that the node's term is more than 3 ahead of its last log term; restarting the election from last log term " + lastLogTerm);
            _nodeStorage.SetCurrentTerm(lastLogTerm + 1);
        }
        else
        {
            _nodeStorage.SetCurrentTerm(_nodeStorage.CurrentTerm + 1);
        }

        // Vote for yourself
        _nodeStorage.SetVotedFor(_nodeStorage.Id);

        var election = new Election(
            _loggerFactory.CreateLogger<Election>(),
            TimeSpan.FromMilliseconds(ClusterOptions.LatencyToleranceMs),
            ClusterOptions.GetClusterUrls().Where(url => url != NodeStateService.Url));

        var collectedNodes = await election.CollectVotes(
            _nodeStorage.CurrentTerm,
            _nodeStorage.Id,
            _nodeStorage.GetLastLogIndex(),
            _nodeStorage.GetLastLogTerm());

        if (collectedNodes.Count() >= ClusterOptions.MinimumNodes - 1)
        {
            Logger.LogInformation(NodeStateService.GetNodeLogId() + "Received enough votes to be promoted, promoting to leader. Registered nodes: " + (collectedNodes.Count() + 1) + ", minimum nodes: " + ClusterOptions.MinimumNodes);
            StopTimer(_electionTimeoutTimer);
            SetNodeRole(NodeState.Leader);
            AddNodesToCluster(collectedNodes.Select(cn => new NodeInformation()
            {
                Id = cn.Key,
                TransportAddress = cn.Value,
                IsContactable = true,
                Name = ""
            }));
        }
        else
        {
            NodeStateService.CurrentLeader = null;
            _nodeStorage.SetVotedFor(null);
            SetNodeRole(NodeState.Follower);
        }
    }
    catch (Exception e)
    {
        Logger.LogError("Failed to run election with error " + e.StackTrace);
    }
}
public async Task InitializeAsync()
{
    // DNS-SRV
    if (ClusterOptions.IsValidDnsSrv())
    {
        try
        {
            var dnsResolver = ServiceProvider.GetRequiredService<IDnsResolver>();
            var bootstrapUri = ClusterOptions.ConnectionStringValue.GetDnsBootStrapUri();
            var servers = (await dnsResolver.GetDnsSrvEntriesAsync(bootstrapUri)).ToList();
            if (servers.Any())
            {
                _logger.LogInformation($"Successfully retrieved DNS SRV entries: [{string.Join(",", servers)}]");
                ClusterOptions.Servers(servers);
            }
        }
        catch (Exception exception)
        {
            _logger.LogInformation(exception, "Error trying to retrieve DNS SRV entries.");
        }
    }

    foreach (var server in ClusterOptions.ServersValue)
    {
        var bsEndpoint = server.GetIpEndPoint(ClusterOptions.KvPort, ClusterOptions.EnableIPV6Addressing);
        var node = await _clusterNodeFactory.CreateAndConnectAsync(bsEndpoint);
        node.BootstrapUri = server;
        GlobalConfig = await node.GetClusterMap();

        if (GlobalConfig == null) // TODO NCBC-1966 xerror info is being hidden, so on failure this will not be null
        {
            AddNode(node); // GCCCP is not supported - pre-6.5 server, fall back to CCCP like SDK 2
        }
        else
        {
            GlobalConfig.IsGlobal = true;
            foreach (var nodeAdapter in GlobalConfig.GetNodes()) // Initialize cluster nodes for global services
            {
                if (server.Host.Equals(nodeAdapter.Hostname)) // This is the bootstrap node, so update it
                {
                    node.BootstrapUri = server;
                    node.NodesAdapter = nodeAdapter;
                    node.BuildServiceUris();
                    SupportsCollections = node.Supports(ServerFeatures.Collections);
                    AddNode(node);
                }
                else
                {
                    var endpoint = nodeAdapter.GetIpEndPoint(ClusterOptions.EnableTls);
                    if (endpoint.Port == 0)
                    {
                        endpoint.Port = 11210;
                    }
                    var newNode = await _clusterNodeFactory.CreateAndConnectAsync(endpoint);
                    newNode.BootstrapUri = server;
                    newNode.NodesAdapter = nodeAdapter;
                    newNode.BuildServiceUris();
                    SupportsCollections = node.Supports(ServerFeatures.Collections);
                    AddNode(newNode);
                }
            }
        }
    }
}
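For context, the bootstrap above runs during cluster initialization; a typical entry point is connecting with a connection string and credentials. A usage sketch, not taken from the original source, assuming the Couchbase .NET SDK 3.x public API; the host, bucket name, and credentials are placeholders:

public static async Task ConnectSketchAsync()
{
    var clusterOptions = new ClusterOptions
    {
        UserName = "Administrator",   // placeholder credentials
        Password = "password"
    };
    // ConnectAsync drives the kind of node bootstrapping shown above.
    var cluster = await Cluster.ConnectAsync("couchbase://localhost", clusterOptions);
    var bucket = await cluster.BucketAsync("default");
}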
public Silo(ILocalSiloDetails siloDetails, IServiceProvider services)
{
    string name = siloDetails.Name;

    // Temporarily still require this. Hopefully gone when 2.0 is released.
    this.siloDetails = siloDetails;
    this.SystemStatus = SystemStatus.Creating;
    AsynchAgent.IsStarting = true; // todo: use ISiloLifecycle instead?

    var startTime = DateTime.UtcNow;

    IOptions<ClusterMembershipOptions> clusterMembershipOptions = services.GetRequiredService<IOptions<ClusterMembershipOptions>>();
    initTimeout = clusterMembershipOptions.Value.MaxJoinAttemptTime;
    if (Debugger.IsAttached)
    {
        initTimeout = StandardExtensions.Max(TimeSpan.FromMinutes(10), clusterMembershipOptions.Value.MaxJoinAttemptTime);
        stopTimeout = initTimeout;
    }

    var localEndpoint = this.siloDetails.SiloAddress.Endpoint;

    services.GetService<SerializationManager>().RegisterSerializers(services.GetService<IApplicationPartManager>());

    this.Services = services;
    this.Services.InitializeSiloUnobservedExceptionsHandler();

    // Set PropagateActivityId flag from node config
    IOptions<SiloMessagingOptions> messagingOptions = services.GetRequiredService<IOptions<SiloMessagingOptions>>();
    RequestContext.PropagateActivityId = messagingOptions.Value.PropagateActivityId;

    this.loggerFactory = this.Services.GetRequiredService<ILoggerFactory>();
    logger = this.loggerFactory.CreateLogger<Silo>();

    logger.Info(ErrorCode.SiloGcSetting, "Silo starting with GC settings: ServerGC={0} GCLatencyMode={1}",
        GCSettings.IsServerGC, Enum.GetName(typeof(GCLatencyMode), GCSettings.LatencyMode));
    if (!GCSettings.IsServerGC)
    {
        logger.Warn(ErrorCode.SiloGcWarning, "Note: Silo not running with ServerGC turned on - recommend checking app config : <configuration>-<runtime>-<gcServer enabled=\"true\">");
        logger.Warn(ErrorCode.SiloGcWarning, "Note: ServerGC only kicks in on multi-core systems (settings enabling ServerGC have no effect on single-core machines).");
    }

    logger.Info(ErrorCode.SiloInitializing, "-------------- Initializing silo on host {0} MachineName {1} at {2}, gen {3} --------------",
        this.siloDetails.DnsHostName, Environment.MachineName, localEndpoint, this.siloDetails.SiloAddress.Generation);
    logger.Info(ErrorCode.SiloInitConfig, "Starting silo {0}", name);

    var siloMessagingOptions = this.Services.GetRequiredService<IOptions<SiloMessagingOptions>>();
    BufferPool.InitGlobalBufferPool(siloMessagingOptions.Value);

    try
    {
        grainFactory = Services.GetRequiredService<GrainFactory>();
    }
    catch (InvalidOperationException exc)
    {
        logger.Error(ErrorCode.SiloStartError, "Exception during Silo.Start, GrainFactory was not registered in Dependency Injection container", exc);
        throw;
    }

    // Performance metrics
    siloStatistics = Services.GetRequiredService<SiloStatisticsManager>();

    // The scheduler
    scheduler = Services.GetRequiredService<OrleansTaskScheduler>();
    healthCheckParticipants.Add(scheduler);

    runtimeClient = Services.GetRequiredService<InsideRuntimeClient>();

    // Initialize the message center
    messageCenter = Services.GetRequiredService<MessageCenter>();
    var dispatcher = this.Services.GetRequiredService<Dispatcher>();
    messageCenter.RerouteHandler = dispatcher.RerouteMessage;
    messageCenter.SniffIncomingMessage = runtimeClient.SniffIncomingMessage;

    // Now the router/directory service. This has to come after the message center;
    // note that it then gets injected back into the message center.
    localGrainDirectory = Services.GetRequiredService<LocalGrainDirectory>();

    // Now the activation directory.
    activationDirectory = Services.GetRequiredService<ActivationDirectory>();

    // Now the consistent ring provider
    RingProvider = Services.GetRequiredService<IConsistentRingProvider>();

    catalog = Services.GetRequiredService<Catalog>();
    executorService = Services.GetRequiredService<ExecutorService>();

    // Now the incoming message agents
    var messageFactory = this.Services.GetRequiredService<MessageFactory>();
    incomingSystemAgent = new IncomingMessageAgent(Message.Categories.System, messageCenter, activationDirectory, scheduler, catalog.Dispatcher, messageFactory, executorService, this.loggerFactory);
    incomingPingAgent = new IncomingMessageAgent(Message.Categories.Ping, messageCenter, activationDirectory, scheduler, catalog.Dispatcher, messageFactory, executorService, this.loggerFactory);
    incomingAgent = new IncomingMessageAgent(Message.Categories.Application, messageCenter, activationDirectory, scheduler, catalog.Dispatcher, messageFactory, executorService, this.loggerFactory);

    siloStatusOracle = Services.GetRequiredService<ISiloStatusOracle>();
    this.membershipService = Services.GetRequiredService<IMembershipService>();

    this.clusterOptions = Services.GetRequiredService<IOptions<ClusterOptions>>().Value;
    var multiClusterOptions = Services.GetRequiredService<IOptions<MultiClusterOptions>>().Value;
    if (!multiClusterOptions.HasMultiClusterNetwork)
    {
        logger.Info("Skip multicluster oracle creation (no multicluster network configured)");
    }
    else
    {
        multiClusterOracle = Services.GetRequiredService<IMultiClusterOracle>();
    }

    this.SystemStatus = SystemStatus.Created;
    AsynchAgent.IsStarting = false;

    StringValueStatistic.FindOrCreate(StatisticNames.SILO_START_TIME,
        () => LogFormatter.PrintDate(startTime)); // This will help troubleshoot production deployment when looking at MDS logs.

    this.siloLifecycle = this.Services.GetRequiredService<ISiloLifecycleSubject>();

    // Register all lifecycle participants
    IEnumerable<ILifecycleParticipant<ISiloLifecycle>> lifecycleParticipants = this.Services.GetServices<ILifecycleParticipant<ISiloLifecycle>>();
    foreach (ILifecycleParticipant<ISiloLifecycle> participant in lifecycleParticipants)
    {
        participant?.Participate(this.siloLifecycle);
    }

    // Register all named lifecycle participants
    IKeyedServiceCollection<string, ILifecycleParticipant<ISiloLifecycle>> namedLifecycleParticipantCollection = this.Services.GetService<IKeyedServiceCollection<string, ILifecycleParticipant<ISiloLifecycle>>>();
    foreach (ILifecycleParticipant<ISiloLifecycle> participant in namedLifecycleParticipantCollection
        ?.GetServices(this.Services)
        ?.Select(s => s.GetService(this.Services)))
    {
        participant?.Participate(this.siloLifecycle);
    }

    // Add self to lifecycle
    this.Participate(this.siloLifecycle);

    logger.Info(ErrorCode.SiloInitializingFinished, "-------------- Started silo {0}, ConsistentHashCode {1:X} --------------",
        SiloAddress.ToLongString(), SiloAddress.GetConsistentHashCode());
}
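The constructor above resolves IOptions<ClusterOptions> from dependency injection; those values typically come from the host builder. A hosting sketch, not from the original source, assuming the Orleans SiloHostBuilder API; the ids are placeholders:

public static async Task StartSiloSketchAsync()
{
    var silo = new SiloHostBuilder()
        .UseLocalhostClustering()              // development-only clustering
        .Configure<ClusterOptions>(options =>
        {
            options.ClusterId = "dev";         // placeholder ids
            options.ServiceId = "SampleService";
        })
        .Build();
    await silo.StartAsync();
}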
public static int GetKeyValuePort(this Node node, ClusterOptions clusterOptions)
{
    // Use the SSL key/value port when TLS is enabled, otherwise the plain key/value port.
    return clusterOptions.EnableTls ? node.Ports.SslDirect : node.Ports.Direct;
}
public SearchClient(HttpClient httpClient, IDataMapper dataMapper, ClusterOptions clusterOptions) : base(httpClient, dataMapper, clusterOptions) { }
public ConfigContext(ClusterOptions clusterOptions)
{
    _clusterOptions = clusterOptions;
    _httpClient = new CouchbaseHttpClient(_clusterOptions);
}
/// <summary>
/// <see cref="DualParallelDispatcherRemoteNode{TInput1,TInput2,TOutput1,TOutput2}"/>
/// </summary>
/// <param name="persistentCache">Persistent cache to avoid dropped data on system crash</param>
/// <param name="progress">Progress of the current bulk</param>
/// <param name="host"><see cref="Host"/></param>
/// <param name="cts"><see cref="CancellationTokenSource"/></param>
/// <param name="circuitBreakerOptions"><see cref="CircuitBreakerOptions"/></param>
/// <param name="clusterOptions"><see cref="ClusterOptions"/></param>
/// <param name="logger"><see cref="ILogger"/></param>
public DualParallelDispatcherRemoteNode(
    IAppCache persistentCache,
    IProgress<double> progress,
    Host host,
    CancellationTokenSource cts,
    CircuitBreakerOptions circuitBreakerOptions,
    ClusterOptions clusterOptions,
    ILogger logger) : base(
        Policy.Handle<Exception>()
            .AdvancedCircuitBreakerAsync(circuitBreakerOptions.CircuitBreakerFailureThreshold,
                circuitBreakerOptions.CircuitBreakerSamplingDuration,
                circuitBreakerOptions.CircuitBreakerMinimumThroughput,
                circuitBreakerOptions.CircuitBreakerDurationOfBreak,
                onBreak: (ex, timespan, context) =>
                {
                    logger.LogError(
                        $"Batch processor breaker: Breaking the circuit for {timespan.TotalMilliseconds}ms due to {ex.Message}.");
                },
                onReset: context =>
                {
                    logger.LogInformation(
                        "Batch processor breaker: Succeeded, closed the circuit.");
                },
                onHalfOpen: () =>
                {
                    logger.LogWarning(
                        "Batch processor breaker: Half-open, next call is a trial.");
                }),
        clusterOptions,
        progress,
        cts,
        logger)
{
    _logger = logger;
    _clusterOptions = clusterOptions;

    ISubject<LinkedItem<TInput1>> item1DispatcherSubject = new Subject<LinkedItem<TInput1>>();
    _item1SynchronizedDispatcherSubject = Subject.Synchronize(item1DispatcherSubject);
    _item1SynchronizedDispatcherSubjectSubscription = _item1SynchronizedDispatcherSubject
        .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
        .Select(item => Observable.FromAsync(() =>
            persistentCache.AddItem1Async(item.Key.ToString(), item.Entity, item.CancellationTokenSource.Token)))
        .Merge()
        .Subscribe();

    ISubject<LinkedItem<TInput2>> item2DispatcherSubject = new Subject<LinkedItem<TInput2>>();
    _item2SynchronizedDispatcherSubject = Subject.Synchronize(item2DispatcherSubject);
    _item2SynchronizedDispatcherSubjectSubscription = _item2SynchronizedDispatcherSubject
        .ObserveOn(new EventLoopScheduler(ts => new Thread(ts)))
        .Select(item => Observable.FromAsync(() =>
            persistentCache.AddItem2Async(item.Key.ToString(), item.Entity, item.CancellationTokenSource.Token)))
        .Merge()
        .Subscribe();

    _channel = new Channel(host.MachineName, host.Port, ChannelCredentials.Insecure);
    _remoteContract = MagicOnionClient.Create<IRemoteContract<TOutput1, TOutput2>>(_channel);
    _item1RemoteContract = MagicOnionClient.Create<IOutputItem1RemoteContract<TInput1, TOutput1>>(_channel);
    _item2RemoteContract = MagicOnionClient.Create<IOutputItem2RemoteContract<TInput2, TOutput2>>(_channel);

    IRemoteNodeSubject nodeReceiver = new NodeReceiver(_logger);
    _remoteNodeHealthSubscription = nodeReceiver.RemoteNodeHealthSubject.Subscribe(remoteNodeHealth =>
    {
        NodeMetrics.RemoteNodeHealth = remoteNodeHealth;
    });
    _nodeHub = StreamingHubClient.Connect<INodeHub, INodeReceiver>(_channel, (INodeReceiver)nodeReceiver);

    NodeMetrics = new NodeMetrics(Guid.NewGuid());

    var item1ProcessSource = new ConcurrentDictionary<Guid, TOutput1>();
    var item2ProcessSource = new ConcurrentDictionary<Guid, TOutput2>();
    var joinBlock = new JoinBlock<KeyValuePair<Guid, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(
        new GroupingDataflowBlockOptions { Greedy = false });

    _item1Source = new TransformBlock<Tuple<Guid, TOutput1, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(source =>
    {
        if (!item1ProcessSource.ContainsKey(source.Item1) && !item1ProcessSource.TryAdd(source.Item1, source.Item2))
        {
            _logger.LogError(
                $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
        }
        return new KeyValuePair<Guid, CancellationTokenSource>(source.Item1, source.Item3);
    });

    _item2Source = new TransformBlock<Tuple<Guid, TOutput2, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>(source =>
    {
        if (!item2ProcessSource.ContainsKey(source.Item1) && !item2ProcessSource.TryAdd(source.Item1, source.Item2))
        {
            _logger.LogError(
                $"Could not add item of type {source.Item2.GetType()} and key {source.Item1.ToString()} to the buffer.");
        }
        return new KeyValuePair<Guid, CancellationTokenSource>(source.Item1, source.Item3);
    });

    var processBlock = new ActionBlock<Tuple<KeyValuePair<Guid, CancellationTokenSource>, KeyValuePair<Guid, CancellationTokenSource>>>(
        async combined =>
        {
            var policy = Policy
                .Handle<Exception>(ex => !(ex is TaskCanceledException || ex is OperationCanceledException))
                .WaitAndRetryAsync(_clusterOptions.RetryAttempt,
                    retryAttempt => TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),
                    (exception, sleepDuration, retry, context) =>
                    {
                        if (retry >= _clusterOptions.RetryAttempt)
                        {
                            _logger.LogError(
                                $"Could not process item after {retry} retry times: {exception.Message}");
                        }
                    });

            var policyResult = await policy.ExecuteAndCaptureAsync(async ct =>
            {
                try
                {
                    if (CpuUsage > _clusterOptions.LimitCpuUsage)
                    {
                        var suspensionTime = (CpuUsage - _clusterOptions.LimitCpuUsage) / CpuUsage * 100;
                        await Task.Delay((int)suspensionTime, ct);
                    }

                    if (item1ProcessSource.ContainsKey(combined.Item1.Key) &&
                        item2ProcessSource.ContainsKey(combined.Item2.Key) &&
                        item1ProcessSource.TryGetValue(combined.Item1.Key, out var item1) &&
                        item2ProcessSource.TryGetValue(combined.Item2.Key, out var item2))
                    {
                        await _remoteContract.ProcessRemotely(item1, item2, NodeMetrics);
                        combined.Item1.Value.Cancel();
                        combined.Item2.Value.Cancel();
                    }
                }
                catch (Exception ex) when (ex is TaskCanceledException || ex is OperationCanceledException)
                {
                    _logger.LogTrace("The item process has been cancelled.");
                }
            }, cts.Token).ConfigureAwait(false);

            if (policyResult.Outcome == OutcomeType.Failure)
            {
                _logger.LogCritical(
                    policyResult.FinalException != null
                        ? $"Could not process item: {policyResult.FinalException.Message}."
                        : "An error has occurred while processing the item.");
            }

            if (!item1ProcessSource.TryRemove(combined.Item1.Key, out _))
            {
                _logger.LogWarning(
                    $"Could not remove item of key {combined.Item1.ToString()} from the buffer.");
            }

            if (!item2ProcessSource.TryRemove(combined.Item2.Key, out _))
            {
                _logger.LogWarning(
                    $"Could not remove item of key {combined.Item2.ToString()} from the buffer.");
            }
        });

    var options = new DataflowLinkOptions { PropagateCompletion = true };
    _item1Source.LinkTo(joinBlock.Target1, options);
    _item2Source.LinkTo(joinBlock.Target2, options);
    joinBlock.LinkTo(processBlock, options);
}
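The join wiring at the end of the constructor pairs the two keyed streams before processing. An isolated sketch of that TPL Dataflow pattern, not from the original source; the types and console output are placeholders:

var linkOptions = new DataflowLinkOptions { PropagateCompletion = true };
// Non-greedy join: a pair is only emitted once both targets can atomically supply an item.
var join = new JoinBlock<Guid, Guid>(new GroupingDataflowBlockOptions { Greedy = false });
var left = new TransformBlock<Guid, Guid>(id => id);
var right = new TransformBlock<Guid, Guid>(id => id);
var process = new ActionBlock<Tuple<Guid, Guid>>(pair =>
    Console.WriteLine($"Paired {pair.Item1} with {pair.Item2}"));
left.LinkTo(join.Target1, linkOptions);
right.LinkTo(join.Target2, linkOptions);
join.LinkTo(process, linkOptions);
left.Post(Guid.NewGuid());
right.Post(Guid.NewGuid());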
//private static readonly ILog Log = LogManager.GetLogger<SearchClient>();

//for log redaction
//private Func<object, string> User = RedactableArgument.UserAction;

public SearchClient(ClusterOptions clusterOptions) : this(
    new HttpClient(new AuthenticatingHttpClientHandler(clusterOptions.UserName, clusterOptions.Password)),
    new SearchDataMapper(),
    clusterOptions)
{
}
public CommitRow(ClusterOptions clusterOptions, long firstLSN)
{
    // All entities are in the same partition for atomic read/writes.
    PartitionKey = ArchivalRow.MakePartitionKey(clusterOptions.ServiceId);
    RowKey = MakeRowKey(firstLSN);
}
public void X509Certificate_Is_Null_By_Default()
{
    var options = new ClusterOptions();
    Assert.Null(options.X509CertificateFactory);
}
public void Test_EnableConfigPolling_Default_Is_True()
{
    var options = new ClusterOptions();
    Assert.True(options.EnableConfigPolling);
}
public BucketManager(ClusterOptions clusterOptions)
    : this(clusterOptions, new HttpClient(new AuthenticatingHttpClientHandler(clusterOptions.UserName, clusterOptions.Password)))
{
}
public IpEndPointService(IDnsResolver dnsResolver, ClusterOptions clusterOptions)
{
    _dnsResolver = dnsResolver ?? throw new ArgumentNullException(nameof(dnsResolver));
    _clusterOptions = clusterOptions ?? throw new ArgumentNullException(nameof(clusterOptions));
}
public BucketManager(ClusterOptions clusterOptions, HttpClient client)
{
    _clusterOptions = clusterOptions;
    _client = client;
}
public FakeBucket(string name, ClusterOptions clusterOptions)
{
    Name = name;
    _clusterOptions = clusterOptions;
}
public ActivityRequestTracer(ILoggerFactory loggerFactory, ClusterOptions options)
{
    _loggerFactory = loggerFactory;
    _diagnosticListener = new DiagnosticListener(RequestTracing.SourceName);
    _diagnosticListener.Subscribe(new ThresholdActivityObserver(loggerFactory, options.ThresholdOptions ?? new ThresholdOptions()));
}
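The observer above falls back to a default ThresholdOptions when none is configured; supplying one explicitly might look like the sketch below (not from the original source). It assumes ClusterOptions.ThresholdOptions has a public setter and uses Microsoft.Extensions.Logging's LoggerFactory; no non-default values are shown:

var tracer = new ActivityRequestTracer(
    LoggerFactory.Create(builder => { /* add logging providers here */ }),
    new ClusterOptions { ThresholdOptions = new ThresholdOptions() });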
public ClusterNodeProvider(IConnectionMultiplexer multiplexer, RedisOptions redisOptions, ClusterOptions clusterOptions)
{
    _redisOptions = redisOptions;
    _multiplexer = multiplexer;
    _db = multiplexer.GetDatabase(_redisOptions.Database);
    _clusterOptions = clusterOptions;
}
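A construction sketch for the provider above, not from the original source; it assumes StackExchange.Redis's ConnectionMultiplexer, a settable RedisOptions.Database, and a parameterless ClusterOptions constructor, with a placeholder endpoint:

var multiplexer = ConnectionMultiplexer.Connect("localhost:6379");
var provider = new ClusterNodeProvider(
    multiplexer,
    new RedisOptions { Database = 0 },   // database index read by the constructor above
    new ClusterOptions());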
public RaftService(
    ILoggerFactory logger,
    IOptions<ClusterOptions> clusterOptions,
    IOptions<NodeOptions> nodeOptions,
    IClusterConnectionPool clusterConnectionPool,
    INodeStorage<State> nodeStorage,
    IStateMachine<State> stateMachine,
    NodeStateService nodeStateService,
    ClusterClient clusterClient)
    : base(logger.CreateLogger<RaftService<State>>(), clusterOptions.Value, nodeOptions.Value, stateMachine, nodeStateService)
{
    _nodeStorage = nodeStorage;
    _loggerFactory = logger;

    // Bootstrap the node
    _snapshotService = new Snapshotter<State>(logger.CreateLogger<Snapshotter<State>>(), nodeStorage, stateMachine, nodeStateService);
    _bootstrapService = new Bootstrapper<State>(logger.CreateLogger<Bootstrapper<State>>(), clusterOptions.Value, nodeOptions.Value, nodeStorage, StateMachine, NodeStateService);
    _commitService = new CommitService<State>(logger.CreateLogger<CommitService<State>>(), clusterOptions.Value, nodeOptions.Value, nodeStorage, StateMachine, NodeStateService);
    _discovery = new Discovery(logger.CreateLogger<Discovery>());
    _clusterClient = clusterClient;
    _clusterConnectionPool = clusterConnectionPool;
    NodeStateService.Id = _nodeStorage.Id;

    _electionTimeoutTimer = new Timer(ElectionTimeoutEventHandler);
    _heartbeatTimer = new Timer(HeartbeatTimeoutEventHandler);

    if (!ClusterOptions.TestMode)
    {
        _bootstrapTask = Task.Run(async () =>
        {
            // Wait for the rest of the node to boot up
            Logger.LogInformation("Starting bootstrap...");
            Thread.Sleep(3000);
            nodeStateService.Url = await _bootstrapService.GetMyUrl(ClusterOptions.GetClusterUrls(), TimeSpan.FromMilliseconds(ClusterOptions.LatencyToleranceMs));
            NodeStateService.IsBootstrapped = true;
            SetNodeRole(NodeState.Follower);
        });
    }
    else
    {
        Logger.LogInformation("Running in test mode...");
        SetNodeRole(NodeState.Leader);
        NodeStateService.IsBootstrapped = true;
        Handle(new ExecuteCommands()
        {
            Commands = new List<BaseCommand>()
            {
                new UpsertNodeInformation()
                {
                    Id = NodeStateService.Id,
                    Name = "",
                    TransportAddress = "https://localhost:5021",
                    IsContactable = true
                }
            }
        }).GetAwaiter().GetResult();
    }
}
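A registration sketch for the options and services this constructor consumes, not from the original source; it assumes standard Microsoft.Extensions.DependencyInjection/Options binding inside a ConfigureServices method, and the section names and MyState type are placeholders:

// Bind ClusterOptions and NodeOptions from configuration and register the Raft service.
services.Configure<ClusterOptions>(configuration.GetSection("Cluster"));
services.Configure<NodeOptions>(configuration.GetSection("Node"));
services.AddSingleton<NodeStateService>();
services.AddSingleton<RaftService<MyState>>();   // MyState is a hypothetical state machine state type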
/// <summary>
/// Seam for injecting mock.
/// </summary>
protected virtual Task<ICluster> CreateClusterAsync(ClusterOptions clusterOptions)
{
    clusterOptions.WithLogging(_loggerFactory);
    return Cluster.ConnectAsync(clusterOptions);
}
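In a test, the seam above can be overridden so no real cluster connection is made. A sketch, not from the original source, assuming Moq is available; the surrounding test double class is implied:

protected override Task<ICluster> CreateClusterAsync(ClusterOptions clusterOptions)
{
    // Return a mocked cluster instead of dialing a real one.
    var mockCluster = new Mock<ICluster>();
    return Task.FromResult(mockCluster.Object);
}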
public static void Configure(this ClusterOptions options)
{
    options.ClusterId = Constants.OrleansClusterId;
    options.ServiceId = Constants.OrleansClusterId;
}
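A usage sketch for the extension above, not from the original source; siloBuilder stands in for whatever SiloHostBuilder/ISiloBuilder instance the host composes:

siloBuilder.Configure<ClusterOptions>(options => options.Configure());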
public ClusterIconGeneratorHandler(ClusterOptions options)
{
    iconCache = new NSCache();
    this.options = options;
}
public ClusterContext(CancellationTokenSource tokenSource, ClusterOptions options) : this(null, tokenSource, options) { }
public ConnectionFactory(ClusterOptions clusterOptions, ILogger<MultiplexingConnection> multiplexLogger, ILogger<SslConnection> sslLogger)
{
    _clusterOptions = clusterOptions ?? throw new ArgumentNullException(nameof(clusterOptions));
    _multiplexLogger = multiplexLogger ?? throw new ArgumentNullException(nameof(multiplexLogger));
    _sslLogger = sslLogger ?? throw new ArgumentNullException(nameof(sslLogger));
}
public KetamaKeyMapperFactory(ClusterOptions clusterOptions, ILogger<KetamaKeyMapperFactory> logger)
{
    _clusterOptions = clusterOptions ?? throw new ArgumentNullException(nameof(clusterOptions));
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
}
/// <summary>
/// Seam for injecting mock.
/// </summary>
protected virtual Task<ICluster> CreateClusterAsync(ClusterOptions clusterOptions)
{
    return Cluster.ConnectAsync(clusterOptions);
}
public Redactor(ClusterOptions options)
{
    RedactionLevel = options.RedactionLevel;
}
public void KvSendQueueCapacity_Defaults_To_1024()
{
    var options = new ClusterOptions();
    Assert.Equal(1024u, options.KvSendQueueCapacity);
}