/// <summary>
/// Creates the "get all users" UI command, wiring the shared executors into the base
/// command and keeping a reference to the owning main-window view model.
/// </summary>
public GetAllUsersUICommand(MainWindowViewModel mainWindowViewModel, CommandExecutor commandExecutor, QueryExecutor queryExecutor, RequestExecutor requestExecutor)
    : base(commandExecutor, queryExecutor, requestExecutor)
{
    // The executors are validated by the base constructor; only the view model needs a guard here.
    Argument.IsNotNull(mainWindowViewModel, nameof(mainWindowViewModel));

    _mainWindowViewModel = mainWindowViewModel;
}
/// <summary>
/// Base constructor for UI commands: validates the three executors shared by every
/// command and stores them in the corresponding properties.
/// </summary>
protected BaseUICommand(CommandExecutor commandExecutor, QueryExecutor queryExecutor, RequestExecutor requestExecutor)
{
    Argument.IsNotNull(commandExecutor, nameof(commandExecutor));
    Argument.IsNotNull(queryExecutor, nameof(queryExecutor));
    Argument.IsNotNull(requestExecutor, nameof(requestExecutor));

    CommandExecutor = commandExecutor;
    QueryExecutor = queryExecutor;
    RequestExecutor = requestExecutor;
}
/// <summary>
/// Builds the GraphQL controller from its collaborators. No validation is performed here.
/// </summary>
public GraphQLController(RequestExecutor executor, Swapi api, StarWarsSchema schema)
{
    _executor = executor;
    _api = api;
    Schema = schema;
}
/// <summary>Creates a transaction manager that issues its calls through the given request executor.</summary>
internal TransactionManager(RequestExecutor requestExecutor) => _requestExecutor = requestExecutor;
/// <summary>
/// Initializes a new instance of the <see cref="DocumentSession"/> class and exposes
/// the attachments and revisions sub-APIs bound to this session.
/// </summary>
public DocumentSession(string dbName, DocumentStore documentStore, Guid id, RequestExecutor requestExecutor)
    : base(dbName, documentStore, requestExecutor, id)
{
    Revisions = new DocumentSessionRevisions(this);
    Attachments = new DocumentSessionAttachments(this);
}
/// <summary>
/// Cluster test: a document gets create/update/delete revisions, the database is exported,
/// then re-imported into a fresh database; after adding a second node the test asserts that
/// replication of the imported revision history does NOT resurrect the deleted document on
/// either node (both sessions must see zero <c>User</c> documents).
/// </summary>
// Flow: 2-node Raft cluster -> revisions enabled -> first revision on node A ->
// add node B, delete DB from node A (so later revisions carry node B's tag) ->
// export -> import into a new store -> add a second node -> verify 0 docs on both nodes.
public async Task ReplicateRevision_WhenSourceDataFromExportAndDocDeleted_ShouldNotRecreateTheDoc() { var exportFile = GetTempFileName(); var settings = new Dictionary <string, string>() { [RavenConfiguration.GetKey(x => x.Cluster.OperationTimeout)] = "120", }; var(nodes, leader) = await CreateRaftCluster(2, customSettings : settings, watcherCluster : true); var nodeTags = nodes.Select(n => n.ServerStore.NodeTag).ToArray(); using (var store = GetDocumentStore(new Options { Server = leader, ReplicationFactor = 1 })) { await store.Maintenance.SendAsync(new ConfigureRevisionsOperation(new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration() })); var firstNode = await AssertWaitForNotNullAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.FirstOrDefault()); var entity = new User(); using (var session = store.OpenAsyncSession()) { //Add first revision with first node tag await session.StoreAsync(entity); await session.SaveChangesAsync(); } await store.Maintenance.Server.SendAsync(new AddDatabaseNodeOperation(store.Database)); await WaitAndAssertForValueAsync( async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.Count, 2); await store.Maintenance.Server.SendAsync(new DeleteDatabasesOperation(store.Database, true, nodeTags.First(n => n == firstNode))); await WaitAndAssertForValueAsync( async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.Count, 1); await WaitAndAssertForValueAsync(async() => { var dbRecord = await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database)); return(dbRecord?.DeletionInProgress == null || dbRecord.DeletionInProgress.Count == 0); }, true); await store.GetRequestExecutor().UpdateTopologyAsync(new RequestExecutor.UpdateTopologyParameters(new ServerNode { Url = store.Urls.First(), Database = 
store.Database })); using (var session = store.OpenAsyncSession()) { //Add update revision with second node tag entity.Name = "Changed"; await session.StoreAsync(entity); await session.SaveChangesAsync(); // Add delete revision with second node tag session.Delete(entity.Id); await session.SaveChangesAsync(); } var operation = await store.Smuggler.ExportAsync(new DatabaseSmugglerExportOptions(), exportFile); await operation.WaitForCompletionAsync(); } using (var store = GetDocumentStore(new Options { Server = leader, ReplicationFactor = 1 })) { var srcTag = await AssertWaitForNotNullAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members.FirstOrDefault()); var src = nodes.First(n => n.ServerStore.NodeTag == srcTag); var dest = nodes.First(n => n.ServerStore.NodeTag != srcTag); var operation = await store.Smuggler.ImportAsync(new DatabaseSmugglerImportOptions(), exportFile); await operation.WaitForCompletionAsync(); using (var session = store.OpenAsyncSession()) { WaitForIndexing(store, store.Database, nodeTag: src.ServerStore.NodeTag); var firstNodeDocs = await session.Query <User>().ToArrayAsync(); Assert.Equal(0, firstNodeDocs.Length); } var result = await store.Maintenance.Server.SendAsync(new AddDatabaseNodeOperation(store.Database)); await WaitAndAssertForValueAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.Count, 2); await store.GetRequestExecutor().UpdateTopologyAsync(new RequestExecutor.UpdateTopologyParameters(new ServerNode { Url = store.Urls.First(), Database = store.Database })); using var re = RequestExecutor.CreateForSingleNodeWithConfigurationUpdates(dest.WebUrl, store.Database, null, store.Conventions); using (var secondSession = store.OpenAsyncSession(new SessionOptions { RequestExecutor = re })) { WaitForIndexing(store, store.Database, nodeTag: dest.ServerStore.NodeTag); var secondNodeDocs = await 
secondSession.Query <User>().ToArrayAsync(); Assert.Equal(0, secondNodeDocs.Length); } } }
/// <summary>
/// Creates a RavenDB ETL process: initializes metrics, a request executor for the
/// destination cluster (authenticated with this server's certificate), and the
/// transformation script input.
/// </summary>
public RavenEtl(Transformation transformation, RavenEtlConfiguration configuration, DocumentDatabase database, ServerStore serverStore)
    : base(transformation, configuration, database, serverStore, RavenEtlTag)
{
    Metrics = new EtlMetricsCountersManager();

    _requestExecutor = RequestExecutor.Create(
        configuration.Connection.TopologyDiscoveryUrls,
        configuration.Connection.Database,
        serverStore.Server.Certificate.Certificate,
        DocumentConventions.Default);

    _script = new RavenEtlDocumentTransformer.ScriptInput(transformation);
}
/// <summary>
/// Reads the error payload of a failed HTTP response and rethrows it as the most specific
/// exception type named by the server's error schema, falling back to a generic
/// <c>RavenException</c> when the type is unknown or cannot be instantiated.
/// </summary>
/// <param name="context">JSON context used to parse the response body.</param>
/// <param name="response">The failed HTTP response; must not be null.</param>
/// <param name="additionalErrorInfo">Optional callback that appends extra detail to the error message.</param>
/// <exception cref="ArgumentNullException">When <paramref name="response"/> is null.</exception>
// Notes: 409 Conflict is routed to ThrowConflict and returns early; the server-supplied type
// is instantiated via reflection with the (possibly augmented) message; types that are not
// RavenException subclasses are wrapped in a RavenException; IndexCompilationException gets
// its IndexDefinitionProperty/ProblematicText fields copied from the JSON before rethrow.
public static async Task Throw(JsonOperationContext context, HttpResponseMessage response, Action <StringBuilder> additionalErrorInfo = null) { if (response == null) { throw new ArgumentNullException(nameof(response)); } using (var stream = await RequestExecutor.ReadAsStreamUncompressedAsync(response).ConfigureAwait(false)) using (var json = await GetJson(context, response, stream).ConfigureAwait(false)) { var schema = GetExceptionSchema(response, json); if (response.StatusCode == HttpStatusCode.Conflict) { ThrowConflict(schema, json); return; } var type = GetType(schema.Type); if (type == null) { throw RavenException.Generic(schema.Error, json); } Exception exception; try { string message; if (additionalErrorInfo != null) { var sb = new StringBuilder(schema.Error); additionalErrorInfo(sb); message = sb.ToString(); } else { message = schema.Error; } exception = (Exception)Activator.CreateInstance(type, BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public, null, new[] { message }, null, null); } catch (Exception) { throw RavenException.Generic(schema.Error, json); } if (typeof(RavenException).IsAssignableFrom(type) == false) { throw new RavenException(schema.Error, exception); } if (type == typeof(IndexCompilationException)) { var indexCompilationException = (IndexCompilationException)exception; json.TryGet(nameof(IndexCompilationException.IndexDefinitionProperty), out indexCompilationException.IndexDefinitionProperty); json.TryGet(nameof(IndexCompilationException.ProblematicText), out indexCompilationException.ProblematicText); throw indexCompilationException; } throw exception; } }
/// <summary>
/// Convenience overload: creates an operation with no additional task to await.
/// </summary>
public Operation(RequestExecutor requestExecutor, Func <IDatabaseChanges> changes, DocumentConventions conventions, long id, string nodeTag = null)
    : this(requestExecutor, changes, conventions, id, nodeTag : nodeTag, additionalTask : null)
{
    // Intentionally empty: all work happens in the chained constructor.
}
/// <summary>
/// Creates an operation executor for the given store, defaulting to the store's own
/// database when no database name is supplied.
/// </summary>
public OperationExecutor(DocumentStoreBase store, string databaseName = null)
{
    _store = store;
    _databaseName = databaseName ?? store.Database;
    // Pass the original (possibly null) name so the store resolves its default executor.
    _requestExecutor = store.GetRequestExecutor(databaseName);
}
/// <summary>
/// Creates a SharpBucket client bound to a base URL and request executor,
/// starting out with no authentication configured.
/// </summary>
internal SharpBucket(string baseUrl, RequestExecutor requestExecutor)
{
    BaseUrl = baseUrl;
    RequestExecutor = requestExecutor;
    // Default to anonymous access until an authentication mode is chosen.
    NoAuthentication();
}
/// <summary>
/// Diagnoses the answer with the given id: builds a training DataTable from all stored
/// answers (one column per questionnaire question plus age category, gender and the
/// diagnosed disease), enriches each row with patient data fetched from the User
/// microservice, trains an ID3 decision tree, classifies the target answer, resolves the
/// predicted disease name back to an id via the Medical microservice, and persists the
/// resulting disease id on the answer.
/// </summary>
/// <param name="answerId">Primary key of the answer to evaluate.</param>
/// <exception cref="Exception">
/// When the answer is not found, an answer's value count does not match the question count,
/// or a microservice call reports failure.
/// </exception>
// NOTE(review): each training row triggers two synchronous-in-sequence microservice calls;
// presumably acceptable for small training sets — confirm before scaling.
public async Task EvaluateAnswer(long answerId) { var answer = await _context.Answers.FirstOrDefaultAsync(x => x.AnswerId == answerId); if (answer == null) { throw new Exception("Answer not found"); } var data = new DataTable("Define the disease"); var questions = (await GetQuestions()).ToArray(); var questionsLength = questions.Length; var answers = await GetAnswers(); foreach (var question in questions) { data.Columns.Add(new DataColumn(question.Text, typeof(string))); } data.Columns.Add(new DataColumn("Age category", typeof(byte))); data.Columns.Add(new DataColumn("Gender", typeof(bool))); data.Columns.Add(new DataColumn("Diagnosed Disease", typeof(string))); foreach (var trainningAnswer in answers) { var answerArr = trainningAnswer.AnswerData.Split(';'); if (answerArr.Length != questionsLength) { throw new Exception("Answers count must be equals questions count"); } var patient = data.NewRow(); for (var i = 0; i < answerArr.Length; i++) { patient[questions[i].Text] = answerArr[i]; } var userResponse = await RequestExecutor.ExecuteRequestAsync( MicroservicesEnum.User, RequestUrl.GetPatientById, new Parameter[] { new Parameter("patientId", (int)trainningAnswer.PatientId.Value, ParameterType.GetOrPost) }); var patientData = JsonConvert.DeserializeObject <MksResponse>(userResponse); if (!patientData.Success) { throw new Exception(patientData.Data); } var patientCtx = JsonConvert.DeserializeObject <Patients>(patientData.Data); patient["Age category"] = (byte)new AgeLimit((byte)Math.Round((DateTime.UtcNow - patientCtx.DateBirth).TotalDays / 365.2425)).Limit; patient["Gender"] = patientCtx.Gender; var diseaseResponseName = await RequestExecutor.ExecuteRequestAsync( MicroservicesEnum.Medical, RequestUrl.GetDiseaseNameById, new Parameter[] { new Parameter("diseaseId", trainningAnswer.DeseaseId.Value, ParameterType.GetOrPost) }); var diseaseNameResponse = JsonConvert.DeserializeObject <MksResponse>(diseaseResponseName); if (!diseaseNameResponse.Success) { throw new 
Exception(diseaseNameResponse.Data); } patient["Diagnosed Disease"] = JsonConvert.DeserializeObject <string>(diseaseNameResponse.Data); data.Rows.Add(patient); } var codification = new Codification(data); var codifiedData = codification.Apply(data); int[][] input = codifiedData.ToJagged <int>(questions.Select(x => x.Text).ToArray()); int[] predictions = codifiedData.ToArray <int>("Diagnosed Disease"); var decisionTreeLearningAlgorithm = new ID3Learning { }; var decisionTree = decisionTreeLearningAlgorithm.Learn(input, predictions); var answerArray = answer.AnswerData.Split(';'); if (answerArray.Length != questionsLength) { throw new Exception("Answers count must be equals questions count"); } var inputValues = new string[questions.Length, 2]; for (var i = 0; i < answerArray.Length; i++) { inputValues[i, 0] = questions[i].Text; inputValues[i, 1] = answerArray[i]; } var query = codification.Transform(inputValues); var result = decisionTree.Decide(query); var diagnosis = codification.Revert("Diagnosed Disease", result); var diseaseIdResponse = await RequestExecutor.ExecuteRequestAsync( MicroservicesEnum.Medical, RequestUrl.GetDiseaseIdByName, new Parameter[] { new Parameter("name", diagnosis, ParameterType.GetOrPost) }); var diseaseResponseId = JsonConvert.DeserializeObject <MksResponse>(diseaseIdResponse); if (!diseaseResponseId.Success) { throw new Exception(diseaseResponseId.Data); } answer.DeseaseId = long.Parse(diseaseResponseId.Data); await _context.SaveChangesAsync(); }
/// <summary>
/// Builds a request executor for the ETL destination described by the configuration,
/// authenticated with this server's certificate.
/// </summary>
private static RequestExecutor CreateNewRequestExecutor(RavenEtlConfiguration configuration, ServerStore serverStore)
{
    return RequestExecutor.Create(
        configuration.Connection.TopologyDiscoveryUrls,
        configuration.Connection.Database,
        serverStore.Server.Certificate.Certificate,
        DocumentConventions.Default);
}
/// <summary>Creates an environment manager that issues its calls through the given request executor.</summary>
internal EnvironmentManager(RequestExecutor requestExecutor) => _requestExecutor = requestExecutor;
/// <summary>
/// Executes the posted request via <c>RequestExecutor</c> and returns its result as 200 OK.
/// </summary>
public IHttpActionResult ExecuteRequest([FromBody] Request request) => Ok(RequestExecutor.Execute(request));
/// <summary>
/// Creates a server-wide operation (no per-database changes feed) and marks it as
/// having work pending.
/// </summary>
public ServerWideOperation(RequestExecutor requestExecutor, DocumentConventions conventions, long id)
    : base(requestExecutor, null, conventions, id)
{
    _work = true;
}
/// <summary>
/// Test fake: a request handler whose executor always completes with an empty 200 OK response.
/// </summary>
internal RequestHandlerFake(Method method, string path, List <IParameterResolver> parameterResolvers)
    : base(method, path, parameterResolvers)
{
    _executor = (request, mapper, errors, log) =>
        RequestExecutor.ExecuteRequest(() => Completes.WithSuccess(Response.Of(Response.ResponseStatus.Ok)), errors, log);
}
/// <summary>
/// Registers the eight-parameter handler: the stored executor forwards the eight resolved
/// parameters to the handler under the standard error handling. Returns this instance for
/// fluent chaining.
/// </summary>
public RequestHandler8 <T, TR, TU, TI, TJ, TK, TL, TM> Handle(Handler8 handler)
{
    _executor = (request, p1, p2, p3, p4, p5, p6, p7, p8, mapper, errors, log) =>
        RequestExecutor.ExecuteRequest(() => handler.Invoke(p1, p2, p3, p4, p5, p6, p7, p8), errors, log);
    return this;
}
/// <summary>
/// Test fake: a request handler whose executor delegates to the supplied zero-parameter handler.
/// </summary>
internal RequestHandlerFake(Method method, string path, List <IParameterResolver> parameterResolvers, RequestHandler0.Handler0 handler)
    : base(method, path, parameterResolvers)
{
    _executor = (request, mapper, errors, log) =>
        RequestExecutor.ExecuteRequest(handler.Invoke, errors, log);
}
/// <summary>
/// Creates a bulk-insert operation for the given database: sets up the request executor,
/// an operation context, double-buffered stream writers for the request body, and a
/// dispose-once handler that finalizes the JSON array ("]"), flushes any buffered data to
/// the server, and awaits the in-flight bulk-insert task before releasing resources.
/// </summary>
/// <param name="database">Target database name; must be non-empty (otherwise ThrowNoDatabase).</param>
/// <param name="store">The document store providing conventions and the request executor.</param>
/// <param name="token">Cancellation token applied to the flush/write operations.</param>
// Note: _operationId == -1 in the dispose lambda means no Store() call ever happened, so
// closing skips the server round-trip entirely. Flush failures are captured in flushEx and
// surfaced via ThrowBulkInsertAborted together with any execute-task failure.
public BulkInsertOperation(string database, IDocumentStore store, CancellationToken token = default) { _disposeOnce = new DisposeOnceAsync <SingleAttempt>(async() => { try { if (_streamExposerContent.IsDone) { return; } Exception flushEx = null; if (_stream != null) { try { _currentWriter.Write(']'); _currentWriter.Flush(); await _asyncWrite.ConfigureAwait(false); ((MemoryStream)_currentWriter.BaseStream).TryGetBuffer(out var buffer); await _requestBodyStream.WriteAsync(buffer.Array, buffer.Offset, buffer.Count, _token).ConfigureAwait(false); _compressedStream?.Dispose(); await _stream.FlushAsync(_token).ConfigureAwait(false); } catch (Exception e) { flushEx = e; } } _streamExposerContent.Done(); if (_operationId == -1) { // closing without calling a single store. return; } if (_bulkInsertExecuteTask != null) { try { await _bulkInsertExecuteTask.ConfigureAwait(false); } catch (Exception e) { await ThrowBulkInsertAborted(e, flushEx).ConfigureAwait(false); } } } finally { _streamExposerContent?.Dispose(); _resetContext.Dispose(); } }); _token = token; _conventions = store.Conventions; if (string.IsNullOrWhiteSpace(database)) { ThrowNoDatabase(); } _requestExecutor = store.GetRequestExecutor(database); _resetContext = _requestExecutor.ContextPool.AllocateOperationContext(out _context); _currentWriter = new StreamWriter(new MemoryStream()); _backgroundWriter = new StreamWriter(new MemoryStream()); _streamExposerContent = new StreamExposerContent(); _defaultSerializer = _requestExecutor.Conventions.CreateSerializer(); _customEntitySerializer = _requestExecutor.Conventions.BulkInsert.TrySerializeEntityToJsonStream; _generateEntityIdOnTheClient = new GenerateEntityIdOnTheClient(_requestExecutor.Conventions, entity => AsyncHelpers.RunSync(() => _requestExecutor.Conventions.GenerateDocumentIdAsync(database, entity))); }
/// <summary>
/// Signs the current user out of the system by posting to the auth endpoint.
/// </summary>
/// <returns>A task that completes when the sign-out request has finished.</returns>
/// <remarks>
/// Changed from <c>async void</c> to <c>async Task</c>: with <c>async void</c> the caller
/// cannot await completion and any exception thrown by the request escapes to the
/// synchronization context instead of the caller. Existing call sites that ignored the
/// result still compile.
/// </remarks>
public async Task SignOutAsync()
{
    var requestUri = BuildRequestUri("/Auth/SignOut");
    await RequestExecutor.PostAsync <DynamicWrapper>(requestUri);
}
/// <summary>
/// Cluster test: like the export variant, but the revision history (create on node A,
/// update + delete on node B) is captured via a full-then-incremental backup. After
/// importing the incremental backup into a fresh database and adding a second node, the
/// test asserts replication does NOT resurrect the deleted document on either node.
/// </summary>
// Flow: 2-node cluster, ReplicationFactor 2 -> first revision pinned to node A via a
// single-node request executor -> full backup (mentor = node B) -> delete DB from node A ->
// update + delete revisions on node B -> incremental backup -> import into new store ->
// add second node -> verify 0 User docs on both source and destination nodes.
public async Task ReplicateRevision_WhenSourceDataFromIncrementalBackupAndDocDeleted_ShouldNotRecreateTheDoc() { var backupPath = NewDataPath(suffix: "BackupFolder", forceCreateDir: true); var(nodes, leader) = await CreateRaftCluster(2, watcherCluster : true); using (var store = GetDocumentStore(new Options { Server = leader, ReplicationFactor = 2 })) { await store.Maintenance.SendAsync(new ConfigureRevisionsOperation(new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration() })); var firstNodeTag = await AssertWaitForNotNullAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.FirstOrDefault()); var firstNode = nodes.First(n => n.ServerStore.NodeTag == firstNodeTag); var secondNode = nodes.First(n => n.ServerStore.NodeTag != firstNodeTag); var entity = new User(); using (var re = RequestExecutor.CreateForSingleNodeWithConfigurationUpdates(firstNode.WebUrl, store.Database, null, store.Conventions)) using (var session = store.OpenAsyncSession(new SessionOptions { RequestExecutor = re })) { //Add first revision with first node tag await session.StoreAsync(entity); await session.SaveChangesAsync(); } var config = Backup.CreateBackupConfiguration(backupPath, incrementalBackupFrequency: "0 * * * *", mentorNode: secondNode.ServerStore.NodeTag); var backupTaskId = await Backup.CreateAndRunBackupInClusterAsync(config, store, isFullBackup : true); await store.Maintenance.Server.SendAsync(new DeleteDatabasesOperation(store.Database, true, firstNodeTag)); await WaitAndAssertForValueAsync(async() => { var dbRecord = await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database)); return(dbRecord?.DeletionInProgress == null || dbRecord.DeletionInProgress.Count == 0); }, true); using (var session = store.OpenAsyncSession()) { //Add update revision with second node tag entity.Name = "Changed"; await session.StoreAsync(entity); await session.SaveChangesAsync(); // 
Add delete revision with second node tag session.Delete(entity.Id); await session.SaveChangesAsync(); } await Backup.RunBackupInClusterAsync(store, backupTaskId, isFullBackup : false); } using (var store = GetDocumentStore(new Options { Server = leader, ReplicationFactor = 1 })) { var srcTag = await AssertWaitForNotNullAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members.FirstOrDefault()); var src = nodes.First(n => n.ServerStore.NodeTag == srcTag); var dest = nodes.First(n => n.ServerStore.NodeTag != srcTag); await store.Smuggler.ImportIncrementalAsync(new DatabaseSmugglerImportOptions(), Directory.GetDirectories(backupPath).First()); using (var session = store.OpenAsyncSession()) { WaitForIndexing(store, store.Database, nodeTag: src.ServerStore.NodeTag); var firstNodeDocs = await session.Query <User>().ToArrayAsync(); Assert.Equal(0, firstNodeDocs.Length); } var result = await store.Maintenance.Server.SendAsync(new AddDatabaseNodeOperation(store.Database)); await WaitAndAssertForValueAsync(async() => (await store.Maintenance.Server.SendAsync(new GetDatabaseRecordOperation(store.Database))).Topology.Members?.Count, 2); await store.GetRequestExecutor().UpdateTopologyAsync(new RequestExecutor.UpdateTopologyParameters(new ServerNode { Url = store.Urls.First(), Database = store.Database })); using var re = RequestExecutor.CreateForSingleNodeWithConfigurationUpdates(dest.WebUrl, store.Database, null, store.Conventions); using (var secondSession = store.OpenAsyncSession(new SessionOptions { RequestExecutor = re })) { WaitForIndexing(store, store.Database, nodeTag: dest.ServerStore.NodeTag); var secondNodeDocs = await secondSession.Query <User>().ToArrayAsync(); Assert.Equal(0, secondNodeDocs.Length); } } }
/// <summary>
/// Fetches the current list of drugs from the external "/drugs" endpoint and deserializes it.
/// </summary>
public async Task <List <Drug> > GetDrugs()
{
    var json = await RequestExecutor.ExecuteExternalRequestAsync("/drugs", Method.GET);
    return JsonConvert.DeserializeObject <List <Drug> >(json);
}
/// <summary>Creates a folder manager that issues its calls through the given request executor.</summary>
internal FolderManager(RequestExecutor requestExecutor) => _requestExecutor = requestExecutor;
/// <summary>
/// Initializes a new instance of the <see cref="UsersApi"/> class.
/// </summary>
/// <param name="httpClient">The HTTP client used to issue API requests.</param>
/// <param name="options">The Survey Solutions API configuration.</param>
// Fixed: the XML doc previously referenced ExportApi (copy-paste error) on this UsersApi constructor.
public UsersApi(HttpClient httpClient, SurveySolutionsApiConfiguration options)
{
    this.options = options;
    this.requestExecutor = new RequestExecutor(httpClient);
}
/// <summary>
/// Registers the five-parameter handler: the stored executor forwards the five resolved
/// parameters to the handler under the standard error handling. Returns this instance for
/// fluent chaining.
/// </summary>
public RequestHandler5 <T, TR, TU, TI, TJ> Handle(Handler5 handler)
{
    _executor = (request, p1, p2, p3, p4, p5, mapper, errors, log) =>
        RequestExecutor.ExecuteRequest(() => handler.Invoke(p1, p2, p3, p4, p5), errors, log);
    return this;
}
/// <summary>
/// Creates the "create rule" form for the given account, building a rules creator
/// over the supplied request executor.
/// </summary>
/// <param name="acc">The account identifier.</param>
/// <param name="re">The request executor used to communicate with the server.</param>
public CreateRuleForm(string acc, RequestExecutor re)
{
    InitializeComponent();
    _acc = acc;
    rc = new RulesCreator(re);
}
/// <summary>
/// Creates a server-wide operation that observes the given changes feed; all work is
/// delegated to the base constructor.
/// </summary>
public ServerWideOperation(RequestExecutor requestExecutor, Func <IDatabaseChanges> changes, DocumentConventions conventions, long id)
    : base(requestExecutor, changes, conventions, id)
{
}
/// <summary>
/// Creates the "start blog application" UI command; all wiring is handled by the base class.
/// </summary>
public StartBlogApplicationUICommand(CommandExecutor commandExecutor, QueryExecutor queryExecutor, RequestExecutor requestExecutor)
    : base(commandExecutor, queryExecutor, requestExecutor)
{
}
/// <summary>
/// Creates a server-wide operation optionally pinned to a specific node tag;
/// its status is retrieved by polling rather than a changes feed.
/// </summary>
public ServerWideOperation(RequestExecutor requestExecutor, DocumentConventions conventions, long id, string nodeTag = null)
    : base(requestExecutor, null, conventions, id, nodeTag)
{
    NodeTag = nodeTag;
    StatusFetchMode = OperationStatusFetchMode.Polling;
}
/// <summary>
/// Test helper: rewrites every node URL in the request executor's topology so its port
/// points at the proxy that fronts that server, and keeps doing so on every subsequent
/// topology update via the OnTopologyUpdated event.
/// </summary>
/// <param name="serversToProxies">Map from each cluster server to the proxy standing in front of it.</param>
/// <param name="requestExecutor">The executor whose topology URLs are redirected through the proxies.</param>
// Note: the local function is applied once immediately (if a topology already exists) and
// then subscribed; the subscription is never removed, which is fine for test-scoped executors.
private void ApplyProxiesOnRequestExecutor(Dictionary <RavenServer, ProxyServer> serversToProxies, RequestExecutor requestExecutor) { void ApplyProxies(object sender, TopologyUpdatedEventArgs args) { var topology = args.Topology; if (topology == null) { return; } for (var i = 0; i < topology.Nodes.Count; i++) { var node = topology.Nodes[i]; var kvp = serversToProxies.FirstOrDefault(x => x.Key.ServerStore.NodeTag == node.ClusterTag); Assert.NotNull(kvp); node.Url = ReplacePort(node.Url, kvp.Value.Port); topology.Nodes[i] = node; } } if (requestExecutor.Topology != null) { ApplyProxies(requestExecutor, new TopologyUpdatedEventArgs(requestExecutor.Topology)); } requestExecutor.OnTopologyUpdated += ApplyProxies; }
/// <summary>
/// Cluster test: with round-robin read balancing on a 3-node cluster, verifies that after
/// the leader is disposed, a request executor created against a follower can still execute
/// statistics requests across several simulated sessions (failures are collected via
/// OnFailedRequest rather than thrown), i.e. load balancing survives a failing node.
/// </summary>
// Flow: 3-node cluster -> three stores with RoundRobin conventions -> wait until all three
// nodes are full members -> store documents + a "marker" doc and wait for cluster-wide
// replication -> build an executor from a follower, wait for topology, kill the leader,
// then run 5 sessions' worth of GetStatistics with the cache cleared each time.
public async Task Round_robin_load_balancing_with_failing_node_should_work() { var databaseName = GetDatabaseName(); var(nodes, leader) = await CreateRaftCluster(3); var followers = Servers.Where(x => x.ServerStore.IsLeader() == false).ToArray(); var conventionsForLoadBalancing = new DocumentConventions { ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin }; using (var leaderStore = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower1 = new DocumentStore { Urls = new[] { followers[0].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower2 = new DocumentStore { Urls = new[] { followers[1].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var context = JsonOperationContext.ShortTermSingleUse()) { leaderStore.Initialize(); follower1.Initialize(); follower2.Initialize(); var(index, _) = await CreateDatabaseInCluster(databaseName, 3, leader.WebUrl); await Cluster.WaitForRaftIndexToBeAppliedInClusterAsync(index, TimeSpan.FromSeconds(30)); var leaderRequestExecutor = leaderStore.GetRequestExecutor(); //wait until all nodes in database cluster are members (and not promotables) //GetDatabaseTopologyCommand -> does not retrieve promotables var topology = new Topology(); while (topology.Nodes?.Count != 3) { var topologyGetCommand = new GetDatabaseTopologyCommand(); await leaderRequestExecutor.ExecuteAsync(topologyGetCommand, context); topology = topologyGetCommand.Result; Thread.Sleep(50); } foreach (var server in Servers) { await server.ServerStore.Cluster.WaitForIndexNotification(index); } using (var session = leaderStore.OpenSession()) { session.Store(new User { Name = "John Dow" }); session.Store(new User { Name = "Jack Dow" }); session.Store(new User { Name = "Jane Dow" }); session.Store(new User { Name = "FooBar" }, "marker"); session.SaveChanges(); await WaitForDocumentInClusterAsync <User>(nodes, 
databaseName, "marker", x => true, leader.ServerStore.Configuration.Cluster.OperationTimeout.AsTimeSpan); } using (var requestExecutor = RequestExecutor.Create(follower1.Urls, databaseName, null, follower1.Conventions)) { do //make sure there are three nodes in the topology { await Task.Delay(100); } while (requestExecutor.TopologyNodes == null); DisposeServerAndWaitForFinishOfDisposal(leader); var failedRequests = new HashSet <(string, Exception)>(); requestExecutor.OnFailedRequest += (sender, args) => failedRequests.Add((args.Url, args.Exception)); using (var tmpContext = JsonOperationContext.ShortTermSingleUse()) { for (var sessionId = 0; sessionId < 5; sessionId++) { requestExecutor.Cache.Clear(); //make sure we do not use request cache await requestExecutor.ExecuteAsync(new GetStatisticsOperation().GetCommand(DocumentConventions.Default, tmpContext), tmpContext); } } } } }
/// <summary>Creates a library manager that issues its calls through the given request executor.</summary>
internal LibraryManager(RequestExecutor requestExecutor) => _requestExecutor = requestExecutor;