// Verifies that with FailoverBehavior.ReadFromAllServers the informer round-robins GET
// requests across the master ("http://localhost:1") and the three destinations; the recorded
// (request-number, url) pairs must match the expected striping sequence for 10 requests.
// NOTE(review): "GetExepctedUrlForReadStriping" is misspelled, but it must match the helper
// defined elsewhere in this file — rename both together if ever fixed.
public void ReadStriping() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { "http://localhost:2", "http://localhost:3", "http://localhost:4", } }; var urlsTried = new List <Tuple <int, string> >(); for (int i = 0; i < 10; i++) { var req = i + 1; replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", req, req, url => { urlsTried.Add(Tuple.Create(req, url)); return(1); }); } var expectedUrls = GetExepctedUrlForReadStriping().Take(urlsTried.Count).ToList(); Assert.Equal(expectedUrls, urlsTried); }
// Read striping: with ReadFromAllServers, successive requests must rotate across the
// master and every replication destination. The expected rotation comes from the
// GetExpectedUrlForReadStriping helper defined elsewhere in this fixture.
public void ReadStriping()
{
    var conventions = new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers };
    var informer = new ReplicationInformer(conventions, new HttpJsonRequestFactory(MaxNumber))
    {
        ReplicationDestinations =
        {
            new OperationMetadata("http://localhost:2"),
            new OperationMetadata("http://localhost:3"),
            new OperationMetadata("http://localhost:4"),
        }
    };

    var attempts = new List<Tuple<int, string>>();
    for (var requestNumber = 1; requestNumber <= 10; requestNumber++)
    {
        // Capture per-iteration so each closure records its own request number.
        var current = requestNumber;
        informer.ExecuteWithReplicationAsync("GET", "http://localhost:1",
            new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials),
            current, current,
            async url =>
            {
                attempts.Add(Tuple.Create(current, url.Url));
                return 1;
            }).Wait();
    }

    var expected = GetExpectedUrlForReadStriping().Take(attempts.Count).ToList();
    Assert.Equal(expected, attempts);
}
// Builds an AsyncServerClient for a monitored server record using FailImmediately failover
// (no silent fallback to replicas). Credentials come from the explicit argument first; otherwise
// they are loaded by CredentialsId, and a dangling CredentialsId is cleared when the document is gone.
public static async Task<AsyncServerClient> CreateAsyncServerClient(IAsyncDocumentSession session, ServerRecord server, ServerCredentials serverCredentials = null) { var documentStore = (DocumentStore)session.Advanced.DocumentStore; var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.FailImmediately }); ICredentials credentials = null; if (serverCredentials != null) { credentials = serverCredentials.GetCredentials(); } else if (server.CredentialsId != null) { serverCredentials = await session.LoadAsync<ServerCredentials>(server.CredentialsId); if (serverCredentials == null) { server.CredentialsId = null; } else { credentials = serverCredentials.GetCredentials(); } } return new AsyncServerClient(server.Url, documentStore.Conventions, credentials, documentStore.JsonRequestFactory, null, s => replicationInformer, null, new IDocumentConflictListener[0]); }
// With read striping enabled, a timing-out request must be retried exactly once: first on the
// striped destination (localhost:3 for this request number), then on the master (localhost:1),
// after which the WebException(Timeout) propagates to the caller.
// The unreachable "return 1;" after the throw is required for lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }; var urlsTried = new List<string>(); var webException = Assert.Throws<WebException>(() => { replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, url => { urlsTried.Add(url.Url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return 1; }); }); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master Assert.Equal(WebExceptionStatus.Timeout, webException.Status); }
// Duplicate variant of the legacy read-striping test: ReadFromAllServers must rotate GET
// requests across master + destinations; recorded (request, url) pairs are compared against
// the expected sequence. NOTE(review): "GetExepctedUrlForReadStriping" is misspelled but must
// match the helper defined elsewhere in this file.
public void ReadStriping() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { "http://localhost:2", "http://localhost:3", "http://localhost:4", } }; var urlsTried = new List<Tuple<int, string>>(); for (int i = 0; i < 10; i++) { var req = i + 1; replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", req, req, url => { urlsTried.Add(Tuple.Create(req, url)); return 1; }); } var expectedUrls = GetExepctedUrlForReadStriping().Take(urlsTried.Count).ToList(); Assert.Equal(expectedUrls, urlsTried); }
// Wires up the sync and async command generators. The sync ServerClient (non-Silverlight only)
// shares one ReplicationInformer; the async client (non-.NET 3.5 only) is created without one.
// Both generators scope the client to DefaultDatabase when it is set.
// NOTE(review): left byte-identical — the #if regions make restructuring unsafe without
// knowing which build configurations are active.
/// <summary> /// Initialize the document store access method to RavenDB /// </summary> protected virtual void InitializeInternal() { #if !SILVERLIGHT var replicationInformer = new ReplicationInformer(Conventions); databaseCommandsGenerator = () => { var serverClient = new ServerClient(Url, Conventions, credentials, replicationInformer, jsonRequestFactory, currentSessionId); if (string.IsNullOrEmpty(DefaultDatabase)) { return(serverClient); } return(serverClient.ForDatabase(DefaultDatabase)); }; #endif #if !NET_3_5 asyncDatabaseCommandsGenerator = () => { var asyncServerClient = new AsyncServerClient(Url, Conventions, credentials, jsonRequestFactory, currentSessionId); if (string.IsNullOrEmpty(DefaultDatabase)) { return(asyncServerClient); } return(asyncServerClient.ForDatabase(DefaultDatabase)); }; #endif }
/// <summary>
/// Creates a changes-API connection for one database and immediately starts connecting.
/// The exposed Task completes with this instance once the connection is established.
/// </summary>
public RemoteDatabaseChanges(
    string url,
    string apiKey,
    ICredentials credentials,
    HttpJsonRequestFactory jsonRequestFactory,
    DocumentConvention conventions,
    ReplicationInformer replicationInformer,
    Action onDispose,
    Func<string, Etag, string[], OperationMetadata, Task<bool>> tryResolveConflictByUsingRegisteredConflictListenersAsync)
{
    // Default status-change handler just logs.
    ConnectionStatusChanged = LogOnConnectionStatusChanged;

    // Unique connection id: monotonic counter plus a random base62 suffix.
    id = Interlocked.Increment(ref connectionCounter) + "/" + Base62Util.Base62Random();

    this.url = url;
    this.credentials = new OperationCredentials(apiKey, credentials);
    this.jsonRequestFactory = jsonRequestFactory;
    this.conventions = conventions;
    this.replicationInformer = replicationInformer;
    this.onDispose = onDispose;
    this.tryResolveConflictByUsingRegisteredConflictListenersAsync = tryResolveConflictByUsingRegisteredConflictListenersAsync;

    // Kick off the connection; ObserveException prevents unobserved task faults,
    // and the continuation rethrows any failure before handing out the instance.
    Task = EstablishConnection()
        .ObserveException()
        .ContinueWith(task =>
        {
            task.AssertNotFailed();
            return (IDatabaseChanges)this;
        });
}
// Async variant: with AllowReadsFromSecondariesAndWritesToSecondaries, a timeout on the master
// (localhost:1) is retried once on the first secondary (localhost:2) and then surfaces. The
// WebException is unwrapped from the AggregateException produced by .Wait(); the unreachable
// "return 1;" after the throw is required for async-lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeout() { using (var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.AllowReadsFromSecondariesAndWritesToSecondaries }) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }) { var urlsTried = new List<string>(); var webException = (WebException)Assert.Throws<AggregateException>(() => { replicationInformer.ExecuteWithReplicationAsync("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, async url => { urlsTried.Add(url.Url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return 1; }).Wait(); }).ExtractSingleInnerException(); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:1", urlsTried[0]); Assert.Equal("http://localhost:2", urlsTried[1]); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); } }
// Async read-striping variant: request 1 is striped onto localhost:3 first, then retried on the
// master localhost:1; the second timeout propagates. The lambda throws synchronously (no async
// keyword), so the failure is still wrapped in an AggregateException by .Wait() and unwrapped here.
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled() { using (var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }, new HttpJsonRequestFactory(MaxNumber)) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }) { var urlsTried = new List<string>(); var webException = (WebException) Assert.Throws<AggregateException>(() => replicationInformer.ExecuteWithReplicationAsync<int>("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, url => { urlsTried.Add(url.Url); throw new WebException("Timeout", WebExceptionStatus.Timeout); }).Wait()).ExtractSingleInnerException(); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master Assert.Equal(WebExceptionStatus.Timeout, webException.Status); } }
// Async read-striping variant that returns an already-faulted task (CompletedTask<int> wrapping
// the WebException) instead of throwing. Expected order: striped node localhost:3 first, then
// the master localhost:1, after which the timeout is extracted from the AggregateException.
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled_Async() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }; var urlsTried = new List <string>(); var aggregateException = Assert.Throws <AggregateException>(() => replicationInformer.ExecuteWithReplicationAsync <int>("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, url => { urlsTried.Add(url.Url); return(new CompletedTask <int>(new WebException("Timeout", WebExceptionStatus.Timeout))); }).Wait() ); var webException = aggregateException.ExtractSingleInnerException() as WebException; Assert.NotNull(webException); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master }
// Sync variant: a timeout on the master is retried exactly once on the first secondary
// (localhost:2) and then the WebException surfaces directly (no AggregateException wrapping).
// The unreachable "return(1);" after the throw is required for lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeout() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.AllowReadsFromSecondariesAndWritesToSecondaries }) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }; var urlsTried = new List <string>(); var webException = Assert.Throws <WebException>(() => { replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, url => { urlsTried.Add(url.Url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return(1); }); }); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:1", urlsTried[0]); Assert.Equal("http://localhost:2", urlsTried[1]); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); }
// Deletes all points of (type, key) within [start, end], with failover across replicas.
// Validates that type/key are non-empty and that the range is ordered before touching the network.
// NOTE(review): replication routing is declared as HttpMethods.Post while the actual HTTP request
// is created with HttpMethods.Delete — presumably intentional (routing vs. verb), but confirm
// against ExecuteWithReplicationAsync's contract.
public async Task DeleteRangeAsync(string type, string key, DateTimeOffset start, DateTimeOffset end, CancellationToken token = new CancellationToken()) { AssertInitialized(); if (string.IsNullOrEmpty(type) || string.IsNullOrEmpty(key)) { throw new InvalidOperationException("Data is invalid"); } if (start > end) { throw new InvalidOperationException("start cannot be greater than end"); } await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false); await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Post, async (url, timeSeriesName) => { var requestUriString = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/delete-range/{2}?key={3}", url, timeSeriesName, type, Uri.EscapeDataString(key)); using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Delete)) { await request.WriteWithObjectAsync(new TimeSeriesDeleteRange { Type = type, Key = key, Start = start, End = end }).ConfigureAwait(false); return(await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false)); } }, token).ConfigureAwait(false); }
/// <summary>
/// Creates (or redefines) a time series type with the given fields, retrying against
/// replicas on failure.
/// </summary>
/// <param name="type">Type name; must be non-empty.</param>
/// <param name="fields">Field names for the type; must contain at least one entry.</param>
/// <param name="token">Cancellation token propagated to the HTTP call.</param>
/// <exception cref="InvalidOperationException">
/// Thrown when <paramref name="type"/> is empty or <paramref name="fields"/> is null or empty.
/// </exception>
public async Task CreateTypeAsync(string type, string[] fields, CancellationToken token = new CancellationToken())
{
    AssertInitialized();

    if (string.IsNullOrEmpty(type))
        throw new InvalidOperationException("Prefix cannot be empty");

    // Fix: also guard against a null array — previously "fields.Length" threw a
    // NullReferenceException instead of the intended validation error.
    if (fields == null || fields.Length < 1)
        throw new InvalidOperationException("Number of fields should be at least 1");

    await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false);

    await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Put, async (url, timeSeriesName) =>
    {
        var requestUriString = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/types/{2}", url, timeSeriesName, type);
        using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Put))
        {
            await request.WriteWithObjectAsync(new TimeSeriesType { Type = type, Fields = fields }).ConfigureAwait(false);
            return await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false);
        }
    }, token).ConfigureAwait(false);
}
// Wires up the lazily-invoked sync and async command generators. Both share one
// ReplicationInformer built from the store's conventions, and both scope the client
// to DefaultDatabase when it is set.
protected virtual void InitializeInternal()
{
    var replicationInformer = new ReplicationInformer(this.Conventions);

    this.databaseCommandsGenerator = (Func<IDatabaseCommands>)(() =>
    {
        var serverClient = new ServerClient(this.Url, this.Conventions, this.credentials, replicationInformer, this.jsonRequestFactory, DocumentStore.currentSessionId);
        return string.IsNullOrEmpty(this.DefaultDatabase)
            ? (IDatabaseCommands)serverClient
            : serverClient.ForDatabase(this.DefaultDatabase);
    });

    this.asyncDatabaseCommandsGenerator = (Func<IAsyncDatabaseCommands>)(() =>
    {
        var asyncServerClient = new AsyncServerClient(this.Url, this.Conventions, this.credentials, this.jsonRequestFactory, DocumentStore.currentSessionId);
        return string.IsNullOrEmpty(this.DefaultDatabase)
            ? (IAsyncDatabaseCommands)asyncServerClient
            : asyncServerClient.ForDatabase(this.DefaultDatabase);
    });
}
// Runs an operation through the replication informer, numbering each request for striping and
// unwrapping single-cause AggregateExceptions so callers see the original failure.
// NOTE(review): the currentlyExecuting check-then-set is not atomic, so the single-concurrency
// guard is best-effort only — confirm whether races here matter to callers.
// NOTE(review): "AllowMultipuleAsyncOperations" and "ReadStrippingBase" are misspelled members
// declared elsewhere; they must be renamed at their declaration sites, not here.
internal async Task <T> ExecuteWithReplication <T>(HttpMethod method, Func <OperationMetadata, IRequestTimeMetric, Task <T> > operation) { var currentRequest = Interlocked.Increment(ref requestCount); if (currentlyExecuting && Conventions.AllowMultipuleAsyncOperations == false) { throw new InvalidOperationException("Only a single concurrent async request is allowed per async client instance."); } currentlyExecuting = true; try { return(await ReplicationInformer .ExecuteWithReplicationAsync(method, BaseUrl, CredentialsThatShouldBeUsedOnlyInOperationsWithoutReplication, currentRequest, ReadStrippingBase, operation) .ConfigureAwait(false)); } catch (AggregateException e) { var singleException = e.ExtractSingleInnerException(); if (singleException != null) { throw singleException; } throw; } finally { currentlyExecuting = false; } }
// Appends one multi-value point at 'at' for (type, key), with failover across replicas.
// NOTE(review): the "at < DateTimeOffset.MinValue" validation can never be true (MinValue is the
// smallest representable value), so the timestamp check is dead — it was probably meant to reject
// default/uninitialized dates; confirm intent before changing.
public async Task AppendAsync(string type, string key, DateTimeOffset at, CancellationToken token, params double[] values) { AssertInitialized(); if (string.IsNullOrEmpty(type) || string.IsNullOrEmpty(key) || at < DateTimeOffset.MinValue || values == null || values.Length == 0) { throw new InvalidOperationException("Append data is invalid"); } await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false); await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Put, async (url, timeSeriesName) => { var requestUriString = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/append/{2}?key={3}", url, timeSeriesName, type, Uri.EscapeDataString(key)); using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Put)) { await request.WriteWithObjectAsync(new TimeSeriesFullPoint { Type = type, Key = key, At = at, Values = values }).ConfigureAwait(false); return(await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false)); } }, token).ConfigureAwait(false); }
// Back-off strategy: while the primary keeps failing with ConnectFailure, requests should be
// served by the surviving destination and the primary re-probed only at growing intervals.
// The exact probe pattern comes from GetExpectedUrlForFailure, defined elsewhere in this fixture.
public void BackoffStrategy()
{
    var informer = new ReplicationInformer(new DocumentConvention())
    {
        ReplicationDestinations = { new OperationMetadata("http://localhost:2") }
    };

    var attempts = new List<Tuple<int, string>>();
    for (var requestNumber = 1; requestNumber <= 5000; requestNumber++)
    {
        // Capture per-iteration so each closure records its own request number.
        var current = requestNumber;
        informer.ExecuteWithReplication("GET", "http://localhost:1",
            new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials),
            current, 1,
            url =>
            {
                attempts.Add(Tuple.Create(current, url.Url));
                if (url.Url.EndsWith("1"))
                    throw new WebException("bad", WebExceptionStatus.ConnectFailure);
                return 1;
            });
    }

    var expected = GetExpectedUrlForFailure().Take(attempts.Count).ToList();
    Assert.Equal(expected, attempts);
}
/// <summary>
/// Builds an AsyncServerClient for a monitored server record. Failover is FailImmediately
/// because monitoring one server must never silently fall back to a replica.
/// Credentials resolution order: explicit argument, then the ServerCredentials document
/// referenced by server.CredentialsId (a dangling reference is cleared).
/// </summary>
public static async Task<AsyncServerClient> CreateAsyncServerClient(IAsyncDocumentSession session, ServerRecord server, ServerCredentials serverCredentials = null)
{
    var documentStore = (DocumentStore)session.Advanced.DocumentStore;
    var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.FailImmediately });

    ICredentials credentials = null;
    if (serverCredentials != null)
    {
        credentials = serverCredentials.GetCredentials();
    }
    else if (server.CredentialsId != null)
    {
        serverCredentials = await session.LoadAsync<ServerCredentials>(server.CredentialsId);
        if (serverCredentials == null)
            server.CredentialsId = null;   // clear dangling reference
        else
            credentials = serverCredentials.GetCredentials();
    }

    return new AsyncServerClient(server.Url, documentStore.Conventions, credentials,
        documentStore.JsonRequestFactory, null, s => replicationInformer, null,
        new IDocumentConflictListener[0]);
}
// Async read-striping variant using the HttpMethods.Get + credentials + metric-argument overload;
// each lambda returns an already-completed task. Ten requests must rotate across master and
// destinations in the order produced by GetExpectedUrlForReadStriping.
public void ReadStriping() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }, new HttpJsonRequestFactory(MaxNumber)) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4"), } }; var urlsTried = new List <Tuple <int, string> >(); for (int i = 0; i < 10; i++) { var req = i + 1; replicationInformer.ExecuteWithReplicationAsync <int>(HttpMethods.Get, "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), null, req, req, url => { urlsTried.Add(Tuple.Create(req, url.Url)); return(new CompletedTask <int>(1)); }).Wait(); } var expectedUrls = GetExpectedUrlForReadStriping().Take(urlsTried.Count).ToList(); Assert.Equal(expectedUrls, urlsTried); }
// Disposable-informer variant of the async read-striping timeout test: striped node localhost:3
// is tried first, then the master localhost:1, then the WebException(Timeout) is unwrapped from
// the AggregateException produced by .Wait(). The unreachable "return(1);" after the throw is
// required for async-lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled() { using (var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }, new HttpJsonRequestFactory(MaxNumber)) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }) { var urlsTried = new List <string>(); var webException = (WebException)Assert.Throws <AggregateException>(() => { replicationInformer.ExecuteWithReplicationAsync("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, async url => { urlsTried.Add(url.Url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return(1); }).Wait(); }).ExtractSingleInnerException(); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master Assert.Equal(WebExceptionStatus.Timeout, webException.Status); } }
// Legacy-API back-off test (ReplicationDestinationData, no credentials): a primary failing with
// ConnectFailure must be re-probed only at growing intervals while localhost:2 serves requests.
// NOTE(review): "GetExepctedUrlForFailure" is misspelled but must match the helper defined
// elsewhere in this file — rename both together if ever fixed.
public void BackoffStrategy() { var replicationInformer = new ReplicationInformer(new DocumentConvention()) { ReplicationDestinations = { new ReplicationDestinationData { Url = "http://localhost:2" }, } }; var urlsTried = new List <Tuple <int, string> >(); for (int i = 0; i < 5000; i++) { var req = i + 1; replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", req, 1, url => { urlsTried.Add(Tuple.Create(req, url)); if (url.EndsWith("1")) { throw new WebException("bad", WebExceptionStatus.ConnectFailure); } return(1); }); } var expectedUrls = GetExepctedUrlForFailure().Take(urlsTried.Count).ToList(); Assert.Equal(expectedUrls, urlsTried); }
// Async second-timeout test returning an already-faulted task per attempt: the master
// localhost:1 is tried first, then localhost:2, after which the WebException(Timeout) is
// extracted from the AggregateException raised by .Wait().
public void ReplicationInformerShouldThrowAfterSecondTimeout_Async() { using (var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.AllowReadsFromSecondariesAndWritesToSecondaries }, new HttpJsonRequestFactory(MaxNumber)) { ReplicationDestinations = { new OperationMetadata("http://localhost:2"), new OperationMetadata("http://localhost:3"), new OperationMetadata("http://localhost:4") } }) { var urlsTried = new List <string>(); var aggregateException = Assert.Throws <AggregateException>(() => replicationInformer.ExecuteWithReplicationAsync <int>("GET", "http://localhost:1", new OperationCredentials(null, CredentialCache.DefaultNetworkCredentials), 1, 1, url => { urlsTried.Add(url.Url); return(new CompletedTask <int>(new WebException("Timeout", WebExceptionStatus.Timeout))); }).Wait() ); var webException = aggregateException.ExtractSingleInnerException() as WebException; Assert.NotNull(webException); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:1", urlsTried[0]); Assert.Equal("http://localhost:2", urlsTried[1]); } }
// Fetches the overall total of one counter, with failover across replicas; a 404 from the server
// is translated into InvalidOperationException (counter/group not found).
// NOTE(review): groupName/counterName are interpolated into the query string without
// Uri.EscapeDataString — names containing '&', '=' or spaces would corrupt the URL; confirm
// whether callers guarantee safe names.
public async Task <CounterTotal> GetOverallTotalAsync(string groupName, string counterName, CancellationToken token = default(CancellationToken)) { AssertInitialized(); return(await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Get, async (url, counterStoreName) => { var requestUriString = $"{url}/cs/{counterStoreName}/getCounterOverallTotal?groupName={groupName}&counterName={counterName}"; using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Get)) { try { var response = await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false); return response.ToObject <CounterTotal>(); } catch (ErrorResponseException e) { if (e.StatusCode == HttpStatusCode.NotFound) { throw new InvalidOperationException(e.Message, e); } throw; } } }, token).WithCancellation(token).ConfigureAwait(false)); }
/// <summary>
/// Deletes one counter identified by (groupName, counterName), with failover across replicas.
/// The replica topology is refreshed first; cancellation is honored at every await point.
/// </summary>
public async Task DeleteAsync(string groupName, string counterName, CancellationToken token = new CancellationToken())
{
    AssertInitialized();

    await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().WithCancellation(token).ConfigureAwait(false);

    await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Post, async (url, counterStoreName) =>
    {
        var deleteUri = $"{url}/cs/{counterStoreName}/delete/?groupName={groupName}&counterName={counterName}";
        using (var request = CreateHttpJsonRequest(deleteUri, HttpMethods.Delete))
        {
            return await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false);
        }
    }, token).WithCancellation(token).ConfigureAwait(false);
}
/// <summary>
/// Creates a changes-API connection and starts connecting immediately; the exposed Task
/// tracks the connection attempt with its exceptions observed.
/// </summary>
public RemoteDatabaseChanges(string url, ICredentials credentials, HttpJsonRequestFactory jsonRequestFactory, DocumentConvention conventions, ReplicationInformer replicationInformer, Action onDispose)
{
    // Unique connection id: monotonic counter plus a random base62 suffix.
    id = Interlocked.Increment(ref connectionCounter) + "/" + Base62Util.Base62Random();

    this.url = url;
    this.credentials = credentials;
    this.jsonRequestFactory = jsonRequestFactory;
    this.conventions = conventions;
    this.replicationInformer = replicationInformer;
    this.onDispose = onDispose;

    // ObserveException keeps a connection failure from becoming an unobserved task fault.
    Task = EstablishConnection()
        .ObserveException();
}
// Fetches replication statistics for this time series storage from the current Url (no
// failover is attempted for this read — it intentionally targets the node it is asked about,
// after refreshing the topology).
public async Task <List <TimeSeriesReplicationStats> > GetTimeSeriesReplicationStatsAsync(CancellationToken token = default(CancellationToken)) { AssertInitialized(); await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false); var requestUriString = String.Format("{0}ts/{1}/replications/stats", Url, Name); using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Get)) { var response = await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false); return(response.ToObject <List <TimeSeriesReplicationStats> >(JsonSerializer)); } }
/// <summary>
/// Reads the metrics document of this counter storage from the current Url
/// (topology is refreshed first; the request itself is not failed over).
/// </summary>
public async Task<CountersStorageMetrics> GetCounterMetricsAsync(CancellationToken token = default(CancellationToken))
{
    AssertInitialized();
    await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false);

    var metricsUri = $"{Url}/cs/{Name}/metrics";
    using (var request = CreateHttpJsonRequest(metricsUri, HttpMethods.Get))
    {
        var json = await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false);
        return json.ToObject<CountersStorageMetrics>(JsonSerializer);
    }
}
/// <summary>
/// Fetches replication statistics for this counter storage, paged with skip/take
/// (topology is refreshed first; the request itself targets the current Url).
/// </summary>
/// <param name="token">Cancellation token propagated to the HTTP call.</param>
/// <param name="skip">Number of entries to skip.</param>
/// <param name="take">Maximum number of entries to return.</param>
public async Task<IReadOnlyList<CounterStorageReplicationStats>> GetCounterReplicationStatsAsync(
    CancellationToken token = default(CancellationToken), int skip = 0, int take = 1024)
{
    AssertInitialized();
    await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false);

    // Fix: the query string previously began with '&' instead of '?'
    // ("...replications/stats&skip=..."), producing a malformed URL in which the
    // paging parameters were part of the path and never parsed by the server.
    var requestUriString = $"{Url}/cs/{Name}/replications/stats?skip={skip}&take={take}";
    using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Get))
    {
        var response = await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false);
        return response.ToObject<List<CounterStorageReplicationStats>>(JsonSerializer);
    }
}
// Deletes a batch of individual points, with failover across replicas; the point ids are
// written as the request body.
// NOTE(review): replication routing is declared as HttpMethods.Post while the actual HTTP
// request uses HttpMethods.Delete — same pattern as the sibling delete methods; confirm it is
// intentional. 'points' is not null-checked before being serialized.
public async Task DeletePointsAsync(IEnumerable <TimeSeriesPointId> points, CancellationToken token = new CancellationToken()) { AssertInitialized(); await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false); await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Post, async (url, timeSeriesName) => { var requestUriString = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/delete-points", url, timeSeriesName); using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Delete)) { await request.WriteWithObjectAsync(points).ConfigureAwait(false); return(await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false)); } }, token).ConfigureAwait(false); }
/// <summary>
/// Base initialization for async server clients: normalizes the URL, stores collaborators,
/// and defers creation of the replication informer and the read-striping base.
/// </summary>
protected AsyncServerClientBase(string serverUrl, TConvention convention, OperationCredentials credentials, HttpJsonRequestFactory jsonRequestFactory, Guid? sessionId, NameValueCollection operationsHeaders)
{
    WasDisposed = false;

    // Trim the trailing slash so later path concatenation never yields "//".
    ServerUrl = serverUrl.TrimEnd('/');
    Conventions = convention;
    CredentialsThatShouldBeUsedOnlyInOperationsWithoutReplication = credentials;
    RequestFactory = jsonRequestFactory;
    SessionId = sessionId;
    OperationsHeaders = operationsHeaders ?? new NameValueCollection();

    // Both lazies are created thread-safe; the striping base depends on the informer,
    // so it is resolved only after the informer itself has been created.
    replicationInformer = new Lazy<TReplicationInformer>(GetReplicationInformer, true);
    readStrippingBase = new Lazy<int>(() => ReplicationInformer.GetReadStripingBase(true), true);

    MaxQuerySizeForGetRequest = 8 * 1024;
}
// Base initialization variant that accepts a per-resource replication-informer factory and falls
// back to defaults (new TConvention, default request factory/headers/getter) when arguments are
// null. Both Lazy instances are thread-safe; the striping base is derived from the informer and
// therefore resolved after it. NOTE(review): "readStrippingBase" is misspelled at its declaration
// site elsewhere — it must stay as-is here.
protected AsyncServerClientBase(string serverUrl, TConvention convention, OperationCredentials credentials, HttpJsonRequestFactory jsonRequestFactory, Guid?sessionId, NameValueCollection operationsHeaders, Func <string, TReplicationInformer> replicationInformerGetter, string resourceName) { WasDisposed = false; ServerUrl = serverUrl.TrimEnd('/'); Conventions = convention ?? new TConvention(); CredentialsThatShouldBeUsedOnlyInOperationsWithoutReplication = credentials; RequestFactory = jsonRequestFactory ?? new HttpJsonRequestFactory(DefaultNumberOfCachedRequests); SessionId = sessionId; OperationsHeaders = operationsHeaders ?? DefaultNameValueCollection; ReplicationInformerGetter = replicationInformerGetter ?? DefaultReplicationInformerGetter(); replicationInformer = new Lazy <TReplicationInformer>(() => ReplicationInformerGetter(resourceName), true); readStrippingBase = new Lazy <int>(() => ReplicationInformer.GetReadStripingBase(true), true); MaxQuerySizeForGetRequest = 8 * 1024; }
// Legacy-API async second-timeout test (ReplicationDestinationData, plain string urls, no
// credentials): master localhost:1 first, then localhost:2, after which the WebException(Timeout)
// is extracted from the AggregateException raised by .Wait().
public void ReplicationInformerShouldThrowAfterSecondTimeout_Async() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.AllowReadsFromSecondariesAndWritesToSecondaries }) { ReplicationDestinations = { new ReplicationDestinationData { Url = "http://localhost:2" }, new ReplicationDestinationData { Url = "http://localhost:3" }, new ReplicationDestinationData { Url = "http://localhost:4" }, } }; var urlsTried = new List <string>(); var aggregateException = Assert.Throws <AggregateException>(() => replicationInformer.ExecuteWithReplicationAsync <int>("GET", "http://localhost:1", 1, 1, url => { urlsTried.Add(url); return(new CompletedTask <int>(new WebException("Timeout", WebExceptionStatus.Timeout))); }).Wait() ); var webException = aggregateException.ExtractSingleInnerException() as WebException; Assert.NotNull(webException); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:1", urlsTried[0]); Assert.Equal("http://localhost:2", urlsTried[1]); }
// Changes-API connection (no apiKey/conflict-listener support in this variant): assigns
// collaborators, generates a unique connection id (counter + random base62 suffix), and starts
// connecting immediately. The continuation rethrows any connection failure via AssertNotFailed
// before exposing this instance as IDatabaseChanges.
public RemoteDatabaseChanges(string url, ICredentials credentials, HttpJsonRequestFactory jsonRequestFactory, DocumentConvention conventions, ReplicationInformer replicationInformer, Action onDispose) { ConnectionStatusChanged = LogOnConnectionStatusChanged; id = Interlocked.Increment(ref connectionCounter) + "/" + Base62Util.Base62Random(); this.url = url; this.credentials = credentials; this.jsonRequestFactory = jsonRequestFactory; this.conventions = conventions; this.replicationInformer = replicationInformer; this.onDispose = onDispose; Task = EstablishConnection() .ObserveException() .ContinueWith(task => { task.AssertNotFailed(); return((IDatabaseChanges)this); }); }
// Legacy-API read-striping timeout test (ReplicationDestinationData, plain string urls): request
// 1 is striped onto localhost:3 first, then retried on the master localhost:1, after which the
// WebException(Timeout) surfaces directly. The unreachable "return(1);" after the throw is
// required for lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { new ReplicationDestinationData { Url = "http://localhost:2" }, new ReplicationDestinationData { Url = "http://localhost:3" }, new ReplicationDestinationData { Url = "http://localhost:4" }, } }; var urlsTried = new List <string>(); var webException = Assert.Throws <WebException>(() => { replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", 1, 1, url => { urlsTried.Add(url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return(1); }); }); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master Assert.Equal(WebExceptionStatus.Timeout, webException.Status); }
// Legacy-API second-timeout test: master localhost:1 is tried first, then the first secondary
// localhost:2, after which the WebException(Timeout) surfaces directly. The unreachable
// "return 1;" after the throw is required for lambda return-type inference.
public void ReplicationInformerShouldThrowAfterSecondTimeout() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.AllowReadsFromSecondariesAndWritesToSecondaries }) { ReplicationDestinations = { new ReplicationDestinationData { Url = "http://localhost:2" }, new ReplicationDestinationData { Url = "http://localhost:3" }, new ReplicationDestinationData { Url = "http://localhost:4" }, } }; var urlsTried = new List<string>(); var webException = Assert.Throws<WebException>(() => { replicationInformer.ExecuteWithReplication("GET", "http://localhost:1", 1, 1, url => { urlsTried.Add(url); throw new WebException("Timeout", WebExceptionStatus.Timeout); return 1; }); }); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:1", urlsTried[0]); Assert.Equal("http://localhost:2", urlsTried[1]); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); }
/// <summary>
/// Deletes a time series type definition, retrying against replicas on failure.
/// </summary>
/// <param name="type">Type name; must be non-empty.</param>
/// <param name="token">Cancellation token propagated to the HTTP call.</param>
/// <exception cref="InvalidOperationException">Thrown when <paramref name="type"/> is empty.</exception>
public async Task DeleteTypeAsync(string type, CancellationToken token = default(CancellationToken))
{
    AssertInitialized();

    if (string.IsNullOrEmpty(type))
        throw new InvalidOperationException("Prefix cannot be empty");

    await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false);

    await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Delete, async (url, timeSeriesName) =>
    {
        var deleteUri = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/types/{2}", url, timeSeriesName, type);
        using (var request = CreateHttpJsonRequest(deleteUri, HttpMethods.Delete))
        {
            return await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false);
        }
    }, token).ConfigureAwait(false);
}
// Deletes every point under (type, key), with failover across replicas; type and key are
// validated as non-empty first and the key is URL-escaped.
// NOTE(review): replication routing is declared as HttpMethods.Post while the actual HTTP
// request uses HttpMethods.Delete — same pattern as the sibling delete methods; confirm intent.
public async Task DeleteKeyAsync(string type, string key, CancellationToken token = new CancellationToken()) { AssertInitialized(); if (string.IsNullOrEmpty(type) || string.IsNullOrEmpty(key)) { throw new InvalidOperationException("Data is invalid"); } await ReplicationInformer.UpdateReplicationInformationIfNeededAsync().ConfigureAwait(false); await ReplicationInformer.ExecuteWithReplicationAsync(Url, HttpMethods.Post, async (url, timeSeriesName) => { var requestUriString = string.Format(CultureInfo.InvariantCulture, "{0}ts/{1}/delete-key/{2}?key={3}", url, timeSeriesName, type, Uri.EscapeDataString(key)); using (var request = CreateHttpJsonRequest(requestUriString, HttpMethods.Delete)) { return(await request.ReadResponseJsonAsync().WithCancellation(token).ConfigureAwait(false)); } }, token).ConfigureAwait(false); }
// Legacy-API async read-striping timeout test: each attempt returns an already-faulted task.
// Expected order: striped node localhost:3 first, then master localhost:1, after which the
// WebException(Timeout) is extracted from the AggregateException raised by .Wait().
public void ReplicationInformerShouldThrowAfterSecondTimeoutIfReadStripingEnabled_Async() { var replicationInformer = new ReplicationInformer(new DocumentConvention { FailoverBehavior = FailoverBehavior.ReadFromAllServers }) { ReplicationDestinations = { new ReplicationDestinationData { Url = "http://localhost:2" }, new ReplicationDestinationData { Url = "http://localhost:3" }, new ReplicationDestinationData { Url = "http://localhost:4" }, } }; var urlsTried = new List<string>(); var aggregateException = Assert.Throws<AggregateException>(() => replicationInformer.ExecuteWithReplicationAsync<int>("GET", "http://localhost:1", 1, 1, url => { urlsTried.Add(url); return new CompletedTask<int>(new WebException("Timeout", WebExceptionStatus.Timeout)); }).Wait() ); var webException = aggregateException.ExtractSingleInnerException() as WebException; Assert.NotNull(webException); Assert.Equal(WebExceptionStatus.Timeout, webException.Status); Assert.Equal(2, urlsTried.Count); Assert.Equal("http://localhost:3", urlsTried[0]); // striped Assert.Equal("http://localhost:1", urlsTried[1]); // master }
// When a request is being served by a node other than the primary AND the primary has recorded
// failures (i.e. we are actually failed over), attaches headers telling the server which primary
// we last saw failing and when, and remembers the urls/callback for later status handling.
// Returns this for fluent chaining.
public HttpJsonRequest AddReplicationStatusHeaders(string thePrimaryUrl, string currentUrl, ReplicationInformer replicationInformer, FailoverBehavior failoverBehavior, Action<NameValueCollection, string, string> handleReplicationStatusChanges) { if (thePrimaryUrl.Equals(currentUrl, StringComparison.InvariantCultureIgnoreCase)) return this; if (replicationInformer.GetFailureCount(thePrimaryUrl) <= 0) return this; // not because of failover, no need to do this. var lastPrimaryCheck = replicationInformer.GetFailureLastCheck(thePrimaryUrl); webRequest.Headers.Add(Constants.RavenClientPrimaryServerUrl, ToRemoteUrl(thePrimaryUrl)); webRequest.Headers.Add(Constants.RavenClientPrimaryServerLastCheck, lastPrimaryCheck.ToString("s")); primaryUrl = thePrimaryUrl; operationUrl = currentUrl; HandleReplicationStatusChanges = handleReplicationStatusChanges; return this; }
// Decompiled-style duplicate of InitializeInternal: builds one shared ReplicationInformer and
// assigns the sync/async command generators, each scoping its client to DefaultDatabase when set.
// Left byte-identical; see the other InitializeInternal variants in this file for the same logic.
protected virtual void InitializeInternal() { ReplicationInformer replicationInformer = new ReplicationInformer(this.Conventions); this.databaseCommandsGenerator = (Func<IDatabaseCommands>) (() => { ServerClient local_0 = new ServerClient(this.Url, this.Conventions, this.credentials, replicationInformer, this.jsonRequestFactory, DocumentStore.currentSessionId); if (string.IsNullOrEmpty(this.DefaultDatabase)) return (IDatabaseCommands) local_0; else return local_0.ForDatabase(this.DefaultDatabase); }); this.asyncDatabaseCommandsGenerator = (Func<IAsyncDatabaseCommands>) (() => { AsyncServerClient local_0 = new AsyncServerClient(this.Url, this.Conventions, this.credentials, this.jsonRequestFactory, DocumentStore.currentSessionId); if (string.IsNullOrEmpty(this.DefaultDatabase)) return (IAsyncDatabaseCommands) local_0; else return local_0.ForDatabase(this.DefaultDatabase); }); }
// Variant of AddReplicationStatusHeaders that sets headers via the indexer (replace semantics)
// rather than Headers.Add (append semantics) — presumably for a platform where Add is restricted;
// confirm before unifying with the other variant. Headers are only attached when we are actually
// failed over: a different node is serving the request AND the primary has recorded failures.
public HttpJsonRequest AddReplicationStatusHeaders(string thePrimaryUrl, string currentUrl, ReplicationInformer replicationInformer, FailoverBehavior failoverBehavior, Action <NameValueCollection, string, string> handleReplicationStatusChanges) { if (thePrimaryUrl.Equals(currentUrl, StringComparison.InvariantCultureIgnoreCase)) { return(this); } if (replicationInformer.GetFailureCount(thePrimaryUrl) <= 0) { return(this); // not because of failover, no need to do this. } var lastPrimaryCheck = replicationInformer.GetFailureLastCheck(thePrimaryUrl); webRequest.Headers[Constants.RavenClientPrimaryServerUrl] = ToRemoteUrl(thePrimaryUrl); webRequest.Headers[Constants.RavenClientPrimaryServerLastCheck] = lastPrimaryCheck.ToString("s"); primaryUrl = thePrimaryUrl; operationUrl = currentUrl; HandleReplicationStatusChanges = handleReplicationStatusChanges; return(this); }