public async Task ClusterWithFiveNodesAndMultipleElections() { PredictableSeeds = true; var a = SetupServer(true); var b = SetupServer(); var c = SetupServer(); var d = SetupServer(); var e = SetupServer(); var followers = new[] { b, c, d, e }; foreach (var follower in followers) { await a.AddToClusterAsync(follower.Url); await follower.WaitForTopology(Leader.TopologyModification.Voter); } var leaderSelected = followers.Select(x => x.WaitForState(RachisState.Leader).ContinueWith(_ => x)).ToArray(); using (var ctx = JsonOperationContext.ShortTermSingleUse()) { for (int i = 0; i < 10; i++) { await a.PutAsync(new TestCommand { Name = "test", Value = i }); } } foreach (var follower in followers) { Disconnect(follower.Url, a.Url); } var leader = await await Task.WhenAny(leaderSelected); using (var ctx = JsonOperationContext.ShortTermSingleUse()) { for (int i = 10; i < 20; i++) { await leader.PutAsync(new TestCommand { Name = "test", Value = i }); } } followers = followers.Except(new[] { leader }).ToArray(); leaderSelected = followers.Select(x => x.WaitForState(RachisState.Leader).ContinueWith(_ => x)).ToArray(); foreach (var follower in followers) { Disconnect(follower.Url, leader.Url); } leader = await await Task.WhenAny(leaderSelected); using (var ctx = JsonOperationContext.ShortTermSingleUse()) { for (int i = 20; i < 30; i++) { await leader.PutAsync(new TestCommand { Name = "test", Value = i }); } } TransactionOperationContext context; using (leader.ContextPool.AllocateOperationContext(out context)) using (context.OpenReadTransaction()) { var actual = leader.StateMachine.Read(context, "test"); var expected = Enumerable.Range(0, 30).Sum(); Assert.Equal(expected, actual); } }
public bool Update(UpdateStep step) { var oldCompareExchangeSchema = new TableSchema(). DefineKey(new TableSchema.SchemaIndexDef { StartIndex = (int)ClusterStateMachine.CompareExchangeTable.Key, Count = 1 }); var newCompareExchangeSchema = new TableSchema() .DefineKey(new TableSchema.SchemaIndexDef { StartIndex = (int)ClusterStateMachine.CompareExchangeTable.Key, Count = 1 }).DefineIndex(new TableSchema.SchemaIndexDef { StartIndex = (int)ClusterStateMachine.CompareExchangeTable.PrefixIndex, Count = 1, Name = ClusterStateMachine.CompareExchangeIndex }); const string oldTableName = "CmpXchg"; using (Slice.From(step.WriteTx.Allocator, oldTableName, out var oldCompareExchangeTable)) { var oldTable = step.ReadTx.OpenTable(oldCompareExchangeSchema, oldCompareExchangeTable); if (oldTable == null) { return(true); } var newTableName = ClusterStateMachine.CompareExchange.ToString(); foreach (var db in SchemaUpgradeExtensions.GetDatabases(step)) { // update CompareExchange newCompareExchangeSchema.Create(step.WriteTx, newTableName, null); var newTable = step.WriteTx.OpenTable(newCompareExchangeSchema, newTableName); var compareExchangeOldKey = $"{db.ToLowerInvariant()}/"; using (Slice.From(step.ReadTx.Allocator, compareExchangeOldKey, out var keyPrefix)) { foreach (var item in oldTable.SeekByPrimaryKeyPrefix(keyPrefix, Slices.Empty, 0)) { var index = DocumentsStorage.TableValueToLong((int)ClusterStateMachine.CompareExchangeTable.Index, ref item.Value.Reader); using (CompareExchangeCommandBase.GetPrefixIndexSlices(step.ReadTx.Allocator, db, index, out var buffer)) using (Slice.External(step.WriteTx.Allocator, buffer.Ptr, buffer.Length, out var prefixIndexSlice)) using (newTable.Allocate(out TableValueBuilder write)) using (var ctx = JsonOperationContext.ShortTermSingleUse()) { using (var bjro = new BlittableJsonReaderObject( item.Value.Reader.Read((int)ClusterStateMachine.CompareExchangeTable.Value, out var size1), size1, ctx).Clone(ctx) ) { write.Add(item.Key); write.Add(index); write.Add(bjro.BasePointer, bjro.Size); write.Add(prefixIndexSlice); newTable.Set(write); } } } } } } // delete the old table step.WriteTx.DeleteTable(oldTableName); // remove the remaining CompareExchange global index if (step.WriteTx.LowLevelTransaction.RootObjects.Read(ClusterStateMachine.CompareExchangeIndex) != null) { step.WriteTx.DeleteTree(ClusterStateMachine.CompareExchangeIndex); } return(true); }
public async Task RevisionsSubscriptionsWithCustomScriptCompareDocs() { using (var store = GetDocumentStore()) { var subscriptionId = await store.Subscriptions.CreateAsync(new SubscriptionCreationOptions { Query = @" declare function match(d){ return d.Current.Age > d.Previous.Age; } from Users (Revisions = true) as d where match(d) select { Id: id(d.Current), Age: d.Current.Age } " }); using (var context = JsonOperationContext.ShortTermSingleUse()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Disabled = false, MinimumRevisionsToKeep = 5 }, Collections = new Dictionary <string, RevisionsCollectionConfiguration> { ["Users"] = new RevisionsCollectionConfiguration { Disabled = false }, ["Dons"] = new RevisionsCollectionConfiguration { Disabled = false } } }; await Server.ServerStore.ModifyDatabaseRevisions(context, store.Database, EntityToBlittable.ConvertEntityToBlittable(configuration, new DocumentConventions(), context)); } for (int i = 0; i < 10; i++) { for (var j = 0; j < 10; j++) { using (var session = store.OpenSession()) { session.Store(new User { Name = $"users{i} ver {j}", Age = j }, "users/" + i); session.Store(new Company() { Name = $"dons{i} ver {j}" }, "companies/" + i); session.SaveChanges(); } } } using (var sub = store.Subscriptions.GetSubscriptionWorker <Result>(new SubscriptionWorkerOptions(subscriptionId))) { var mre = new AsyncManualResetEvent(); var names = new HashSet <string>(); var maxAge = -1; GC.KeepAlive(sub.Run(x => { foreach (var item in x.Items) { if (item.Result.Age > maxAge) { names.Add(item.Result.Id + item.Result.Age); maxAge = item.Result.Age; } if (names.Count == 9) { mre.Set(); } } })); Assert.True(await mre.WaitAsync(_reasonableWaitTime)); } } }
public static async ValueTask UnlikelyFailAuthorizationAsync(HttpContext context, string database, RavenServer.AuthenticateConnection feature, AuthorizationStatus authorizationStatus)
{
    string message;
    if (feature == null || feature.Status == RavenServer.AuthenticationStatus.None || feature.Status == RavenServer.AuthenticationStatus.NoCertificateProvided)
    {
        message = "This server requires client certificate for authentication, but none was provided by the client. Did you forget to install the certificate?";
        message += BrowserCertificateMessage;
    }
    else
    {
        var name = feature.Certificate.FriendlyName;
        if (string.IsNullOrWhiteSpace(name))
        {
            name = feature.Certificate.Subject;
        }
        if (string.IsNullOrWhiteSpace(name))
        {
            name = feature.Certificate.ToString(false);
        }
        name += $"(Thumbprint: {feature.Certificate.Thumbprint})";
        if (feature.Status == RavenServer.AuthenticationStatus.UnfamiliarCertificate)
        {
            message = $"The supplied client certificate '{name}' is unknown to the server. In order to register your certificate please contact your system administrator.";
            message += BrowserCertificateMessage;
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.UnfamiliarIssuer)
        {
            message = $"The supplied client certificate '{name}' is unknown to the server but has a known Public Key Pinning Hash. Will not use it to authenticate because the issuer is unknown. " +
                      Environment.NewLine +
                      $"To fix this, the admin can register the pinning hash of the *issuer* certificate: '{feature.IssuerHash}' in the '{RavenConfiguration.GetKey(x => x.Security.WellKnownIssuerHashes)}' configuration entry.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Allowed)
        {
            message = $"Could not authorize access to {(database ?? "the server")} using provided client certificate '{name}'.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Operator)
        {
            message = $"Insufficient security clearance to access {(database ?? "the server")} using provided client certificate '{name}'.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Expired)
        {
            message = $"The supplied client certificate '{name}' has expired on {feature.Certificate.NotAfter:D}. Please contact your system administrator in order to obtain a new one.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.NotYetValid)
        {
            message = $"The supplied client certificate '{name}' cannot be used before {feature.Certificate.NotBefore:D}";
        }
        else
        {
            message = "Access to the server was denied.";
        }
    }
    switch (authorizationStatus)
    {
        case AuthorizationStatus.ClusterAdmin:
            message += " ClusterAdmin access is required but not given to this certificate";
            break;
        case AuthorizationStatus.Operator:
            message += " Operator/ClusterAdmin access is required but not given to this certificate";
            break;
        case AuthorizationStatus.DatabaseAdmin:
            message += " DatabaseAdmin access is required but not given to this certificate";
            break;
    }
    context.Response.StatusCode = (int)HttpStatusCode.Forbidden;
    using (var ctx = JsonOperationContext.ShortTermSingleUse())
    await using (var writer = new AsyncBlittableJsonTextWriter(ctx, context.Response.Body))
    {
        await DrainRequestAsync(ctx, context);
        if (RavenServerStartup.IsHtmlAcceptable(context))
        {
            context.Response.StatusCode = (int)HttpStatusCode.Redirect;
            context.Response.Headers["Location"] = "/auth-error.html?err=" + Uri.EscapeDataString(message);
            return;
        }
        ctx.Write(writer, new DynamicJsonValue
        {
            ["Type"] = "InvalidAuth",
            ["Message"] = message
        });
    }
}
public void ValidateRanges()
{
    var values = new Values
    {
        intMinVal = Int32.MinValue,
        intMaxVal = Int32.MaxValue,
        longMinVal = long.MinValue,
        longMaxVal = long.MaxValue,
        doubleMinVal = double.MinValue,
        doubleMaxVal = double.MaxValue,
        doubleNegativeInfinity = double.NegativeInfinity,
        doublePositiveInfinity = double.PositiveInfinity,
        doubleNan = double.NaN,
        doubleEpsilon = double.Epsilon,
        floatMinVal = float.MinValue,
        floatMaxVal = float.MaxValue,
        floatMaxPercision = float.Epsilon,
        floatNegativeInfinity = float.NegativeInfinity,
        floatPositiveInfinity = float.PositiveInfinity,
        floatNan = float.NaN,
        uintMaxVal = uint.MaxValue,
        ulongMaxVal = ulong.MaxValue,
        stringMaxLength = string.Join("", Enumerable.Repeat(1, short.MaxValue)),
        dateMaxPercision = DateTime.Now,
        dateTimeOffsetMinVal = DateTimeOffset.MinValue,
        dateTimeOffsetMaxVal = DateTimeOffset.MaxValue,
        timeSpanMinVal = TimeSpan.MinValue,
        timeSpanMaxVal = TimeSpan.MaxValue,
        timeSpanDays = TimeSpan.FromDays(1),
        timeSpanHours = TimeSpan.FromHours(1),
        timeSpanMinutes = TimeSpan.FromMinutes(1),
        timeSpanSeconds = TimeSpan.FromSeconds(1),
        timeSpanMiliseconds = TimeSpan.FromMilliseconds(1),
        timeSpanNanoseconds = TimeSpan.FromTicks(1)
    };
    using (var context = JsonOperationContext.ShortTermSingleUse())
    {
        var blittableValues = EntityToBlittable.ConvertCommandToBlittable(values, context);
        var valuesDeserialized = JsonDeserializationTest.SerializationDeserializationValidation(blittableValues);
        var valuesType = typeof(Values);
        Assert.Equal(values.intMinVal, valuesDeserialized.intMinVal);
        Assert.Equal(values.intMaxVal, valuesDeserialized.intMaxVal);
        Assert.Equal(values.longMinVal, valuesDeserialized.longMinVal);
        Assert.Equal(values.longMaxVal, valuesDeserialized.longMaxVal);
        Assert.Equal(values.doubleMinVal, valuesDeserialized.doubleMinVal);
        Assert.Equal(values.doubleMaxVal, valuesDeserialized.doubleMaxVal);
        Assert.Equal(values.doubleNegativeInfinity, valuesDeserialized.doubleNegativeInfinity);
        Assert.Equal(values.doublePositiveInfinity, valuesDeserialized.doublePositiveInfinity);
        Assert.Equal(values.doubleNan, valuesDeserialized.doubleNan);
        Assert.Equal(values.doubleEpsilon, valuesDeserialized.doubleEpsilon);
        Assert.Equal(values.floatMinVal, valuesDeserialized.floatMinVal);
        Assert.Equal(values.floatMaxVal, valuesDeserialized.floatMaxVal);
        Assert.Equal(values.floatMaxPercision, valuesDeserialized.floatMaxPercision);
        Assert.Equal(values.floatNegativeInfinity, valuesDeserialized.floatNegativeInfinity);
        Assert.Equal(values.floatPositiveInfinity, valuesDeserialized.floatPositiveInfinity);
        Assert.Equal(values.floatNan, valuesDeserialized.floatNan);
        Assert.Equal(values.uintMaxVal, valuesDeserialized.uintMaxVal);
        Assert.Equal(values.ulongMaxVal, valuesDeserialized.ulongMaxVal);
        Assert.Equal(values.stringMaxLength, valuesDeserialized.stringMaxLength);
        Assert.Equal(values.dateMaxPercision, valuesDeserialized.dateMaxPercision);
        Assert.Equal(values.dateTimeOffsetMinVal, valuesDeserialized.dateTimeOffsetMinVal);
        Assert.Equal(values.dateTimeOffsetMaxVal, valuesDeserialized.dateTimeOffsetMaxVal);
        Assert.Equal(values.timeSpanMinVal, valuesDeserialized.timeSpanMinVal);
        Assert.Equal(values.timeSpanMaxVal, valuesDeserialized.timeSpanMaxVal);
        Assert.Equal(values.timeSpanDays, valuesDeserialized.timeSpanDays);
        Assert.Equal(values.timeSpanHours, valuesDeserialized.timeSpanHours);
        Assert.Equal(values.timeSpanMinutes, valuesDeserialized.timeSpanMinutes);
        Assert.Equal(values.timeSpanSeconds, valuesDeserialized.timeSpanSeconds);
        Assert.Equal(values.timeSpanMiliseconds, valuesDeserialized.timeSpanMiliseconds);
        Assert.Equal(values.timeSpanNanoseconds, valuesDeserialized.timeSpanNanoseconds);
    }
}
public async Task Fastst_node_should_choose_the_node_without_delay() { NoTimeouts(); var databaseName = GetDatabaseName(); var(leader, serversToProxies) = await CreateRaftClusterWithProxiesAsync(3); var followers = Servers.Where(x => x.ServerStore.IsLeader() == false).ToArray(); var conventionsForLoadBalancing = new DocumentConventions { ReadBalanceBehavior = ReadBalanceBehavior.FastestNode }; //set proxies with delays to all servers except follower2 using (var leaderStore = new DocumentStore { Urls = new[] { ReplacePort(leader.WebUrl, serversToProxies[leader].Port) }, Database = databaseName, Conventions = conventionsForLoadBalancing }) { leaderStore.Initialize(); var(index, _) = await CreateDatabaseInCluster(databaseName, 3, leader.WebUrl); await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(30)); var leaderRequestExecutor = leaderStore.GetRequestExecutor(); //make sure we have updated topology --> more deterministic test await leaderRequestExecutor.UpdateTopologyAsync(new RequestExecutor.UpdateTopologyParameters(new ServerNode { ClusterTag = leader.ServerStore.NodeTag, Database = databaseName, Url = leader.WebUrl }) { TimeoutInMs = 5000 }); ApplyProxiesOnRequestExecutor(serversToProxies, leaderRequestExecutor); //wait until all nodes in database cluster are members (and not promotables) //GetDatabaseTopologyCommand -> does not retrieve promotables using (var context = JsonOperationContext.ShortTermSingleUse()) { var topology = new Topology(); while (topology.Nodes?.Count != 3) { var topologyGetCommand = new GetDatabaseTopologyCommand(); await leaderRequestExecutor.ExecuteAsync(topologyGetCommand, context).ConfigureAwait(false); topology = topologyGetCommand.Result; Thread.Sleep(50); } } //set delays to all servers except follower2 foreach (var server in Servers) { if (server == followers[1]) { continue; } serversToProxies[server].ConnectionDelay = 300; } using (var session = leaderStore.OpenSession()) { session.Store(new User { Name = "John Dow" }, "users/1"); session.SaveChanges(); } while (leaderRequestExecutor.InSpeedTestPhase) { using (var session = leaderStore.OpenSession()) { session.Load <User>("users/1"); } } var fastest = leaderRequestExecutor.GetFastestNode().Result.Node; var follower2Proxy = ReplacePort(followers[1].WebUrl, serversToProxies[followers[1]].Port); Assert.Equal(follower2Proxy, fastest.Url); } }
public void IndexDefinitionSerialization() { var indexDefinition = new IndexDefinition(); #if FEATURE_TEST_INDEX indexDefinition.IsTestIndex = true; #endif indexDefinition.LockMode = IndexLockMode.LockedIgnore; indexDefinition.Maps = new HashSet <string> { "a", "b" }; indexDefinition.Name = "n1"; indexDefinition.Reduce = "c"; indexDefinition.Type = IndexType.MapReduce; indexDefinition.Fields = new Dictionary <string, IndexFieldOptions> { { "f1", new IndexFieldOptions { Spatial = new SpatialOptions { Type = SpatialFieldType.Geography, Units = SpatialUnits.Miles, MinY = 3, MinX = 5, MaxY = 2, MaxX = 5, Strategy = SpatialSearchStrategy.QuadPrefixTree, MaxTreeLevel = 2 }, Indexing = FieldIndexing.No, Suggestions = true, Storage = FieldStorage.Yes, Analyzer = "a1", TermVector = FieldTermVector.WithPositionsAndOffsets } }, { "f2", new IndexFieldOptions { Spatial = new SpatialOptions { Type = SpatialFieldType.Cartesian, Units = SpatialUnits.Kilometers, MinY = 5, MinX = 2, MaxY = 9, MaxX = 3, Strategy = SpatialSearchStrategy.BoundingBox, MaxTreeLevel = 5 }, Indexing = FieldIndexing.Exact, Suggestions = false, Storage = FieldStorage.No, Analyzer = "a2", TermVector = FieldTermVector.WithPositions } } }; using (var context = JsonOperationContext.ShortTermSingleUse()) { var builder = indexDefinition.ToJson(); using (var json = context.ReadObject(builder, nameof(IndexDefinition))) { var newIndexDefinition = JsonDeserializationServer.IndexDefinition(json); Assert.True(indexDefinition.Equals(newIndexDefinition)); } } }
public LazyStringValueReaderTests() { _ctx = JsonOperationContext.ShortTermSingleUse(); }
public async Task MissingRevisions3() { var(nodes, leader) = await CreateRaftCluster(3, watcherCluster : true); var database = GetDatabaseName(); await CreateDatabaseInClusterInner(new DatabaseRecord(database), 3, leader.WebUrl, null); using (var store = new DocumentStore { Database = database, Urls = new[] { leader.WebUrl } }.Initialize()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Disabled = false, PurgeOnDelete = false, MinimumRevisionsToKeep = 30 } }; long index; using (var context = JsonOperationContext.ShortTermSingleUse()) { var configurationJson = DocumentConventions.Default.Serialization.DefaultConverter.ToBlittable(configuration, context); (index, _) = await leader.ServerStore.ModifyDatabaseRevisions(context, database, configurationJson, Guid.NewGuid().ToString()); } await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(15)); var testServer = Servers.First(server => server.ServerStore.IsLeader() == false); await StoreInTransactionMode(store, 1); await StoreInRegularMode(store, 3); await DeleteInTransactionMode(store, 1); var result = await leader.ServerStore.SendToLeaderAsync(new DeleteDatabaseCommand(database, Guid.NewGuid().ToString()) { HardDelete = true, FromNodes = new[] { testServer.ServerStore.NodeTag }, }); await WaitForRaftIndexToBeAppliedInCluster(result.Index, TimeSpan.FromSeconds(10)); var val = await WaitForValueAsync(async() => await GetMembersCount(store, database), 2, 20000); Assert.Equal(2, val); var delCount = await WaitForValueAsync(() => { var record = store.Maintenance.Server.Send(new GetDatabaseRecordOperation(database)); return(record.DeletionInProgress.Count); }, 0); Assert.Equal(0, delCount); await store.Maintenance.Server.SendAsync(new AddDatabaseNodeOperation(database, testServer.ServerStore.NodeTag)); var documentDatabase = await testServer.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); await WaitAndAssertForValueAsync(() => { using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) return(Task.FromResult(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/1"))); }, 5); } }
public void UnlikelyFailAuthorization(HttpContext context, string database, RavenServer.AuthenticateConnection feature, AuthorizationStatus authorizationStatus)
{
    string message;
    if (feature == null || feature.Status == RavenServer.AuthenticationStatus.None || feature.Status == RavenServer.AuthenticationStatus.NoCertificateProvided)
    {
        message = "This server requires client certificate for authentication, but none was provided by the client.";
    }
    else
    {
        var name = feature.Certificate.FriendlyName;
        if (string.IsNullOrWhiteSpace(name))
        {
            name = feature.Certificate.Subject;
        }
        if (string.IsNullOrWhiteSpace(name))
        {
            name = feature.Certificate.ToString(false);
        }
        name += "(Thumbprint: " + feature.Certificate.Thumbprint + ")";
        if (feature.Status == RavenServer.AuthenticationStatus.UnfamiliarCertificate)
        {
            message = "The supplied client certificate '" + name + "' is unknown to the server. In order to register your certificate please contact your system administrator.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Allowed)
        {
            message = "Could not authorize access to " + (database ?? "the server") + " using provided client certificate '" + name + "'.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Operator)
        {
            message = "Insufficient security clearance to access " + (database ?? "the server") + " using provided client certificate '" + name + "'.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.Expired)
        {
            message = "The supplied client certificate '" + name + "' has expired on " + feature.Certificate.NotAfter.ToString("D") + ". Please contact your system administrator in order to obtain a new one.";
        }
        else if (feature.Status == RavenServer.AuthenticationStatus.NotYetValid)
        {
            message = "The supplied client certificate '" + name + "' cannot be used before " + feature.Certificate.NotBefore.ToString("D");
        }
        else
        {
            message = "Access to the server was denied.";
        }
    }
    switch (authorizationStatus)
    {
        case AuthorizationStatus.ClusterAdmin:
            message += " ClusterAdmin access is required but not given to this certificate";
            break;
        case AuthorizationStatus.Operator:
            message += " Operator/ClusterAdmin access is required but not given to this certificate";
            break;
        case AuthorizationStatus.DatabaseAdmin:
            message += " DatabaseAdmin access is required but not given to this certificate";
            break;
    }
    context.Response.StatusCode = (int)HttpStatusCode.Forbidden;
    using (var ctx = JsonOperationContext.ShortTermSingleUse())
    using (var writer = new BlittableJsonTextWriter(ctx, context.Response.Body))
    {
        DrainRequest(ctx, context);
        if (RavenServerStartup.IsHtmlAcceptable(context))
        {
            context.Response.StatusCode = (int)HttpStatusCode.Redirect;
            context.Response.Headers["Location"] = "/auth-error.html?err=" + Uri.EscapeDataString(message);
            return;
        }
        ctx.Write(writer, new DynamicJsonValue
        {
            ["Type"] = "InvalidAuth",
            ["Message"] = message
        });
    }
}
public async ValueTask <string> HandlePath(HttpContext context, string method, string path) { var tryMatch = _trie.TryMatch(method, path); if (tryMatch.Value == null) { var exception = new RouteNotFoundException($"There is no handler for path: {method} {path}{context.Request.QueryString}"); AssertClientVersion(context, exception); throw exception; } var reqCtx = new RequestHandlerContext { HttpContext = context, RavenServer = _ravenServer, RouteMatch = tryMatch.Match }; var tuple = tryMatch.Value.TryGetHandler(reqCtx); var handler = tuple.Item1 ?? await tuple.Item2; reqCtx.Database?.Metrics?.Requests.RequestsPerSec.Mark(); _serverMetrics.Requests.RequestsPerSec.Mark(); Interlocked.Increment(ref _serverMetrics.Requests.ConcurrentRequestsCount); try { _ravenServer.Statistics.LastRequestTime = SystemTime.UtcNow; if (handler == null) { if (_auditLog != null) { _auditLog.Info($"Invalid request {context.Request.Method} {context.Request.Path} by " + $"(Cert: {context.Connection.ClientCertificate?.Subject} ({context.Connection.ClientCertificate?.Thumbprint}) {context.Connection.RemoteIpAddress}:{context.Connection.RemotePort})"); } context.Response.StatusCode = (int)HttpStatusCode.BadRequest; using (var ctx = JsonOperationContext.ShortTermSingleUse()) using (var writer = new BlittableJsonTextWriter(ctx, context.Response.Body)) { ctx.Write(writer, new DynamicJsonValue { ["Type"] = "Error", ["Message"] = $"There is no handler for {context.Request.Method} {context.Request.Path}" }); } return(null); } if (_ravenServer.Configuration.Security.AuthenticationEnabled) { var authResult = TryAuthorize(tryMatch.Value, context, reqCtx.Database); if (authResult == false) { return(reqCtx.Database?.Name); } } if (reqCtx.Database != null) { using (reqCtx.Database.DatabaseInUse(tryMatch.Value.SkipUsagesCount)) { if (reqCtx.HttpContext.Response.Headers.TryGetValue(Constants.Headers.LastKnownClusterTransactionIndex, out var value) && long.TryParse(value, out var index) && index < reqCtx.Database.RachisLogIndexNotifications.LastModifiedIndex) { await reqCtx.Database.RachisLogIndexNotifications.WaitForIndexNotification(index, reqCtx.HttpContext.RequestAborted); } await handler(reqCtx); } } else { await handler(reqCtx); } } finally { Interlocked.Decrement(ref _serverMetrics.Requests.ConcurrentRequestsCount); } return(reqCtx.Database?.Name); }
public static void WaitForIndexing(IDocumentStore store, string dbName = null, TimeSpan?timeout = null, bool allowErrors = false) { var admin = store.Maintenance.ForDatabase(dbName); timeout = timeout ?? (Debugger.IsAttached ? TimeSpan.FromMinutes(15) : TimeSpan.FromMinutes(1)); var sp = Stopwatch.StartNew(); while (sp.Elapsed < timeout.Value) { var databaseStatistics = admin.Send(new GetStatisticsOperation()); var indexes = databaseStatistics.Indexes .Where(x => x.State != IndexState.Disabled); if (indexes.All(x => x.IsStale == false && x.Name.StartsWith("ReplacementOf/") == false)) { return; } if (databaseStatistics.Indexes.Any(x => x.State == IndexState.Error)) { break; } Thread.Sleep(32); } if (allowErrors) { return; } var perf = admin.Send(new GetIndexPerformanceStatisticsOperation()); var errors = admin.Send(new GetIndexErrorsOperation()); var stats = admin.Send(new GetIndexesStatisticsOperation()); var total = new { Errors = errors, Stats = stats, Performance = perf }; var file = Path.GetTempFileName() + ".json"; using (var stream = File.Open(file, FileMode.OpenOrCreate)) using (var context = JsonOperationContext.ShortTermSingleUse()) using (var writer = new BlittableJsonTextWriter(context, stream)) { var djv = (DynamicJsonValue)TypeConverter.ToBlittableSupportedType(total); var json = context.ReadObject(djv, "errors"); writer.WriteObject(json); writer.Flush(); } var statistics = admin.Send(new GetStatisticsOperation()); var corrupted = statistics.Indexes.Where(x => x.State == IndexState.Error).ToList(); if (corrupted.Count > 0) { throw new InvalidOperationException( $"The following indexes are with error state: {string.Join(",", corrupted.Select(x => x.Name))} - details at " + file); } throw new TimeoutException("The indexes stayed stale for more than " + timeout.Value + ", stats at " + file); }
public void Equals_blittables_created_manually() { using (var ctx = JsonOperationContext.ShortTermSingleUse()) { BlittableJsonReaderObject CreateBlittable() { using (var builder = new ManualBlittableJsonDocumentBuilder <UnmanagedWriteBuffer>(ctx)) using (var officeBuilder = new ManualBlittableJsonDocumentBuilder <UnmanagedWriteBuffer>(ctx)) using (var companyBuilder = new ManualBlittableJsonDocumentBuilder <UnmanagedWriteBuffer>(ctx)) { builder.Reset(BlittableJsonDocumentBuilder.UsageMode.None); officeBuilder.Reset(BlittableJsonDocumentBuilder.UsageMode.None); companyBuilder.Reset(BlittableJsonDocumentBuilder.UsageMode.None); companyBuilder.StartWriteObjectDocument(); companyBuilder.StartWriteObject(); companyBuilder.WritePropertyName("Name"); companyBuilder.WriteValue("Hibernating Rhinos"); companyBuilder.WritePropertyName("Type"); companyBuilder.WriteValue("LTD"); companyBuilder.WriteObjectEnd(); companyBuilder.FinalizeDocument(); var embeddedCompany = companyBuilder.CreateReader(); officeBuilder.StartWriteObjectDocument(); officeBuilder.StartWriteObject(); officeBuilder.WritePropertyName("Company"); officeBuilder.WriteEmbeddedBlittableDocument(embeddedCompany); officeBuilder.WritePropertyName("Street"); officeBuilder.WriteValue("Hanasi 21"); officeBuilder.WritePropertyName("City"); officeBuilder.WriteValue("Hadera"); officeBuilder.WriteObjectEnd(); officeBuilder.FinalizeDocument(); var embeddedOffice = officeBuilder.CreateReader(); builder.StartWriteObjectDocument(); builder.StartWriteObject(); builder.WritePropertyName("Office"); builder.WriteEmbeddedBlittableDocument(embeddedOffice); builder.WriteObjectEnd(); builder.FinalizeDocument(); return(builder.CreateReader()); } } using (var blittable1 = CreateBlittable()) using (var blittable2 = CreateBlittable()) { Assert.Equal(blittable1, blittable2); Assert.Equal(blittable1.GetHashCode(), blittable2.GetHashCode()); blittable1.TryGet("Office", out BlittableJsonReaderObject ob1); blittable2.TryGet("Office", out BlittableJsonReaderObject ob2); Assert.Equal(ob1, ob2); } } }
public async Task PutDifferentAttachmentsShouldConflict() { using (var store1 = GetDocumentStore(options: new Options { ModifyDatabaseRecord = record => { record.ConflictSolverConfig = new ConflictSolver(); } })) using (var store2 = GetDocumentStore(options: new Options { ModifyDatabaseRecord = record => { record.ConflictSolverConfig = new ConflictSolver(); } })) { await Databases.SetDatabaseId(store1, new Guid("00000000-48c4-421e-9466-000000000000")); await Databases.SetDatabaseId(store2, new Guid("99999999-48c4-421e-9466-999999999999")); using (var session = store1.OpenAsyncSession()) { var x = new User { Name = "Fitzchak" }; await session.StoreAsync(x, "users/1"); await session.SaveChangesAsync(); using (var a1 = new MemoryStream(new byte[] { 1, 2, 3 })) { await store1.Operations.SendAsync(new PutAttachmentOperation("users/1", "a1", a1, "a1/png")); } using (var session2 = store2.OpenSession()) { session2.Store(new User { Name = "Fitzchak" }, "users/1"); session2.SaveChanges(); using (var a2 = new MemoryStream(new byte[] { 1, 2, 3, 4, 5 })) { store2.Operations.Send(new PutAttachmentOperation("users/1", "a1", a2, "a1/png")); } await SetupReplicationAsync(store1, store2); await session.StoreAsync(new User { Name = "Toli" }, "users/2"); await session.SaveChangesAsync(); WaitForDocumentToReplicate <User>(store2, "users/2", 3000); var conflicts = (await store2.Commands().GetConflictsForAsync("users/1")).ToList(); Assert.Equal(2, conflicts.Count); var requestExecutor = store2.GetRequestExecutor(); using (var context = JsonOperationContext.ShortTermSingleUse()) using (var stringStream = new MemoryStream(System.Text.Encoding.UTF8.GetBytes(_conflictedDocument))) using (var blittableJson = await context.ReadForMemoryAsync(stringStream, "Reading of foo/bar")) { var result = new InMemoryDocumentSessionOperations.SaveChangesData((InMemoryDocumentSessionOperations)session2); result.SessionCommands.Add(new PutCommandDataWithBlittableJson("users/1", null, null, blittableJson)); var sbc = new SingleNodeBatchCommand(DocumentConventions.Default, context, result.SessionCommands, result.Options); await requestExecutor.ExecuteAsync(sbc, context); } } } using (var session = store1.OpenAsyncSession()) { var conflicts = (await store2.Commands().GetConflictsForAsync("users/1")).ToList(); Assert.Equal(0, conflicts.Count); Assert.True(await session.Advanced.Attachments.ExistsAsync("users/1", "a1")); } } }
public async Task Round_robin_load_balancing_should_work() { var databaseName = GetDatabaseName(); var(nodes, leader) = await CreateRaftCluster(3); var followers = Servers.Where(x => x.ServerStore.IsLeader() == false).ToArray(); var conventionsForLoadBalancing = new DocumentConventions { ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin }; using (var leaderStore = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower1 = new DocumentStore { Urls = new[] { followers[0].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower2 = new DocumentStore { Urls = new[] { followers[1].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var context = JsonOperationContext.ShortTermSingleUse()) { leaderStore.Initialize(); follower1.Initialize(); follower2.Initialize(); var(index, _) = await CreateDatabaseInCluster(databaseName, 3, leader.WebUrl); await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(30)); var leaderRequestExecutor = leaderStore.GetRequestExecutor(); //make sure we have updated topology --> more deterministic test await leaderRequestExecutor.UpdateTopologyAsync(new RequestExecutor.UpdateTopologyParameters(new ServerNode { ClusterTag = leader.ServerStore.NodeTag, Database = databaseName, Url = leader.WebUrl }) { TimeoutInMs = 5000, ForceUpdate = true }); //wait until all nodes in database cluster are members (and not promotables) //GetDatabaseTopologyCommand -> does not retrieve promotables var topology = new Topology(); while (topology.Nodes?.Count != 3) { var topologyGetCommand = new GetDatabaseTopologyCommand(); await leaderRequestExecutor.ExecuteAsync(topologyGetCommand, context); topology = topologyGetCommand.Result; Thread.Sleep(50); } foreach (var server in Servers) { await server.ServerStore.Cluster.WaitForIndexNotification(index); } using (var session = leaderStore.OpenSession()) { session.Store(new User { Name = "John Dow" }); session.Store(new User { Name = "Jack Dow" }); session.Store(new User { Name = "Jane Dow" }); session.Store(new User { Name = "FooBar" }, "marker"); session.SaveChanges(); await WaitForDocumentInClusterAsync <User>(nodes, databaseName, "marker", x => true, leader.ServerStore.Configuration.Cluster.OperationTimeout.AsTimeSpan); } var usedUrls = new List <string>(); for (var i = 0; i < 3; i++) { using (var session = leaderStore.OpenSession()) { // ReSharper disable once ReturnValueOfPureMethodIsNotUsed session.Query <User>().Where(u => u.Name.StartsWith("Ja")).ToList(); usedUrls.Add((await session.Advanced.GetCurrentSessionNode()).Url.ToLower()); } } foreach (var url in usedUrls) { Assert.Single(usedUrls, url); } } }
public async Task MissingRevisions() { var(nodes, leader) = await CreateRaftCluster(3, watcherCluster : true); var database = GetDatabaseName(); await CreateDatabaseInClusterInner(new DatabaseRecord(database), 3, leader.WebUrl, null); using (var store = new DocumentStore { Database = database, Urls = new[] { leader.WebUrl } }.Initialize()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Disabled = false, PurgeOnDelete = false, MinimumRevisionsToKeep = 30 } }; long index; using (var context = JsonOperationContext.ShortTermSingleUse()) { var configurationJson = DocumentConventions.Default.Serialization.DefaultConverter.ToBlittable(configuration, context); (index, _) = await leader.ServerStore.ModifyDatabaseRevisions(context, database, configurationJson, Guid.NewGuid().ToString()); } await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(15)); await StoreInTransactionMode(store, 1); await StoreInRegularMode(store, 10); await DeleteInTransactionMode(store, 1); RavenServer testServer = Servers.FirstOrDefault(server => server.ServerStore.IsLeader() == false); var result = await DisposeServerAndWaitForFinishOfDisposalAsync(testServer); var val = await WaitForValueAsync(async() => await GetMembersCount(store, database), 2, 20000); Assert.Equal(2, val); testServer = GetNewServer(new ServerCreationOptions { DeletePrevious = true, RunInMemory = false, CustomSettings = new Dictionary <string, string> { [RavenConfiguration.GetKey(x => x.Core.ServerUrls)] = result.Url } }); using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10))) { await testServer.ServerStore.Engine.WaitForState(RachisState.Follower, cts.Token); } await WaitAndAssertForValueAsync(async() => await GetMembersCount(store, database), 3, 20000); using (var session = store.OpenSession(new SessionOptions { })) { session.Store(new User() { Name = "userT" }, "users/1"); session.SaveChanges(); await WaitForDocumentInClusterAsync <User>(nodes, database, "users/1", null, TimeSpan.FromSeconds(15)); } var documentDatabase = await testServer.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); { var res = WaitForValue(() => { using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) return(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/1")); }, 13, 15000); Assert.Equal(13, res); } } }
public async Task Round_robin_load_balancing_with_failing_node_should_work() { var databaseName = GetDatabaseName(); var(nodes, leader) = await CreateRaftCluster(3); var followers = Servers.Where(x => x.ServerStore.IsLeader() == false).ToArray(); var conventionsForLoadBalancing = new DocumentConventions { ReadBalanceBehavior = ReadBalanceBehavior.RoundRobin }; using (var leaderStore = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower1 = new DocumentStore { Urls = new[] { followers[0].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var follower2 = new DocumentStore { Urls = new[] { followers[1].WebUrl }, Database = databaseName, Conventions = conventionsForLoadBalancing }) using (var context = JsonOperationContext.ShortTermSingleUse()) { leaderStore.Initialize(); follower1.Initialize(); follower2.Initialize(); var(index, _) = await CreateDatabaseInCluster(databaseName, 3, leader.WebUrl); await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(30)); var leaderRequestExecutor = leaderStore.GetRequestExecutor(); //wait until all nodes in database cluster are members (and not promotables) //GetDatabaseTopologyCommand -> does not retrieve promotables var topology = new Topology(); while (topology.Nodes?.Count != 3) { var topologyGetCommand = new GetDatabaseTopologyCommand(); await leaderRequestExecutor.ExecuteAsync(topologyGetCommand, context); topology = topologyGetCommand.Result; Thread.Sleep(50); } foreach (var server in Servers) { await server.ServerStore.Cluster.WaitForIndexNotification(index); } using (var session = leaderStore.OpenSession()) { session.Store(new User { Name = "John Dow" }); session.Store(new User { Name = "Jack Dow" }); session.Store(new User { Name = "Jane Dow" }); session.Store(new User { Name = "FooBar" }, "marker"); session.SaveChanges(); await WaitForDocumentInClusterAsync <User>(nodes, databaseName, "marker", x => true, leader.ServerStore.Configuration.Cluster.OperationTimeout.AsTimeSpan); } using (var requestExecutor = RequestExecutor.Create(follower1.Urls, databaseName, null, follower1.Conventions)) { do //make sure there are three nodes in the topology { await Task.Delay(100); } while (requestExecutor.TopologyNodes == null); DisposeServerAndWaitForFinishOfDisposal(leader); var failedRequests = new HashSet <(string, Exception)>(); requestExecutor.OnFailedRequest += (sender, args) => failedRequests.Add((args.Url, args.Exception)); using (var tmpContext = JsonOperationContext.ShortTermSingleUse()) { for (var sessionId = 0; sessionId < 5; sessionId++) { requestExecutor.Cache.Clear(); //make sure we do not use request cache await requestExecutor.ExecuteAsync(new GetStatisticsOperation().GetCommand(DocumentConventions.Default, tmpContext), tmpContext); } } } } }
public async Task MissingRevisions6() { var(nodes, leader) = await CreateRaftCluster(3, watcherCluster : true); var database = GetDatabaseName(); await CreateDatabaseInClusterInner(new DatabaseRecord(database), 3, leader.WebUrl, null); using (var store = new DocumentStore { Database = database, Urls = new[] { leader.WebUrl } }.Initialize()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Disabled = false, PurgeOnDelete = false, MinimumRevisionsToKeep = 20 } }; long index; using (var context = JsonOperationContext.ShortTermSingleUse()) { var configurationJson = DocumentConventions.Default.Serialization.DefaultConverter.ToBlittable(configuration, context); (index, _) = await leader.ServerStore.ModifyDatabaseRevisions(context, database, configurationJson, Guid.NewGuid().ToString()); } await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(15)); var tasks = new List <Task> { StoreInRegularMode(store, 1), DeleteAndStoreInTransactionMode(store, 1), StoreInRegularMode(store, 1), }; await tasks.WhenAll(); using (var session = store.OpenSession()) { session.Store(new User() { Name = "Toli" }, "users/3"); session.SaveChanges(); await WaitForDocumentInClusterAsync <User>(nodes, database, "users/3", null, TimeSpan.FromSeconds(15)); } var revisionCountList = new List <long>(); foreach (var server in Servers) { var documentDatabase = await server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) { revisionCountList.Add(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/1")); } } Assert.Equal(revisionCountList[0], revisionCountList[1]); Assert.Equal(revisionCountList[0], revisionCountList[2]); revisionCountList.Clear(); foreach (var server in Servers) { var documentDatabase = await server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) { revisionCountList.Add(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/2")); } } Assert.Equal(revisionCountList[0], revisionCountList[1]); Assert.Equal(revisionCountList[0], revisionCountList[2]); } }
internal async Task GetClusterDebugLogs(StringBuilder sb) { (ClusterObserverLogEntry[] List, long Iteration) logs; List<DynamicJsonValue> historyLogs = null; DynamicJsonValue inMemoryDebug = null; List<string> prevStates = null; logs.List = null; await ActionWithLeader((l) => { logs = l.ServerStore.Observer.ReadDecisionsForDatabase(); prevStates = l.ServerStore.Engine.PrevStates.Select(s => s.ToString()).ToList(); using (l.ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context)) using (context.OpenReadTransaction()) { historyLogs = l.ServerStore.Engine.LogHistory.GetHistoryLogs(context).ToList(); inMemoryDebug = l.ServerStore.Engine.InMemoryDebug.ToJson(); } return Task.CompletedTask; }); if (prevStates != null) { sb.AppendLine($"{Environment.NewLine}States:{Environment.NewLine}-----------------------"); foreach (var state in prevStates) { sb.AppendLine($"{state}{Environment.NewLine}"); } sb.AppendLine(); } if (historyLogs != null) { sb.AppendLine($"HistoryLogs:{Environment.NewLine}-----------------------"); using (var context = JsonOperationContext.ShortTermSingleUse()) { var c = 0; foreach (var log in historyLogs) { var json = context.ReadObject(log, nameof(log) + $"{c++}"); sb.AppendLine(json.ToString()); } } sb.AppendLine(); } if (logs.List.Length > 0) { sb.AppendLine($"Cluster Observer Log Entries:{Environment.NewLine}-----------------------"); using (var context = JsonOperationContext.ShortTermSingleUse()) { var c = 0; foreach (var log in logs.List) { var json = context.ReadObject(log.ToJson(), nameof(log) + $"{c++}"); sb.AppendLine(json.ToString()); } } } if (inMemoryDebug != null) { sb.AppendLine($"RachisDebug:{Environment.NewLine}-----------------------"); using (var context = JsonOperationContext.ShortTermSingleUse()) { var json = context.ReadObject(inMemoryDebug, nameof(inMemoryDebug)); sb.AppendLine(json.ToString()); } } }
public async Task MissingRevisions7() { var(nodes, leader) = await CreateRaftCluster(3, watcherCluster : true); var database = GetDatabaseName(); var dbCreation = await CreateDatabaseInClusterInner(new DatabaseRecord(database), 2, leader.WebUrl, null); using (var storeB = new DocumentStore { Database = database, Urls = new[] { dbCreation.Servers[1].WebUrl }, Conventions = new DocumentConventions { DisableTopologyUpdates = true } }.Initialize()) using (var storeA = new DocumentStore { Database = database, Urls = new[] { dbCreation.Servers[0].WebUrl }, Conventions = new DocumentConventions { DisableTopologyUpdates = true } }.Initialize()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Disabled = false, PurgeOnDelete = false, MinimumRevisionsToKeep = 30 } }; long index; using (var context = JsonOperationContext.ShortTermSingleUse()) { var configurationJson = DocumentConventions.Default.Serialization.DefaultConverter.ToBlittable(configuration, context); (index, _) = await leader.ServerStore.ModifyDatabaseRevisions(context, database, configurationJson, Guid.NewGuid().ToString()); } await WaitForRaftIndexToBeAppliedInCluster(index, TimeSpan.FromSeconds(15)); await StoreInRegularMode(storeA, 3); await WaitAndAssertForValueAsync(async() => { var documentDatabase = await dbCreation.Servers[1].ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) return(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/1")); }, 3); var result = await leader.ServerStore.SendToLeaderAsync(new DeleteDatabaseCommand(database, Guid.NewGuid().ToString()) { HardDelete = true, FromNodes = new[] { dbCreation.Servers[0].ServerStore.NodeTag }, }); await WaitForRaftIndexToBeAppliedInCluster(result.Index, TimeSpan.FromSeconds(10)); await WaitAndAssertForValueAsync(() => { var record = storeB.Maintenance.Server.Send(new GetDatabaseRecordOperation(database)); return(Task.FromResult(record.DeletionInProgress.Count)); }, 0); var breakRepl = await BreakReplication(dbCreation.Servers[1].ServerStore, database); // make member directly, so it can perform cluster tx var record = storeB.Maintenance.Server.Send(new GetDatabaseRecordOperation(database)); record.Topology.Members.Add(dbCreation.Servers[0].ServerStore.NodeTag); await storeB.Maintenance.Server.SendAsync(new UpdateDatabaseOperation(record, record.Etag)); // have a doc written by cluster tx before we get any replication await StoreInTransactionMode(storeB, 1); breakRepl.Mend(); var val = await WaitForValueAsync(async() => await GetMembersCount(storeB, database), 2, 20000); Assert.Equal(2, val); await WaitAndAssertForValueAsync(() => { var record = storeB.Maintenance.Server.Send(new GetDatabaseRecordOperation(database)); return(Task.FromResult(record.DeletionInProgress.Count)); }, 0); await WaitAndAssertForValueAsync(async() => { var documentDatabase = await dbCreation.Servers[0].ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(database); using (documentDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context)) using (context.OpenReadTransaction()) return(documentDatabase.DocumentsStorage.RevisionsStorage.GetRevisionsCount(context, "users/1")); }, 4); } }
public async ValueTask HandlePath(RequestHandlerContext reqCtx)
{
    var context = reqCtx.HttpContext;
    var tryMatch = _trie.TryMatch(context.Request.Method, context.Request.Path.Value);
    if (tryMatch.Value == null)
    {
        var exception = new RouteNotFoundException($"There is no handler for path: {context.Request.Method} {context.Request.Path.Value}{context.Request.QueryString}");
        AssertClientVersion(context, exception);
        throw exception;
    }
    reqCtx.RavenServer = _ravenServer;
    reqCtx.RouteMatch = tryMatch.Match;
    var tuple = tryMatch.Value.TryGetHandler(reqCtx);
    var handler = tuple.Item1 ?? await tuple.Item2;
    reqCtx.Database?.Metrics?.Requests.RequestsPerSec.Mark();
    _serverMetrics.Requests.RequestsPerSec.Mark();
    Interlocked.Increment(ref _serverMetrics.Requests.ConcurrentRequestsCount);
    try
    {
        if (handler == null)
        {
            var auditLog = LoggingSource.AuditLog.IsInfoEnabled ? LoggingSource.AuditLog.GetLogger("RequestRouter", "Audit") : null;
            if (auditLog != null)
            {
                auditLog.Info($"Invalid request {context.Request.Method} {context.Request.Path} by " +
                              $"(Cert: {context.Connection.ClientCertificate?.Subject} ({context.Connection.ClientCertificate?.Thumbprint}) {context.Connection.RemoteIpAddress}:{context.Connection.RemotePort})");
            }
            context.Response.StatusCode = (int)HttpStatusCode.BadRequest;
            using (var ctx = JsonOperationContext.ShortTermSingleUse())
            await using (var writer = new AsyncBlittableJsonTextWriter(ctx, context.Response.Body))
            {
                ctx.Write(writer, new DynamicJsonValue
                {
                    ["Type"] = "Error",
                    ["Message"] = $"There is no handler for {context.Request.Method} {context.Request.Path}"
                });
            }
            return;
        }
        var skipAuthorization = false;
        if (tryMatch.Value.CorsMode != CorsMode.None)
        {
            RequestHandler.SetupCORSHeaders(context, reqCtx.RavenServer.ServerStore, tryMatch.Value.CorsMode);
            // don't authorize preflight requests: https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
            skipAuthorization = context.Request.Method == "OPTIONS";
        }
        var status = RavenServer.AuthenticationStatus.ClusterAdmin;
        try
        {
            if (_ravenServer.Configuration.Security.AuthenticationEnabled && skipAuthorization == false)
            {
                var (authorized, authorizationStatus) = await TryAuthorizeAsync(tryMatch.Value, context, reqCtx.Database);
                status = authorizationStatus;
                if (authorized == false)
                {
                    return;
                }
            }
        }
        finally
        {
            if (tryMatch.Value.SkipLastRequestTimeUpdate == false)
            {
                var now = SystemTime.UtcNow;
                if (now - _lastRequestTimeUpdated >= LastRequestTimeUpdateFrequency)
                {
                    _ravenServer.Statistics.LastRequestTime = now;
                    _lastRequestTimeUpdated = now;
                }
                if (now - _lastAuthorizedNonClusterAdminRequestTime >= LastRequestTimeUpdateFrequency && skipAuthorization == false)
                {
                    switch (status)
                    {
                        case RavenServer.AuthenticationStatus.Allowed:
                        case RavenServer.AuthenticationStatus.Operator:
                        {
                            _ravenServer.Statistics.LastAuthorizedNonClusterAdminRequestTime = now;
                            _lastAuthorizedNonClusterAdminRequestTime = now;
                            break;
                        }
                        case RavenServer.AuthenticationStatus.None:
                        case RavenServer.AuthenticationStatus.NoCertificateProvided:
                        case RavenServer.AuthenticationStatus.UnfamiliarCertificate:
                        case RavenServer.AuthenticationStatus.UnfamiliarIssuer:
                        case RavenServer.AuthenticationStatus.ClusterAdmin:
                        case RavenServer.AuthenticationStatus.Expired:
                        case RavenServer.AuthenticationStatus.NotYetValid:
                            break;
                        default:
                            ThrowUnknownAuthStatus(status);
                            break;
                    }
                }
            }
        }
        if (reqCtx.Database != null)
        {
            if (tryMatch.Value.DisableOnCpuCreditsExhaustion && _ravenServer.CpuCreditsBalance.FailoverAlertRaised.IsRaised())
            {
                await RejectRequestBecauseOfCpuThresholdAsync(context);
                return;
            }
            using (reqCtx.Database.DatabaseInUse(tryMatch.Value.SkipUsagesCount))
            {
                if (context.Request.Headers.TryGetValue(Constants.Headers.LastKnownClusterTransactionIndex, out var value) &&
                    long.TryParse(value, out var index) &&
                    index > reqCtx.Database.RachisLogIndexNotifications.LastModifiedIndex)
                {
                    await reqCtx.Database.RachisLogIndexNotifications.WaitForIndexNotification(index, context.RequestAborted);
                }
                await handler(reqCtx);
            }
        }
        else
        {
            await handler(reqCtx);
        }
    }
    finally
    {
        Interlocked.Decrement(ref _serverMetrics.Requests.ConcurrentRequestsCount);
    }
}
public LuceneDocumentConverterTests(ITestOutputHelper output) : base(output) { _ctx = JsonOperationContext.ShortTermSingleUse(); }
public async Task SubscriptionShouldRespectDocumentsWithCompressedData()
{
    using (var documentStore = this.GetDocumentStore())
    {
        Server.ServerStore.Observer.Suspended = true;
        var originalDoc = new Doc
        {
            Id = "doc/1",
            StrVal = new string(Enumerable.Repeat('.', 129).ToArray()),
            LongByteArray = Enumerable.Repeat((byte)2, 1024).ToArray()
        };
        using (var session = documentStore.OpenAsyncSession())
        {
            await session.StoreAsync(originalDoc);
            await session.SaveChangesAsync();
        }
        var database = await Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(documentStore.Database);
        using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
        using (context.OpenReadTransaction())
        {
            var doc = database.DocumentsStorage.Get(context, "doc/1");
            MemoryStream ms = new MemoryStream();
            using (var newContext = JsonOperationContext.ShortTermSingleUse())
            using (var writer = new BlittableJsonTextWriter(newContext, ms))
            {
                writer.WriteDocument(newContext, doc, metadataOnly: false);
                writer.Flush();
                var bjro = GetReaderFromMemoryStream(ms, context);
                var desereializedDoc = (Doc)EntityToBlittable.ConvertToEntity(typeof(Doc), null, bjro, DocumentConventions.Default);
                Assert.Equal(originalDoc.StrVal, desereializedDoc.StrVal);
                Assert.Equal(originalDoc.LongByteArray, desereializedDoc.LongByteArray);
            }
        }
        var subscriptionCreationParams = new SubscriptionCreationOptions
        {
            Query = "from Docs",
        };
        var subsId = await documentStore.Subscriptions.CreateAsync(subscriptionCreationParams).ConfigureAwait(false);
        var amre = new AsyncManualResetEvent();
        using (var subscription = documentStore.Subscriptions.GetSubscriptionWorker<Doc>(new SubscriptionWorkerOptions(subsId)
        {
            TimeToWaitBeforeConnectionRetry = TimeSpan.FromSeconds(5)
        }))
        {
            var t = subscription.Run(batch =>
            {
                var receivedDoc = batch.Items.First().Result;
                Assert.Equal(originalDoc.LongByteArray, receivedDoc.LongByteArray);
                Assert.Equal(originalDoc.StrVal, receivedDoc.StrVal);
                amre.Set();
            });
            try
            {
                Assert.True(await amre.WaitAsync(TimeSpan.FromSeconds(60)));
            }
            catch
            {
                if (t.IsFaulted)
                {
                    t.Wait();
                }
                throw;
            }
        }
    }
}
public BlittableJsonTraverserTests() { _ctx = JsonOperationContext.ShortTermSingleUse(); }
public void PeepingTomStreamShouldPeepCorrectly(int originalSize, int chunkSizeToRead, int offset) { using (var context = JsonOperationContext.ShortTermSingleUse()) PeepingTomStreamTest(originalSize, chunkSizeToRead, offset, context); }
public bool Update(UpdateStep step)
{
    var dbs = new List<string>();
    const string dbKey = "db/";
    var identities = step.ReadTx.ReadTree(ClusterStateMachine.Identities);
    step.WriteTx.DeleteTree(ClusterStateMachine.Identities);
    using (var items = step.ReadTx.OpenTable(ClusterStateMachine.ItemsSchema, ClusterStateMachine.Items))
    using (Slice.From(step.ReadTx.Allocator, dbKey, out Slice loweredPrefix))
    {
        foreach (var result in items.SeekByPrimaryKeyPrefix(loweredPrefix, Slices.Empty, 0))
        {
            dbs.Add(ClusterStateMachine.GetCurrentItemKey(result.Value).Substring(dbKey.Length));
        }
    }
    step.WriteTx.CreateTree(ClusterStateMachine.CompareExchangeIndex);
    foreach (var db in dbs)
    {
        if (identities != null)
        {
            Slice.From(step.WriteTx.Allocator, "Identities", out var identitySlice);
            ClusterStateMachine.IdentitiesSchema.Create(step.WriteTx, identitySlice, 32);
            var writeTable = step.WriteTx.OpenTable(ClusterStateMachine.IdentitiesSchema, identitySlice);
            using (Slice.From(step.ReadTx.Allocator, $"{dbKey}{db.ToLowerInvariant()}/identities/", out var identityPrefix))
            {
                using (var it = identities.Iterate(prefetch: false))
                {
                    it.SetRequiredPrefix(identityPrefix);
                    if (it.Seek(identityPrefix))
                    {
                        do
                        {
                            var key = it.CurrentKey;
                            var keyAsString = key.ToString(); // old identity key
                            var value = it.CreateReaderForCurrent().ReadLittleEndianInt64();
                            var newKey = keyAsString.Substring(identityPrefix.ToString().Length);

                            // write to new identities schema
                            GetKeyAndPrefixIndexSlices(step.ReadTx.Allocator, db, $"{newKey}", 0L, out var keyTuple, out var indexTuple);
                            using (keyTuple.Scope)
                            using (indexTuple.Scope)
                            using (Slice.External(step.ReadTx.Allocator, keyTuple.Buffer.Ptr, keyTuple.Buffer.Length, out var keySlice))
                            using (Slice.External(step.ReadTx.Allocator, indexTuple.Buffer.Ptr, indexTuple.Buffer.Length, out var prefixIndexSlice))
                            {
                                using (writeTable.Allocate(out var write))
                                {
                                    write.Add(keySlice);
                                    write.Add(value);
                                    write.Add(0L);
                                    write.Add(prefixIndexSlice);
                                    writeTable.Set(write);
                                }
                            }
                        } while (it.MoveNext());
                    }
                }
            }
        }

        // update db backup status
        var dbLower = db.ToLowerInvariant();
        using (var items = step.WriteTx.OpenTable(ClusterStateMachine.ItemsSchema, ClusterStateMachine.Items))
        using (Slice.From(step.ReadTx.Allocator, $"{dbKey}{dbLower}", out Slice lowerKey))
        using (var ctx = JsonOperationContext.ShortTermSingleUse())
        {
            var (databaseRecordJson, _) = GetBjroAndIndex(ctx, items, lowerKey);
            var databaseRecord = JsonDeserializationCluster.DatabaseRecord(databaseRecordJson);
            if (databaseRecord == null)
            {
                continue;
            }
            foreach (var pb in databaseRecord.PeriodicBackups)
            {
                var pbItemName = PeriodicBackupStatus.GenerateItemName(db, pb.TaskId);
                using (Slice.From(step.WriteTx.Allocator, pbItemName, out Slice pbsSlice))
                using (Slice.From(step.WriteTx.Allocator, pbItemName.ToLowerInvariant(), out Slice pbsSliceLower))
                {
                    var (singleBackupStatus, index) = GetBjroAndIndex(ctx, items, pbsSlice);
                    if (singleBackupStatus == null)
                    {
                        continue;
                    }
                    if (singleBackupStatus.TryGet(nameof(PeriodicBackupStatus.LocalBackup), out BlittableJsonReaderObject localBackup) == false ||
                        singleBackupStatus.TryGet(nameof(PeriodicBackupStatus.LastRaftIndex), out BlittableJsonReaderObject lastRaftIndexBlittable) == false)
                    {
                        continue;
                    }
                    if (localBackup.TryGet(nameof(PeriodicBackupStatus.LastIncrementalBackup), out DateTime? lastIncrementalBackupDate) == false ||
                        lastRaftIndexBlittable.TryGet(nameof(PeriodicBackupStatus.LastEtag), out long? lastRaftIndex) == false)
                    {
                        continue;
                    }
                    if (lastIncrementalBackupDate == null || lastRaftIndex == null)
                    {
                        continue;
                    }
                    var myLastRaftIndex = new LastRaftIndex
                    {
                        LastEtag = 0L
                    };
                    singleBackupStatus.Modifications = new DynamicJsonValue
                    {
                        [nameof(PeriodicBackupStatus.LastRaftIndex)] = myLastRaftIndex.ToJson()
                    };
                    using (var old = singleBackupStatus)
                    {
                        singleBackupStatus = ctx.ReadObject(singleBackupStatus, pbItemName);
                    }
                    using (items.Allocate(out var builder))
                    {
                        builder.Add(pbsSliceLower);
                        builder.Add(pbsSlice);
                        builder.Add(singleBackupStatus.BasePointer, singleBackupStatus.Size);
                        builder.Add(index);
                        items.Set(builder);
                    }
                }
            }
        }
    }
    return true;
}
public async Task PlainRevisionsSubscriptions()
{
    using (var store = GetDocumentStore())
    {
        var subscriptionId = await store.Subscriptions.CreateAsync<Revision<User>>();

        using (var context = JsonOperationContext.ShortTermSingleUse())
        {
            var configuration = new RevisionsConfiguration
            {
                Default = new RevisionsCollectionConfiguration
                {
                    Disabled = false,
                    MinimumRevisionsToKeep = 5,
                },
                Collections = new Dictionary<string, RevisionsCollectionConfiguration>
                {
                    ["Users"] = new RevisionsCollectionConfiguration
                    {
                        Disabled = false
                    },
                    ["Dons"] = new RevisionsCollectionConfiguration
                    {
                        Disabled = false
                    }
                }
            };

            await Server.ServerStore.ModifyDatabaseRevisions(context, store.Database,
                EntityToBlittable.ConvertEntityToBlittable(configuration, new DocumentConventions(), context));
        }

        for (int i = 0; i < 10; i++)
        {
            for (var j = 0; j < 10; j++)
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new User
                    {
                        Name = $"users{i} ver {j}"
                    }, "users/" + i);

                    session.Store(new Company
                    {
                        Name = $"dons{i} ver {j}"
                    }, "dons/" + i);

                    session.SaveChanges();
                }
            }
        }

        using (var sub = store.Subscriptions.GetSubscriptionWorker<Revision<User>>(new SubscriptionWorkerOptions(subscriptionId)))
        {
            var mre = new AsyncManualResetEvent();
            var names = new HashSet<string>();

            GC.KeepAlive(sub.Run(x =>
            {
                foreach (var item in x.Items)
                {
                    names.Add(item.Result.Current?.Name + item.Result.Previous?.Name);
                    if (names.Count == 100)
                        mre.Set();
                }
            }));

            Assert.True(await mre.WaitAsync(_reasonableWaitTime));
        }
    }
}
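// For reference, the Revision<User> payload consumed above pairs the current and previous
// versions of a document, which is why the handler concatenates Current?.Name and
// Previous?.Name. A minimal sketch of the shape the test assumes:
public class Revision<T>
{
    public T Previous;  // the revision that was replaced (null for the first revision)
    public T Current;   // the revision that produced this batch item
}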
public void ValidateFailedRevisionsSubscriptionScriptExceptionHandling()
{
    using (var store = GetDocumentStore())
    {
        using (var context = JsonOperationContext.ShortTermSingleUse())
        {
            var configuration = new RevisionsConfiguration
            {
                Default = new RevisionsCollectionConfiguration
                {
                    Active = true,
                    MinimumRevisionsToKeep = 5,
                },
                Collections = new Dictionary<string, RevisionsCollectionConfiguration>
                {
                    ["Users"] = new RevisionsCollectionConfiguration
                    {
                        Active = true
                    },
                    ["Dons"] = new RevisionsCollectionConfiguration
                    {
                        Active = true,
                    }
                }
            };

            AsyncHelpers.RunSync(() => Server.ServerStore.ModifyDatabaseRevisions(context, store.Database,
                EntityToBlittable.ConvertEntityToBlittable(configuration, new DocumentConventions(), context)));
        }

        var subscriptionId = store.Subscriptions.Create(new SubscriptionCreationOptions
        {
            Query = @"
declare function project(d){
    throw 'nice';
    return d;
}
from Users (Revisions = true) as d
select project(d)"
        });

        var subscription = store.Subscriptions.Open<User>(new SubscriptionConnectionOptions(subscriptionId));

        var exceptions = new List<Exception>();
        var mre = new ManualResetEvent(false);
        var receivedItem = new SubscriptionBatch<User>.Item();
        var userId = string.Empty;

        using (var session = store.OpenSession())
        {
            var newUser = new User();
            session.Store(newUser);
            session.SaveChanges();
            userId = session.Advanced.GetDocumentId(newUser);
        }

        subscription.Run(x =>
        {
            foreach (var item in x.Items)
            {
                receivedItem = item;
                try
                {
                    var res = item;
                }
                catch (Exception e)
                {
                    exceptions.Add(e);
                }
            }
            mre.Set();
        });

        Assert.True(mre.WaitOne(_reasonableWaitTime));
        Assert.NotNull(receivedItem);

        // the projection script throws, so materializing the result is expected to fail
        Assert.Throws<InvalidOperationException>(() => receivedItem.Result);
        Assert.NotNull(receivedItem.Metadata);
        Assert.Equal(receivedItem.Id, userId);
    }
}
public async Task QueriesRunning()
{
    using (var store = GetDocumentStore())
    {
        IndexQuery q;
        using (var session = store.OpenSession())
        {
            var people = session.Query<Person>()
                .Where(x => x.Name == "John")
                .ToList(); // create index

            q = session.Advanced.DocumentQuery<Person>()
                .WhereEquals(x => x.Name, "John")
                .Take(20)
                .GetIndexQuery();
        }

        using (var context = JsonOperationContext.ShortTermSingleUse())
        {
            var query1 = new IndexQueryServerSide(q.Query, context.ReadObject(new DynamicJsonValue
            {
                ["p0"] = q.QueryParameters["p0"]
            }, "query/parameters"))
            {
                Start = q.Start,
                CutoffEtag = q.CutoffEtag,
                ExplainScores = q.ExplainScores,
                PageSize = q.PageSize,
                ShowTimings = q.ShowTimings,
                SkipDuplicateChecking = q.SkipDuplicateChecking,
                WaitForNonStaleResults = q.WaitForNonStaleResults,
                WaitForNonStaleResultsTimeout = q.WaitForNonStaleResultsTimeout
            };

            var query2 = new MoreLikeThisQueryServerSide
            {
                DocumentId = "docs/1"
            };

            var query3 = new FacetQuery
            {
                FacetSetupDoc = "setup/1"
            };

            var database = await Server
                .ServerStore
                .DatabasesLandlord
                .TryGetOrCreateResourceStore(new StringSegment(store.Database));

            var index = database.IndexStore.GetIndexes().First();

            var now = SystemTime.UtcNow;
            index.CurrentlyRunningQueries.TryAdd(new ExecutingQueryInfo(now, query1, 10, OperationCancelToken.None));
            index.CurrentlyRunningQueries.TryAdd(new ExecutingQueryInfo(now, query2, 11, OperationCancelToken.None));
            index.CurrentlyRunningQueries.TryAdd(new ExecutingQueryInfo(now, query3, 12, OperationCancelToken.None));

            var conventions = new DocumentConventions();

            using (var commands = store.Commands())
            {
                var json = commands.RawGetJson<BlittableJsonReaderObject>("/debug/queries/running");

                Assert.True(json.TryGet(index.Name, out BlittableJsonReaderArray array));
                Assert.Equal(3, array.Length);

                foreach (BlittableJsonReaderObject info in array)
                {
                    int queryId;
                    Assert.True(info.TryGet(nameof(ExecutingQueryInfo.QueryId), out queryId));

                    string duration;
                    Assert.True(info.TryGet(nameof(ExecutingQueryInfo.Duration), out duration));
                    Assert.NotNull(duration);

                    string startTimeAsString;
                    Assert.True(info.TryGet(nameof(ExecutingQueryInfo.StartTime), out startTimeAsString));
                    Assert.Equal(now, DateTime.Parse(startTimeAsString).ToUniversalTime());

                    object token;
                    Assert.False(info.TryGetMember(nameof(ExecutingQueryInfo.Token), out token));
                    Assert.Null(token);

                    if (queryId == 10)
                    {
                        BlittableJsonReaderObject queryInfo;
                        Assert.True(info.TryGet(nameof(ExecutingQueryInfo.QueryInfo), out queryInfo));

                        var query = (IndexQuery)conventions.DeserializeEntityFromBlittable(typeof(IndexQuery), queryInfo);
                        Assert.True(q.Equals(query));
                        continue;
                    }

                    if (queryId == 11)
                    {
                        BlittableJsonReaderObject queryInfo;
                        Assert.True(info.TryGet(nameof(ExecutingQueryInfo.QueryInfo), out queryInfo));

                        var query = (MoreLikeThisQuery)conventions.DeserializeEntityFromBlittable(typeof(MoreLikeThisQuery), queryInfo);
                        Assert.Equal(query2.DocumentId, query.DocumentId);
                        continue;
                    }

                    if (queryId == 12)
                    {
                        BlittableJsonReaderObject queryInfo;
                        Assert.True(info.TryGet(nameof(ExecutingQueryInfo.QueryInfo), out queryInfo));

                        var query = (FacetQuery)conventions.DeserializeEntityFromBlittable(typeof(FacetQuery), queryInfo);
                        Assert.Equal(query3.FacetSetupDoc, query.FacetSetupDoc);
                        continue;
                    }

                    throw new NotSupportedException("Should not happen.");
                }
            }
        }
    }
}
public LuceneDocumentConverterTests()
{
    _ctx = JsonOperationContext.ShortTermSingleUse();
}
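// Not part of the original listing: contexts returned by JsonOperationContext.ShortTermSingleUse()
// are IDisposable, so a fixture like the one above would typically release _ctx when the test class
// is torn down. A minimal sketch, assuming the class implements IDisposable:
public void Dispose()
{
    _ctx?.Dispose();
}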