public override void Respond(IHttpContext context)
{
    var principal = context.User;
    if (principal == null || principal.Identity == null || principal.Identity.IsAuthenticated == false)
    {
        var anonymous = new UserInfo
        {
            Remark = "Using anonymous user",
            IsAdminGlobal = server.SystemConfiguration.AnonymousUserAccessMode == AnonymousUserAccessMode.Admin
        };
        context.WriteJson(RavenJObject.FromObject(anonymous));
        return;
    }

    var windowsPrincipal = principal as WindowsPrincipal;
    if (windowsPrincipal != null)
    {
        var windowsUser = new UserInfo
        {
            Remark = "Using windows auth",
            User = windowsPrincipal.Identity.Name,
            IsAdminGlobal = windowsPrincipal.IsAdministrator(server.SystemConfiguration.AnonymousUserAccessMode)
        };
        context.WriteJson(RavenJObject.FromObject(windowsUser));
        return;
    }

    var principalWithDatabaseAccess = principal as PrincipalWithDatabaseAccess;
    if (principalWithDatabaseAccess != null)
    {
        var windowsUserWithDatabase = new UserInfo
        {
            Remark = "Using windows auth",
            User = principalWithDatabaseAccess.Identity.Name,
            IsAdminGlobal = principalWithDatabaseAccess.IsAdministrator(server.SystemConfiguration.AnonymousUserAccessMode),
            IsAdminCurrentDb = principalWithDatabaseAccess.IsAdministrator(Database),
            Databases = principalWithDatabaseAccess.AdminDatabases
                .Concat(principalWithDatabaseAccess.ReadOnlyDatabases)
                .Concat(principalWithDatabaseAccess.ReadWriteDatabases)
                .Select(db => new DatabaseInfo
                {
                    Database = db,
                    IsAdmin = principal.IsAdministrator(db)
                }).ToList(),
            AdminDatabases = principalWithDatabaseAccess.AdminDatabases,
            ReadOnlyDatabases = principalWithDatabaseAccess.ReadOnlyDatabases,
            ReadWriteDatabases = principalWithDatabaseAccess.ReadWriteDatabases
        };
        context.WriteJson(RavenJObject.FromObject(windowsUserWithDatabase));
        return;
    }

    var oAuthPrincipal = principal as OAuthPrincipal;
    if (oAuthPrincipal != null)
    {
        var oAuth = new UserInfo
        {
            Remark = "Using OAuth",
            User = oAuthPrincipal.Name,
            IsAdminGlobal = oAuthPrincipal.IsAdministrator(server.SystemConfiguration.AnonymousUserAccessMode),
            IsAdminCurrentDb = oAuthPrincipal.IsAdministrator(Database),
            Databases = oAuthPrincipal.TokenBody.AuthorizedDatabases
                .Select(db => new DatabaseInfo
                {
                    Database = db.TenantId,
                    IsAdmin = principal.IsAdministrator(db.TenantId)
                }).ToList(),
            AccessTokenBody = oAuthPrincipal.TokenBody,
        };
        context.WriteJson(RavenJObject.FromObject(oAuth));
        return;
    }

    var unknown = new UserInfo
    {
        Remark = "Unknown auth",
        Principal = principal
    };
    context.WriteJson(RavenJObject.FromObject(unknown));
}
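The handler above serializes a UserInfo object with RavenJObject.FromObject and writes it to the response. A minimal client-side sketch of reading one field back from that payload, reusing the request pattern from the CanAuthAsAdminAgainstTenantDb test below; the /debug/user-info URL and the IsAdminGlobal field come from these snippets, while the initialized `store` is an assumption:

// Sketch (assumes an initialized DocumentStore named `store`, as in the auth tests below):
var request = store.JsonRequestFactory.CreateHttpJsonRequest(new CreateHttpJsonRequestParams(null,
    store.Url + "/debug/user-info", "GET",
    new OperationCredentials(store.ApiKey, null), store.Conventions));
var userInfo = (RavenJObject)request.ReadResponseJson();
var isAdminGlobal = userInfo.Value<bool>("IsAdminGlobal"); // one of the fields written by Respond()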
public void AfterRollbackCannotSeeChangesEvenInSameTxId()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        tx.Batch(viewer => Assert.NotNull(viewer.Documents.DocumentByKey("Ayende", transactionInformation)));
        tx.Batch(mutator => mutator.Transactions.RollbackTransaction(transactionInformation.Id));
        tx.Batch(viewer => Assert.Null(viewer.Documents.DocumentByKey("Ayende", transactionInformation)));
    }
}
private void SetBackupStatus(BackupStatus backupStatus)
{
    filesystem.Storage.Batch(accessor => accessor.SetConfig(BackupStatus.RavenBackupStatusDocumentKey,
        RavenJObject.FromObject(backupStatus)));
}
public void AddingDocumentInTxThenAddingWithoutTxThrows()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        tx.Batch(mutator => Assert.Throws<ConcurrencyException>(
            () => mutator.Documents.AddDocument("Ayende", Guid.NewGuid(),
                RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject())));
    }
}
public void CanModifyTxId()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        var txInfo2 = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(1) };
        tx.Batch(mutator => mutator.Transactions.ModifyTransactionId(transactionInformation.Id, txInfo2.Id, txInfo2.Timeout));
        tx.Batch(viewer => Assert.NotNull(viewer.Documents.DocumentByKey("Ayende", txInfo2)));
    }
}
public void AddingDocInTxAndReadingItInSameTx()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        tx.Batch(viewer => Assert.NotNull(viewer.Documents.DocumentByKey("Ayende", transactionInformation)));
    }
}
public void AddingDocInTxCannotBeReadOutside()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        tx.Batch(viewer => Assert.True(viewer.Documents.DocumentByKey("Ayende", null)
            .Metadata.Value<bool>(Constants.RavenDocumentDoesNotExists)));
    }
}
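The assertion above relies on a detail worth calling out: reading a document that exists only inside an open transaction, without passing that transaction, yields a stub whose metadata carries the Constants.RavenDocumentDoesNotExists marker rather than a null result. A minimal sketch of that check in isolation, assuming the same storage setup (`tx`) as these tests:

// Sketch: distinguishing a real document from an in-transaction-only stub.
tx.Batch(viewer =>
{
    var doc = viewer.Documents.DocumentByKey("Ayende", null); // read without the transaction
    var isStub = doc.Metadata.Value<bool>(Constants.RavenDocumentDoesNotExists);
    // isStub is true while the adding transaction is still open
});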
public void CanHaveTwoResultsForSameDoc()
{
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.MappedResults.PutMappedResult("test", "users/ayende", "ayende",
            RavenJObject.FromObject(new { Name = "Rahien" }), null));
        tx.Batch(mutator => mutator.MappedResults.PutMappedResult("test", "users/ayende", "ayende",
            RavenJObject.FromObject(new { Name = "Rahien" }), null));
        tx.Batch(viewer => Assert.Equal(2,
            viewer.MappedResults.GetMappedResults(new GetMappedResultsParams("test", "ayende", null)).Count()));
    }
}
public void CanStoreAndGetMappedResultWithSeveralResultsForSameReduceKey()
{
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator =>
        {
            mutator.MappedResults.PutMappedResult("test", "users/ayende", "ayende",
                RavenJObject.FromObject(new { Name = "Rahien" }), null);
            mutator.MappedResults.PutMappedResult("test", "users/rahien", "ayende",
                RavenJObject.FromObject(new { Name = "Rahien" }), null);
        });
        tx.Batch(viewer => Assert.Equal(2,
            viewer.MappedResults.GetMappedResults(new GetMappedResultsParams("test", "ayende", null)).Count()));
    }
}
public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token)
{
    token.ThrowIfCancellationRequested();

    var count = 0;
    var sourceCount = 0;
    var deleted = new Dictionary<ReduceKeyAndBucket, int>();
    var performance = RecordCurrentBatch("Current Map", "Map", batch.Docs.Count);
    var performanceStats = new List<BasePerformanceStats>();

    var usedStorageAccessors = new ConcurrentSet<IStorageActionsAccessor>();
    if (usedStorageAccessors.TryAdd(actions))
    {
        var storageCommitDuration = new Stopwatch();
        actions.BeforeStorageCommit += storageCommitDuration.Start;
        actions.AfterStorageCommit += () =>
        {
            storageCommitDuration.Stop();
            performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
        };
    }

    var deleteMappedResultsDuration = new Stopwatch();
    var documentsWrapped = batch.Docs.Select(doc =>
    {
        token.ThrowIfCancellationRequested();
        sourceCount++;
        var documentId = doc.__document_id;
        using (StopwatchScope.For(deleteMappedResultsDuration))
        {
            actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
        }
        return doc;
    })
    .Where(x => x is FilteredDocument == false)
    .ToList();

    performanceStats.Add(new PerformanceStats
    {
        Name = IndexingOperation.Map_DeleteMappedResults,
        DurationMs = deleteMappedResultsDuration.ElapsedMilliseconds,
    });

    var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
    var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
    var allState = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

    var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();
    var parallelProcessingStart = SystemTime.UtcNow;

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition =>
    {
        token.ThrowIfCancellationRequested();
        var parallelStats = new ParallelBatchStats
        {
            StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
        };

        var localStats = new IndexingWorkStats();
        var localChanges = new HashSet<ReduceKeyAndBucket>();
        var statsPerKey = new Dictionary<string, int>();

        var linqExecutionDuration = new Stopwatch();
        var reduceInMapLinqExecutionDuration = new Stopwatch();
        var putMappedResultsDuration = new Stopwatch();
        var convertToRavenJObjectDuration = new Stopwatch();

        allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

        using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
        {
            // we are writing to the transactional store from multiple threads here, and in a streaming fashion
            // should result in less memory and better perf
            context.TransactionalStorage.Batch(accessor =>
            {
                if (usedStorageAccessors.TryAdd(accessor))
                {
                    var storageCommitDuration = new Stopwatch();
                    accessor.BeforeStorageCommit += storageCommitDuration.Start;
                    accessor.AfterStorageCommit += () =>
                    {
                        storageCommitDuration.Stop();
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                    };
                }

                var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats, linqExecutionDuration);
                var currentDocumentResults = new List<object>();
                string currentKey = null;
                bool skipDocument = false;

                foreach (var currentDoc in mapResults)
                {
                    token.ThrowIfCancellationRequested();

                    var documentId = GetDocumentId(currentDoc);
                    if (documentId != currentKey)
                    {
                        count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);
                        currentDocumentResults.Clear();
                        currentKey = documentId;
                    }
                    else if (skipDocument)
                    {
                        continue;
                    }

                    RavenJObject currentDocJObject;
                    using (StopwatchScope.For(convertToRavenJObjectDuration))
                    {
                        currentDocJObject = RavenJObject.FromObject(currentDoc, jsonSerializer);
                    }

                    currentDocumentResults.Add(new DynamicJsonObject(currentDocJObject));

                    if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
                    {
                        skipDocument = true;
                        currentDocumentResults.Clear();
                        continue;
                    }

                    Interlocked.Increment(ref localStats.IndexingSuccesses);
                }
                count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds));
                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds));
                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, reduceInMapLinqExecutionDuration.ElapsedMilliseconds));
                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_PutMappedResults, putMappedResultsDuration.ElapsedMilliseconds));
                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ConvertToRavenJObject, convertToRavenJObjectDuration.ElapsedMilliseconds));

                parallelOperations.Enqueue(parallelStats);
            });

            allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
            allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
        }
    });

    performanceStats.Add(new ParallelPerformanceStats
    {
        NumberOfThreads = parallelOperations.Count,
        DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
        BatchedOperations = parallelOperations.ToList()
    });

    var updateDocumentReferencesDuration = new Stopwatch();
    using (StopwatchScope.For(updateDocumentReferencesDuration))
    {
        UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);
    }
    performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds));

    var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
        .Distinct()
        .ToList();

    var stats = new IndexingWorkStats(allState.Select(x => x.Item2));
    var reduceKeyStats = allState.SelectMany(x => x.Item3)
        .GroupBy(x => x.Key)
        .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
        .ToList();

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor =>
    {
        while (enumerator.MoveNext())
        {
            var reduceKeyStat = enumerator.Current;
            accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count);
        }
    }));

    actions.General.MaybePulseTransaction();

    var parallelReductionOperations = new ConcurrentQueue<ParallelBatchStats>();
    var parallelReductionStart = SystemTime.UtcNow;

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, changed, enumerator => context.TransactionalStorage.Batch(accessor =>
    {
        var parallelStats = new ParallelBatchStats
        {
            StartDelay = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds
        };

        var scheduleReductionsDuration = new Stopwatch();
        using (StopwatchScope.For(scheduleReductionsDuration))
        {
            while (enumerator.MoveNext())
            {
                accessor.MapReduce.ScheduleReductions(indexId, 0, enumerator.Current);
                accessor.General.MaybePulseTransaction();
            }
        }

        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
        parallelReductionOperations.Enqueue(parallelStats);
    }));

    performanceStats.Add(new ParallelPerformanceStats
    {
        NumberOfThreads = parallelReductionOperations.Count,
        DurationMs = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds,
        BatchedOperations = parallelReductionOperations.ToList()
    });

    UpdateIndexingStats(context, stats);

    performance.OnCompleted = () => BatchCompleted("Current Map", "Map", sourceCount, count, performanceStats);

    logIndexing.Debug("Mapped {0} documents for {1}", count, PublicName);

    return performance;
}
public void CanDeletePerView()
{
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.MappedResults.PutMappedResult("test", "users/ayende", "ayende",
            RavenJObject.FromObject(new { Name = "Rahien" }), null));
        tx.Batch(mutator => mutator.MappedResults.DeleteMappedResultsForView("test"));
        tx.Batch(viewer => Assert.Empty(
            viewer.MappedResults.GetMappedResults(new GetMappedResultsParams("test", "ayende", null))));
    }
}
public void DeleteIndex(string name)
{
    using (IndexDefinitionStorage.TryRemoveIndexContext())
    {
        var instance = IndexDefinitionStorage.GetIndexDefinition(name);
        if (instance == null)
            return;

        // Set up a flag to signal that this is something we're doing
        TransactionalStorage.Batch(actions => actions.Lists.Set("Raven/Indexes/PendingDeletion",
            instance.IndexId.ToString(CultureInfo.InvariantCulture),
            RavenJObject.FromObject(new
            {
                TimeOfOriginalDeletion = SystemTime.UtcNow,
                instance.IndexId,
                IndexName = instance.Name
            }), UuidType.Tasks));

        // Delete the main record synchronously
        IndexDefinitionStorage.RemoveIndex(name);
        Database.IndexStorage.DeleteIndex(instance.IndexId);
        WorkContext.ClearErrorsFor(name);

        // And delete the data in the background
        StartDeletingIndexDataAsync(instance.IndexId, name);

        // We raise the notification now because as far as we're concerned it is done *now*
        TransactionalStorage.ExecuteImmediatelyOrRegisterForSynchronization(() =>
            Database.Notifications.RaiseNotifications(new IndexChangeNotification
            {
                Name = name,
                Type = IndexChangeTypes.IndexRemoved,
            }));
    }
}
public async Task CanDisableVersioningDuringImport_Between_Remote()
{
    using (var server = GetNewServer())
    using (var store = NewRemoteDocumentStore(ravenDbServer: server))
    {
        store.DatabaseCommands.GlobalAdmin.CreateDatabase(new DatabaseDocument
        {
            Id = "Import",
            Settings =
            {
                { Constants.ActiveBundles, "Versioning" },
                { "Raven/DataDir", NewDataPath() }
            }
        });

        using (var session = store.OpenSession())
        {
            for (int i = 0; i < 10; i++)
            {
                session.Store(new User());
                session.Store(new Address());
            }
            session.SaveChanges();
        }

        var countOfDocuments = store.DatabaseCommands.GetStatistics().CountOfDocuments;

        store.DatabaseCommands.ForDatabase("Import").Put("Raven/Versioning/DefaultConfiguration", null,
            RavenJObject.FromObject(new Bundles.Versioning.Data.VersioningConfiguration
            {
                Exclude = false,
                Id = "Raven/Versioning/DefaultConfiguration",
                MaxRevisions = 5
            }), new RavenJObject());

        var smuggler = new SmugglerDatabaseApi(new SmugglerDatabaseOptions
        {
            ShouldDisableVersioningBundle = true
        });

        await smuggler.Between(new SmugglerBetweenOptions<RavenConnectionStringOptions>
        {
            From = new RavenConnectionStringOptions { Url = store.Url, DefaultDatabase = store.DefaultDatabase },
            To = new RavenConnectionStringOptions { Url = store.Url, DefaultDatabase = "Import" }
        });

        var countOfDocsAfterImport = store.DatabaseCommands.ForDatabase("Import").GetStatistics().CountOfDocuments;
        Assert.Equal(countOfDocuments, countOfDocsAfterImport - 1); // one additional doc for versioning bundle configuration

        var metadata = store.DatabaseCommands.ForDatabase("Import").Get("users/1").Metadata;
        Assert.True(metadata.ContainsKey(Constants.RavenIgnoreVersioning) == false,
            "Metadata contains temporary " + Constants.RavenIgnoreVersioning + " marker");

        // after import versioning should be active
        using (var session = store.OpenSession("Import"))
        {
            session.Store(new User(), "users/arek");
            session.SaveChanges();

            var revisionsFor = session.Advanced.GetRevisionsFor<User>("users/arek", 0, 10);
            Assert.Equal(1, revisionsFor.Length);
        }
    }
}
public async Task<HttpResponseMessage> ClusterConfiguration()
{
    var configuration = await ReadJsonObjectAsync<ClusterConfiguration>().ConfigureAwait(false);
    if (configuration == null)
        return GetEmptyMessage(HttpStatusCode.BadRequest);

    // Changing the replication check state is something the admin will do when there is no leader,
    // but with no leader this command can't go through Raft because it would fail.
    // For that case, verify that everything except the replication state check is unchanged, and if so
    // apply the change locally to this server, allowing it to become leader.
    try
    {
        await ClusterManager.Client.SendClusterConfigurationAsync(configuration).ConfigureAwait(false);
    }
    catch
    {
        var configurationJson = SystemDatabase.Documents.Get(Constants.Cluster.ClusterConfigurationDocumentKey, null);
        var localConfiguration = configurationJson?.DataAsJson.JsonDeserialization<ClusterConfiguration>();

        // This should not happen, but we don't want an NRE hiding the real problem.
        if (localConfiguration == null)
            throw;
        if (!ConfigurationsAreEqualExceptReplicationCheck(configuration, localConfiguration))
            throw;

        SystemDatabase.Documents.Put(Constants.Cluster.ClusterConfigurationDocumentKey, null,
            RavenJObject.FromObject(configuration), new RavenJObject(), null);
        return GetEmptyMessage();
    }
    return GetEmptyMessage();
}
public void WithOAuthOnSpecificDatabaseWontWorkForAnother()
{
    using (var server = GetNewServer(enableAuthentication: true))
    {
        server.SystemDatabase.Documents.Put("Raven/Databases/OAuthTest1", null, RavenJObject.FromObject(new DatabaseDocument
        {
            Disabled = false,
            Id = "Raven/Databases/OAuthTest1",
            Settings = new IdentityDictionary<string, string>
            {
                { "Raven/DataDir", "~\\Databases\\OAuthTest1" }
            }
        }), new RavenJObject(), null);

        server.SystemDatabase.Documents.Put("Raven/Databases/OAuthTest2", null, RavenJObject.FromObject(new DatabaseDocument
        {
            Disabled = false,
            Id = "Raven/Databases/OAuthTest2",
            Settings = new IdentityDictionary<string, string>
            {
                { "Raven/DataDir", "~\\Databases\\OAuthTest2" }
            }
        }), new RavenJObject(), null);

        server.SystemDatabase.Documents.Put("Raven/ApiKeys/test", null, RavenJObject.FromObject(new ApiKeyDefinition
        {
            Name = "test",
            Secret = "test",
            Enabled = true,
            Databases = new List<ResourceAccess>
            {
                new ResourceAccess { TenantId = "OAuthTest1" },
            }
        }), new RavenJObject(), null);

        using (var store = new DocumentStore
        {
            ApiKey = "test/test",
            DefaultDatabase = "OAuthTest2",
            Url = "http://localhost:8079",
            Conventions = { FailoverBehavior = FailoverBehavior.FailImmediately }
        }.Initialize())
        {
            Assert.Throws<ErrorResponseException>(() =>
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new ClientServer.Item(), "items/1");
                    session.SaveChanges();
                }
            });
        }
    }
}
public void CanAuthAsAdminAgainstTenantDb()
{
    using (var server = GetNewServer(enableAuthentication: true))
    {
        server.SystemDatabase.Documents.Put("Raven/ApiKeys/sysadmin", null, RavenJObject.FromObject(new ApiKeyDefinition
        {
            Name = "sysadmin",
            Secret = "ThisIsMySecret",
            Enabled = true,
            Databases = new List<DatabaseAccess>
            {
                new DatabaseAccess { TenantId = Constants.SystemDatabase, Admin = true },
            }
        }), new RavenJObject(), null);

        server.SystemDatabase.Documents.Put("Raven/ApiKeys/dbadmin", null, RavenJObject.FromObject(new ApiKeyDefinition
        {
            Name = "dbadmin",
            Secret = "ThisIsMySecret",
            Enabled = true,
            Databases = new List<DatabaseAccess>
            {
                new DatabaseAccess { TenantId = "*", Admin = true },
                new DatabaseAccess { TenantId = Constants.SystemDatabase, Admin = false },
            }
        }), new RavenJObject(), null);

        var serverUrl = server.SystemDatabase.ServerUrl;

        using (var store = new DocumentStore
        {
            Url = serverUrl,
            ApiKey = "sysadmin/ThisIsMySecret",
            Conventions = { FailoverBehavior = FailoverBehavior.FailImmediately }
        }.Initialize())
        {
            store.DatabaseCommands.GlobalAdmin.EnsureDatabaseExists("test");
        }

        using (var store = new DocumentStore
        {
            Url = serverUrl,
            ApiKey = "dbadmin/ThisIsMySecret"
        }.Initialize())
        {
            store.JsonRequestFactory.CreateHttpJsonRequest(new CreateHttpJsonRequestParams(null,
                store.Url + "/databases/test/admin/changeDbId", "POST",
                new OperationCredentials("dbadmin/ThisIsMySecret", null), store.Conventions))
                .ExecuteRequest(); // can do admin stuff

            var httpJsonRequest = store.JsonRequestFactory.CreateHttpJsonRequest(new CreateHttpJsonRequestParams(null,
                store.Url + "/databases/test/debug/user-info", "GET",
                new OperationCredentials("dbadmin/ThisIsMySecret", null), store.Conventions));

            var json = (RavenJObject)httpJsonRequest.ReadResponseJson();

            Assert.True(json.Value<bool>("IsAdminCurrentDb"));
        }
    }
}
public void WithOAuthWithStarWorksForAnyDatabaseOtherThenSystem()
{
    using (var server = GetNewServer(enableAuthentication: true))
    {
        server.SystemDatabase.Documents.Put("Raven/Databases/OAuthTest", null, RavenJObject.FromObject(new DatabaseDocument
        {
            Disabled = false,
            Id = "Raven/Databases/OAuthTest",
            Settings = new IdentityDictionary<string, string>
            {
                { "Raven/DataDir", "~\\Databases\\OAuthTest" }
            }
        }), new RavenJObject(), null);

        server.SystemDatabase.Documents.Put("Raven/ApiKeys/test", null, RavenJObject.FromObject(new ApiKeyDefinition
        {
            Name = "test",
            Secret = "test",
            Enabled = true,
            Databases = new List<ResourceAccess>
            {
                new ResourceAccess { TenantId = "*" },
            }
        }), new RavenJObject(), null);

        using (var store = new DocumentStore
        {
            ApiKey = "test/test",
            DefaultDatabase = "OAuthTest",
            Url = "http://localhost:8079",
            Conventions = { FailoverBehavior = FailoverBehavior.FailImmediately }
        }.Initialize())
        {
            var list = new BlockingCollection<DocumentChangeNotification>();
            var taskObservable = store.Changes();
            taskObservable.Task.Wait();

            var documentSubscription = taskObservable.ForDocument("items/1");
            documentSubscription.Task.Wait();
            documentSubscription.Subscribe(list.Add);

            using (var session = store.OpenSession())
            {
                session.Store(new ClientServer.Item(), "items/1");
                session.SaveChanges();
            }

            DocumentChangeNotification changeNotification;
            Assert.True(list.TryTake(out changeNotification, TimeSpan.FromSeconds(2)));
            Assert.Equal("items/1", changeNotification.Id);
            Assert.Equal(changeNotification.Type, DocumentChangeTypes.Put);
        }

        using (var store = new DocumentStore
        {
            ApiKey = "test/test",
            Url = "http://localhost:8079",
            Conventions = { FailoverBehavior = FailoverBehavior.FailImmediately }
        }.Initialize())
        {
            Assert.Throws<ErrorResponseException>(() =>
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new ClientServer.Item(), "items/1");
                    session.SaveChanges();
                }
            });
        }
    }
}
public override bool InitBackup()
{
    parameters.ServerUrl = parameters.ServerUrl.TrimEnd('/');
    try // precaution - so the error is reported properly, just in case
    {
        var serverUri = new Uri(parameters.ServerUrl);
        if ((String.IsNullOrWhiteSpace(serverUri.PathAndQuery) || serverUri.PathAndQuery.Equals("/")) &&
            String.IsNullOrWhiteSpace(parameters.Database))
            parameters.Database = Constants.SystemDatabase;

        var serverHostname = serverUri.Scheme + Uri.SchemeDelimiter + serverUri.Host + ":" + serverUri.Port;

        store = new DocumentStore { Url = serverHostname, DefaultDatabase = parameters.Database, ApiKey = parameters.ApiKey };
        store.Initialize();
    }
    catch (Exception exc)
    {
        Console.WriteLine(exc.Message);
        try
        {
            store.Dispose();
        }
        // ReSharper disable once EmptyGeneralCatchClause
        catch (Exception) { }
        return false;
    }

    var backupRequest = new
    {
        BackupLocation = parameters.BackupPath.Replace("\\", "\\\\"),
    };

    var json = RavenJObject.FromObject(backupRequest).ToString();

    var url = "/admin/backup";
    if (parameters.Incremental)
        url += "?incremental=true";

    try
    {
        using (var req = CreateRequest(url, "POST"))
        {
            req.WriteAsync(json).Wait();
            Console.WriteLine("Sending json {0} to {1}", json, parameters.ServerUrl);

            var response = req.ReadResponseJson();
            Console.WriteLine(response);
        }
    }
    catch (Exception exc)
    {
        Console.WriteLine(exc.Message);
        return false;
    }
    return true;
}
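The request body built above is nothing more than an anonymous object run through RavenJObject.FromObject. A small sketch of the shape it produces; the path value is illustrative:

// Sketch: shape of the /admin/backup request body (illustrative path value).
var backupRequest = new { BackupLocation = "Backups\\Northwind" };
var json = RavenJObject.FromObject(backupRequest).ToString();
// json carries a single property, e.g. { "BackupLocation": "Backups\\Northwind" };
// InitBackup additionally doubles backslashes in the user-supplied path before serializing.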
public void AddingDocInTxWhenItWasAddedInAnotherWillFail()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        Assert.Throws<ConcurrencyException>(() => tx.Batch(mutator =>
            mutator.Transactions.AddDocumentInTransaction("Ayende", null,
                RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(),
                new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) })));
    }
}
private static void InsertHidenUsers(IDocumentStore store, int amount)
{
    for (var i = 0; i < amount; i++)
    {
        store.DatabaseCommands.Put("user/" + (i + 1), null, new RavenJObject(),
            RavenJObject.FromObject(new { hidden = true }));
    }
}
public void AddingDocInTxWillReadOldValueOutsideIt()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Documents.AddDocument("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject()));
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien2" }), new RavenJObject(), transactionInformation));
        tx.Batch(viewer =>
        {
            var doc = viewer.Documents.DocumentByKey("Ayende", null);
            Assert.Equal("Rahien", doc.DataAsJson.Value<string>("Name"));
        });
    }
}
public async Task<HttpResponseMessage> ImportDatabase(int batchSize, bool includeExpiredDocuments, bool stripReplicationInformation, ItemType operateOnTypes, string filtersPipeDelimited, string transformScript)
{
    if (!Request.Content.IsMimeMultipartContent())
        throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);

    string tempPath = Path.GetTempPath();
    var fullTempPath = tempPath + Constants.TempUploadsDirectoryName;
    if (File.Exists(fullTempPath))
        File.Delete(fullTempPath);
    if (Directory.Exists(fullTempPath) == false)
        Directory.CreateDirectory(fullTempPath);

    var streamProvider = new MultipartFileStreamProvider(fullTempPath);
    await Request.Content.ReadAsMultipartAsync(streamProvider).ConfigureAwait(false);
    var uploadedFilePath = streamProvider.FileData[0].LocalFileName;

    string fileName = null;
    var fileContent = streamProvider.Contents.SingleOrDefault();
    if (fileContent != null)
    {
        fileName = fileContent.Headers.ContentDisposition.FileName.Replace("\"", string.Empty);
    }

    var status = new ImportOperationStatus();
    var cts = new CancellationTokenSource();

    var task = Task.Run(async () =>
    {
        try
        {
            using (var fileStream = File.Open(uploadedFilePath, FileMode.Open, FileAccess.Read))
            {
                var dataDumper = new DatabaseDataDumper(Database);
                dataDumper.Progress += s => status.LastProgress = s;
                var smugglerOptions = dataDumper.Options;
                smugglerOptions.BatchSize = batchSize;
                smugglerOptions.ShouldExcludeExpired = !includeExpiredDocuments;
                smugglerOptions.StripReplicationInformation = stripReplicationInformation;
                smugglerOptions.OperateOnTypes = operateOnTypes;
                smugglerOptions.TransformScript = transformScript;
                smugglerOptions.CancelToken = cts;

                // Filters are passed in without the aid of the model binder. Instead, we pass in a list of
                // FilterSettings using a string like this: pathHere;;;valueHere;;;true|||againPathHere;;;anotherValue;;;false
                // Why? Because I don't see a way to pass a list of values to a WebAPI method that accepts a
                // file upload, outside of passing in a simple string value and parsing it ourselves.
                if (filtersPipeDelimited != null)
                {
                    smugglerOptions.Filters.AddRange(filtersPipeDelimited
                        .Split(new string[] { "|||" }, StringSplitOptions.RemoveEmptyEntries)
                        .Select(f => f.Split(new string[] { ";;;" }, StringSplitOptions.RemoveEmptyEntries))
                        .Select(o => new FilterSetting { Path = o[0], Values = new List<string> { o[1] }, ShouldMatch = bool.Parse(o[2]) }));
                }

                await dataDumper.ImportData(new SmugglerImportOptions<RavenConnectionStringOptions> { FromStream = fileStream });
            }
        }
        catch (Exception e)
        {
            status.Faulted = true;
            status.State = RavenJObject.FromObject(new { Error = e.ToString() });
            if (cts.Token.IsCancellationRequested)
            {
                status.State = RavenJObject.FromObject(new { Error = "Task was cancelled" });
                cts.Token.ThrowIfCancellationRequested(); // needed for displaying the task status as canceled and not faulted
            }

            if (e is InvalidDataException)
            {
                status.ExceptionDetails = e.Message;
            }
            else if (e is Imports.Newtonsoft.Json.JsonReaderException)
            {
                status.ExceptionDetails = "Failed to load JSON data. Please make sure you are importing a .ravendump file, exported by smuggler (aka database export). If you are importing a .ravendump file then the file may be corrupted";
            }
            else
            {
                status.ExceptionDetails = e.ToString();
            }
            throw;
        }
        finally
        {
            status.Completed = true;
            File.Delete(uploadedFilePath);
        }
    }, cts.Token);

    long id;
    Database.Tasks.AddTask(task, status, new TaskActions.PendingTaskDescription
    {
        StartTime = SystemTime.UtcNow,
        TaskType = TaskActions.PendingTaskType.ImportDatabase,
        Payload = fileName,
    }, out id, cts);

    return GetMessageWithObject(new { OperationId = id });
}
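The ';;;' / '|||' filter format described in the comment above is easy to exercise on its own. A minimal sketch of the same parsing logic lifted out of the handler; the sample string is illustrative, and FilterSetting is the smuggler options type used above:

// Sketch: parsing the pipe-delimited filter string used by ImportDatabase.
var filtersPipeDelimited = "Status;;;Active;;;true|||Type;;;Internal;;;false"; // illustrative
var filters = filtersPipeDelimited
    .Split(new[] { "|||" }, StringSplitOptions.RemoveEmptyEntries)
    .Select(f => f.Split(new[] { ";;;" }, StringSplitOptions.RemoveEmptyEntries))
    .Select(o => new FilterSetting
    {
        Path = o[0],                        // document path to match
        Values = new List<string> { o[1] }, // value to compare against
        ShouldMatch = bool.Parse(o[2])      // include or exclude on match
    })
    .ToList();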
public void AddingDocumentInTxThenAddingWithoutTxAfterTxExpiredWorks()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(-7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien1" }), new RavenJObject(), transactionInformation));
        tx.Batch(mutator => mutator.Documents.AddDocument("Ayende", Guid.NewGuid(),
            RavenJObject.FromObject(new { Name = "Rahien2" }), new RavenJObject()));
        tx.Batch(viewer =>
        {
            var doc = viewer.Documents.DocumentByKey("Ayende", transactionInformation);
            Assert.Equal("Rahien2", doc.DataAsJson.Value<string>("Name"));
        });
    }
}
public Task<HttpResponseMessage> ResetSqlReplication(string sqlReplicationName)
{
    var task = Database.StartupTasks.OfType<SqlReplicationTask>().FirstOrDefault();
    if (task == null)
        return GetMessageWithObjectAsTask(new { Error = "SQL Replication bundle is not installed" }, HttpStatusCode.NotFound);

    SqlReplicationStatistics stats;
    task.Statistics.TryRemove(sqlReplicationName, out stats);

    var jsonDocument = Database.Documents.Get(SqlReplicationTask.RavenSqlReplicationStatus, null);
    if (jsonDocument != null)
    {
        var replicationStatus = jsonDocument.DataAsJson.JsonDeserialization<SqlReplicationStatus>();
        replicationStatus.LastReplicatedEtags.RemoveAll(x => x.Name == sqlReplicationName);
        Database.Documents.Put(SqlReplicationTask.RavenSqlReplicationStatus, null,
            RavenJObject.FromObject(replicationStatus), new RavenJObject(), null);
    }

    return GetEmptyMessageAsTask(HttpStatusCode.NoContent);
}
public void AfterCommittingCanSeeChangesWithoutTx()
{
    var transactionInformation = new TransactionInformation { Id = Guid.NewGuid(), Timeout = TimeSpan.FromDays(7) };
    using (var tx = NewTransactionalStorage())
    {
        tx.Batch(mutator => mutator.Transactions.AddDocumentInTransaction("Ayende", null,
            RavenJObject.FromObject(new { Name = "Rahien" }), new RavenJObject(), transactionInformation));
        tx.Batch(mutator => mutator.Transactions.CompleteTransaction(transactionInformation.Id, data =>
        {
            if (data.Delete)
            {
                RavenJObject metadata;
                mutator.Documents.DeleteDocument(data.Key, null, out metadata);
            }
            else
            {
                mutator.Documents.AddDocument(data.Key, null, data.Data, data.Metadata);
            }
        }));
        tx.Batch(viewer => Assert.NotNull(viewer.Documents.DocumentByKey("Ayende", null)));
    }
}
public override void IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, WorkContext context, IStorageActionsAccessor actions, DateTime minimumTimestamp)
{
    var count = 0;
    var sourceCount = 0;
    var sw = Stopwatch.StartNew();
    var start = SystemTime.UtcNow;
    var changed = new HashSet<ReduceKeyAndBucket>();

    var documentsWrapped = batch.Docs.Select(doc =>
    {
        sourceCount++;
        var documentId = doc.__document_id;
        actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, name, changed);
        return doc;
    })
    .Where(x => x is FilteredDocument == false);

    var stats = new IndexingWorkStats();
    foreach (var mappedResultFromDocument in GroupByDocumentId(context,
        RobustEnumerationIndex(documentsWrapped.GetEnumerator(), viewGenerator.MapDefinitions, actions, stats)))
    {
        var dynamicResults = mappedResultFromDocument
            .Select(x => (object)new DynamicJsonObject(RavenJObject.FromObject(x, jsonSerializer)))
            .ToList();
        foreach (var doc in RobustEnumerationReduceDuringMapPhase(dynamicResults.GetEnumerator(), viewGenerator.ReduceDefinition, actions, context))
        {
            count++;

            var reduceValue = viewGenerator.GroupByExtraction(doc);
            if (reduceValue == null)
            {
                logIndexing.Debug("Field {0} is used as the reduce key and cannot be null, skipping document {1}",
                    viewGenerator.GroupByExtraction, mappedResultFromDocument.Key);
                continue;
            }
            var reduceKey = ReduceKeyToString(reduceValue);
            var docId = mappedResultFromDocument.Key.ToString();

            var data = GetMappedData(doc);

            logIndexing.Debug("Mapped result for index '{0}' doc '{1}': '{2}'", name, docId, data);

            actions.MapReduce.PutMappedResult(name, docId, reduceKey, data);

            changed.Add(new ReduceKeyAndBucket(IndexingUtil.MapBucket(docId), reduceKey));
        }
    }

    UpdateIndexingStats(context, stats);
    actions.MapReduce.ScheduleReductions(name, 0, changed);
    AddindexingPerformanceStat(new IndexingPerformanceStats
    {
        OutputCount = count,
        InputCount = sourceCount,
        Operation = "Map",
        Duration = sw.Elapsed,
        Started = start
    });
    logIndexing.Debug("Mapped {0} documents for {1}", count, name);
}
internal void DeleteIndex(IndexDefinition instance, bool removeByNameMapping = true, bool clearErrors = true, bool removeIndexReplaceDocument = true, bool isSideBySideReplacement = false)
{
    using (IndexDefinitionStorage.TryRemoveIndexContext())
    {
        if (instance == null)
            return;

        // Set up a flag to signal that this is something we're doing
        TransactionalStorage.Batch(actions => actions.Lists.Set("Raven/Indexes/PendingDeletion",
            instance.IndexId.ToString(CultureInfo.InvariantCulture),
            RavenJObject.FromObject(new
            {
                TimeOfOriginalDeletion = SystemTime.UtcNow,
                instance.IndexId,
                IndexName = instance.Name
            }), UuidType.Tasks));

        // Delete the main record synchronously
        IndexDefinitionStorage.RemoveIndex(instance.IndexId, removeByNameMapping);
        Database.IndexStorage.DeleteIndex(instance.IndexId);

        if (clearErrors)
            WorkContext.ClearErrorsFor(instance.Name);
        if (removeIndexReplaceDocument && instance.IsSideBySideIndex)
            Database.Documents.Delete(Constants.IndexReplacePrefix + instance.Name, null, null);

        // And delete the data in the background
        StartDeletingIndexDataAsync(instance.IndexId, instance.Name);

        var indexChangeType = isSideBySideReplacement ? IndexChangeTypes.SideBySideReplace : IndexChangeTypes.IndexRemoved;

        // We raise the notification now because as far as we're concerned it is done *now*
        TransactionalStorage.ExecuteImmediatelyOrRegisterForSynchronization(() =>
            Database.Notifications.RaiseNotifications(new IndexChangeNotification
            {
                Name = instance.Name,
                Type = indexChangeType,
            }));
    }
}
private RangeValue GetNextRange(IDatabaseCommands databaseCommands)
{
    using (new TransactionScope(TransactionScopeOption.Suppress))
    using (RavenTransactionAccessor.SupressExplicitRavenTransaction())
    using (databaseCommands.ForceReadFromMaster())
    {
        ModifyCapacityIfRequired();
        while (true)
        {
            try
            {
                var minNextMax = Range.Max;
                JsonDocument document;
                try
                {
                    document = GetDocument(databaseCommands);
                }
                catch (ConflictException e)
                {
                    // resolve the conflict by selecting the highest number
                    var highestMax = e.ConflictedVersionIds
                        .Select(conflictedVersionId => GetMaxFromDocument(databaseCommands.Get(conflictedVersionId), minNextMax))
                        .Max();

                    PutDocument(databaseCommands, new JsonDocument
                    {
                        Etag = e.Etag,
                        Metadata = new RavenJObject(),
                        DataAsJson = RavenJObject.FromObject(new { Max = highestMax }),
                        Key = HiLoDocumentKey
                    });

                    continue;
                }

                long min, max;
                if (document == null)
                {
                    min = minNextMax + 1;
                    max = minNextMax + capacity;
                    document = new JsonDocument
                    {
                        Etag = Etag.Empty, // sending an empty etag ensures that the document does NOT already exist
                        Metadata = new RavenJObject(),
                        DataAsJson = RavenJObject.FromObject(new { Max = max }),
                        Key = HiLoDocumentKey
                    };
                }
                else
                {
                    var oldMax = GetMaxFromDocument(document, minNextMax);
                    min = oldMax + 1;
                    max = oldMax + capacity;
                    document.DataAsJson["Max"] = max;
                }
                PutDocument(databaseCommands, document);

                return new RangeValue(min, max);
            }
            catch (ConcurrencyException)
            {
                // expected, we need to retry
            }
        }
    }
}
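The range arithmetic in GetNextRange is the core of the HiLo scheme: each call reserves the block [oldMax + 1, oldMax + capacity] and writes the new Max back. A worked sketch of just that step, with illustrative values:

// Sketch: one HiLo reservation step in isolation (illustrative numbers).
long oldMax = 32;             // Max currently stored in the HiLo document
long capacity = 32;           // ids reserved per server round-trip
long min = oldMax + 1;        // first id of the new range => 33
long max = oldMax + capacity; // new Max written back => 64
// ids 33..64 are now handed out locally with no further server calls;
// concurrent writers are reconciled by keeping the highest Max (see the
// ConflictException handler above) and retrying on ConcurrencyException.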
public void Execute()
{
    try
    {
        log.Info("Starting backup of '{0}' to '{1}'", backupSourceDirectory, backupDestinationDirectory);
        UpdateBackupStatus(
            string.Format("Started backup process. Backing up data to directory = '{0}'", backupDestinationDirectory),
            null, BackupStatus.BackupMessageSeverity.Informational);

        EnsureBackupDestinationExists();

        if (incrementalBackup)
        {
            var incrementalBackupState = Path.Combine(backupDestinationDirectory, Constants.IncrementalBackupState);

            if (File.Exists(incrementalBackupState))
            {
                var state = RavenJObject.Parse(File.ReadAllText(incrementalBackupState)).JsonDeserialization<IncrementalBackupState>();

                if (state.ResourceId != filesystem.Storage.Id)
                    throw new InvalidOperationException(string.Format("Can't perform an incremental backup to the given folder because it already contains incremental backup data of a different file system. The existing incremental data originates from the '{0}' file system.", state.ResourceName));
            }
            else
            {
                var state = new IncrementalBackupState
                {
                    ResourceId = filesystem.Storage.Id,
                    ResourceName = filesystem.Name
                };
                File.WriteAllText(incrementalBackupState, RavenJObject.FromObject(state).ToString());
            }

            if (CanPerformIncrementalBackup())
            {
                backupDestinationDirectory = DirectoryForIncrementalBackup();
            }
            else
            {
                incrementalBackup = false; // destination wasn't detected as a backup folder, automatically revert to a full backup
            }
        }
        else if (BackupAlreadyExists)
        {
            throw new InvalidOperationException("Denying request to perform a full backup to an existing backup folder. Try doing an incremental backup instead.");
        }

        UpdateBackupStatus("Backing up indexes..", null, BackupStatus.BackupMessageSeverity.Informational);

        // Make sure we have an Indexes folder in the backup location
        if (!Directory.Exists(Path.Combine(backupDestinationDirectory, "Indexes")))
            Directory.CreateDirectory(Path.Combine(backupDestinationDirectory, "Indexes"));

        filesystem.Search.Backup(backupDestinationDirectory);

        UpdateBackupStatus("Finished indexes backup. Executing data backup..", null, BackupStatus.BackupMessageSeverity.Informational);

        ExecuteBackup(backupDestinationDirectory, incrementalBackup);

        if (filesystemDocument != null)
            File.WriteAllText(Path.Combine(backupDestinationDirectory, Constants.FilesystemDocumentFilename),
                RavenJObject.FromObject(filesystemDocument).ToString());

        OperationFinished();
    }
    catch (AggregateException e)
    {
        var ne = e.ExtractSingleInnerException();
        log.ErrorException("Failed to complete backup", ne);
        UpdateBackupStatus("Failed to complete backup because: " + ne.Message, ne.ExceptionToString(null), BackupStatus.BackupMessageSeverity.Error);
    }
    catch (Exception e)
    {
        log.ErrorException("Failed to complete backup", e);
        UpdateBackupStatus("Failed to complete backup because: " + e.Message, e.ExceptionToString(null), BackupStatus.BackupMessageSeverity.Error);
    }
    finally
    {
        CompleteBackup();
    }
}
public static async Task Between(SmugglerBetweenOptions<RavenConnectionStringOptions> betweenOptions, SmugglerDatabaseOptions databaseOptions)
{
    SetDatabaseNameIfEmpty(betweenOptions.From);
    SetDatabaseNameIfEmpty(betweenOptions.To);

    using (var exportStore = CreateStore(betweenOptions.From))
    using (var importStore = CreateStore(betweenOptions.To))
    {
        SmugglerDatabaseApi.ValidateThatServerIsUpAndDatabaseExists(betweenOptions.From, exportStore);
        SmugglerDatabaseApi.ValidateThatServerIsUpAndDatabaseExists(betweenOptions.To, importStore);

        var exportBatchSize = GetBatchSize(exportStore, databaseOptions);
        var importBatchSize = GetBatchSize(importStore, databaseOptions);

        var exportStoreSupportedFeatures = await DetectServerSupportedFeatures(exportStore);
        var importStoreSupportedFeatures = await DetectServerSupportedFeatures(importStore);

        if (string.IsNullOrEmpty(betweenOptions.IncrementalKey))
        {
            betweenOptions.IncrementalKey = ((AsyncServerClient)exportStore.AsyncDatabaseCommands).Url;
        }

        var incremental = new ExportIncremental();
        if (databaseOptions.Incremental)
        {
            var jsonDocument = await importStore.AsyncDatabaseCommands.GetAsync(SmugglerExportIncremental.RavenDocumentKey);
            if (jsonDocument != null)
            {
                var smugglerExportIncremental = jsonDocument.DataAsJson.JsonDeserialization<SmugglerExportIncremental>();
                ExportIncremental value;
                if (smugglerExportIncremental.ExportIncremental.TryGetValue(betweenOptions.IncrementalKey, out value))
                {
                    incremental = value;
                }
                databaseOptions.StartDocsEtag = incremental.LastDocsEtag ?? Etag.Empty;
                databaseOptions.StartAttachmentsEtag = incremental.LastAttachmentsEtag ?? Etag.Empty;
            }
        }

        if (databaseOptions.OperateOnTypes.HasFlag(ItemType.Indexes))
        {
            await ExportIndexes(exportStore, importStore, exportBatchSize);
        }
        if (databaseOptions.OperateOnTypes.HasFlag(ItemType.Transformers) &&
            exportStoreSupportedFeatures.IsTransformersSupported && importStoreSupportedFeatures.IsTransformersSupported)
        {
            await ExportTransformers(exportStore, importStore, exportBatchSize);
        }
        if (databaseOptions.OperateOnTypes.HasFlag(ItemType.Documents))
        {
            incremental.LastDocsEtag = await ExportDocuments(exportStore, importStore, databaseOptions, exportStoreSupportedFeatures, exportBatchSize, importBatchSize);
        }
        if (databaseOptions.OperateOnTypes.HasFlag(ItemType.Attachments))
        {
            incremental.LastAttachmentsEtag = await ExportAttachments(exportStore, importStore, databaseOptions, exportBatchSize);
        }
        if (exportStoreSupportedFeatures.IsIdentitiesSmugglingSupported && importStoreSupportedFeatures.IsIdentitiesSmugglingSupported)
        {
            await ExportIdentities(exportStore, importStore, databaseOptions.OperateOnTypes);
        }

        if (databaseOptions.Incremental)
        {
            var smugglerExportIncremental = new SmugglerExportIncremental();
            var jsonDocument = await importStore.AsyncDatabaseCommands.GetAsync(SmugglerExportIncremental.RavenDocumentKey);
            if (jsonDocument != null)
            {
                smugglerExportIncremental = jsonDocument.DataAsJson.JsonDeserialization<SmugglerExportIncremental>();
            }
            smugglerExportIncremental.ExportIncremental[betweenOptions.IncrementalKey] = incremental;

            await importStore.AsyncDatabaseCommands.PutAsync(SmugglerExportIncremental.RavenDocumentKey, null,
                RavenJObject.FromObject(smugglerExportIncremental), new RavenJObject());
        }
    }
}
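When databaseOptions.Incremental is set, Between() persists its resume points in a SmugglerExportIncremental document keyed by IncrementalKey (by default the export store's URL). A hedged sketch of building that bookkeeping document; the key and etag values are illustrative:

// Sketch: the incremental-export bookkeeping document written by Between() (illustrative values).
var bookkeeping = new SmugglerExportIncremental();
bookkeeping.ExportIncremental["http://localhost:8080/databases/Source"] = new ExportIncremental
{
    LastDocsEtag = Etag.Empty,        // resume point for the next documents export
    LastAttachmentsEtag = Etag.Empty  // resume point for the next attachments export
};
var doc = RavenJObject.FromObject(bookkeeping); // stored under SmugglerExportIncremental.RavenDocumentKey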