private async Task MigrateDocuments(string lastEtag)
{
    var response = await RunWithAuthRetry(async () =>
    {
        var url = $"{Options.ServerUrl}/databases/{Options.DatabaseName}/streams/docs?etag={lastEtag}";
        var request = new HttpRequestMessage(HttpMethod.Get, url);
        var responseMessage = await Parameters.HttpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, Parameters.CancelToken.Token);
        return responseMessage;
    });

    if (response.IsSuccessStatusCode == false)
    {
        var responseString = await response.Content.ReadAsStringAsync();
        throw new InvalidOperationException($"Failed to export documents from server: {Options.ServerUrl}, " +
                                            $"status code: {response.StatusCode}, " +
                                            $"error: {responseString}");
    }

    using (var responseStream = await response.Content.ReadAsStreamAsync())
    using (Parameters.Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (var source = new StreamSource(responseStream, context, Parameters.Database))
    {
        var destination = new DatabaseDestination(Parameters.Database);
        var options = new DatabaseSmugglerOptionsServerSide
        {
#pragma warning disable 618
            ReadLegacyEtag = true,
#pragma warning restore 618
            TransformScript = Options.TransformScript,
            OperateOnTypes = Options.OperateOnTypes
        };
        var smuggler = new DatabaseSmuggler(Parameters.Database, source, destination, Parameters.Database.Time, options, Parameters.Result, Parameters.OnProgress, Parameters.CancelToken.Token);

        // since the indexes are migrated as a separate task, don't ensure steps are processed at this point
        smuggler.Execute(ensureStepsProcessed: false);
    }
}
private SmugglerResult CreateBackup(DatabaseSmugglerOptionsServerSide options, string backupFilePath, long? startDocumentEtag, DocumentsOperationContext context)
{
    // the last etag is already included in the last backup
    startDocumentEtag = startDocumentEtag == null ? 0 : ++startDocumentEtag;

    SmugglerResult result;
    using (var file = File.Open(backupFilePath, FileMode.CreateNew))
    {
        var smugglerSource = new DatabaseSource(_database, startDocumentEtag.Value);
        var smugglerDestination = new StreamDestination(file, context, smugglerSource);
        var smuggler = new DatabaseSmuggler(_database,
            smugglerSource,
            smugglerDestination,
            _database.Time,
            token: _cancellationToken.Token,
            options: options);

        result = smuggler.Execute();
    }

    return result;
}
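// A minimal sketch (not part of the codebase above) of the start-etag convention that
// CreateBackup relies on: the previous backup already includes its last etag, so an
// incremental backup resumes from the next one, and a null etag means a full export.
// The class and method names here are illustrative only.
public static class IncrementalBackupEtagExample
{
    public static long ComputeStartEtag(long? lastBackedUpEtag)
    {
        // null => no previous backup, export everything from etag 0;
        // otherwise skip the etag the previous backup already covered
        return lastBackedUpEtag == null ? 0 : lastBackedUpEtag.Value + 1;
    }
}
// e.g. ComputeStartEtag(null) == 0, ComputeStartEtag(17) == 18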
private async Task<SmugglerResult> MigrateDatabase(string json, bool readLegacyEtag)
{
    var url = $"{ServerUrl}/databases/{DatabaseName}/studio-tasks/exportDatabase";
    var content = new StringContent(json, Encoding.UTF8, "application/json");
    var request = new HttpRequestMessage(HttpMethod.Post, url)
    {
        Content = content
    };

    var response = await HttpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, CancelToken.Token);
    if (response.IsSuccessStatusCode == false)
    {
        var responseString = await response.Content.ReadAsStringAsync();
        throw new InvalidOperationException($"Failed to export database from server: {ServerUrl}, " +
                                            $"status code: {response.StatusCode}, " +
                                            $"error: {responseString}");
    }

    using (var responseStream = await response.Content.ReadAsStreamAsync())
    using (var stream = new GZipStream(responseStream, mode: CompressionMode.Decompress))
    using (Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (var source = new StreamSource(stream, context, Database))
    {
        var destination = new DatabaseDestination(Database);
        var options = new DatabaseSmugglerOptionsServerSide
        {
#pragma warning disable 618
            ReadLegacyEtag = readLegacyEtag,
#pragma warning restore 618
            RemoveAnalyzers = RemoveAnalyzers
        };
        var smuggler = new DatabaseSmuggler(Database, source, destination, Database.Time, options, Result, OnProgress, CancelToken.Token);

        return smuggler.Execute();
    }
}
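// A self-contained sketch of the streaming pattern MigrateDatabase uses:
// HttpCompletionOption.ResponseHeadersRead returns as soon as headers arrive, so the
// (potentially huge) export body is consumed incrementally instead of being buffered,
// and GZipStream decompresses it on the fly. The URL is a placeholder; a real caller
// would point this at an actual export endpoint.
using System.IO.Compression;
using System.Net.Http;
using System.Threading.Tasks;

public static class StreamingExportExample
{
    private static readonly HttpClient Client = new HttpClient();

    public static async Task<long> CountDecompressedBytesAsync(string exportUrl)
    {
        using (var response = await Client.GetAsync(exportUrl, HttpCompletionOption.ResponseHeadersRead))
        {
            response.EnsureSuccessStatusCode();

            using (var responseStream = await response.Content.ReadAsStreamAsync())
            using (var gzip = new GZipStream(responseStream, CompressionMode.Decompress))
            {
                var buffer = new byte[81920];
                long total = 0;
                int read;
                // a real importer would hand this stream to a parser (e.g. a StreamSource)
                while ((read = await gzip.ReadAsync(buffer, 0, buffer.Length)) > 0)
                    total += read;
                return total;
            }
        }
    }
}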
public override async Task Execute()
{
    var state = GetLastMigrationState();
    var originalState = state;

    var operateOnTypes = GenerateOperateOnTypes();
    if (operateOnTypes == ItemType.None && Options.ImportRavenFs == false)
    {
        throw new BadRequestException("No types to import");
    }

    if (Options.ImportRavenFs)
    {
        Parameters.Result.AddInfo("Started processing RavenFS files");
        Parameters.OnProgress.Invoke(Parameters.Result.Progress);

        var lastRavenFsEtag = await MigrateRavenFs(state?.LastRavenFsEtag ?? LastEtagsInfo.EtagEmpty, Parameters.Result);
        state = GetLastMigrationState() ?? GenerateLastEtagsInfo();
        state.LastRavenFsEtag = lastRavenFsEtag;
        await SaveLastOperationState(state);
    }

    if (operateOnTypes != ItemType.None)
    {
        if (Options.ImportRavenFs && operateOnTypes.HasFlag(ItemType.Documents) == false)
        {
            Parameters.Result.Documents.Processed = true;
            Parameters.OnProgress.Invoke(Parameters.Result.Progress);
        }

        var databaseMigrationOptions = new DatabaseMigrationOptions
        {
            BatchSize = 1024,
            OperateOnTypes = operateOnTypes,
            ExportDeletions = originalState != null,
            StartDocsEtag = state?.LastDocsEtag ?? LastEtagsInfo.EtagEmpty,
            StartDocsDeletionEtag = state?.LastDocDeleteEtag ?? LastEtagsInfo.EtagEmpty,
            StartAttachmentsEtag = state?.LastAttachmentsEtag ?? LastEtagsInfo.EtagEmpty,
            StartAttachmentsDeletionEtag = state?.LastAttachmentsDeleteEtag ?? LastEtagsInfo.EtagEmpty
        };

        // getting a new operation id was added in v3.5
        var operationId = _majorVersion == MajorVersion.V30 ? 0 : await GetOperationId();

        object exportData;
        if (_majorVersion == MajorVersion.V30)
        {
            exportData = new ExportDataV3
            {
                SmugglerOptions = JsonConvert.SerializeObject(databaseMigrationOptions)
            };
        }
        else
        {
            exportData = new ExportDataV35
            {
                DownloadOptions = JsonConvert.SerializeObject(databaseMigrationOptions),
                ProgressTaskId = operationId
            };
        }

        var exportOptions = JsonConvert.SerializeObject(exportData);
        var canGetLastStateByOperationId = _buildVersion >= 35215;

        await MigrateDatabase(exportOptions, readLegacyEtag: canGetLastStateByOperationId == false);

        var lastState = await GetLastState(canGetLastStateByOperationId, operationId);
        if (lastState != null)
        {
            // refresh the migration state, in case a RavenFS migration is running concurrently
            lastState.LastRavenFsEtag = GetLastMigrationState()?.LastRavenFsEtag ?? LastEtagsInfo.EtagEmpty;
            await SaveLastOperationState(lastState);
        }
    }
    else
    {
        if (Options.ImportRavenFs)
        {
            Parameters.Result.Documents.Processed = true;
        }

        DatabaseSmuggler.EnsureProcessed(Parameters.Result);
    }
}
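// A small sketch of the doubly-serialized payload built in Execute(): the migration
// options are serialized to a JSON string first, then embedded in a version-specific
// envelope that is itself serialized. The types below are illustrative stand-ins,
// using Newtonsoft.Json (JsonConvert) as the snippet above does.
using Newtonsoft.Json;

public static class ExportPayloadExample
{
    private class MigrationOptions
    {
        public int BatchSize { get; set; }
    }

    private class EnvelopeV35
    {
        public string DownloadOptions { get; set; }
        public long ProgressTaskId { get; set; }
    }

    public static string Build(long operationId)
    {
        var options = new MigrationOptions { BatchSize = 1024 };
        var envelope = new EnvelopeV35
        {
            DownloadOptions = JsonConvert.SerializeObject(options), // nested JSON string
            ProgressTaskId = operationId
        };
        return JsonConvert.SerializeObject(envelope);
        // => {"DownloadOptions":"{\"BatchSize\":1024}","ProgressTaskId":...}
    }
}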
public BackupResult RunPeriodicBackup(Action<IOperationProgress> onProgress, ref PeriodicBackupStatus runningBackupStatus)
{
    _onProgress = onProgress;
    AddInfo($"Started task: '{_taskName}'");

    var totalSw = Stopwatch.StartNew();
    var operationCanceled = false;

    try
    {
        if (_forTestingPurposes != null && _forTestingPurposes.SimulateFailedBackup)
            throw new Exception(nameof(_forTestingPurposes.SimulateFailedBackup));
        if (_forTestingPurposes != null && _forTestingPurposes.OnBackupTaskRunHoldBackupExecution != null)
            _forTestingPurposes.OnBackupTaskRunHoldBackupExecution.Task.Wait();

        if (runningBackupStatus.LocalBackup == null)
            runningBackupStatus.LocalBackup = new LocalBackup();

        if (runningBackupStatus.LastRaftIndex == null)
            runningBackupStatus.LastRaftIndex = new LastRaftIndex();

        runningBackupStatus.IsFull = _isFullBackup;

        if (_logger.IsInfoEnabled)
        {
            var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
            _logger.Info($"Creating {(_isFullBackup ? fullBackupText : "an incremental backup")}");
        }

        if (_isFullBackup == false)
        {
            // if we come from an old version, the _previousBackupStatus won't have LastRaftIndex
            _previousBackupStatus.LastRaftIndex ??= new LastRaftIndex();

            // no-op if nothing has changed
            var (currentLastEtag, currentChangeVector) = _database.ReadLastEtagAndChangeVector();
            var currentLastRaftIndex = GetDatabaseEtagForBackup();

            if (currentLastEtag == _previousBackupStatus.LastEtag &&
                currentChangeVector == _previousBackupStatus.LastDatabaseChangeVector &&
                currentLastRaftIndex == _previousBackupStatus.LastRaftIndex.LastEtag)
            {
                var message = $"Skipping incremental backup because no changes were made from last full backup on {_previousBackupStatus.LastFullBackup}.";
                if (_logger.IsInfoEnabled)
                    _logger.Info(message);

                runningBackupStatus.LastIncrementalBackup = _startTimeUtc;
                runningBackupStatus.LocalBackup.LastIncrementalBackup = _startTimeUtc;
                runningBackupStatus.LocalBackup.IncrementalBackupDurationInMs = 0;
                DatabaseSmuggler.EnsureProcessed(_backupResult);
                AddInfo(message);

                return _backupResult;
            }
        }

        // update the local configuration before starting the local backup
        var localSettings = GetBackupConfigurationFromScript(_configuration.LocalSettings, x => JsonDeserializationServer.LocalSettings(x),
            settings => PutServerWideBackupConfigurationCommand.UpdateSettingsForLocal(settings, _database.Name));

        GenerateFolderNameAndBackupDirectory(localSettings, _startTimeUtc, out var nowAsString, out var folderName, out var backupDirectory);
        var startDocumentEtag = _isFullBackup == false ? _previousBackupStatus.LastEtag : null;
        var startRaftIndex = _isFullBackup == false ? _previousBackupStatus.LastRaftIndex.LastEtag : null;

        var fileName = GetFileName(_isFullBackup, backupDirectory.FullPath, nowAsString, _configuration.BackupType, out string backupFilePath);
        var internalBackupResult = CreateLocalBackupOrSnapshot(runningBackupStatus, backupFilePath, startDocumentEtag, startRaftIndex);

        runningBackupStatus.LocalBackup.BackupDirectory = _backupToLocalFolder ? backupDirectory.FullPath : null;
        runningBackupStatus.LocalBackup.TempFolderUsed = _backupToLocalFolder == false;
        runningBackupStatus.IsEncrypted = _isBackupEncrypted;

        try
        {
            UploadToServer(backupFilePath, folderName, fileName);
        }
        finally
        {
            runningBackupStatus.UploadToS3 = _backupResult.S3Backup;
            runningBackupStatus.UploadToAzure = _backupResult.AzureBackup;
            runningBackupStatus.UploadToGoogleCloud = _backupResult.GoogleCloudBackup;
            runningBackupStatus.UploadToGlacier = _backupResult.GlacierBackup;
            runningBackupStatus.UploadToFtp = _backupResult.FtpBackup;

            _backupResult.LocalBackup = new LocalBackup
            {
                BackupDirectory = folderName,
                FileName = fileName
            };

            // if the user did not specify a local folder, we delete the temporary file
            if (_backupToLocalFolder == false)
                DeleteFile(backupFilePath);
        }

        runningBackupStatus.LastEtag = internalBackupResult.LastDocumentEtag;
        runningBackupStatus.LastDatabaseChangeVector = internalBackupResult.LastDatabaseChangeVector;
        runningBackupStatus.LastRaftIndex.LastEtag = internalBackupResult.LastRaftIndex;
        runningBackupStatus.FolderName = folderName;

        if (_isFullBackup)
            runningBackupStatus.LastFullBackup = _startTimeUtc;
        else
            runningBackupStatus.LastIncrementalBackup = _startTimeUtc;

        totalSw.Stop();

        if (_logger.IsInfoEnabled)
        {
            var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
            _logger.Info($"Successfully created {(_isFullBackup ? fullBackupText : "an incremental backup")} " +
                         $"in {totalSw.ElapsedMilliseconds:#,#;;0} ms");
        }

        return _backupResult;
    }
    catch (OperationCanceledException)
    {
        operationCanceled = TaskCancelToken.Token.IsCancellationRequested;
        throw;
    }
    catch (ObjectDisposedException)
    {
        // shutting down, probably
        operationCanceled = true;
        throw;
    }
    catch (Exception e)
    {
        const string message = "Error when performing periodic backup";

        runningBackupStatus.Error = new Error
        {
            Exception = e.ToString(),
            At = DateTime.UtcNow
        };

        if (_logger.IsOperationsEnabled)
            _logger.Operations(message, e);

        _database.NotificationCenter.Add(AlertRaised.Create(
            _database.Name,
            $"Periodic Backup task: '{_taskName}'",
            message,
            AlertType.PeriodicBackup,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e)));

        throw;
    }
    finally
    {
        if (operationCanceled == false)
        {
            // whether we succeeded or not,
            // in periodic backup we need to update the last backup time to avoid
            // starting a new backup right after this one
            if (_isFullBackup)
                runningBackupStatus.LastFullBackupInternal = _startTimeUtc;
            else
                runningBackupStatus.LastIncrementalBackupInternal = _startTimeUtc;

            runningBackupStatus.NodeTag = _database.ServerStore.NodeTag;
            runningBackupStatus.DurationInMs = totalSw.ElapsedMilliseconds;
            UpdateOperationId(runningBackupStatus);

            if (_isOneTimeBackup == false)
            {
                runningBackupStatus.Version = ++_previousBackupStatus.Version;

                // save the backup status
                AddInfo("Saving backup status");
                SaveBackupStatus(runningBackupStatus, _database, _logger, _backupResult);
            }
        }
    }
}
protected override int ExecuteCmd(DocumentsOperationContext context)
{
    if (_log.IsInfoEnabled)
        _log.Info($"Importing {Documents.Count:#,#0} documents");

    var idsOfDocumentsToUpdateAfterAttachmentDeletion = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

    foreach (var documentType in Documents)
    {
        var tombstone = documentType.Tombstone;
        if (tombstone != null)
        {
            using (Slice.External(context.Allocator, tombstone.LowerId, out Slice key))
            {
                var newEtag = _database.DocumentsStorage.GenerateNextEtag();
                var changeVector = _database.DocumentsStorage.GetNewChangeVector(context, newEtag);

                switch (tombstone.Type)
                {
                    case Tombstone.TombstoneType.Document:
                        _database.DocumentsStorage.Delete(context, key, tombstone.LowerId, null, tombstone.LastModified.Ticks, changeVector, new CollectionName(tombstone.Collection));
                        break;
                    case Tombstone.TombstoneType.Attachment:
                        var idEnd = key.Content.IndexOf(SpecialChars.RecordSeparator);
                        if (idEnd < 1)
                            throw new InvalidOperationException("Cannot find a document ID inside the attachment key");

                        var attachmentId = key.Content.Substring(idEnd);
                        idsOfDocumentsToUpdateAfterAttachmentDeletion.Add(attachmentId);

                        _database.DocumentsStorage.AttachmentsStorage.DeleteAttachmentDirect(context, key, false, "$fromReplication", null, changeVector, tombstone.LastModified.Ticks);
                        break;
                    case Tombstone.TombstoneType.Revision:
                        _database.DocumentsStorage.RevisionsStorage.DeleteRevision(context, key, tombstone.Collection, changeVector, tombstone.LastModified.Ticks);
                        break;
                    case Tombstone.TombstoneType.Counter:
                        _database.DocumentsStorage.CountersStorage.DeleteCounter(context, key, tombstone.Collection, tombstone.LastModified.Ticks, forceTombstone: true);
                        break;
                }
            }

            continue;
        }

        var conflict = documentType.Conflict;
        if (conflict != null)
        {
            _database.DocumentsStorage.ConflictsStorage.AddConflict(context, conflict.Id, conflict.LastModified.Ticks, conflict.Doc, conflict.ChangeVector, conflict.Collection, conflict.Flags, NonPersistentDocumentFlags.FromSmuggler);
            continue;
        }

        if (documentType.Attachments != null)
        {
            foreach (var attachment in documentType.Attachments)
            {
                _database.DocumentsStorage.AttachmentsStorage.PutAttachmentStream(context, attachment.Tag, attachment.Base64Hash, attachment.Stream);
            }
        }

        var document = documentType.Document;
        var id = document.Id;

        if (IsRevision)
        {
            if (_database.DocumentsStorage.RevisionsStorage.Configuration == null)
                ThrowRevisionsDisabled();

            PutAttachments(context, document);

            if (document.Flags.Contain(DocumentFlags.DeleteRevision))
            {
                _database.DocumentsStorage.RevisionsStorage.Delete(context, id, document.Data, document.Flags, document.NonPersistentFlags, document.ChangeVector, document.LastModified.Ticks);
            }
            else
            {
                _database.DocumentsStorage.RevisionsStorage.Put(context, id, document.Data, document.Flags, document.NonPersistentFlags, document.ChangeVector, document.LastModified.Ticks);
            }

            continue;
        }

        if (DatabaseSmuggler.IsPreV4Revision(_buildType, id, document))
        {
            // handle old revisions
            if (_database.DocumentsStorage.RevisionsStorage.Configuration == null)
                ThrowRevisionsDisabled();

            var endIndex = id.IndexOf(DatabaseSmuggler.PreV4RevisionsDocumentId, StringComparison.OrdinalIgnoreCase);
            var newId = id.Substring(0, endIndex);

            _database.DocumentsStorage.RevisionsStorage.Put(context, newId, document.Data, document.Flags, document.NonPersistentFlags, document.ChangeVector, document.LastModified.Ticks);
            continue;
        }

        PutAttachments(context, document);

        _database.DocumentsStorage.Put(context, id, null, document.Data, document.LastModified.Ticks, null, document.Flags, document.NonPersistentFlags);
    }

    foreach (var idToUpdate in idsOfDocumentsToUpdateAfterAttachmentDeletion)
    {
        _database.DocumentsStorage.AttachmentsStorage.UpdateDocumentAfterAttachmentChange(context, idToUpdate);
    }

    return Documents.Count;
}
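// A minimal, self-contained sketch (names are illustrative, not the real constants)
// of how the IsPreV4Revision branch above recovers the owning document id from a
// legacy revision id such as "users/1/revisions/5": find the revisions marker and
// take everything before it.
using System;

public static class LegacyRevisionIdExample
{
    private const string RevisionsMarker = "/revisions/"; // assumed marker, for illustration

    public static string GetDocumentId(string revisionId)
    {
        var endIndex = revisionId.IndexOf(RevisionsMarker, StringComparison.OrdinalIgnoreCase);
        if (endIndex < 1)
            throw new InvalidOperationException($"'{revisionId}' is not a legacy revision id");
        return revisionId.Substring(0, endIndex);
    }
}
// e.g. GetDocumentId("users/1/revisions/5") == "users/1"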
public async Task<IOperationResult> RunPeriodicBackup(Action<IOperationProgress> onProgress)
{
    AddInfo($"Started task: '{_configuration.Name}'", onProgress);

    var totalSw = Stopwatch.StartNew();
    var operationCanceled = false;

    var runningBackupStatus = _periodicBackup.RunningBackupStatus = new PeriodicBackupStatus
    {
        TaskId = _configuration.TaskId,
        BackupType = _configuration.BackupType,
        LastEtag = _previousBackupStatus.LastEtag,
        LastFullBackup = _previousBackupStatus.LastFullBackup,
        LastIncrementalBackup = _previousBackupStatus.LastIncrementalBackup,
        LastFullBackupInternal = _previousBackupStatus.LastFullBackupInternal,
        LastIncrementalBackupInternal = _previousBackupStatus.LastIncrementalBackupInternal,
        IsFull = _isFullBackup,
        LocalBackup = _previousBackupStatus.LocalBackup,
        LastOperationId = _previousBackupStatus.LastOperationId
    };

    try
    {
        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
        using (var tx = context.OpenReadTransaction())
        {
            var now = DateTime.Now.ToString(DateTimeFormat, CultureInfo.InvariantCulture);

            if (runningBackupStatus.LocalBackup == null)
                runningBackupStatus.LocalBackup = new LocalBackup();

            if (_logger.IsInfoEnabled)
            {
                var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
                _logger.Info($"Creating {(_isFullBackup ? fullBackupText : "an incremental backup")}");
            }

            if (_isFullBackup == false)
            {
                // no-op if nothing has changed
                var currentLastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                if (currentLastEtag == _previousBackupStatus.LastEtag)
                {
                    var message = "Skipping incremental backup because " +
                                  $"last etag ({currentLastEtag:#,#;;0}) hasn't changed since last backup";
                    if (_logger.IsInfoEnabled)
                        _logger.Info(message);

                    UpdateOperationId(runningBackupStatus);
                    runningBackupStatus.LastIncrementalBackup = _startTime;
                    DatabaseSmuggler.EnsureProcessed(_backupResult);
                    AddInfo(message, onProgress);

                    return _backupResult;
                }
            }

            GenerateFolderNameAndBackupDirectory(now, out var folderName, out var backupDirectory);
            var startDocumentEtag = _isFullBackup == false ? _previousBackupStatus.LastEtag : null;
            var fileName = GetFileName(_isFullBackup, backupDirectory.FullPath, now, _configuration.BackupType, out string backupFilePath);
            var lastEtag = CreateLocalBackupOrSnapshot(runningBackupStatus, backupFilePath, startDocumentEtag, context, tx, onProgress);

            runningBackupStatus.LocalBackup.BackupDirectory = _backupToLocalFolder ? backupDirectory.FullPath : null;
            runningBackupStatus.LocalBackup.TempFolderUsed = _backupToLocalFolder == false;
            runningBackupStatus.IsFull = _isFullBackup;

            try
            {
                await UploadToServer(backupFilePath, folderName, fileName, onProgress);
            }
            finally
            {
                runningBackupStatus.UploadToS3 = _backupResult.S3Backup;
                runningBackupStatus.UploadToAzure = _backupResult.AzureBackup;
                runningBackupStatus.UploadToGlacier = _backupResult.GlacierBackup;
                runningBackupStatus.UploadToFtp = _backupResult.FtpBackup;

                // if the user did not specify a local folder, we delete the temporary file
                if (_backupToLocalFolder == false)
                    IOExtensions.DeleteFile(backupFilePath);
            }

            UpdateOperationId(runningBackupStatus);
            runningBackupStatus.LastEtag = lastEtag;
            runningBackupStatus.FolderName = folderName;

            if (_isFullBackup)
                runningBackupStatus.LastFullBackup = _periodicBackup.StartTime;
            else
                runningBackupStatus.LastIncrementalBackup = _periodicBackup.StartTime;
        }

        totalSw.Stop();

        if (_logger.IsInfoEnabled)
        {
            var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
            _logger.Info($"Successfully created {(_isFullBackup ? fullBackupText : "an incremental backup")} " +
                         $"in {totalSw.ElapsedMilliseconds:#,#;;0} ms");
        }

        return _backupResult;
    }
    catch (OperationCanceledException)
    {
        operationCanceled = TaskCancelToken.Token.IsCancellationRequested &&
                            _databaseShutdownCancellationToken.IsCancellationRequested;
        throw;
    }
    catch (ObjectDisposedException)
    {
        // shutting down, probably
        operationCanceled = true;
        throw;
    }
    catch (Exception e)
    {
        const string message = "Error when performing periodic backup";

        runningBackupStatus.Error = new Error
        {
            Exception = e.ToString(),
            At = DateTime.UtcNow
        };

        if (_logger.IsOperationsEnabled)
            _logger.Operations(message, e);

        _database.NotificationCenter.Add(AlertRaised.Create(
            _database.Name,
            "Periodic Backup",
            message,
            AlertType.PeriodicBackup,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e)));

        throw;
    }
    finally
    {
        if (operationCanceled == false)
        {
            // whether we succeeded or not,
            // we need to update the last backup time to avoid
            // starting a new backup right after this one
            if (_isFullBackup)
                runningBackupStatus.LastFullBackupInternal = _startTime;
            else
                runningBackupStatus.LastIncrementalBackupInternal = _startTime;

            runningBackupStatus.NodeTag = _serverStore.NodeTag;
            runningBackupStatus.DurationInMs = totalSw.ElapsedMilliseconds;
            runningBackupStatus.Version = ++_previousBackupStatus.Version;

            _periodicBackup.BackupStatus = runningBackupStatus;

            // save the backup status
            await WriteStatus(runningBackupStatus, onProgress);
        }
    }
}
public async Task PostCreateSampleData()
{
    using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        using (context.OpenReadTransaction())
        {
            foreach (var collection in Database.DocumentsStorage.GetCollections(context))
            {
                if (collection.Count > 0)
                    throw new InvalidOperationException("You cannot create sample data in a database that already contains documents");
            }
        }

        var operateOnTypesAsString = GetStringValuesQueryString("operateOnTypes", required: false);
        var operateOnTypes = GetOperateOnTypes(operateOnTypesAsString);

        if (operateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments))
        {
            var editRevisions = new EditRevisionsConfigurationCommand(new RevisionsConfiguration
            {
                Collections = new Dictionary<string, RevisionsCollectionConfiguration>
                {
                    ["Orders"] = new RevisionsCollectionConfiguration
                    {
                        Disabled = false
                    }
                }
            }, Database.Name, GetRaftRequestIdFromQuery() + "/revisions");
            var (index, _) = await ServerStore.SendToLeaderAsync(editRevisions);
            await Database.RachisLogIndexNotifications.WaitForIndexNotification(index, Database.ServerStore.Engine.OperationTimeout);
        }

        if (operateOnTypes.HasFlag(DatabaseItemType.TimeSeries))
        {
            var tsConfig = new TimeSeriesConfiguration
            {
                NamedValues = new Dictionary<string, Dictionary<string, string[]>>
                {
                    ["Companies"] = new Dictionary<string, string[]>
                    {
                        ["StockPrices"] = new[] { "Open", "Close", "High", "Low", "Volume" }
                    },
                    ["Employees"] = new Dictionary<string, string[]>
                    {
                        ["HeartRates"] = new[] { "BPM" }
                    }
                }
            };
            var editTimeSeries = new EditTimeSeriesConfigurationCommand(tsConfig, Database.Name, GetRaftRequestIdFromQuery() + "/time-series");
            var (index, _) = await ServerStore.SendToLeaderAsync(editTimeSeries);
            await Database.RachisLogIndexNotifications.WaitForIndexNotification(index, Database.ServerStore.Engine.OperationTimeout);
        }

        await using (var sampleData = typeof(SampleDataHandler).Assembly
                         .GetManifestResourceStream("Raven.Server.Web.Studio.EmbeddedData.Northwind.ravendbdump"))
        {
            await using (var stream = new GZipStream(sampleData, CompressionMode.Decompress))
            using (var source = new StreamSource(stream, context, Database))
            {
                var destination = new DatabaseDestination(Database);
                var smuggler = new DatabaseSmuggler(Database, source, destination, Database.Time,
                    options: new DatabaseSmugglerOptionsServerSide
                    {
                        OperateOnTypes = operateOnTypes,
                        SkipRevisionCreation = true
                    });

                await smuggler.ExecuteAsync();
            }
        }

        await NoContent();
    }
}
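// A self-contained sketch of reading an embedded, gzip-compressed resource the way
// PostCreateSampleData streams the Northwind dump above. The resource name passed in
// must match a file compiled into the assembly as an embedded resource; everything
// here uses only standard BCL APIs.
using System.IO;
using System.IO.Compression;
using System.Reflection;

public static class EmbeddedResourceExample
{
    public static string ReadEmbeddedGzip(Assembly assembly, string resourceName)
    {
        using (var resource = assembly.GetManifestResourceStream(resourceName))
        using (var gzip = new GZipStream(resource, CompressionMode.Decompress))
        using (var reader = new StreamReader(gzip))
        {
            // the handler above feeds the decompressed stream to a smuggler source
            // instead of reading it into a string; this just shows the plumbing
            return reader.ReadToEnd();
        }
    }
}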
public void FullBackupTo(string backupPath)
{
    using (var file = SafeFileStream.Create(backupPath, FileMode.Create))
    using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
    using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (context.OpenReadTransaction())
    {
        var databaseRecord = _serverStore.Cluster.ReadDatabase(context, Name);
        Debug.Assert(databaseRecord != null);

        var zipArchiveEntry = package.CreateEntry(RestoreSettings.SmugglerValuesFileName, CompressionLevel.Optimal);
        using (var zipStream = zipArchiveEntry.Open())
        {
            var smugglerSource = new DatabaseSource(this, 0);
            using (DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
            using (ctx.OpenReadTransaction())
            {
                var smugglerDestination = new StreamDestination(zipStream, ctx, smugglerSource);
                var databaseSmugglerOptionsServerSide = new DatabaseSmugglerOptionsServerSide
                {
                    AuthorizationStatus = AuthorizationStatus.DatabaseAdmin,
                    OperateOnTypes = DatabaseItemType.CompareExchange | DatabaseItemType.Identities
                };
                var smuggler = new DatabaseSmuggler(this, smugglerSource, smugglerDestination, this.Time, options: databaseSmugglerOptionsServerSide);

                smuggler.Execute();
            }
        }

        zipArchiveEntry = package.CreateEntry(RestoreSettings.SettingsFileName, CompressionLevel.Optimal);
        using (var zipStream = zipArchiveEntry.Open())
        using (var writer = new BlittableJsonTextWriter(context, zipStream))
        {
            //TODO: encrypt this file using the MasterKey
            //http://issues.hibernatingrhinos.com/issue/RavenDB-7546

            writer.WriteStartObject();

            // save the database record
            writer.WritePropertyName(nameof(RestoreSettings.DatabaseRecord));
            var databaseRecordBlittable = EntityToBlittable.ConvertEntityToBlittable(databaseRecord, DocumentConventions.Default, context);
            context.Write(writer, databaseRecordBlittable);

            // save the database values (subscriptions, periodic backup statuses, etl states...)
            writer.WriteComma();
            writer.WritePropertyName(nameof(RestoreSettings.DatabaseValues));
            writer.WriteStartObject();

            var first = true;
            var prefix = Helpers.ClusterStateMachineValuesPrefix(Name);

            foreach (var keyValue in ClusterStateMachine.ReadValuesStartingWith(context, prefix))
            {
                if (first == false)
                    writer.WriteComma();

                first = false;

                var key = keyValue.Key.ToString().Substring(prefix.Length);
                writer.WritePropertyName(key);
                context.Write(writer, keyValue.Value);
            }

            writer.WriteEndObject(); // end of values

            writer.WriteEndObject();
        }

        BackupMethods.Full.ToFile(GetAllStoragesForBackup(), package);

        file.Flush(true); // make sure that we fully flushed to disk
    }
}
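// A minimal, self-contained sketch of the package layout FullBackupTo produces:
// one zip entry for the smuggler values and one for a settings JSON document, each
// written through ZipArchiveEntry.Open(). Entry names and payloads are placeholders,
// not the real RestoreSettings constants or smuggler output.
using System.IO;
using System.IO.Compression;
using System.Text;

public static class BackupPackageExample
{
    public static void WritePackage(string backupPath)
    {
        using (var file = File.Create(backupPath))
        using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
        {
            // smuggler output (compare-exchange values, identities) would be streamed here
            var entry = package.CreateEntry("smuggler-values", CompressionLevel.Optimal);
            using (var zipStream = entry.Open())
            {
                var payload = Encoding.UTF8.GetBytes("{}");
                zipStream.Write(payload, 0, payload.Length);
            }

            // database record + cluster values are written as a single JSON object
            entry = package.CreateEntry("settings.json", CompressionLevel.Optimal);
            using (var zipStream = entry.Open())
            {
                var payload = Encoding.UTF8.GetBytes("{\"DatabaseRecord\":{},\"DatabaseValues\":{}}");
                zipStream.Write(payload, 0, payload.Length);
            }
        }
        // the real method also appends the raw storage files before flushing to disk
    }
}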