/// <summary>
/// Returns the earliest local wakeup time among all periodic backup tasks,
/// converted to UTC, or null when no backup is currently scheduled.
/// </summary>
public DateTime? GetWakeDatabaseTimeUtc()
{
    // Snapshot the last document etag under a short-lived read transaction;
    // schedules may depend on whether new data arrived since the last backup.
    long lastEtag;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (var tx = context.OpenReadTransaction())
    {
        lastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
    }

    // Track the minimum (earliest) wakeup time across all configured backups.
    DateTime? earliest = null;
    foreach (var backup in _periodicBackups)
    {
        var candidate = GetNextWakeupTimeLocal(lastEtag, backup.Value.Configuration, backup.Value.BackupStatus);
        if (candidate == null)
            continue;

        if (earliest == null || candidate < earliest)
            earliest = candidate.Value;
    }

    return earliest?.ToUniversalTime();
}
/// <summary>
/// Prepares the smuggler source: opens the read transactions needed for the
/// requested item types, captures the last document etag, and reports the
/// server build version. The returned disposable releases everything in
/// reverse acquisition order.
/// </summary>
public IDisposable Initialize(DatabaseSmugglerOptions options, SmugglerResult result, out long buildVersion)
{
    _currentTypeIndex = 0;

    // Document-side item types require a documents-storage read transaction.
    var needsDocumentsTx =
        options.OperateOnTypes.HasFlag(DatabaseItemType.Documents) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Tombstones) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Conflicts) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Counters);

    if (needsDocumentsTx)
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();
        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
    }

    // Cluster-level items live in the server store, not in database storage.
    var needsServerTx =
        options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchange) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Identities);

    if (needsServerTx)
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();
    }

    buildVersion = ServerVersion.Build;

    // Dispose transactions before their owning contexts.
    return new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();
        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });
}
/// <summary>
/// Processes a heartbeat reply from the replication destination: records the
/// etags/change vector the destination acknowledged, optionally schedules an
/// advisory sibling-etag update for internal (non-external) replication, and
/// re-triggers the replication loop if local storage has moved past what the
/// destination has accepted.
/// </summary>
private void UpdateDestinationChangeVectorHeartbeat(ReplicationMessageReply replicationBatchReply)
{
    _lastSentDocumentEtag = replicationBatchReply.LastEtagAccepted;
    LastAcceptedChangeVector = replicationBatchReply.DatabaseChangeVector;
    // Sibling-etag bookkeeping applies only to internal (cluster) replication.
    if (_external == false)
    {
        var update = new UpdateSiblingCurrentEtag(replicationBatchReply, _waitForChanges);
        if (update.InitAndValidate(_lastDestinationEtag))
        {
            // we intentionally not waiting here, there is nothing that depends on the timing on this, since this
            // is purely advisory. We just want to have the information up to date at some point, and we won't
            // miss anything much if this isn't there.
            _database.TxMerger.Enqueue(update).IgnoreUnobservedExceptions();
        }
    }
    _lastDestinationEtag = replicationBatchReply.CurrentEtag;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
    using (documentsContext.OpenReadTransaction())
    {
        if (DocumentsStorage.ReadLastEtag(documentsContext.Transaction.InnerTransaction) != replicationBatchReply.LastEtagAccepted)
        {
            // We have changes that the other side doesn't have, this can be because we have writes
            // or because we have documents that were replicated to us. Either way, we need to sync
            // those up with the remove side, so we'll start the replication loop again.
            // We don't care if they are locally modified or not, because we filter documents that
            // the other side already have (based on the change vector).
            // Throttled by the minimal heartbeat interval to avoid a tight resend loop.
            if ((DateTime.UtcNow - _lastDocumentSentTime).TotalMilliseconds > _parent.MinimalHeartbeatInterval)
            {
                _waitForChanges.Set();
            }
        }
    }
}
/// <summary>
/// Returns the earliest scheduled backup time across all periodic backup tasks,
/// or <see cref="DateTime.MaxValue"/> when none is scheduled.
/// </summary>
public DateTime GetWakeDatabaseTime()
{
    // Read the last document etag once, under its own short read transaction.
    long lastEtag;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (var tx = context.OpenReadTransaction())
    {
        lastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
    }

    // Start from the far future and pull the wakeup time back for each backup.
    var wakeup = DateTime.MaxValue;
    foreach (var backup in _periodicBackups)
    {
        var next = GetNextWakeupTime(lastEtag, backup.Value.Configuration, backup.Value.BackupStatus);
        if (next.HasValue && next.Value < wakeup)
            wakeup = next.Value;
    }

    return wakeup;
}
/// <summary>
/// Computes a cheap fingerprint representing the state of all documents:
/// an XXHash64 of (last document etag, total document count). Any document
/// write changes at least one of the two inputs, so the hash changes too.
/// </summary>
private unsafe long ComputeAllDocumentsEtag(DocumentsOperationContext context)
{
    // Two longs on the stack: [0] = last etag, [1] = document count.
    var buffer = stackalloc long[2];
    buffer[0] = DocumentsStorage.ReadLastEtag(context.Transaction.InnerTransaction);
    buffer[1] = Database.DocumentsStorage.GetNumberOfDocuments(context);
    return ((long)Hashing.XXHash64.Calculate((byte *)buffer, sizeof(long) * 2));
}
/// <summary>
/// Waits until every index relevant to the graph query — both indexes referenced
/// by name and auto-indexes matched (or created) for index-less query steps —
/// is non-stale, or the query's wait timeout expires. Returns the staleness
/// result of <c>WaitForNonStaleResultsInternal</c>.
/// </summary>
public async Task<bool> WaitForNonStaleResults()
{
    if (_context.AreTransactionsOpened() == false)
    {
        _context.OpenReadTransaction();
    }
    // Cutoff etag: the indexes must catch up at least to this point.
    var etag = DocumentsStorage.ReadLastEtag(_context.Documents.Transaction.InnerTransaction);
    var queryDuration = Stopwatch.StartNew();
    var indexNamesGatherer = new GraphQueryIndexNamesGatherer();
    await indexNamesGatherer.VisitAsync(RootQueryStep);
    var indexes = new List<Index>();
    // Case-insensitive de-duplication of index names across query steps.
    var indexNames = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
    var indexWaiters = new Dictionary<Index, (IndexQueryServerSide, AsyncWaitForIndexing)>();
    var queryTimeout = _query.WaitForNonStaleResultsTimeout ?? Index.DefaultWaitForNonStaleResultsTimeout;
    // Indexes the query referenced explicitly by name.
    foreach (var indexName in indexNamesGatherer.Indexes)
    {
        if (indexNames.Add(indexName) == false)
        {
            continue;
        }
        var index = _database.IndexStore.GetIndex(indexName);
        indexes.Add(index);
        indexWaiters.Add(index, (_query, new AsyncWaitForIndexing(queryDuration, queryTimeout, index)));
    }
    // Query steps without an explicit index: the dynamic query runner matches
    // (or creates) an auto-index for each.
    foreach (var qqs in indexNamesGatherer.QueryStepsWithoutExplicitIndex)
    {
        // we need to close the transaction since the DynamicQueryRunner.MatchIndex
        // expects that the transaction will be closed
        _context.CloseTransaction();
        //this will ensure that query step has relevant index
        //if needed, this will create auto-index
        var query = new IndexQueryServerSide(qqs.Query.ToString(), qqs.QueryParameters);
        var index = await _dynamicQueryRunner.MatchIndex(query, true, null, _database.DatabaseShutdown);
        if (indexNames.Add(index.Name) == false)
        {
            continue;
        }
        indexes.Add(index);
        indexWaiters.Add(index, (_query, new AsyncWaitForIndexing(queryDuration, queryTimeout, index)));
    }
    return (await WaitForNonStaleResultsInternal(etag, indexes, indexWaiters));
}
/// <summary>
/// Ensures each query step of the graph query has a usable index: creates
/// auto-indexes where needed, and waits for non-staleness ONLY for indexes
/// created by this call (pre-existing indexes are used as-is). Always closes
/// the transaction before returning.
/// </summary>
public async Task CreateAutoIndexesAndWaitIfNecessary()
{
    var queryStepsGatherer = new QueryQueryStepGatherer();
    queryStepsGatherer.Visit(RootQueryStep);
    if (_context.AreTransactionsOpened() == false)
    {
        _context.OpenReadTransaction();
    }
    try
    {
        // Cutoff etag captured before any index creation.
        var etag = DocumentsStorage.ReadLastEtag(_context.Documents.Transaction.InnerTransaction);
        var queryDuration = Stopwatch.StartNew();
        var indexes = new List<Index>();
        var indexWaiters = new Dictionary<Index, (IndexQueryServerSide, AsyncWaitForIndexing)>();
        foreach (var queryStepInfo in queryStepsGatherer.QuerySteps)
        {
            // Skip steps with no source collection/field and steps already bound to an index.
            if (string.IsNullOrWhiteSpace(queryStepInfo.QueryStep.Query.From.From.FieldValue) || queryStepInfo.IsIndexQuery)
            {
                continue;
            }
            var indexQuery = new IndexQueryServerSide(queryStepInfo.QueryStep.GetQueryString, queryStepInfo.QueryStep.QueryParameters);
            //No sense creating an index for collection queries
            if (indexQuery.Metadata.IsCollectionQuery)
            {
                continue;
            }
            var indexCreationInfo = await _dynamicQueryRunner.CreateAutoIndexIfNeeded(indexQuery, true, null, _database.DatabaseShutdown);
            if (indexCreationInfo.HasCreatedAutoIndex) //wait for non-stale only IF we just created an auto-index
            {
                indexes.Add(indexCreationInfo.Index);
                var queryTimeout = indexQuery.WaitForNonStaleResultsTimeout ?? Index.DefaultWaitForNonStaleResultsTimeout;
                indexWaiters.Add(indexCreationInfo.Index, (indexQuery, new AsyncWaitForIndexing(queryDuration, queryTimeout, indexCreationInfo.Index)));
            }
        }
        await WaitForNonStaleResultsInternal(etag, indexes, indexWaiters);
    }
    finally
    {
        //The rest of the code assumes that a Tx is not opened
        _context.CloseTransaction();
    }
}
/// <summary>
/// Processes a heartbeat reply from the replication destination: advances the
/// last-sent document and index/transformer etags (monotonically, via Max),
/// records the destination's change vectors, and re-triggers the replication
/// loop when local storage holds newer documents or index/transformer metadata
/// than the destination has accepted.
/// </summary>
private void UpdateDestinationChangeVectorHeartbeat(ReplicationMessageReply replicationBatchReply)
{
    // Max() keeps the counters monotonic even if replies arrive out of order.
    _lastSentDocumentEtag = Math.Max(_lastSentDocumentEtag, replicationBatchReply.LastEtagAccepted);
    _lastSentIndexOrTransformerEtag = Math.Max(_lastSentIndexOrTransformerEtag, replicationBatchReply.LastIndexTransformerEtagAccepted);
    _destinationLastKnownDocumentChangeVectorAsString = replicationBatchReply.DocumentsChangeVector.Format();
    _destinationLastKnownIndexOrTransformerChangeVectorAsString = replicationBatchReply.IndexTransformerChangeVector.Format();
    foreach (var changeVectorEntry in replicationBatchReply.DocumentsChangeVector)
    {
        _destinationLastKnownDocumentChangeVector[changeVectorEntry.DbId] = changeVectorEntry.Etag;
    }
    foreach (var changeVectorEntry in replicationBatchReply.IndexTransformerChangeVector)
    {
        _destinationLastKnownIndexOrTransformerChangeVector[changeVectorEntry.DbId] = changeVectorEntry.Etag;
    }
    // NOTE(review): the using below was commented out, leaving a bare scope block;
    // presumably _documentsContext already has an open transaction here — confirm.
    //using (_documentsContext.OpenReadTransaction())
    {
        if (DocumentsStorage.ReadLastEtag(_documentsContext.Transaction.InnerTransaction) != replicationBatchReply.LastEtagAccepted)
        {
            // We have changes that the other side doesn't have, this can be because we have writes
            // or because we have documents that were replicated to us. Either way, we need to sync
            // those up with the remove side, so we'll start the replication loop again.
            // We don't care if they are locally modified or not, because we filter documents that
            // the other side already have (based on the change vector).
            if ((DateTime.UtcNow - _lastDocumentSentTime).TotalMilliseconds > _minimalHeartbeatInterval)
            {
                _waitForChanges.SetByAsyncCompletion();
            }
        }
    }
    // Same check for index/transformer metadata, throttled the same way.
    if (
        _database.IndexMetadataPersistence.ReadLastEtag(_configurationContext.Transaction.InnerTransaction) !=
        replicationBatchReply.LastIndexTransformerEtagAccepted)
    {
        if ((DateTime.UtcNow - _lastIndexOrTransformerSentTime).TotalMilliseconds > _minimalHeartbeatInterval)
        {
            _waitForChanges.SetByAsyncCompletion();
        }
    }
}
/// <summary>
/// Prepares the smuggler source: opens read transactions for the requested item
/// types, captures the last document etag / database change vector and the raft
/// index used for backups, and returns a result carrying a disposable that
/// releases everything in reverse acquisition order plus the server build.
/// </summary>
public Task<SmugglerInitializeResult> InitializeAsync(DatabaseSmugglerOptionsServerSide options, SmugglerResult result)
{
    _currentTypeIndex = 0;

    var types = options.OperateOnTypes;

    // Document-side item types need a documents-storage read transaction.
    var needsDocumentsTx =
        types.HasFlag(DatabaseItemType.Documents) ||
        types.HasFlag(DatabaseItemType.RevisionDocuments) ||
        types.HasFlag(DatabaseItemType.Tombstones) ||
        types.HasFlag(DatabaseItemType.Conflicts) ||
        types.HasFlag(DatabaseItemType.CounterGroups) ||
        types.HasFlag(DatabaseItemType.TimeSeries);

    if (needsDocumentsTx)
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();
        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
        LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(_disposeTransaction.InnerTransaction);
    }

    // Cluster-level item types need a server-store read transaction.
    var needsServerTx =
        types.HasFlag(DatabaseItemType.CompareExchange) ||
        types.HasFlag(DatabaseItemType.Identities) ||
        types.HasFlag(DatabaseItemType.CompareExchangeTombstones) ||
        types.HasFlag(DatabaseItemType.Subscriptions) ||
        types.HasFlag(DatabaseItemType.ReplicationHubCertificates);

    if (needsServerTx)
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();

        using (var rawRecord = _database.ServerStore.Cluster.ReadRawDatabaseRecord(_serverContext, _database.Name))
        {
            LastRaftIndex = rawRecord.EtagForBackup;
        }
    }

    // Dispose transactions before their owning contexts.
    var disposable = new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();
        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });

    return Task.FromResult(new SmugglerInitializeResult(disposable, ServerVersion.Build));
}
/// <summary>
/// Returns the database's last document etag as of this index's creation.
/// For a brand-new index environment the current last etag is captured and
/// persisted into the index's own storage; for an existing environment the
/// previously persisted value is read back (0 when absent).
/// </summary>
private long InitializeLastDatabaseEtagOnIndexCreation(TransactionOperationContext indexContext)
{
    const string key = "LastEtag";

    if (_environment.IsNew)
    {
        // Fresh index: capture the current last etag and persist it so later
        // loads can read it from the index's storage without touching documents.
        using (var queryContext = QueryOperationContext.Allocate(DocumentDatabase, _index))
        using (queryContext.OpenReadTransaction())
        using (Slice.From(indexContext.Allocator, key, out var slice))
        {
            var lastDatabaseEtag = DocumentsStorage.ReadLastEtag(queryContext.Documents.Transaction.InnerTransaction);
            var tree = indexContext.Transaction.InnerTransaction.CreateTree(IndexSchema.LastDocumentEtagOnIndexCreationTree);
            tree.Add(slice, lastDatabaseEtag);
            return lastDatabaseEtag;
        }
    }

    // Existing index: the value was stored at creation time; a missing tree
    // or key yields 0.
    var stored = indexContext.Transaction.InnerTransaction
        .ReadTree(IndexSchema.LastDocumentEtagOnIndexCreationTree)
        ?.Read(key);

    return stored?.Reader.ReadLittleEndianInt64() ?? 0;
}
/// <summary>
/// Prepares the smuggler source: opens the read transactions required by the
/// requested item types, captures the last document etag / database change
/// vector and the raft index used for backups, and reports the server build.
/// The returned disposable releases everything in reverse acquisition order.
/// </summary>
public IDisposable Initialize(DatabaseSmugglerOptions options, SmugglerResult result, out long buildVersion)
{
    _currentTypeIndex = 0;

    var types = options.OperateOnTypes;

    // Document-side item types need a documents-storage read transaction.
    var needsDocumentsTx =
        types.HasFlag(DatabaseItemType.Documents) ||
        types.HasFlag(DatabaseItemType.RevisionDocuments) ||
        types.HasFlag(DatabaseItemType.Tombstones) ||
        types.HasFlag(DatabaseItemType.Conflicts) ||
        types.HasFlag(DatabaseItemType.CounterGroups);

    if (needsDocumentsTx)
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();
        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
        LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(_disposeTransaction.InnerTransaction);
    }

    // Cluster-level item types need a server-store read transaction.
    var needsServerTx =
        types.HasFlag(DatabaseItemType.CompareExchange) ||
        types.HasFlag(DatabaseItemType.Identities) ||
        types.HasFlag(DatabaseItemType.CompareExchangeTombstones);

    if (needsServerTx)
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();

        using (var rawRecord = _database.ServerStore.Cluster.ReadRawDatabaseRecord(_serverContext, _database.Name))
        {
            LastRaftIndex = rawRecord.GetEtagForBackup();
        }
    }

    buildVersion = ServerVersion.Build;

    // Dispose transactions before their owning contexts.
    return new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();
        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });
}
/// <summary>
/// Writes the backup file locally: a smuggler export for regular backups (and
/// for incremental snapshot runs), or a full storage snapshot otherwise.
/// Returns the last document etag covered by the backup; any failure is
/// recorded on the status object and rethrown.
/// </summary>
private long CreateLocalBackupOrSnapshot(PeriodicBackupConfiguration configuration, bool isFullBackup, PeriodicBackupStatus status, string backupFilePath, long?startDocumentEtag, DocumentsOperationContext context, DocumentsTransaction tx)
{
    long lastEtag;
    using (status.LocalBackup.UpdateStats(isFullBackup))
    {
        try
        {
            // NOTE: && binds tighter than ||, so this reads as
            // Backup || (Snapshot && incremental); a FULL snapshot
            // intentionally falls through to the else branch below.
            if (configuration.BackupType == BackupType.Backup ||
                configuration.BackupType == BackupType.Snapshot && isFullBackup == false)
            {
                // smuggler backup
                var options = new DatabaseSmugglerOptionsServerSide();
                if (isFullBackup == false)
                {
                    // Incremental backups must carry tombstones so deletions replicate.
                    options.OperateOnTypes |= DatabaseItemType.Tombstones;
                }
                var result = CreateBackup(options, backupFilePath, startDocumentEtag, context);
                lastEtag = result.GetLastEtag();
            }
            else
            {
                // snapshot backup
                lastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                _database.FullBackupTo(backupFilePath);
            }
        }
        catch (Exception e)
        {
            status.LocalBackup.Exception = e;
            throw;
        }
    }
    return (lastEtag);
}
/// <summary>
/// Fills the document-related fields of a database status report. When no
/// transaction has committed since the previous report (same LastTransactionId),
/// the previous report's numbers are reused; otherwise fresh values are read
/// under a read transaction.
/// </summary>
private static void FillDocumentsInfo(DatabaseStatusReport prevDatabaseReport, DocumentDatabase dbInstance, DatabaseStatusReport report, DocumentsOperationContext context, DocumentsStorage documentsStorage)
{
    var canReusePrevious = prevDatabaseReport?.LastTransactionId != null &&
                           prevDatabaseReport.LastTransactionId == dbInstance.LastTransactionId;

    if (canReusePrevious)
    {
        // Nothing committed since the last report — its numbers are still valid.
        report.LastEtag = prevDatabaseReport.LastEtag;
        report.LastTombstoneEtag = prevDatabaseReport.LastTombstoneEtag;
        report.NumberOfConflicts = prevDatabaseReport.NumberOfConflicts;
        report.NumberOfDocuments = prevDatabaseReport.NumberOfDocuments;
        report.DatabaseChangeVector = prevDatabaseReport.DatabaseChangeVector;
        return;
    }

    // Read fresh values under a read transaction.
    using (var tx = context.OpenReadTransaction())
    {
        report.LastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
        report.LastTombstoneEtag = DocumentsStorage.ReadLastTombstoneEtag(tx.InnerTransaction);
        report.NumberOfConflicts = documentsStorage.ConflictsStorage.ConflictsCount;
        report.NumberOfDocuments = documentsStorage.GetNumberOfDocuments(context);
        report.DatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);
    }
}
/// <summary>
/// Yields a <c>DatabaseStatusReport</c> for every database name known to the
/// cluster on this node. Unloaded, faulted, loading and shutting-down databases
/// get a minimal report with the matching status; fully loaded databases also
/// get storage, replication and index statistics. Stops early if cancellation
/// is requested.
/// </summary>
private IEnumerable<(string name, DatabaseStatusReport report)> CollectDatabaseInformation(TransactionOperationContext ctx)
{
    foreach (var dbName in _server.Cluster.GetDatabaseNames(ctx))
    {
        if (_token.IsCancellationRequested)
        {
            yield break;
        }

        var report = new DatabaseStatusReport
        {
            Name = dbName,
            NodeName = _server.NodeTag
        };

        if (_server.DatabasesLandlord.DatabasesCache.TryGetValue(dbName, out var dbTask) == false)
        {
            // Fixed misspelled local name ('recorod' -> 'record').
            var record = _server.Cluster.ReadDatabase(ctx, dbName);
            if (record == null || record.Topology.RelevantFor(_server.NodeTag) == false)
            {
                continue; // Database does not exist on this server
            }

            report.Status = DatabaseStatus.Unloaded;
            yield return (dbName, report);

            continue;
        }

        if (dbTask.IsFaulted)
        {
            // Databases flagged DoNotRemove are reported as unloaded, not faulted.
            var extractSingleInnerException = dbTask.Exception.ExtractSingleInnerException();
            if (Equals(extractSingleInnerException.Data[DatabasesLandlord.DoNotRemove], true))
            {
                report.Status = DatabaseStatus.Unloaded;
                yield return (dbName, report);

                continue;
            }
        }

        if (dbTask.IsCanceled || dbTask.IsFaulted)
        {
            report.Status = DatabaseStatus.Faulted;
            report.Error = dbTask.Exception.ToString();
            yield return (dbName, report);

            continue;
        }

        if (dbTask.IsCompleted == false)
        {
            report.Status = DatabaseStatus.Loading;
            yield return (dbName, report);

            continue;
        }

        var dbInstance = dbTask.Result;
        var documentsStorage = dbInstance.DocumentsStorage;
        var indexStorage = dbInstance.IndexStore;
        if (dbInstance.DatabaseShutdown.IsCancellationRequested)
        {
            report.Status = DatabaseStatus.Shutdown;
            yield return (dbName, report);

            continue;
        }

        report.Status = DatabaseStatus.Loaded;
        try
        {
            using (documentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
            using (var tx = context.OpenReadTransaction())
            {
                report.LastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                report.LastTombstoneEtag = DocumentsStorage.ReadLastTombstoneEtag(tx.InnerTransaction);
                report.NumberOfConflicts = documentsStorage.ConflictsStorage.ConflictsCount;
                report.NumberOfDocuments = documentsStorage.GetNumberOfDocuments(context);
                report.DatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);

                // Last etag sent to each outgoing replication destination.
                foreach (var outgoing in dbInstance.ReplicationLoader.OutgoingHandlers)
                {
                    var node = outgoing.GetNode();
                    if (node != null)
                    {
                        report.LastSentEtag.Add(node, outgoing._lastSentDocumentEtag);
                    }
                }

                if (indexStorage != null)
                {
                    foreach (var index in indexStorage.GetIndexes())
                    {
                        var stats = index.GetIndexStats(context);
                        //We might have old version of this index with the same name
                        report.LastIndexStats[index.Name] = new DatabaseStatusReport.ObservedIndexStatus
                        {
                            LastIndexedEtag = stats.LastProcessedEtag,
                            IsSideBySide = index.Name.StartsWith(Constants.Documents.Indexing.SideBySideIndexNamePrefix, StringComparison.OrdinalIgnoreCase),
                            IsStale = stats.IsStale,
                            State = index.State
                        };
                    }
                }
            }
        }
        catch (Exception e)
        {
            // Surface collection errors on the report instead of failing the whole scan.
            report.Error = e.ToString();
        }

        yield return (dbName, report);
    }
}
/// <summary>
/// Executes one periodic backup run (full or incremental): skips an incremental
/// run when the last etag has not moved, writes the backup locally (smuggler
/// export or snapshot), uploads it to the configured remote destinations, and
/// finally persists the updated backup status — unless the run was canceled.
/// Returns the accumulated backup result.
/// </summary>
public async Task<IOperationResult> RunPeriodicBackup(Action<IOperationProgress> onProgress)
{
    AddInfo($"Started task: '{_configuration.Name}'", onProgress);
    var totalSw = Stopwatch.StartNew();
    var operationCanceled = false;
    // Start the running status from the previous run's values; fields are
    // overwritten as this run progresses.
    var runningBackupStatus = _periodicBackup.RunningBackupStatus = new PeriodicBackupStatus
    {
        TaskId = _configuration.TaskId,
        BackupType = _configuration.BackupType,
        LastEtag = _previousBackupStatus.LastEtag,
        LastFullBackup = _previousBackupStatus.LastFullBackup,
        LastIncrementalBackup = _previousBackupStatus.LastIncrementalBackup,
        LastFullBackupInternal = _previousBackupStatus.LastFullBackupInternal,
        LastIncrementalBackupInternal = _previousBackupStatus.LastIncrementalBackupInternal,
        IsFull = _isFullBackup,
        LocalBackup = _previousBackupStatus.LocalBackup,
        LastOperationId = _previousBackupStatus.LastOperationId
    };
    try
    {
        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
        using (var tx = context.OpenReadTransaction())
        {
            var now = DateTime.Now.ToString(DateTimeFormat, CultureInfo.InvariantCulture);
            if (runningBackupStatus.LocalBackup == null)
            {
                runningBackupStatus.LocalBackup = new LocalBackup();
            }
            if (_logger.IsInfoEnabled)
            {
                var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
                _logger.Info($"Creating {(_isFullBackup ? fullBackupText : "an incremental backup")}");
            }
            if (_isFullBackup == false)
            {
                // no-op if nothing has changed
                var currentLastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                if (currentLastEtag == _previousBackupStatus.LastEtag)
                {
                    var message = "Skipping incremental backup because " + $"last etag ({currentLastEtag:#,#;;0}) hasn't changed since last backup";
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info(message);
                    }
                    // Still record the attempt so scheduling advances.
                    UpdateOperationId(runningBackupStatus);
                    runningBackupStatus.LastIncrementalBackup = _startTime;
                    DatabaseSmuggler.EnsureProcessed(_backupResult);
                    AddInfo(message, onProgress);
                    return (_backupResult);
                }
            }
            GenerateFolderNameAndBackupDirectory(now, out var folderName, out var backupDirectory);
            // A full backup starts from scratch (null); incremental resumes from the last etag.
            var startDocumentEtag = _isFullBackup == false ? _previousBackupStatus.LastEtag : null;
            var fileName = GetFileName(_isFullBackup, backupDirectory.FullPath, now, _configuration.BackupType, out string backupFilePath);
            var lastEtag = CreateLocalBackupOrSnapshot(runningBackupStatus, backupFilePath, startDocumentEtag, context, tx, onProgress);
            runningBackupStatus.LocalBackup.BackupDirectory = _backupToLocalFolder ? backupDirectory.FullPath : null;
            runningBackupStatus.LocalBackup.TempFolderUsed = _backupToLocalFolder == false;
            runningBackupStatus.IsFull = _isFullBackup;
            try
            {
                await UploadToServer(backupFilePath, folderName, fileName, onProgress);
            }
            finally
            {
                // Record upload outcomes even when the upload threw.
                runningBackupStatus.UploadToS3 = _backupResult.S3Backup;
                runningBackupStatus.UploadToAzure = _backupResult.AzureBackup;
                runningBackupStatus.UploadToGlacier = _backupResult.GlacierBackup;
                runningBackupStatus.UploadToFtp = _backupResult.FtpBackup;
                // if user did not specify local folder we delete the temporary file
                if (_backupToLocalFolder == false)
                {
                    IOExtensions.DeleteFile(backupFilePath);
                }
            }
            UpdateOperationId(runningBackupStatus);
            runningBackupStatus.LastEtag = lastEtag;
            runningBackupStatus.FolderName = folderName;
            if (_isFullBackup)
            {
                runningBackupStatus.LastFullBackup = _periodicBackup.StartTime;
            }
            else
            {
                runningBackupStatus.LastIncrementalBackup = _periodicBackup.StartTime;
            }
        }
        totalSw.Stop();
        if (_logger.IsInfoEnabled)
        {
            var fullBackupText = "a " + (_configuration.BackupType == BackupType.Backup ? " full backup" : " snapshot");
            _logger.Info($"Successfully created {(_isFullBackup ? fullBackupText : "an incremental backup")} " + $"in {totalSw.ElapsedMilliseconds:#,#;;0} ms");
        }
        return (_backupResult);
    }
    catch (OperationCanceledException)
    {
        // NOTE(review): '&&' means this counts as canceled only when BOTH the
        // task token AND the database-shutdown token are canceled — confirm
        // this is intended rather than '||'.
        operationCanceled = TaskCancelToken.Token.IsCancellationRequested && _databaseShutdownCancellationToken.IsCancellationRequested;
        throw;
    }
    catch (ObjectDisposedException)
    {
        // shutting down, probably
        operationCanceled = true;
        throw;
    }
    catch (Exception e)
    {
        const string message = "Error when performing periodic backup";
        runningBackupStatus.Error = new Error
        {
            Exception = e.ToString(),
            At = DateTime.UtcNow
        };
        if (_logger.IsOperationsEnabled)
        {
            _logger.Operations(message, e);
        }
        _database.NotificationCenter.Add(AlertRaised.Create(
            _database.Name,
            "Periodic Backup",
            message,
            AlertType.PeriodicBackup,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e)));
        throw;
    }
    finally
    {
        if (operationCanceled == false)
        {
            // whether we succeeded or not,
            // we need to update the last backup time to avoid
            // starting a new backup right after this one
            if (_isFullBackup)
            {
                runningBackupStatus.LastFullBackupInternal = _startTime;
            }
            else
            {
                runningBackupStatus.LastIncrementalBackupInternal = _startTime;
            }
            runningBackupStatus.NodeTag = _serverStore.NodeTag;
            runningBackupStatus.DurationInMs = totalSw.ElapsedMilliseconds;
            runningBackupStatus.Version = ++_previousBackupStatus.Version;
            _periodicBackup.BackupStatus = runningBackupStatus;
            // save the backup status
            await WriteStatus(runningBackupStatus, onProgress);
        }
    }
}
/// <summary>
/// Runs a graph query end to end: builds and optimizes the query plan, waits
/// for (or creates) the needed indexes, short-circuits with NotModified=true
/// when the computed result etag matches the caller's existing etag, then
/// executes the plan and applies ordering, the WHERE filter, and paging.
/// </summary>
private async Task<(List<Match> Matches, GraphQueryPlan QueryPlan, bool NotModified)> GetQueryResults(IndexQueryServerSide query, DocumentsOperationContext documentsContext, long?existingResultEtag, OperationCancelToken token, bool collectIntermediateResults = false)
{
    var q = query.Metadata.Query;
    var qp = new GraphQueryPlan(query, documentsContext, existingResultEtag, token, Database)
    {
        CollectIntermediateResults = collectIntermediateResults
    };
    qp.BuildQueryPlan();
    qp.OptimizeQueryPlan(); //TODO: audit optimization
    if (query.WaitForNonStaleResults)
    {
        qp.IsStale = await qp.WaitForNonStaleResults();
    }
    else
    {
        await qp.CreateAutoIndexesAndWaitIfNecessary();
    }
    //for the case where we don't wait for non stale results we will override IsStale in the QueryQueryStep steps
    // The waiting above may have closed/disposed the transaction — reopen if so.
    if (documentsContext.Transaction == null || documentsContext.Transaction.Disposed)
    {
        documentsContext.OpenReadTransaction();
    }
    qp.ResultEtag = DocumentsStorage.ReadLastEtag(documentsContext.Transaction.InnerTransaction);
    if (existingResultEtag.HasValue)
    {
        if (qp.ResultEtag == existingResultEtag)
        {
            // Caller already has an up-to-date result.
            return (null, null, true);
        }
    }
    await qp.Initialize();
    var matchResults = qp.Execute();
    if (query.Metadata.OrderBy != null)
    {
        Sort(matchResults, query.Metadata.OrderBy, Database.Name, query.Query);
    }
    var filter = q.GraphQuery.Where;
    if (filter != null)
    {
        // Filtered-out matches are blanked (set to default), not removed,
        // so indices stay stable during iteration.
        for (int i = 0; i < matchResults.Count; i++)
        {
            var resultAsJson = new DynamicJsonValue();
            matchResults[i].PopulateVertices(resultAsJson);
            using (var result = documentsContext.ReadObject(resultAsJson, "graph/result"))
            {
                if (filter.IsMatchedBy(result, query.QueryParameters) == false)
                {
                    matchResults[i] = default;
                }
            }
        }
    }
    // Apply paging: skip Start items, then cap at PageSize.
    if (query.Start > 0)
    {
        matchResults.RemoveRange(0, Math.Min(query.Start, matchResults.Count));
    }
    if (query.PageSize < matchResults.Count)
    {
        matchResults.RemoveRange(query.PageSize, matchResults.Count - query.PageSize);
    }
    return (matchResults, qp, false);
}
/// <summary>
/// Runs one periodic export cycle, full or incremental: picks (or creates) the
/// export directory, skips an incremental run when nothing changed, exports via
/// the smuggler, uploads the file, and updates/persists the export status.
/// Cancellation/disposal during shutdown is swallowed; other failures raise an
/// alert and throttle the next run via _exportLimit.
/// </summary>
private async Task RunPeriodicExport(bool fullExport)
{
    if (_cancellationToken.IsCancellationRequested)
    {
        return;
    }
    try
    {
        DocumentsOperationContext context;
        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out context))
        {
            var sp = Stopwatch.StartNew();
            using (var tx = context.OpenReadTransaction())
            {
                var exportDirectory = _configuration.LocalFolderName ??
                                      Path.Combine(_database.Configuration.Core.DataDirectory, "PeriodicExport-Temp");
                if (Directory.Exists(exportDirectory) == false)
                {
                    Directory.CreateDirectory(exportDirectory);
                }

                var now = SystemTime.UtcNow.ToString("yyyy-MM-dd-HH-mm", CultureInfo.InvariantCulture);
                if (_status.LastFullExportDirectory == null ||
                    IsDirectoryExistsOrContainsFiles() == false ||
                    fullExport)
                {
                    // No usable previous full-export folder — force a full export.
                    fullExport = true;
                    _status.LastFullExportDirectory = Path.Combine(exportDirectory, $"{now}.ravendb-{_database.Name}-backup");
                    Directory.CreateDirectory(_status.LastFullExportDirectory);
                }

                if (_logger.IsInfoEnabled)
                {
                    _logger.Info($"Exporting a {(fullExport ? "full" : "incremental")} export");
                }

                if (fullExport == false)
                {
                    var currentLastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                    // No-op if nothing has changed
                    if (currentLastEtag == _status.LastDocsEtag)
                    {
                        return;
                    }
                }

                var dataExporter = new SmugglerExporter(_database)
                {
                    Options = new DatabaseSmugglerOptions
                    {
                        RevisionDocumentsLimit = _exportLimit
                    }
                };

                string exportFilePath;
                string fileName;
                if (fullExport)
                {
                    // create filename for full export
                    fileName = $"{now}.ravendb-full-export";
                    exportFilePath = Path.Combine(_status.LastFullExportDirectory, fileName);
                    if (File.Exists(exportFilePath))
                    {
                        var counter = 1;
                        while (true)
                        {
                            // BUGFIX: removed the stray '$' before '{Constants...}' — in an
                            // interpolated string a '$' before '{' is a literal character,
                            // so these file names were generated as "...$<extension>".
                            fileName = $"{now} - {counter}.{Constants.PeriodicExport.FullExportExtension}";
                            exportFilePath = Path.Combine(_status.LastFullExportDirectory, fileName);
                            if (File.Exists(exportFilePath) == false)
                            {
                                break;
                            }
                            counter++;
                        }
                    }
                }
                else
                {
                    // create filename for incremental export
                    // BUGFIX: stray '$' removed (see note above).
                    fileName = $"{now}-0.{Constants.PeriodicExport.IncrementalExportExtension}";
                    exportFilePath = Path.Combine(_status.LastFullExportDirectory, fileName);
                    if (File.Exists(exportFilePath))
                    {
                        var counter = 1;
                        while (true)
                        {
                            // BUGFIX: stray '$' removed (see note above).
                            fileName = $"{now}-{counter}.{Constants.PeriodicExport.IncrementalExportExtension}";
                            exportFilePath = Path.Combine(_status.LastFullExportDirectory, fileName);
                            if (File.Exists(exportFilePath) == false)
                            {
                                break;
                            }
                            counter++;
                        }
                    }

                    dataExporter.StartDocsEtag = _status.LastDocsEtag;
                    if (dataExporter.StartDocsEtag == null)
                    {
                        // Unknown starting point — recover it from the previous export files.
                        IncrementalExport.ReadLastEtagsFromFile(_status.LastFullExportDirectory, context, dataExporter);
                    }
                }

                var exportResult = dataExporter.Export(context, exportFilePath);

                if (fullExport == false)
                {
                    // No-op if nothing has changed
                    if (exportResult.LastDocsEtag == _status.LastDocsEtag)
                    {
                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info("Periodic export returned prematurely, nothing has changed since last export");
                        }
                        return;
                    }
                }

                try
                {
                    await UploadToServer(exportFilePath, fileName, fullExport);
                }
                finally
                {
                    // if user did not specify local folder we delete temporary file.
                    if (string.IsNullOrEmpty(_configuration.LocalFolderName))
                    {
                        IOExtensions.DeleteFile(exportFilePath);
                    }
                }

                _status.LastDocsEtag = exportResult.LastDocsEtag;
                if (fullExport)
                {
                    _status.LastFullExportAt = SystemTime.UtcNow;
                }
                else
                {
                    _status.LastExportAt = SystemTime.UtcNow;
                }
                WriteStatus();
            }
            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"Successfully exported {(fullExport ? "full" : "incremental")} export in {sp.ElapsedMilliseconds:#,#;;0} ms.");
            }
            _exportLimit = null;
        }
    }
    catch (OperationCanceledException)
    {
        // shutting down, probably
    }
    catch (ObjectDisposedException)
    {
        // shutting down, probably
    }
    catch (Exception e)
    {
        // Throttle the next export attempt after a failure.
        _exportLimit = 100;
        if (_logger.IsOperationsEnabled)
        {
            _logger.Operations("Error when performing periodic export", e);
        }
        _database.Alerts.AddAlert(new Alert
        {
            Type = AlertType.PeriodicExport,
            Message = "Error in Periodic Export",
            CreatedAt = SystemTime.UtcNow,
            Severity = AlertSeverity.Error,
            Content = new ExceptionAlertContent
            {
                Message = e.Message,
                Exception = e.ToString()
            }
        });
    }
}
/// <summary>
/// Writes the backup to a local file: a smuggler export for regular backups
/// (and incremental snapshot runs), or a full storage snapshot otherwise.
/// Writes to an ".in-progress" temp file and renames it only on success.
/// Returns the last document etag covered by the backup; any failure is
/// recorded (as text) on the status object and rethrown.
/// </summary>
private long CreateLocalBackupOrSnapshot(
    PeriodicBackupStatus status,
    string backupFilePath,
    long?startDocumentEtag,
    DocumentsOperationContext context,
    DocumentsTransaction tx,
    Action <IOperationProgress> onProgress)
{
    long lastEtag;
    using (status.LocalBackup.UpdateStats(_isFullBackup))
    {
        try
        {
            // will rename the file after the backup is finished
            var tempBackupFilePath = backupFilePath + InProgressExtension;
            // NOTE: && binds tighter than ||, so this reads as
            // Backup || (Snapshot && incremental); a FULL snapshot falls
            // through to the else branch below.
            if (_configuration.BackupType == BackupType.Backup ||
                _configuration.BackupType == BackupType.Snapshot && _isFullBackup == false)
            {
                var backupType = _configuration.BackupType == BackupType.Snapshot ? "snapshot " : string.Empty;
                AddInfo($"Started an incremental {backupType}backup", onProgress);
                // smuggler backup
                var options = new DatabaseSmugglerOptionsServerSide
                {
                    AuthorizationStatus = AuthorizationStatus.DatabaseAdmin,
                };
                if (_isFullBackup == false)
                {
                    // Incremental backups must carry tombstones so deletions propagate.
                    options.OperateOnTypes |= DatabaseItemType.Tombstones;
                }
                CreateBackup(options, tempBackupFilePath, startDocumentEtag, context, onProgress);
                lastEtag = _isFullBackup ?
                    DocumentsStorage.ReadLastEtag(tx.InnerTransaction) :
                    _backupResult.GetLastEtag();
            }
            else
            {
                // snapshot backup
                AddInfo("Started a snapshot backup", onProgress);
                lastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                var indexesCount = _database.IndexStore.Count;
                var totalSw = Stopwatch.StartNew();
                var sw = Stopwatch.StartNew();
                // Progress callback: forwards messages and accumulates the file count.
                var smugglerResult = _database.FullBackupTo(tempBackupFilePath,
                    info =>
                    {
                        AddInfo(info.Message, onProgress);
                        _backupResult.SnapshotBackup.ReadCount += info.FilesCount;
                        if (sw.ElapsedMilliseconds > 0 && info.FilesCount > 0)
                        {
                            AddInfo($"Backed up {_backupResult.SnapshotBackup.ReadCount} " + $"file{(_backupResult.SnapshotBackup.ReadCount > 1 ? "s" : string.Empty)}", onProgress);
                            sw.Restart();
                        }
                    }, TaskCancelToken.Token);
                EnsureSnapshotProcessed(context, smugglerResult, indexesCount);
                AddInfo($"Backed up {_backupResult.SnapshotBackup.ReadCount} files, " + $"took: {totalSw.ElapsedMilliseconds:#,#;;0}ms", onProgress);
            }
            // Atomically publish the finished file under its final name.
            IOExtensions.RenameFile(tempBackupFilePath, backupFilePath);
        }
        catch (Exception e)
        {
            status.LocalBackup.Exception = e.ToString();
            throw;
        }
    }
    return (lastEtag);
}
/// <summary>
/// Runs one periodic backup cycle: decides between full and incremental, creates the local
/// backup/snapshot, uploads it to the configured destinations, and persists the updated
/// backup status. Exceptions are reported via the notification center rather than propagated.
/// </summary>
/// <param name="configuration">The periodic backup configuration being executed.</param>
/// <param name="status">Mutable status for this backup task; updated and saved in the finally block.</param>
/// <param name="isFullBackup">Caller's request for a full backup; may be promoted to true below.</param>
private async Task RunPeriodicBackup(
    PeriodicBackupConfiguration configuration,
    PeriodicBackupStatus status, bool isFullBackup)
{
    var backupStarted = SystemTime.UtcNow;
    var totalSw = Stopwatch.StartNew();
    status.BackupType = configuration.BackupType;
    try
    {
        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
        using (var tx = context.OpenReadTransaction())
        {
            var backupToLocalFolder = PeriodicBackupConfiguration.CanBackupUsing(configuration.LocalSettings);
            var now = SystemTime.UtcNow.ToString(DateTimeFormat, CultureInfo.InvariantCulture);

            if (status.LocalBackup == null)
            {
                status.LocalBackup = new LocalBackup();
            }

            PathSetting backupDirectory;
            string folderName;
            // check if we need to do a new full backup
            if (isFullBackup ||
                status.LastFullBackup == null || // no full backup was previously performed
                status.NodeTag != _serverStore.NodeTag || // last backup was performed by a different node
                status.BackupType != configuration.BackupType || // backup type has changed
                status.LastEtag == null || // last document etag wasn't updated
                backupToLocalFolder && DirectoryContainsFullBackupOrSnapshot(status.LocalBackup.BackupDirectory, configuration.BackupType) == false)
                // the local folder has a missing full backup
            {
                isFullBackup = true;

                folderName = $"{now}.ravendb-{_database.Name}-{_serverStore.NodeTag}-{configuration.BackupType.ToString().ToLower()}";

                // when no local folder is configured, write into the server's temp backup path
                backupDirectory = backupToLocalFolder ? new PathSetting(configuration.LocalSettings.FolderPath).Combine(folderName) : _tempBackupPath;

                if (Directory.Exists(backupDirectory.FullPath) == false)
                {
                    Directory.CreateDirectory(backupDirectory.FullPath);
                }

                status.LocalBackup.TempFolderUsed = backupToLocalFolder == false;
                status.LocalBackup.BackupDirectory = backupToLocalFolder ? backupDirectory.FullPath : null;
            }
            else
            {
                // incremental backup reuses the folder of the previous full backup
                backupDirectory = backupToLocalFolder ? new PathSetting(status.LocalBackup.BackupDirectory) : _tempBackupPath;
                folderName = status.FolderName;
            }

            if (_logger.IsInfoEnabled)
            {
                var fullBackupText = "a " + (configuration.BackupType == BackupType.Backup ? "full backup" : "snapshot");
                _logger.Info($"Creating {(isFullBackup ? fullBackupText : "an incremental backup")}");
            }

            if (isFullBackup == false)
            {
                // no-op if nothing has changed
                var currentLastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                if (currentLastEtag == status.LastEtag)
                {
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info("Skipping incremental backup because " +
                                     $"last etag ({currentLastEtag}) hasn't changed since last backup");
                    }
                    status.DurationInMs = totalSw.ElapsedMilliseconds;
                    status.LastIncrementalBackup = backupStarted;
                    return;
                }
            }

            // full backups always start from scratch; incrementals continue from the last saved etag
            var startDocumentEtag = isFullBackup == false ? status.LastEtag : null;
            var fileName = GetFileName(isFullBackup, backupDirectory.FullPath, now, configuration.BackupType, out string backupFilePath);
            var lastEtag = CreateLocalBackupOrSnapshot(configuration, isFullBackup, status, backupFilePath, startDocumentEtag, context, tx);

            try
            {
                await UploadToServer(configuration, status, backupFilePath, folderName, fileName, isFullBackup);
            }
            finally
            {
                // if user did not specify local folder we delete temporary file
                if (backupToLocalFolder == false)
                {
                    IOExtensions.DeleteFile(backupFilePath);
                }
            }

            // only advance the recorded etag after both the local write and the upload completed
            status.LastEtag = lastEtag;
            status.FolderName = folderName;
        }

        totalSw.Stop();

        if (_logger.IsInfoEnabled)
        {
            // NOTE(review): "a " + " full backup" yields a double space ("a  full backup");
            // the similar message above uses "full backup" without the leading space — likely
            // unintended, confirm before changing the log text.
            var fullBackupText = "a " + (configuration.BackupType == BackupType.Backup ? " full backup" : " snapshot");
            _logger.Info($"Successfully created {(isFullBackup ? fullBackupText : "an incremental backup")} " +
                         $"in {totalSw.ElapsedMilliseconds:#,#;;0} ms");
        }
    }
    catch (OperationCanceledException)
    {
        // shutting down, probably
    }
    catch (ObjectDisposedException)
    {
        // shutting down, probably
    }
    catch (Exception e)
    {
        const string message = "Error when performing periodic backup";

        if (_logger.IsOperationsEnabled)
        {
            _logger.Operations(message, e);
        }

        _database.NotificationCenter.Add(AlertRaised.Create("Periodic Backup",
            message,
            AlertType.PeriodicBackup,
            NotificationSeverity.Error,
            details: new ExceptionDetails(e)));
    }
    finally
    {
        // whether we succeeded or not,
        // we need to update the last backup time to avoid
        // starting a new backup right after this one
        if (isFullBackup)
        {
            status.LastFullBackup = backupStarted;
        }
        else
        {
            status.LastIncrementalBackup = backupStarted;
        }

        status.NodeTag = _serverStore.NodeTag;
        status.DurationInMs = totalSw.ElapsedMilliseconds;
        status.Version++;

        // save the backup status
        await WriteStatus(status);
    }
}
/// <summary>
/// Populates <paramref name="stats"/> with the database's current counts, etags, sizes,
/// storage details and per-index information, read through the supplied query context.
/// </summary>
/// <param name="stats">The statistics object to fill in place.</param>
/// <param name="context">Open query operation context providing the documents transaction.</param>
private void FillDatabaseStatistics(DatabaseStatistics stats, QueryOperationContext context)
{
    var allIndexes = Database.IndexStore.GetIndexes().ToList();
    var diskSize = Database.GetSizeOnDisk();
    var docsContext = context.Documents;
    var docsStorage = Database.DocumentsStorage;
    var innerTx = docsContext.Transaction.InnerTransaction;

    // etags and change vector come straight from the open transaction
    stats.LastDocEtag = DocumentsStorage.ReadLastDocumentEtag(innerTx);
    stats.LastDatabaseEtag = DocumentsStorage.ReadLastEtag(innerTx);
    stats.DatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(docsContext);

    // entity counts
    stats.CountOfDocuments = docsStorage.GetNumberOfDocuments(docsContext);
    stats.CountOfRevisionDocuments = docsStorage.RevisionsStorage.GetNumberOfRevisionDocuments(docsContext);
    stats.CountOfDocumentsConflicts = docsStorage.ConflictsStorage.GetNumberOfDocumentsConflicts(docsContext);
    stats.CountOfTombstones = docsStorage.GetNumberOfTombstones(docsContext);
    stats.CountOfConflicts = docsStorage.ConflictsStorage.ConflictsCount;

    // storage sizes and transaction-merger backlog
    stats.SizeOnDisk = diskSize.Data;
    stats.NumberOfTransactionMergerQueueOperations = Database.TxMerger.NumberOfQueuedOperations;
    stats.TempBuffersSizeOnDisk = diskSize.TempBuffers;

    stats.CountOfCounterEntries = docsStorage.CountersStorage.GetNumberOfCounterEntries(docsContext);
    stats.CountOfTimeSeriesSegments = docsStorage.TimeSeriesStorage.GetNumberOfTimeSeriesSegments(docsContext);

    var attachmentCounts = docsStorage.AttachmentsStorage.GetNumberOfAttachments(docsContext);
    stats.CountOfAttachments = attachmentCounts.AttachmentCount;
    stats.CountOfUniqueAttachments = attachmentCounts.StreamsCount;
    stats.CountOfIndexes = allIndexes.Count;

    // low-level storage environment details
    stats.DatabaseId = docsStorage.Environment.Base64Id;
    stats.Is64Bit = !docsStorage.Environment.Options.ForceUsing32BitsPager && IntPtr.Size == sizeof(long);
    stats.Pager = docsStorage.Environment.Options.DataPager.GetType().ToString();

    stats.Indexes = new IndexInformation[allIndexes.Count];
    for (var position = 0; position < allIndexes.Count; position++)
    {
        var currentIndex = allIndexes[position];

        bool staleness;
        try
        {
            staleness = currentIndex.IsStale(context);
        }
        catch (OperationCanceledException)
        {
            // if the index has just been removed, let us consider it stale
            // until it can be safely removed from the list of indexes in the
            // database
            staleness = true;
        }

        stats.Indexes[position] = new IndexInformation
        {
            State = currentIndex.State,
            IsStale = staleness,
            Name = currentIndex.Name,
            LockMode = currentIndex.Definition.LockMode,
            Priority = currentIndex.Definition.Priority,
            Type = currentIndex.Type,
            LastIndexingTime = currentIndex.LastIndexingTime,
            SourceType = currentIndex.SourceType
        };

        // keep the latest indexing time seen across all indexes; the nested ternary
        // deliberately preserves the original lifted-null comparison semantics
        stats.LastIndexingTime = stats.LastIndexingTime.HasValue
            ? (stats.LastIndexingTime >= currentIndex.LastIndexingTime ? stats.LastIndexingTime : currentIndex.LastIndexingTime)
            : currentIndex.LastIndexingTime;
    }
}
/// <summary>
/// Main loop of an outgoing replication connection: validates the database record,
/// connects to the destination over TCP (optionally TLS), performs the initial
/// handshake, then repeatedly sends replication batches and heartbeats until
/// cancellation or a connection failure. Failures are reported via the Failed event.
/// </summary>
private void ReplicateToDestination()
{
    try
    {
        AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiate);
        NativeMemory.EnsureRegistered();
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Will replicate to {Destination.FromString()} via {_connectionInfo.Url}");
        }

        // sanity checks against the cluster-level database record before dialing out
        using (_parent._server.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
        using (context.OpenReadTransaction())
        {
            var record = _parent.LoadDatabaseRecord();
            if (record == null)
            {
                throw new InvalidOperationException($"The database record for {_parent.Database.Name} does not exist?!");
            }

            // encrypted databases must not replicate over plain-text connections
            if (record.Encrypted && Destination.Url.StartsWith("https:", StringComparison.OrdinalIgnoreCase) == false)
            {
                throw new InvalidOperationException(
                    $"{record.DatabaseName} is encrypted, and require HTTPS for replication, but had endpoint with url {Destination.Url} to database {Destination.Database}");
            }
        }

        var task = TcpUtils.ConnectSocketAsync(_connectionInfo, _parent._server.Engine.TcpConnectionTimeout, _log);
        task.Wait(CancellationToken);
        using (Interlocked.Exchange(ref _tcpClient, task.Result))
        {
            var wrapSsl = TcpUtils.WrapStreamWithSslAsync(_tcpClient, _connectionInfo, _parent._server.Server.Certificate.Certificate, _parent._server.Engine.TcpConnectionTimeout);
            wrapSsl.Wait(CancellationToken);

            using (_stream = wrapSsl.Result) // note that _stream is being disposed by the interruptible read
            using (_interruptibleRead = new InterruptibleRead(_database.DocumentsStorage.ContextPool, _stream))
            using (_buffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance())
            {
                var documentSender = new ReplicationDocumentSender(_stream, this, _log);

                WriteHeaderToRemotePeer();
                //handle initial response to last etag and staff
                try
                {
                    var response = HandleServerResponse(getFullResponse: true);
                    switch (response.ReplyType)
                    {
                        //The first time we start replication we need to register the destination current CV
                        case ReplicationMessageReply.ReplyType.Ok:
                            LastAcceptedChangeVector = response.Reply.DatabaseChangeVector;
                            break;
                        case ReplicationMessageReply.ReplyType.Error:
                            var exception = new InvalidOperationException(response.Reply.Exception);
                            if (response.Reply.Exception.Contains(nameof(DatabaseDoesNotExistException)) ||
                                response.Reply.Exception.Contains(nameof(DatabaseNotRelevantException)))
                            {
                                AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, "Database does not exist");
                                DatabaseDoesNotExistException.ThrowWithMessageAndException(Destination.Database, response.Reply.Message, exception);
                            }

                            AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, $"Got error: {response.Reply.Exception}");
                            throw exception;
                    }
                }
                catch (DatabaseDoesNotExistException e)
                {
                    var msg = $"Failed to parse initial server replication response, because there is no database named {_database.Name} " +
                              "on the other end. ";
                    if (_external)
                    {
                        msg += "In order for the replication to work, a database with the same name needs to be created at the destination";
                    }

                    var young = (DateTime.UtcNow - _startedAt).TotalSeconds < 30;
                    if (young)
                    {
                        msg += "This can happen if the other node wasn't yet notified about being assigned this database and should be resolved shortly.";
                    }

                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }

                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);

                    // won't add an alert on young connections
                    // because it may take a few seconds for the other side to be notified by
                    // the cluster that it has this db.
                    if (young == false)
                    {
                        AddAlertOnFailureToReachOtherSide(msg, e);
                    }

                    throw;
                }
                catch (OperationCanceledException e)
                {
                    const string msg = "Got operation canceled notification while opening outgoing replication channel. " +
                                       "Aborting and closing the channel.";
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }
                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                    throw;
                }
                catch (Exception e)
                {
                    var msg = $"{OutgoingReplicationThreadName} got an unexpected exception during initial handshake";
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(msg, e);
                    }

                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                    AddAlertOnFailureToReachOtherSide(msg, e);
                    throw;
                }

                DateTime nextReplicateAt = default(DateTime);

                // main send loop: inner loop sends batches until there is no work,
                // then we wait for changes / send heartbeats until there is work again
                while (_cts.IsCancellationRequested == false)
                {
                    while (_database.Time.GetUtcNow() > nextReplicateAt)
                    {
                        if (_parent.DebugWaitAndRunReplicationOnce != null)
                        {
                            // test hook: gate each replication iteration on an external signal
                            _parent.DebugWaitAndRunReplicationOnce.Wait(_cts.Token);
                            _parent.DebugWaitAndRunReplicationOnce.Reset();
                        }

                        var sp = Stopwatch.StartNew();
                        var stats = _lastStats = new OutgoingReplicationStatsAggregator(_parent.GetNextReplicationStatsId(), _lastStats);
                        AddReplicationPerformance(stats);
                        AddReplicationPulse(ReplicationPulseDirection.OutgoingBegin);

                        try
                        {
                            using (var scope = stats.CreateScope())
                            {
                                try
                                {
                                    if (Destination is InternalReplication dest)
                                    {
                                        _parent.EnsureNotDeleted(dest.NodeTag);
                                    }
                                    var didWork = documentSender.ExecuteReplicationOnce(scope, ref nextReplicateAt);
                                    if (didWork == false)
                                    {
                                        break;
                                    }

                                    if (Destination is ExternalReplication externalReplication)
                                    {
                                        var taskId = externalReplication.TaskId;
                                        UpdateExternalReplicationInfo(taskId);
                                    }

                                    DocumentsSend?.Invoke(this);

                                    // after a minute of continuous sending, yield and let the
                                    // outer loop re-evaluate (the change event stays set)
                                    if (sp.ElapsedMilliseconds > 60 * 1000)
                                    {
                                        _waitForChanges.Set();
                                        break;
                                    }
                                }
                                catch (OperationCanceledException)
                                {
                                    // cancellation is not an actual error,
                                    // it is a "notification" that we need to cancel current operation
                                    const string msg = "Operation was canceled.";
                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, msg);
                                    throw;
                                }
                                catch (Exception e)
                                {
                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, e.Message);
                                    scope.AddError(e);
                                    throw;
                                }
                            }
                        }
                        finally
                        {
                            stats.Complete();
                            AddReplicationPulse(ReplicationPulseDirection.OutgoingEnd);
                        }
                    }

                    //if this returns false, this means either timeout or canceled token is activated
                    while (WaitForChanges(_parent.MinimalHeartbeatInterval, _cts.Token) == false)
                    {
                        //If we got cancelled we need to break right away
                        if (_cts.IsCancellationRequested)
                        {
                            break;
                        }

                        // open tx
                        // read current change vector compare to last sent
                        // if okay, send cv
                        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
                        using (var tx = ctx.OpenReadTransaction())
                        {
                            var etag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                            if (etag == _lastSentDocumentEtag)
                            {
                                // nothing new locally: just keep the connection alive with our CV
                                SendHeartbeat(DocumentsStorage.GetDatabaseChangeVector(ctx));
                                _parent.CompleteDeletionIfNeeded();
                            }
                            else if (nextReplicateAt > DateTime.UtcNow)
                            {
                                // we are inside a deliberate replication delay; heartbeat only
                                SendHeartbeat(null);
                            }
                            else
                            {
                                //Send a heartbeat first so we will get an updated CV of the destination
                                var currentChangeVector = DocumentsStorage.GetDatabaseChangeVector(ctx);
                                SendHeartbeat(null);
                                //If our previous CV is already merged to the destination wait a bit more
                                if (ChangeVectorUtils.GetConflictStatus(LastAcceptedChangeVector, currentChangeVector) == ConflictStatus.AlreadyMerged)
                                {
                                    continue;
                                }

                                // we have updates that we need to send to the other side
                                // let's do that..
                                // this can happen if we got replication from another node
                                // that we need to send to it. Note that we typically
                                // will wait for the other node to send the data directly to
                                // our destination, but if it doesn't, we'll step in.
                                // In this case, we try to limit congestion in the network and
                                // only send updates that we have gotten from someone else after
                                // a certain time, to let the other side tell us that it already
                                // got it. Note that this is merely an optimization to reduce network
                                // traffic. It is fine to have the same data come from different sources.
                                break;
                            }
                        }
                    }

                    _waitForChanges.Reset();
                }
            }
        }
    }
    catch (AggregateException e)
    {
        // Wait()/Result calls above surface failures wrapped in AggregateException;
        // unwrap single cancellation/IO inner exceptions for friendlier handling
        if (e.InnerExceptions.Count == 1)
        {
            if (e.InnerException is OperationCanceledException oce)
            {
                HandleOperationCancelException(oce);
            }
            if (e.InnerException is IOException ioe)
            {
                HandleIOException(ioe);
            }
        }

        HandleException(e);
    }
    catch (OperationCanceledException e)
    {
        HandleOperationCancelException(e);
    }
    catch (IOException e)
    {
        HandleIOException(e);
    }
    catch (Exception e)
    {
        HandleException(e);
    }

    // local helpers: log the failure and, where appropriate, raise the Failed event
    // so the parent can schedule a retry
    void HandleOperationCancelException(OperationCanceledException e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Operation canceled on replication thread ({FromToString}). " +
                      $"This is not necessary due to an issue. Stopped the thread.");
        }
        if (_cts.IsCancellationRequested == false)
        {
            Failed?.Invoke(this, e);
        }
    }

    void HandleIOException(IOException e)
    {
        if (_log.IsInfoEnabled)
        {
            if (e.InnerException is SocketException)
            {
                _log.Info($"SocketException was thrown from the connection to remote node ({FromToString}). " +
                          $"This might mean that the remote node is done or there is a network issue.", e);
            }
            else
            {
                _log.Info($"IOException was thrown from the connection to remote node ({FromToString}).", e);
            }
        }
        Failed?.Invoke(this, e);
    }

    void HandleException(Exception e)
    {
        if (_log.IsInfoEnabled)
        {
            _log.Info($"Unexpected exception occurred on replication thread ({FromToString}). " +
                      $"Replication stopped (will be retried later).", e);
        }
        Failed?.Invoke(this, e);
    }
}
/// <summary>
/// Periodically (throttled by DatabaseStatsThrottle) snapshots database statistics —
/// document/conflict/index counts, staleness, indexing errors, last etag, change vector
/// and per-collection data — and raises a DatabaseStatsChanged notification when the
/// snapshot differs from the previous one.
/// </summary>
protected override async Task DoWork()
{
    await WaitOrThrowOperationCanceled(_notificationCenter.Options.DatabaseStatsThrottle);

    Stats current;
    DateTime? lastIndexingErrorTime = null;
    using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (context.OpenReadTransaction())
    {
        var indexes = _database.IndexStore.GetIndexes().ToList();
        var staleIndexes = 0;
        var countOfIndexingErrors = 0L;

        // ReSharper disable once LoopCanBeConvertedToQuery
        foreach (var index in indexes)
        {
            if (index.IsStale(context))
            {
                staleIndexes++;
            }

            var errorCount = index.GetErrorCount();
            if (errorCount > 0)
            {
                // track the most recent indexing-error timestamp across all indexes
                var lastError = index.GetLastIndexingErrorTime();
                if (lastError != null)
                {
                    if (lastIndexingErrorTime == null || lastError > lastIndexingErrorTime)
                    {
                        lastIndexingErrorTime = lastError;
                    }
                }
            }
            countOfIndexingErrors += errorCount;
        }

        // all figures below are read inside the same read transaction, so the
        // snapshot is internally consistent
        current = new Stats
        {
            CountOfConflicts = _database.DocumentsStorage.ConflictsStorage.GetCountOfDocumentsConflicts(context),
            CountOfDocuments = _database.DocumentsStorage.GetNumberOfDocuments(context),
            CountOfIndexes = indexes.Count,
            CountOfStaleIndexes = staleIndexes,
            CountOfIndexingErrors = countOfIndexingErrors,
            LastEtag = DocumentsStorage.ReadLastEtag(context.Transaction.InnerTransaction),
            GlobalChangeVector = DocumentsStorage.GetDatabaseChangeVector(context)
        };

        current.Collections = _database.DocumentsStorage.GetCollections(context)
            .ToDictionary(x => x.Name,
                x => new DatabaseStatsChanged.ModifiedCollection(x.Name, x.Count, _database.DocumentsStorage.GetLastDocumentChangeVector(context, x.Name)));
    }

    // nothing changed since the previous snapshot: skip the notification
    if (_latest != null && _latest.Equals(current))
    {
        return;
    }

    // on the first run report all collections as modified; afterwards only the diff
    var modifiedCollections = _latest == null ? current.Collections.Values.ToList() : ExtractModifiedCollections(current);

    _notificationCenter.Add(DatabaseStatsChanged.Create(current.CountOfConflicts, current.CountOfDocuments,
        current.CountOfIndexes, current.CountOfStaleIndexes, current.GlobalChangeVector,
        current.LastEtag, current.CountOfIndexingErrors, lastIndexingErrorTime, modifiedCollections));

    _latest = current;
}
/// <summary>
/// Yields a status report for every database name known to the cluster that is present in
/// this server's databases cache: load state (Faulted/Loading/Shutdown/Loaded) and, for
/// loaded databases, etags, counts, change vector and per-index progress.
/// </summary>
/// <param name="ctx">Cluster-level transaction context used to enumerate database names.</param>
/// <returns>Lazy sequence of (database name, status report) tuples.</returns>
private IEnumerable<(string name, DatabaseStatusReport report)> CollectDatabaseInformation(TransactionOperationContext ctx)
{
    foreach (var dbName in _server.Cluster.GetDatabaseNames(ctx))
    {
        if (_token.IsCancellationRequested)
        {
            yield break;
        }

        if (_server.DatabasesLandlord.DatabasesCache.TryGetValue(dbName, out var dbTask) == false)
        {
            continue; // Database does not exists in this server
        }

        var report = new DatabaseStatusReport
        {
            Name = dbName,
            NodeName = _server.NodeTag
        };

        if (dbTask.IsCanceled || dbTask.IsFaulted)
        {
            report.Status = DatabaseStatus.Faulted;
            // BUGFIX: a *canceled* task has a null Exception, so the previous unconditional
            // dbTask.Exception.ToString() threw NullReferenceException here
            report.Error = dbTask.Exception?.ToString() ?? "Database load task was canceled";
            yield return (dbName, report);
            continue;
        }

        if (dbTask.IsCompleted == false)
        {
            report.Status = DatabaseStatus.Loading;
            yield return (dbName, report);
            continue;
        }

        var dbInstance = dbTask.Result;
        var documentsStorage = dbInstance.DocumentsStorage;
        var indexStorage = dbInstance.IndexStore;

        if (dbInstance.DatabaseShutdown.IsCancellationRequested)
        {
            report.Status = DatabaseStatus.Shutdown;
            yield return (dbName, report);
            continue;
        }

        report.Status = DatabaseStatus.Loaded;
        try
        {
            using (documentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
            using (var tx = context.OpenReadTransaction())
            {
                report.LastEtag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                report.LastTombstoneEtag = DocumentsStorage.ReadLastTombstoneEtag(tx.InnerTransaction);
                report.NumberOfConflicts = documentsStorage.ConflictsStorage.ConflictsCount;
                report.NumberOfDocuments = documentsStorage.GetNumberOfDocuments(context);
                report.LastChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);

                if (indexStorage != null)
                {
                    foreach (var index in indexStorage.GetIndexes())
                    {
                        var stats = index.GetIndexStats(context);
                        //We might have old version of this index with the same name
                        report.LastIndexStats.Add(index.Name, new DatabaseStatusReport.ObservedIndexStatus
                        {
                            LastIndexedEtag = stats.LastProcessedEtag,
                            IsSideBySide = false, // TODO: fix this so it get whatever this has side by side or not
                            IsStale = stats.IsStale
                        });
                    }
                }
            }
        }
        catch (Exception e)
        {
            // keep reporting the database (still marked Loaded) but surface the failure
            report.Error = e.ToString();
        }

        yield return (dbName, report);
    }
}