Example #1
        public void Initialize(InitializeOptions options = InitializeOptions.None)
        {
            try
            {
                _addToInitLog("Initializing NotificationCenter");
                NotificationCenter.Initialize(this);

                _addToInitLog("Initializing DocumentStorage");
                DocumentsStorage.Initialize((options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId);
                _addToInitLog("Starting Transaction Merger");
                TxMerger.Start();
                _addToInitLog("Initializing ConfigurationStorage");
                ConfigurationStorage.Initialize();

                if ((options & InitializeOptions.SkipLoadingDatabaseRecord) == InitializeOptions.SkipLoadingDatabaseRecord)
                {
                    return;
                }

                _addToInitLog("Loading Database");
                long           index;
                DatabaseRecord record;
                using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
                    using (context.OpenReadTransaction())
                        record = _serverStore.Cluster.ReadDatabase(context, Name, out index);

                if (record == null)
                {
                    DatabaseDoesNotExistException.Throw(Name);
                }

                PeriodicBackupRunner = new PeriodicBackupRunner(this, _serverStore);

                _addToInitLog("Initializing IndexStore (async)");
                _indexStoreTask = IndexStore.InitializeAsync(record);
                _addToInitLog("Initializing Replication");
                ReplicationLoader?.Initialize(record);
                _addToInitLog("Initializing ETL");
                EtlLoader.Initialize(record);

                DocumentTombstoneCleaner.Start();

                try
                {
                    _indexStoreTask.Wait(DatabaseShutdown);
                }
                finally
                {
                    _addToInitLog("Initializing IndexStore completed");
                    _indexStoreTask = null;
                }

                _addToInitLog("Initializing SubscriptionStorage");
                SubscriptionStorage.Initialize();

                NotifyFeaturesAboutStateChange(record, index);
            }
            catch (Exception)
            {
                Dispose();
                throw;
            }
        }
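
The option checks above use the standard [Flags] enum mask-and-compare idiom. Below is a minimal, self-contained sketch of that idiom; the member names match the calls above, but the numeric flag values are assumed here, not taken from the RavenDB source.

using System;

[Flags]
public enum InitializeOptions
{
    None                      = 0,
    GenerateNewDatabaseId     = 1, // assumed value
    SkipLoadingDatabaseRecord = 2  // assumed value
}

public static class InitializeOptionsDemo
{
    public static void Main()
    {
        var options = InitializeOptions.GenerateNewDatabaseId | InitializeOptions.SkipLoadingDatabaseRecord;

        // Mask with the flag, then compare against the flag itself,
        // exactly as Initialize does above.
        var generateNewId = (options & InitializeOptions.GenerateNewDatabaseId) == InitializeOptions.GenerateNewDatabaseId;
        Console.WriteLine(generateNewId); // True
    }
}
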
        public bool ExecuteReplicationOnce(TcpConnectionOptions tcpConnectionOptions, OutgoingReplicationStatsScope stats, ref long next)
        {
            EnsureValidStats(stats);
            var wasInterrupted = false;
            var delay          = GetDelayReplication();
            var currentNext    = next;

            using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
                using (documentsContext.OpenReadTransaction())
                {
                    try
                    {
                        // We scan through the documents to send to the other side. We need to be
                        // careful when filtering out a lot of documents: the other side must be told
                        // about the skipped range, and at the same time we need to send a heartbeat
                        // to keep the TCP connection alive.
                        _lastEtag = _parent._lastSentDocumentEtag;
                        _parent.CancellationToken.ThrowIfCancellationRequested();

                        var  skippedReplicationItemsInfo = new SkippedReplicationItemsInfo();
                        long prevLastEtag     = _lastEtag;
                        var  replicationState = new ReplicationState
                        {
                            BatchSize             = _parent._database.Configuration.Replication.MaxItemsCount,
                            MaxSizeToSend         = _parent._database.Configuration.Replication.MaxSizeToSend,
                            CurrentNext           = currentNext,
                            Delay                 = delay,
                            Context               = documentsContext,
                            LastTransactionMarker = -1,
                            NumberOfItemsSent     = 0,
                            Size = 0L
                        };

                        using (_stats.Storage.Start())
                        {
                            foreach (var item in GetReplicationItems(_parent._database, documentsContext, _lastEtag, _stats, _parent.SupportedFeatures.Replication.CaseInsensitiveCounters))
                            {
                                _parent.CancellationToken.ThrowIfCancellationRequested();

                                if (replicationState.LastTransactionMarker != item.TransactionMarker)
                                {
                                    replicationState.Item = item;

                                    if (CanContinueBatch(replicationState, ref next) == false)
                                    {
                                        wasInterrupted = true;
                                        break;
                                    }

                                    replicationState.LastTransactionMarker = item.TransactionMarker;
                                }

                                _stats.Storage.RecordInputAttempt();

                                // Add missing attachments in the same batch as the document that
                                // contains them, without modifying the last etag or transaction boundary.
                                if (MissingAttachmentsInLastBatch &&
                                    item.Type == ReplicationBatchItem.ReplicationItemType.Document &&
                                    item is DocumentReplicationItem docItem &&
                                    docItem.Flags.Contain(DocumentFlags.HasAttachments))
                                {
                                    var missingAttachmentBase64Hashes = replicationState.MissingAttachmentBase64Hashes ??= new HashSet<Slice>(SliceStructComparer.Instance);
                                    var type = (docItem.Flags & DocumentFlags.Revision) == DocumentFlags.Revision ? AttachmentType.Revision : AttachmentType.Document;
                                    foreach (var attachment in _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentsForDocument(documentsContext, type, docItem.Id, docItem.ChangeVector))
                                    {
                                        // we need to filter out attachments that are being sent in the same batch as the document
                                        if (attachment.Etag >= prevLastEtag)
                                        {
                                            if (_replicaAttachmentStreams.ContainsKey(attachment.Base64Hash) == false)
                                            {
                                                missingAttachmentBase64Hashes.Add(attachment.Base64Hash);
                                            }

                                            continue;
                                        }

                                        var stream = _parent._database.DocumentsStorage.AttachmentsStorage.GetAttachmentStream(documentsContext, attachment.Base64Hash);
                                        attachment.Stream = stream;
                                        var attachmentItem = AttachmentReplicationItem.From(documentsContext, attachment);
                                        AddReplicationItemToBatch(attachmentItem, _stats.Storage, replicationState, skippedReplicationItemsInfo);
                                        replicationState.Size += attachmentItem.Size;
                                    }
                                }

                                _lastEtag = item.Etag;

                                if (AddReplicationItemToBatch(item, _stats.Storage, replicationState, skippedReplicationItemsInfo) == false)
                                {
                                    // this item won't be needed anymore
                                    item.Dispose();
                                    continue;
                                }

                                replicationState.Size += item.Size;

                                replicationState.NumberOfItemsSent++;
                            }
                        }

                        if (_log.IsInfoEnabled)
                        {
                            if (skippedReplicationItemsInfo.SkippedItems > 0)
                            {
                                var message = skippedReplicationItemsInfo.GetInfoForDebug(_parent.LastAcceptedChangeVector);
                                _log.Info(message);
                            }

                            var msg = $"Found {_orderedReplicaItems.Count:#,#;;0} documents " +
                                      $"and {_replicaAttachmentStreams.Count} attachment's streams " +
                                      $"to replicate to {_parent.Node.FromString()}, ";

                            var encryptionSize = documentsContext.Transaction.InnerTransaction.LowLevelTransaction.AdditionalMemoryUsageSize.GetValue(SizeUnit.Bytes);
                            if (encryptionSize > 0)
                            {
                                msg += $"encryption buffer overhead size is {new Size(encryptionSize, SizeUnit.Bytes)}, ";
                            }
                            msg += $"total size: {new Size(replicationState.Size + encryptionSize, SizeUnit.Bytes)}";

                            _log.Info(msg);
                        }

                        if (_orderedReplicaItems.Count == 0)
                        {
                            var hasModification = _lastEtag != _parent._lastSentDocumentEtag;

                            // ensure that the other server is aware that we skipped
                            // (potentially a lot of) documents, and update the last
                            // etag they have from us on the other side
                            _parent._lastSentDocumentEtag = _lastEtag;
                            _parent._lastDocumentSentTime = DateTime.UtcNow;
                            var changeVector = wasInterrupted ? null : DocumentsStorage.GetDatabaseChangeVector(documentsContext);
                            _parent.SendHeartbeat(changeVector);
                            return hasModification;
                        }

                        _parent.CancellationToken.ThrowIfCancellationRequested();

                        try
                        {
                            using (_stats.Network.Start())
                            {
                                SendDocumentsBatch(documentsContext, _stats.Network);
                                tcpConnectionOptions._lastEtagSent = _lastEtag;
                                tcpConnectionOptions.RegisterBytesSent(replicationState.Size);
                                if (MissingAttachmentsInLastBatch)
                                {
                                    return false;
                                }
                            }
                        }
                        catch (OperationCanceledException)
                        {
                            if (_log.IsInfoEnabled)
                            {
                                _log.Info("Received cancellation notification while sending document replication batch.");
                            }
                            throw;
                        }
                        catch (Exception e)
                        {
                            if (_log.IsInfoEnabled)
                            {
                                _log.Info("Failed to send document replication batch", e);
                            }
                            throw;
                        }

                        MissingAttachmentsInLastBatch = false;

                        return true;
                    }
                    finally
                    {
                        foreach (var item in _orderedReplicaItems)
                        {
                            item.Value.Dispose();
                        }
                        _orderedReplicaItems.Clear();
                        _replicaAttachmentStreams.Clear();
                    }
                }
        }
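
CanContinueBatch is what bounds a batch by item count and total size. The following is a simplified stand-in for that check, assuming only the two limits visible above (Replication.MaxItemsCount and Replication.MaxSizeToSend); the real method also accounts for delays and encryption buffer overhead.

public sealed class BatchLimiter
{
    private readonly long? _maxItems;
    private readonly long? _maxSizeInBytes;

    public BatchLimiter(long? maxItems, long? maxSizeInBytes)
    {
        _maxItems       = maxItems;
        _maxSizeInBytes = maxSizeInBytes;
    }

    // Returns false when the batch should be cut at the next transaction boundary.
    public bool CanContinueBatch(long numberOfItemsSent, long sizeInBytes)
    {
        if (_maxItems.HasValue && numberOfItemsSent >= _maxItems.Value)
            return false;

        if (_maxSizeInBytes.HasValue && sizeInBytes >= _maxSizeInBytes.Value)
            return false;

        return true;
    }
}
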
Example #3
 public DocumentPutAction(DocumentsStorage documentsStorage, DocumentDatabase documentDatabase)
 {
     _documentsStorage = documentsStorage;
     _documentDatabase = documentDatabase;
 }
Example #4
        public Task GetCollectionFields()
        {
            var collection = GetStringQueryString("collection", required: false);
            var prefix     = GetStringQueryString("prefix", required: false);

            using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                using (context.OpenReadTransaction())
                {
                    long   totalResults;
                    string changeVector;
                    string etag = null;

                    if (string.IsNullOrEmpty(collection))
                    {
                        changeVector = DocumentsStorage.GetDatabaseChangeVector(context);
                        totalResults = Database.DocumentsStorage.GetNumberOfDocuments(context);
                        etag         = $"{changeVector}/{totalResults}";
                    }
                    else
                    {
                        changeVector = Database.DocumentsStorage.GetLastDocumentChangeVector(context, collection);
                        totalResults = Database.DocumentsStorage.GetCollection(collection, context).Count;

                        if (changeVector != null)
                        {
                            etag = $"{changeVector}/{totalResults}";
                        }
                    }

                    if (etag != null && GetStringFromHeaders("If-None-Match") == etag)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotModified;
                        return Task.CompletedTask;
                    }
                    HttpContext.Response.Headers["ETag"] = "\"" + etag + "\"";

                    if (_buffers == null)
                    {
                        _buffers = new BlittableJsonReaderObject.PropertiesInsertionBuffer();
                    }

                    var fields = new Dictionary<LazyStringValue, FieldType>();

                    if (string.IsNullOrEmpty(collection))
                    {
                        foreach (var collectionStats in Database.DocumentsStorage.GetCollections(context))
                        {
                            FetchFieldsForCollection(context, collectionStats.Name, prefix, fields, _buffers);
                        }
                    }
                    else
                    {
                        FetchFieldsForCollection(context, collection, prefix, fields, _buffers);
                    }

                    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
                    {
                        writer.WriteStartObject();

                        var first = true;
                        foreach (var field in fields)
                        {
                            if (first == false)
                            {
                                writer.WriteComma();
                            }
                            first = false;

                            writer.WritePropertyName(field.Key);
                            writer.WriteString(field.Value.ToString());
                        }

                        writer.WriteEndObject();
                    }

                    return Task.CompletedTask;
                }
        }
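
GetCollectionFields implements HTTP conditional GET: the ETag is derived from the change vector plus the document count, a matching If-None-Match header yields 304 Not Modified with no body, and otherwise the ETag response header is set. Here is a minimal sketch of that short-circuit against a plain ASP.NET Core HttpContext; the helper name is made up for illustration.

using Microsoft.AspNetCore.Http;

public static class ConditionalGet
{
    // Returns true when the caller should stop without writing a body.
    public static bool TryRespondNotModified(HttpContext httpContext, string etag)
    {
        var clientEtag = httpContext.Request.Headers["If-None-Match"].ToString();
        if (etag != null && clientEtag == etag)
        {
            httpContext.Response.StatusCode = StatusCodes.Status304NotModified;
            return true;
        }

        httpContext.Response.Headers["ETag"] = "\"" + etag + "\"";
        return false;
    }
}
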
Example #5
        public void can_backup_and_restore()
        {
            var azureSettings = GenerateAzureSettings();

            InitContainer(azureSettings);

            using (var store = GetDocumentStore())
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new User {
                        Name = "oren"
                    }, "users/1");
                    session.CountersFor("users/1").Increment("likes", 100);
                    session.SaveChanges();
                }

                var config = new PeriodicBackupConfiguration
                {
                    BackupType    = BackupType.Backup,
                    AzureSettings = azureSettings,
                    IncrementalBackupFrequency = "0 0 1 1 *"
                };

                var backupTaskId = (store.Maintenance.Send(new UpdatePeriodicBackupOperation(config))).TaskId;
                store.Maintenance.Send(new StartBackupOperation(true, backupTaskId));
                var operation = new GetPeriodicBackupStatusOperation(backupTaskId);
                PeriodicBackupStatus status = null;
                var value = WaitForValue(() =>
                {
                    status = store.Maintenance.Send(operation).Status;
                    return status?.LastEtag;
                }, 4);
                Assert.True(4 == value, $"4 == value, Got status: {status != null}, exception: {status?.Error?.Exception}");
                Assert.True(status.LastOperationId != null, $"status.LastOperationId != null, Got status: {status != null}, exception: {status?.Error?.Exception}");

                var backupOperation = store.Maintenance.Send(new GetOperationStateOperation(status.LastOperationId.Value));

                var backupResult = backupOperation.Result as BackupResult;
                Assert.True(backupResult != null && backupResult.Counters.Processed, "backupResult != null && backupResult.Counters.Processed");
                Assert.True(1 == backupResult.Counters.ReadCount, "1 == backupResult.Counters.ReadCount");

                using (var session = store.OpenSession())
                {
                    session.Store(new User {
                        Name = "ayende"
                    }, "users/2");
                    session.CountersFor("users/2").Increment("downloads", 200);

                    session.SaveChanges();
                }

                var lastEtag = store.Maintenance.Send(new GetStatisticsOperation()).LastDocEtag;
                store.Maintenance.Send(new StartBackupOperation(false, backupTaskId));
                value = WaitForValue(() => store.Maintenance.Send(operation).Status.LastEtag, lastEtag);
                Assert.Equal(lastEtag, value);

                // restore the database with a different name
                var databaseName = $"restored_database-{Guid.NewGuid()}";

                azureSettings.RemoteFolderName = status.FolderName;
                var restoreFromAzureConfiguration = new RestoreFromAzureConfiguration
                {
                    DatabaseName        = databaseName,
                    Settings            = azureSettings,
                    DisableOngoingTasks = true
                };
                var restoreBackupOperation = new RestoreBackupOperation(restoreFromAzureConfiguration);
                var restoreOperation       = store.Maintenance.Server.Send(restoreBackupOperation);

                restoreOperation.WaitForCompletion(TimeSpan.FromSeconds(30));
                using (var store2 = GetDocumentStore(new Options()
                {
                    CreateDatabase = false,
                    ModifyDatabaseName = s => databaseName
                }))
                {
                    using (var session = store2.OpenSession(databaseName))
                    {
                        var users = session.Load<User>(new[] { "users/1", "users/2" });
                        Assert.True(users.Any(x => x.Value.Name == "oren"));
                        Assert.True(users.Any(x => x.Value.Name == "ayende"));

                        var val = session.CountersFor("users/1").Get("likes");
                        Assert.Equal(100, val);
                        val = session.CountersFor("users/2").Get("downloads");
                        Assert.Equal(200, val);
                    }

                    var originalDatabase = Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store.Database).Result;
                    var restoredDatabase = Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(databaseName).Result;
                    using (restoredDatabase.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
                        using (ctx.OpenReadTransaction())
                        {
                            var databaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(ctx);
                            Assert.Equal($"A:7-{originalDatabase.DbBase64Id}, A:8-{restoredDatabase.DbBase64Id}", databaseChangeVector);
                        }
                }
            }
        }
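
The test's WaitForValue helper polls a probe until it returns the expected value or a timeout expires, which is what makes asserting on asynchronous backup status practical. Below is a sketch of such a helper under simple assumptions (100 ms poll interval, 15 second default timeout); the actual RavenDB test-base implementation may differ.

using System;
using System.Threading;

public static class Polling
{
    public static T WaitForValue<T>(Func<T> getValue, T expected, TimeSpan? timeout = null)
    {
        var deadline = DateTime.UtcNow + (timeout ?? TimeSpan.FromSeconds(15));
        var current  = getValue();

        while (Equals(current, expected) == false && DateTime.UtcNow < deadline)
        {
            Thread.Sleep(100); // poll interval (assumed)
            current = getValue();
        }

        return current; // the caller asserts on the returned value
    }
}
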
Example #6
 public HandleDocumentReferences(Index index, Dictionary<string, HashSet<CollectionName>> referencedCollections, DocumentsStorage documentsStorage, IndexStorage indexStorage, IndexingConfiguration configuration)
     : this(index, referencedCollections, documentsStorage, indexStorage, indexStorage.ReferencesForDocuments, configuration)
 {
 }
 public HandleReferences(Index index, Dictionary<string, HashSet<CollectionName>> referencedCollections, DocumentsStorage documentsStorage, IndexStorage indexStorage, IndexingConfiguration configuration)
 {
     _index = index;
     _referencedCollections = referencedCollections;
     _configuration         = configuration;
     _documentsStorage      = documentsStorage;
     _indexStorage          = indexStorage;
     _logger = LoggingSource.Instance
               .GetLogger<HandleReferences>(_indexStorage.DocumentDatabase.Name);
 }
        public async Task Execute(Action <IOperationProgress> onProgress, CompactionResult result)
        {
            if (_isCompactionInProgress)
            {
                throw new InvalidOperationException($"Database '{_database}' cannot be compacted because compaction is already in progress.");
            }

            result.AddMessage($"Started database compaction for {_database}");
            onProgress?.Invoke(result);

            _isCompactionInProgress = true;

            var documentDatabase = await _serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(_database);

            var configuration = _serverStore.DatabasesLandlord.CreateDatabaseConfiguration(_database);

            using (await _serverStore.DatabasesLandlord.UnloadAndLockDatabase(_database, "it is being compacted"))
                using (var src = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                new CatastrophicFailureNotification(exception => throw new InvalidOperationException($"Failed to compact database {_database}", exception))))
                {
                    src.ForceUsing32BitsPager            = configuration.Storage.ForceUsing32BitsPager;
                    src.OnNonDurableFileSystemError     += documentDatabase.HandleNonDurableFileSystemError;
                    src.OnRecoveryError                 += documentDatabase.HandleOnRecoveryError;
                    src.CompressTxAboveSizeInBytes       = configuration.Storage.CompressTxAboveSize.GetValue(SizeUnit.Bytes);
                    src.TimeToSyncAfterFlashInSec        = (int)configuration.Storage.TimeToSyncAfterFlash.AsTimeSpan.TotalSeconds;
                    src.NumOfConcurrentSyncsPerPhysDrive = configuration.Storage.NumberOfConcurrentSyncsPerPhysicalDrive;
                    Sodium.CloneKey(out src.MasterKey, documentDatabase.MasterKey);

                    var basePath = configuration.Core.DataDirectory.FullPath;
                    IOExtensions.DeleteDirectory(basePath + "-Compacting");
                    IOExtensions.DeleteDirectory(basePath + "-old");
                    try
                    {
                        configuration.Core.DataDirectory = new PathSetting(basePath + "-Compacting");
                        using (var dst = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                        new CatastrophicFailureNotification(exception => throw new InvalidOperationException($"Failed to compact database {_database}", exception))))
                        {
                            dst.OnNonDurableFileSystemError     += documentDatabase.HandleNonDurableFileSystemError;
                            dst.OnRecoveryError                 += documentDatabase.HandleOnRecoveryError;
                            dst.CompressTxAboveSizeInBytes       = configuration.Storage.CompressTxAboveSize.GetValue(SizeUnit.Bytes);
                            dst.ForceUsing32BitsPager            = configuration.Storage.ForceUsing32BitsPager;
                            dst.TimeToSyncAfterFlashInSec        = (int)configuration.Storage.TimeToSyncAfterFlash.AsTimeSpan.TotalSeconds;
                            dst.NumOfConcurrentSyncsPerPhysDrive = configuration.Storage.NumberOfConcurrentSyncsPerPhysicalDrive;
                            Sodium.CloneKey(out dst.MasterKey, documentDatabase.MasterKey);

                            _token.ThrowIfCancellationRequested();
                            StorageCompaction.Execute(src, (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)dst, progressReport =>
                            {
                                result.Progress.TreeProgress   = progressReport.TreeProgress;
                                result.Progress.TreeTotal      = progressReport.TreeTotal;
                                result.Progress.TreeName       = progressReport.TreeName;
                                result.Progress.GlobalProgress = progressReport.GlobalProgress;
                                result.Progress.GlobalTotal    = progressReport.GlobalTotal;
                                result.AddMessage(progressReport.Message);
                                onProgress?.Invoke(result);
                            }, _token);
                        }

                        result.TreeName = null;

                        _token.ThrowIfCancellationRequested();
                        IOExtensions.MoveDirectory(basePath, basePath + "-old");
                        IOExtensions.MoveDirectory(basePath + "-Compacting", basePath);

                        var oldIndexesPath = new PathSetting(basePath + "-old").Combine("Indexes");
                        var newIndexesPath = new PathSetting(basePath).Combine("Indexes");
                        IOExtensions.MoveDirectory(oldIndexesPath.FullPath, newIndexesPath.FullPath);

                        var oldConfigPath = new PathSetting(basePath + "-old").Combine("Configuration");
                        var newConfigPath = new PathSetting(basePath).Combine("Configuration");
                        IOExtensions.MoveDirectory(oldConfigPath.FullPath, newConfigPath.FullPath);
                    }
                    catch (Exception e)
                    {
                        throw new InvalidOperationException($"Failed to execute compaction for {_database}", e);
                    }
                    finally
                    {
                        IOExtensions.DeleteDirectory(basePath + "-Compacting");
                        IOExtensions.DeleteDirectory(basePath + "-old");
                        _isCompactionInProgress = false;
                    }
                }
        }
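
The compaction above writes the compacted copy to `<base>-Compacting`, then promotes it with two directory moves while parking the original at `<base>-old`; the Indexes and Configuration sub-stores are not compacted and are moved back under the new root. A distilled sketch of that swap follows, using plain System.IO calls in place of RavenDB's IOExtensions helpers.

using System.IO;

public static class CompactSwap
{
    public static void PromoteCompactedCopy(string basePath)
    {
        var compacting = basePath + "-Compacting";
        var old        = basePath + "-old";

        // Park the live data, then promote the compacted copy in its place.
        Directory.Move(basePath, old);
        Directory.Move(compacting, basePath);

        // Sub-stores that were not compacted come back under the new root.
        Directory.Move(Path.Combine(old, "Indexes"), Path.Combine(basePath, "Indexes"));
        Directory.Move(Path.Combine(old, "Configuration"), Path.Combine(basePath, "Configuration"));
    }
}
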
Example #9
        public bool ExecuteReplicationOnce(OutgoingReplicationStatsScope stats)
        {
            EnsureValidStats(stats);

            using (_parent._database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsContext))
                using (documentsContext.OpenReadTransaction())
                {
                    try
                    {
                        // We scan through the documents to send to the other side. We need to be
                        // careful when filtering out a lot of documents: the other side must be told
                        // about the skipped range, and at the same time we need to send a heartbeat
                        // to keep the TCP connection alive.
                        _lastEtag = _parent._lastSentDocumentEtag;
                        _parent.CancellationToken.ThrowIfCancellationRequested();

                        var   batchSize             = _parent._database.Configuration.Replication.MaxItemsCount;
                        var   maxSizeToSend         = _parent._database.Configuration.Replication.MaxSizeToSend;
                        long  size                  = 0;
                        int   numberOfItemsSent     = 0;
                        short lastTransactionMarker = -1;
                        using (_stats.Storage.Start())
                        {
                            foreach (var item in GetDocsConflictsTombstonesRevisionsAndAttachmentsAfter(documentsContext, _lastEtag, _stats))
                            {
                                if (lastTransactionMarker != item.TransactionMarker)
                                {
                                    lastTransactionMarker = item.TransactionMarker;

                                    // Include the attachment's document which is right after its latest attachment.
                                    if ((item.Type == ReplicationBatchItem.ReplicationItemType.Document ||
                                         item.Type == ReplicationBatchItem.ReplicationItemType.DocumentTombstone) &&
                                        // We want to keep batch sizes within reasonable limits.
                                        ((maxSizeToSend.HasValue && size > maxSizeToSend.Value.GetValue(SizeUnit.Bytes)) ||
                                         (batchSize.HasValue && numberOfItemsSent > batchSize.Value)))
                                    {
                                        break;
                                    }
                                }

                                _stats.Storage.RecordInputAttempt();

                                _lastEtag = item.Etag;

                                if (item.Data != null)
                                {
                                    size += item.Data.Size;
                                }
                                else if (item.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                                {
                                    size += item.Stream.Length;
                                }

                                if (AddReplicationItemToBatch(item, _stats.Storage))
                                {
                                    numberOfItemsSent++;
                                }
                            }
                        }

                        if (_log.IsInfoEnabled)
                        {
                            _log.Info($"Found {_orderedReplicaItems.Count:#,#;;0} documents and {_replicaAttachmentStreams.Count} attachment's streams to replicate to {_parent.Node.FromString()}.");
                        }

                        if (_orderedReplicaItems.Count == 0)
                        {
                            var hasModification = _lastEtag != _parent._lastSentDocumentEtag;

                            // ensure that the other server is aware that we skipped
                            // (potentially a lot of) documents, and update the last
                            // etag they have from us on the other side
                            _parent._lastSentDocumentEtag = _lastEtag;
                            _parent._lastDocumentSentTime = DateTime.UtcNow;
                            _parent.SendHeartbeat(DocumentsStorage.GetDatabaseChangeVector(documentsContext));
                            return hasModification;
                        }

                        _parent.CancellationToken.ThrowIfCancellationRequested();

                        try
                        {
                            using (_stats.Network.Start())
                            {
                                SendDocumentsBatch(documentsContext, _stats.Network);
                            }
                        }
                        catch (OperationCanceledException)
                        {
                            if (_log.IsInfoEnabled)
                            {
                                _log.Info("Received cancellation notification while sending document replication batch.");
                            }
                            throw;
                        }
                        catch (Exception e)
                        {
                            if (_log.IsInfoEnabled)
                            {
                                _log.Info("Failed to send document replication batch", e);
                            }
                            throw;
                        }
                        return true;
                    }
                    finally
                    {
                        foreach (var item in _orderedReplicaItems)
                        {
                            var value = item.Value;
                            if (value.Type == ReplicationBatchItem.ReplicationItemType.Attachment)
                            {
                                // TODO: Why are we disposing here?
                                // Shouldn't the whole context be disposed instead?
                                // If not, should we dispose all strings here?
                                value.Stream.Dispose();
                            }
                            else
                            {
                                value.Data?.Dispose(); // item.Value.Data is null for tombstones
                            }
                        }
                        _orderedReplicaItems.Clear();
                        _replicaAttachmentStreams.Clear();
                    }
                }
        }
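
Both replication variants share one rule: when nothing qualifies for sending, the last-sent etag still advances and a heartbeat goes out, so the destination learns how far the source has scanned. A reduced sketch of that bookkeeping, with simplified stand-in members:

public sealed class EmptyBatchBookkeeping
{
    private long _lastSentEtag;

    // Returns true when the etag moved even though no items were sent;
    // the caller follows up with a heartbeat carrying the change vector.
    public bool SkippedAheadWithoutSending(long lastScannedEtag)
    {
        var hasModification = lastScannedEtag != _lastSentEtag;
        _lastSentEtag = lastScannedEtag; // advance even for an empty batch
        return hasModification;
    }
}
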
Example #10
        public async Task Execute(Action <IOperationProgress> onProgress, CompactionResult result)
        {
            if (_isCompactionInProgress)
            {
                throw new InvalidOperationException($"Database '{_database}' cannot be compacted because compaction is already in progress.");
            }

            result.AddMessage($"Started database compaction for {_database}");
            onProgress?.Invoke(result);

            _isCompactionInProgress = true;
            bool   done             = false;
            string compactDirectory = null;
            string tmpDirectory     = null;

            try
            {
                var documentDatabase = await _serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(_database);

                var configuration = _serverStore.DatabasesLandlord.CreateDatabaseConfiguration(_database);

                using (await _serverStore.DatabasesLandlord.UnloadAndLockDatabase(_database, "it is being compacted"))
                    using (var src = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                    new CatastrophicFailureNotification((endId, path, exception) => throw new InvalidOperationException($"Failed to compact database {_database} ({path})", exception))))
                    {
                        InitializeOptions(src, configuration, documentDatabase);

                        var basePath = configuration.Core.DataDirectory.FullPath;
                        compactDirectory = basePath + "-compacting";
                        tmpDirectory     = basePath + "-old";

                        EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);

                        IOExtensions.DeleteDirectory(compactDirectory);
                        IOExtensions.DeleteDirectory(tmpDirectory);
                        configuration.Core.DataDirectory = new PathSetting(compactDirectory);
                        using (var dst = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                        new CatastrophicFailureNotification((envId, path, exception) => throw new InvalidOperationException($"Failed to compact database {_database} ({path})", exception))))
                        {
                            InitializeOptions(dst, configuration, documentDatabase);

                            _token.ThrowIfCancellationRequested();
                            StorageCompaction.Execute(src, (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)dst, progressReport =>
                            {
                                result.Progress.TreeProgress   = progressReport.TreeProgress;
                                result.Progress.TreeTotal      = progressReport.TreeTotal;
                                result.Progress.TreeName       = progressReport.TreeName;
                                result.Progress.GlobalProgress = progressReport.GlobalProgress;
                                result.Progress.GlobalTotal    = progressReport.GlobalTotal;
                                result.AddMessage(progressReport.Message);
                                onProgress?.Invoke(result);
                            }, _token);
                        }

                        result.TreeName = null;

                        _token.ThrowIfCancellationRequested();

                        EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);
                        IOExtensions.DeleteDirectory(tmpDirectory);

                        SwitchDatabaseDirectories(basePath, tmpDirectory, compactDirectory);
                        done = true;
                    }
            }
            catch (Exception e)
            {
                throw new InvalidOperationException($"Failed to execute compaction for {_database}", e);
            }
            finally
            {
                IOExtensions.DeleteDirectory(compactDirectory);
                if (done)
                {
                    IOExtensions.DeleteDirectory(tmpDirectory);
                }
                _isCompactionInProgress = false;
            }
        }
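
Compared to Example #6, this revision hardens cleanup with a done flag: the temporary compaction directory is always deleted, but the `-old` copy of the original data survives unless the swap definitely completed, so a crash mid-swap cannot destroy the only good copy. The idiom in isolation, with placeholder cleanup methods:

public static class DoneFlagCleanup
{
    public static void Run()
    {
        var done = false;
        try
        {
            // ... compact, verify, and swap directories ...
            done = true;
        }
        finally
        {
            DeleteCompactingDirectory();  // safe to drop in every outcome
            if (done)
                DeleteOldDirectory();     // only once the swap succeeded
        }
    }

    private static void DeleteCompactingDirectory() { /* placeholder */ }
    private static void DeleteOldDirectory()        { /* placeholder */ }
}
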
Example #11
        public void AcceptIncomingConnection(TcpConnectionOptions tcpConnectionOptions)
        {
            ReplicationLatestEtagRequest getLatestEtagMessage;

            using (tcpConnectionOptions.ContextPool.AllocateOperationContext(out JsonOperationContext context))
                using (var readerObject = context.ParseToMemory(
                           tcpConnectionOptions.Stream,
                           "IncomingReplication/get-last-etag-message read",
                           BlittableJsonDocumentBuilder.UsageMode.None,
                           tcpConnectionOptions.PinnedBuffer))
                {
                    getLatestEtagMessage = JsonDeserializationServer.ReplicationLatestEtagRequest(readerObject);
                    if (_log.IsInfoEnabled)
                    {
                        _log.Info(
                            $"GetLastEtag: {getLatestEtagMessage.SourceTag}({getLatestEtagMessage.SourceMachineName}) / {getLatestEtagMessage.SourceDatabaseName} ({getLatestEtagMessage.SourceDatabaseId}) - {getLatestEtagMessage.SourceUrl}");
                    }
                }

            var connectionInfo = IncomingConnectionInfo.FromGetLatestEtag(getLatestEtagMessage);

            try
            {
                AssertValidConnection(connectionInfo);
            }
            catch (Exception e)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info($"Connection from [{connectionInfo}] is rejected.", e);
                }

                var incomingConnectionRejectionInfos = _incomingRejectionStats.GetOrAdd(connectionInfo,
                                                                                        _ => new ConcurrentQueue <IncomingConnectionRejectionInfo>());
                incomingConnectionRejectionInfos.Enqueue(new IncomingConnectionRejectionInfo {
                    Reason = e.ToString()
                });

                try
                {
                    tcpConnectionOptions.Dispose();
                }
                catch
                {
                    // do nothing
                }

                throw;
            }

            try
            {
                using (Database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext documentsOperationContext))
                    using (Database.ConfigurationStorage.ContextPool.AllocateOperationContext(out TransactionOperationContext configurationContext))
                        using (var writer = new BlittableJsonTextWriter(documentsOperationContext, tcpConnectionOptions.Stream))
                            using (documentsOperationContext.OpenReadTransaction())
                                using (configurationContext.OpenReadTransaction())
                                {
                                    var changeVector = DocumentsStorage.GetDatabaseChangeVector(documentsOperationContext);

                                    var lastEtagFromSrc = Database.DocumentsStorage.GetLastReplicateEtagFrom(
                                        documentsOperationContext, getLatestEtagMessage.SourceDatabaseId);
                                    if (_log.IsInfoEnabled)
                                    {
                                        _log.Info($"GetLastEtag response, last etag: {lastEtagFromSrc}");
                                    }
                                    var response = new DynamicJsonValue
                                    {
                                        [nameof(ReplicationMessageReply.Type)]                 = "Ok",
                                        [nameof(ReplicationMessageReply.MessageType)]          = ReplicationMessageType.Heartbeat,
                                        [nameof(ReplicationMessageReply.LastEtagAccepted)]     = lastEtagFromSrc,
                                        [nameof(ReplicationMessageReply.NodeTag)]              = _server.NodeTag,
                                        [nameof(ReplicationMessageReply.DatabaseChangeVector)] = changeVector
                                    };

                                    documentsOperationContext.Write(writer, response);
                                    writer.Flush();
                                }
            }
            catch (Exception)
            {
                try
                {
                    tcpConnectionOptions.Dispose();
                }
                catch (Exception)
                {
                    // do nothing
                }
                throw;
            }

            var newIncoming = new IncomingReplicationHandler(
                tcpConnectionOptions,
                getLatestEtagMessage,
                this);

            newIncoming.Failed            += OnIncomingReceiveFailed;
            newIncoming.DocumentsReceived += OnIncomingReceiveSucceeded;

            if (_log.IsInfoEnabled)
            {
                _log.Info(
                    $"Initialized document replication connection from {connectionInfo.SourceDatabaseName} located at {connectionInfo.SourceUrl}");
            }

            // need to safeguard against two concurrent connection attempts
            var newConnection = _incoming.GetOrAdd(newIncoming.ConnectionInfo.SourceDatabaseId, newIncoming);

            if (newConnection == newIncoming)
            {
                newIncoming.Start();
                IncomingReplicationAdded?.Invoke(newIncoming);
                ForceTryReconnectAll();
            }
            else
            {
                newIncoming.Dispose();
            }
        }
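
The `_incoming.GetOrAdd(...)` call near the end is the safeguard against two concurrent connection attempts from the same source database: only the handler that wins the race is started, and the loser is disposed. Here is the pattern in isolation, with a simplified stand-in handler type:

using System;
using System.Collections.Concurrent;

public sealed class IncomingRegistry
{
    private readonly ConcurrentDictionary<string, IncomingHandler> _incoming =
        new ConcurrentDictionary<string, IncomingHandler>();

    public void Register(IncomingHandler candidate)
    {
        var winner = _incoming.GetOrAdd(candidate.SourceDatabaseId, candidate);
        if (ReferenceEquals(winner, candidate))
            candidate.Start();   // we won the race: activate this connection
        else
            candidate.Dispose(); // a handler for this source already exists
    }
}

public sealed class IncomingHandler : IDisposable
{
    public IncomingHandler(string sourceDatabaseId)
    {
        SourceDatabaseId = sourceDatabaseId;
    }

    public string SourceDatabaseId { get; }
    public void Start() { }
    public void Dispose() { }
}
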
Example #12
        private unsafe void FillCountOfResultsAndIndexEtag(QueryResultServerSide <Document> resultToFill, QueryMetadata query, QueryOperationContext context)
        {
            var bufferSize    = 3;
            var hasCounters   = query.HasCounterSelect || query.CounterIncludes != null;
            var hasTimeSeries = query.HasTimeSeriesSelect || query.TimeSeriesIncludes != null;
            var hasCmpXchg    = query.HasCmpXchg || query.HasCmpXchgSelect || query.HasCmpXchgIncludes;

            if (hasCounters)
            {
                bufferSize++;
            }
            if (hasTimeSeries)
            {
                bufferSize++;
            }
            if (hasCmpXchg)
            {
                bufferSize++;
            }

            var collection = query.CollectionName;
            var buffer     = stackalloc long[bufferSize];

            // If the query has include or load, it's too difficult to check the etags for just
            // the included collections, so it's easier to use the etag for all documents instead.
            if (collection == Constants.Documents.Collections.AllDocumentsCollection ||
                query.HasIncludeOrLoad)
            {
                var numberOfDocuments = Database.DocumentsStorage.GetNumberOfDocuments(context.Documents);
                buffer[0] = DocumentsStorage.ReadLastDocumentEtag(context.Documents.Transaction.InnerTransaction);
                buffer[1] = DocumentsStorage.ReadLastTombstoneEtag(context.Documents.Transaction.InnerTransaction);
                buffer[2] = numberOfDocuments;

                if (hasCounters)
                {
                    buffer[3] = DocumentsStorage.ReadLastCountersEtag(context.Documents.Transaction.InnerTransaction);
                }

                if (hasTimeSeries)
                {
                    buffer[hasCounters ? 4 : 3] = DocumentsStorage.ReadLastTimeSeriesEtag(context.Documents.Transaction.InnerTransaction);
                }

                resultToFill.TotalResults     = (int)numberOfDocuments;
                resultToFill.LongTotalResults = numberOfDocuments;
            }
            else
            {
                var collectionStats = Database.DocumentsStorage.GetCollection(collection, context.Documents);
                buffer[0] = Database.DocumentsStorage.GetLastDocumentEtag(context.Documents.Transaction.InnerTransaction, collection);
                buffer[1] = Database.DocumentsStorage.GetLastTombstoneEtag(context.Documents.Transaction.InnerTransaction, collection);
                buffer[2] = collectionStats.Count;

                if (hasCounters)
                {
                    buffer[3] = Database.DocumentsStorage.CountersStorage.GetLastCounterEtag(context.Documents, collection);
                }

                if (hasTimeSeries)
                {
                    buffer[hasCounters ? 4 : 3] = Database.DocumentsStorage.TimeSeriesStorage.GetLastTimeSeriesEtag(context.Documents, collection);
                }

                resultToFill.TotalResults     = (int)collectionStats.Count;
                resultToFill.LongTotalResults = collectionStats.Count;
            }

            if (hasCmpXchg)
            {
                buffer[bufferSize - 1] = Database.ServerStore.Cluster.GetLastCompareExchangeIndexForDatabase(context.Server, Database.Name);
            }

            resultToFill.ResultEtag = (long)Hashing.XXHash64.Calculate((byte *)buffer, sizeof(long) * (uint)bufferSize);
            resultToFill.NodeTag    = Database.ServerStore.NodeTag;
        }
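
The result etag above is produced by packing every relevant etag and count into one stack buffer and hashing the raw bytes, so a change in any single input yields a different etag. A sketch of the same idea, with 64-bit FNV-1a standing in for RavenDB's XXHash64:

using System;

public static class ResultEtagSketch
{
    // FNV-1a over the little-endian bytes of each component.
    public static ulong Compute(ReadOnlySpan<long> components)
    {
        const ulong offsetBasis = 14695981039346656037UL;
        const ulong prime       = 1099511628211UL;

        var hash = offsetBasis;
        foreach (var component in components)
        {
            var value = (ulong)component;
            for (var i = 0; i < 8; i++)
            {
                hash ^= (value >> (i * 8)) & 0xFF;
                hash *= prime;
            }
        }
        return hash;
    }

    public static void Main()
    {
        Span<long> buffer = stackalloc long[3];
        buffer[0] = 42;  // last document etag (sample value)
        buffer[1] = 17;  // last tombstone etag (sample value)
        buffer[2] = 100; // number of documents (sample value)
        Console.WriteLine(Compute(buffer));
    }
}
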
Example #13
        public DocumentDatabase(string name, RavenConfiguration configuration, ServerStore serverStore, Action <string> addToInitLog)
        {
            Name          = name;
            _logger       = LoggingSource.Instance.GetLogger <DocumentDatabase>(Name);
            _serverStore  = serverStore;
            _addToInitLog = addToInitLog;
            StartTime     = SystemTime.UtcNow;
            Configuration = configuration;
            Scripts       = new ScriptRunnerCache(this, Configuration);

            try
            {
                using (_serverStore.ContextPool.AllocateOperationContext(out TransactionOperationContext ctx))
                    using (ctx.OpenReadTransaction())
                    {
                        MasterKey = serverStore.GetSecretKey(ctx, Name);

                        var databaseRecord = _serverStore.Cluster.ReadDatabase(ctx, Name);
                        if (databaseRecord != null)
                        {
                            // can happen when we are in the process of restoring a database
                            if (databaseRecord.Encrypted && MasterKey == null)
                            {
                                throw new InvalidOperationException($"Attempt to create encrypted db {Name} without supplying the secret key");
                            }
                            if (databaseRecord.Encrypted == false && MasterKey != null)
                            {
                                throw new InvalidOperationException($"Attempt to create a non-encrypted db {Name}, but a secret key exists for this db.");
                            }
                        }
                    }

                QueryMetadataCache       = new QueryMetadataCache();
                IoChanges                = new IoChangesNotifications();
                Changes                  = new DocumentsChanges();
                DocumentTombstoneCleaner = new DocumentTombstoneCleaner(this);
                DocumentsStorage         = new DocumentsStorage(this, addToInitLog);
                IndexStore               = new IndexStore(this, serverStore);
                QueryRunner              = new QueryRunner(this);
                EtlLoader                = new EtlLoader(this, serverStore);
                ReplicationLoader        = new ReplicationLoader(this, serverStore);
                SubscriptionStorage      = new SubscriptionStorage(this, serverStore);
                Metrics                  = new MetricCounters();
                TxMerger                 = new TransactionOperationsMerger(this, DatabaseShutdown);
                HugeDocuments            = new HugeDocuments(configuration.PerformanceHints.HugeDocumentsCollectionSize,
                                                             configuration.PerformanceHints.HugeDocumentSize.GetValue(SizeUnit.Bytes));
                ConfigurationStorage            = new ConfigurationStorage(this);
                NotificationCenter              = new NotificationCenter.NotificationCenter(ConfigurationStorage.NotificationsStorage, Name, _databaseShutdown.Token);
                Operations                      = new Operations.Operations(Name, ConfigurationStorage.OperationsStorage, NotificationCenter, Changes);
                DatabaseInfoCache               = serverStore.DatabaseInfoCache;
                RachisLogIndexNotifications     = new RachisLogIndexNotifications(DatabaseShutdown);
                CatastrophicFailureNotification = new CatastrophicFailureNotification(e =>
                {
                    serverStore.DatabasesLandlord.UnloadResourceOnCatastrophicFailure(name, e);
                });
            }
            catch (Exception)
            {
                Dispose();
                throw;
            }
        }
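
The constructor wraps all member wiring in try/catch and disposes itself before rethrowing (Initialize in Example #1 uses the same guard), so a failure halfway through construction cannot leak the components created so far. The bare pattern, with the component wiring elided:

using System;

public sealed class SelfDisposingOnCtorFailure : IDisposable
{
    public SelfDisposingOnCtorFailure()
    {
        try
        {
            // ... create sub-components, any of which may throw ...
        }
        catch
        {
            // Release whatever was already constructed, then surface the error.
            Dispose();
            throw;
        }
    }

    public void Dispose()
    {
        // Null-tolerant disposal of every sub-component.
    }
}
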
Example #14
        public unsafe void Dispose()
        {
            if (_databaseShutdown.IsCancellationRequested)
            {
                return; // double dispose?
            }
            lock (this)
            {
                if (_databaseShutdown.IsCancellationRequested)
                {
                    return; // double dispose?
                }
                // before we dispose of the database we take its latest info to be displayed in the studio
                try
                {
                    var databaseInfo = GenerateDatabaseInfo();
                    if (databaseInfo != null)
                    {
                        DatabaseInfoCache?.InsertDatabaseInfo(databaseInfo, Name);
                    }
                }
                catch (Exception e)
                {
                    // if we encountered a catastrophic failure we might not be able to retrieve database info

                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info("Failed to generate and store database info", e);
                    }
                }

                _databaseShutdown.Cancel();

                // we'll wait for 1 minute to drain all the requests
                // from the database

                var sp = Stopwatch.StartNew();
                while (sp.ElapsedMilliseconds < 60 * 1000)
                {
                    if (Interlocked.Read(ref _usages) == 0)
                    {
                        break;
                    }

                    if (_waitForUsagesOnDisposal.Wait(1000))
                    {
                        _waitForUsagesOnDisposal.Reset();
                    }
                }

                var exceptionAggregator = new ExceptionAggregator(_logger, $"Could not dispose {nameof(DocumentDatabase)} {Name}");

                foreach (var connection in RunningTcpConnections)
                {
                    exceptionAggregator.Execute(() =>
                    {
                        connection.Dispose();
                    });
                }

                exceptionAggregator.Execute(() =>
                {
                    TxMerger?.Dispose();
                });

                if (_indexStoreTask != null)
                {
                    exceptionAggregator.Execute(() =>
                    {
                        _indexStoreTask.Wait(DatabaseShutdown);
                        _indexStoreTask = null;
                    });
                }

                exceptionAggregator.Execute(() =>
                {
                    IndexStore?.Dispose();
                    IndexStore = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    ExpiredDocumentsCleaner?.Dispose();
                    ExpiredDocumentsCleaner = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    PeriodicBackupRunner?.Dispose();
                    PeriodicBackupRunner = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    DocumentTombstoneCleaner?.Dispose();
                    DocumentTombstoneCleaner = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    ReplicationLoader?.Dispose();
                    ReplicationLoader = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    EtlLoader?.Dispose();
                    EtlLoader = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    Operations?.Dispose(exceptionAggregator);
                    Operations = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    NotificationCenter?.Dispose();
                    NotificationCenter = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    SubscriptionStorage?.Dispose();
                });

                exceptionAggregator.Execute(() =>
                {
                    ConfigurationStorage?.Dispose();
                });

                exceptionAggregator.Execute(() =>
                {
                    DocumentsStorage?.Dispose();
                    DocumentsStorage = null;
                });

                exceptionAggregator.Execute(() =>
                {
                    _databaseShutdown.Dispose();
                });

                exceptionAggregator.Execute(() =>
                {
                    if (MasterKey == null)
                    {
                        return;
                    }
                    fixed(byte *pKey = MasterKey)
                    {
                        Sodium.ZeroMemory(pKey, MasterKey.Length);
                    }
                });

                exceptionAggregator.ThrowIfNeeded();
            }
        }
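The Dispose sequence above funnels every component teardown through an ExceptionAggregator, so a failure in one Dispose call cannot prevent the remaining components from being released; the collected failures only surface at the final ThrowIfNeeded. A minimal sketch of that idiom (this class body is an assumption about the shape of RavenDB's ExceptionAggregator, not its actual implementation):

using System;
using System.Collections.Generic;

// Collects failures from a sequence of cleanup actions so that one failing
// Dispose() cannot prevent the remaining components from being disposed.
public sealed class SimpleExceptionAggregator
{
    private readonly List<Exception> _exceptions = new List<Exception>();
    private readonly string _message;

    public SimpleExceptionAggregator(string message)
    {
        _message = message;
    }

    public void Execute(Action action)
    {
        try
        {
            action();
        }
        catch (Exception e)
        {
            _exceptions.Add(e);
        }
    }

    public void ThrowIfNeeded()
    {
        if (_exceptions.Count > 0)
            throw new AggregateException(_message, _exceptions);
    }
}

Usage mirrors the method above: wrap each component's Dispose in Execute(...), then call ThrowIfNeeded() once after all of them have run.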
Example #15
0
 public IncludeDocumentsCommand(DocumentsStorage storage, DocumentsOperationContext context, string[] includes)
 {
     _storage  = storage;
     _context  = context;
     _includes = includes;
 }
Example #16
0
        public Task PreviewCollection()
        {
            var start        = GetStart();
            var pageSize     = GetPageSize();
            var collection   = GetStringQueryString("collection", required: false);
            var bindings     = GetStringValuesQueryString("binding", required: false);
            var fullBindings = GetStringValuesQueryString("fullBinding", required: false);

            using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                using (context.OpenReadTransaction())
                {
                    Document[] documents;
                    HashSet <LazyStringValue> availableColumns;
                    HashSet <string>          propertiesPreviewToSend;
                    HashSet <string>          fullPropertiesToSend = new HashSet <string>(fullBindings);

                    long   totalResults;
                    string changeVector;
                    string etag = null;

                    if (string.IsNullOrEmpty(collection))
                    {
                        changeVector = DocumentsStorage.GetDatabaseChangeVector(context);
                        totalResults = Database.DocumentsStorage.GetNumberOfDocuments(context);
                        etag         = $"{changeVector}/{totalResults}";
                    }
                    else
                    {
                        changeVector = Database.DocumentsStorage.GetLastDocumentChangeVector(context, collection);
                        totalResults = Database.DocumentsStorage.GetCollection(collection, context).Count;

                        if (changeVector != null)
                        {
                            etag = $"{changeVector}/{totalResults}";
                        }
                    }

                    if (etag != null && GetStringFromHeaders("If-None-Match") == etag)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotModified;
                        return(Task.CompletedTask);
                    }

                    HttpContext.Response.Headers["ETag"] = "\"" + etag + "\"";

                    if (string.IsNullOrEmpty(collection))
                    {
                        documents               = Database.DocumentsStorage.GetDocumentsInReverseEtagOrder(context, start, pageSize).ToArray();
                        availableColumns        = ExtractColumnNames(documents, context);
                        propertiesPreviewToSend = bindings.Count > 0 ? new HashSet <string>(bindings) : new HashSet <string>();
                    }
                    else
                    {
                        documents               = Database.DocumentsStorage.GetDocumentsInReverseEtagOrder(context, collection, start, pageSize).ToArray();
                        availableColumns        = ExtractColumnNames(documents, context);
                        propertiesPreviewToSend = bindings.Count > 0 ? new HashSet <string>(bindings) : availableColumns.Take(ColumnsSamplingLimit).Select(x => x.ToString()).ToHashSet();
                    }

                    using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
                    {
                        writer.WriteStartObject();
                        writer.WritePropertyName("Results");

                        writer.WriteStartArray();

                        var first = true;
                        foreach (var document in documents)
                        {
                            if (first == false)
                            {
                                writer.WriteComma();
                            }
                            first = false;

                            using (document.Data)
                            {
                                WriteDocument(writer, context, document, propertiesPreviewToSend, fullPropertiesToSend);
                            }
                        }

                        writer.WriteEndArray();

                        writer.WriteComma();

                        writer.WritePropertyName("TotalResults");
                        writer.WriteInteger(totalResults);

                        writer.WriteComma();

                        writer.WriteArray("AvailableColumns", availableColumns);

                        writer.WriteEndObject();
                    }

                    return(Task.CompletedTask);
                }
        }
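PreviewCollection implements a cheap conditional GET: the ETag is derived from the database change vector plus the total result count, so any relevant write invalidates it, and a matching If-None-Match header short-circuits into 304 Not Modified before any document is serialized. A stripped-down sketch of that handshake in plain ASP.NET Core terms (the helper below is illustrative, not RavenDB's API):

using System.Net;
using Microsoft.AspNetCore.Http;

public static class ConditionalGet
{
    // Illustrative helper: returns true when the client's cached copy is
    // still current and a 304 Not Modified has been set instead of a payload.
    public static bool TryNotModified(HttpContext httpContext, string etag)
    {
        if (etag != null && httpContext.Request.Headers["If-None-Match"] == etag)
        {
            httpContext.Response.StatusCode = (int)HttpStatusCode.NotModified;
            return true;
        }

        httpContext.Response.Headers["ETag"] = "\"" + etag + "\"";
        return false;
    }
}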
Example #17
0
        public Task Stats()
        {
            using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                using (var writer = new BlittableJsonTextWriter(context, ResponseBodyStream()))
                    using (context.OpenReadTransaction())
                    {
                        var indexes = Database.IndexStore.GetIndexes().ToList();

                        var sizeOnDiskInBytes = Database.GetSizeOnDiskInBytes();

                        var stats = new DatabaseStatistics
                        {
                            LastDocEtag               = DocumentsStorage.ReadLastDocumentEtag(context.Transaction.InnerTransaction),
                            CountOfDocuments          = Database.DocumentsStorage.GetNumberOfDocuments(context),
                            CountOfRevisionDocuments  = Database.DocumentsStorage.RevisionsStorage.GetNumberOfRevisionDocuments(context),
                            CountOfDocumentsConflicts = Database.DocumentsStorage.ConflictsStorage.GetNumberOfDocumentsConflicts(context),
                            CountOfTombstones         = Database.DocumentsStorage.GetNumberOfTombstones(context),
                            CountOfConflicts          = Database.DocumentsStorage.ConflictsStorage.ConflictsCount,
                            SizeOnDisk = new Size(sizeOnDiskInBytes),
                            NumberOfTransactionMergerQueueOperations = Database.TxMerger.NumberOfQueuedOperations
                        };

                        var attachments = Database.DocumentsStorage.AttachmentsStorage.GetNumberOfAttachments(context);
                        stats.CountOfAttachments       = attachments.AttachmentCount;
                        stats.CountOfUniqueAttachments = attachments.StreamsCount;
                        stats.CountOfIndexes           = indexes.Count;
                        var statsDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);

                        stats.DatabaseChangeVector = statsDatabaseChangeVector;
                        stats.DatabaseId           = Database.DocumentsStorage.Environment.Base64Id;
                        stats.Is64Bit = !Database.DocumentsStorage.Environment.Options.ForceUsing32BitsPager && IntPtr.Size == sizeof(long);
                        stats.Pager   = Database.DocumentsStorage.Environment.Options.DataPager.GetType().ToString();

                        stats.Indexes = new IndexInformation[indexes.Count];
                        for (var i = 0; i < indexes.Count; i++)
                        {
                            var  index = indexes[i];
                            bool isStale;
                            try
                            {
                                isStale = index.IsStale(context);
                            }
                            catch (OperationCanceledException)
                            {
                                // if the index has just been removed, let us consider it stale
                                // until it can be safely removed from the list of indexes in the
                                // database
                                isStale = true;
                            }
                            stats.Indexes[i] = new IndexInformation
                            {
                                State            = index.State,
                                IsStale          = isStale,
                                Name             = index.Name,
                                LockMode         = index.Definition.LockMode,
                                Priority         = index.Definition.Priority,
                                Type             = index.Type,
                                LastIndexingTime = index.LastIndexingTime
                            };

                            if (stats.LastIndexingTime.HasValue)
                            {
                                stats.LastIndexingTime = stats.LastIndexingTime >= index.LastIndexingTime ? stats.LastIndexingTime : index.LastIndexingTime;
                            }
                            else
                            {
                                stats.LastIndexingTime = index.LastIndexingTime;
                            }
                        }

                        writer.WriteDatabaseStatistics(context, stats);
                    }

            return(Task.CompletedTask);
        }
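The LastIndexingTime accumulation at the end of Stats() is a max-fold over nullable timestamps, where null means no index has reported a time yet; the same loop appears again in FillDatabaseStatistics further down. Stated on its own (an equivalent rewrite for clarity, not code from the project):

using System;

public static class IndexingStats
{
    // Equivalent to the LastIndexingTime fold above:
    // max over nullable timestamps, where null means "no value seen yet".
    public static DateTime? Max(DateTime? current, DateTime? candidate)
    {
        if (current.HasValue == false)
            return candidate;
        if (candidate.HasValue == false)
            return current;
        return current >= candidate ? current : candidate;
    }
}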
Example #18
0
        public static UpgraderDelegate Upgrader(StorageType storageType, ConfigurationStorage configurationStorage, DocumentsStorage documentsStorage, ServerStore serverStore)
        {
            var upgrade = new InternalUpgrader(storageType, configurationStorage, documentsStorage, serverStore);

            return(upgrade.Upgrade);
        }
Example #19
0
 protected HandleDocumentReferences(Index index, Dictionary <string, HashSet <CollectionName> > referencedCollections, DocumentsStorage documentsStorage, IndexStorage indexStorage, IndexStorage.ReferencesBase referencesStorage, IndexingConfiguration configuration)
     : base(index, referencedCollections, documentsStorage, indexStorage, referencesStorage, configuration)
 {
 }
Example #20
0
 internal InternalUpgrader(StorageType storageType, ConfigurationStorage configurationStorage, DocumentsStorage documentsStorage, ServerStore serverStore)
 {
     _storageType          = storageType;
     _configurationStorage = configurationStorage;
     _documentsStorage     = documentsStorage;
     _serverStore          = serverStore;
 }
Example #21
0
 public TimeSeriesQueryResultRetriever(DocumentDatabase database, IndexQueryServerSide query, QueryTimingsScope queryTimings, DocumentsStorage documentsStorage, JsonOperationContext context, FieldsToFetch fieldsToFetch, IncludeDocumentsCommand includeDocumentsCommand, IncludeCompareExchangeValuesCommand includeCompareExchangeValuesCommand, IncludeRevisionsCommand includeRevisionsCommand)
     : base(Constants.Documents.Indexing.Fields.ValueFieldName, database, query, queryTimings, documentsStorage, context, fieldsToFetch, includeDocumentsCommand, includeCompareExchangeValuesCommand, includeRevisionsCommand)
 {
 }
Example #22
0
            protected override long ExecuteCmd(DocumentsOperationContext context)
            {
                var storage = context.DocumentDatabase.DocumentsStorage;

                RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
                var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);

                foreach (var item in _states)
                {
                    if (_configuration == null)
                    {
                        return(RolledUp);
                    }

                    if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
                    {
                        continue;
                    }

                    if (config.Disabled)
                    {
                        continue;
                    }

                    if (table.ReadByKey(item.Key, out var current) == false)
                    {
                        continue;
                    }

                    var policy = config.GetPolicyByName(item.RollupPolicy, out _);
                    if (policy == null)
                    {
                        table.DeleteByKey(item.Key);
                        continue;
                    }

                    if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
                    {
                        continue; // concurrency check
                    }
                    try
                    {
                        RollupOne(context, table, item, policy, config);
                    }
                    catch (NanValueException e)
                    {
                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info($"{item} failed", e);
                        }

                        if (table.VerifyKeyExists(item.Key) == false)
                        {
                            // we should re-add it, in case we already removed this rollup
                            using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                                using (Slice.From(context.Allocator, item.ChangeVector, ByteStringType.Immutable, out var cv))
                                    using (Slice.From(context.Allocator, item.RollupPolicy, ByteStringType.Immutable, out var policySlice))
                                        using (table.Allocate(out var tvb))
                                        {
                                            tvb.Add(slicer.StatsKey);
                                            tvb.Add(slicer.CollectionSlice);
                                            tvb.Add(Bits.SwapBytes(item.NextRollup.Ticks));
                                            tvb.Add(policySlice);
                                            tvb.Add(item.Etag);
                                            tvb.Add(cv);

                                            table.Set(tvb);
                                        }
                        }
                    }
                    catch (RollupExceedNumberOfValuesException e)
                    {
                        var name  = item.Name;
                        var docId = item.DocId;
                        try
                        {
                            var document = storage.Get(context, item.DocId, throwOnConflict: false);
                            docId = document?.Id ?? docId;
                            name  = storage.TimeSeriesStorage.GetOriginalName(context, docId, name);
                        }
                        catch
                        {
                            // ignore
                        }

                        var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info(msg, e);
                        }

                        var alert = AlertRaised.Create(context.DocumentDatabase.Name, "Failed to perform rollup because the time-series has more than 5 values", msg,
                                                       AlertType.RollupExceedNumberOfValues, NotificationSeverity.Warning, $"{item.Collection}/{item.Name}", new ExceptionDetails(e));

                        context.DocumentDatabase.NotificationCenter.Add(alert);
                    }
                }

                return(RolledUp);
            }
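The item.Etag != DocumentsStorage.TableValueToLong(...) guard above is an optimistic-concurrency check: the etag was captured when the rollup work was scheduled, and a mismatch means another writer touched the row since, so the stale work item is simply skipped. The same idea in isolation, over an in-memory map (all names below are illustrative):

using System;
using System.Collections.Concurrent;

public sealed class RollupQueue
{
    private readonly ConcurrentDictionary<string, long> _etagByKey =
        new ConcurrentDictionary<string, long>();

    // Run the work only if the row still carries the etag captured when the
    // work item was scheduled; a mismatch means another writer touched the
    // row since, and a newer work item will handle the new state.
    public bool TryProcess(string key, long expectedEtag, Action work)
    {
        if (_etagByKey.TryGetValue(key, out var current) == false)
            return false; // the row was deleted in the meantime

        if (current != expectedEtag)
            return false; // concurrency check failed; skip, don't retry

        work();
        return true;
    }
}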
Example #23
0
        public async Task Execute(Action <IOperationProgress> onProgress, CompactionResult result)
        {
            if (_isCompactionInProgress)
            {
                throw new InvalidOperationException($"Database '{_database}' cannot be compacted because compaction is already in progress.");
            }

            result.AddMessage($"Started database compaction for {_database}");
            onProgress?.Invoke(result.Progress);

            _isCompactionInProgress = true;
            bool   done                 = false;
            string compactDirectory     = null;
            string tmpDirectory         = null;
            string compactTempDirectory = null;

            byte[] encryptionKey = null;
            try
            {
                var documentDatabase = await _serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(_database);

                var configuration = _serverStore.DatabasesLandlord.CreateDatabaseConfiguration(_database);

                DatabaseRecord databaseRecord = documentDatabase.ReadDatabaseRecord();


                // save the key before unloading the database (it is zeroed when disposing DocumentDatabase).
                if (documentDatabase.MasterKey != null)
                {
                    encryptionKey = documentDatabase.MasterKey.ToArray();
                }

                using (await _serverStore.DatabasesLandlord.UnloadAndLockDatabase(_database, "it is being compacted"))
                    using (var src = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                    new CatastrophicFailureNotification((endId, path, exception, stacktrace) => throw new InvalidOperationException($"Failed to compact database {_database} ({path}), StackTrace='{stacktrace}'", exception))))
                    {
                        InitializeOptions(src, configuration, documentDatabase, encryptionKey);
                        DirectoryExecUtils.SubscribeToOnDirectoryInitializeExec(src, configuration.Storage, documentDatabase.Name, DirectoryExecUtils.EnvironmentType.Compaction, Logger);

                        var basePath = configuration.Core.DataDirectory.FullPath;
                        compactDirectory = basePath + "-compacting";
                        tmpDirectory     = basePath + "-old";

                        EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);

                        IOExtensions.DeleteDirectory(compactDirectory);
                        IOExtensions.DeleteDirectory(tmpDirectory);

                        configuration.Core.DataDirectory = new PathSetting(compactDirectory);

                        if (configuration.Storage.TempPath != null)
                        {
                            compactTempDirectory = configuration.Storage.TempPath.FullPath + "-temp-compacting";

                            EnsureDirectoriesPermission(compactTempDirectory);
                            IOExtensions.DeleteDirectory(compactTempDirectory);

                            configuration.Storage.TempPath = new PathSetting(compactTempDirectory);
                        }

                        var revisionsPrefix = CollectionName.GetTablePrefix(CollectionTableType.Revisions);
                        var compressedCollectionsTableNames = databaseRecord.DocumentsCompression?.Collections
                                                              .Select(name => new CollectionName(name).GetTableName(CollectionTableType.Documents))
                                                              .ToHashSet(StringComparer.OrdinalIgnoreCase);

                        using (var dst = DocumentsStorage.GetStorageEnvironmentOptionsFromConfiguration(configuration, new IoChangesNotifications(),
                                                                                                        new CatastrophicFailureNotification((envId, path, exception, stacktrace) => throw new InvalidOperationException($"Failed to compact database {_database} ({path}). StackTrace='{stacktrace}'", exception))))
                        {
                            InitializeOptions(dst, configuration, documentDatabase, encryptionKey);
                            DirectoryExecUtils.SubscribeToOnDirectoryInitializeExec(dst, configuration.Storage, documentDatabase.Name, DirectoryExecUtils.EnvironmentType.Compaction, Logger);

                            _token.ThrowIfCancellationRequested();
                            StorageCompaction.Execute(src, (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)dst, progressReport =>
                            {
                                result.Progress.TreeProgress   = progressReport.TreeProgress;
                                result.Progress.TreeTotal      = progressReport.TreeTotal;
                                result.Progress.TreeName       = progressReport.TreeName;
                                result.Progress.GlobalProgress = progressReport.GlobalProgress;
                                result.Progress.GlobalTotal    = progressReport.GlobalTotal;
                                result.AddMessage(progressReport.Message);
                                onProgress?.Invoke(result.Progress);
                            }, (name, schema) =>
                            {
                                bool isRevision   = name.StartsWith(revisionsPrefix, StringComparison.OrdinalIgnoreCase);
                                schema.Compressed =
                                    (isRevision && databaseRecord.DocumentsCompression?.CompressRevisions == true) ||
                                    compressedCollectionsTableNames?.Contains(name) == true;
                            }, _token);
                        }

                        result.TreeName = null;

                        _token.ThrowIfCancellationRequested();

                        EnsureDirectoriesPermission(basePath, compactDirectory, tmpDirectory);
                        IOExtensions.DeleteDirectory(tmpDirectory);

                        SwitchDatabaseDirectories(basePath, tmpDirectory, compactDirectory);
                        done = true;
                    }
            }
            catch (Exception e)
            {
                throw new InvalidOperationException($"Failed to execute compaction for {_database}", e);
            }
            finally
            {
                IOExtensions.DeleteDirectory(compactDirectory);
                if (done)
                {
                    IOExtensions.DeleteDirectory(tmpDirectory);

                    if (compactTempDirectory != null)
                    {
                        IOExtensions.DeleteDirectory(compactTempDirectory);
                    }
                }
                _isCompactionInProgress = false;
                if (encryptionKey != null)
                {
                    Sodium.ZeroBuffer(encryptionKey);
                }
            }
        }
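The compaction above never writes into the live data directory: it compacts into a "-compacting" sibling, keeps the original around as "-old" during the swap, and deletes the old copy only once done is set to true. SwitchDatabaseDirectories itself is not shown here, so the sketch below is an assumption about its essential shape, not its actual body:

using System.IO;

public static class CompactionSwap
{
    // Assumed shape of the swap step: move the live directory aside so a
    // complete copy always exists on disk, then move the compacted
    // directory into place. The "-old" copy is deleted later, by the
    // caller, only after the swap is known to have succeeded.
    public static void Switch(string basePath, string tmpPath, string compactedPath)
    {
        Directory.Move(basePath, tmpPath);       // live      -> "-old"
        Directory.Move(compactedPath, basePath); // compacted -> live
    }
}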
Example #24
0
        private void ReplicateToDestination()
        {
            try
            {
                AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiate);
                NativeMemory.EnsureRegistered();
                if (_log.IsInfoEnabled)
                {
                    _log.Info($"Will replicate to {Destination.FromString()} via {_connectionInfo.Url}");
                }

                using (_parent._server.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
                    using (context.OpenReadTransaction())
                    {
                        var record = _parent.LoadDatabaseRecord();
                        if (record == null)
                        {
                            throw new InvalidOperationException($"The database record for {_parent.Database.Name} does not exist?!");
                        }

                        if (record.Encrypted && Destination.Url.StartsWith("https:", StringComparison.OrdinalIgnoreCase) == false)
                        {
                            throw new InvalidOperationException(
                                      $"{record.DatabaseName} is encrypted, and require HTTPS for replication, but had endpoint with url {Destination.Url} to database {Destination.Database}");
                        }
                    }

                var task = TcpUtils.ConnectSocketAsync(_connectionInfo, _parent._server.Engine.TcpConnectionTimeout, _log);
                task.Wait(CancellationToken);
                using (Interlocked.Exchange(ref _tcpClient, task.Result))
                {
                    var wrapSsl = TcpUtils.WrapStreamWithSslAsync(_tcpClient, _connectionInfo, _parent._server.Server.Certificate.Certificate, _parent._server.Engine.TcpConnectionTimeout);
                    wrapSsl.Wait(CancellationToken);

                    using (_stream = wrapSsl.Result) // note that _stream is being disposed by the interruptible read
                        using (_interruptibleRead = new InterruptibleRead(_database.DocumentsStorage.ContextPool, _stream))
                            using (_buffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance())
                            {
                                var documentSender = new ReplicationDocumentSender(_stream, this, _log);

                                WriteHeaderToRemotePeer();
                                //handle the initial handshake response (last etag and related state)
                                try
                                {
                                    var response = HandleServerResponse(getFullResponse: true);
                                    switch (response.ReplyType)
                                    {
                                    //The first time we start replication we need to register the destination's current CV
                                    case ReplicationMessageReply.ReplyType.Ok:
                                        LastAcceptedChangeVector = response.Reply.DatabaseChangeVector;
                                        break;

                                    case ReplicationMessageReply.ReplyType.Error:
                                        var exception = new InvalidOperationException(response.Reply.Exception);
                                        if (response.Reply.Exception.Contains(nameof(DatabaseDoesNotExistException)) ||
                                            response.Reply.Exception.Contains(nameof(DatabaseNotRelevantException)))
                                        {
                                            AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, "Database does not exist");
                                            DatabaseDoesNotExistException.ThrowWithMessageAndException(Destination.Database, response.Reply.Message, exception);
                                        }

                                        AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, $"Got error: {response.Reply.Exception}");
                                        throw exception;
                                    }
                                }
                                catch (DatabaseDoesNotExistException e)
                                {
                                    var msg = $"Failed to parse initial server replication response, because there is no database named {_database.Name} " +
                                              "on the other end. ";
                                    if (_external)
                                    {
                                        msg += "In order for the replication to work, a database with the same name needs to be created at the destination";
                                    }

                                    var young = (DateTime.UtcNow - _startedAt).TotalSeconds < 30;
                                    if (young)
                                    {
                                        msg += "This can happen if the other node wasn't yet notified about being assigned this database and should be resolved shortly.";
                                    }
                                    if (_log.IsInfoEnabled)
                                    {
                                        _log.Info(msg, e);
                                    }

                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);

                                    // won't add an alert on young connections
                                    // because it may take a few seconds for the other side to be notified by
                                    // the cluster that it has this db.
                                    if (young == false)
                                    {
                                        AddAlertOnFailureToReachOtherSide(msg, e);
                                    }

                                    throw;
                                }
                                catch (OperationCanceledException e)
                                {
                                    const string msg = "Got operation canceled notification while opening outgoing replication channel. " +
                                                       "Aborting and closing the channel.";
                                    if (_log.IsInfoEnabled)
                                    {
                                        _log.Info(msg, e);
                                    }
                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                                    throw;
                                }
                                catch (Exception e)
                                {
                                    var msg = $"{OutgoingReplicationThreadName} got an unexpected exception during initial handshake";
                                    if (_log.IsInfoEnabled)
                                    {
                                        _log.Info(msg, e);
                                    }

                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingInitiateError, msg);
                                    AddAlertOnFailureToReachOtherSide(msg, e);

                                    throw;
                                }

                                DateTime nextReplicateAt = default;

                                while (_cts.IsCancellationRequested == false)
                                {
                                    while (_database.Time.GetUtcNow() > nextReplicateAt)
                                    {
                                        if (_parent.DebugWaitAndRunReplicationOnce != null)
                                        {
                                            _parent.DebugWaitAndRunReplicationOnce.Wait(_cts.Token);
                                            _parent.DebugWaitAndRunReplicationOnce.Reset();
                                        }

                                        var sp    = Stopwatch.StartNew();
                                        var stats = _lastStats = new OutgoingReplicationStatsAggregator(_parent.GetNextReplicationStatsId(), _lastStats);
                                        AddReplicationPerformance(stats);
                                        AddReplicationPulse(ReplicationPulseDirection.OutgoingBegin);

                                        try
                                        {
                                            using (var scope = stats.CreateScope())
                                            {
                                                try
                                                {
                                                    if (Destination is InternalReplication dest)
                                                    {
                                                        _parent.EnsureNotDeleted(dest.NodeTag);
                                                    }
                                                    var didWork = documentSender.ExecuteReplicationOnce(scope, ref nextReplicateAt);
                                                    if (documentSender.MissingAttachmentsInLastBatch)
                                                    {
                                                        continue;
                                                    }
                                                    if (didWork == false)
                                                    {
                                                        break;
                                                    }

                                                    if (Destination is ExternalReplication externalReplication)
                                                    {
                                                        var taskId = externalReplication.TaskId;
                                                        UpdateExternalReplicationInfo(taskId);
                                                    }

                                                    DocumentsSend?.Invoke(this);

                                                    if (sp.ElapsedMilliseconds > 60 * 1000)
                                                    {
                                                        _waitForChanges.Set();
                                                        break;
                                                    }
                                                }
                                                catch (OperationCanceledException)
                                                {
                                                    // cancellation is not an actual error,
                                                    // it is a "notification" that we need to cancel the current operation

                                                    const string msg = "Operation was canceled.";
                                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, msg);

                                                    throw;
                                                }
                                                catch (Exception e)
                                                {
                                                    AddReplicationPulse(ReplicationPulseDirection.OutgoingError, e.Message);

                                                    scope.AddError(e);
                                                    throw;
                                                }
                                            }
                                        }
                                        finally
                                        {
                                            stats.Complete();
                                            AddReplicationPulse(ReplicationPulseDirection.OutgoingEnd);
                                        }
                                    }

                                    //if this returns false, it means that either the timeout expired or the cancellation token was triggered
                                    while (WaitForChanges(_parent.MinimalHeartbeatInterval, _cts.Token) == false)
                                    {
                                        //If we got cancelled we need to break right away
                                        if (_cts.IsCancellationRequested)
                                        {
                                            break;
                                        }

                                        // open tx
                                        // read current change vector compare to last sent
                                        // if okay, send cv
                                        using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext ctx))
                                            using (var tx = ctx.OpenReadTransaction())
                                            {
                                                var etag = DocumentsStorage.ReadLastEtag(tx.InnerTransaction);
                                                if (etag == _lastSentDocumentEtag)
                                                {
                                                    SendHeartbeat(DocumentsStorage.GetDatabaseChangeVector(ctx));
                                                    _parent.CompleteDeletionIfNeeded(_cts);
                                                }
                                                else if (nextReplicateAt > DateTime.UtcNow)
                                                {
                                                    SendHeartbeat(null);
                                                }
                                                else
                                                {
                                                    //Send a heartbeat first so we will get an updated CV of the destination
                                                    var currentChangeVector = DocumentsStorage.GetDatabaseChangeVector(ctx);
                                                    SendHeartbeat(null);
                                                    //If our previous CV is already merged to the destination, wait a bit more
                                                    if (ChangeVectorUtils.GetConflictStatus(LastAcceptedChangeVector, currentChangeVector) ==
                                                        ConflictStatus.AlreadyMerged)
                                                    {
                                                        continue;
                                                    }

                                                    // we have updates that we need to send to the other side
                                                    // let's do that..
                                                    // this can happen if we got replication from another node
                                                    // that we need to send to it. Note that we typically
                                                    // will wait for the other node to send the data directly to
                                                    // our destination, but if it doesn't, we'll step in.
                                                    // In this case, we try to limit congestion in the network and
                                                    // only send updates that we have gotten from someone else after
                                                    // a certain time, to let the other side tell us that it already
                                                    // got it. Note that this is merely an optimization to reduce network
                                                    // traffic. It is fine to have the same data come from different sources.
                                                    break;
                                                }
                                            }
                                    }
                                    _waitForChanges.Reset();
                                }
                            }
                }
            }
            catch (AggregateException e)
            {
                if (e.InnerExceptions.Count == 1)
                {
                    if (e.InnerException is OperationCanceledException oce)
                    {
                        HandleOperationCancelException(oce);
                    }
                    if (e.InnerException is IOException ioe)
                    {
                        HandleIOException(ioe);
                    }
                }

                HandleException(e);
            }
            catch (OperationCanceledException e)
            {
                HandleOperationCancelException(e);
            }
            catch (IOException e)
            {
                HandleIOException(e);
            }
            catch (Exception e)
            {
                HandleException(e);
            }

            void HandleOperationCancelException(OperationCanceledException e)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info($"Operation canceled on replication thread ({FromToString}). " +
                              $"This is not necessary due to an issue. Stopped the thread.");
                }
                if (_cts.IsCancellationRequested == false)
                {
                    Failed?.Invoke(this, e);
                }
            }

            void HandleIOException(IOException e)
            {
                if (_log.IsInfoEnabled)
                {
                    if (e.InnerException is SocketException)
                    {
                        _log.Info($"SocketException was thrown from the connection to remote node ({FromToString}). " +
                                  "This might mean that the remote node is done or there is a network issue.", e);
                    }
                    else
                    {
                        _log.Info($"IOException was thrown from the connection to remote node ({FromToString}).", e);
                    }
                }
                Failed?.Invoke(this, e);
            }

            void HandleException(Exception e)
            {
                if (_log.IsInfoEnabled)
                {
                    _log.Info($"Unexpected exception occurred on replication thread ({FromToString}). " +
                              "Replication stopped (will be retried later).", e);
                }
                Failed?.Invoke(this, e);
            }
        }
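The idle loop near the end of ReplicateToDestination boils down to a three-way decision on every heartbeat tick. A sketch of just that decision with the surrounding machinery stripped away (the enum and method below are illustrative, not RavenDB types):

using System;

public enum IdleAction
{
    HeartbeatWithChangeVector, // nothing new to send: just keep the link alive
    HeartbeatOnly,             // there is data, but it is not our turn to send it yet
    SendUpdates                // the destination is behind and the delay elapsed
}

public static class IdleReplicationPolicy
{
    // Illustrative condensation of the decision made on each idle tick above.
    public static IdleAction Decide(long lastEtag, long lastSentEtag,
                                    DateTime nextReplicateAt, DateTime utcNow,
                                    bool destinationAlreadyMergedOurChanges)
    {
        if (lastEtag == lastSentEtag)
            return IdleAction.HeartbeatWithChangeVector;

        if (nextReplicateAt > utcNow)
            return IdleAction.HeartbeatOnly;

        // mirrors the ConflictStatus.AlreadyMerged comparison above: if the
        // destination already saw our change vector, keep waiting and let
        // another node's replication deliver the data, to limit congestion
        return destinationAlreadyMergedOurChanges
            ? IdleAction.HeartbeatOnly
            : IdleAction.SendUpdates;
    }
}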
Example #25
0
        private void FillDatabaseStatistics(DatabaseStatistics stats, DocumentsOperationContext context)
        {
            using (context.OpenReadTransaction())
            {
                var indexes = Database.IndexStore.GetIndexes().ToList();
                var size    = Database.GetSizeOnDisk();

                stats.LastDocEtag          = DocumentsStorage.ReadLastDocumentEtag(context.Transaction.InnerTransaction);
                stats.LastDatabaseEtag     = DocumentsStorage.ReadLastEtag(context.Transaction.InnerTransaction);
                stats.DatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(context);

                stats.CountOfDocuments          = Database.DocumentsStorage.GetNumberOfDocuments(context);
                stats.CountOfRevisionDocuments  = Database.DocumentsStorage.RevisionsStorage.GetNumberOfRevisionDocuments(context);
                stats.CountOfDocumentsConflicts = Database.DocumentsStorage.ConflictsStorage.GetNumberOfDocumentsConflicts(context);
                stats.CountOfTombstones         = Database.DocumentsStorage.GetNumberOfTombstones(context);
                stats.CountOfConflicts          = Database.DocumentsStorage.ConflictsStorage.ConflictsCount;
                stats.SizeOnDisk = size.Data;
                stats.NumberOfTransactionMergerQueueOperations = Database.TxMerger.NumberOfQueuedOperations;
                stats.TempBuffersSizeOnDisk = size.TempBuffers;
                stats.CountOfCounterEntries = Database.DocumentsStorage.CountersStorage.GetNumberOfCounterEntries(context);

                var attachments = Database.DocumentsStorage.AttachmentsStorage.GetNumberOfAttachments(context);
                stats.CountOfAttachments       = attachments.AttachmentCount;
                stats.CountOfUniqueAttachments = attachments.StreamsCount;
                stats.CountOfIndexes           = indexes.Count;

                stats.DatabaseId = Database.DocumentsStorage.Environment.Base64Id;
                stats.Is64Bit    = !Database.DocumentsStorage.Environment.Options.ForceUsing32BitsPager && IntPtr.Size == sizeof(long);
                stats.Pager      = Database.DocumentsStorage.Environment.Options.DataPager.GetType().ToString();

                stats.Indexes = new IndexInformation[indexes.Count];
                for (var i = 0; i < indexes.Count; i++)
                {
                    var  index = indexes[i];
                    bool isStale;
                    try
                    {
                        isStale = index.IsStale(context);
                    }
                    catch (OperationCanceledException)
                    {
                        // if the index has just been removed, let us consider it stale
                        // until it can be safely removed from the list of indexes in the
                        // database
                        isStale = true;
                    }
                    stats.Indexes[i] = new IndexInformation
                    {
                        State            = index.State,
                        IsStale          = isStale,
                        Name             = index.Name,
                        LockMode         = index.Definition.LockMode,
                        Priority         = index.Definition.Priority,
                        Type             = index.Type,
                        LastIndexingTime = index.LastIndexingTime
                    };

                    if (stats.LastIndexingTime.HasValue)
                    {
                        stats.LastIndexingTime = stats.LastIndexingTime >= index.LastIndexingTime ? stats.LastIndexingTime : index.LastIndexingTime;
                    }
                    else
                    {
                        stats.LastIndexingTime = index.LastIndexingTime;
                    }
                }
            }
        }
Example #26
0
 public CurrentTransformationScope(BlittableJsonReaderObject parameters, IncludeDocumentsCommand include, DocumentsStorage documentsStorage, TransformerStore transformerStore, DocumentsOperationContext documentsContext)
 {
     _parameters       = parameters;
     _include          = include;
     _documentsStorage = documentsStorage;
     _transformerStore = transformerStore;
     _documentsContext = documentsContext;
 }
Example #27
0
        public async Task AddGlobalChangeVectorToNewDocument(bool useSsl)
        {
            var clusterSize  = 3;
            var databaseName = GetDatabaseName();
            var leader       = await CreateRaftClusterAndGetLeader(clusterSize, true, 0, useSsl : useSsl);

            X509Certificate2 clientCertificate = null;
            X509Certificate2 adminCertificate  = null;

            if (useSsl)
            {
                var certificates = GenerateAndSaveSelfSignedCertificate();
                adminCertificate  = RegisterClientCertificate(certificates.ServerCertificate.Value, certificates.ClientCertificate1.Value, new Dictionary <string, DatabaseAccess>(), SecurityClearance.ClusterAdmin, server: leader);
                clientCertificate = RegisterClientCertificate(certificates.ServerCertificate.Value, certificates.ClientCertificate2.Value, new Dictionary <string, DatabaseAccess>
                {
                    [databaseName] = DatabaseAccess.Admin
                }, server: leader);
            }

            var doc = new DatabaseRecord(databaseName);

            using (var store = new DocumentStore()
            {
                Urls = new[] { leader.WebUrl },
                Database = databaseName,
                Certificate = adminCertificate,
                Conventions =
                {
                    DisableTopologyUpdates = true
                }
            }.Initialize())
            {
                var databaseResult = await store.Maintenance.Server.SendAsync(new CreateDatabaseOperation(doc, clusterSize));

                var topology = databaseResult.Topology;
                Assert.Equal(clusterSize, topology.AllNodes.Count());
                foreach (var server in Servers)
                {
                    await server.ServerStore.Cluster.WaitForIndexNotification(databaseResult.RaftCommandIndex);
                }
                foreach (var server in Servers)
                {
                    await server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(databaseName);
                }
                using (var session = store.OpenAsyncSession())
                {
                    await session.StoreAsync(new User { Name = "Karmel" }, "users/1");

                    await session.SaveChangesAsync();
                }

                Assert.True(await WaitForDocumentInClusterAsync <User>(
                                databaseResult.Topology,
                                databaseName,
                                "users/1",
                                u => u.Name.Equals("Karmel"),
                                TimeSpan.FromSeconds(60),
                                certificate: clientCertificate));

                // we need to wait for the database change vector to be updated,
                // which means that we need to wait for replication to do a full mesh propagation
                try
                {
                    await WaitForValueOnGroupAsync(topology, serverStore =>
                    {
                        var database = serverStore.DatabasesLandlord.TryGetOrCreateResourceStore(databaseName).Result;

                        using (database.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
                            using (context.OpenReadTransaction())
                            {
                                var cv = DocumentsStorage.GetDatabaseChangeVector(context);

                                return(cv != null && cv.Contains("A:1-") && cv.Contains("B:1-") && cv.Contains("C:1-"));
                            }
                    }, expected : true, timeout : 60000);
                }
Example #28
0
        private async Task <(List <Match> Matches, GraphQueryPlan QueryPlan, bool NotModified)> GetQueryResults(IndexQueryServerSide query, QueryOperationContext queryContext, long?existingResultEtag, OperationCancelToken token, bool collectIntermediateResults = false)
        {
            var q  = query.Metadata.Query;
            var qp = new GraphQueryPlan(query, queryContext, existingResultEtag, token, Database)
            {
                CollectIntermediateResults = collectIntermediateResults
            };

            qp.BuildQueryPlan();
            qp.OptimizeQueryPlan(); //TODO: audit optimization

            if (query.WaitForNonStaleResults)
            {
                qp.IsStale = await qp.WaitForNonStaleResults();
            }
            else
            {
                await qp.CreateAutoIndexesAndWaitIfNecessary();
            }

            //for the case where we don't wait for non-stale results, we will override IsStale in the QueryQueryStep steps

            if (queryContext.AreTransactionsOpened() == false)
            {
                queryContext.OpenReadTransaction();
            }

            qp.ResultEtag = DocumentsStorage.ReadLastEtag(queryContext.Documents.Transaction.InnerTransaction);
            if (existingResultEtag.HasValue)
            {
                if (qp.ResultEtag == existingResultEtag)
                {
                    return(null, null, true);
                }
            }
            await qp.Initialize();

            var matchResults = qp.Execute();

            if (query.Metadata.OrderBy != null)
            {
                Sort(matchResults, query.Metadata.OrderBy, Database.Name, query.Query);
            }

            var filter = q.GraphQuery.Where;

            if (filter != null)
            {
                for (int i = 0; i < matchResults.Count; i++)
                {
                    var resultAsJson = new DynamicJsonValue();
                    matchResults[i].PopulateVertices(resultAsJson);

                    using (var result = queryContext.Documents.ReadObject(resultAsJson, "graph/result"))
                    {
                        if (filter.IsMatchedBy(result, query.QueryParameters) == false)
                        {
                            matchResults[i] = default;
                        }
                    }
                }
            }

            if (query.Start > 0)
            {
                matchResults.RemoveRange(0, Math.Min(query.Start, matchResults.Count));
            }

            if (query.PageSize < matchResults.Count)
            {
                matchResults.RemoveRange(query.PageSize, matchResults.Count - query.PageSize);
            }
            return(matchResults, qp, false);
        }
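GetQueryResults pages its graph matches by trimming the list in place with RemoveRange rather than copying a slice, which avoids an extra allocation for large result sets. The same two-step trim in isolation (helper name is mine):

using System;
using System.Collections.Generic;

public static class InPlacePaging
{
    // Mirrors the RemoveRange-based skip/take at the end of GetQueryResults:
    // the list is mutated in place instead of being copied into a new list.
    public static void Apply<T>(List<T> results, int start, int pageSize)
    {
        if (start > 0)
            results.RemoveRange(0, Math.Min(start, results.Count));

        if (pageSize < results.Count)
            results.RemoveRange(pageSize, results.Count - pageSize);
    }
}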
Example #29
0
        protected QueryResultRetrieverBase(DocumentDatabase database, IndexQueryServerSide query, QueryTimingsScope queryTimings, FieldsToFetch fieldsToFetch, DocumentsStorage documentsStorage, JsonOperationContext context, bool reduceResults, IncludeDocumentsCommand includeDocumentsCommand)
        {
            _database = database;
            _query    = query;
            _context  = context;
            _includeDocumentsCommand = includeDocumentsCommand;

            DocumentsStorage = documentsStorage;
            RetrieverScope   = queryTimings?.For(nameof(QueryTimingsScope.Names.Retriever), start: false);
            FieldsToFetch    = fieldsToFetch;

            _blittableTraverser = reduceResults ? BlittableJsonTraverser.FlatMapReduceResults : BlittableJsonTraverser.Default;
        }
Example #30
0
            protected override long ExecuteCmd(DocumentsOperationContext context)
            {
                var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;

                RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
                var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);

                foreach (var item in _states)
                {
                    if (_configuration == null)
                    {
                        return(RolledUp);
                    }

                    if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
                    {
                        continue;
                    }

                    if (config.Disabled)
                    {
                        continue;
                    }

                    var policy = config.GetPolicyByName(item.RollupPolicy, out _);
                    if (policy == null)
                    {
                        table.DeleteByKey(item.Key);
                        continue;
                    }

                    if (table.ReadByKey(item.Key, out var current) == false)
                    {
                        table.DeleteByKey(item.Key);
                        continue;
                    }

                    if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
                    {
                        continue; // concurrency check
                    }
                    var rollupStart    = item.NextRollup.Add(-policy.AggregationTime);
                    var rawTimeSeries  = item.Name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];
                    var intoTimeSeries = policy.GetTimeSeriesName(rawTimeSeries);

                    var intoReader           = tss.GetReader(context, item.DocId, intoTimeSeries, rollupStart, DateTime.MaxValue);
                    var previouslyAggregated = intoReader.AllValues().Any();
                    if (previouslyAggregated)
                    {
                        var changeVector = intoReader.GetCurrentSegmentChangeVector();
                        if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, changeVector) == ConflictStatus.AlreadyMerged)
                        {
                            // this rollup is already done
                            table.DeleteByKey(item.Key);
                            continue;
                        }
                    }

                    if (_isFirstInTopology == false)
                    {
                        continue; // we execute the actual rollup only on the primary node to avoid conflicts
                    }
                    var rollupEnd = new DateTime(NextRollup(_now, policy)).Add(-policy.AggregationTime).AddMilliseconds(-1);
                    var reader    = tss.GetReader(context, item.DocId, item.Name, rollupStart, rollupEnd);

                    if (previouslyAggregated)
                    {
                        var hasPriorValues = tss.GetReader(context, item.DocId, item.Name, DateTime.MinValue, rollupStart).AllValues().Any();
                        if (hasPriorValues == false)
                        {
                            table.DeleteByKey(item.Key);
                            var first = tss.GetReader(context, item.DocId, item.Name, rollupStart, DateTime.MaxValue).First();
                            if (first == default)
                            {
                                continue; // nothing we can do here
                            }
                            if (first.Timestamp > item.NextRollup)
                            {
                                // the 'source' time-series has no values in this rollup frame,
                                // so we need to aggregate only from the next time frame onward
                                using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                                {
                                    tss.Rollups.MarkForPolicy(context, slicer, policy, first.Timestamp);
                                }
                                continue;
                            }
                        }
                    }

                    // a rollup from the raw data generates a 6-value rollup of (first, last, min, max, sum, count);
                    // further rollups then aggregate each of those values by its type
                    var mode      = item.Name.Contains(TimeSeriesConfiguration.TimeSeriesRollupSeparator) ? AggregationMode.FromAggregated : AggregationMode.FromRaw;
                    var rangeSpec = new RangeGroup();
                    switch (policy.AggregationTime.Unit)
                    {
                    case TimeValueUnit.Second:
                        rangeSpec.Ticks          = TimeSpan.FromSeconds(policy.AggregationTime.Value).Ticks;
                        rangeSpec.TicksAlignment = RangeGroup.Alignment.Second;
                        break;

                    case TimeValueUnit.Month:
                        rangeSpec.Months = policy.AggregationTime.Value;
                        break;

                    default:
                        throw new ArgumentOutOfRangeException(nameof(policy.AggregationTime.Unit), $"Not supported time value unit '{policy.AggregationTime.Unit}'");
                    }
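                    // second-based units map to a fixed number of ticks, while month-based ranges must be
                    // tracked as calendar months, since their length in ticks varies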
                    rangeSpec.InitializeRange(rollupStart);

                    List<SingleResult> values = null;
                    try
                    {
                        values = GetAggregatedValues(reader, rangeSpec, mode);
                    }
                    catch (RollupExceedNumberOfValuesException e)
                    {
                        var name  = item.Name;
                        var docId = item.DocId;
                        try
                        {
                            var document = context.DocumentDatabase.DocumentsStorage.Get(context, item.DocId, throwOnConflict: false);
                            docId = document?.Id ?? docId;
                            name  = tss.GetOriginalName(context, docId, name);
                        }
                        catch
                        {
                            // ignore
                        }
                        var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info(msg, e);
                        }

                        var alert = AlertRaised.Create(context.DocumentDatabase.Name, "Failed to perform rollup because the time-series has more than 5 values", msg,
                                                       AlertType.RollupExceedNumberOfValues, NotificationSeverity.Warning, $"{item.DocId}/{item.Name}", new ExceptionDetails(e));

                        context.DocumentDatabase.NotificationCenter.Add(alert);

                        continue;
                    }

                    if (previouslyAggregated)
                    {
                        // if we need to re-aggregate we need to delete everything we have from that point on.
                        var removeRequest = new TimeSeriesStorage.DeletionRangeRequest
                        {
                            Collection = item.Collection,
                            DocumentId = item.DocId,
                            Name       = intoTimeSeries,
                            From       = rollupStart,
                            To         = DateTime.MaxValue,
                        };

                        tss.DeleteTimestampRange(context, removeRequest);
                    }

                    tss.AppendTimestamp(context, item.DocId, item.Collection, intoTimeSeries, values, verifyName: false);
                    RolledUp++;
                    table.DeleteByKey(item.Key);

                    var stats = tss.Stats.GetStats(context, item.DocId, item.Name);
                    if (stats.End > rollupEnd)
                    {
                        // we know that we have values after the current rollup and we need to mark them
                        var nextRollup = rollupEnd.AddMilliseconds(1);
                        intoReader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
                        if (intoReader.Init() == false)
                        {
                            Debug.Assert(false, "We have values but no segment?");
                            continue;
                        }

                        using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                        {
                            tss.Rollups.MarkForPolicy(context, slicer, policy, intoReader.First().Timestamp);
                        }
                    }
                }

                return RolledUp;
            }
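For context, a hedged sketch of the client-side configuration that feeds this command: the collection name, policy name, and interval below are illustrative assumptions, while the operation and types follow the RavenDB 5.x client API:

        using System.Collections.Generic;
        using Raven.Client.Documents;
        using Raven.Client.Documents.Operations.TimeSeries;
        using Sparrow;

        using (var store = new DocumentStore { Urls = new[] { "http://localhost:8080" }, Database = "Demo" })
        {
            store.Initialize();

            var config = new TimeSeriesConfiguration
            {
                Collections = new Dictionary<string, TimeSeriesCollectionConfiguration>
                {
                    // aggregate raw "Users" time-series entries into 1-hour buckets;
                    // ExecuteCmd above later resolves this policy via config.GetPolicyByName
                    ["Users"] = new TimeSeriesCollectionConfiguration
                    {
                        Policies = new List<TimeSeriesPolicy>
                        {
                            new TimeSeriesPolicy("ByHour", TimeValue.FromHours(1))
                        }
                    }
                }
            };

            store.Maintenance.Send(new ConfigureTimeSeriesOperation(config));
        }

With such a configuration in place, values appended to a raw series are periodically aggregated by the command above into the corresponding "<name>@ByHour" rollup series.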