private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    var bufferStream = new BufferedStream(stream, 1024 * 64);

    var lastBatchSentTime = Stopwatch.StartNew();
    using (var writer = new JsonTextWriter(new StreamWriter(bufferStream)))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var processedDocumentsCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            bool isPrefixCriteria = !string.IsNullOrWhiteSpace(criteria.KeyStartsWith);

            Func<JsonDocument, bool> addDocument = doc =>
            {
                timeout.Delay();

                if (doc == null)
                {
                    // we only have this heartbeat when the streaming has gone on for a long time
                    // and we haven't sent anything to the user in a while (because of filtering, skipping, etc).
                    writer.WriteRaw(Environment.NewLine);
                    writer.Flush();
                    if (lastBatchSentTime.ElapsedMilliseconds > 30000)
                        return false;
                    return true;
                }

                processedDocumentsCount++;

                // We can't continue because we have already maxed out the batch size in bytes.
                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return false;

                // We can't continue because we have already maxed out the number of documents to send.
                if (batchDocCount >= options.MaxDocCount)
                    return false;

                // We can continue because we are ignoring system documents.
                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return true;

                // We can continue because we are ignoring the document as it doesn't fit the criteria.
                if (MatchCriteria(criteria, doc) == false)
                    return true;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;

                return true; // We get the next document
            };

            var retries = 0;
            do
            {
                var lastProcessedDocumentsCount = processedDocumentsCount;

                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSetAndGetDocumentsInDocumentCache())
                    {
                        if (isPrefixCriteria)
                        {
                            // If we don't get any document from GetDocumentsWithIdStartingWith, it could be that we are
                            // in the presence of a lagoon of uninteresting documents, so we are hitting a timeout.
                            lastProcessedDocEtag = Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            hasMoreDocs = false;
                        }
                        else
                        {
                            // It doesn't matter if we match the criteria or not, the document has already been processed.
                            lastProcessedDocEtag = Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            // If we don't get any document from GetDocuments it may be a signal that something is wrong.
                            if (lastProcessedDocEtag == null)
                            {
                                hasMoreDocs = false;
                            }
                            else
                            {
                                var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                                hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                                startEtag = lastProcessedDocEtag;
                            }

                            retries = lastProcessedDocumentsCount == batchDocCount ? retries : 0;
                        }
                    }
                });

                if (lastBatchSentTime.ElapsedMilliseconds >= 30000)
                {
                    if (batchDocCount == 0)
                        log.Warn("Subscription filtered out all possible documents for {0:#,#;;0} seconds in a row, stopping operation", lastBatchSentTime.Elapsed.TotalSeconds);
                    break;
                }

                if (lastProcessedDocumentsCount == processedDocumentsCount)
                {
                    if (retries == 3)
                    {
                        log.Warn("Subscription processing did not end up replicating any documents for {0} times in a row, stopping operation", retries);
                    }
                    else
                    {
                        log.Warn("Subscription processing did not end up replicating any documents, due to possible storage error, retry number: {0}", retries);
                    }

                    retries++;
                }
            } while (retries < 3 && hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0 || processedDocumentsCount > 0 || isPrefixCriteria)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();

            bufferStream.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
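// For context: the method above streams a single JSON object whose "Results" array holds the
// matching documents (the raw newlines written between them, including heartbeats, are legal
// JSON whitespace), optionally followed by a "LastProcessedEtag" property. Below is a minimal
// client-side sketch of consuming that wire format, assuming Json.NET on the client; the
// SubscriptionBatchReader name and the processDocument callback are illustrative assumptions,
// not part of the server code above.
using System;
using System.IO;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;

public static class SubscriptionBatchReader
{
    public static string ReadSubscriptionBatch(Stream responseStream, Action<JObject> processDocument)
    {
        string lastProcessedEtag = null;

        using (var reader = new JsonTextReader(new StreamReader(responseStream)))
        {
            while (reader.Read())
            {
                if (reader.TokenType != JsonToken.PropertyName)
                    continue;

                var property = (string)reader.Value;
                if (property == "Results")
                {
                    reader.Read(); // advance onto the StartArray token
                    while (reader.Read() && reader.TokenType != JsonToken.EndArray)
                    {
                        // Each array element is one complete document; heartbeat
                        // newlines between elements are skipped as whitespace.
                        processDocument(JObject.Load(reader));
                    }
                }
                else if (property == "LastProcessedEtag")
                {
                    lastProcessedEtag = reader.ReadAsString();
                }
            }
        }

        return lastProcessedEtag;
    }
}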
public DocumentDatabase(InMemoryRavenConfiguration configuration, TransportState recievedTransportState = null)
{
    TimerManager = new ResourceTimerManager();
    DocumentLock = new PutSerialLock();
    IdentityLock = new PutSerialLock();
    Name = configuration.DatabaseName;
    ResourceName = Name;
    Configuration = configuration;
    transportState = recievedTransportState ?? new TransportState();
    ExtensionsState = new AtomicDictionary<object>();

    using (LogManager.OpenMappedContext("database", Name ?? Constants.SystemDatabase))
    {
        Log.Debug("Start loading the following database: {0}", Name ?? Constants.SystemDatabase);

        initializer = new DocumentDatabaseInitializer(this, configuration);

        initializer.ValidateLicense();
        initializer.ValidateStorage();
        initializer.InitializeEncryption();
        initializer.SubscribeToDomainUnloadOrProcessExit();
        initializer.SubscribeToDiskSpaceChanges();
        initializer.ExecuteAlterConfiguration();
        initializer.SatisfyImportsOnce();

        backgroundTaskScheduler = configuration.CustomTaskScheduler ?? TaskScheduler.Default;

        recentTouches = new SizeLimitedConcurrentDictionary<string, TouchedDocumentInfo>(configuration.MaxRecentTouchesToRemember, StringComparer.OrdinalIgnoreCase);

        configuration.Container.SatisfyImportsOnce(this);

        workContext = new WorkContext
        {
            Database = this,
            DatabaseName = Name,
            IndexUpdateTriggers = IndexUpdateTriggers,
            ReadTriggers = ReadTriggers,
            TaskScheduler = backgroundTaskScheduler,
            Configuration = configuration,
            IndexReaderWarmers = IndexReaderWarmers
        };

        try
        {
            uuidGenerator = new SequentialUuidGenerator();
            initializer.InitializeTransactionalStorage(uuidGenerator);
            lastCollectionEtags = new LastCollectionEtags(WorkContext);
        }
        catch (Exception ex)
        {
            Log.ErrorException("Could not initialize transactional storage, not creating database", ex);
            try
            {
                if (TransactionalStorage != null)
                    TransactionalStorage.Dispose();
                if (initializer != null)
                {
                    initializer.UnsubscribeToDomainUnloadOrProcessExit();
                    initializer.Dispose();
                }
            }
            catch (Exception e)
            {
                Log.ErrorException("Could not dispose of the already initialized DocumentDatabase members", e);
            }
            throw;
        }

        try
        {
            TransactionalStorage.Batch(actions => uuidGenerator.EtagBase = actions.General.GetNextIdentityValue("Raven/Etag"));

            initializer.InitializeIndexDefinitionStorage();
            Indexes = new IndexActions(this, recentTouches, uuidGenerator, Log);
            Attachments = new AttachmentActions(this, recentTouches, uuidGenerator, Log);
            Maintenance = new MaintenanceActions(this, recentTouches, uuidGenerator, Log);
            Notifications = new NotificationActions(this, recentTouches, uuidGenerator, Log);
            Subscriptions = new SubscriptionActions(this, Log);
            Patches = new PatchActions(this, recentTouches, uuidGenerator, Log);
            Queries = new QueryActions(this, recentTouches, uuidGenerator, Log);
            Tasks = new TaskActions(this, recentTouches, uuidGenerator, Log);
            Transformers = new TransformerActions(this, recentTouches, uuidGenerator, Log);
            Documents = new DocumentActions(this, recentTouches, uuidGenerator, Log);

            inFlightTransactionalState = TransactionalStorage.InitializeInFlightTransactionalState(this,
                (key, etag, document, metadata, transactionInformation) => Documents.Put(key, etag, document, metadata, transactionInformation),
                (key, etag, transactionInformation) => Documents.Delete(key, etag, transactionInformation));

            InitializeTriggersExceptIndexCodecs();
            // Second stage initializing before index storage for determining the hash algorithm for encrypted databases that were upgraded from 2.5
            SecondStageInitialization();

            // Index codecs must be initialized before we try to read an index
            InitializeIndexCodecTriggers();
            initializer.InitializeIndexStorage();

            CompleteWorkContextSetup();

            prefetcher = new Prefetcher(workContext);

            IndexReplacer = new IndexReplacer(this);
            indexingExecuter = new IndexingExecuter(workContext, prefetcher, IndexReplacer);
            InitializeTriggersExceptIndexCodecs();

            EnsureAllIndexDefinitionsHaveIndexes();

            RaiseIndexingWiringComplete();

            ExecuteStartupTasks();
            lastCollectionEtags.InitializeBasedOnIndexingResults();

            Log.Debug("Finish loading the following database: {0}", configuration.DatabaseName ?? Constants.SystemDatabase);
        }
        catch (Exception e)
        {
            Log.ErrorException("Could not create database", e);
            try
            {
                Dispose();
            }
            catch (Exception ex)
            {
                Log.FatalException("Failed to dispose when already getting an error during ctor", ex);
            }
            throw;
        }
    }
}
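// The constructor above either fully initializes the instance or disposes whatever it had
// managed to create before rethrowing, so a caller only has to dispose instances that were
// actually returned. A minimal sketch of how an embedded host might stand a database up and
// tear it down; the configuration values and the SpinBackgroundWorkers call are assumptions
// about the hosting flow, not taken from the code above.
var configuration = new InMemoryRavenConfiguration
{
    DataDirectory = @"Data\Northwind",  // hypothetical path
    DefaultStorageTypeName = "esent"    // or "voron", depending on the build
};
configuration.Initialize();

DocumentDatabase database = null;
try
{
    database = new DocumentDatabase(configuration);
    database.SpinBackgroundWorkers(); // start indexing and other background work
    // ... serve requests ...
}
finally
{
    if (database != null)
        database.Dispose();
}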
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    using (var streamWriter = new StreamWriter(stream))
    using (var writer = new JsonTextWriter(streamWriter))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            Action<JsonDocument> addDocument = doc =>
            {
                timeout.Delay();

                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return;

                if (batchDocCount >= options.MaxDocCount)
                    return;

                lastProcessedDocEtag = doc.Etag;

                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return;

                if (MatchCriteria(criteria, doc) == false)
                    return;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;
            };

            do
            {
                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                    {
                        if (!string.IsNullOrWhiteSpace(criteria.KeyStartsWith))
                        {
                            Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                        else
                        {
                            Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                    }

                    if (lastProcessedDocEtag == null)
                    {
                        hasMoreDocs = false;
                    }
                    else
                    {
                        var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                        hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                        startEtag = lastProcessedDocEtag;
                    }
                });
            } while (hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
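// Both versions of StreamToClient rely on cts.TimeoutAfter(...) handing back an object whose
// Delay() call pushes the cancellation deadline forward every time a document is processed,
// so the token only fires after a genuinely quiet period. A self-contained sketch of that
// sliding inactivity timeout; the SlidingTimeout name is an assumption, and this is not
// RavenDB's actual implementation.
using System;
using System.Threading;

public sealed class SlidingTimeout : IDisposable
{
    private readonly TimeSpan timeout;
    private readonly Timer timer;

    public SlidingTimeout(CancellationTokenSource cts, TimeSpan timeout)
    {
        this.timeout = timeout;
        // One-shot timer: cancels the token once the full quiet period elapses.
        timer = new Timer(_ => cts.Cancel(), null, timeout, Timeout.InfiniteTimeSpan);
    }

    // Re-arm the timer; call this whenever work is still making progress.
    public void Delay()
    {
        timer.Change(timeout, Timeout.InfiniteTimeSpan);
    }

    public void Dispose()
    {
        timer.Dispose();
    }
}

// Usage mirrors the pattern in the methods above:
// using (var cts = new CancellationTokenSource())
// using (var timeout = new SlidingTimeout(cts, TimeSpan.FromSeconds(30)))
// {
//     // per-document work calls timeout.Delay(); pass cts.Token to the storage reads
// }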