private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    var bufferStream = new BufferedStream(stream, 1024 * 64);
    var lastBatchSentTime = Stopwatch.StartNew();

    using (var writer = new JsonTextWriter(new StreamWriter(bufferStream)))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var processedDocumentsCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            bool isPrefixCriteria = !string.IsNullOrWhiteSpace(criteria.KeyStartsWith);

            Func<JsonDocument, bool> addDocument = doc =>
            {
                timeout.Delay();

                if (doc == null)
                {
                    // We only get this heartbeat when the streaming has gone on for a long time
                    // and we haven't sent anything to the user in a while (because of filtering, skipping, etc.).
                    writer.WriteRaw(Environment.NewLine);
                    writer.Flush();

                    if (lastBatchSentTime.ElapsedMilliseconds > 30000)
                        return false;

                    return true;
                }

                processedDocumentsCount++;

                // We can't continue because we have already maxed out the batch size in bytes.
                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return false;

                // We can't continue because we have already maxed out the number of documents to send.
                if (batchDocCount >= options.MaxDocCount)
                    return false;

                // We can continue because we are ignoring system documents.
                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return true;

                // We can continue because we are ignoring the document as it doesn't fit the criteria.
                if (MatchCriteria(criteria, doc) == false)
                    return true;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;

                return true; // Move on to the next document.
            };

            var retries = 0;
            do
            {
                var lastProcessedDocumentsCount = processedDocumentsCount;

                Database.TransactionalStorage.Batch(accessor =>
                {
                    // We may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly.
                    using (DocumentCacher.SkipSetAndGetDocumentsInDocumentCache())
                    {
                        if (isPrefixCriteria)
                        {
                            // If we don't get any document from GetDocumentsWithIdStartingWith, it could be that
                            // we are in the middle of a long run of uninteresting documents and are hitting a timeout.
                            lastProcessedDocEtag = Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            hasMoreDocs = false;
                        }
                        else
                        {
                            // It doesn't matter if we match the criteria or not; the document has already been processed.
                            lastProcessedDocEtag = Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            // If we don't get any document from GetDocuments, it may be a signal that something is wrong.
                            if (lastProcessedDocEtag == null)
                            {
                                hasMoreDocs = false;
                            }
                            else
                            {
                                var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                                hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                                startEtag = lastProcessedDocEtag;
                            }

                            retries = lastProcessedDocumentsCount == batchDocCount ? retries : 0;
                        }
                    }
                });

                if (lastBatchSentTime.ElapsedMilliseconds >= 30000)
                {
                    if (batchDocCount == 0)
                        log.Warn("Subscription filtered out all possible documents for {0:#,#;;0} seconds in a row, stopping operation", lastBatchSentTime.Elapsed.TotalSeconds);

                    break;
                }

                if (lastProcessedDocumentsCount == processedDocumentsCount)
                {
                    if (retries == 3)
                    {
                        log.Warn("Subscription processing did not end up replicating any documents {0} times in a row, stopping operation", retries);
                    }
                    else
                    {
                        log.Warn("Subscription processing did not end up replicating any documents, due to possible storage error, retry number: {0}", retries);
                    }

                    retries++;
                }
            } while (retries < 3 && hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0 || processedDocumentsCount > 0 || isPrefixCriteria)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();

            bufferStream.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
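
// For context, a minimal sketch of how a client could consume the payload StreamToClient produces:
// a single JSON object whose "Results" array is interleaved with raw newlines (including the empty
// heartbeat lines), which are legal JSON whitespace and are therefore skipped by JsonTextReader.
// This is an illustration under assumptions, not RavenDB's actual subscription client; it assumes
// Newtonsoft.Json's JsonTextReader/JObject and Raven's Etag.Parse are available, and the method
// name ReadSubscriptionBatch is hypothetical.
private static Etag ReadSubscriptionBatch(Stream responseStream, Action<JObject> onDocument)
{
    Etag lastProcessedEtag = null;

    using (var reader = new JsonTextReader(new StreamReader(responseStream)))
    {
        while (reader.Read())
        {
            // Every object that starts at depth 2 sits inside the "Results" array and is one
            // streamed document; JObject.Load consumes it whole and leaves the reader positioned
            // after its closing brace, so inner tokens never surface in this loop.
            if (reader.TokenType == JsonToken.StartObject && reader.Depth == 2)
            {
                onDocument(JObject.Load(reader));
            }
            else if (reader.TokenType == JsonToken.PropertyName && (string)reader.Value == "LastProcessedEtag")
            {
                // The server only writes this property when it processed something, so it may be absent.
                lastProcessedEtag = Etag.Parse(reader.ReadAsString());
            }
        }
    }

    return lastProcessedEtag;
}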
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    using (var streamWriter = new StreamWriter(stream))
    using (var writer = new JsonTextWriter(streamWriter))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            Action<JsonDocument> addDocument = doc =>
            {
                timeout.Delay();

                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return;

                if (batchDocCount >= options.MaxDocCount)
                    return;

                lastProcessedDocEtag = doc.Etag;

                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return;

                if (MatchCriteria(criteria, doc) == false)
                    return;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;
            };

            do
            {
                Database.TransactionalStorage.Batch(accessor =>
                {
                    // We may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly.
                    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                    {
                        if (!string.IsNullOrWhiteSpace(criteria.KeyStartsWith))
                        {
                            Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                        else
                        {
                            Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);
                        }
                    }

                    if (lastProcessedDocEtag == null)
                    {
                        hasMoreDocs = false;
                    }
                    else
                    {
                        var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                        hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                        startEtag = lastProcessedDocEtag;
                    }
                });
            } while (hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
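
// Both versions above rely on cts.TimeoutAfter(...) returning a disposable guard whose Delay()
// pushes the cancellation deadline forward on every document, so an enumeration that is slow but
// still making progress stays alive while a stalled one is cancelled. The class below is a
// hypothetical stand-in written only to illustrate that sliding-deadline pattern; the actual
// object returned by RavenDB's TimeoutAfter extension may be implemented differently.
private sealed class SlidingTimeout : IDisposable
{
    private readonly TimeSpan dueTime;
    private readonly Timer timer;

    public SlidingTimeout(CancellationTokenSource cts, TimeSpan dueTime)
    {
        this.dueTime = dueTime;
        // Cancel the shared token when the deadline elapses without an intervening Delay() call.
        timer = new Timer(_ => cts.Cancel(), null, dueTime, Timeout.InfiniteTimeSpan);
    }

    public void Delay()
    {
        // Reset the deadline; called from addDocument for every document the storage layer yields.
        timer.Change(dueTime, Timeout.InfiniteTimeSpan);
    }

    public void Dispose()
    {
        timer.Dispose();
    }
}

// Hypothetical usage mirroring the controller code:
//
//     using (var cts = new CancellationTokenSource())
//     using (var timeout = new SlidingTimeout(cts, DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
//     {
//         // ... pass cts.Token to the storage enumeration, call timeout.Delay() per document ...
//     }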