public void Index(string index, AbstractViewGenerator viewGenerator, IndexingBatch batch, WorkContext context, IStorageActionsAccessor actions, DateTime minimumTimestamp)
{
    Index value;
    if (indexes.TryGetValue(index, out value) == false)
    {
        log.Debug("Tried to index on a non-existent index {0}, ignoring", index);
        return;
    }

    using (EnsureInvariantCulture())
    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
    {
        value.IndexDocuments(viewGenerator, batch, context, actions, minimumTimestamp);
        context.RaiseIndexChangeNotification(new IndexChangeNotification
        {
            Name = index,
            Type = IndexChangeTypes.MapCompleted
        });
    }
}
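// Every example in this file wraps its work in DocumentCacher.SkipSettingDocumentsInDocumentCache().
// Below is a minimal sketch of the disposable-flag pattern such a method presumably
// relies on; the field and type names are hypothetical, not RavenDB's actual
// implementation. The returned IDisposable restores the previous value, so scopes nest safely.
using System;

public static class DocumentCacherSketch
{
    [ThreadStatic]
    private static bool skipSettingDocumentsInCache; // checked before every cache insert

    public static IDisposable SkipSettingDocumentsInDocumentCache()
    {
        var old = skipSettingDocumentsInCache;
        skipSettingDocumentsInCache = true;
        return new DisposableAction(() => skipSettingDocumentsInCache = old);
    }

    public static void SetCachedDocument(string key, object document)
    {
        if (skipSettingDocumentsInCache)
            return; // streaming/bulk operations bypass the cache entirely
        // ... normal cache insertion would go here ...
    }

    private sealed class DisposableAction : IDisposable
    {
        private readonly Action onDispose;
        public DisposableAction(Action onDispose) { this.onDispose = onDispose; }
        public void Dispose() { onDispose(); }
    }
}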
protected override Task SerializeToStreamAsync(Stream stream, TransportContext context)
{
    var bufferSize = queryOp.Header.TotalResults > 1024 ? 1024 * 64 : 1024 * 8;
    using (var bufferedStream = new BufferedStream(stream, bufferSize))
    using (queryOp)
    using (accessor)
    using (_timeout)
    using (var writer = GetOutputWriter(req, bufferedStream))
    // we may be sending a LOT of documents to the user, and most
    // of them aren't going to be relevant for other ops, so we are going to skip
    // the cache for that, to avoid filling it up very quickly
    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
    {
        outputContentTypeSetter(writer.ContentType);

        writer.WriteHeader();
        try
        {
            queryOp.Execute(o =>
            {
                _timeout.Delay();
                writer.Write(o);
            });
        }
        catch (Exception e)
        {
            writer.WriteError(e);
        }
    }

    return Task.FromResult(true);
}
public override void Respond(IHttpContext context)
{
    using (context.Response.Streaming())
    {
        context.Response.ContentType = "application/json; charset=utf-8";

        var match = urlMatcher.Match(context.GetRequestUrl());
        var index = match.Groups[1].Value;

        var query = context.GetIndexQueryFromHttpContext(int.MaxValue);
        if (string.IsNullOrEmpty(context.Request.QueryString["pageSize"]))
            query.PageSize = int.MaxValue;

        var isHeadRequest = context.Request.HttpMethod == "HEAD";
        if (isHeadRequest)
            query.PageSize = 0;

        using (var writer = GetOutputWriter(context))
        {
            // we may be sending a LOT of documents to the user, and most
            // of them aren't going to be relevant for other ops, so we are going to skip
            // the cache for that, to avoid filling it up very quickly
            using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
            using (var cts = new CancellationTokenSource())
            using (var timeout = cts.TimeoutAfter(Settings.DatbaseOperationTimeout))
            {
                Database.Query(index, query, cts.Token, information =>
                {
                    context.Response.AddHeader("Raven-Result-Etag", information.ResultEtag.ToString());
                    context.Response.AddHeader("Raven-Index-Etag", information.IndexEtag.ToString());
                    context.Response.AddHeader("Raven-Is-Stale", information.IsStable ? "true" : "false");
                    context.Response.AddHeader("Raven-Index", information.Index);
                    context.Response.AddHeader("Raven-Total-Results", information.TotalResults.ToString(CultureInfo.InvariantCulture));
                    context.Response.AddHeader("Raven-Index-Timestamp", information.IndexTimestamp.ToString(Default.DateTimeFormatsToWrite, CultureInfo.InvariantCulture));

                    if (isHeadRequest)
                        return;
                    writer.WriteHeader();
                },
                o =>
                {
                    timeout.Delay();
                    Database.WorkContext.UpdateFoundWork();
                    writer.Write(o);
                });
            }
        }
    }
}
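// Several responders here pair cts.TimeoutAfter(...) with timeout.Delay() once per
// streamed document. A plausible sketch of that sliding-timeout pattern follows;
// TimeoutExtensions and SlidingTimeout are illustrative names, not the actual RavenDB
// types. The CancellationTokenSource fires only if no document has been written for
// the configured interval, so a long but healthy stream is never cut off, while a
// stalled one is.
using System;
using System.Threading;

public static class TimeoutExtensions
{
    public static SlidingTimeout TimeoutAfter(this CancellationTokenSource cts, TimeSpan dueTime)
    {
        return new SlidingTimeout(cts, dueTime);
    }
}

public sealed class SlidingTimeout : IDisposable
{
    private readonly Timer timer;
    private readonly TimeSpan dueTime;

    public SlidingTimeout(CancellationTokenSource cts, TimeSpan dueTime)
    {
        this.dueTime = dueTime;
        // Cancel the source unless Delay() pushes the deadline forward in time.
        timer = new Timer(_ => cts.Cancel(), null, dueTime, Timeout.InfiniteTimeSpan);
    }

    // Called for every document written: resets the countdown.
    public void Delay()
    {
        timer.Change(dueTime, Timeout.InfiniteTimeSpan);
    }

    public void Dispose()
    {
        timer.Dispose();
    }
}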
public override void Respond(IHttpContext context)
{
    using (context.Response.Streaming())
    {
        context.Response.ContentType = "application/json; charset=utf-8";

        using (var writer = new JsonTextWriter(new StreamWriter(context.Response.OutputStream)))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Results");
            writer.WriteStartArray();

            Database.TransactionalStorage.Batch(accessor =>
            {
                var startsWith = context.Request.QueryString["startsWith"];
                int pageSize = context.GetPageSize(int.MaxValue);
                if (string.IsNullOrEmpty(context.Request.QueryString["pageSize"]))
                    pageSize = int.MaxValue;

                // we may be sending a LOT of documents to the user, and most
                // of them aren't going to be relevant for other ops, so we are going to skip
                // the cache for that, to avoid filling it up very quickly
                using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                {
                    if (string.IsNullOrEmpty(startsWith))
                    {
                        Database.GetDocuments(context.GetStart(), pageSize, context.GetEtagFromQueryString(),
                                              doc => doc.WriteTo(writer));
                    }
                    else
                    {
                        Database.GetDocumentsWithIdStartingWith(
                            startsWith,
                            context.Request.QueryString["matches"],
                            context.Request.QueryString["exclude"],
                            context.GetStart(),
                            pageSize,
                            doc => doc.WriteTo(writer));
                    }
                }
            });

            writer.WriteEndArray();
            writer.WriteEndObject();
            writer.Flush();
        }
    }
}
private void StreamToClient(Stream stream, string startsWith, int start, int pageSize, Etag etag, string matches, int nextPageStart, string skipAfter)
{
    using (var cts = new CancellationTokenSource())
    using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
    using (var writer = new JsonTextWriter(new StreamWriter(stream)))
    {
        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        Database.TransactionalStorage.Batch(accessor =>
        {
            // we may be sending a LOT of documents to the user, and most
            // of them aren't going to be relevant for other ops, so we are going to skip
            // the cache for that, to avoid filling it up very quickly
            using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
            {
                if (string.IsNullOrEmpty(startsWith))
                {
                    Database.Documents.GetDocuments(start, pageSize, etag, cts.Token, doc =>
                    {
                        timeout.Delay();
                        doc.WriteTo(writer);
                        writer.WriteRaw(Environment.NewLine);
                    });
                }
                else
                {
                    var nextPageStartInternal = nextPageStart;

                    Database.Documents.GetDocumentsWithIdStartingWith(startsWith, matches, null, start, pageSize, cts.Token, ref nextPageStartInternal, doc =>
                    {
                        timeout.Delay();
                        doc.WriteTo(writer);
                        writer.WriteRaw(Environment.NewLine);
                    }, skipAfter: skipAfter);

                    nextPageStart = nextPageStartInternal;
                }
            }
        });

        writer.WriteEndArray();
        writer.WritePropertyName("NextPageStart");
        writer.WriteValue(nextPageStart);
        writer.WriteEndObject();
        writer.Flush();
    }
}
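// The streaming endpoints above emit one valid JSON object, but the
// WriteRaw(Environment.NewLine) call after every document puts each array element
// on its own line. The newlines are plain whitespace to a JSON parser, so a client
// can still consume the payload incrementally with a forward-only reader instead of
// buffering the whole response. A minimal consumption sketch, assuming Json.NET on
// the client side:
using System;
using System.IO;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;

public static class StreamConsumerSketch
{
    // Invokes onDocument once per element of the "Results" array, keeping memory flat.
    public static void ReadResults(TextReader source, Action<JObject> onDocument)
    {
        using (var reader = new JsonTextReader(source))
        {
            while (reader.Read())
            {
                if (reader.TokenType != JsonToken.StartArray)
                    continue;

                // We are inside "Results"; every object until EndArray is one document.
                // JObject.Load consumes exactly one object and leaves the reader
                // positioned so the next Read() moves to the following element.
                while (reader.Read() && reader.TokenType != JsonToken.EndArray)
                    onDocument(JObject.Load(reader));
            }
        }
    }
}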
public void Index(string index, AbstractViewGenerator viewGenerator, IEnumerable<dynamic> docs, WorkContext context, IStorageActionsAccessor actions, DateTime minimumTimestamp)
{
    Index value;
    if (indexes.TryGetValue(index, out value) == false)
    {
        log.Debug("Tried to index on a non-existent index {0}, ignoring", index);
        return;
    }

    using (EnsureInvariantCulture())
    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
    {
        value.IndexDocuments(viewGenerator, docs, context, actions, minimumTimestamp);
    }
}
void Delete()
{
    var currentTime = SystemTime.UtcNow;
    var currentExpiryThresholdTime = currentTime.AddHours(-Settings.HoursToKeepMessagesBeforeExpiring);
    logger.Debug("Trying to find expired documents to delete (with threshold {0})", currentExpiryThresholdTime.ToString(Default.DateTimeFormatsToWrite, CultureInfo.InvariantCulture));

    const string queryString = "Status:3 OR Status:4";
    var query = new IndexQuery
    {
        Start = 0,
        PageSize = deletionBatchSize,
        Cutoff = currentTime,
        Query = queryString,
        FieldsToFetch = new[] { "__document_id", "ProcessedAt" },
        SortedFields = new[] { new SortedField("ProcessedAt") { Field = "ProcessedAt", Descending = false } },
    };

    try
    {
        var docsToExpire = 0;
        // we may be receiving a LOT of documents to delete, so we are going to skip
        // the cache for that, to avoid filling it up very quickly
        var stopwatch = Stopwatch.StartNew();
        int deletionCount;
        using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
        using (Database.DisableAllTriggersForCurrentThread())
        using (var cts = new CancellationTokenSource())
        {
            var documentWithCurrentThresholdTimeReached = false;
            var items = new List<ICommandData>(deletionBatchSize);
            try
            {
                Database.Query(indexName, query, CancellationTokenSource.CreateLinkedTokenSource(Database.WorkContext.CancellationToken, cts.Token).Token, null, doc =>
                {
                    if (documentWithCurrentThresholdTimeReached)
                        return;

                    if (doc.Value<DateTime>("ProcessedAt") >= currentExpiryThresholdTime)
                    {
                        documentWithCurrentThresholdTimeReached = true;
                        cts.Cancel();
                        return;
                    }

                    var id = doc.Value<string>("__document_id");
                    if (!string.IsNullOrEmpty(id))
                    {
                        items.Add(new DeleteCommandData { Key = id });
                    }
                });
            }
            catch (OperationCanceledException)
            {
                // Ignore
            }

            logger.Debug("Batching deletion of {0} documents.", items.Count);

            docsToExpire += items.Count;
            var results = Database.Batch(items.ToArray());
            deletionCount = results.Count(x => x.Deleted == true);
            items.Clear();
        }

        if (docsToExpire == 0)
        {
            logger.Debug("No expired documents found");
        }
        else
        {
            logger.Debug("Deleted {0} out of {1} expired documents batch - Execution time:{2}ms", deletionCount, docsToExpire, stopwatch.ElapsedMilliseconds);
        }
    }
    catch (Exception e)
    {
        logger.ErrorException("Error when trying to find expired documents", e);
    }
}
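// The expiry pass above links the database-wide cancellation token with a local
// CancellationTokenSource, so the query stops either when the database shuts down
// or when the first not-yet-expired document is reached (cts.Cancel()). A
// self-contained illustration of how linked tokens behave:
using System;
using System.Threading;

public static class LinkedCancellationDemo
{
    public static void Main()
    {
        var databaseWide = new CancellationTokenSource(); // stand-in for WorkContext.CancellationToken
        var local = new CancellationTokenSource();        // stand-in for the per-pass cts

        using (var linked = CancellationTokenSource.CreateLinkedTokenSource(databaseWide.Token, local.Token))
        {
            local.Cancel(); // e.g. the threshold document was reached

            Console.WriteLine(linked.Token.IsCancellationRequested);       // True: linked observes both sources
            Console.WriteLine(databaseWide.Token.IsCancellationRequested); // False: the other source is untouched
        }
    }
}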
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    using (var streamWriter = new StreamWriter(stream))
    using (var writer = new JsonTextWriter(streamWriter))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var processedDocuments = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            bool isPrefixCriteria = !string.IsNullOrWhiteSpace(criteria.KeyStartsWith);

            Func<JsonDocument, bool> addDocument = doc =>
            {
                processedDocuments++;
                timeout.Delay();

                // We can't continue because we have already maxed out the batch size in bytes.
                if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                    return false;

                // We can't continue because we have already maxed out the number of documents to send.
                if (batchDocCount >= options.MaxDocCount)
                    return false;

                // We can continue because we are ignoring system documents.
                if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                    return true;

                // We can continue because we are ignoring the document as it doesn't fit the criteria.
                if (MatchCriteria(criteria, doc) == false)
                    return true;

                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);

                batchSize += doc.SerializedSizeOnDisk;
                batchDocCount++;

                return true; // We get the next document
            };

            int retries = 0;
            do
            {
                int lastIndex = processedDocuments;

                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                    {
                        if (isPrefixCriteria)
                        {
                            // If we don't get any document from GetDocumentsWithIdStartingWith, it could be
                            // that we are looking at a long stretch of uninteresting documents and hitting a timeout.
                            lastProcessedDocEtag = Database.Documents.GetDocumentsWithIdStartingWith(criteria.KeyStartsWith, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            hasMoreDocs = false;
                        }
                        else
                        {
                            // Whether or not we match the criteria, the document has already been processed.
                            lastProcessedDocEtag = Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, addDocument);

                            // If we don't get any document from GetDocuments, it may be a signal that something is wrong.
                            if (lastProcessedDocEtag == null)
                            {
                                hasMoreDocs = false;
                            }
                            else
                            {
                                var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                                hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                                startEtag = lastProcessedDocEtag;
                            }

                            retries = lastIndex == batchDocCount ? retries : 0;
                        }
                    }
                });

                if (lastIndex == processedDocuments)
                {
                    if (retries == 3)
                    {
                        log.Warn("Subscription processing did not end up replicating any documents for 3 times in a row, stopping operation");
                    }
                    else
                    {
                        log.Warn("Subscription processing did not end up replicating any documents, due to possible storage error, retry number: {0}", retries);
                    }

                    retries++;
                }
            }
            while (retries < 3 && hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0 || isPrefixCriteria)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
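// The subscription loop above only burns a retry when a whole pass streams nothing
// (lastIndex == processedDocuments) and gives up after three such passes in a row,
// treating them as a possible storage error. A stripped-down sketch of that
// progress-gated retry shape; the names here are hypothetical:
using System;

public static class ProgressGatedRetrySketch
{
    // processBatch returns the number of items handled in one pass;
    // hasMoreWork reports whether another pass could still find anything.
    public static int Drain(Func<int> processBatch, Func<bool> hasMoreWork, int maxRetries = 3)
    {
        var total = 0;
        var retries = 0;
        do
        {
            var processed = processBatch();
            if (processed == 0)
            {
                retries++;   // an empty pass counts against the retry budget
            }
            else
            {
                retries = 0; // any progress resets the budget
                total += processed;
            }
        }
        while (retries < maxRetries && hasMoreWork());
        return total;
    }
}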
private void StreamToClient(long id, SubscriptionActions subscriptions, Stream stream)
{
    var sentDocuments = false;

    using (var streamWriter = new StreamWriter(stream))
    using (var writer = new JsonTextWriter(streamWriter))
    {
        var options = subscriptions.GetBatchOptions(id);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        {
            Etag lastProcessedDocEtag = null;

            var batchSize = 0;
            var batchDocCount = 0;
            var hasMoreDocs = false;

            var config = subscriptions.GetSubscriptionConfig(id);
            var startEtag = config.AckEtag;
            var criteria = config.Criteria;

            do
            {
                Database.TransactionalStorage.Batch(accessor =>
                {
                    // we may be sending a LOT of documents to the user, and most
                    // of them aren't going to be relevant for other ops, so we are going to skip
                    // the cache for that, to avoid filling it up very quickly
                    using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                    {
                        Database.Documents.GetDocuments(-1, options.MaxDocCount - batchDocCount, startEtag, cts.Token, doc =>
                        {
                            timeout.Delay();

                            if (options.MaxSize.HasValue && batchSize >= options.MaxSize)
                                return;

                            if (batchDocCount >= options.MaxDocCount)
                                return;

                            lastProcessedDocEtag = doc.Etag;

                            if (doc.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase))
                                return;

                            if (MatchCriteria(criteria, doc) == false)
                                return;

                            doc.ToJson().WriteTo(writer);
                            writer.WriteRaw(Environment.NewLine);

                            batchSize += doc.SerializedSizeOnDisk;
                            batchDocCount++;
                        });
                    }

                    if (lastProcessedDocEtag == null)
                    {
                        hasMoreDocs = false;
                    }
                    else
                    {
                        var lastDocEtag = accessor.Staleness.GetMostRecentDocumentEtag();
                        hasMoreDocs = EtagUtil.IsGreaterThan(lastDocEtag, lastProcessedDocEtag);

                        startEtag = lastProcessedDocEtag;
                    }
                });
            }
            while (hasMoreDocs && batchDocCount < options.MaxDocCount && (options.MaxSize.HasValue == false || batchSize < options.MaxSize));

            writer.WriteEndArray();

            if (batchDocCount > 0)
            {
                writer.WritePropertyName("LastProcessedEtag");
                writer.WriteValue(lastProcessedDocEtag.ToString());

                sentDocuments = true;
            }

            writer.WriteEndObject();
            writer.Flush();
        }
    }

    if (sentDocuments)
        subscriptions.UpdateBatchSentTime(id);
}
private void StreamToClient(Stream stream, string startsWith, int start, int pageSize, Etag etag, string matches, int nextPageStart, string skipAfter, Lazy<NameValueCollection> headers, IPrincipal user)
{
    var old = CurrentOperationContext.Headers.Value;
    var oldUser = CurrentOperationContext.User.Value;
    try
    {
        CurrentOperationContext.Headers.Value = headers;
        CurrentOperationContext.User.Value = user;

        var bufferStream = new BufferedStream(stream, 1024 * 64);
        using (var cts = new CancellationTokenSource())
        using (var timeout = cts.TimeoutAfter(DatabasesLandlord.SystemConfiguration.DatabaseOperationTimeout))
        using (var writer = new JsonTextWriter(new StreamWriter(bufferStream)))
        {
            writer.WriteStartObject();
            writer.WritePropertyName("Results");
            writer.WriteStartArray();

            Action<JsonDocument> addDocument = doc =>
            {
                timeout.Delay();
                doc.ToJson().WriteTo(writer);
                writer.WriteRaw(Environment.NewLine);
            };

            Database.TransactionalStorage.Batch(accessor =>
            {
                // we may be sending a LOT of documents to the user, and most
                // of them aren't going to be relevant for other ops, so we are going to skip
                // the cache for that, to avoid filling it up very quickly
                using (DocumentCacher.SkipSettingDocumentsInDocumentCache())
                {
                    if (string.IsNullOrEmpty(startsWith))
                    {
                        Database.Documents.GetDocuments(start, pageSize, etag, cts.Token, addDocument);
                    }
                    else
                    {
                        var nextPageStartInternal = nextPageStart;

                        Database.Documents.GetDocumentsWithIdStartingWith(startsWith, matches, null, start, pageSize, cts.Token, ref nextPageStartInternal, addDocument, skipAfter: skipAfter);

                        nextPageStart = nextPageStartInternal;
                    }
                }
            });

            writer.WriteEndArray();
            writer.WritePropertyName("NextPageStart");
            writer.WriteValue(nextPageStart);
            writer.WriteEndObject();

            writer.Flush();
            bufferStream.Flush();
        }
    }
    finally
    {
        CurrentOperationContext.Headers.Value = old;
        CurrentOperationContext.User.Value = oldUser;
    }
}
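// The method above manually saves and restores the CurrentOperationContext values
// in a try/finally, so the streaming work runs with the caller's headers and
// principal while the previous values always come back, even on exceptions. The
// same save/restore can be packaged as a disposable scope; a hypothetical sketch
// (AmbientScope is not a RavenDB type, and the ThreadLocal<T> signature is an
// assumption about how such ambient values are stored):
using System;
using System.Threading;

public static class AmbientScope
{
    public static IDisposable Swap<T>(ThreadLocal<T> ambient, T newValue)
    {
        var old = ambient.Value;
        ambient.Value = newValue;
        return new RestoreOnDispose<T>(ambient, old);
    }

    private sealed class RestoreOnDispose<T> : IDisposable
    {
        private readonly ThreadLocal<T> ambient;
        private readonly T old;

        public RestoreOnDispose(ThreadLocal<T> ambient, T old)
        {
            this.ambient = ambient;
            this.old = old;
        }

        public void Dispose()
        {
            ambient.Value = old; // same guarantee as the finally block above
        }
    }
}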