public Etag GetDocuments(int start, int pageSize, Etag etag, CancellationToken token, Func<JsonDocument, bool> addDocument)
{
    Etag lastDocumentReadEtag = null;

    TransactionalStorage.Batch(actions =>
    {
        bool returnedDocs = false;
        while (true)
        {
            var documents = etag == null
                ? actions.Documents.GetDocumentsByReverseUpdateOrder(start, pageSize)
                : actions.Documents.GetDocumentsAfter(etag, pageSize, token);

            var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers, Database.InFlightTransactionalState);
            int docCount = 0;

            foreach (var doc in documents)
            {
                docCount++;
                token.ThrowIfCancellationRequested();

                if (etag != null)
                    etag = doc.Etag;

                JsonDocument.EnsureIdInMetadata(doc);

                var nonAuthoritativeInformationBehavior = Database.InFlightTransactionalState.GetNonAuthoritativeInformationBehavior<JsonDocument>(null, doc.Key);
                var document = nonAuthoritativeInformationBehavior == null ? doc : nonAuthoritativeInformationBehavior(doc);
                document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load);
                if (document == null)
                    continue;

                returnedDocs = true;
                Database.WorkContext.UpdateFoundWork();

                bool canContinue = addDocument(document);
                if (!canContinue)
                    break;

                lastDocumentReadEtag = etag;
            }

            if (returnedDocs || docCount == 0)
                break;

            start += docCount;
        }
    });

    return lastDocumentReadEtag;
}
public JsonDocumentMetadata GetDocumentMetadata(string key, TransactionInformation transactionInformation)
{
    if (key == null)
        throw new ArgumentNullException("key");

    key = key.Trim();

    JsonDocumentMetadata document = null;
    if (transactionInformation == null ||
        Database.InFlightTransactionalState.TryGet(key, transactionInformation, out document) == false)
    {
        TransactionalStorage.Batch(actions =>
        {
            var nonAuthoritativeInformationBehavior = actions.InFlightStateSnapshot.GetNonAuthoritativeInformationBehavior<JsonDocumentMetadata>(transactionInformation, key);

            document = actions.Documents.DocumentMetadataByKey(key);

            if (nonAuthoritativeInformationBehavior != null)
                document = nonAuthoritativeInformationBehavior(document);
        });
    }

    JsonDocument.EnsureIdInMetadata(document);
    return new DocumentRetriever(null, null, Database.ReadTriggers)
        .ProcessReadVetoes(document, transactionInformation, ReadOperation.Load);
}
public JsonDocument Get(string key, TransactionInformation transactionInformation)
{
    if (key == null)
        throw new ArgumentNullException("key");

    key = key.Trim();

    JsonDocument document = null;
    if (transactionInformation == null ||
        Database.InFlightTransactionalState.TryGet(key, transactionInformation, out document) == false)
    {
        // first we check the dtc state, then the storage, to avoid race conditions
        var nonAuthoritativeInformationBehavior = Database.InFlightTransactionalState.GetNonAuthoritativeInformationBehavior<JsonDocument>(transactionInformation, key);

        TransactionalStorage.Batch(actions =>
        {
            document = actions.Documents.DocumentByKey(key);
        });

        if (nonAuthoritativeInformationBehavior != null)
            document = nonAuthoritativeInformationBehavior(document);
    }

    JsonDocument.EnsureIdInMetadata(document);
    return new DocumentRetriever(null, null, Database.ReadTriggers, Database.InFlightTransactionalState)
        .ExecuteReadTriggers(document, transactionInformation, ReadOperation.Load);
}
public Task<RavenJObject> ReadDocumentAsync(string key, CancellationToken cancellationToken)
{
    var document = _database.Documents.Get(key);
    if (document == null)
        return new CompletedTask<RavenJObject>((RavenJObject)null);

    JsonDocument.EnsureIdInMetadata(document);
    return new CompletedTask<RavenJObject>(document.ToJson());
}
public async Task<RavenJObject> ReadDocumentAsync(string key, CancellationToken cancellationToken)
{
    var document = await _store
        .AsyncDatabaseCommands
        .GetAsync(key, cancellationToken)
        .ConfigureAwait(false);

    if (document == null)
        return null;

    JsonDocument.EnsureIdInMetadata(document);
    return document.ToJson();
}
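The two ReadDocumentAsync variants above implement the same contract in two ways: the embedded one wraps a synchronous lookup in an already-completed task, while the remote one awaits the client call; both propagate null for a missing key. Below is a minimal sketch of that split against a hypothetical IDocumentReader interface, with plain strings standing in for RavenJObject; the names are illustrative, not RavenDB API.

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

// Hypothetical abstraction standing in for the two ReadDocumentAsync shapes above.
public interface IDocumentReader
{
    Task<string> ReadDocumentAsync(string key, CancellationToken cancellationToken);
}

// Embedded flavor: the lookup is synchronous, so an already-completed task is returned
// (the samples use RavenDB's CompletedTask<T>; Task.FromResult is the BCL equivalent).
public class InMemoryDocumentReader : IDocumentReader
{
    private readonly Dictionary<string, string> _docs = new Dictionary<string, string>();

    public Task<string> ReadDocumentAsync(string key, CancellationToken cancellationToken)
    {
        string doc;
        _docs.TryGetValue(key, out doc); // a missing key yields null, as in the samples
        return Task.FromResult(doc);
    }
}

// Remote flavor: the lookup is awaited and null is propagated after the await completes.
public class RemoteDocumentReader : IDocumentReader
{
    private readonly IDocumentReader _inner;

    public RemoteDocumentReader(IDocumentReader inner) { _inner = inner; }

    public async Task<string> ReadDocumentAsync(string key, CancellationToken cancellationToken)
    {
        var doc = await _inner.ReadDocumentAsync(key, cancellationToken).ConfigureAwait(false);
        return doc; // null stays null; the real implementations also normalize metadata here
    }
}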
public JsonDocument Get(string key, TransactionInformation transactionInformation)
{
    if (key == null)
        throw new ArgumentNullException(nameof(key));

    key = key.Trim();

    JsonDocument document = null;
    if (transactionInformation == null ||
        Database.InFlightTransactionalState.TryGet(key, transactionInformation, out document) == false)
    {
        TransactionalStorage.Batch(actions =>
        {
            var nonAuthoritativeInformationBehavior = actions.InFlightStateSnapshot.GetNonAuthoritativeInformationBehavior<JsonDocument>(transactionInformation, key);

            document = actions.Documents.DocumentByKey(key);

            if (nonAuthoritativeInformationBehavior != null)
                document = nonAuthoritativeInformationBehavior(document);

            if (document != null)
            {
                if (document.Metadata.ContainsKey(Constants.RavenReplicationConflict) &&
                    !document.Metadata.ContainsKey(Constants.RavenReplicationConflictDocument))
                {
                    JsonDocument newDocument;
                    Database.ResolveConflict(document, actions, out newDocument);
                    if (newDocument != null)
                        document = newDocument;
                }
            }
        });
    }

    JsonDocument.EnsureIdInMetadata(document);
    return new DocumentRetriever(null, null, Database.ReadTriggers)
        .ExecuteReadTriggers(document, transactionInformation, ReadOperation.Load);
}
private JsonDocument GetDocumentWithCaching(string key)
{
    if (key == null)
        return null;

    // first we check the dtc state, then the cache and the storage, to avoid race conditions
    var nonAuthoritativeInformationBehavior = inFlightTransactionalState.GetNonAuthoritativeInformationBehavior<JsonDocument>(null, key);

    JsonDocument doc;
    if (disableCache || cache.TryGetValue(key, out doc) == false)
        doc = actions.Documents.DocumentByKey(key);

    if (nonAuthoritativeInformationBehavior != null)
        doc = nonAuthoritativeInformationBehavior(doc);

    JsonDocument.EnsureIdInMetadata(doc);

    if (doc != null && doc.Metadata != null)
        doc.Metadata.EnsureCannotBeChangeAndEnableSnapshotting();

    if (disableCache == false && (doc == null || doc.NonAuthoritativeInformation != true))
        cache[key] = doc;

    if (cache.Count > 2048)
    {
        // we are probably doing a stream here, no point in trying to cache things, we might be
        // going through the entire db here!
        disableCache = true;
        cache.Clear();
    }

    return doc;
}
private List<JsonDocument> GetJsonDocsFromDisk(Etag etag, Etag untilEtag)
{
    List<JsonDocument> jsonDocs = null;

    // We take a snapshot because the implementation of accessing Values from a ConcurrentDictionary involves a lock.
    // Taking the snapshot should be safe enough.
    long currentlyUsedBatchSizesInBytes = autoTuner.CurrentlyUsedBatchSizesInBytes.Values.Sum();

    context.TransactionalStorage.Batch(actions =>
    {
        // limit how much data we load from disk --> better adhere to memory limits
        var totalSizeAllowedToLoadInBytes = context.Configuration.DynamicMemoryLimitForProcessing -
                                            (prefetchingQueue.LoadedSize + currentlyUsedBatchSizesInBytes);

        // at any rate, we will load a min of 512Kb docs
        var maxSize = Math.Max(
            Math.Min(totalSizeAllowedToLoadInBytes, autoTuner.MaximumSizeAllowedToFetchFromStorageInBytes),
            1024 * 512);

        jsonDocs = actions.Documents
            .GetDocumentsAfter(
                etag,
                autoTuner.NumberOfItemsToProcessInSingleBatch,
                context.CancellationToken,
                maxSize,
                untilEtag,
                autoTuner.FetchingDocumentsFromDiskTimeout
            )
            .Where(x => x != null)
            .Select(doc =>
            {
                JsonDocument.EnsureIdInMetadata(doc);
                return doc;
            })
            .ToList();
    });

    if (untilEtag == null)
        MaybeAddFutureBatch(jsonDocs);

    return jsonDocs;
}
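The maxSize computation above clamps the disk fetch between a floor of 512 KB and whatever headroom remains under the configured memory limit (capped by the auto-tuner's ceiling). Here is a small standalone sketch of that arithmetic with illustrative numbers; the parameter names are stand-ins for the configuration and tuner values, not the actual RavenDB properties.

using System;

public static class BatchSizeClamp
{
    // Mirrors the shape of the maxSize calculation in GetJsonDocsFromDisk:
    // never less than 512 KB, never more than the remaining memory budget or the tuner's ceiling.
    public static long ComputeMaxFetchSize(long dynamicMemoryLimit, long alreadyLoaded, long inUseByOtherBatches, long tunerCeiling)
    {
        var headroom = dynamicMemoryLimit - (alreadyLoaded + inUseByOtherBatches);
        return Math.Max(Math.Min(headroom, tunerCeiling), 1024 * 512);
    }

    public static void Main()
    {
        // Illustrative numbers: 256 MB budget, 64 MB already in the prefetch queue,
        // 32 MB held by other in-flight batches, 128 MB tuner ceiling.
        long maxSize = ComputeMaxFetchSize(256L << 20, 64L << 20, 32L << 20, 128L << 20);
        Console.WriteLine(maxSize); // 134217728 (128 MB): the tuner ceiling wins here
    }
}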
public Etag GetDocumentsWithIdStartingWith(string idPrefix, int pageSize, Etag etag, CancellationToken token, Action<JsonDocument> addDocument)
{
    TransactionalStorage.Batch(actions =>
    {
        bool returnedDocs = false;
        while (true)
        {
            var documents = actions.Documents.GetDocumentsAfterWithIdStartingWith(etag, idPrefix, pageSize, token, timeout: TimeSpan.FromSeconds(2));
            var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers, Database.InFlightTransactionalState);
            int docCount = 0;

            foreach (var doc in documents)
            {
                docCount++;
                token.ThrowIfCancellationRequested();

                etag = doc.Etag;

                JsonDocument.EnsureIdInMetadata(doc);

                var nonAuthoritativeInformationBehavior = Database.InFlightTransactionalState.GetNonAuthoritativeInformationBehavior<JsonDocument>(null, doc.Key);
                var document = nonAuthoritativeInformationBehavior == null ? doc : nonAuthoritativeInformationBehavior(doc);
                document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load);
                if (document == null)
                    continue;

                addDocument(document);
                returnedDocs = true;
                Database.WorkContext.UpdateFoundWork();
            }

            if (returnedDocs || docCount == 0)
                break;
        }
    });

    return etag;
}
public void AfterStorageCommitBeforeWorkNotifications(JsonDocument[] docs)
{
    if (context.Configuration.DisableDocumentPreFetching || docs.Length == 0 || DisableCollectingDocumentsAfterCommit)
        return;

    // don't use too much, this is an optimization and we need to be careful about using too much mem
    if (prefetchingQueue.Count >= context.Configuration.MaxNumberOfItemsToPreFetch ||
        prefetchingQueue.LoadedSize > context.Configuration.AvailableMemoryForRaisingBatchSizeLimit)
        return;

    Etag lowestEtag = null;

    using (prefetchingQueue.EnterWriteLock())
    {
        foreach (var jsonDocument in docs)
        {
            JsonDocument.EnsureIdInMetadata(jsonDocument);
            prefetchingQueue.Add(jsonDocument);

            if (ShouldHandleUnusedDocumentsAddedAfterCommit &&
                (lowestEtag == null || jsonDocument.Etag.CompareTo(lowestEtag) < 0))
            {
                lowestEtag = jsonDocument.Etag;
            }
        }
    }

    if (ShouldHandleUnusedDocumentsAddedAfterCommit && lowestEtag != null)
    {
        if (lowestInMemoryDocumentAddedAfterCommit == null ||
            lowestEtag.CompareTo(lowestInMemoryDocumentAddedAfterCommit.Etag) < 0)
        {
            lowestInMemoryDocumentAddedAfterCommit = new DocAddedAfterCommit
            {
                Etag = lowestEtag,
                AddedAt = SystemTime.UtcNow
            };
        }
    }
}
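The write-locked loop above remembers the lowest etag in the committed batch so later cleanup can tell whether anything older is still waiting in memory. A generic sketch of that "remember the minimum seen" pass follows, using IComparable keys instead of RavenDB etags; the types and names are hypothetical.

using System;
using System.Collections.Generic;

public static class MinimumTracker
{
    // Returns the smallest key in the batch, or null when the batch is empty,
    // mirroring how AfterStorageCommitBeforeWorkNotifications finds lowestEtag.
    public static T FindLowest<T>(IEnumerable<T> batch) where T : class, IComparable<T>
    {
        T lowest = null;
        foreach (var item in batch)
        {
            if (lowest == null || item.CompareTo(lowest) < 0)
                lowest = item;
        }
        return lowest;
    }

    public static void Main()
    {
        var keys = new List<string> { "docs/07", "docs/03", "docs/11" };
        Console.WriteLine(FindLowest(keys)); // docs/03
    }
}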
public JsonDocumentMetadata GetDocumentMetadata(string key)
{
    if (key == null)
        throw new ArgumentNullException("key");

    key = key.Trim();

    JsonDocumentMetadata document = null;
    TransactionalStorage.Batch(actions =>
    {
        document = actions.Documents.DocumentMetadataByKey(key);
    });

    JsonDocument.EnsureIdInMetadata(document);
    return new DocumentRetriever(null, null, Database.ReadTriggers)
        .ProcessReadVetoes(document, ReadOperation.Load);
}
private JsonDocument GetDocumentWithCaching(string key)
{
    if (key == null)
        return null;

    JsonDocument doc;
    if (disableCache == false && cache.TryGetValue(key, out doc))
        return doc;

    doc = actions.Documents.DocumentByKey(key);
    JsonDocument.EnsureIdInMetadata(doc);

    if (doc != null && doc.Metadata != null)
        doc.Metadata.EnsureCannotBeChangeAndEnableSnapshotting();

    if (disableCache == false)
        cache[key] = doc;

    if (cache.Count > 2048)
    {
        // we are probably doing a stream here, no point in trying to cache things, we might be
        // going through the entire db here!
        disableCache = true;
        cache.Clear();
    }

    return doc;
}
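Both GetDocumentWithCaching variants cap the per-batch cache at 2048 entries and switch to pass-through mode once that limit is crossed, on the assumption that a whole-database scan is in progress. The sketch below isolates that self-disabling cache as a generic class; the type and member names are illustrative, not RavenDB types.

using System;
using System.Collections.Generic;

// A lookup cache that turns itself off once it grows past a threshold,
// mirroring the disableCache / cache.Clear() behavior in GetDocumentWithCaching.
public class SelfDisablingCache<TKey, TValue>
{
    private readonly Dictionary<TKey, TValue> cache = new Dictionary<TKey, TValue>();
    private readonly Func<TKey, TValue> load;
    private readonly int maxEntries;
    private bool disableCache;

    public SelfDisablingCache(Func<TKey, TValue> load, int maxEntries = 2048)
    {
        this.load = load;
        this.maxEntries = maxEntries;
    }

    public TValue Get(TKey key)
    {
        TValue value;
        if (disableCache == false && cache.TryGetValue(key, out value))
            return value;

        value = load(key);

        if (disableCache == false)
            cache[key] = value;

        if (cache.Count > maxEntries)
        {
            // probably a streaming scan; caching individual items no longer pays off
            disableCache = true;
            cache.Clear();
        }

        return value;
    }
}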
private async Task <int> ImportDocuments(JsonTextReader jsonReader) { var now = SystemTime.UtcNow; var count = 0; string continuationDocId = "Raven/Smuggler/Continuation/" + Options.ContinuationToken; var state = new OperationState { FilePath = Options.ContinuationToken, LastDocsEtag = Options.StartDocsEtag, }; JsonDocument lastEtagsDocument = null; if (Options.UseContinuationFile) { lastEtagsDocument = Operations.GetDocument(continuationDocId); if (lastEtagsDocument == null) { lastEtagsDocument = new JsonDocument() { Key = continuationDocId, Etag = Etag.Empty, DataAsJson = RavenJObject.FromObject(state) }; } else { state = lastEtagsDocument.DataAsJson.JsonDeserialization <OperationState>(); } JsonDocument.EnsureIdInMetadata(lastEtagsDocument); } int skippedDocuments = 0; long skippedDocumentsSize = 0; Etag tempLastEtag = Etag.Empty; while (jsonReader.Read() && jsonReader.TokenType != JsonToken.EndArray) { Options.CancelToken.Token.ThrowIfCancellationRequested(); var document = (RavenJObject)RavenJToken.ReadFrom(jsonReader); var size = DocumentHelpers.GetRoughSize(document); if (size > 1024 * 1024) { Console.WriteLine("Large document warning: {0:#,#.##;;0} kb - {1}", (double)size / 1024, document["@metadata"].Value <string>("@id")); } if ((Options.OperateOnTypes & ItemType.Documents) != ItemType.Documents) { continue; } if (Options.MatchFilters(document) == false) { continue; } if (Options.ShouldExcludeExpired && Options.ExcludeExpired(document, now)) { continue; } if (!string.IsNullOrEmpty(Options.TransformScript)) { document = await Operations.TransformDocument(document, Options.TransformScript); } // If document is null after a transform we skip it. if (document == null) { continue; } var metadata = document["@metadata"] as RavenJObject; if (metadata != null) { if (Options.SkipConflicted && metadata.ContainsKey(Constants.RavenReplicationConflictDocument)) { continue; } if (Options.StripReplicationInformation) { document["@metadata"] = Operations.StripReplicationInformationFromMetadata(metadata); } } if (Options.UseContinuationFile) { tempLastEtag = Etag.Parse(document.Value <RavenJObject>("@metadata").Value <string>("@etag")); if (tempLastEtag.CompareTo(state.LastDocsEtag) <= 0) // tempLastEtag < lastEtag therefore we are skipping. { skippedDocuments++; skippedDocumentsSize += size; continue; } } await Operations.PutDocument(document, (int)size); count++; if (count % Options.BatchSize == 0) { if (Options.UseContinuationFile) { if (tempLastEtag.CompareTo(state.LastDocsEtag) > 0) { state.LastDocsEtag = tempLastEtag; } await WriteLastEtagToDatabase(state, lastEtagsDocument); } // Wait for the batch to be indexed before continue. if (Options.WaitForIndexing) { await WaitForIndexingAsOfLastWrite(); } Operations.ShowProgress("Read {0:#,#;;0} documents", count + skippedDocuments); } } if (Options.UseContinuationFile) { if (tempLastEtag.CompareTo(state.LastDocsEtag) > 0) { state.LastDocsEtag = tempLastEtag; } await WriteLastEtagToDatabase(state, lastEtagsDocument); Operations.ShowProgress("Documents skipped by continuation {0:#,#;;0} - approx. {1:#,#.##;;0} Mb.", skippedDocuments, (double)skippedDocumentsSize / 1024 / 1024); } await Operations.PutDocument(null, -1); // force flush return(count); }
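ImportDocuments above persists its progress in a continuation document and skips anything at or below the last recorded etag, so an interrupted import can resume without re-writing documents it has already stored. Here is a simplified sketch of that checkpoint-and-skip loop, with long sequence numbers standing in for etags; the store delegates and names are hypothetical.

using System;
using System.Collections.Generic;

public class ContinuationState
{
    public long LastImportedSequence;
}

public static class ResumableImport
{
    // Skips items already covered by the checkpoint and saves the checkpoint every batchSize items,
    // loosely mirroring the UseContinuationFile branch of ImportDocuments.
    public static int Import(IEnumerable<KeyValuePair<long, string>> items,
                             ContinuationState state,
                             Action<string> put,
                             Action<ContinuationState> saveCheckpoint,
                             int batchSize = 128)
    {
        var imported = 0;
        var highestSeen = state.LastImportedSequence;

        foreach (var item in items)
        {
            if (item.Key <= state.LastImportedSequence)
                continue; // already imported in a previous run

            put(item.Value);
            highestSeen = Math.Max(highestSeen, item.Key);
            imported++;

            if (imported % batchSize == 0)
            {
                state.LastImportedSequence = highestSeen;
                saveCheckpoint(state);
            }
        }

        state.LastImportedSequence = highestSeen;
        saveCheckpoint(state); // final flush, like the tail of ImportDocuments
        return imported;
    }
}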
public RelationalDatabaseWriter.TableQuerySummary[] SimulateSqlReplicationSqlQueries(string strDocumentId, SqlReplicationConfig sqlReplication, bool performRolledbackTransaction, out Alert alert) { RelationalDatabaseWriter.TableQuerySummary[] resutls = null; try { var stats = new SqlReplicationStatistics(sqlReplication.Name, false); var jsonDocument = Database.Documents.Get(strDocumentId, null); JsonDocument.EnsureIdInMetadata(jsonDocument); var doc = jsonDocument.ToJson(); doc[Constants.DocumentIdFieldName] = jsonDocument.Key; var docs = new List <ReplicatedDoc> { new ReplicatedDoc { Document = doc, Etag = jsonDocument.Etag, Key = jsonDocument.Key, SerializedSizeOnDisk = jsonDocument.SerializedSizeOnDisk } }; var scriptResult = ApplyConversionScript(sqlReplication, docs, stats); var sqlReplicationConnections = Database.ConfigurationRetriever.GetConfigurationDocument <SqlReplicationConnections <SqlReplicationConnections.PredefinedSqlConnectionWithConfigurationOrigin> >(Constants.SqlReplication.SqlReplicationConnectionsDocumentName); if (PrepareSqlReplicationConfig(sqlReplication, sqlReplication.Name, stats, sqlReplicationConnections.MergedDocument, false, false)) { if (performRolledbackTransaction) { using (var writer = new RelationalDatabaseWriter(Database, sqlReplication, stats)) { resutls = writer.RolledBackExecute(scriptResult).ToArray(); } } else { var simulatedwriter = new RelationalDatabaseWriterSimulator(Database, sqlReplication, stats); resutls = new List <RelationalDatabaseWriter.TableQuerySummary> { new RelationalDatabaseWriter.TableQuerySummary { Commands = simulatedwriter.SimulateExecuteCommandText(scriptResult) .Select(x => new RelationalDatabaseWriter.TableQuerySummary.CommandData { CommandText = x }).ToArray() } }.ToArray(); } } alert = stats.LastAlert; } catch (Exception e) { alert = new Alert { AlertLevel = AlertLevel.Error, CreatedAt = SystemTime.UtcNow, Message = "Last SQL replication operation for " + sqlReplication.Name + " was failed", Title = "SQL replication error", Exception = e.ToString(), UniqueKey = "Sql Replication Error: " + sqlReplication.Name }; } return(resutls); }
private void BackgroundSqlReplication() { int workCounter = 0; while (Database.WorkContext.DoWork) { IsRunning = !IsHotSpare() && !shouldPause; if (!IsRunning) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } var config = GetConfiguredReplicationDestinations(); if (config.Count == 0) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } var localReplicationStatus = GetReplicationStatus(); // remove all last replicated statuses which are not in the config UpdateLastReplicatedStatus(localReplicationStatus, config); var relevantConfigs = config.Where(x => { if (x.Disabled) { return(false); } var sqlReplicationStatistics = statistics.GetOrDefault(x.Name); if (sqlReplicationStatistics == null) { return(true); } return(SystemTime.UtcNow >= sqlReplicationStatistics.SuspendUntil); }) // have error or the timeout expired .ToList(); var configGroups = SqlReplicationClassifier.GroupConfigs(relevantConfigs, c => GetLastEtagFor(localReplicationStatus, c)); if (configGroups.Count == 0) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } var usedPrefetchers = new ConcurrentSet <PrefetchingBehavior>(); var groupedConfigs = configGroups .Select(x => { var result = new SqlConfigGroup { LastReplicatedEtag = x.Key, ConfigsToWorkOn = x.Value }; SetPrefetcherForIndexingGroup(result, usedPrefetchers); return(result); }) .ToList(); var successes = new ConcurrentQueue <Tuple <SqlReplicationConfigWithLastReplicatedEtag, Etag> >(); var waitForWork = new bool[groupedConfigs.Count]; try { BackgroundTaskExecuter.Instance.ExecuteAll(Database.WorkContext, groupedConfigs, (sqlConfigGroup, i) => { Database.WorkContext.CancellationToken.ThrowIfCancellationRequested(); var prefetchingBehavior = sqlConfigGroup.PrefetchingBehavior; var configsToWorkOn = sqlConfigGroup.ConfigsToWorkOn; List <JsonDocument> documents; var entityNamesToIndex = new HashSet <string>(configsToWorkOn.Select(x => x.RavenEntityName), StringComparer.OrdinalIgnoreCase); using (prefetchingBehavior.DocumentBatchFrom(sqlConfigGroup.LastReplicatedEtag, out documents, entityNamesToIndex)) { Etag latestEtag = null, lastBatchEtag = null; if (documents.Count != 0) { lastBatchEtag = documents[documents.Count - 1].Etag; } var replicationDuration = Stopwatch.StartNew(); documents.RemoveAll(x => x.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase)); // we ignore system documents here if (documents.Count != 0) { latestEtag = documents[documents.Count - 1].Etag; } documents.RemoveAll(x => prefetchingBehavior.FilterDocuments(x) == false); var deletedDocsByConfig = new Dictionary <SqlReplicationConfig, List <ListItem> >(); foreach (var configToWorkOn in configsToWorkOn) { var cfg = configToWorkOn; Database.TransactionalStorage.Batch(accessor => { deletedDocsByConfig[cfg] = accessor.Lists.Read(GetSqlReplicationDeletionName(cfg), cfg.LastReplicatedEtag, latestEtag, MaxNumberOfDeletionsToReplicate + 1) .ToList(); }); } // No documents AND there aren't any deletes to replicate if (documents.Count == 0 && deletedDocsByConfig.Sum(x => x.Value.Count) == 0) { // so we filtered some documents, let us update the etag about that. 
if (latestEtag != null) { foreach (var configToWorkOn in configsToWorkOn) { successes.Enqueue(Tuple.Create(configToWorkOn, latestEtag)); } } else { waitForWork[i] = true; } return; } var itemsToReplicate = documents.Select(x => { JsonDocument.EnsureIdInMetadata(x); var doc = x.ToJson(); doc[Constants.DocumentIdFieldName] = x.Key; return(new ReplicatedDoc { Document = doc, Etag = x.Etag, Key = x.Key, SerializedSizeOnDisk = x.SerializedSizeOnDisk }); }).ToList(); try { BackgroundTaskExecuter.Instance.ExecuteAllInterleaved(Database.WorkContext, configsToWorkOn, replicationConfig => { try { var startTime = SystemTime.UtcNow; var spRepTime = new Stopwatch(); spRepTime.Start(); var lastReplicatedEtag = replicationConfig.LastReplicatedEtag; var deletedDocs = deletedDocsByConfig[replicationConfig]; var docsToReplicate = itemsToReplicate .Where(x => lastReplicatedEtag.CompareTo(x.Etag) < 0) // haven't replicate the etag yet .Where(document => { var info = Database.Documents.GetRecentTouchesFor(document.Key); if (info != null) { if (info.TouchedEtag.CompareTo(lastReplicatedEtag) > 0) { if (Log.IsDebugEnabled) { Log.Debug( "Will not replicate document '{0}' to '{1}' because the updates after etag {2} are related document touches", document.Key, replicationConfig.Name, info.TouchedEtag); } return(false); } } return(true); }); if (deletedDocs.Count >= MaxNumberOfDeletionsToReplicate + 1) { docsToReplicate = docsToReplicate.Where(x => EtagUtil.IsGreaterThan(x.Etag, deletedDocs[deletedDocs.Count - 1].Etag) == false); } var docsToReplicateAsList = docsToReplicate.ToList(); var currentLatestEtag = HandleDeletesAndChangesMerging(deletedDocs, docsToReplicateAsList); if (currentLatestEtag == null && itemsToReplicate.Count > 0 && docsToReplicateAsList.Count == 0) { currentLatestEtag = lastBatchEtag; } int countOfReplicatedItems = 0; if (ReplicateDeletionsToDestination(replicationConfig, deletedDocs) && ReplicateChangesToDestination(replicationConfig, docsToReplicateAsList, out countOfReplicatedItems)) { if (deletedDocs.Count > 0) { Database.TransactionalStorage.Batch(accessor => accessor.Lists.RemoveAllBefore(GetSqlReplicationDeletionName(replicationConfig), deletedDocs[deletedDocs.Count - 1].Etag)); } successes.Enqueue(Tuple.Create(replicationConfig, currentLatestEtag)); } spRepTime.Stop(); var elapsedMicroseconds = (long)(spRepTime.ElapsedTicks * SystemTime.MicroSecPerTick); var sqlReplicationMetricsCounters = GetSqlReplicationMetricsManager(replicationConfig); sqlReplicationMetricsCounters.SqlReplicationBatchSizeMeter.Mark(countOfReplicatedItems); sqlReplicationMetricsCounters.SqlReplicationBatchSizeHistogram.Update(countOfReplicatedItems); sqlReplicationMetricsCounters.SqlReplicationDurationHistogram.Update(elapsedMicroseconds); UpdateReplicationPerformance(replicationConfig, startTime, spRepTime.Elapsed, docsToReplicateAsList.Count); } catch (Exception e) { Log.WarnException("Error while replication to SQL destination: " + replicationConfig.Name, e); Database.AddAlert(new Alert { AlertLevel = AlertLevel.Error, CreatedAt = SystemTime.UtcNow, Exception = e.ToString(), Title = "Sql Replication failure to replication", Message = "Sql Replication could not replicate to " + replicationConfig.Name, UniqueKey = "Sql Replication could not replicate to " + replicationConfig.Name }); } }); } finally { prefetchingBehavior.CleanupDocuments(lastBatchEtag); prefetchingBehavior.UpdateAutoThrottler(documents, replicationDuration.Elapsed); } } }); if (successes.Count == 0) { if (waitForWork.All(x => x)) { 
Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); } continue; } foreach (var t in successes) { var cfg = t.Item1; var currentLatestEtag = t.Item2; //If a reset was requested we don't want to update the last replicated etag. //If we do register the success the reset will become a noop. bool isReset; if (ResetRequested.TryGetValue(t.Item1.Name, out isReset) && isReset) { continue; } var destEtag = localReplicationStatus.LastReplicatedEtags.FirstOrDefault(x => string.Equals(x.Name, cfg.Name, StringComparison.InvariantCultureIgnoreCase)); if (destEtag == null) { localReplicationStatus.LastReplicatedEtags.Add(new LastReplicatedEtag { Name = cfg.Name, LastDocEtag = currentLatestEtag ?? Etag.Empty }); } else { var lastDocEtag = destEtag.LastDocEtag; if (currentLatestEtag != null && EtagUtil.IsGreaterThan(currentLatestEtag, lastDocEtag)) { lastDocEtag = currentLatestEtag; } destEtag.LastDocEtag = lastDocEtag; } } //We are done recording success for this batch so we can clear the reset dictionary ResetRequested.Clear(); SaveNewReplicationStatus(localReplicationStatus); } finally { AfterReplicationCompleted(successes.Count); RemoveUnusedPrefetchers(usedPrefetchers); } } }
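After each batch, the successes loop above folds every destination's newly replicated etag into localReplicationStatus, only ever moving a marker forward (and adding a marker for destinations seen for the first time). A standalone sketch of that merge rule follows, with long sequence numbers standing in for etags; the types are illustrative.

using System;
using System.Collections.Generic;

public static class ReplicationStatusMerge
{
    // Advances each destination's last-replicated marker, never moving it backwards,
    // mirroring the successes loop at the end of BackgroundSqlReplication.
    public static void RecordSuccesses(IDictionary<string, long> lastReplicated,
                                       IEnumerable<Tuple<string, long>> successes)
    {
        foreach (var success in successes)
        {
            long current;
            if (lastReplicated.TryGetValue(success.Item1, out current) == false)
                lastReplicated[success.Item1] = success.Item2;
            else if (success.Item2 > current)
                lastReplicated[success.Item1] = success.Item2;
        }
    }

    public static void Main()
    {
        var status = new Dictionary<string, long> { { "OrdersToSql", 40 } };
        RecordSuccesses(status, new[] { Tuple.Create("OrdersToSql", 37L), Tuple.Create("InvoicesToSql", 52L) });
        Console.WriteLine(status["OrdersToSql"]);   // 40 (a stale success does not move the marker back)
        Console.WriteLine(status["InvoicesToSql"]); // 52 (a new destination gets its first marker)
    }
}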
private List <JsonDocument> GetJsonDocsFromDisk(Etag etag, Etag untilEtag, Reference <bool> earlyExit = null) { List <JsonDocument> jsonDocs = null; // We take an snapshot because the implementation of accessing Values from a ConcurrentDictionary involves a lock. // Taking the snapshot should be safe enough. var currentlyUsedBatchSizesInBytes = Size.Sum(autoTuner.CurrentlyUsedBatchSizesInBytes.Values); using (DocumentCacher.SkipSetDocumentsInDocumentCache()) context.TransactionalStorage.Batch(actions => { //limit how much data we load from disk --> better adhere to memory limits var totalSizeAllowedToLoadInBytes = (context.Configuration.Memory.DynamicLimitForProcessing) - (prefetchingQueue.LoadedSize + currentlyUsedBatchSizesInBytes); // at any rate, we will load a min of 512Kb docs long maxSize = Size.Max(Size.Min(totalSizeAllowedToLoadInBytes, autoTuner.MaximumSizeAllowedToFetchFromStorage), minSizeToLoadDocs).GetValue(SizeUnit.Bytes); var sp = Stopwatch.StartNew(); var totalSize = 0L; var largestDocSize = 0L; string largestDocKey = null; jsonDocs = actions.Documents .GetDocumentsAfter( etag, autoTuner.NumberOfItemsToProcessInSingleBatch, context.CancellationToken, maxSize, untilEtag, autoTuner.FetchingDocumentsFromDiskTimeout, earlyExit: earlyExit ) .Where(x => x != null) .Select(doc => { if (largestDocSize < doc.SerializedSizeOnDisk) { largestDocSize = doc.SerializedSizeOnDisk; largestDocKey = doc.Key; } totalSize += doc.SerializedSizeOnDisk; JsonDocument.EnsureIdInMetadata(doc); return(doc); }) .ToList(); loadTimes.Enqueue(new DiskFetchPerformanceStats { LoadingTimeInMillseconds = sp.ElapsedMilliseconds, NumberOfDocuments = jsonDocs.Count, TotalSize = totalSize, LargestDocSize = largestDocSize, LargestDocKey = largestDocKey }); while (loadTimes.Count > 8) { DiskFetchPerformanceStats _; loadTimes.TryDequeue(out _); } }); return(jsonDocs); }
public Etag GetDocumentsWithIdStartingWith(string idPrefix, int pageSize, Etag etag, CancellationToken token, Func <JsonDocument, bool> addDocument) { Etag lastDocumentReadEtag = null; TransactionalStorage.Batch(actions => { var returnedDocs = false; while (true) { var documents = actions.Documents.GetDocumentsAfterWithIdStartingWith(etag, idPrefix, pageSize, token, timeout: TimeSpan.FromSeconds(2), lastProcessedDocument: x => lastDocumentReadEtag = x); var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers); var docCount = 0; var docCountOnLastAdd = 0; foreach (var doc in documents) { docCount++; if (docCount - docCountOnLastAdd > 1000) { addDocument(null); // heartbeat } token.ThrowIfCancellationRequested(); etag = doc.Etag; JsonDocument.EnsureIdInMetadata(doc); var nonAuthoritativeInformationBehavior = actions.InFlightStateSnapshot.GetNonAuthoritativeInformationBehavior <JsonDocument>(null, doc.Key); var document = nonAuthoritativeInformationBehavior == null ? doc : nonAuthoritativeInformationBehavior(doc); document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load); if (document == null) { continue; } returnedDocs = true; Database.WorkContext.UpdateFoundWork(); var canContinue = addDocument(document); docCountOnLastAdd = docCount; if (!canContinue) { break; } } if (returnedDocs) { break; } // No document was found that matches the requested criteria if (docCount == 0) { // If we had a failure happen, we update the etag as we don't need to process those documents again (no matches there anyways). if (lastDocumentReadEtag != null) { etag = lastDocumentReadEtag; } break; } } }); return(etag); }
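When the prefix scan above finds nothing inside its two-second window, it advances etag to the last document the storage layer actually read (lastDocumentReadEtag), so the next iteration does not rescan documents that were already ruled out. The compact sketch below shows that "advance past what was scanned" retry loop over long sequence numbers; the reader delegate is hypothetical.

using System;
using System.Collections.Generic;
using System.Linq;

public static class ScanWindowRetry
{
    // readWindow(fromExclusive) returns the sequence numbers it scanned in one timed window;
    // isMatch decides which of them count as results. The cursor keeps moving past
    // scanned-but-unmatched items until something matches or the source is exhausted.
    public static List<long> FindNextMatches(long cursor,
                                             Func<long, IReadOnlyList<long>> readWindow,
                                             Func<long, bool> isMatch)
    {
        while (true)
        {
            var scanned = readWindow(cursor);
            if (scanned.Count == 0)
                return new List<long>(); // exhausted: nothing left to scan

            var matches = scanned.Where(isMatch).ToList();
            if (matches.Count > 0)
                return matches;

            cursor = scanned[scanned.Count - 1]; // skip the window we already ruled out
        }
    }
}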
public Etag GetDocuments(int start, int pageSize, Etag etag, CancellationToken token, Func <JsonDocument, bool> addDocument, string transformer = null, Dictionary <string, RavenJToken> transformerParameters = null, long?maxSize = null, TimeSpan?timeout = null) { Etag lastDocumentReadEtag = null; using (DocumentCacher.SkipSetDocumentsInDocumentCache()) TransactionalStorage.Batch(actions => { AbstractTransformer storedTransformer = null; if (transformer != null) { storedTransformer = IndexDefinitionStorage.GetTransformer(transformer); if (storedTransformer == null) { throw new InvalidOperationException("No transformer with the name: " + transformer); } } var returnedDocs = false; while (true) { var documents = etag == null ? actions.Documents.GetDocumentsByReverseUpdateOrder(start, pageSize) : actions.Documents.GetDocumentsAfter(etag, pageSize, token, maxSize: maxSize, timeout: timeout); var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers, transformerParameters); var docCount = 0; var docCountOnLastAdd = 0; foreach (var doc in documents) { docCount++; token.ThrowIfCancellationRequested(); if (docCount - docCountOnLastAdd > 1000) { addDocument(null); // heartbeat } if (etag != null) { etag = doc.Etag; } JsonDocument.EnsureIdInMetadata(doc); var nonAuthoritativeInformationBehavior = actions.InFlightStateSnapshot.GetNonAuthoritativeInformationBehavior <JsonDocument>(null, doc.Key); var document = nonAuthoritativeInformationBehavior == null ? doc : nonAuthoritativeInformationBehavior(doc); document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load); if (document == null) { continue; } returnedDocs = true; Database.WorkContext.UpdateFoundWork(); document = TransformDocumentIfNeeded(document, storedTransformer, documentRetriever); var canContinue = addDocument(document); if (!canContinue) { break; } lastDocumentReadEtag = etag; docCountOnLastAdd = docCount; } if (returnedDocs || docCount == 0) { break; } // No document was found that matches the requested criteria // If we had a failure happen, we update the etag as we don't need to process those documents again (no matches there anyways). if (lastDocumentReadEtag != null) { etag = lastDocumentReadEtag; } start += docCount; } }); return(lastDocumentReadEtag); }
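GetDocuments above (like the prefix variants) pushes a null through addDocument roughly every 1000 scanned-but-filtered documents so a streaming client sees a heartbeat rather than a silent connection. Below is a minimal sketch of that idea over a generic filter loop; it is a simplified variant of the docCount/docCountOnLastAdd bookkeeping, with hypothetical delegate names.

using System;
using System.Collections.Generic;

public static class HeartbeatScan
{
    // Calls emit(item) for every match and emit(null) as a keep-alive whenever more than
    // `interval` items have been scanned since the last emit.
    public static void Scan<T>(IEnumerable<T> source, Func<T, bool> isMatch, Action<T> emit, int interval = 1000)
        where T : class
    {
        var scanned = 0;
        var scannedAtLastEmit = 0;

        foreach (var item in source)
        {
            scanned++;
            if (scanned - scannedAtLastEmit > interval)
            {
                emit(null); // heartbeat: nothing matched recently, but the stream is still alive
                scannedAtLastEmit = scanned;
            }

            if (isMatch(item) == false)
                continue;

            emit(item);
            scannedAtLastEmit = scanned;
        }
    }
}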
public void GetDocumentsWithIdStartingWith(string idPrefix, string matches, string exclude, int start, int pageSize, CancellationToken token, ref int nextStart, Action <JsonDocument> addDoc, string transformer = null, Dictionary <string, RavenJToken> transformerParameters = null, string skipAfter = null) { if (idPrefix == null) { throw new ArgumentNullException("idPrefix"); } idPrefix = idPrefix.Trim(); var canPerformRapidPagination = nextStart > 0 && start == nextStart; var actualStart = canPerformRapidPagination ? start : 0; var addedDocs = 0; var docCountOnLastAdd = 0; var matchedDocs = 0; TransactionalStorage.Batch( actions => { var docsToSkip = canPerformRapidPagination ? 0 : start; int docCount; AbstractTransformer storedTransformer = null; if (transformer != null) { storedTransformer = IndexDefinitionStorage.GetTransformer(transformer); if (storedTransformer == null) { throw new InvalidOperationException("No transformer with the name: " + transformer); } } do { Database.WorkContext.UpdateFoundWork(); docCount = 0; var docs = actions.Documents.GetDocumentsWithIdStartingWith(idPrefix, actualStart, pageSize, string.IsNullOrEmpty(skipAfter) ? null : skipAfter); var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers, transformerParameters); foreach (var doc in docs) { token.ThrowIfCancellationRequested(); docCount++; if (docCount - docCountOnLastAdd > 1000) { addDoc(null); // heartbeat } var keyTest = doc.Key.Substring(idPrefix.Length); if (!WildcardMatcher.Matches(matches, keyTest) || WildcardMatcher.MatchesExclusion(exclude, keyTest)) { continue; } JsonDocument.EnsureIdInMetadata(doc); var nonAuthoritativeInformationBehavior = actions.InFlightStateSnapshot.GetNonAuthoritativeInformationBehavior <JsonDocument>(null, doc.Key); var document = nonAuthoritativeInformationBehavior != null ? nonAuthoritativeInformationBehavior(doc) : doc; document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load); if (document == null) { continue; } matchedDocs++; if (matchedDocs <= docsToSkip) { continue; } token.ThrowIfCancellationRequested(); document = TransformDocumentIfNeeded(document, storedTransformer, documentRetriever); addDoc(document); addedDocs++; docCountOnLastAdd = docCount; if (addedDocs >= pageSize) { break; } } actualStart += pageSize; } while (docCount > 0 && addedDocs < pageSize && actualStart > 0 && actualStart < int.MaxValue); }); if (addedDocs != pageSize) { nextStart = start; // will mark as last page } else if (canPerformRapidPagination) { nextStart = start + matchedDocs; } else { nextStart = actualStart; } }
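The prefix walk above trims the id prefix off each key and then applies an include pattern and an exclusion pattern before a document counts as a match. WildcardMatcher is internal to RavenDB, so the sketch below substitutes a Regex-based matcher purely for illustration; it is an assumption about the semantics, not the actual implementation.

using System;
using System.Text.RegularExpressions;

public static class PrefixKeyFilter
{
    // Converts a ?/* style wildcard into an anchored regex; this stands in for
    // RavenDB's WildcardMatcher and only approximates its behavior.
    private static bool WildcardMatches(string pattern, string input)
    {
        if (string.IsNullOrEmpty(pattern))
            return true; // no pattern means everything matches
        var regex = "^" + Regex.Escape(pattern).Replace(@"\*", ".*").Replace(@"\?", ".") + "$";
        return Regex.IsMatch(input, regex, RegexOptions.IgnoreCase);
    }

    public static bool IsMatch(string key, string idPrefix, string matches, string exclude)
    {
        if (key.StartsWith(idPrefix, StringComparison.OrdinalIgnoreCase) == false)
            return false;

        var keyTest = key.Substring(idPrefix.Length);
        if (WildcardMatches(matches, keyTest) == false)
            return false;
        if (string.IsNullOrEmpty(exclude) == false && WildcardMatches(exclude, keyTest))
            return false;

        return true;
    }

    public static void Main()
    {
        Console.WriteLine(IsMatch("orders/123", "orders/", "1*", null));  // True
        Console.WriteLine(IsMatch("orders/123", "orders/", "1*", "12?")); // False: excluded
    }
}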
private void BackgroundSqlReplication() { int workCounter = 0; while (Database.WorkContext.DoWork) { IsRunning = !shouldPause; if (!IsRunning) { continue; } var config = GetConfiguredReplicationDestinations(); if (config.Count == 0) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } var localReplicationStatus = GetReplicationStatus(); var relevantConfigs = config.Where(x => { if (x.Disabled) { return(false); } var sqlReplicationStatistics = statistics.GetOrDefault(x.Name); if (sqlReplicationStatistics == null) { return(true); } return(SystemTime.UtcNow >= sqlReplicationStatistics.SuspendUntil); }) // have error or the timeout expired .ToList(); if (relevantConfigs.Count == 0) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } var leastReplicatedEtag = GetLeastReplicatedEtag(relevantConfigs, localReplicationStatus); if (leastReplicatedEtag == null) { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); continue; } List <JsonDocument> documents; using (prefetchingBehavior.DocumentBatchFrom(leastReplicatedEtag, out documents)) { Etag latestEtag = null, lastBatchEtag = null; if (documents.Count != 0) { lastBatchEtag = documents[documents.Count - 1].Etag; } var replicationDuration = Stopwatch.StartNew(); documents.RemoveAll(x => x.Key.StartsWith("Raven/", StringComparison.InvariantCultureIgnoreCase)); // we ignore system documents here if (documents.Count != 0) { latestEtag = documents[documents.Count - 1].Etag; } documents.RemoveAll(x => prefetchingBehavior.FilterDocuments(x) == false); var deletedDocsByConfig = new Dictionary <SqlReplicationConfig, List <ListItem> >(); foreach (var relevantConfig in relevantConfigs) { var cfg = relevantConfig; Database.TransactionalStorage.Batch(accessor => { deletedDocsByConfig[cfg] = accessor.Lists.Read(GetSqlReplicationDeletionName(cfg), GetLastEtagFor(localReplicationStatus, cfg), latestEtag, MaxNumberOfDeletionsToReplicate + 1) .ToList(); }); } // No documents AND there aren't any deletes to replicate if (documents.Count == 0 && deletedDocsByConfig.Sum(x => x.Value.Count) == 0) { if (latestEtag != null) { // so we filtered some documents, let us update the etag about that. 
foreach (var lastReplicatedEtag in localReplicationStatus.LastReplicatedEtags) { if (lastReplicatedEtag.LastDocEtag.CompareTo(latestEtag) <= 0) { lastReplicatedEtag.LastDocEtag = latestEtag; } } latestEtag = Etag.Max(latestEtag, lastBatchEtag); SaveNewReplicationStatus(localReplicationStatus); } else // no point in waiting if we just saved a new doc { Database.WorkContext.WaitForWork(TimeSpan.FromMinutes(10), ref workCounter, "Sql Replication"); } continue; } var successes = new ConcurrentQueue <Tuple <SqlReplicationConfig, Etag> >(); try { var itemsToReplicate = documents.Select(x => { JsonDocument.EnsureIdInMetadata(x); var doc = x.ToJson(); doc[Constants.DocumentIdFieldName] = x.Key; return(new ReplicatedDoc { Document = doc, Etag = x.Etag, Key = x.Key, SerializedSizeOnDisk = x.SerializedSizeOnDisk }); }).ToList(); BackgroundTaskExecuter.Instance.ExecuteAllInterleaved(Database.WorkContext, relevantConfigs, replicationConfig => { try { var startTime = SystemTime.UtcNow; Stopwatch spRepTime = new Stopwatch(); spRepTime.Start(); var lastReplicatedEtag = GetLastEtagFor(localReplicationStatus, replicationConfig); var deletedDocs = deletedDocsByConfig[replicationConfig]; var docsToReplicate = itemsToReplicate .Where(x => lastReplicatedEtag.CompareTo(x.Etag) < 0) // haven't replicate the etag yet .Where(document => { var info = Database.Documents.GetRecentTouchesFor(document.Key); if (info != null) { if (info.TouchedEtag.CompareTo(lastReplicatedEtag) > 0) { log.Debug( "Will not replicate document '{0}' to '{1}' because the updates after etag {2} are related document touches", document.Key, replicationConfig.Name, info.TouchedEtag); return(false); } } return(true); }); if (deletedDocs.Count >= MaxNumberOfDeletionsToReplicate + 1) { docsToReplicate = docsToReplicate.Where(x => EtagUtil.IsGreaterThan(x.Etag, deletedDocs[deletedDocs.Count - 1].Etag) == false); } var docsToReplicateAsList = docsToReplicate.ToList(); var currentLatestEtag = HandleDeletesAndChangesMerging(deletedDocs, docsToReplicateAsList); if (currentLatestEtag == null && itemsToReplicate.Count > 0 && docsToReplicateAsList.Count == 0) { currentLatestEtag = lastBatchEtag; } int countOfReplicatedItems = 0; if (ReplicateDeletionsToDestination(replicationConfig, deletedDocs) && ReplicateChangesToDestination(replicationConfig, docsToReplicateAsList, out countOfReplicatedItems)) { if (deletedDocs.Count > 0) { Database.TransactionalStorage.Batch(accessor => accessor.Lists.RemoveAllBefore(GetSqlReplicationDeletionName(replicationConfig), deletedDocs[deletedDocs.Count - 1].Etag)); } successes.Enqueue(Tuple.Create(replicationConfig, currentLatestEtag)); } spRepTime.Stop(); var elapsedMicroseconds = (long)(spRepTime.ElapsedTicks * SystemTime.MicroSecPerTick); var sqlReplicationMetricsCounters = GetSqlReplicationMetricsManager(replicationConfig); sqlReplicationMetricsCounters.SqlReplicationBatchSizeMeter.Mark(countOfReplicatedItems); sqlReplicationMetricsCounters.SqlReplicationBatchSizeHistogram.Update(countOfReplicatedItems); sqlReplicationMetricsCounters.SqlReplicationDurationHistogram.Update(elapsedMicroseconds); UpdateReplicationPerformance(replicationConfig, startTime, spRepTime.Elapsed, docsToReplicateAsList.Count); } catch (Exception e) { log.WarnException("Error while replication to SQL destination: " + replicationConfig.Name, e); Database.AddAlert(new Alert { AlertLevel = AlertLevel.Error, CreatedAt = SystemTime.UtcNow, Exception = e.ToString(), Title = "Sql Replication failure to replication", Message = "Sql Replication could not 
replicate to " + replicationConfig.Name, UniqueKey = "Sql Replication could not replicate to " + replicationConfig.Name }); } }); if (successes.Count == 0) { continue; } foreach (var t in successes) { var cfg = t.Item1; var currentLatestEtag = t.Item2; var destEtag = localReplicationStatus.LastReplicatedEtags.FirstOrDefault(x => string.Equals(x.Name, cfg.Name, StringComparison.InvariantCultureIgnoreCase)); if (destEtag == null) { localReplicationStatus.LastReplicatedEtags.Add(new LastReplicatedEtag { Name = cfg.Name, LastDocEtag = currentLatestEtag ?? Etag.Empty }); } else { var lastDocEtag = destEtag.LastDocEtag; if (currentLatestEtag != null && EtagUtil.IsGreaterThan(currentLatestEtag, lastDocEtag)) { lastDocEtag = currentLatestEtag; } destEtag.LastDocEtag = lastDocEtag; } } SaveNewReplicationStatus(localReplicationStatus); } finally { AfterReplicationCompleted(successes.Count); var min = localReplicationStatus.LastReplicatedEtags.Min(x => new ComparableByteArray(x.LastDocEtag.ToByteArray())); if (min != null) { var lastMinReplicatedEtag = min.ToEtag(); prefetchingBehavior.CleanupDocuments(lastMinReplicatedEtag); prefetchingBehavior.UpdateAutoThrottler(documents, replicationDuration.Elapsed); } } } } }
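The finally block above computes the smallest last-replicated etag across all destinations (via ComparableByteArray) and hands it to the prefetcher for cleanup, since nothing older than that minimum can still be needed by any destination. Here is a small sketch of taking a lexicographic minimum over byte-array markers; it is generic illustration code, not the RavenDB ComparableByteArray type.

using System;
using System.Collections.Generic;

public static class LowestCommonMarker
{
    // Lexicographic byte-array comparison, the same ordering idea ComparableByteArray provides.
    private static int Compare(byte[] x, byte[] y)
    {
        for (var i = 0; i < Math.Min(x.Length, y.Length); i++)
        {
            if (x[i] != y[i])
                return x[i].CompareTo(y[i]);
        }
        return x.Length.CompareTo(y.Length);
    }

    // Returns the smallest marker across all destinations, or null if there are none;
    // cleanup below that point is safe because every destination has already passed it.
    public static byte[] Min(IEnumerable<byte[]> markers)
    {
        byte[] min = null;
        foreach (var marker in markers)
        {
            if (min == null || Compare(marker, min) < 0)
                min = marker;
        }
        return min;
    }

    public static void Main()
    {
        var min = Min(new List<byte[]> { new byte[] { 0, 4 }, new byte[] { 0, 2, 9 } });
        Console.WriteLine(string.Join(",", min)); // 0,2,9
    }
}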
public void GetDocumentsWithIdStartingWith(string idPrefix, string matches, string exclude, int start, int pageSize, CancellationToken token, ref int nextStart, Action <JsonDocument> addDoc, string transformer = null, Dictionary <string, RavenJToken> transformerParameters = null, string skipAfter = null) { if (idPrefix == null) { throw new ArgumentNullException("idPrefix"); } idPrefix = idPrefix.Trim(); var canPerformRapidPagination = nextStart > 0 && start == nextStart; var actualStart = canPerformRapidPagination ? start : 0; var addedDocs = 0; var matchedDocs = 0; TransactionalStorage.Batch( actions => { var docsToSkip = canPerformRapidPagination ? 0 : start; int docCount; AbstractTransformer storedTransformer = null; if (transformer != null) { storedTransformer = IndexDefinitionStorage.GetTransformer(transformer); if (storedTransformer == null) { throw new InvalidOperationException("No transformer with the name: " + transformer); } } do { Database.WorkContext.UpdateFoundWork(); docCount = 0; var docs = actions.Documents.GetDocumentsWithIdStartingWith(idPrefix, actualStart, pageSize, string.IsNullOrEmpty(skipAfter) ? null : skipAfter); var documentRetriever = new DocumentRetriever(Database.Configuration, actions, Database.ReadTriggers, Database.InFlightTransactionalState, transformerParameters); foreach (var doc in docs) { token.ThrowIfCancellationRequested(); docCount++; var keyTest = doc.Key.Substring(idPrefix.Length); if (!WildcardMatcher.Matches(matches, keyTest) || WildcardMatcher.MatchesExclusion(exclude, keyTest)) { continue; } JsonDocument.EnsureIdInMetadata(doc); var nonAuthoritativeInformationBehavior = Database.InFlightTransactionalState.GetNonAuthoritativeInformationBehavior <JsonDocument>(null, doc.Key); var document = nonAuthoritativeInformationBehavior != null ? nonAuthoritativeInformationBehavior(doc) : doc; document = documentRetriever.ExecuteReadTriggers(document, null, ReadOperation.Load); if (document == null) { continue; } matchedDocs++; if (matchedDocs <= docsToSkip) { continue; } token.ThrowIfCancellationRequested(); if (storedTransformer != null) { using (new CurrentTransformationScope(Database, documentRetriever)) { var transformed = storedTransformer.TransformResultsDefinition(new[] { new DynamicJsonObject(document.ToJson()) }) .Select(x => JsonExtensions.ToJObject(x)) .ToArray(); if (transformed.Length == 0) { throw new InvalidOperationException("The transform results function failed on a document: " + document.Key); } var transformedJsonDocument = new JsonDocument { Etag = document.Etag.HashWith(storedTransformer.GetHashCodeBytes()).HashWith(documentRetriever.Etag), NonAuthoritativeInformation = document.NonAuthoritativeInformation, LastModified = document.LastModified, DataAsJson = new RavenJObject { { "$values", new RavenJArray(transformed) } }, }; addDoc(transformedJsonDocument); } } else { addDoc(document); } addedDocs++; if (addedDocs >= pageSize) { break; } } actualStart += pageSize; }while (docCount > 0 && addedDocs < pageSize && actualStart > 0 && actualStart < int.MaxValue); }); if (addedDocs != pageSize) { nextStart = start; // will mark as last page } else if (canPerformRapidPagination) { nextStart = start + matchedDocs; } else { nextStart = actualStart; } }