protected internal override void TrackModification(CollectionOnDisk collection, bool untrack = false)
{
    CollectionOnDisk p = collection; // Collection.GetTopParent();
    RecordKey key = CreateKey(p);
    if (!untrack)
    {
        ModifiedCollections[key] = p;
        return;
    }
    ModifiedCollections.Remove(key);
}
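// A hedged, self-contained sketch of the dirty-set pattern TrackModification
// implements: a dictionary keyed by a stable record key acts as the transaction's
// set of modified items, and the same key both registers and deregisters an item.
// DirtyTracker is a hypothetical stand-in, not part of the source above.
using System.Collections.Generic;

class DirtyTracker<TKey, TItem>
{
    private readonly Dictionary<TKey, TItem> _modified = new Dictionary<TKey, TItem>();

    public void Track(TKey key, TItem item, bool untrack = false)
    {
        if (!untrack)
        {
            _modified[key] = item;   // idempotent: re-tracking simply overwrites
            return;
        }
        _modified.Remove(key);       // untrack: drop from the dirty set
    }

    public IReadOnlyCollection<TItem> Dirty => _modified.Values;
}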
protected virtual void ClearStores(bool isRecycleStores)
{
    CollectionOnDisk.transaction = null;
    if (CollectionOnDisk.Session != null)
    {
        CollectionOnDisk.Session.Transaction = null;
    }
    ModifiedCollections.Clear();
    _addStore = null;
    _fileGrowthStore = null;
    _recycledCollectionStore = null;
    RemoveFromLogBackupLookup(DataBackupFilename);
    if (Count > 0 && Interlocked.Decrement(ref Count) == 0)
    {
        if (LogCollection != null)
        {
            lock (Locker)
            {
                if (LogCollection != null)
                {
                    LogBackupFileHandleLookup.Clear();
                    LogBackupFilenameLookup.Clear();
                    Interlocked.Exchange(ref _logBackupFilenameLookupCounter, 0);
                    ClearBackupStreams();
                    LogCollection = null;
                }
            }
        }
    }
    if (_appendLogger != null)
    {
        _appendLogger.Dispose();
        File.Delete(_appendLogger.LogFilename);
        _appendLogger = null;
    }
    if (_updateLogger == null)
    {
        return;
    }
    _updateLogger.Dispose();
    File.Delete(_updateLogger.LogFilename);
    _updateLogger = null;
}
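// ClearStores releases the shared log structures only when the last owner lets
// go: Interlocked.Decrement detects the final reference, and the double-checked
// lock on Locker ensures the lookups are cleared exactly once under races.
// A minimal, self-contained sketch of that pattern (SharedLogPool and its
// members are illustrative names, not part of the source above):
using System.Threading;

class SharedLogPool
{
    static int _refCount;                          // owners of the shared state
    static readonly object _locker = new object();
    static object _sharedLog;                      // stands in for LogCollection

    public static void Acquire()
    {
        if (Interlocked.Increment(ref _refCount) == 1)
            _sharedLog = new object();             // first owner initializes
    }

    public static void Release()
    {
        if (Interlocked.Decrement(ref _refCount) > 0)
            return;                                // others still hold a reference
        if (_sharedLog == null)
            return;
        lock (_locker)                             // double-checked: one thread clears
        {
            if (_sharedLog == null)
                return;
            _sharedLog = null;                     // clear lookups / dispose streams here
        }
    }
}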
/// <summary>
/// Commit a transaction.
/// </summary>
/// <param name="phase">
/// FirstPhase will make changes permanent but keep the transaction log so rollback
/// is still possible.
///
/// SecondPhase will:
/// 1. call FirstPhase commit if this transaction is in the UnCommitted phase
/// 2. clear the transaction log to complete the Commit
/// NOTE: Rollback is no longer allowed after completion of SecondPhase.
/// </param>
/// <returns>true if successful, otherwise false</returns>
public override bool InternalCommit(CommitPhase phase)
{
    if (CurrentCommitPhase == CommitPhase.Committed)
    {
        throw new InvalidOperationException(string.Format("Transaction '{0}' is already committed.", Id));
    }
    _inCommit++;
    try
    {
        switch (phase)
        {
            case CommitPhase.FirstPhase:
                if (CurrentCommitPhase == CommitPhase.UnCommitted)
                {
                    RollbackConflicts();
                    //** save all cached data of each collection
                    var parents = new Dictionary<CollectionOnDisk, object>(ModifiedCollections.Count);
                    var closeColls = new List<RecordKey>();
                    foreach (KeyValuePair<RecordKey, CollectionOnDisk> kvp in ModifiedCollections)
                    {
                        CollectionOnDisk collection = kvp.Value;
                        CollectionOnDisk ct = collection.GetTopParent();
                        if (ct.IsOpen)
                        {
                            parents[ct] = null;
                        }
                        else
                        {
                            closeColls.Add(kvp.Key);
                        }
                    }
                    foreach (CollectionOnDisk collection in parents.Keys)
                    {
                        if (!collection.IsOpen)
                        {
                            continue;
                        }
                        collection.Flush();
                        collection.OnCommit();
                    }
                    foreach (RecordKey k in closeColls)
                    {
                        ModifiedCollections.Remove(k);
                    }
                    //File.DeletedCollections.Flush();
                    CurrentCommitPhase = CommitPhase.FirstPhase;
                    //** don't clear the transaction log so rollback is still possible
                    return true;
                }
                break;
            case CommitPhase.SecondPhase:
                if (CurrentCommitPhase == CommitPhase.UnCommitted)
                {
                    if (!Commit(CommitPhase.FirstPhase))
                    {
                        break;
                    }
                }
                if (CurrentCommitPhase == CommitPhase.FirstPhase)
                {
                    //** mark second phase completed; once it starts, there is no turning back...
                    CurrentCommitPhase = CommitPhase.SecondPhase;
                    //** preserve the recycled segment so on rollback it can be restored...
                    foreach (CollectionOnDisk collection in ModifiedCollections.Values)
                    {
                        if (!collection.IsOpen)
                        {
                            continue;
                        }
                        collection.HeaderData.RecycledSegmentBeforeTransaction =
                            collection.HeaderData.RecycledSegment;
                        if (collection.HeaderData.RecycledSegmentBeforeTransaction != null)
                        {
                            collection.HeaderData.RecycledSegmentBeforeTransaction =
                                (DeletedBlockInfo)collection.HeaderData.RecycledSegmentBeforeTransaction.Clone();
                        }
                    }
                    //** delete new (AddStore), updated (LogCollection) and
                    //** file growth segment (FileGrowthStore) "log entries"
                    ClearStores(true);
                    //** todo: Record on Trans Log the FileSet Remove action + info needed for
                    //** commit resume "on crash and restart" 11/9/08
                    File.Delete(Server.Path + DataBackupFilename);
                    //** todo: remove from trans Log the FileSet Remove action... 11/09/08
                    return true;
                }
                break;
        }
        //** auto-rollback this transaction if commit failed above
        if (CurrentCommitPhase != CommitPhase.Rolledback &&
            CurrentCommitPhase != CommitPhase.SecondPhase)
        {
            Rollback();
        }
        return false;
    }
    finally
    {
        _inCommit--;
        if (Parent == null)
        {
            CollectionOnDisk.transaction = null;
        }
        else
        {
            Parent.Children.Remove(this);
        }
    }
}
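// A hedged coordinator sketch for the two-phase protocol above: FirstPhase is
// issued while rollback is still possible, and SecondPhase only once every
// participant has prepared successfully. ITwoPhaseParticipant and
// TwoPhaseCoordinator are illustrative stand-ins, not part of the source above;
// the CommitPhase members mirror those used by InternalCommit.
using System.Collections.Generic;

enum CommitPhase { UnCommitted, FirstPhase, SecondPhase, Committed, Rolledback }

interface ITwoPhaseParticipant
{
    bool InternalCommit(CommitPhase phase);
    void Rollback();
}

static class TwoPhaseCoordinator
{
    public static bool CommitAll(IReadOnlyList<ITwoPhaseParticipant> participants)
    {
        var prepared = new List<ITwoPhaseParticipant>();
        foreach (var p in participants)
        {
            if (p.InternalCommit(CommitPhase.FirstPhase))
            {
                prepared.Add(p);
                continue;
            }
            // A failed FirstPhase rolls itself back; undo the already-prepared ones.
            foreach (var q in prepared)
                q.Rollback();
            return false;
        }
        // Past this point there is no turning back: SecondPhase clears the logs.
        foreach (var p in prepared)
            p.InternalCommit(CommitPhase.SecondPhase);
        return true;
    }
}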
public override int Execute(DocumentsOperationContext context)
{
    if (IsClusterTransaction)
    {
        Debug.Assert(false, "Shouldn't happen - cluster tx run via normal means");
        return 0; // should never happen
    }
    _disposables.Clear();
    DocumentsStorage.PutOperationResults? lastPutResult = null;
    for (int i = 0; i < ParsedCommands.Count; i++)
    {
        var cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
        switch (cmd.Type)
        {
            case CommandType.PUT:
                DocumentsStorage.PutOperationResults putResult;
                try
                {
                    putResult = Database.DocumentsStorage.Put(context, cmd.Id, cmd.ChangeVector, cmd.Document);
                }
                catch (Voron.Exceptions.VoronConcurrencyErrorException)
                {
                    // RavenDB-10581 - If we have a concurrency error on "doc-id/",
                    // this means that we have existing values under the current etag.
                    // We'll generate a new (random) id for them.
                    // The TransactionMerger will re-run us when we ask it to as a
                    // separate transaction.
                    for (; i < ParsedCommands.Count; i++)
                    {
                        cmd = ParsedCommands.Array[ParsedCommands.Offset + i];
                        if (cmd.Type == CommandType.PUT && cmd.Id?.EndsWith('/') == true)
                        {
                            cmd.Id = MergedPutCommand.GenerateNonConflictingId(Database, cmd.Id);
                            RetryOnError = true;
                        }
                    }
                    throw;
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }
                context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, cmd.Document.Size);
                AddPutResult(putResult);
                lastPutResult = putResult;
                break;
            case CommandType.PATCH:
                try
                {
                    cmd.PatchCommand.Execute(context);
                }
                catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }
                var patchResult = cmd.PatchCommand.PatchResult;
                if (patchResult.ModifiedDocument != null)
                {
                    context.DocumentDatabase.HugeDocuments.AddIfDocIsHuge(cmd.Id, patchResult.ModifiedDocument.Size);
                }
                if (patchResult.ChangeVector != null)
                {
                    LastChangeVector = patchResult.ChangeVector;
                }
                if (patchResult.Collection != null)
                {
                    ModifiedCollections?.Add(patchResult.Collection);
                }
                var patchReply = new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = patchResult.ChangeVector,
                    [nameof(Constants.Documents.Metadata.LastModified)] = patchResult.LastModified,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.PATCH),
                    [nameof(PatchStatus)] = patchResult.Status,
                    [nameof(PatchResult.Debug)] = patchResult.Debug
                };
                if (cmd.ReturnDocument)
                {
                    patchReply[nameof(PatchResult.ModifiedDocument)] = patchResult.ModifiedDocument;
                }
                Reply.Add(patchReply);
                break;
            case CommandType.DELETE:
                if (cmd.IdPrefixed == false)
                {
                    DocumentsStorage.DeleteOperationResult? deleted;
                    try
                    {
                        deleted = Database.DocumentsStorage.Delete(context, cmd.Id, cmd.ChangeVector);
                    }
                    catch (ConcurrencyException e) when (CanAvoidThrowingToMerger(e, i))
                    {
                        return 0;
                    }
                    AddDeleteResult(deleted, cmd.Id);
                }
                else
                {
                    DeleteWithPrefix(context, cmd.Id);
                }
                break;
            case CommandType.AttachmentPUT:
                var attachmentStream = AttachmentStreams.Dequeue();
                var stream = attachmentStream.Stream;
                _disposables.Add(stream);
                var docId = cmd.Id;
                if (docId[docId.Length - 1] == '/')
                {
                    // Attachment sent by Raven ETL, only the id prefix is defined.
                    if (lastPutResult == null)
                    {
                        ThrowUnexpectedOrderOfRavenEtlCommands();
                    }
                    Debug.Assert(lastPutResult.Value.Id.StartsWith(docId));
                    docId = lastPutResult.Value.Id;
                }
                var attachmentPutResult = Database.DocumentsStorage.AttachmentsStorage.PutAttachment(
                    context, docId, cmd.Name, cmd.ContentType, attachmentStream.Hash, cmd.ChangeVector,
                    stream, updateDocument: false);
                LastChangeVector = attachmentPutResult.ChangeVector;
                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(docId);
                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = attachmentPutResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentPUT),
                    [nameof(BatchRequestParser.CommandData.Name)] = attachmentPutResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentPutResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentPutResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentPutResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentPutResult.Size
                });
                break;
            case CommandType.AttachmentDELETE:
                Database.DocumentsStorage.AttachmentsStorage.DeleteAttachment(
                    context, cmd.Id, cmd.Name, cmd.ChangeVector, updateDocument: false);
                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);
                Reply.Add(new DynamicJsonValue
                {
                    ["Type"] = nameof(CommandType.AttachmentDELETE),
                    [Constants.Documents.Metadata.Id] = cmd.Id,
                    ["Name"] = cmd.Name
                });
                break;
            case CommandType.AttachmentMOVE:
                var attachmentMoveResult = Database.DocumentsStorage.AttachmentsStorage.MoveAttachment(
                    context, cmd.Id, cmd.Name, cmd.DestinationId, cmd.DestinationName, cmd.ChangeVector);
                LastChangeVector = attachmentMoveResult.ChangeVector;
                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.Id);
                _documentsToUpdateAfterAttachmentChange.Add(cmd.DestinationId);
                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentMOVE),
                    [nameof(BatchRequestParser.CommandData.Name)] = cmd.Name,
                    [nameof(BatchRequestParser.CommandData.DestinationId)] = attachmentMoveResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.DestinationName)] = attachmentMoveResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentMoveResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentMoveResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentMoveResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentMoveResult.Size
                });
                break;
            case CommandType.AttachmentCOPY:
                var attachmentCopyResult = Database.DocumentsStorage.AttachmentsStorage.CopyAttachment(
                    context, cmd.Id, cmd.Name, cmd.DestinationId, cmd.DestinationName, cmd.ChangeVector);
                LastChangeVector = attachmentCopyResult.ChangeVector;
                if (_documentsToUpdateAfterAttachmentChange == null)
                {
                    _documentsToUpdateAfterAttachmentChange = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
                }
                _documentsToUpdateAfterAttachmentChange.Add(cmd.DestinationId);
                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = attachmentCopyResult.DocumentId,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.AttachmentCOPY),
                    [nameof(BatchRequestParser.CommandData.Name)] = attachmentCopyResult.Name,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = attachmentCopyResult.ChangeVector,
                    [nameof(AttachmentDetails.Hash)] = attachmentCopyResult.Hash,
                    [nameof(BatchRequestParser.CommandData.ContentType)] = attachmentCopyResult.ContentType,
                    [nameof(AttachmentDetails.Size)] = attachmentCopyResult.Size
                });
                break;
            case CommandType.Counters:
                var counterDocId = cmd.Counters.DocumentId;
                if (cmd.FromEtl && counterDocId[counterDocId.Length - 1] == '/')
                {
                    // Counter sent by Raven ETL, only the id prefix is defined.
                    if (lastPutResult == null)
                    {
                        ThrowUnexpectedOrderOfRavenEtlCommands();
                    }
                    Debug.Assert(lastPutResult.Value.Id.StartsWith(counterDocId));
                    cmd.Counters.DocumentId = lastPutResult.Value.Id;
                }
                var counterBatchCmd = new CountersHandler.ExecuteCounterBatchCommand(Database, new CounterBatch
                {
                    Documents = new List<DocumentCountersOperation> { cmd.Counters },
                    FromEtl = cmd.FromEtl
                });
                try
                {
                    counterBatchCmd.Execute(context);
                }
                catch (DocumentDoesNotExistException e) when (CanAvoidThrowingToMerger(e, i))
                {
                    return 0;
                }
                LastChangeVector = counterBatchCmd.LastChangeVector;
                Reply.Add(new DynamicJsonValue
                {
                    [nameof(BatchRequestParser.CommandData.Id)] = cmd.Id,
                    [nameof(BatchRequestParser.CommandData.ChangeVector)] = counterBatchCmd.LastChangeVector,
                    [nameof(BatchRequestParser.CommandData.Type)] = nameof(CommandType.Counters),
                    [nameof(CountersDetail)] = counterBatchCmd.CountersDetail.ToJson(),
                });
                break;
        }
    }
    if (_documentsToUpdateAfterAttachmentChange != null)
    {
        foreach (var documentId in _documentsToUpdateAfterAttachmentChange)
        {
            var changeVector = Database.DocumentsStorage.AttachmentsStorage.UpdateDocumentAfterAttachmentChange(context, documentId);
            if (changeVector != null)
            {
                LastChangeVector = changeVector;
            }
        }
    }
    return Reply.Count;
}
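// The VoronConcurrencyErrorException handler in Execute rewrites ids that end in
// '/' (the server-side id generation convention) to non-conflicting ones and
// flags the batch for re-run by the transaction merger. A minimal sketch of that
// convention (ServerSideIds and its GenerateNonConflictingId below are
// illustrative stand-ins, not RavenDB's actual implementation):
using System;

static class ServerSideIds
{
    // An id like "orders/" asks the server to pick a unique suffix for the document.
    public static string GenerateNonConflictingId(string id)
    {
        if (id == null || !id.EndsWith("/"))
            return id;                               // fully specified id: keep as-is
        // Append a random suffix so the retried PUT cannot collide again.
        return id + Guid.NewGuid().ToString("N").Substring(0, 8);
    }
}

// Example: ServerSideIds.GenerateNonConflictingId("orders/") might yield "orders/3f2a9c1d".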