/// <summary>
/// RegisterAdd will be called whenever a "new" block is allocated.
/// Don't save the block at this point, as changes are not yet saved.
/// </summary>
/// <param name="collection">Collection that allocated the block</param>
/// <param name="blockAddress">Address of the newly allocated block</param>
/// <param name="blockSize">Size of the newly allocated block</param>
protected internal override void RegisterAdd(CollectionOnDisk collection, long blockAddress, int blockSize)
{
    if (IsTransactionStore(collection))
    {
        ((TransactionBase)collection.ParentTransactionLogger).RegisterAdd(collection, blockAddress, blockSize);
        return;
    }
    if (LogCollection == null)
    {
        return;
    }
    RecordKey key = CreateKey(collection, blockAddress);
    //** Check if Block is in Growth or Recycled Segments
    if (RegionLogic.IsSegmentInStore(_fileGrowthStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_recycledSegmentsStore, key, blockSize))
    {
        if (_inCommit == 0)
        {
            TrackModification(collection.GetTopParent());
        }
        return;
    }
    RegisterAdd(_addBlocksStore, _fileGrowthStore, _recycledSegmentsStore, collection, blockAddress, blockSize, false);
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }
}
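// Illustrative note (hypothetical addresses, not part of this class): if _fileGrowthStore already
// tracks a growth segment covering, say, [8192, 8192 + 4096), then RegisterAdd(collection, 8704, 512)
// falls entirely inside that segment. IsSegmentInStore reports the hit, so no separate add entry is
// logged for the block; the top-level parent collection is merely marked as modified for commit
// tracking.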
protected internal override void RegisterRemove(CollectionOnDisk collection, long blockAddress, int blockSize)
{
    if (IsTransactionStore(collection))
    {
        ((TransactionBase)collection.ParentTransactionLogger).RegisterRemove(collection, blockAddress, blockSize);
        return;
    }
    if (LogCollection == null)
    {
        return;
    }
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }

    // object o = 90;
    // todo: remove return when ready...
    return;

    // Check if Block is in Growth, RecycledCollection, Add, Recycled blocks segments...
    RecordKey key = CreateKey(collection, blockAddress);
    if (RegionLogic.IsSegmentInStore(_fileGrowthStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_recycledSegmentsStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_addBlocksStore, key, blockSize))
    {
        return;
    }
    // check if block is in updated blocks...
    if (IsInUpdatedBlocks(collection, blockAddress, blockSize))
    {
        return;
    }
    AddMerge(_recycledBlocksStore, key, blockSize);
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }
}
protected internal override void RegisterRemove(CollectionOnDisk collection)
{
    if (IsTransactionStore(collection))
    {
        ((TransactionBase)collection.ParentTransactionLogger).RegisterRemove(collection);
        return;
    }
    if (LogCollection == null)
    {
        return;
    }
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }
}
/// <summary>
/// Commit a transaction.
/// </summary>
/// <param name="phase">
/// FirstPhase makes changes permanent but keeps the transaction log so rollback
/// is still possible.
///
/// SecondPhase will:
/// 1. call FirstPhase commit if this transaction is in the UnCommitted phase
/// 2. clear the transaction log to complete the Commit
/// NOTE: Rollback is no longer allowed after completion of SecondPhase
/// </param>
/// <returns>true if successful, otherwise false</returns>
public override bool InternalCommit(CommitPhase phase)
{
    if (CurrentCommitPhase == CommitPhase.Committed)
    {
        throw new InvalidOperationException(string.Format("Transaction '{0}' is already committed.", Id));
    }
    _inCommit++;
    try
    {
        switch (phase)
        {
            case CommitPhase.FirstPhase:
                if (CurrentCommitPhase == CommitPhase.UnCommitted)
                {
                    RollbackConflicts();
                    //** save all cached data of each collection
                    var parents = new Dictionary<CollectionOnDisk, object>(ModifiedCollections.Count);
                    var closeColls = new List<RecordKey>();
                    foreach (KeyValuePair<RecordKey, CollectionOnDisk> kvp in ModifiedCollections)
                    {
                        CollectionOnDisk collection = kvp.Value;
                        CollectionOnDisk ct = collection.GetTopParent();
                        if (ct.IsOpen)
                        {
                            parents[ct] = null;
                        }
                        else
                        {
                            closeColls.Add(kvp.Key);
                        }
                    }
                    foreach (CollectionOnDisk collection in parents.Keys)
                    {
                        if (!collection.IsOpen)
                        {
                            continue;
                        }
                        collection.Flush();
                        collection.OnCommit();
                    }
                    foreach (RecordKey k in closeColls)
                    {
                        ModifiedCollections.Remove(k);
                    }
                    //File.DeletedCollections.Flush();
                    CurrentCommitPhase = CommitPhase.FirstPhase;
                    //** don't clear transaction log so rollback is still possible
                    return true;
                }
                break;
            case CommitPhase.SecondPhase:
                if (CurrentCommitPhase == CommitPhase.UnCommitted)
                {
                    if (!Commit(CommitPhase.FirstPhase))
                    {
                        break;
                    }
                }
                if (CurrentCommitPhase == CommitPhase.FirstPhase)
                {
                    //** mark second phase as started; once it starts, there is no turning back...
                    CurrentCommitPhase = CommitPhase.SecondPhase;
                    //** preserve the recycled segment so on rollback it can be restored...
                    foreach (CollectionOnDisk collection in ModifiedCollections.Values)
                    {
                        if (!collection.IsOpen)
                        {
                            continue;
                        }
                        collection.HeaderData.RecycledSegmentBeforeTransaction =
                            collection.HeaderData.RecycledSegment;
                        if (collection.HeaderData.RecycledSegmentBeforeTransaction != null)
                        {
                            collection.HeaderData.RecycledSegmentBeforeTransaction =
                                (DeletedBlockInfo)collection.HeaderData.RecycledSegmentBeforeTransaction.Clone();
                        }
                    }
                    //** delete new (AddStore), updated (LogCollection) and
                    //** file growth segments (FileGrowthStore) "log entries"
                    ClearStores(true);
                    //** todo: Record on Trans Log the FileSet Remove action + info needed for
                    //** commit resume "on crash and restart" 11/9/08
                    File.Delete(Server.Path + DataBackupFilename);
                    //** todo: remove from trans Log the FileSet Remove action... 11/09/08
                    return true;
                }
                break;
        }
        //** auto roll back this transaction if commit failed above
        if (CurrentCommitPhase != CommitPhase.Rolledback &&
            CurrentCommitPhase != CommitPhase.SecondPhase)
        {
            Rollback();
        }
        return false;
    }
    finally
    {
        _inCommit--;
        if (Parent == null)
        {
            CollectionOnDisk.transaction = null;
        }
        else
        {
            Parent.Children.Remove(this);
        }
    }
}
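// Usage sketch (illustrative only; assumes a Transaction instance named "trans" obtained from the
// owning Server/File API, which is outside this class):
//
//   // First phase: flush and persist cached changes, but keep the transaction log so Rollback
//   // is still possible.
//   if (trans.InternalCommit(CommitPhase.FirstPhase))
//   {
//       // Second phase: clear the log stores and delete the data backup file; Rollback is no
//       // longer possible once this completes.
//       trans.InternalCommit(CommitPhase.SecondPhase);
//   }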
/// <summary> /// RegisterSave will be called when a block cache faulted from memory /// onto Disk. Resolution of Added blocks will be done here and only /// those "modified" blocks will be registered & backed up. /// </summary> /// <param name="collection">Collection that is saving the block</param> /// <param name="blockAddress"></param> /// <param name="segmentSize"></param> /// <param name="readPool"> </param> /// <param name="writePool"> </param> protected internal override bool RegisterSave(CollectionOnDisk collection, long blockAddress, int segmentSize, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool) { if (IsTransactionStore(collection)) { return(((TransactionBase)collection.ParentTransactionLogger).RegisterSave(collection, blockAddress, segmentSize, readPool, writePool)); } if (LogCollection == null) { return(false); } LogTracer.Verbose("Transactin.RegisterSave: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId); //Step 1. Remove Intersections with Added, Growth segments & Recycled Blocks from region as no need to backup // new Blocks //Step 2. Copy or backup remaining (Updated) blocks onto the Transaction Log file for restore on Rollback RecordKey key = CreateKey(collection, blockAddress); // if in file growth segments, don't register for save... Region region = RegionLogic.RemoveIntersections(_fileGrowthStore, key, blockAddress, segmentSize); if (region == null || region.Count == 0) { if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } return(false); } #region subtract any region intersecting with recycled and add Stores int itemCount = region.Count / 2; if (itemCount < 5) { itemCount = 5; } var regionsForBackup = new List <KeyValuePair <RecordKey, Region> >(itemCount); foreach (KeyValuePair <long, int> area in region) { // subtract regions intersecting with recycled segments key.Address = area.Key; Region region2 = RegionLogic.RemoveIntersections(_recycledSegmentsStore, key, area.Key, area.Value); LogTracer.Verbose("Transactin.RegisterSave: Thread {0}, _recycledSegmentsStore count {1}.", Thread.CurrentThread.ManagedThreadId, _recycledSegmentsStore.Count); if (region2 == null || region2.Count <= 0 || ((LogCollection is SortedDictionaryOnDisk) && key.Filename == ((SortedDictionaryOnDisk)LogCollection).File.Filename)) { continue; } // subtract regions intersecting with (new) add segments foreach (KeyValuePair <long, int> area2 in region2) { key.Address = area2.Key; var region3 = RegionLogic.RemoveIntersections(_addBlocksStore, key, area2.Key, area2.Value); LogTracer.Verbose("Transactin.RegisterSave: Thread {0}, _addBlocksStore count {1}.", Thread.CurrentThread.ManagedThreadId, _addBlocksStore.Count); if (region3 == null || region3.Count <= 0) { continue; } foreach (KeyValuePair <long, int> area3 in region3) { key.Address = area3.Key; var region4 = RegionLogic.RemoveIntersections(_recycledBlocksStore, key, area3.Key, area3.Value); LogTracer.Verbose("Transactin.RegisterSave: Thread {0}, _recycledBlocksStore count {1}.", Thread.CurrentThread.ManagedThreadId, _recycledBlocksStore.Count); if (region4 == null || region4.Count <= 0) { continue; } // any remaining portions are marked for backup if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } regionsForBackup.Add(new KeyValuePair <RecordKey, Region>(key, region4)); } } } #endregion if (readPool != null) { BackupData(regionsForBackup, readPool, writePool); } else { BackupData(regionsForBackup); } return(true); }
/// <summary> /// RegisterSave will be called when a block cache faulted from memory /// onto Disk. Resolution of Added blocks will be done here and only /// those "modified" blocks will be saved. Newly added block(s) will /// not be saved. /// </summary> /// <param name="collection">Collection that is saving the block</param> /// <param name="blockAddress"></param> /// <param name="segmentSize"></param> /// <param name="readPool"> </param> /// <param name="writePool"> </param> protected internal override bool RegisterSave(CollectionOnDisk collection, long blockAddress, int segmentSize, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool) { if (IsTransactionStore(collection)) { return(((TransactionBase)collection.ParentTransactionLogger).RegisterSave(collection, blockAddress, segmentSize, readPool, writePool)); } if (LogCollection == null) { return(false); } /* Step 1. Remove Intersections with Added, Growth segments & Recycled Blocks from region as no need to backup * new Blocks * Step 2. Copy or backup (any) remaining blocks (the Updated blocks) * onto the Transaction Log file for restore on Rollback */ RecordKey key = CreateKey(collection, blockAddress); //// if in recycled or add store, don't register for save... //if (RegionLogic.IsSegmentInStore(_recycledCollectionStore, key, segmentSize) || InAddStore(key, segmentSize)) // return false; //** if in file growth segments, don't register for save... Region region = RegionLogic.RemoveIntersections(_fileGrowthStore, key, blockAddress, segmentSize); if (region == null || region.Count == 0) { if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } return(false); } //** int itemCount = region.Count / 2; if (itemCount < 5) { itemCount = 5; } var regionsForBackup = new List <KeyValuePair <RecordKey, Region> >(itemCount); foreach (KeyValuePair <long, int> area in region) { key.Address = area.Key; Region region2 = RegionLogic.RemoveIntersections(_recycledCollectionStore, key, area.Key, area.Value); if (region2 == null || region2.Count <= 0 || ((LogCollection is SortedDictionaryOnDisk) && key.Filename == ((SortedDictionaryOnDisk)LogCollection).File.Filename)) { continue; } foreach (KeyValuePair <long, int> area2 in region2) { key.Address = area2.Key; Region region3 = RegionLogic.RemoveIntersections(_addStore, key, area2.Key, area2.Value); //** Step 2: Backup the "modified" portion(s) of data if (region3 == null || region3.Count <= 0) { continue; } if (_inCommit == 0) { TrackModification(collection.GetTopParent()); } regionsForBackup.Add(new KeyValuePair <RecordKey, Region>(key, region3)); } } if (readPool != null) { BackupData(regionsForBackup, readPool, writePool); } else { BackupData(regionsForBackup); } return(true); }