/// <summary>
/// Registers a newly allocated block in <paramref name="addStore"/> (kept for use on Rollback),
/// merging it with a contiguous block already tracked for the same collection when possible.
/// When <paramref name="checkIfInGrowthSegments"/> is true, blocks that fall inside the file
/// growth or recycled-collection stores are skipped (they never need rollback backup).
/// (Corrected doc: this method returns void — the previous "<returns>true/false</returns>" text was wrong.)
/// </summary>
/// <param name="addStore">Sorted store of newly added blocks (RecordKey -> block size)</param>
/// <param name="fileGrowthStore">Sorted store of file-growth segments</param>
/// <param name="recycledCollectionStore">Sorted store of recycled collection segments</param>
/// <param name="key">Identity of the block (server/file/collection/address)</param>
/// <param name="blockSize">Size of the block in bytes</param>
/// <param name="checkIfInGrowthSegments">When true, skip blocks covered by growth/recycled stores</param>
internal static void RegisterAdd(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycledCollectionStore,
    RecordKey key, int blockSize, bool checkIfInGrowthSegments)
{
    //** Check if Block is in Growth Segments
    if (checkIfInGrowthSegments &&
        (RegionLogic.IsSegmentInStore(fileGrowthStore, key, blockSize) ||
         RegionLogic.IsSegmentInStore(recycledCollectionStore, key, blockSize)))
    {
        return;
    }
    //** Add Block to AddStore for use on Rollback...
    if (!addStore.ContainsKey(key))
    {
        short passCount = 0;
        //** Detect and merge contiguous blocks: position the cursor just before the
        //** insertion point and probe at most two neighboring entries.
        if (!addStore.MovePrevious())
        {
            addStore.MoveFirst();
        }
        while (!addStore.EndOfTree())
        {
            var de = addStore.CurrentEntry;
            RecordKey k2 = de.Value.Key;
            long i = de.Value.Value; // size of the neighboring tracked block
            if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                k2.Filename == key.Filename &&
                k2.CollectionName == key.CollectionName)
            {
                // New block immediately precedes the tracked one: merge backward.
                if (key.Address + blockSize == k2.Address)
                {
                    long newSize = i + blockSize;
                    addStore.Remove(de.Value.Key);
                    k2.Address = key.Address;
                    addStore.Add(k2, newSize);
                    return;
                }
                // New block immediately follows the tracked one: extend in place.
                if (k2.Address + i == key.Address)
                {
                    addStore.CurrentValue = i + blockSize;
                    return;
                }
                if (key.Address >= k2.Address &&
                    key.Address + blockSize <= k2.Address + i)
                {
                    //** if block is inclusive, don't do anything...
                    return;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            addStore.MoveNext();
        }
        // Not contiguous with any tracked block: record it as a new entry.
        addStore.Add(key, blockSize);
    }
}
/// <summary>
/// Member-related API endpoints (translated from: 会员相关接口).
/// </summary>
/// <param name="logic">Member business logic</param>
/// <param name="recordLogic">SMS record logic</param>
/// <param name="bonusLogic">Bonus logic</param>
/// <param name="regionLogic">Region logic (was undocumented)</param>
public MemberController(MemberLogic logic, SmsRecordLogic recordLogic, BonusLogic bonusLogic,
    RegionLogic regionLogic)
{
    _logic = logic;
    _recordLogic = recordLogic;
    this.bonusLogic = bonusLogic;
    this.regionLogic = regionLogic;
}
/// <summary>
/// Called whenever a "new" block is allocated. The block itself is not saved here,
/// because its changes are not persisted yet; it is only recorded for rollback purposes.
/// </summary>
/// <param name="collection">Collection that allocated the block</param>
/// <param name="blockAddress">Start address of the newly allocated block</param>
/// <param name="blockSize">Size of the block in bytes</param>
protected internal override void RegisterAdd(CollectionOnDisk collection, long blockAddress, int blockSize)
{
    // Transaction stores hand the registration off to their parent transaction logger.
    if (IsTransactionStore(collection))
    {
        ((TransactionBase)collection.ParentTransactionLogger).RegisterAdd(collection, blockAddress, blockSize);
        return;
    }
    if (LogCollection == null)
        return;

    var key = CreateKey(collection, blockAddress);

    // Blocks already covered by growth or recycled segments need no add-tracking.
    bool coveredBySegmentStores =
        RegionLogic.IsSegmentInStore(_fileGrowthStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_recycledSegmentsStore, key, blockSize);
    if (!coveredBySegmentStores)
    {
        RegisterAdd(_addBlocksStore, _fileGrowthStore, _recycledSegmentsStore,
                    collection, blockAddress, blockSize, false);
    }

    // Outside of commit, note that this collection tree was modified.
    if (_inCommit == 0)
        TrackModification(collection.GetTopParent());
}
/// <summary>
/// Page lifecycle hook: wires up the region business logic and, on the initial
/// (non-postback) request, populates the grid and clears the input fields.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    BusinessLogic = new RegionLogic();
    if (Page.IsPostBack)
        return;
    FillDataGridAndCleanTextBoxs();
}
/// <summary>
/// GetAll should surface exactly the regions the repository returns.
/// </summary>
public void GetAllRegionsReturnsRepositoryValues()
{
    // Arrange: repository yields a single known region.
    var expected = new List<Region> { Region };
    RegionsMock.Setup(m => m.GetAll(null, "")).Returns(expected);
    RegionLogic = CreateRegionLogic();

    // Act
    var actual = RegionLogic.GetAll();

    // Assert: same sequence, and all strict-mock expectations were hit.
    Assert.IsTrue(actual.SequenceEqual(expected));
    RegionsMock.VerifyAll();
}
/// <summary>
/// GetRegions should pass through an empty repository result unchanged.
/// NOTE(review): despite the "Null" in the test name, this exercises the empty-list
/// case, not a null result — confirm the intended scenario.
/// </summary>
public void GetRegionsNullTest()
{
    // Arrange: strict mock repository returning no regions.
    var expected = new List<Region>();
    var repository = new Mock<IRepository<Region>>(MockBehavior.Strict);
    repository.Setup(p => p.GetAll()).Returns(expected);
    var logic = new RegionLogic(repository.Object);

    // Act
    var result = logic.GetRegions();

    // Assert
    repository.VerifyAll();
    Assert.IsTrue(result.SequenceEqual(expected));
}
/// <summary>
/// Registers removal of a block with the transaction.
/// NOTE(review): an unconditional <c>return</c> (flagged with a "todo" comment below)
/// currently short-circuits this method right after modification tracking — the
/// recycle/merge logic that follows is intentionally dead code until it is removed.
/// </summary>
/// <param name="collection">Collection removing the block</param>
/// <param name="blockAddress">Start address of the removed block</param>
/// <param name="blockSize">Size of the removed block in bytes</param>
protected internal override void RegisterRemove(CollectionOnDisk collection, long blockAddress, int blockSize)
{
    // Transaction stores delegate to the parent transaction logger.
    if (IsTransactionStore(collection))
    {
        ((TransactionBase)collection.ParentTransactionLogger).RegisterRemove(collection, blockAddress, blockSize);
        return;
    }
    if (LogCollection == null)
    {
        return;
    }
    // Outside of commit, note that this collection tree was modified.
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }
    // object o = 90;
    // todo: remove return when ready...
    return;
    // ---- everything below is unreachable until the return above is removed ----
    // Check if Block is in Growth, RecycledCollection, Add, Recycled blocks segments...
    RecordKey key = CreateKey(collection, blockAddress);
    if (RegionLogic.IsSegmentInStore(_fileGrowthStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_recycledSegmentsStore, key, blockSize) ||
        RegionLogic.IsSegmentInStore(_addBlocksStore, key, blockSize))
    {
        return;
    }
    // check if block is in updated blocks...
    if (IsInUpdatedBlocks(collection, blockAddress, blockSize))
    {
        return;
    }
    AddMerge(_recycledBlocksStore, key, blockSize);
    if (_inCommit == 0)
    {
        TrackModification(collection.GetTopParent());
    }
}
/// <summary>
/// Registers a newly allocated block in <paramref name="addStore"/> (merging with
/// contiguous tracked blocks via AddMerge) so it can be rolled back. When
/// <paramref name="checkIfInGrowthSegments"/> is true, blocks already covered by the
/// file growth or recycled-collection stores are skipped.
/// (Corrected doc: this method returns void — the previous "<returns>" text was wrong.)
/// </summary>
/// <param name="addStore">Sorted store of newly added blocks (RecordKey -> size)</param>
/// <param name="fileGrowthStore">Sorted store of file-growth segments</param>
/// <param name="recycledCollectionStore">Sorted store of recycled collection segments</param>
/// <param name="key">Identity of the block</param>
/// <param name="blockSize">Size of the block in bytes</param>
/// <param name="checkIfInGrowthSegments">When true, skip blocks covered by growth/recycled stores</param>
internal static void RegisterAdd(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycledCollectionStore,
    RecordKey key, int blockSize, bool checkIfInGrowthSegments)
{
    /* todo: complete the story for: RegisterAdd, ...Save, ...Remove, ...Recycle, ...FileGrowth
     * Logic table:
     *     Add    Save (Update)/Remove    Recycle    FileGrowth
     * - FileGrowth blocks can be deleted, then re-allocated for Add
     * - Block can be allocated for Add, Deleted(will create Updated blocks) if item is deleted,
     *   then re-Allocated for Add. */
    // Check if Block is in Growth, RecycledCollection, Recycled blocks segments...
    if (checkIfInGrowthSegments)
    {
        if (RegionLogic.IsSegmentInStore(fileGrowthStore, key, blockSize) ||
            RegionLogic.IsSegmentInStore(recycledCollectionStore, key, blockSize))
        {
            return;
        }
    }
    // Record (or contiguously merge) the block in the add store.
    AddMerge(addStore, key, blockSize);
}
/// <summary>
/// Returns all regions serialized as a JSON string.
/// </summary>
public string TraerRegiones()
{
    var logic = new RegionLogic();
    return this.ToJson(logic.TraerTodos());
}
/// <summary>
/// Nancy module exposing read-only REST endpoints under /api. Each entity gets two
/// routes: a list-all route and a get-by-id route; every handler instantiates the
/// entity's *Logic class and returns the result serialized as JSON.
/// </summary>
public ApiModule() : base("/api")
{
    #region /// Help Page ///
    Get["/help"] = x => View["help"];
    #endregion

    #region /// Category ///
    #region /// /api/Categories/ ///
    Get["/categories"] = x =>
    {
        var logic = new CategoryLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Categories/1234 ///
    Get["/categories/{id:int}"] = x =>
    {
        var logic = new CategoryLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Customer ///
    #region /// /api/Customers/ ///
    Get["/customers"] = x =>
    {
        var logic = new CustomerLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Customers/1234 ///
    // Customer ids are strings; "{id*}" is a greedy segment capture.
    Get["/customers/{id*}"] = x =>
    {
        var logic = new CustomerLogic();
        var result = logic.GetById((string)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Employee ///
    #region /// /api/employees ///
    Get["/employees"] = x =>
    {
        var logic = new EmployeeLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/employees/1234 ///
    Get["/employees/{id:int}"] = x =>
    {
        var logic = new EmployeeLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Order ///
    #region /// /api/Orders/ ///
    Get["/orders"] = x =>
    {
        var logic = new OrderLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Orders/1234 ///
    Get["/orders/{id:int}"] = x =>
    {
        var logic = new OrderLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Product ///
    #region /// /api/Products/ ///
    Get["/products"] = x =>
    {
        var logic = new ProductLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Products/1234 ///
    Get["/products/{id:int}"] = x =>
    {
        var logic = new ProductLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Region ///
    #region /// /api/Regions/ ///
    Get["/regions"] = x =>
    {
        var logic = new RegionLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Regions/1234 ///
    Get["/regions/{id:int}"] = x =>
    {
        var logic = new RegionLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Shipper ///
    #region /// /api/Shippers/ ///
    Get["/shippers"] = x =>
    {
        var logic = new ShipperLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Shippers/1234 ///
    Get["/shippers/{id:int}"] = x =>
    {
        var logic = new ShipperLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Supplier ///
    #region /// /api/Suppliers/ ///
    Get["/suppliers"] = x =>
    {
        var logic = new SupplierLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Suppliers/1234 ///
    Get["/suppliers/{id:int}"] = x =>
    {
        var logic = new SupplierLogic();
        var result = logic.GetById((int)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion

    #region /// Territory ///
    #region /// /api/Territories/ ///
    Get["/territories"] = x =>
    {
        var logic = new TerritoryLogic();
        var result = logic.GetAll();
        return Response.AsJson(result);
    };
    #endregion
    #region /// /api/Territories/1234 ///
    // NOTE(review): Nancy has no built-in "string" route constraint — confirm
    // "{id:string}" actually matches as intended (a plain "{id}" segment may be meant).
    Get["/territories/{id:string}"] = x =>
    {
        var logic = new TerritoryLogic();
        var result = logic.GetById((string)x.id);
        return Response.AsJson(result);
    };
    #endregion
    #endregion
}
/// <summary>
/// Nancy module wiring read-only /api routes: one list endpoint and one by-id
/// endpoint per entity, each serialized to JSON from the entity's *Logic class.
/// </summary>
public ApiModule() : base("/api")
{
    // Help page
    Get["/help"] = _ => View["help"];

    // Categories
    Get["/categories"] = _ => Response.AsJson(new CategoryLogic().GetAll());
    Get["/categories/{id:int}"] = args => Response.AsJson(new CategoryLogic().GetById((int)args.id));

    // Customers (string ids; greedy segment capture)
    Get["/customers"] = _ => Response.AsJson(new CustomerLogic().GetAll());
    Get["/customers/{id*}"] = args => Response.AsJson(new CustomerLogic().GetById((string)args.id));

    // Employees
    Get["/employees"] = _ => Response.AsJson(new EmployeeLogic().GetAll());
    Get["/employees/{id:int}"] = args => Response.AsJson(new EmployeeLogic().GetById((int)args.id));

    // Orders
    Get["/orders"] = _ => Response.AsJson(new OrderLogic().GetAll());
    Get["/orders/{id:int}"] = args => Response.AsJson(new OrderLogic().GetById((int)args.id));

    // Products
    Get["/products"] = _ => Response.AsJson(new ProductLogic().GetAll());
    Get["/products/{id:int}"] = args => Response.AsJson(new ProductLogic().GetById((int)args.id));

    // Regions
    Get["/regions"] = _ => Response.AsJson(new RegionLogic().GetAll());
    Get["/regions/{id:int}"] = args => Response.AsJson(new RegionLogic().GetById((int)args.id));

    // Shippers
    Get["/shippers"] = _ => Response.AsJson(new ShipperLogic().GetAll());
    Get["/shippers/{id:int}"] = args => Response.AsJson(new ShipperLogic().GetById((int)args.id));

    // Suppliers
    Get["/suppliers"] = _ => Response.AsJson(new SupplierLogic().GetAll());
    Get["/suppliers/{id:int}"] = args => Response.AsJson(new SupplierLogic().GetById((int)args.id));

    // Territories (string ids)
    Get["/territories"] = _ => Response.AsJson(new TerritoryLogic().GetAll());
    Get["/territories/{id:string}"] = args => Response.AsJson(new TerritoryLogic().GetById((string)args.id));
}
/// <summary>
/// RegisterSave is called when a block cache is faulted from memory onto Disk.
/// Resolution of Added blocks is done here so that only the truly "modified" blocks
/// are registered and backed up; portions covered by growth, recycled and add stores
/// are subtracted and skipped.
/// </summary>
/// <param name="collection">Collection that is saving the block</param>
/// <param name="blockAddress">Start address of the block being saved</param>
/// <param name="segmentSize">Size in bytes of the region being saved</param>
/// <param name="readPool">Optional IO pool for reads; when null a synchronous backup path is used</param>
/// <param name="writePool">IO pool for writes (used together with readPool)</param>
/// <returns>true when at least one portion was registered for backup, false otherwise</returns>
protected internal override bool RegisterSave(CollectionOnDisk collection, long blockAddress, int segmentSize,
    ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool)
{
    // Transaction stores delegate to the parent transaction logger.
    if (IsTransactionStore(collection))
    {
        return ((TransactionBase)collection.ParentTransactionLogger).RegisterSave(collection, blockAddress,
            segmentSize, readPool, writePool);
    }
    if (LogCollection == null)
        return false;
    // Fixed typo in the trace messages below: "Transactin" -> "Transaction".
    LogTracer.Verbose("Transaction.RegisterSave: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId);

    // Step 1. Remove Intersections with Added, Growth segments & Recycled Blocks from
    //         the region, as new Blocks need no backup.
    // Step 2. Copy or backup the remaining (Updated) blocks onto the Transaction Log
    //         file for restore on Rollback.
    RecordKey key = CreateKey(collection, blockAddress);

    // If fully inside file growth segments, don't register for save...
    Region region = RegionLogic.RemoveIntersections(_fileGrowthStore, key, blockAddress, segmentSize);
    if (region == null || region.Count == 0)
    {
        if (_inCommit == 0)
            TrackModification(collection.GetTopParent());
        return false;
    }

    #region subtract any region intersecting with recycled and add Stores
    int itemCount = region.Count / 2;
    if (itemCount < 5)
        itemCount = 5;
    var regionsForBackup = new List<KeyValuePair<RecordKey, Region>>(itemCount);
    foreach (KeyValuePair<long, int> area in region)
    {
        // subtract regions intersecting with recycled segments
        key.Address = area.Key;
        Region region2 = RegionLogic.RemoveIntersections(_recycledSegmentsStore, key, area.Key, area.Value);
        LogTracer.Verbose("Transaction.RegisterSave: Thread {0}, _recycledSegmentsStore count {1}.",
            Thread.CurrentThread.ManagedThreadId, _recycledSegmentsStore.Count);
        if (region2 == null || region2.Count <= 0 ||
            ((LogCollection is SortedDictionaryOnDisk) &&
             key.Filename == ((SortedDictionaryOnDisk)LogCollection).File.Filename))
            continue;
        // subtract regions intersecting with (new) add segments
        foreach (KeyValuePair<long, int> area2 in region2)
        {
            key.Address = area2.Key;
            var region3 = RegionLogic.RemoveIntersections(_addBlocksStore, key, area2.Key, area2.Value);
            LogTracer.Verbose("Transaction.RegisterSave: Thread {0}, _addBlocksStore count {1}.",
                Thread.CurrentThread.ManagedThreadId, _addBlocksStore.Count);
            if (region3 == null || region3.Count <= 0)
                continue;
            // subtract regions intersecting with recycled blocks
            foreach (KeyValuePair<long, int> area3 in region3)
            {
                key.Address = area3.Key;
                var region4 = RegionLogic.RemoveIntersections(_recycledBlocksStore, key, area3.Key, area3.Value);
                LogTracer.Verbose("Transaction.RegisterSave: Thread {0}, _recycledBlocksStore count {1}.",
                    Thread.CurrentThread.ManagedThreadId, _recycledBlocksStore.Count);
                if (region4 == null || region4.Count <= 0)
                    continue;
                // any remaining portions are marked for backup
                if (_inCommit == 0)
                    TrackModification(collection.GetTopParent());
                // NOTE(review): 'key' is mutated each iteration; confirm RecordKey is a
                // value type (or copied) so queued entries keep distinct addresses.
                regionsForBackup.Add(new KeyValuePair<RecordKey, Region>(key, region4));
            }
        }
    }
    #endregion
    if (readPool != null)
        BackupData(regionsForBackup, readPool, writePool);
    else
        BackupData(regionsForBackup);
    return true;
}
/// <summary>
/// Finds backup-log entries whose source-data region intersects the region described by
/// <paramref name="logKey"/>/<paramref name="logKeySize"/>, and computes the merged
/// (bounding) block covering the query and all intersecting entries.
/// NOTE(review): when the query region is fully inside an existing entry, the method
/// returns true with <paramref name="target"/> left null — confirm callers handle that.
/// </summary>
/// <param name="logKey">Key describing the source file and address being probed</param>
/// <param name="logKeySize">Size in bytes of the probed region</param>
/// <param name="target">Intersecting log entries, or null when none (or when fully contained)</param>
/// <param name="startMergedBlockAddress">Start of the merged bounding block</param>
/// <param name="mergedBlockSize">Size of the merged bounding block</param>
/// <returns>true when the region is contained in or intersects existing log entries</returns>
internal static bool GetIntersectingLogs(BackupDataLogKey logKey, int logKeySize,
    out IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> target,
    out long startMergedBlockAddress, out long mergedBlockSize)
{
    target = null;
    startMergedBlockAddress = mergedBlockSize = 0;
    var l = new List<KeyValuePair<BackupDataLogKey, BackupDataLogValue>>();
    // Position the cursor at (or just before) the query key so neighbors can be probed.
    if (!LogCollection.Search(logKey))
    {
        if (!LogCollection.MovePrevious())
        {
            if (!LogCollection.MoveFirst())
            {
                return false;
            }
        }
    }
    long address1 = logKey.SourceDataAddress;
    int size1 = logKeySize;
    // Seed the merged block with the query region itself.
    startMergedBlockAddress = address1;
    mergedBlockSize = size1;
    bool intersected = false;
    // Probe a sliding window of entries; i is reset to 0 on each hit, which re-arms
    // the 3-entry lookahead so runs of contiguous intersecting entries are all collected.
    for (int i = 0; i < 3; i++)
    {
        var key = LogCollection.CurrentKey;
        var value = LogCollection.CurrentValue;
        if (logKey.SourceFilename == key.SourceFilename)
        {
            long address2 = key.SourceDataAddress;
            int size2 = value.DataSize;
            // Query fully inside an existing entry: report containment (target stays null).
            if (RegionLogic.FirstWithinSecond(address1, size1, address2, size2))
            {
                return true;
            }
            if (RegionLogic.Intersect(address1, size1, address2, size2))
            {
                l.Add(new KeyValuePair<BackupDataLogKey, BackupDataLogValue>(key, value));
                i = 0; // re-arm the lookahead window
                intersected = true;
                // Grow the merged bounding block to cover this entry.
                if (address2 < startMergedBlockAddress)
                {
                    startMergedBlockAddress = address2;
                }
                if (startMergedBlockAddress + mergedBlockSize < address2 + size2)
                {
                    long l2 = address2 + size2 - startMergedBlockAddress;
                    // Guard: merged size must stay below int.MaxValue.
                    if (l2 >= int.MaxValue)
                    {
                        break;
                    }
                    mergedBlockSize = l2;
                }
            }
            else if (intersected)
            {
                // Past the contiguous run of intersecting entries; stop scanning.
                break;
            }
        }
        else
        {
            break;
        }
        if (!LogCollection.MoveNext())
        {
            break;
        }
    }
    if (l.Count > 0)
    {
        target = l;
        return true;
    }
    return false;
}
/// <summary>
/// Initializes a new instance of the <see cref="RegionController"/> class,
/// wiring in the region business logic dependency.
/// </summary>
/// <param name="logic">The region business logic.</param>
public RegionController(RegionLogic logic) => this.logic = logic;
/// <summary>
/// Records the block at <paramref name="dataAddress"/> (size <paramref name="dataSize"/>)
/// in <paramref name="store"/>, merging it with an adjacent tracked block when contiguous.
/// Returns false when the block (or the merged result) would exceed
/// <paramref name="segmentSize"/>, or when it is not adjacent to any tracked entry.
/// (Cleanup: collapsed the redundant "long i = 0; long cv = store.CurrentValue; i = cv;"
/// into a single assignment — no behavior change.)
/// </summary>
/// <param name="store">Sorted map of block address -> block size</param>
/// <param name="dataAddress">Start address of the candidate block</param>
/// <param name="dataSize">Size of the candidate block in bytes</param>
/// <param name="segmentSize">Maximum size a merged entry may reach</param>
/// <param name="region">Optional region helper used for equality/containment checks</param>
internal static bool DetectAndMerge(Collections.Generic.ISortedDictionary<long, long> store,
    long dataAddress, long dataSize, int segmentSize = DataBlock.DataBlockDriver.MaxSegmentSize,
    RegionLogic region = null)
{
    if (store.Count == 0)
    {
        // Nothing to merge with; record the block as-is if it fits within a segment.
        if (dataSize > segmentSize)
        {
            return false;
        }
        store.Add(dataAddress, dataSize);
        return true;
    }
    if (store.Search(dataAddress))
    {
        // Exact address already tracked: just grow the recorded size if needed.
        long currSize = store.CurrentValue;
        if (currSize < dataSize)
        {
            store.CurrentValue = dataSize;
        }
        return true;
    }
    //** Detect and merge contiguous deleted blocks: position the cursor just before the
    //** insertion point and probe at most two neighboring entries.
    short passCount = 0;
    if (!store.MovePrevious())
    {
        store.MoveFirst();
    }
    while (true)
    {
        KeyValuePair<long, long>? item = store.CurrentEntry;
        long k2 = item.Value.Key;
        long i = store.CurrentValue; // size of the neighboring tracked block
        if (region != null)
        {
            // Candidate identical to or contained within the tracked entry: nothing to do.
            if (region.Equals(dataAddress, dataSize, k2, i) ||
                region.FirstWithinSecond(dataAddress, dataSize, k2, i))
            {
                return true;
            }
            // Tracked entry contained within the candidate: replace it with the bigger block.
            if (region.FirstWithinSecond(k2, i, dataAddress, dataSize))
            {
                store.Remove(k2);
                store.Add(dataAddress, dataSize);
                return true;
            }
        }
        if (dataAddress + dataSize == k2)
        {
            // Candidate immediately precedes the tracked entry: merge backward.
            long newSize = i + dataSize;
            if (newSize <= segmentSize)
            {
                store.Remove(item.Value.Key);
                store.Add(dataAddress, newSize);
                return true;
            }
            return false;
        }
        if (k2 + i == dataAddress)
        {
            // Candidate immediately follows the tracked entry: extend it in place.
            if (i + dataSize <= segmentSize)
            {
                store.CurrentValue = i + dataSize;
                return true;
            }
            return false;
        }
        if (++passCount >= 2)
        {
            break;
        }
        if (!store.MoveNext())
        {
            break;
        }
    }
    // Non-adjacent block: not recorded here; caller handles the false return.
    return false;
}
/// <summary>
/// RegisterSave is called when a block cache is faulted from memory onto Disk.
/// Resolution of Added blocks is done here so that only "modified" blocks are saved
/// and backed up; newly added block(s) are not saved.
/// </summary>
/// <param name="collection">Collection that is saving the block</param>
/// <param name="blockAddress">Start address of the block being saved</param>
/// <param name="segmentSize">Size in bytes of the region being saved</param>
/// <param name="readPool">Optional IO pool for reads; when null a synchronous backup path is used</param>
/// <param name="writePool">IO pool for writes (used together with readPool)</param>
/// <returns>true when at least one portion was registered for backup, false otherwise</returns>
protected internal override bool RegisterSave(CollectionOnDisk collection, long blockAddress, int segmentSize,
    ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool)
{
    // Transaction stores delegate to the parent transaction logger.
    if (IsTransactionStore(collection))
    {
        return ((TransactionBase)collection.ParentTransactionLogger).RegisterSave(collection, blockAddress,
            segmentSize, readPool, writePool);
    }
    if (LogCollection == null)
    {
        return false;
    }
    /* Step 1. Remove Intersections with Added, Growth segments & Recycled Blocks from region as no need to backup
     * new Blocks
     * Step 2. Copy or backup (any) remaining blocks (the Updated blocks)
     * onto the Transaction Log file for restore on Rollback
     */
    RecordKey key = CreateKey(collection, blockAddress);
    //// if in recycled or add store, don't register for save...
    //if (RegionLogic.IsSegmentInStore(_recycledCollectionStore, key, segmentSize) || InAddStore(key, segmentSize))
    //    return false;
    //** if in file growth segments, don't register for save...
    Region region = RegionLogic.RemoveIntersections(_fileGrowthStore, key, blockAddress, segmentSize);
    if (region == null || region.Count == 0)
    {
        if (_inCommit == 0)
        {
            TrackModification(collection.GetTopParent());
        }
        return false;
    }
    //** pre-size the backup list (heuristic: half the area count, minimum 5)
    int itemCount = region.Count / 2;
    if (itemCount < 5)
    {
        itemCount = 5;
    }
    var regionsForBackup = new List<KeyValuePair<RecordKey, Region>>(itemCount);
    foreach (KeyValuePair<long, int> area in region)
    {
        // Subtract portions intersecting with recycled collection segments.
        key.Address = area.Key;
        Region region2 = RegionLogic.RemoveIntersections(_recycledCollectionStore, key, area.Key, area.Value);
        if (region2 == null || region2.Count <= 0 ||
            ((LogCollection is SortedDictionaryOnDisk) &&
             key.Filename == ((SortedDictionaryOnDisk)LogCollection).File.Filename))
        {
            continue;
        }
        foreach (KeyValuePair<long, int> area2 in region2)
        {
            // Subtract portions intersecting with newly added blocks.
            key.Address = area2.Key;
            Region region3 = RegionLogic.RemoveIntersections(_addStore, key, area2.Key, area2.Value);
            //** Step 2: Backup the "modified" portion(s) of data
            if (region3 == null || region3.Count <= 0)
            {
                continue;
            }
            if (_inCommit == 0)
            {
                TrackModification(collection.GetTopParent());
            }
            // NOTE(review): 'key' is mutated each iteration; confirm RecordKey is a value
            // type (or copied) so queued entries keep distinct addresses.
            regionsForBackup.Add(new KeyValuePair<RecordKey, Region>(key, region3));
        }
    }
    if (readPool != null)
    {
        BackupData(regionsForBackup, readPool, writePool);
    }
    else
    {
        BackupData(regionsForBackup);
    }
    return true;
}
/// <summary>
/// RegisterSave is the only place we do COW (copy-on-write) so the last transaction
/// action can be rolled back from the text log file.
/// </summary>
/// <param name="collection">Collection whose block is being saved</param>
/// <param name="blockAddress">Start address of the block being saved</param>
/// <param name="segmentSize">Size in bytes of the saved segment</param>
/// <param name="readPool">Not used on this path; kept for the override contract</param>
/// <param name="writePool">Not used on this path; kept for the override contract</param>
/// <returns>true when at least one area was backed up (and logged), false otherwise</returns>
protected internal override bool RegisterSave(CollectionOnDisk collection, long blockAddress, int segmentSize,
    ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool)
{
    // Close out the previous RegisterSave entry in the details log, if one is pending.
    if (!string.IsNullOrEmpty(_lastRegisterSaveFilename))
    {
        LoggerTransDetails.Log(string.Format("Successful RegisterSave {0}, {1}, {2}",
            _lastRegisterSaveFilename, _lastRegisterSaveBlockAddress, _lastRegisterSaveSegmentSize));
        _lastRegisterSaveFilename = null;
        _lastRegisterSaveBlockAddress = 0;
        _lastRegisterSaveSegmentSize = 0;
    }
    /* Step 1. Remove Intersections with added/recycled Blocks from region as no need to backup
     * new Blocks
     * Step 2. Copy or backup (any) remaining blocks (the Updated blocks)
     * onto the Transaction Log file for restore on Rollback
     */
    Transaction.RecordKey key = Transaction.CreateKey(collection, blockAddress);
    Region region = RegionLogic.RemoveIntersections(_fileGrowthStore, key, blockAddress, segmentSize);
    if (region == null || region.Count == 0)
    {
        return false;
    }
    bool logOnce = false;
    foreach (KeyValuePair<long, int> area2 in region)
    {
        key.Address = area2.Key;
        Region region2 = RegionLogic.RemoveIntersections(_recycledCollectionStore, key, area2.Key, area2.Value);
        // BUGFIX: was "region.Count > 0", which is always true here (guarded above) —
        // the intent, matching the region3 check below, is to test region2.
        if (region2 != null && region2.Count > 0)
        {
            foreach (KeyValuePair<long, int> area3 in region2)
            {
                key.Address = area3.Key;
                Region region3 = RegionLogic.RemoveIntersections(_addStore, key, area3.Key, area3.Value);
                //** Step 2: Backup the "modified" portion(s) of data
                if (region3 != null && region3.Count > 0)
                {
                    //** foreach disk area in region, copy it to transaction log file
                    foreach (KeyValuePair<long, int> area4 in region3)
                    {
                        BackupData(collection, area4.Key, area4.Value);
                    }
                    // Log the first successful backup for this call and remember it so the
                    // next call can record its completion.
                    if (!logOnce)
                    {
                        logOnce = true;
                        _lastRegisterSaveFilename = collection.File.Filename;
                        _lastRegisterSaveBlockAddress = blockAddress;
                        _lastRegisterSaveSegmentSize = segmentSize;
                        LoggerTransDetails.Log(
                            string.Format("RegisterSave {0}, {1}, {2}",
                                collection.File.Filename, blockAddress, segmentSize));
                    }
                }
            }
        }
    }
    return logOnce;
}