/// <summary>
/// Registers a newly allocated block in the Add store (for use on Rollback),
/// merging it with a contiguous block already registered for the same
/// server/file/collection. Registration is skipped when the block already lies
/// inside a file-growth segment or the recycled-collection store.
/// </summary>
/// <param name="addStore">Store of added blocks, ordered by RecordKey (address).</param>
/// <param name="fileGrowthStore">Store of file-growth segments.</param>
/// <param name="recycledCollectionStore">Store of recycled-collection segments.</param>
/// <param name="key">Key identifying the block's server/file/collection and start address.</param>
/// <param name="blockSize">Size in bytes of the block being registered.</param>
/// <param name="checkIfInGrowthSegments">When true, skip registration if the block
/// is already covered by a growth or recycled segment.</param>
internal static void RegisterAdd(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycledCollectionStore,
    RecordKey key, int blockSize, bool checkIfInGrowthSegments)
{
    //** Check if Block is in Growth Segments
    if (checkIfInGrowthSegments &&
        (RegionLogic.IsSegmentInStore(fileGrowthStore, key, blockSize) ||
         RegionLogic.IsSegmentInStore(recycledCollectionStore, key, blockSize)))
    {
        return;
    }
    //** Add Block to AddStore for use on Rollback...
    if (!addStore.ContainsKey(key))
    {
        short passCount = 0;
        //** Detect and merge contiguous blocks: position the cursor on the entry
        //** just before 'key' (or the first entry) and scan forward; at most two
        //** entries from other files/collections are examined before giving up.
        if (!addStore.MovePrevious())
        {
            addStore.MoveFirst();
        }
        while (!addStore.EndOfTree())
        {
            var de = addStore.CurrentEntry;
            RecordKey k2 = de.Value.Key;
            long i = de.Value.Value;
            if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                k2.Filename == key.Filename &&
                k2.CollectionName == key.CollectionName)
            {
                if (key.Address + blockSize == k2.Address)
                {
                    // New block immediately precedes an existing entry: merge by
                    // re-keying that entry to the new (lower) start address.
                    long newSize = i + blockSize;
                    addStore.Remove(de.Value.Key);
                    k2.Address = key.Address;
                    addStore.Add(k2, newSize);
                    return;
                }
                if (k2.Address + i == key.Address)
                {
                    // New block immediately follows an existing entry: grow it in place.
                    // NOTE(review): unlike AddMerge, this method does not take
                    // addStore.Locker — confirm callers serialize access.
                    addStore.CurrentValue = i + blockSize;
                    return;
                }
                if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i)
                {
                    //** if block is inclusive, don't do anything...
                    return;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            addStore.MoveNext();
        }
        // No contiguous or enclosing neighbor found; register the block as-is.
        addStore.Add(key, blockSize);
    }
}
/// <summary>
/// Convenience overload: builds the RecordKey for the given collection and
/// block address, then delegates to the key-based RegisterAdd.
/// </summary>
/// <param name="addStore">Store of added blocks (for Rollback).</param>
/// <param name="fileGrowthStore">Store of file-growth segments.</param>
/// <param name="recycledCollectionStore">Store of recycled-collection segments.</param>
/// <param name="collection">Collection owning the block; used to build the key.</param>
/// <param name="blockAddress">Start address on disk of the block.</param>
/// <param name="blockSize">Size in bytes of the block.</param>
/// <param name="checkIfInGrowthSegments">When true, skip registration if the block
/// already lies inside a growth or recycled segment.</param>
internal static void RegisterAdd(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycledCollectionStore,
    CollectionOnDisk collection, long blockAddress, int blockSize,
    bool checkIfInGrowthSegments)
{
    RegisterAdd(addStore, fileGrowthStore, recycledCollectionStore,
                CreateKey(collection, blockAddress), blockSize, checkIfInGrowthSegments);
}
/// <summary>
/// Adds a block to the Add store (for use on Rollback) under the store's
/// Locker, merging it with a contiguous block already registered for the same
/// server/file/collection. A block fully enclosed by an existing entry is a
/// no-op; otherwise the block is added as a new entry.
/// </summary>
/// <param name="addStore">Store of added blocks, ordered by RecordKey (address).</param>
/// <param name="key">Key identifying the block's server/file/collection and start address.</param>
/// <param name="blockSize">Size in bytes of the block being registered.</param>
internal static void AddMerge(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    RecordKey key, int blockSize)
{
    // Entire detect-and-merge runs under the store's locker for thread safety.
    addStore.Locker.Invoke(() =>
    {
        // Add Block to AddStore for use on Rollback...
        if (!addStore.ContainsKey(key))
        {
            short passCount = 0;
            // Detect and merge contiguous blocks: position the cursor just before
            // 'key' (or on the first entry) and scan forward; at most two entries
            // from other files/collections are examined before giving up.
            if (!addStore.MovePrevious())
            {
                addStore.MoveFirst();
            }
            while (!addStore.EndOfTree())
            {
                var de = addStore.CurrentEntry;
                RecordKey k2 = de.Value.Key;
                long i = de.Value.Value;
                if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                    k2.Filename == key.Filename &&
                    k2.CollectionName == key.CollectionName)
                {
                    if (key.Address + blockSize == k2.Address)
                    {
                        // New block immediately precedes an existing entry: merge by
                        // re-keying that entry to the new (lower) start address.
                        long newSize = i + blockSize;
                        addStore.Remove(de.Value.Key);
                        k2.Address = key.Address;
                        addStore.Add(k2, newSize);
                        return;
                    }
                    if (k2.Address + i == key.Address)
                    {
                        // New block immediately follows an existing entry: grow it in
                        // place via the indexer (cursor-independent update).
                        addStore[de.Value.Key] = i + blockSize;
                        return;
                    }
                    if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i)
                    {
                        // if block is inclusive, don't do anything...
                        return;
                    }
                }
                else if (++passCount >= 2)
                {
                    break;
                }
                addStore.MoveNext();
            }
            // No contiguous or enclosing neighbor found; register the block as-is.
            addStore.Add(key, blockSize);
        }
    });
}
/// <summary>
/// Records a data segment in the given transaction cache store, merging it
/// with a contiguous segment when possible; otherwise adds it as a new entry.
/// </summary>
/// <param name="store">Cache store keyed by data address, value is segment size.</param>
/// <param name="dataAddress">Start address of the data segment.</param>
/// <param name="dataSize">Size in bytes of the data segment.</param>
/// <param name="segmentSize">Maximum size a merged segment is allowed to reach.</param>
private void AddToTransCache(Collections.Generic.ISortedDictionary<long, long> store,
    long dataAddress, long dataSize, int segmentSize)
{
    // An empty store short-circuits the merge attempt; in both cases the
    // segment ends up added as its own entry.
    if (store.Count == 0 || !DetectAndMerge(store, dataAddress, dataSize, segmentSize))
    {
        store.Add(dataAddress, dataSize);
    }
}
/// <summary>
/// Returns true when the block described by <paramref name="key"/> and
/// <paramref name="blockSize"/> is already present in, contiguous with, or
/// fully enclosed by an entry of the given Add store. Runs under the store's
/// Locker; the store itself is never modified.
/// </summary>
/// <param name="key">Key identifying the block's server/file/collection and start address.</param>
/// <param name="blockSize">Size in bytes of the block being checked.</param>
/// <param name="addStore">Store of added blocks, ordered by RecordKey (address).</param>
/// <returns>True if the block is known to the store; false otherwise.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="addStore"/> is null.</exception>
private static bool InStore(RecordKey key, int blockSize,
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore)
{
    if (addStore == null)
    {
        // nameof keeps the exception message in sync if the parameter is renamed.
        throw new ArgumentNullException(nameof(addStore));
    }
    return (addStore.Locker.Invoke(() =>
    {
        if (addStore.ContainsKey(key))
        {
            return true;
        }
        short passCount = 0;
        //** Detect contiguous or enclosing blocks (no merging here): position the
        //** cursor just before 'key' (or on the first entry) and scan forward; at
        //** most two entries from other files/collections are examined.
        if (!addStore.MovePrevious())
        {
            addStore.MoveFirst();
        }
        while (!addStore.EndOfTree())
        {
            var de = addStore.CurrentEntry;
            RecordKey k2 = de.Value.Key;
            long i = de.Value.Value;
            if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                k2.Filename == key.Filename &&
                k2.CollectionName == key.CollectionName)
            {
                if (key.Address + blockSize == k2.Address)
                {
                    // Block immediately precedes an existing entry.
                    return true;
                }
                if (k2.Address + i == key.Address)
                {
                    // Block immediately follows an existing entry.
                    return true;
                }
                if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i)
                {
                    //** block is fully enclosed by an existing entry.
                    return true;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            addStore.MoveNext();
        }
        return false;
    }));
}
/// <summary>
/// Registers a recycled block with the Add store. Areas of the block already
/// covered by existing backup log entries are excluded: only the portion(s)
/// outside intersecting logs get registered.
/// NOTE(review): this method always returns true — presumably the bool return
/// is kept for interface compatibility; verify against callers.
/// </summary>
/// <param name="addStore">Store of added blocks (for Rollback).</param>
/// <param name="recycleStore">Store of recycled blocks (currently unused here; see
/// the disabled InStore check below).</param>
/// <param name="collection">Collection owning the block; used to build the key.</param>
/// <param name="blockAddress">Start address on disk of the block.</param>
/// <param name="blockSize">Size in bytes of the block.</param>
/// <returns>Always true.</returns>
internal static bool RegisterRecycle(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycleStore,
    CollectionOnDisk collection, long blockAddress, int blockSize)
{
    var key = CreateKey(collection, blockAddress);
    //if (InStore(key, blockSize, recycleStore))
    //    return false;
    BackupDataLogKey logKey = new BackupDataLogKey();
    logKey.SourceFilename = collection.File.Filename;
    logKey.SourceDataAddress = blockAddress;
    IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> intersectingLogs;
    long mergedBlockStartAddress, mergedBlockSize;
    // NOTE(review): GetIntersectingLogs semantics inferred from usage — a null
    // intersectingLogs with a true return appears to mean "no intersections";
    // confirm against its definition.
    if (GetIntersectingLogs(logKey, blockSize, out intersectingLogs, out mergedBlockStartAddress,
                            out mergedBlockSize))
    {
        if (intersectingLogs == null)
        {
            // No intersecting backup logs: register the whole block.
            RegisterAdd(addStore, null, null, collection, blockAddress, blockSize, false);
            return (true);
        }
        // get area(s) outside each intersecting segment and back it up...
        Region newRegion = new Region(blockAddress, blockSize);
        bool wasIntersected = false;
        foreach (KeyValuePair<BackupDataLogKey, BackupDataLogValue> entry in intersectingLogs)
        {
            if (newRegion.Subtract(entry.Key.SourceDataAddress, entry.Value.DataSize))
            {
                wasIntersected = true;
            }
        }
        if (wasIntersected)
        {
            // Register only the sub-areas left after removing logged regions.
            foreach (KeyValuePair<long, int> newArea in newRegion)
            {
                RegisterAdd(addStore, null, null, collection, newArea.Key, newArea.Value, false);
            }
            return (true);
        }
        // NOTE(review): when logs intersected but no Subtract succeeded, control
        // falls through and the WHOLE block is registered below — confirm intended.
    }
    RegisterAdd(addStore, null, null, collection, blockAddress, blockSize, false);
    return (true);
}
/// <summary>
/// Checks whether a certain area on disk (segment) is fully contained in any
/// of the segment entries of the given store. The store is only read, under
/// its Locker.
/// </summary>
/// <param name="fileGrowthStore">Store of segments, ordered by RecordKey (address).</param>
/// <param name="key">Key identifying the target server/file and start address.</param>
/// <param name="blockSize">Size in bytes of the area being checked.</param>
/// <returns>True when the area lies inside a stored segment; false otherwise.</returns>
public bool IsSegmentInStore(
    Collections.Generic.ISortedDictionary<Transaction.Transaction.RecordKey, long> fileGrowthStore,
    Transaction.Transaction.RecordKey key, int blockSize)
{
    fileGrowthStore.Locker.Lock();
    try
    {
        // Exact-address hit: a stored segment starts right at the key's address.
        if (fileGrowthStore.ContainsKey(key))
        {
            return true;
        }
        short passCount = 0;
        // Position the cursor just before the key (or on the first entry) and
        // scan forward; at most two entries from other files are examined.
        if (!fileGrowthStore.MovePrevious())
        {
            fileGrowthStore.MoveFirst();
        }
        while (!fileGrowthStore.EndOfTree())
        {
            var entry = fileGrowthStore.CurrentEntry;
            if (entry == null)
            {
                break;
            }
            var candidate = entry.Value.Key;
            if (candidate.ServerSystemFilename == key.ServerSystemFilename &&
                candidate.Filename == key.Filename)
            {
                long storedSize = entry.Value.Value;
                // Containment: [key.Address, key.Address + blockSize] within
                // [candidate.Address, candidate.Address + storedSize].
                if (key.Address >= candidate.Address &&
                    key.Address + blockSize <= candidate.Address + storedSize)
                {
                    return true;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            fileGrowthStore.MoveNext();
        }
        return false;
    }
    finally
    {
        fileGrowthStore.Locker.Unlock();
    }
}
/// <summary>
/// Returns true when the block described by <paramref name="key"/> and
/// <paramref name="blockSize"/> is already present in, contiguous with, or
/// fully enclosed by an entry of this transaction's Add store (_addStore).
/// The store is only read, never modified.
/// NOTE(review): unlike the static InStore, this runs without taking the
/// store's Locker — confirm callers serialize access.
/// </summary>
/// <param name="key">Key identifying the block's server/file/collection and start address.</param>
/// <param name="blockSize">Size in bytes of the block being checked.</param>
/// <returns>True if the block is known to the Add store; false otherwise.</returns>
private bool InAddStore(RecordKey key, int blockSize)
{
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore = _addStore;
    if (!addStore.ContainsKey(key))
    {
        short passCount = 0;
        //** Detect contiguous or enclosing blocks: position the cursor just before
        //** 'key' (or on the first entry) and scan forward.
        // Fix: use the local alias consistently instead of mixing it with the
        // _addStore field (same object, but the mixed usage was error-prone).
        if (!addStore.MovePrevious())
        {
            addStore.MoveFirst();
        }
        while (!addStore.EndOfTree())
        {
            var de = addStore.CurrentEntry;
            RecordKey k2 = de.Value.Key;
            long i = de.Value.Value;
            if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                k2.Filename == key.Filename &&
                k2.CollectionName == key.CollectionName)
            {
                if (key.Address + blockSize == k2.Address)
                {
                    // Block immediately precedes an existing entry.
                    return true;
                }
                if (k2.Address + i == key.Address)
                {
                    // Block immediately follows an existing entry.
                    return true;
                }
                if (key.Address >= k2.Address && key.Address + blockSize <= k2.Address + i)
                {
                    //** block is fully enclosed by an existing entry.
                    return true;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            addStore.MoveNext();
        }
    }
    return false;
}
/// <summary>
/// Registers a newly added block unless it already lies within a file-growth
/// or recycled-collection segment; contiguous-block merging is delegated to
/// AddMerge, which runs under the Add store's Locker.
/// </summary>
/// <param name="addStore">Store of added blocks (for Rollback).</param>
/// <param name="fileGrowthStore">Store of file-growth segments.</param>
/// <param name="recycledCollectionStore">Store of recycled-collection segments.</param>
/// <param name="key">Key identifying the block's server/file/collection and start address.</param>
/// <param name="blockSize">Size in bytes of the block being registered.</param>
/// <param name="checkIfInGrowthSegments">When true, skip registration if the block
/// already lies inside a growth or recycled segment.</param>
internal static void RegisterAdd(
    Collections.Generic.ISortedDictionary<RecordKey, long> addStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    Collections.Generic.ISortedDictionary<RecordKey, long> recycledCollectionStore,
    RecordKey key, int blockSize, bool checkIfInGrowthSegments)
{
    /* todo: complete the story for: RegisterAdd, ...Save, ...Remove, ...Recycle, ...FileGrowth
     * Logic table:
     * Add    Save (Update)/Remove    Recycle    FileGrowth
     * - FileGrowth blocks can be deleted, then re-allocated for Add
     * - Block can be allocated for Add, Deleted (will create Updated blocks) if item
     *   is deleted, then re-Allocated for Add. */

    // Bail out when the block is already inside a growth, recycled-collection,
    // or recycled-blocks segment.
    if (checkIfInGrowthSegments &&
        (RegionLogic.IsSegmentInStore(fileGrowthStore, key, blockSize) ||
         RegionLogic.IsSegmentInStore(recycledCollectionStore, key, blockSize)))
    {
        return;
    }
    AddMerge(addStore, key, blockSize);
}
/// <summary>
/// RemoveIntersections checks whether an area on disk is already inclusive in
/// any of the segment areas stored as entries in a store (addStore). If the
/// input area is fully covered this function returns null; otherwise it
/// returns a region equal to the input area minus any intersecting segment
/// area(s) found in the store. The store is only read, under its Locker.
/// </summary>
/// <param name="addStore">Store of segments, ordered by RecordKey (address).</param>
/// <param name="key">Key identifying the target server/file and start address.</param>
/// <param name="blockAddress">Start address of the area to check.</param>
/// <param name="segmentSize">Size in bytes of the area to check.</param>
/// <returns>Null when fully covered; otherwise the remaining (uncovered) region.</returns>
/// <exception cref="InvalidOperationException">Thrown when a stored segment's size
/// exceeds int.MaxValue (unsupported within a transaction).</exception>
public Region RemoveIntersections(
    Collections.Generic.ISortedDictionary<Transaction.Transaction.RecordKey, long> addStore,
    Transaction.Transaction.RecordKey key, long blockAddress, int segmentSize)
{
    addStore.Locker.Lock();
    try
    {
        long size;
        if (addStore.TryGetValue(key, out size))
        {
            // Exact-address entry found; if it is at least as big as the
            // requested area, the whole area is already covered.
            if (size >= segmentSize)
            {
                return null;
            }
            // NOTE(review): when the entry is smaller, scanning continues from
            // wherever TryGetValue left the cursor — confirm that is intended.
        }
        else if (!addStore.MovePrevious())
        {
            addStore.MoveFirst();
        }
        //** Step 1
        //** Starting from current block until block whose address is > BlockAddress + SegmentSize...
        var region = new Region(blockAddress, segmentSize);
        Transaction.Transaction.RecordKey k2;
        short passCount = 0;
        while (!addStore.EndOfTree())
        {
            var de = addStore.CurrentEntry;
            k2 = de.Value.Key;
            if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                k2.Filename == key.Filename)
            // && k2.CollectionName == key.CollectionName)
            {
                size = de.Value.Value;
                // Entries are address-ordered: once a segment starts past the end
                // of the requested area, no further intersection is possible.
                if (k2.Address >= blockAddress + segmentSize)
                {
                    break;
                }
                if (size > int.MaxValue)
                {
                    // Fix: message joined onto one line — a regular C# string
                    // literal cannot contain a raw line break (compile error in the
                    // original) — and the unbalanced '(' in it closed.
                    throw new InvalidOperationException(
                        string.Format(
                            "Updated segment Size({0}) reached > int.MaxValue which isn't supported in a transaction. Keep your transaction smaller by Committing more often",
                            size));
                }
                if (Intersect(k2.Address, (int)size, blockAddress, segmentSize))
                {
                    region.Subtract(k2.Address, (int)size);
                    if (region.Count == 0)
                    {
                        // Entire requested area got carved away: fully covered.
                        return null;
                    }
                }
                else if (++passCount >= 2)
                {
                    break;
                }
            }
            else if (++passCount >= 2)
            {
                break;
            }
            if (!addStore.MoveNext())
            {
                break;
            }
        }
        return region;
    }
    finally
    {
        addStore.Locker.Unlock();
    }
}
/// <summary>
/// Registers a file-growth segment in the growth store under its Locker,
/// merging it with an adjacent segment of the same server/file when possible.
/// Unless the growth is for a recycled collection, the expansion is written to
/// the transaction's append log first.
/// </summary>
/// <param name="fileGrowthStore">Store of growth segments, ordered by RecordKey (address).</param>
/// <param name="collection">Collection whose file grew; used to build the key and for logging.</param>
/// <param name="segmentAddress">Start address of the new segment.</param>
/// <param name="segmentSize">Size in bytes of the new segment.</param>
/// <param name="recycleCollection">True when growth is for a recycled collection (skips logging).</param>
/// <returns>Always true on success.</returns>
/// <exception cref="InvalidOperationException">Thrown when the same region was already
/// registered as expanded.</exception>
internal static bool RegisterFileGrowth(Collections.Generic.ISortedDictionary<RecordKey, long> fileGrowthStore,
    CollectionOnDisk collection, long segmentAddress, long segmentSize, bool recycleCollection)
{
    fileGrowthStore.Locker.Lock();
    try
    {
        RecordKey key = CreateKey(collection, segmentAddress);
        if (!fileGrowthStore.ContainsKey(key))
        {
            if (!recycleCollection)
            {
                // Log the growth so it can be replayed/undone on recovery.
                if (collection.Transaction is Transaction)
                {
                    ((Transaction)collection.Transaction).AppendLogger.LogLine(
                        "{0}{1} {2} {3}", GrowToken, collection.File.Filename, segmentAddress, segmentSize);
                }
            }
            // Position the cursor just before the key (or on the first entry) and
            // scan forward, looking for a segment adjacent to the new one.
            if (!fileGrowthStore.MovePrevious())
            {
                fileGrowthStore.MoveFirst();
            }
            short moveNextCount = 0;
            RecordKey k2;
            KeyValuePair<RecordKey, long>? de;
            while (!fileGrowthStore.EndOfTree())
            {
                de = fileGrowthStore.CurrentEntry;
                k2 = de.Value.Key;
                long i = de.Value.Value;
                if (k2.ServerSystemFilename == key.ServerSystemFilename &&
                    k2.Filename == key.Filename)
                {
                    if (segmentAddress + segmentSize == k2.Address)
                    {
                        // New segment immediately precedes an existing one: merge by
                        // re-keying the existing entry to the new (lower) address.
                        long newSize = i + segmentSize;
                        fileGrowthStore.Remove(de.Value.Key);
                        k2.Address = segmentAddress;
                        fileGrowthStore.Add(k2, newSize);
                        return (true);
                    }
                    if (k2.Address + i == segmentAddress)
                    {
                        // New segment immediately follows an existing one: grow it in
                        // place, but only while the merged size still fits in an int.
                        long expandedSegmentSize = i + segmentSize;
                        if (expandedSegmentSize <= int.MaxValue)
                        {
                            fileGrowthStore[de.Value.Key] = expandedSegmentSize;
                            return (true);
                        }
                        // NOTE(review): when the merged size exceeds int.MaxValue the
                        // loop simply continues and the segment is added separately
                        // below — confirm that is the intended fallback.
                    }
                }
                if (++moveNextCount >= 2)
                {
                    break;
                }
                fileGrowthStore.MoveNext();
            }
            // No adjacent segment found (or merge not possible): add as-is.
            fileGrowthStore.Add(key, segmentSize);
            return (true);
        }
        throw new InvalidOperationException(
            string.Format("File '{0}' region '{1}' already expanded.", key.Filename, key.Address)
            );
    }
    finally
    {
        fileGrowthStore.Locker.Unlock();
    }
}
/// <summary>
/// Tries to record a data segment in the store, merging it with an existing
/// contiguous entry when the merged size stays within segmentSize. Returns
/// false when the segment (or the merged result) would exceed segmentSize and
/// could not be recorded; the caller is then expected to add it separately.
/// </summary>
/// <param name="store">Store keyed by segment start address, value is segment size.</param>
/// <param name="dataAddress">Start address of the segment.</param>
/// <param name="dataSize">Size in bytes of the segment.</param>
/// <param name="segmentSize">Maximum size a (merged) segment may reach.</param>
/// <param name="region">Optional region helper used for equality/containment checks;
/// NOTE(review): Equals/FirstWithinSecond semantics inferred from usage — confirm.</param>
/// <returns>True when the segment was recorded or already covered; false otherwise.</returns>
internal static bool DetectAndMerge(Collections.Generic.ISortedDictionary<long, long> store,
    long dataAddress, long dataSize,
    int segmentSize = DataBlock.DataBlockDriver.MaxSegmentSize,
    RegionLogic region = null)
{
    if (store.Count == 0)
    {
        // Oversized segments are never recorded.
        if (dataSize > segmentSize)
        {
            return false;
        }
        store.Add(dataAddress, dataSize);
        return true;
    }
    if (store.Search(dataAddress))
    {
        // Entry exists at this exact address: keep the larger of the two sizes.
        long currSize = store.CurrentValue;
        if (currSize < dataSize)
        {
            store.CurrentValue = dataSize;
        }
        return true;
    }
    //** Detect and merge contiguous deleted blocks: Search positioned the cursor
    //** near dataAddress; examine at most two neighboring entries.
    short passCount = 0;
    if (!store.MovePrevious())
    {
        store.MoveFirst();
    }
    while (true)
    {
        KeyValuePair<long, long>? item = store.CurrentEntry;
        long k2 = item.Value.Key;
        // Fix: collapsed the original "long i = 0; long cv = store.CurrentValue;
        // i = cv;" variable dance into a single assignment (same value read).
        long i = store.CurrentValue;
        if (region != null)
        {
            if (region.Equals(dataAddress, dataSize, k2, i) ||
                region.FirstWithinSecond(dataAddress, dataSize, k2, i))
            {
                // Segment identical to or contained in the current entry: covered.
                return true;
            }
            if (region.FirstWithinSecond(k2, i, dataAddress, dataSize))
            {
                // Current entry contained in the new segment: replace it.
                store.Remove(k2);
                store.Add(dataAddress, dataSize);
                return true;
            }
        }
        if (dataAddress + dataSize == k2)
        {
            // New segment immediately precedes the current entry: merge by
            // re-keying the entry to the new (lower) start address.
            long newSize = i + dataSize;
            if (newSize <= segmentSize)
            {
                store.Remove(item.Value.Key);
                store.Add(dataAddress, newSize);
                return true;
            }
            return false;
        }
        if (k2 + i == dataAddress)
        {
            // New segment immediately follows the current entry: grow it in place.
            if (i + dataSize <= segmentSize)
            {
                store.CurrentValue = i + dataSize;
                return true;
            }
            return false;
        }
        if (++passCount >= 2)
        {
            break;
        }
        if (!store.MoveNext())
        {
            break;
        }
    }
    return false;
}
/// <summary>
/// Instance convenience wrapper: delegates to the static DetectAndMerge,
/// supplying this transaction's RegionLogic helper (_region).
/// </summary>
/// <param name="store">Store keyed by segment start address, value is segment size.</param>
/// <param name="dataAddress">Start address of the segment.</param>
/// <param name="dataSize">Size in bytes of the segment.</param>
/// <param name="segmentSize">Maximum size a (merged) segment may reach.</param>
/// <returns>True when the segment was recorded or already covered; false otherwise.</returns>
private bool DetectAndMerge(Collections.Generic.ISortedDictionary<long, long> store,
    long dataAddress, long dataSize, int segmentSize)
{
    return DetectAndMerge(store, dataAddress, dataSize, segmentSize, _region);
}