internal Sop.DataBlock ReadBlockFromDisk(Algorithm.Collection.ICollectionOnDisk parent,
    long dataAddress, bool getForRemoval, Sop.DataBlock block)
{
    Sop.DataBlock d = block;
    ReadBlockFromDiskOrInitializeIfEmpty(parent, dataAddress, getForRemoval, d);
    return ReadBlock(parent, d, getForRemoval);
}
/// <summary>
/// Delete the Collection for recycling.
/// </summary>
public void Delete(Algorithm.Collection.ICollectionOnDisk parent)
{
    if (HeaderData != null && HeaderData.OccupiedBlocksHead != null)
    {
        //** add deleted collection start and end block info to the deleted blocks collection of the File
        var dbi = new DeletedBlockInfo
        {
            StartBlockAddress = HeaderData.OccupiedBlocksHead.DataAddress,
            EndBlockAddress = HeaderData.EndAllocatableAddress
        };
        bool oc = false;
        if (parent.File.DeletedCollections != null)
        {
            oc = ((Algorithm.BTree.IBTreeAlgorithm)parent.File.DeletedCollections).ChangeRegistry;
            ((Algorithm.BTree.IBTreeAlgorithm)parent.File.DeletedCollections).ChangeRegistry =
                ((Algorithm.BTree.IBTreeAlgorithm)parent).ChangeRegistry;
            parent.File.DeletedCollections.Add(dbi);
        }
        //** Reset count to 0 and save the header to disk
        HeaderData.Count = 0;
        if (!parent.IsTransactionStore)
            parent.RegisterChange(true);
        if (parent.File.DeletedCollections != null)
            ((Algorithm.BTree.IBTreeAlgorithm)parent.File.DeletedCollections).ChangeRegistry = oc;
        HeaderData.Clear();
    }
}
public void ReadBlockFromDisk(Algorithm.Collection.ICollectionOnDisk parent,
    List<Algorithm.BTree.BTreeItemOnDisk> items, System.Func<int, bool> readCallback)
{
    var sortedBlocks = new Sop.Collections.Generic.SortedDictionary<long, int>();
    var dataSegments = new Sop.Collections.Generic.SortedDictionary<long, long>();
    for (int i = 0; i < items.Count; i++)
    {
        var address = GetId(items[i].Value.DiskBuffer);
        sortedBlocks.Add(address, i);
    }
    dataSegments.Clear();
    // detect contiguous blocks & read these data blocks as a bigger segment for optimal reading.
    KeyValuePair<long, int> lastEntry;
    List<KeyValuePair<long, int>> blockAddresses = new List<KeyValuePair<long, int>>();
    foreach (var entry in sortedBlocks)
    {
        lastEntry = entry;
        var address = entry.Key;
        blockAddresses.Add(entry);
        if (!Algorithm.BTree.IndexedBlockRecycler.DetectAndMerge(dataSegments, address,
            items[entry.Value].Value.DiskBuffer.contiguousBlockCount * (int)parent.DataBlockSize,
            MaxSegmentSize))
        {
            _readAheadBuffer.Clear();
            dataSegments.MoveFirst();
            _readAheadBuffer.Read(parent.FileStream, dataSegments.CurrentKey, (int)dataSegments.CurrentValue);
            foreach (var addr in blockAddresses)
            {
                var rab = new DataBlockReadBufferLogic(_readAheadBuffer);
                var block = ReadBlockFromDisk(parent, addr.Key, false);
                items[addr.Value].Value.DiskBuffer = block;
                // process (deserialize the Object) the read blocks...
                readCallback(addr.Value);
                _readAheadBuffer = rab;
            }
            blockAddresses.Clear();
            dataSegments.Clear();
            dataSegments.Add(address,
                items[entry.Value].Value.DiskBuffer.contiguousBlockCount * (int)parent.DataBlockSize);
        }
    }
    // process the last data segment...
    if (dataSegments.Count > 0)
    {
        _readAheadBuffer.Clear();
        dataSegments.MoveFirst();
        _readAheadBuffer.Read(parent.FileStream, dataSegments.CurrentKey, (int)dataSegments.CurrentValue);
        foreach (var addr in blockAddresses)
        {
            var rab = new DataBlockReadBufferLogic(_readAheadBuffer);
            var block = ReadBlockFromDisk(parent, addr.Key, false);
            items[addr.Value].Value.DiskBuffer = block;
            // process (deserialize the Object) the read blocks...
            readCallback(addr.Value);
            _readAheadBuffer = rab;
        }
    }
}
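// Illustrative sketch (not part of the SOP sources): the bulk read above coalesces sorted, adjacent
// block addresses into larger segments so one big read can replace many small ones. The helper below
// (MergeIntoSegments, a hypothetical name) shows that merge rule in isolation, assuming a fixed block
// size, a maximum segment size, and System.Collections.Generic in scope.
private static List<KeyValuePair<long, long>> MergeIntoSegments(IEnumerable<long> sortedAddresses,
    int blockSize, int maxSegmentSize)
{
    var segments = new List<KeyValuePair<long, long>>();    // (segment start address, size in bytes)
    long segStart = -1, segSize = 0;
    foreach (var address in sortedAddresses)
    {
        // extend the current segment only if the block is adjacent and the size cap is not exceeded
        if (segStart >= 0 && address == segStart + segSize && segSize + blockSize <= maxSegmentSize)
        {
            segSize += blockSize;
            continue;
        }
        if (segStart >= 0)
            segments.Add(new KeyValuePair<long, long>(segStart, segSize));
        segStart = address;
        segSize = blockSize;
    }
    if (segStart >= 0)
        segments.Add(new KeyValuePair<long, long>(segStart, segSize));
    return segments;
}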
public Sop.DataBlock ReadBlockFromDisk(Algorithm.Collection.ICollectionOnDisk parent, bool getForRemoval)
{
    //var r = MruManager[parent.CurrentEntryDataAddress];
    //if (r == null)
    var r = CreateBlock(parent.DataBlockSize);
    return ReadBlockFromDisk(parent, parent.CurrentEntryDataAddress, getForRemoval, r);
}
/// <summary>
/// Add Collection to the Collections' Pool.
/// </summary>
/// <param name="collection"></param>
public void AddToPool(Algorithm.Collection.ICollectionOnDisk collection)
{
    if (IsClosing)
    {
        CollectionsPool.Remove(collection.InMemoryId);
    }
    else
    {
        //if (!CollectionsPool.Contains(CollName))
        CollectionsPool[collection.InMemoryId] = collection;
    }
}
private void SetDiskAddress(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block, bool addToMru)
{
    if (block.DataAddress < 0)
    {
        // reserve the block chunk of space on disk to the block and put it in MRU,
        // MRU Manager will take care of making another call to physically write the
        // block when it's appropriate (during MRU fault).
        block.DataAddress = parent.FileStream.Position;
        parent.FileStream.Seek(block.Length, SeekOrigin.Current);
    }
    SetIsDirty(block, true);    //ensure block will be written by MRUManager
}
internal void Initialize(Algorithm.Collection.ICollectionOnDisk parent, HeaderData hd)
{
    HeaderData = hd;
    if (HeaderData != null)
        return;
    HeaderData = new HeaderData { DiskBuffer = CreateBlock(parent.DataBlockSize) };
}
/// <summary>
/// Assign block(s) of space on disk to the Data Block(s).
/// The assigned disk blocks' file offsets will be set as the blocks' DataAddresses.
/// </summary>
/// <param name="parent"></param>
/// <param name="block">Data Block which will be assigned a block of space on disk</param>
/// <param name="isCollectionBlock"> </param>
/// <param name="addToMru">true will put the block into MRU, else not</param>
public void SetDiskBlock(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block,
    bool isCollectionBlock, bool addToMru = true)
{
    Sop.DataBlock prevBlock = null;
    bool isHead = true;
    bool blockExtended = false;
    while (block != null)
    {
        if (block.DataAddress == -1)
        {
            if (prevBlock != null)
                blockExtended = true;
            AddBlockToDisk(parent, block, isHead, isCollectionBlock, addToMru);
        }
        if (prevBlock != null)
        {
            var db = ((CollectionOnDisk)parent).Blocks[GetId(prevBlock)];
            if (db != null)
            {
                if (db.InternalNextBlockAddress != -1)
                    prevBlock.InternalNextBlockAddress = db.InternalNextBlockAddress;
                ((CollectionOnDisk)parent).Blocks[GetId(prevBlock)] = prevBlock;
            }
            if (prevBlock.NextItemAddress != block.DataAddress)
            {
                if (blockExtended)
                    Log.Logger.Instance.Log(Log.LogLevels.Verbose,
                        "DataBlockDriver.SetDiskBlock: block({0}) got extended.", prevBlock.DataAddress);
                prevBlock.NextItemAddress = block.DataAddress;
                SetDiskAddress(parent, prevBlock, addToMru);
            }
            else
            {
                //ensure blocks will be written by MRUManager
                SetIsDirty(prevBlock, true);
                SetIsDirty(block, true);
            }
        }
        prevBlock = block;
        block = block.Next;
        isHead = false;
    }
}
/// Header:
///     OccupiedBlock Head
///     OccupiedBlock Tail
///     DeletedBlock Head
///     DeletedBlock Tail
///
/// Layout in Disk:
///     Byte 0: Available or Occupied flag
///     Byte 1 to 8: Next Item Address (64 bit long int)
///     Byte 9 to 10: Size Occupied
///     Byte 11 to 11 + Size Occupied: USER DATA
/// Disk Layout:
///     Block 1 -> Block 2 -> Block 3 -> Block n
private void AddBlockToDisk(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block,
    bool isHead, bool isCollectionBlock, bool addToMru)
{
    parent.IsDirty = true;
    if (block == null)
        throw new ArgumentNullException("block");
    Sop.DataBlock currentBlock = block, prevBlock = null;
    //** save linked Blocks...
    do
    {
        if (AllocateNextBlock(parent, currentBlock, isHead, isCollectionBlock, addToMru))
        {
            SetIsDirty(currentBlock, true);    //ensure block will be written by MRUManager
            Sop.DataBlock db = currentBlock.Next;
            while (db != null)
            {
                SetIsDirty(db, true);    //ensure block will be written by MRUManager
                if (addToMru && db.SizeOccupied > 0)
                    ((CollectionOnDisk)parent).Blocks.Add(db.DataAddress, db);
                //MruManager.Add(db.DataAddress, db);
                db = db.Next;
            }
            return;
        }
        SetIsDirty(currentBlock, true);    //ensure block will be written by MRUManager
        if (prevBlock != null)
        {
            long prevId = GetId(prevBlock);
            Sop.DataBlock db;
            if (((CollectionOnDisk)parent).Blocks.TryGetValue(prevId, out db))
            {
                if (db.InternalNextBlockAddress != -1)
                    prevBlock.InternalNextBlockAddress = db.InternalNextBlockAddress;
                ((CollectionOnDisk)parent).Blocks[prevId] = prevBlock;
            }
            prevBlock.NextItemAddress = currentBlock.DataAddress;
            SetDiskAddress(parent, prevBlock, addToMru);
        }
        prevBlock = currentBlock;
        currentBlock = currentBlock.Next;
        isHead = false;
    } while (currentBlock != null && currentBlock.DataAddress == -1);
    SetDiskAddress(parent, prevBlock, addToMru);
}
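// Illustrative sketch (not part of the SOP sources): constants derived from the on-disk layout
// documented above (1 flag byte + 8 bytes next-item address + 2 bytes size-occupied = 11 bytes of
// header per block), showing how much user data a block of a given size can carry. The type name
// BlockLayout is hypothetical.
internal static class BlockLayout
{
    public const int OccupiedFlagSize = 1;      // Byte 0: Available or Occupied flag
    public const int NextItemAddressSize = 8;   // Bytes 1-8: Next Item Address (64-bit long)
    public const int SizeOccupiedSize = 2;      // Bytes 9-10: Size Occupied
    public const int HeaderSize = OccupiedFlagSize + NextItemAddressSize + SizeOccupiedSize;    // = 11

    // Usable payload for a given raw block size, e.g. a 512-byte block carries 501 bytes of user data.
    public static int UserDataSize(int blockSize) { return blockSize - HeaderSize; }
}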
/// <summary>
/// Close the file.
/// </summary>
public virtual void Close()
{
    if (IsClosing || _store == null || _store.FileStream == null)
        return;
    if (IsDirty)
        Flush();
    if (_storeAddress == -1)
        _storeAddress = _store.DataAddress;
    IsClosing = true;
    try
    {
        if (DeletedCollections != null)
        {
            DeletedCollections.Close();
            if (_deletedCollectionsAddress == -1)
                _deletedCollectionsAddress = DeletedCollections.DataAddress;
        }
        if (CollectionsPool != null && CollectionsPool.Count > 0)
        {
            var colls = new Algorithm.Collection.ICollectionOnDisk[CollectionsPool.Count];
            CollectionsPool.Values.CopyTo(colls, 0);
            for (int i = 0; i < colls.Length; i++)
                colls[i].Close();
            CollectionsPool.Clear();
        }
        if (_store != null)
            _store.Close();
        _store = null;
        if (_diskBuffer != null)
            _diskBuffer.ClearData();
    }
    finally
    {
        IsClosing = false;
    }
}
/// <summary>
/// Read Block from Disk.
/// </summary>
/// <returns></returns>
private Sop.DataBlock ReadBlockFromDiskOrInitializeIfEmpty(Algorithm.Collection.ICollectionOnDisk parent,
    long dataAddress, bool getForRemoval, Sop.DataBlock block)
{
    if (parent.FileStream.Length > 0)
        block = ReadBlockFromDisk(parent, dataAddress, getForRemoval, getForRemoval, block);
    else
        block.Initialize();
    return block;
}
private Sop.DataBlock ReadBlock(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block,
    bool getForRemoval)
{
    // Read the block
    if (block.SizeAvailable == 0 || (block.Next == null && block.NextItemAddress >= 0))
    {
        Sop.DataBlock d = block;
        //** read the rest of the blocks
        while (d.NextItemAddress >= 0)
            d = ReadNextBlock(parent, d, getForRemoval);
    }
    return block;
}
protected internal void RemoveFromPool(Algorithm.Collection.ICollectionOnDisk collection, bool willClose)
{
    if (CollectionsPool != null)
    {
        var cod = collection;
        if (cod != null)
        {
            if (willClose)
                cod.Close();
            CollectionsPool.Remove(collection.InMemoryId);
        }
    }
}
private void OpenOrCloseStream(bool open)
{
    if (DeletedCollections != null)
    {
        if (open)
            ((IInternalFileEntity)DeletedCollections).OpenStream();
        else
            ((IInternalFileEntity)DeletedCollections).CloseStream();
    }
    if (CollectionsPool != null && CollectionsPool.Count > 0)
    {
        var colls = new Algorithm.Collection.ICollectionOnDisk[CollectionsPool.Count];
        CollectionsPool.Values.CopyTo(colls, 0);
        for (int i = 0; i < colls.Length; i++)
        {
            ((ISynchronizer)colls[i].SyncRoot).Invoke(() =>
            {
                if (open)
                    ((IInternalFileEntity)colls[i]).OpenStream();
                else
                    ((IInternalFileEntity)colls[i]).CloseStream();
            });
        }
    }
    if (_store != null)
    {
        _store.Locker.Invoke(() =>
        {
            if (open)
                ((IInternalFileEntity)_store).OpenStream();
            else
                ((IInternalFileEntity)_store).CloseStream();
        });
    }
}
/// <summary>
/// Backup the target blocks on disk.
/// </summary>
/// <param name="readPool"></param>
/// <param name="writePool"></param>
/// <param name="parent"></param>
/// <param name="source"></param>
/// <param name="dataChunks"></param>
public void Backup(ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool,
    Algorithm.Collection.ICollectionOnDisk parent, byte[] source, List<DataChunk> dataChunks)
{
    ITransactionLogger trans = parent.Transaction;
    if (trans != null)
    {
        Sop.Transaction.Transaction.LogTracer.Verbose("BulkWriter.Backup: Start for Thread {0}.",
            Thread.CurrentThread.ManagedThreadId);
        foreach (var chunk in dataChunks)
        {
            Sop.Transaction.Transaction.LogTracer.Verbose(
                "BulkWriter.Backup: inside foreach chunk {0} Thread {1}.",
                chunk.TargetDataAddress, Thread.CurrentThread.ManagedThreadId);
            // Identify regions that were not backed up and overwritten yet, then back them up...
            ((TransactionBase)trans).RegisterSave((CollectionOnDisk)parent, chunk.TargetDataAddress,
                chunk.Size, readPool, writePool);
        }
    }
}
private Sop.DataBlock ReadNextBlock(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock d,
    bool getForRemoval)
{
    // read the Next block
    if (d.NextItemAddress != -1)
    {
        if (d.Next == null)
            d.Next = CreateBlock((DataBlockSize)d.Length);
        d.Next.DataAddress = d.NextItemAddress;
    }
    d = d.Next;
    if (d != null && d.DataAddress >= 0)
        ReadBlockFromDiskOrInitializeIfEmpty(parent, d.DataAddress, getForRemoval, d);
    return d;
}
/// <summary>
/// For SOP internal use.
/// </summary>
public void MarkNotDirty()
{
    if (DeletedCollections != null)
        DeletedCollections.IsUnloading = true;
    if (CollectionsPool != null && CollectionsPool.Count > 0)
    {
        var colls = new Algorithm.Collection.ICollectionOnDisk[CollectionsPool.Count];
        CollectionsPool.Values.CopyTo(colls, 0);
        for (int i = 0; i < colls.Length; i++)
        {
            ((ISynchronizer)colls[i].SyncRoot).Invoke(() => { colls[i].IsUnloading = true; });
        }
    }
    if (_store != null)
        _store.Locker.Invoke(() => { _store.IsUnloading = true; });
}
/// <summary>
/// Write a group of Blocks onto Disk. NOTE: it will be more optimal if the Blocks
/// are sorted by their Data Address so this function can write contiguous blocks
/// in one async write.
/// </summary>
public int WriteBlocksToDisk(Algorithm.Collection.ICollectionOnDisk parent,
    IDictionary<long, Sop.DataBlock> blocks, bool clear)
{
    if (!parent.IsOpen)
        return 0;
    var blockSize = (int)parent.DataBlockSize;
    int chunkSize = MaxSegmentSize / 2;
    if (chunkSize > blocks.Count * blockSize)
        chunkSize = blocks.Count * blockSize;
    if (_writeBuffer == null || _writeBuffer.Length < chunkSize)
        _writeBuffer = new byte[chunkSize];
    Sop.Transaction.Transaction.LogTracer.Verbose("WriteBlocksToDisk: Start for Thread {0}.",
        Thread.CurrentThread.ManagedThreadId);
    using (var backupWritePool = new ConcurrentIOPoolManager())
    {
        // Backup the target blocks on disk.
        using (var backupReadPool = new ConcurrentIOPoolManager())
        {
            WriteBlocksToDisk(backupReadPool, backupWritePool, parent, blocks);
        }
    }
    // Overwrite the target blocks on disk with the source blocks.
    using (var writePool = new ConcurrentIOPoolManager())
    {
        WriteBlocksToDisk(null, writePool, parent, blocks);
    }
    Sop.Transaction.Transaction.LogTracer.Verbose("WriteBlocksToDisk: End for Thread {0}.",
        Thread.CurrentThread.ManagedThreadId);
    return blocks.Count;
}
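// Illustrative sketch (not part of the SOP sources): per the NOTE above, callers get better throughput
// when the blocks dictionary enumerates in Data Address order so contiguous blocks can be written in
// one async write. A minimal way to do that, assuming the caller holds blocks in any IEnumerable, is
// to key a sorted dictionary by DataAddress before handing it to WriteBlocksToDisk. SortByAddress and
// the usage names below are hypothetical.
private static IDictionary<long, Sop.DataBlock> SortByAddress(IEnumerable<Sop.DataBlock> blocks)
{
    // SortedDictionary enumerates keys in ascending order, which yields address-ordered writes.
    var sorted = new System.Collections.Generic.SortedDictionary<long, Sop.DataBlock>();
    foreach (var block in blocks)
        sorted[block.DataAddress] = block;
    return sorted;
}
// e.g. (hypothetical caller): driver.WriteBlocksToDisk(parent, SortByAddress(dirtyBlocks), false);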
public Sop.DataBlock ReadBlockFromDisk(Algorithm.Collection.ICollectionOnDisk parent, long address,
    bool getForRemoval)
{
    if (address >= 0)
    {
        var o = ((CollectionOnDisk)parent).Blocks[address];
        if (o != null)
        {
            getForRemoval = true;
            if (o.SizeOccupied > 0)
            {
                if (o.NextItemAddress >= 0 && o.Next == null)
                    o.Next = ReadBlockFromDisk(parent, o.NextItemAddress, getForRemoval);
                return o;
            }
        }
    }
    if (parent.FileStream.Length > address)
    {
        if (parent is LinkedListOnDisk)
            ((LinkedListOnDisk)parent).MoveTo(address);
        Sop.DataBlock d = CreateBlock(parent.DataBlockSize);
        d = ReadBlockFromDisk(parent, address, getForRemoval, getForRemoval, d);
        if (!d.IsEmpty())
            ReadBlock(parent, d, getForRemoval);
        _readAheadBuffer.Clear();
        return d;
    }
    Sop.DataBlock r = CreateBlock(parent.DataBlockSize);
    r.DataAddress = address;
    return r;
}
/// <summary>
/// Move the parent Collection's file pointer to 'address'.
/// </summary>
/// <param name="parent"></param>
/// <param name="address"></param>
/// <returns></returns>
public bool MoveTo(Algorithm.Collection.ICollectionOnDisk parent, long address)
{
    if (address >= 0)
    {
        FileStream fs = parent.FileStream;
        if (address != fs.Position)
            fs.Seek(address, SeekOrigin.Begin);
        if (((CollectionOnDisk)parent).CurrentEntryDataAddress != address)
        {
            if (((CollectionOnDisk)parent).currentDataBlock != null &&
                ((CollectionOnDisk)parent).currentDataBlock.DataAddress > -1)
                ((CollectionOnDisk)parent).currentDataBlock = null;
            ((CollectionOnDisk)parent).currentEntry = null;
            ((CollectionOnDisk)parent).CurrentEntryDataAddress = address;
        }
        return true;
    }
    return false;
}
public bool MoveTo(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock dataBlock)
{
    return dataBlock != null && MoveTo(parent, dataBlock.DataAddress);
}
/// <summary>
/// Remove the Collection from the Collections' Pool.
/// </summary>
/// <param name="collection"></param>
public void RemoveFromPool(Algorithm.Collection.ICollectionOnDisk collection)
{
    RemoveFromPool(collection, false);
}
/// <summary>
/// Manage this File's Store locks.
/// </summary>
/// <param name="lockStores"></param>
/// <returns></returns>
public List<ISynchronizer> ManageLock(bool lockStores = true)
{
    if (!IsOpen)
        return null;
    LockSystemStores(lockStores);
    List<ISynchronizer> result = new List<ISynchronizer>();
    if (CollectionsPool != null && CollectionsPool.Count > 0)
    {
        Algorithm.Collection.ICollectionOnDisk[] colls = null;
        #region lock/copy Store Pools
        if (lockStores)
        {
            CollectionsPool.Locker.Lock();
            colls = new Algorithm.Collection.ICollectionOnDisk[CollectionsPool.Count];
            CollectionsPool.Values.CopyTo(colls, 0);
        }
        else
        {
            colls = new Algorithm.Collection.ICollectionOnDisk[CollectionsPool.Count];
            CollectionsPool.Values.CopyTo(colls, 0);
            CollectionsPool.Locker.Unlock();
        }
        #endregion
        var systemStoreLockers = new List<ISynchronizer>(2);
        // Get collection of System & client Store Lockers, request client Stores to get locked for commit...
        for (int i = 0; i < colls.Length; i++)
        {
            // no need to manage disposed Stores!
            if (colls[i] is ISortedDictionaryOnDisk &&
                ((Algorithm.SortedDictionary.SortedDictionaryOnDisk)colls[i]).IsDisposed)
                continue;
            if (SystemManagedStore(colls[i]))
            {
                systemStoreLockers.Add((ISynchronizer)colls[i].SyncRoot);
                continue;
            }
            result.Add((ISynchronizer)colls[i].SyncRoot);
            ((ISynchronizer)colls[i].SyncRoot).CommitLockRequest(lockStores);
        }
        // wait until each Store grants the commit lock/unlock request...
        foreach (var locker in result)
            locker.WaitForCommitLock(lockStores);
        result.AddRange(systemStoreLockers);
        #region under study for removal (not needed)
        //if (lockStores)
        //{
        //    // track those modified Stores so they can get flushed in the commit process...
        //    for (int i = 0; i < colls.Length; i++)
        //    {
        //        if (SystemManagedStore(colls[i]))
        //            continue;
        //        if (colls[i] is BTreeAlgorithm)
        //        {
        //            if (((BTreeAlgorithm)colls[i]).IsDirty)
        //                ((Sop.Transaction.TransactionBase)((BTreeAlgorithm)colls[i]).Transaction).
        //                    TrackModification((Algorithm.Collection.CollectionOnDisk)colls[i]);
        //        }
        //        else if (colls[i] is ISortedDictionaryOnDisk)
        //        {
        //            if (((ISortedDictionaryOnDisk)colls[i]).IsDirty)
        //                ((Sop.Transaction.TransactionBase)((ISortedDictionaryOnDisk)colls[i]).Transaction).
        //                    TrackModification(((Algorithm.SortedDictionary.SortedDictionaryOnDisk)colls[i]).BTreeAlgorithm);
        //        }
        //    }
        //}
        #endregion
    }
    return result;
}
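// Illustrative sketch (not part of the SOP sources): ManageLock above uses a request-then-wait pattern,
// first issuing CommitLockRequest to every client Store and only then blocking on WaitForCommitLock,
// so all Stores quiesce in parallel rather than one at a time. The generic shape of that pattern is
// shown below with a hypothetical ICommitLockable interface standing in for ISynchronizer.
internal interface ICommitLockable
{
    void CommitLockRequest(bool lockIt);
    void WaitForCommitLock(bool lockIt);
}

internal static void AcquireCommitLocks(System.Collections.Generic.IEnumerable<ICommitLockable> lockers,
    bool lockIt)
{
    var requested = new System.Collections.Generic.List<ICommitLockable>();
    foreach (var locker in lockers)
    {
        locker.CommitLockRequest(lockIt);   // phase 1: fire all lock/unlock requests without blocking
        requested.Add(locker);
    }
    foreach (var locker in requested)
        locker.WaitForCommitLock(lockIt);   // phase 2: wait for every grant
}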
/// <summary>
/// MoveNext makes the next entry the current one.
/// </summary>
public bool MoveNext(Algorithm.Collection.ICollectionOnDisk parent)
{
    return MoveTo(parent, ((CollectionOnDisk)parent).GetCurrentDataBlock().Next);
}
protected internal DataBlockDriver(Algorithm.Collection.ICollectionOnDisk parent, HeaderData hd = null)
{
    Initialize(parent, hd);
}
/// <summary>
/// Allocate the next available block on disk.
/// Returns true if all blocks had been allocated, false otherwise.
/// </summary>
/// <param name="parent"></param>
/// <param name="block"></param>
/// <param name="isHead"></param>
/// <param name="isCollectionBlock"></param>
/// <param name="addToMru"> </param>
/// <returns></returns>
private bool AllocateNextBlock(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block,
    bool isHead, bool isCollectionBlock, bool addToMru)
{
    bool isBlockLocallyRecycled = false;
    HeaderData hd = HeaderData;
    if (hd == null && parent.Parent is CollectionOnDisk)
        hd = ((CollectionOnDisk)parent.Parent).HeaderData;
    bool willRecycle = hd != null && hd.RecycledSegment != null && hd.RecycledSegment.Count > 0;
    if (!willRecycle && hd.NextAllocatableAddress + block.Length <= hd.EndAllocatableAddress)
    {
        AllocateAvailableBlock((CollectionOnDisk)parent, hd, block);
    }
    else
    {
        if (parent.File.Server.HasTrashBin && !willRecycle)
        {
            if (isCollectionBlock)
                willRecycle = parent.File.DeletedCollections != null &&
                              parent.File.DeletedCollections.Count > 0;
            //** prioritize block recycling if there are plenty of deleted blocks
            if (!willRecycle && ((CollectionOnDisk)parent).DeletedBlocks != null)
                willRecycle = ((CollectionOnDisk)parent).DeletedBlocks.Count > 0 ||
                              parent.File.DeletedCollections.Count > 0;
            if (!willRecycle && isHead)
                willRecycle = parent.File.DeletedCollections != null &&
                              parent.File.DeletedCollections.Count > 0;
        }
        DeletedBlockInfo dbi = null;
        if (willRecycle)
        {
            int totalBlockSize;
            bool fromCollection;
            if (isCollectionBlock)
            {
                if (parent.File.DeletedCollections != null && parent.File.DeletedCollections.Count > 0)
                    dbi = RecycleBlock(parent, block, true, out totalBlockSize, out fromCollection);
            }
            if (dbi != null ||
                (((CollectionOnDisk)parent).DeletedBlocksCount > 0 &&
                 (dbi = RecycleBlock(parent, block, isCollectionBlock, out totalBlockSize, out fromCollection)) != null))
            {
                #region Recycle Deleted Block
                if (dbi.IsContiguousBlock)
                {
                    #region Contiguous blocks can only be from Collection.DeletedBlocks
                    if (block.DataAddress == -1)
                    {
                        long address = dbi.StartBlockAddress;
                        Sop.DataBlock db = block;
                        int blockLength = (int)parent.DataBlockSize;
                        while (db != null)
                        {
                            dbi.Count--;
                            db.DataAddress = address;
                            address += blockLength;
                            db = db.Next;
                        }
                    }
                    isBlockLocallyRecycled = true;
                    #endregion
                }
                else
                {
                    #region Recycled block is from a deleted collection of the File
                    int growthSizeInNob = parent.File.StoreGrowthSizeInNob;
                    block.DataAddress = dbi.StartBlockAddress;
                    hd.StartAllocatableAddress = dbi.StartBlockAddress;
                    hd.EndAllocatableAddress = hd.StartAllocatableAddress +
                                               (short)parent.File.DataBlockSize * growthSizeInNob;
                    Log.Logger.Instance.Log(Log.LogLevels.Verbose,
                        "Recycled region {0}, Size {1} from {2} DeletedCollections",
                        dbi.StartBlockAddress, hd.EndAllocatableAddress - hd.StartAllocatableAddress,
                        parent.File.Filename);
                    if (parent.Transaction != null)
                        ((Transaction.TransactionBase)parent.Transaction).Register(
                            Sop.Transaction.ActionType.RecycleCollection, (CollectionOnDisk)parent,
                            dbi.StartBlockAddress, hd.EndAllocatableAddress - hd.StartAllocatableAddress);
                    if (dbi.EndBlockAddress == hd.EndAllocatableAddress)
                    {
                        if (parent.File.DeletedCollections != null)
                            parent.File.DeletedCollections.RemoveTop();
                    }
                    else
                    {
                        //** read the next segment of the deleted collection
                        if (!ResurfaceDeletedBlockNextSegment((CollectionOnDisk)parent, dbi,
                            hd.EndAllocatableAddress))
                        {
                            if (parent.File.DeletedCollections != null)
                                parent.File.DeletedCollections.Remove(dbi.StartBlockAddress);
                        }
                    }
                    hd.NextAllocatableAddress = hd.StartAllocatableAddress + block.Length;
                    hd.DiskBuffer.IsDirty = true;
                    hd.IsModifiedInTransaction = true;
                    #endregion
                }
                #endregion
            }
        }
        if (dbi == null)
        {
            if (hd.NextAllocatableAddress + block.Length <= hd.EndAllocatableAddress)
                AllocateAvailableBlock((CollectionOnDisk)parent, hd, block);
            else
                AllocateOnNextSegment((CollectionOnDisk)parent, hd, block);
        }
    }
    #region Add Block to MRU
    if (hd.OccupiedBlocksHead == null)
    {
        hd.OccupiedBlocksHead = CreateBlock((DataBlockSize)block.Length);
        hd.OccupiedBlocksHead.DataAddress = block.DataAddress;
        if (hd.OccupiedBlocksTail == null)
            hd.OccupiedBlocksTail = CreateBlock((DataBlockSize)block.Length);
    }
    else if (isBlockLocallyRecycled)
    {
        //** reload & update the InternalNextBlockAddress to keep ("low-level") segments' link intact...
        Sop.DataBlock db = block;
        while (db != null)
        {
            if ((db.DataAddress + (int)parent.DataBlockSize) %
                (parent.File.Profile.StoreGrowthSizeInNob * (int)parent.DataBlockSize) == 0)
            {
                Sop.DataBlock db2 = CreateBlock(parent.DataBlockSize);
                db2 = ReadBlockFromDisk(parent, db.DataAddress, true, true, db2);
                db.InternalNextBlockAddress = db2.InternalNextBlockAddress;
            }
            db = db.Next;
        }
        //** add to MRU cache if requested...
        return true;
    }
    else
    {
        if ((hd.OccupiedBlocksTail.DataAddress + (int)parent.DataBlockSize) %
            (parent.File.Profile.StoreGrowthSizeInNob * (int)parent.DataBlockSize) == 0)
        {
            Sop.DataBlock db = ReadBlockFromDisk(parent, hd.OccupiedBlocksTail.DataAddress, false);
            db.InternalNextBlockAddress = block.DataAddress;
            this.SetDiskAddress(parent, db, addToMru);
            long dbId = GetId(db);
            ((CollectionOnDisk)parent).Blocks[dbId] = db;
        }
    }
    hd.OccupiedBlocksTail.DataAddress = block.DataAddress;
    #endregion
    return false;
}
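// Illustrative sketch (not part of the SOP sources): AllocateNextBlock above picks between three
// outcomes — allocate from the current segment, recycle deleted space, or grow into a new segment.
// The decision order is summarized below with a hypothetical enum and helper; names and the exact
// predicate simplification are illustrative only.
internal enum AllocationSource { CurrentSegment, RecycledSpace, NewSegment }

internal static AllocationSource ChooseAllocationSource(long nextAllocatable, long endAllocatable,
    int blockLength, bool hasRecycledSegment, bool hasDeletedSpace, bool trashBinEnabled)
{
    // 1) Fit in the current segment when no recycled segment is pending.
    if (!hasRecycledSegment && nextAllocatable + blockLength <= endAllocatable)
        return AllocationSource.CurrentSegment;
    // 2) Prefer recycling when the trash bin is enabled and deleted space is available.
    if (hasRecycledSegment || (trashBinEnabled && hasDeletedSpace))
        return AllocationSource.RecycledSpace;
    // 3) Otherwise fall back to the current segment if it still fits, else grow a new segment.
    return nextAllocatable + blockLength <= endAllocatable
        ? AllocationSource.CurrentSegment
        : AllocationSource.NewSegment;
}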
/// <summary>
/// MoveLast makes the last entry in the Collection the current one.
/// </summary>
public bool MoveLast(Algorithm.Collection.ICollectionOnDisk parent)
{
    return MoveTo(parent, this.HeaderData.OccupiedBlocksTail);
}
private DeletedBlockInfo RecycleBlock(Algorithm.Collection.ICollectionOnDisk parent, Sop.DataBlock block,
    bool isCollectionBlock, out int totalBlockSize, out bool fromCollection)
{
    int blockLength = block.Length;
    totalBlockSize = block.CountMembers() * blockLength;
    var dbi = ((CollectionOnDisk)parent).GetDeletedBlock(totalBlockSize, isCollectionBlock, out fromCollection);
    if (dbi == null)
    {
        totalBlockSize = -1;
        return dbi;
    }
    if (!fromCollection)
        return dbi;
    if (dbi.IsContiguousBlock)
    {
        #region NOT VISITED BLOCK
        //** from Collection.DeletedBlocks (IsContiguousBlock = true)
        if (dbi.Count * (int)parent.DataBlockSize < totalBlockSize)
        {
            string errMsg = "RecycleBlock: Total Requested Block Size > recycled Deleted Block(s).";
            Log.Logger.Instance.Log(Log.LogLevels.Fatal, errMsg);
            throw new InvalidOperationException(errMsg);
        }
        #endregion
        // Register recycled blocks so they can get handled properly, e.g. - will not get backed up during Store Flush.
        if (parent.Transaction != null)
            ((Transaction.TransactionBase)parent.Transaction).Register(
                Sop.Transaction.ActionType.Recycle, (CollectionOnDisk)parent,
                dbi.StartBlockAddress, dbi.Count * blockLength);
        // set Block to the block addresses referenced in dbi
        int count = dbi.Count;
        long address = dbi.StartBlockAddress;
        Sop.DataBlock db = block;
        Transaction.ITransactionLogger trans = parent.Transaction;
        Sop.DataBlock dbPrev = null;
        while (db != null)
        {
            dbi.Count--;
            db.DataAddress = address;
            if (dbPrev != null)
            {
                dbPrev.NextItemAddress = address;
                dbPrev.Next = db;
                SetIsDirty(dbPrev, true);
            }
            address += blockLength;
            dbPrev = db;
            db = db.Next;
        }
        SetIsDirty(dbPrev, true);
        var codParent = (CollectionOnDisk)parent;
        IDataBlockRecycler delBlocks = codParent.DeletedBlocks;
        if (HeaderData.RecycledSegment != dbi || HeaderData.RecycledSegment == null)
        {
            if (delBlocks != null)
                delBlocks.RemoveAvailableBlock(dbi.StartBlockAddress);
        }
        if (dbi.Count == 0)
        {
            dbi.Count = count;
            HeaderData.RecycledSegment = null;
        }
        else
        {
            HeaderData.RecycledSegment = (DeletedBlockInfo)dbi.Clone();
            HeaderData.RecycledSegment.StartBlockAddress = address;
            dbi.Count = count;
        }
    }
    return dbi;
}
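// Illustrative sketch (not part of the SOP sources): RecycleBlock above carves a chain of fixed-size
// blocks out of a recycled contiguous segment and keeps whatever is left over as the next recyclable
// remainder (HeaderData.RecycledSegment). The standalone helper below (CarveFromSegment, a hypothetical
// name) shows that bookkeeping with plain numbers: it assigns contiguous addresses to the chain and
// leaves the caller with the remainder's start ('address' past the last assignment) and block count.
private static long[] CarveFromSegment(long segmentStart, ref int segmentBlockCount,
    int chainBlockCount, int blockSize)
{
    var addresses = new long[chainBlockCount];
    long address = segmentStart;
    for (int i = 0; i < chainBlockCount; i++)
    {
        addresses[i] = address;             // assign the next contiguous address to the chain
        address += blockSize;
        segmentBlockCount--;                // consume one block from the recycled segment
    }
    // After the loop, segmentStart + chainBlockCount * blockSize is the remainder's start and
    // segmentBlockCount its remaining block count (zero or less means the segment was fully consumed).
    return addresses;
}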