/// <summary>
/// Remove a collection page from the datafile: delete every index page it owns,
/// unlink the page from its prev/next neighbors and release the page itself.
/// </summary>
public void Drop(CollectionPage col)
{
    // delete all index pages (skip empty index slots)
    for (byte i = 0; i < col.Indexes.Length; i++)
    {
        var index = col.Indexes[i];

        if (index.IsEmpty) continue;

        _pager.DeletePage(index.HeadNode.PageID);
    }

    // adjust page pointers: detach this page from the doubly-linked page list
    // (uint.MaxValue marks "no page")
    if (col.PrevPageID != uint.MaxValue)
    {
        var prev = _pager.GetPage<BasePage>(col.PrevPageID);
        prev.NextPageID = col.NextPageID;
        prev.IsDirty = true;
    }

    if (col.NextPageID != uint.MaxValue)
    {
        var next = _pager.GetPage<BasePage>(col.NextPageID);
        next.PrevPageID = col.PrevPageID;
        next.IsDirty = true;
    }

    // finally release the collection page itself (false = non-recursive delete)
    _pager.DeletePage(col.PageID, false);
}
/// <summary>
/// Update data inside a datapage. If new data can be used in same datapage, just update.
/// Otherwise, copy content to a new ExtendedPage
/// </summary>
/// <param name="col">Collection owning the block (used to keep its free-data-page list current)</param>
/// <param name="blockAddress">PageID + index locating the data block to update</param>
/// <param name="data">New serialized content for the block</param>
/// <returns>The updated DataBlock</returns>
public DataBlock Update(CollectionPage col, PageAddress blockAddress, byte[] data)
{
    // get datapage and mark as dirty
    var dataPage = _pager.GetPage<DataPage>(blockAddress.PageID);
    var block = dataPage.DataBlocks[blockAddress.Index];

    // extend when the new payload no longer fits this datapage:
    // free bytes plus the bytes released by the old content must stay positive
    var extend = dataPage.FreeBytes + block.Data.Length - data.Length <= 0;

    // check if need to extend
    if (extend)
    {
        // clear my block data (content moves entirely to the extend page chain)
        block.Data = new byte[0];

        // create (or get a existed) extendpage and store data there
        ExtendPage extendPage;

        if (block.ExtendPageID == uint.MaxValue)
        {
            extendPage = _pager.NewPage<ExtendPage>();
            block.ExtendPageID = extendPage.PageID;
        }
        else
        {
            extendPage = _pager.GetPage<ExtendPage>(block.ExtendPageID);
        }

        this.StoreExtendData(extendPage, data);
    }
    else
    {
        // if no extends, just update data block
        block.Data = data;

        // if there was a extended bytes, delete
        if (block.ExtendPageID != uint.MaxValue)
        {
            // NOTE(review): the `true` flag presumably deletes the whole extend-page
            // chain, not just the first page - confirm against Pager.DeletePage
            _pager.DeletePage(block.ExtendPageID, true);
            block.ExtendPageID = uint.MaxValue;
        }
    }

    // updates freebytes + items count
    dataPage.UpdateItemCount();

    // set DataPage as dirty
    _pager.SetDirty(dataPage);

    // add/remove dataPage on freelist if has space AND its on/off free list
    _pager.AddOrRemoveToFreeList(dataPage.FreeBytes > DataPage.DATA_RESERVED_BYTES, dataPage, col, ref col.FreeDataPageID);

    return (block);
}
/// <summary>
/// Save all dirty pages to disk
/// </summary>
public void Commit()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // set final datafile length (optimize page writes)
    _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));

    // write all dirty pages in data file
    foreach (var page in _pager.GetDirtyPages())
    {
        // first write in journal file original data
        _disk.WriteJournal(page.PageID, page.DiskData);

        // page.WritePage() re-renders DiskData with the changed buffer;
        // page 0 (header) is never encrypted
        var buffer = page.WritePage();

        if (_crypto != null && page.PageID != 0)
        {
            buffer = _crypto.Encrypt(buffer);
        }

        _disk.WritePage(page.PageID, buffer);

        // mark page as clean (is now saved in disk)
        page.IsDirty = false;
    }

    // discard journal file
    _disk.ClearJournal();
}
/// <summary>
/// Get a node inside a page using PageAddress - Returns null if address IsEmpty
/// </summary>
public IndexNode GetNode(PageAddress address)
{
    if (address.IsEmpty) return null;

    // resolve the index page, then pick the node slot the address points at
    return _pager.GetPage<IndexPage>(address.PageID).Nodes[address.Index];
}
/// <summary>
/// Save all dirty pages to disk
/// </summary>
public void Commit()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // increase file changeID (wraps back to 0 on ushort overflow)
    header.ChangeID = header.ChangeID == ushort.MaxValue
        ? (ushort)0
        : (ushort)(header.ChangeID + 1);

    // mark header as dirty
    _pager.SetDirty(header);

    // journal keeps the original (pre-change) disk image of each dirty page
    var originals = _cache.GetDirtyPages()
        .Select(x => x.DiskData)
        .Where(x => x.Length > 0)
        .ToList();

    _disk.WriteJournal(originals, header.LastPageID);

    // enter in exclusive lock mode to write on disk
    using (_locker.Exclusive())
    {
        // set final datafile length (optimize page writes)
        _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));

        foreach (var dirty in _cache.GetDirtyPages())
        {
            // dirty.WritePage() re-renders DiskData with the changed buffer
            var rendered = dirty.WritePage();

            // page 0 (header) is never encrypted
            if (_crypto != null && dirty.PageID != 0)
            {
                rendered = _crypto.Encrypt(rendered);
            }

            _disk.WritePage(dirty.PageID, rendered);
        }

        // mark all dirty pages in clean pages (all are persisted in disk and are valid pages)
        _cache.MarkDirtyAsClean();

        // ensure all pages from OS cache has been persisted on medium
        _disk.Flush();

        // discard journal file
        _disk.ClearJournal(header.LastPageID);
    }
}
/// <summary>
/// Save all dirty pages to disk using the journal file for crash recovery.
/// Write order: journal -> flush -> data pages (header first, Recovery=true)
/// -> flush -> header with Recovery=false -> flush -> clear journal.
/// </summary>
public void PersistDirtyPages()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // increase file changeID (back to 0 when overflow)
    header.ChangeID = header.ChangeID == ushort.MaxValue ? (ushort)0 : (ushort)(header.ChangeID + (ushort)1);

    // mark header as dirty
    _pager.SetDirty(header);

    _log.Write(Logger.DISK, "begin disk operations - changeID: {0}", header.ChangeID);

    // write journal file in desc order to header be last page in disk
    _disk.WriteJournal(_cache.GetDirtyPages()
        .OrderByDescending(x => x.PageID)
        .Select(x => x.DiskData)
        .Where(x => x.Length > 0)
        .ToList(), header.LastPageID);

    // FIX: flush so the journal is really on the medium before data pages are
    // overwritten - otherwise a crash mid-write could leave no valid recovery data
    _disk.Flush();

    // mark header as recovery before start writing (in journal, must keep recovery = false)
    header.Recovery = true;

    // get all dirty page stating from Header page (SortedList)
    foreach (var page in _cache.GetDirtyPages())
    {
        // page.WritePage() updated DiskData with new rendered buffer;
        // page 0 (header) is never encrypted
        var buffer = _crypto == null || page.PageID == 0 ?
            page.WritePage() : _crypto.Encrypt(page.WritePage());

        _disk.WritePage(page.PageID, buffer);
    }

    // FIX: persist all changed pages before clearing the recovery flag; if the
    // recovery=false header reached disk first (OS write cache may reorder),
    // a crash here would skip the journal restore and leave a corrupt datafile
    _disk.Flush();

    // re-write header page but now with recovery=false
    header.Recovery = false;

    _log.Write(Logger.DISK, "re-write header page now with recovery = false");

    _disk.WritePage(0, header.WritePage());

    // mark all dirty pages as clean pages (all are persisted in disk and are valid pages)
    _cache.MarkDirtyAsClean();

    // flush all data direct to disk
    _disk.Flush();

    // discard journal file
    _disk.ClearJournal(header.LastPageID);
}
/// <summary>
/// Save all dirty pages to disk - do not touch on lock disk
/// </summary>
private void Save()
{
    // get header and mark as dirty
    var header = _pager.GetPage<HeaderPage>(0, true);

    // increase file changeID (back to 0 when overflow)
    if (header.ChangeID == ushort.MaxValue)
    {
        header.ChangeID = 0;
    }
    else
    {
        header.ChangeID = (ushort)(header.ChangeID + 1);
    }

    // set final datafile length (optimize page writes)
    _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));

    // write all dirty pages in data file
    foreach (var dirty in _cache.GetDirtyPages())
    {
        _disk.WritePage(dirty.PageID, dirty.WritePage());
    }
}
/// <summary>
/// Get a exist collection. Returns null if not exists
/// </summary>
public CollectionPage Get(string name)
{
    if (string.IsNullOrEmpty(name)) throw new ArgumentNullException(nameof(name));

    var header = _pager.GetPage<HeaderPage>(0);

    uint pageID;

    // header keeps the name -> pageID map of all collections
    return header.CollectionPages.TryGetValue(name, out pageID)
        ? _pager.GetPage<CollectionPage>(pageID)
        : null;
}
/// <summary>
/// Save all dirty pages to disk, using the journal file for crash recovery when enabled.
/// Write order: journal -> flush -> header (Recovery flag set) -> flush -> data pages
/// -> flush -> header (Recovery cleared) -> flush -> clear journal.
/// </summary>
public void PersistDirtyPages()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // increase file changeID (back to 0 when overflow)
    header.ChangeID = header.ChangeID == ushort.MaxValue ? (ushort)0 : (ushort)(header.ChangeID + (ushort)1);

    // mark header as dirty
    _pager.SetDirty(header);

    _log.Write(Logger.DISK, "begin disk operations - changeID: {0}", header.ChangeID);

    // write journal file in desc order to header be last page in disk
    if (_disk.IsJournalEnabled)
    {
        _disk.WriteJournal(_cache.GetDirtyPages()
            .OrderByDescending(x => x.PageID)
            .Select(x => x.DiskData)
            .Where(x => x.Length > 0)
            .ToList(), header.LastPageID);

        // mark header as recovery before start writing (in journal, must keep recovery = false)
        header.Recovery = true;

        // flush to disk to ensure journal is committed to disk before proceeding
        _disk.Flush();
    }
    else
    {
        // if no journal extend, resize file here to fast writes
        _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));
    }

    // write header page first. if header.Recovery == true, this ensures it's written
    // to disk *before* we start changing pages
    var headerPage = _cache.GetPage(0);
    var headerBuffer = headerPage.WritePage();
    _disk.WritePage(0, headerBuffer);
    _disk.Flush();

    // write remaining dirty pages (header was already written above, so skip page 0)
    foreach (var page in _cache.GetDirtyPages())
    {
        if (page.PageID == 0) continue;

        // page.WritePage() updated DiskData with new rendered buffer.
        // FIX: the old ternary also tested `page.PageID == 0`, but that branch was
        // unreachable after the `continue` above - dead condition removed
        var buffer = _crypto == null ? page.WritePage() : _crypto.Encrypt(page.WritePage());

        _disk.WritePage(page.PageID, buffer);
    }

    if (_disk.IsJournalEnabled)
    {
        // ensure changed pages are persisted to disk *before* we change header.Recovery to false
        _disk.Flush();

        // re-write header page but now with recovery=false
        header.Recovery = false;

        _log.Write(Logger.DISK, "re-write header page now with recovery = false");

        _disk.WritePage(0, header.WritePage());
    }

    // mark all dirty pages as clean pages (all are persisted in disk and are valid pages)
    _cache.MarkDirtyAsClean();

    // flush all data direct to disk
    _disk.Flush();

    // discard journal file
    // NOTE(review): called even when journal is disabled - presumably a no-op then; confirm
    _disk.ClearJournal(header.LastPageID);
}