/// <summary>
/// Insert data inside a datapage. Returns the new data block (its Position indicates the first page)
/// </summary>
public DataBlock Insert(CollectionPage col, byte[] data)
{
    // collection page will be updated (free list / document count) - mark dirty before any change
    _pager.SetDirty(col);

    // total bytes this block would need if stored inline on a single page
    var inlineSize = data.Length + DataBlock.DATA_BLOCK_FIXED_SIZE;

    // data larger than one page must spill into extend pages
    var useExtendPages = inlineSize > BasePage.PAGE_AVAILABLE_BYTES;

    // when spilling, the datapage itself only needs room for the fixed block header
    var bytesNeeded = useExtendPages ? DataBlock.DATA_BLOCK_FIXED_SIZE : inlineSize;

    // locate a datapage with enough free space, starting from the collection free list
    var dataPage = _pager.GetFreePage<DataPage>(col.FreeDataPageID, bytesNeeded);

    // build the block at the first empty slot of the chosen datapage
    var block = new DataBlock
    {
        Position = new PageAddress(dataPage.PageID, dataPage.DataBlocks.NextIndex()),
        Page = dataPage
    };

    if (useExtendPages)
    {
        // allocate the first extend page and store all bytes through the extend chain
        var extendPage = _pager.NewPage<ExtendPage>();

        block.ExtendPageID = extendPage.PageID;

        StoreExtendData(extendPage, data);
    }
    else
    {
        // small enough: keep the raw bytes inside the block itself
        block.Data = data;
    }

    // register the block on the page and refresh free-bytes/item counters
    dataPage.DataBlocks.Add(block.Position.Index, block);
    dataPage.UpdateItemCount();

    // keep the page on the collection free list only while it still has usable space
    _pager.AddOrRemoveToFreeList(dataPage.FreeBytes > DataPage.DATA_RESERVED_BYTES, dataPage, col, ref col.FreeDataPageID);

    col.DocumentCount++;

    return block;
}
/// <summary>
/// Save all dirty pages to disk
/// </summary>
public void Commit()
{
    var header = _pager.GetPage<HeaderPage>(0);

    // bump the file changeID, wrapping back to zero on ushort overflow
    if (header.ChangeID == ushort.MaxValue)
    {
        header.ChangeID = (ushort)0;
    }
    else
    {
        header.ChangeID = (ushort)(header.ChangeID + (ushort)1);
    }

    _pager.SetDirty(header);

    // snapshot all dirty pages into the journal before touching the datafile
    var journalPages = _cache.GetDirtyPages()
        .Select(x => x.DiskData)
        .Where(x => x.Length > 0)
        .ToList();

    _disk.WriteJournal(journalPages, header.LastPageID);

    // exclusive lock: no other reader/writer touches the datafile while pages are flushed
    using (_locker.Exclusive())
    {
        // pre-size the datafile so page writes do not grow it one page at a time
        _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));

        foreach (var page in _cache.GetDirtyPages())
        {
            // header page (id 0) is never encrypted; other pages are when crypto is enabled
            byte[] buffer;

            if (_crypto == null || page.PageID == 0)
            {
                buffer = page.WritePage();
            }
            else
            {
                buffer = _crypto.Encrypt(page.WritePage());
            }

            _disk.WritePage(page.PageID, buffer);
        }

        // every dirty page is now persisted - cache can treat them as clean
        _cache.MarkDirtyAsClean();

        // ensure all pages from OS cache have been persisted on the medium
        _disk.Flush();

        // journal is no longer needed once the datafile is consistent
        _disk.ClearJournal(header.LastPageID);
    }
}
/// <summary>
/// Add a new collection. Throws when the name is empty, malformed, or over the collections size limit
/// </summary>
public CollectionPage Add(string name)
{
    if (string.IsNullOrEmpty(name)) throw new ArgumentNullException(nameof(name));
    if (!CollectionPage.NamePattern.IsMatch(name)) throw LiteException.InvalidFormat(name);

    _log.Write(Logger.COMMAND, "creating new collection '{0}'", name);

    // fetch header up front (marked dirty below) - NewPage can hand back another header instance
    var header = _pager.GetPage<HeaderPage>(0);

    // each entry costs its name length plus 8 bytes (4 for string length, 4 for uint pageID)
    var usedBytes = header.CollectionPages.Sum(x => x.Key.Length + 8);

    if (usedBytes + name.Length + 8 >= CollectionPage.MAX_COLLECTIONS_SIZE)
    {
        throw LiteException.CollectionLimitExceeded(CollectionPage.MAX_COLLECTIONS_SIZE);
    }

    // allocate the collection page (NewPage returns it already marked as dirty)
    var colPage = _pager.NewPage<CollectionPage>();

    // register the new collection in the header and name the page
    header.CollectionPages.Add(name, colPage.PageID);
    colPage.CollectionName = name;

    _pager.SetDirty(header);

    // every collection starts with a unique primary-key index over _id
    var pkIndex = _indexer.CreateIndex(colPage);

    pkIndex.Field = "_id";
    pkIndex.Expression = "$._id";
    pkIndex.Unique = true;

    return colPage;
}
/// <summary>
/// Save all dirty pages to disk
/// </summary>
public void PersistDirtyPages()
{
    var header = _pager.GetPage<HeaderPage>(0);

    // bump the file changeID, wrapping back to zero on ushort overflow
    if (header.ChangeID == ushort.MaxValue)
    {
        header.ChangeID = (ushort)0;
    }
    else
    {
        header.ChangeID = (ushort)(header.ChangeID + (ushort)1);
    }

    _pager.SetDirty(header);

    _log.Write(Logger.DISK, "begin disk operations - changeID: {0}", header.ChangeID);

    // journal is written in descending PageID order so the header is the last page on disk
    var journalPages = _cache.GetDirtyPages()
        .OrderByDescending(x => x.PageID)
        .Select(x => x.DiskData)
        .Where(x => x.Length > 0)
        .ToList();

    _disk.WriteJournal(journalPages, header.LastPageID);

    // flag recovery only AFTER journaling (journal itself must keep recovery = false)
    header.Recovery = true;

    // write all dirty pages, starting from the header (SortedList order)
    foreach (var page in _cache.GetDirtyPages())
    {
        // header page (id 0) is never encrypted; other pages are when crypto is enabled
        byte[] buffer;

        if (_crypto == null || page.PageID == 0)
        {
            buffer = page.WritePage();
        }
        else
        {
            buffer = _crypto.Encrypt(page.WritePage());
        }

        _disk.WritePage(page.PageID, buffer);
    }

    // all pages landed - rewrite the header with recovery = false
    header.Recovery = false;

    _log.Write(Logger.DISK, "re-write header page now with recovery = false");

    _disk.WritePage(0, header.WritePage());

    // every dirty page is now persisted - cache can treat them as clean
    _cache.MarkDirtyAsClean();

    // push everything down to the storage medium
    _disk.Flush();

    // journal is no longer needed
    _disk.ClearJournal(header.LastPageID);
}
/// <summary>
/// Insert data inside a datapage. Returns dataPageID that indicates the first page
/// </summary>
public DataBlock Insert(CollectionPage col, byte[] data)
{
    // total bytes this block would need if stored inline on a single page
    var inlineSize = data.Length + DataBlock.DATA_BLOCK_FIXED_SIZE;

    // data larger than one page must spill into extend pages
    var useExtendPages = inlineSize > BasePage.PAGE_AVAILABLE_BYTES;

    // when spilling, the datapage itself only needs room for the fixed block header
    var bytesNeeded = useExtendPages ? DataBlock.DATA_BLOCK_FIXED_SIZE : inlineSize;

    // locate a datapage with enough free space, starting from the collection free list
    var dataPage = _pager.GetFreePage<DataPage>(col.FreeDataPageID, bytesNeeded);

    // new block goes to the first empty index on this datapage
    var block = new DataBlock
    {
        Page = dataPage
    };

    if (useExtendPages)
    {
        // allocate the first extend page and store all bytes through the extend chain
        var extendPage = _pager.NewPage<ExtendPage>();

        block.ExtendPageID = extendPage.PageID;

        this.StoreExtendData(extendPage, data);
    }
    else
    {
        // small enough: keep the raw bytes inside the block itself
        block.Data = data;
    }

    // register the block on the page, then mark the page dirty
    dataPage.AddBlock(block);
    _pager.SetDirty(dataPage);

    // keep the page on the collection free list only while it still has usable space
    _pager.AddOrRemoveToFreeList(dataPage.FreeBytes > DataPage.DATA_RESERVED_BYTES, dataPage, col, ref col.FreeDataPageID);

    // collection page changed too (document count / free list head)
    col.DocumentCount++;
    _pager.SetDirty(col);

    return block;
}
/// <summary>
/// Create a new index and returns head page address (skip list)
/// </summary>
public CollectionIndex CreateIndex(CollectionPage col)
{
    // reserve a free index slot on the collection page
    var index = col.GetFreeIndex();

    // get a new index page for first index page
    var page = _pager.NewPage<IndexPage>();

    // create a empty node with full max level - this is the skip-list head sentinel
    // (MinValue key sorts before every real key)
    var head = new IndexNode(IndexNode.MAX_LEVEL_LENGTH)
    {
        Key = BsonValue.MinValue,
        KeyLength = (ushort)BsonValue.MinValue.GetBytesCount(false),
        Page = page,
        Position = new PageAddress(page.PageID, 0)
    };

    // add as first node
    page.Nodes.Add(head.Position.Index, head);

    // update freebytes + item count (for head)
    page.UpdateItemCount();

    // mark the page that owns this index slot as dirty - presumably the collection page; confirm
    // NOTE(review): the new `page` is not SetDirty here - assumes NewPage already marks it dirty; TODO confirm
    _pager.SetDirty(index.Page);

    // add indexPage on freelist if has space
    _pager.AddOrRemoveToFreeList(true, page, index.Page, ref index.FreeIndexPageID);

    // point the head node to this new node position
    index.HeadNode = head.Position;

    // insert tail node (MaxValue sentinel sorts after every real key) and link it
    var tail = AddNode(index, BsonValue.MaxValue, IndexNode.MAX_LEVEL_LENGTH);
    index.TailNode = tail.Position;

    return(index);
}
/// <summary>
/// Save all dirty pages to disk. When journaling is enabled, pages are journaled first and the
/// header is written with Recovery=true before any page write, then rewritten with Recovery=false
/// once all pages (and the journal) are safely on disk.
/// </summary>
public void PersistDirtyPages()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // increase file changeID (back to 0 when overflow)
    header.ChangeID = header.ChangeID == ushort.MaxValue ? (ushort)0 : (ushort)(header.ChangeID + (ushort)1);

    // mark header as dirty
    _pager.SetDirty(header);

    _log.Write(Logger.DISK, "begin disk operations - changeID: {0}", header.ChangeID);

    if (_disk.IsJournalEnabled)
    {
        // write journal file in desc order so the header is the last page in the journal
        _disk.WriteJournal(_cache.GetDirtyPages()
            .OrderByDescending(x => x.PageID)
            .Select(x => x.DiskData)
            .Where(x => x.Length > 0)
            .ToList(), header.LastPageID);

        // mark header as recovery only after journaling (in journal, must keep recovery = false)
        header.Recovery = true;

        // flush to disk to ensure the journal is committed before datafile pages change
        _disk.Flush();
    }
    else
    {
        // no journal: resize the datafile up front so page writes don't grow it incrementally
        _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));
    }

    // write the header page first. if header.Recovery == true, this ensures it reaches disk
    // *before* any other page is changed
    var headerPage = _cache.GetPage(0);
    var headerBuffer = headerPage.WritePage();

    _disk.WritePage(0, headerBuffer);
    _disk.Flush();

    // write all remaining dirty pages (header already written above)
    foreach (var page in _cache.GetDirtyPages())
    {
        // header was already written first - skip it here
        if (page.PageID == 0) continue;

        // page.WritePage() updates DiskData with the freshly rendered buffer.
        // no PageID == 0 check needed in the encrypt decision: page 0 never reaches this point
        var buffer = _crypto == null
            ? page.WritePage()
            : _crypto.Encrypt(page.WritePage());

        _disk.WritePage(page.PageID, buffer);
    }

    if (_disk.IsJournalEnabled)
    {
        // ensure changed pages are persisted to disk *before* clearing header.Recovery
        _disk.Flush();

        // re-write header page but now with recovery = false
        header.Recovery = false;

        _log.Write(Logger.DISK, "re-write header page now with recovery = false");

        _disk.WritePage(0, header.WritePage());
    }

    // mark all dirty pages as clean pages (all are persisted in disk and are valid pages)
    _cache.MarkDirtyAsClean();

    // flush all data direct to disk
    _disk.Flush();

    // discard journal file
    _disk.ClearJournal(header.LastPageID);
}