/// <summary>
/// Get a new empty page from disk: can be a reused page (taken from the header's free list)
/// or a brand-new page created by extending the file.
/// Never re-uses a page created inside this same transaction.
/// </summary>
public T NewPage<T>()
    where T : BasePage
{
    // invariant: the very first page requested for a new collection must be the CollectionPage,
    // and a CollectionPage may only be requested while none exists yet
    ENSURE(_collectionPage == null, typeof(T) == typeof(CollectionPage), "if no collection page defined yet, must be first request");
    ENSURE(typeof(T) == typeof(CollectionPage), _collectionPage == null, "there is no new collection page if page already exists");

    var pageID = 0u;
    PageBuffer buffer;

    // lock header instance to get a new page (header free list / LastPageID are shared state)
    lock (_header)
    {
        // there is no need for _header.Savepoint() here because changes are incremental and will be persisted later;
        // if any problem occurs, rollback will catch these changes (via _transPages.NewPages below)

        // try to get a page from the Empty free list first (uint.MaxValue acts as the "null" link)
        if (_header.FreeEmptyPageList != uint.MaxValue)
        {
            var free = this.GetPage<BasePage>(_header.FreeEmptyPageList);

            ENSURE(free.PageType == PageType.Empty, "empty page must be defined as empty type");

            // advance header free-list head to the next free page
            _header.FreeEmptyPageList = free.NextPageID;

            // detach re-used page from the free list
            free.NextPageID = uint.MaxValue;

            // take pageID from the empty-list page
            pageID = free.PageID;

            // re-use the buffer already held by the empty page
            buffer = free.Buffer;
        }
        else
        {
            // no free page available: extend the file — first check the data file size limit
            var newLength = (_header.LastPageID + 1) * PAGE_SIZE;

            if (newLength > _header.Pragmas.LimitSize) { throw new LiteException(0, $"Maximum data file size has been reached: {FileHelper.FormatFileSize(_header.Pragmas.LimitSize)}"); }

            // increment LastPageID on the shared header page to claim the new pageID
            pageID = ++_header.LastPageID;

            // request a fresh buffer for the new page
            buffer = _reader.NewPage();
        }

        // retain the list of created pages so that, on rollback, they can be returned to the empty list
        _transPages.NewPages.Add(pageID);
    }

    var page = BasePage.CreatePage<T>(buffer, pageID);

    // update local cache with the new typed page instance (collection pages are tracked separately)
    if (page.PageType != PageType.Collection)
    {
        _localPages[pageID] = page;
    }

    // define ColID for this new page (if _collectionPage is null, this page IS the new collection page,
    // so it points to itself)
    page.ColID = _collectionPage?.PageID ?? page.PageID;

    // mark dirty so the (possibly re-used) buffer is rewritten with the new pageType
    page.IsDirty = true;

    // account this page in the transaction size (used to decide when to persist/checkpoint)
    _transPages.TransactionSize++;

    return(page);
}
/// <summary>
/// Return added pages when a transaction rollback occurs (run this only in rollback). Creates a new
/// transactionID and writes all new pages into the log file as EmptyPage in a linked order — also
/// updates the header page (free list head) before storing it.
/// </summary>
private void ReturnNewPages()
{
    // create a new transaction ID dedicated to this "return pages" write
    var transactionID = _walIndex.NextTransactionID();

    // lock header to update TransactionID/FreeEmptyPageList atomically with the write
    lock (_header)
    {
        // wal positions of every empty page persisted below (filled lazily by source())
        var pagePositions = new Dictionary<uint, PagePosition>();

        // NOTE: this iterator has deliberate side effects — it mutates pagePositions and _header
        // only while being enumerated by _disk.WriteAsync below (i.e. inside the savepoint try/catch)
        IEnumerable<PageBuffer> source()
        {
            // emit each new page as an Empty page, forward-linked to the next one;
            // the last page links into the current header free list
            for (var i = 0; i < _transPages.NewPages.Count; i++)
            {
                var pageID = _transPages.NewPages[i];
                var next = i < _transPages.NewPages.Count - 1 ? _transPages.NewPages[i + 1] : _header.FreeEmptyPageList;

                var buffer = _disk.Cache.NewPage();

                var page = new BasePage(buffer, pageID, PageType.Empty)
                {
                    NextPageID = next,
                    TransactionID = transactionID
                };

                yield return(page.UpdateBuffer());

                // record wal position for the transaction confirmation below
                pagePositions[pageID] = new PagePosition(pageID, buffer.Position);
            }

            // update header page with this new transaction ID and point its free list
            // at the first returned page; IsConfirmed marks the header write as a commit record
            _header.TransactionID = transactionID;
            _header.FreeEmptyPageList = _transPages.NewPages[0];
            _header.IsConfirmed = true;

            // clone the header buffer so the live header page buffer is not handed to the writer
            var buf = _header.UpdateBuffer();
            var clone = _disk.Cache.NewPage();

            Buffer.BlockCopy(buf.Array, buf.Offset, clone.Array, clone.Offset, clone.Count);

            yield return(clone);
        };

        // create a header save point before any change (changes happen during enumeration)
        var safepoint = _header.Savepoint();

        try
        {
            // write all pages (including the new header clone)
            _disk.WriteAsync(source());
        }
        catch
        {
            // must revert all header content if any error occurs during the header change
            _header.Restore(safepoint);
            throw;
        }

        // now confirm this transaction to the wal index
        _walIndex.ConfirmTransaction(transactionID, pagePositions.Values);
    }
}
/// <summary>
/// Update a document in place, re-using the existing data-block chain starting at
/// <paramref name="blockAddress"/>; extends the chain with new blocks if the document grew,
/// and deletes trailing blocks if it shrank.
/// </summary>
public void Update(CollectionPage col, PageAddress blockAddress, BsonDocument doc)
{
    // serialized size is computed once up-front; source() below produces exactly this many bytes of slices
    var bytesLeft = doc.GetBytesCount(true);

    if (bytesLeft > MAX_DOCUMENT_SIZE) throw new LiteException(0, "Document size exceed {0} limit", MAX_DOCUMENT_SIZE);

    DataBlock lastBlock = null;
    var updateAddress = blockAddress;

    // NOTE: lazy iterator with deliberate side effects — page/block mutations happen only while
    // BufferWriter (at the bottom) consumes this sequence
    IEnumerable<BufferSlice> source()
    {
        var bytesToCopy = 0;

        while (bytesLeft > 0)
        {
            // while the old chain still has blocks, re-use them
            if (updateAddress.IsEmpty == false)
            {
                var dataPage = _snapshot.GetPage<DataPage>(updateAddress.PageID);
                var currentBlock = dataPage.GetBlock(updateAddress.Index);

                // try to use the full page capacity: current block size plus the page's remaining free
                // bytes (do not add DATA_BLOCK_FIXED_SIZE because it will be added inside UpdateBlock)
                bytesToCopy = Math.Min(bytesLeft, dataPage.FreeBytes + currentBlock.Buffer.Count);

                // remember which free-list slot the page was in BEFORE resizing the block
                var slot = BasePage.FreeIndexSlot(dataPage.FreeBytes);

                var updateBlock = dataPage.UpdateBlock(currentBlock, bytesToCopy);

                // re-file the page in the snapshot free list now that its FreeBytes changed
                _snapshot.AddOrRemoveFreeList(dataPage, slot);

                yield return updateBlock.Buffer;

                lastBlock = updateBlock;

                // go to the next block in the old chain (if any)
                updateAddress = updateBlock.NextBlock;
            }
            else
            {
                // old chain exhausted: document grew, insert a new extend block
                bytesToCopy = Math.Min(bytesLeft, MAX_DATA_BYTES_PER_PAGE);
                var dataPage = _snapshot.GetFreePage<DataPage>(bytesToCopy + DataBlock.DATA_BLOCK_FIXED_SIZE);
                var insertBlock = dataPage.InsertBlock(bytesToCopy, true);

                // link the previous block to this newly-inserted one
                if (lastBlock != null)
                {
                    lastBlock.SetNextBlock(insertBlock.Position);
                }

                yield return insertBlock.Buffer;

                lastBlock = insertBlock;
            }

            bytesLeft -= bytesToCopy;
        }

        // old document was bigger than the new one: delete the now-unused trailing blocks
        if (lastBlock.NextBlock.IsEmpty == false)
        {
            var nextBlockAddress = lastBlock.NextBlock;

            lastBlock.SetNextBlock(PageAddress.Empty);

            this.Delete(nextBlockAddress);
        }
    }

    // consume all source slices, writing the BsonDocument directly into the PageBuffers —
    // must be as fast as possible
    using (var w = new BufferWriter(source()))
    {
        // byte count was already calculated at method start, so no length prefix recount here
        w.WriteDocument(doc, false);
        w.Consume();
    }
}