/// <summary>
/// Delete all datablock that contains a document (can use multiples data blocks)
/// </summary>
public async Task Delete(PageAddress blockAddress)
{
    // walk the NextBlock chain, removing each data block until the empty sentinel
    for (var current = blockAddress; current != PageAddress.Empty; )
    {
        var page = await _snapshot.GetPage<DataPage>(current.PageID);
        var block = page.GetBlock(current.Index);

        // remove this block from its page
        page.DeleteBlock(current.Index);

        // re-link page into the free-data list (or drop the page entirely)
        await _snapshot.AddOrRemoveFreeDataList(page);

        // continue at the next block of the chain
        current = block.NextBlock;
    }
}
/// <summary>
/// Delete all datablock that contains a document (can use multiples data blocks)
/// </summary>
public void Delete(PageAddress blockAddress)
{
    var current = blockAddress;

    // follow the NextBlock chain, removing each block until the empty sentinel
    while (current != PageAddress.Empty)
    {
        var page = _snapshot.GetPage<DataPage>(current.PageID);
        var block = page.GetBlock(current.Index);

        // capture the free-list slot before the delete changes page.FreeBytes
        var slot = BasePage.FreeIndexSlot(page.FreeBytes);

        // remove this block from its page
        page.DeleteBlock(current.Index);

        // move page between free-list slots (or drop the page entirely)
        _snapshot.AddOrRemoveFreeList(page, slot);

        // continue at the next block of the chain
        current = block.NextBlock;
    }
}
/// <summary>
/// Insert a new node index inside an collection index. Flip coin to know level
/// </summary>
public IndexNode AddNode(CollectionIndex index, BsonValue key, PageAddress dataBlock, IndexNode last)
{
    // MinValue/MaxValue are reserved sentinels for the head/tail nodes only
    if (key.IsMaxValue || key.IsMinValue)
    {
        throw LiteException.InvalidIndexKey($"BsonValue MaxValue/MinValue are not supported as index key");
    }

    // coin-flip decides the skip-list level for this node (1..32)
    var level = this.Flip();

    // grow the index's recorded max level when this node exceeds it
    if (level > index.MaxLevel)
    {
        _snapshot.CollectionPage.UpdateCollectionIndex(index.Name).MaxLevel = level;
    }

    // delegate to the level-aware overload
    return this.AddNode(index, key, dataBlock, level, last);
}
/// <summary>
/// Write PageAddress as PageID, Index
/// </summary>
internal void Write(PageAddress address)
{
    // serialized layout: PageID first, then Index (order is part of the format)
    this.Write(address.PageID);
    this.Write(address.Index);
}
/// <summary>
/// Update document using same page position as reference.
/// Reuses the existing block chain while possible, appends new blocks when the
/// document grew, and trims (deletes) leftover blocks when it shrank.
/// </summary>
public void Update(CollectionPage col, PageAddress blockAddress, BsonDocument doc)
{
    var bytesLeft = doc.GetBytesCount(true);

    if (bytesLeft > MAX_DOCUMENT_SIZE) throw new LiteException(0, "Document size exceed {0} limit", MAX_DOCUMENT_SIZE);

    DataBlock lastBlock = null;
    var updateAddress = blockAddress;

    // lazy source of writable buffer slices, consumed by BufferWriter below
    IEnumerable<BufferSlice> source()
    {
        var bytesToCopy = 0;

        while (bytesLeft > 0)
        {
            // if last block contains new block sequence, continue updating
            if (updateAddress.IsEmpty == false)
            {
                var dataPage = _snapshot.GetPage<DataPage>(updateAddress.PageID);
                var currentBlock = dataPage.GetBlock(updateAddress.Index);

                // try get full page size content
                bytesToCopy = Math.Min(bytesLeft, dataPage.FreeBytes + currentBlock.Buffer.Count);

                // get current free slot linked list (must be read BEFORE resize changes FreeBytes)
                var slot = BasePage.FreeIndexSlot(dataPage.FreeBytes);

                var updateBlock = dataPage.UpdateBlock(currentBlock, bytesToCopy);

                _snapshot.AddOrRemoveFreeList(dataPage, slot);

                yield return updateBlock.Buffer;

                lastBlock = updateBlock;

                // go to next address (if exists)
                updateAddress = updateBlock.NextBlock;
            }
            else
            {
                // old chain exhausted: allocate a new extend block
                bytesToCopy = Math.Min(bytesLeft, MAX_DATA_BYTES_PER_PAGE);

                var dataPage = _snapshot.GetFreePage<DataPage>(bytesToCopy + DataBlock.DATA_BLOCK_FIXED_SIZE);
                var insertBlock = dataPage.InsertBlock(bytesToCopy, true);

                if (lastBlock != null)
                {
                    lastBlock.SetNextBlock(insertBlock.Position);
                }

                yield return insertBlock.Buffer;

                lastBlock = insertBlock;
            }

            bytesLeft -= bytesToCopy;
        }

        // old document was bigger than current, must delete extend blocks.
        // FIX: unlink the tail from lastBlock BEFORE deleting it, otherwise
        // lastBlock keeps a dangling NextBlock pointer into freed blocks
        // (mirrors the handling in the async overload of Update)
        if (lastBlock.NextBlock.IsEmpty == false)
        {
            var nextBlockAddress = lastBlock.NextBlock;

            lastBlock.SetNextBlock(PageAddress.Empty);

            this.Delete(nextBlockAddress);
        }
    }

    // consume all source bytes to write BsonDocument direct into PageBuffer
    // must be fastest as possible
    using (var w = new BufferWriter(source()))
    {
        // already bytes count calculate at method start
        w.WriteDocument(doc, false);
        w.Consume();
    }
}
// Resolve the index node for this address, then load the document it points to
public BsonDocument Load(PageAddress rawId)
{
    var node = _indexer.GetNode(rawId);

    return this.Load(node);
}
// Resolve the index node for this address, then load the document it points to
public async Task<BsonDocument> Load(PageAddress rawId)
{
    var node = await _indexer.GetNode(rawId);

    return await this.Load(node);
}
/// <summary>
/// Create new index node and persist into page segment.
/// Initializes the in-memory node state and immediately serializes the
/// fixed fields (slot, level, data block address, key, next-node pointer)
/// into the buffer segment at their byte offsets.
/// </summary>
public IndexNode(IndexPage page, byte index, BufferSlice segment, byte slot, byte level, BsonValue key, PageAddress dataBlock)
{
    _page = page;
    _segment = segment;

    // node position = owning page + block index within that page
    this.Position = new PageAddress(page.PageID, index);
    this.Slot = slot;
    this.Level = level;
    this.Key = key;
    this.DataBlock = dataBlock;

    this.NextNode = PageAddress.Empty;

    // one prev/next link per skip-list level, all initialized to Empty
    this.Next = new PageAddress[level];
    this.Prev = new PageAddress[level];

    for (var i = 0; i < level; i++)
    {
        this.SetPrev((byte)i, PageAddress.Empty);
        this.SetNext((byte)i, PageAddress.Empty);
    }

    // persist in buffer read only data (P_* constants are byte offsets in segment)
    segment[P_SLOT] = slot;
    segment[P_LEVEL] = level;
    segment.Write(dataBlock, P_DATA_BLOCK);
    segment.WriteIndexKey(key, P_KEY);

    // prevNode/nextNode must be defined as Empty
    segment.Write(this.NextNode, P_NEXT_NODE);

    page.IsDirty = true;
}
/// <summary>
/// Insert a new node index inside an collection index.
/// Skip-list insertion: scans from the top level down, finds the
/// insertion point per level, and links the new node into each level
/// at or below its own level. Also maintains the per-document NextNode
/// chain when a previous node ("last") is supplied.
/// </summary>
private IndexNode AddNode(CollectionIndex index, BsonValue key, PageAddress dataBlock, byte level, IndexNode last)
{
    // get a free index page for head note
    var bytesLength = IndexNode.GetNodeLength(level, key, out var keyLength);

    // test for index key maxlength
    if (keyLength > MAX_INDEX_KEY_LENGTH) throw LiteException.InvalidIndexKey($"Index key must be less than {MAX_INDEX_KEY_LENGTH} bytes.");

    var indexPage = _snapshot.GetFreeIndexPage(bytesLength, ref index.FreeIndexPageList);

    // create node in buffer
    var node = indexPage.InsertIndexNode(index.Slot, level, key, dataBlock, bytesLength);

    // now, let's link my index node on right place
    var cur = this.GetNode(index.Head);

    // using as cache last node read, to avoid re-reading the same node
    IndexNode cache = null;

    // scan from top left
    for (int i = index.MaxLevel - 1; i >= 0; i--)
    {
        // get cache for last node
        cache = cache != null && cache.Position == cur.Next[i] ? cache : this.GetNode(cur.Next[i]);

        // advance right on this level while next keys are smaller than the new key
        // for(; <while_not_this>; <do_this>) { ... }
        for (; cur.Next[i].IsEmpty == false; cur = cache)
        {
            // get cache for last node
            cache = cache != null && cache.Position == cur.Next[i] ? cache : this.GetNode(cur.Next[i]);

            // read next node to compare
            var diff = cache.Key.CompareTo(key, _collation);

            // if unique and diff = 0, throw index exception (must rollback transaction - others nodes can be dirty)
            if (diff == 0 && index.Unique) throw LiteException.IndexDuplicateKey(index.Name, key);

            if (diff == 1) break;
        }

        if (i <= (level - 1)) // level == length
        {
            // cur = current (immediately before - prev)
            // node = new inserted node
            // next = next node (where cur is pointing)

            node.SetNext((byte)i, cur.Next[i]);
            node.SetPrev((byte)i, cur.Position);
            cur.SetNext((byte)i, node.Position);

            var next = this.GetNode(node.Next[i]);

            if (next != null)
            {
                next.SetPrev((byte)i, node.Position);
            }
        }
    }

    // if last node exists, create a double link list
    if (last != null)
    {
        ENSURE(last.NextNode == PageAddress.Empty, "last index node must point to null");

        last.SetNextNode(node.Position);
    }

    // fix page position in free list slot
    _snapshot.AddOrRemoveFreeIndexList(node.Page, ref index.FreeIndexPageList);

    return (node);
}
/// <summary>
/// Create new DataBlock and fill into buffer.
/// Serializes the extend flag and next-block pointer into the segment at
/// their fixed byte offsets; the remainder of the segment is the payload.
/// </summary>
public DataBlock(DataPage page, byte index, BufferSlice segment, bool extend, PageAddress nextBlock)
{
    _page = page;
    _segment = segment;

    // block position = owning page + block index within that page
    this.Position = new PageAddress(page.PageID, index);
    this.NextBlock = nextBlock;
    this.Extend = extend;

    // byte 00: Data Index (extend flag)
    segment.Write(extend, P_EXTEND);

    // byte 01-05 (can be updated in "UpdateNextBlock")
    segment.Write(nextBlock, P_NEXT_BLOCK);

    // byte 06-EOL: Buffer (document payload slice)
    this.Buffer = segment.Slice(P_BUFFER, segment.Count - P_BUFFER);

    page.IsDirty = true;
}
/// <summary>
/// Update document using same page position as reference.
/// Reuses the existing block chain while possible, appends new blocks when
/// the document grew, and deletes leftover blocks when it shrank — unlinking
/// them from the last block first so no dangling NextBlock pointer remains.
/// </summary>
public async Task Update(CollectionPage col, PageAddress blockAddress, BsonDocument doc)
{
    var bytesLeft = doc.GetBytesCount(true);

    if (bytesLeft > MAX_DOCUMENT_SIZE) throw new LiteException(0, "Document size exceed {0} limit", MAX_DOCUMENT_SIZE);

    DataBlock lastBlock = null;
    var updateAddress = blockAddress;

    // lazy async source of writable buffer slices, consumed by BufferWriterAsync below
    async IAsyncEnumerable<BufferSlice> source()
    {
        var bytesToCopy = 0;

        while (bytesLeft > 0)
        {
            // if last block contains new block sequence, continue updating
            if (updateAddress.IsEmpty == false)
            {
                var dataPage = await _snapshot.GetPage<DataPage>(updateAddress.PageID);
                var currentBlock = dataPage.GetBlock(updateAddress.Index);

                // try get full page size content (do not add DATA_BLOCK_FIXED_SIZE because will be added in UpdateBlock)
                bytesToCopy = Math.Min(bytesLeft, dataPage.FreeBytes + currentBlock.Buffer.Count);

                var updateBlock = dataPage.UpdateBlock(currentBlock, bytesToCopy);

                await _snapshot.AddOrRemoveFreeDataList(dataPage);

                yield return updateBlock.Buffer;

                lastBlock = updateBlock;

                // go to next address (if exists)
                updateAddress = updateBlock.NextBlock;
            }
            else
            {
                // old chain exhausted: allocate a new extend block
                bytesToCopy = Math.Min(bytesLeft, MAX_DATA_BYTES_PER_PAGE);

                var dataPage = await _snapshot.GetFreeDataPage(bytesToCopy + DataBlock.DATA_BLOCK_FIXED_SIZE);
                var insertBlock = dataPage.InsertBlock(bytesToCopy, true);

                if (lastBlock != null)
                {
                    lastBlock.SetNextBlock(insertBlock.Position);
                }

                await _snapshot.AddOrRemoveFreeDataList(dataPage);

                yield return insertBlock.Buffer;

                lastBlock = insertBlock;
            }

            bytesLeft -= bytesToCopy;
        }

        // old document was bigger than current, must delete extend blocks:
        // unlink the tail from lastBlock BEFORE deleting to avoid a dangling pointer
        if (lastBlock.NextBlock.IsEmpty == false)
        {
            var nextBlockAddress = lastBlock.NextBlock;

            lastBlock.SetNextBlock(PageAddress.Empty);

            await this.Delete(nextBlockAddress);
        }
    }

    // consume all source bytes to write BsonDocument direct into PageBuffer
    await using (var writer = await BufferWriterAsync.CreateAsync(source()))
    {
        // already bytes count calculate at method start
        await writer.WriteDocument(doc, false);
        await writer.Consume();
    }
}
/// <summary>
/// Write PageAddress as PageID, Index
/// </summary>
internal async Task Write(PageAddress address)
{
    // serialized layout: PageID first, then Index (order is part of the format)
    await this.Write(address.PageID);
    await this.Write(address.Index);
}