/// <summary>
/// Moves a single entry from the branch page <paramref name="from"/> into its sibling
/// branch page <paramref name="to"/> (used during rebalancing), then fixes up the
/// separator key in <paramref name="parentPage"/>.
/// Branch pages keep their first (position 0) entry as an "implicit" left reference with
/// an empty key, which is why both the source and the destination need special handling
/// at position 0.
/// </summary>
private void MoveBranchNode(TreePage parentPage, TreePage from, TreePage to)
{
    Debug.Assert(from.IsBranch);
    Slice originalFromKey;
    // GetActualKey returns a disposable scope that owns the key memory.
    using (GetActualKey(from, from.LastSearchPositionOrLastEntry, out originalFromKey))
    {
        to.EnsureHasSpaceFor(_tx, originalFromKey, -1);
        var fromNode = from.GetNode(from.LastSearchPosition);
        long pageNum = fromNode->PageNumber;
        if (to.LastSearchPosition == 0)
        {
            // cannot add to left implicit side, adjust by moving the left node
            // to the right by one, then adding the new one as the left
            TreeNodeHeader* actualKeyNode;
            Slice implicitLeftKey;
            using (GetActualKey(to, 0, out actualKeyNode, out implicitLeftKey))
            {
                var implicitLeftNode = to.GetNode(0);
                var leftPageNumber = implicitLeftNode->PageNumber;
                Slice implicitLeftKeyToInsert;
                ByteStringContext.ExternalScope? externalScope;
                if (implicitLeftNode == actualKeyNode)
                {
                    // the key lives in the node itself; materialize a slice over it
                    // and dispose the scope only after the key has been copied in
                    externalScope = TreeNodeHeader.ToSlicePtr(_tx.Allocator, actualKeyNode, out implicitLeftKeyToInsert);
                }
                else
                {
                    // the actual key came from a descendant page; reuse the slice already produced
                    implicitLeftKeyToInsert = implicitLeftKey;
                    externalScope = null;
                }
                to.EnsureHasSpaceFor(_tx, implicitLeftKeyToInsert, -1);
                // re-insert the old implicit-left entry at position 1, now with an explicit key
                to.AddPageRefNode(1, implicitLeftKeyToInsert, leftPageNumber);
                externalScope?.Dispose();
                to.ChangeImplicitRefPageNode(pageNum); // setup the new implicit node
            }
        }
        else
        {
            to.AddPageRefNode(to.LastSearchPosition, originalFromKey, pageNum);
        }
    }
    if (from.LastSearchPositionOrLastEntry == 0)
    {
        // we took the implicit-left entry from the source; promote entry 1 to be the new implicit left
        var rightPageNumber = from.GetNode(1)->PageNumber;
        from.RemoveNode(0); // remove the original implicit node
        from.ChangeImplicitRefPageNode(rightPageNumber); // setup the new implicit node
        Debug.Assert(from.NumberOfEntries >= 2);
    }
    else
    {
        from.RemoveNode(from.LastSearchPositionOrLastEntry);
    }
    // replace the old separator in the parent with the new smallest key of the right-hand page
    var pos = parentPage.LastSearchPositionOrLastEntry;
    parentPage.RemoveNode(pos);
    Slice newSeparatorKey;
    var scope = GetActualKey(to, 0, out newSeparatorKey); // get the next smallest key it has now
    try
    {
        var pageNumber = to.PageNumber;
        if (parentPage.GetNode(0)->PageNumber == to.PageNumber)
        {
            // 'to' is the parent's implicit-left child, so the separator must describe 'from' instead
            pageNumber = from.PageNumber;
            scope.Dispose();
            scope = GetActualKey(from, 0, out newSeparatorKey);
        }
        AddSeparatorToParentPage(to, parentPage, pageNumber, newSeparatorKey, pos);
    }
    finally
    {
        scope.Dispose();
    }
}
/// <summary>
/// Moves a single entry from the leaf page <paramref name="from"/> into its sibling leaf
/// page <paramref name="to"/> (used during rebalancing), copying the node's data bytes,
/// then fixes up the separator key in <paramref name="parentPage"/>.
/// </summary>
private void MoveLeafNode(TreePage parentPage, TreePage from, TreePage to)
{
    Debug.Assert(from.IsBranch == false);
    Slice originalFromKeyStart;
    using (GetActualKey(from, from.LastSearchPositionOrLastEntry, out originalFromKeyStart))
    {
        var fromNode = from.GetNode(from.LastSearchPosition);
        // raw pointer to the node's value bytes: page base + key offset + header + key size
        byte* val = @from.Base + @from.KeysOffsets[@from.LastSearchPosition] + Constants.Tree.NodeHeaderSize + originalFromKeyStart.Size;
        byte* dataPos;
        var fromDataSize = fromNode->DataSize;
        switch (fromNode->Flags)
        {
            case TreeNodeFlags.PageRef:
                to.EnsureHasSpaceFor(_tx, originalFromKeyStart, -1);
                dataPos = to.AddPageRefNode(to.LastSearchPosition, originalFromKeyStart, fromNode->PageNumber);
                break;
            case TreeNodeFlags.Data:
                to.EnsureHasSpaceFor(_tx, originalFromKeyStart, fromDataSize);
                dataPos = to.AddDataNode(to.LastSearchPosition, originalFromKeyStart, fromDataSize);
                break;
            case TreeNodeFlags.MultiValuePageRef:
                to.EnsureHasSpaceFor(_tx, originalFromKeyStart, fromDataSize);
                dataPos = to.AddMultiValueNode(to.LastSearchPosition, originalFromKeyStart, fromDataSize);
                break;
            default:
                throw new NotSupportedException("Invalid node type to move: " + fromNode->Flags);
        }
        // PageRef nodes carry no inline data; only copy when the destination returned a data position
        if (dataPos != null && fromDataSize > 0)
        {
            Memory.Copy(dataPos, val, fromDataSize);
        }
        from.RemoveNode(from.LastSearchPositionOrLastEntry);
        // replace the old separator in the parent with the new smallest key of the right-hand page
        var pos = parentPage.LastSearchPositionOrLastEntry;
        parentPage.RemoveNode(pos);
        Slice newSeparatorKey;
        var scope = GetActualKey(to, 0, out newSeparatorKey); // get the next smallest key it has now
        try
        {
            var pageNumber = to.PageNumber;
            if (parentPage.GetNode(0)->PageNumber == to.PageNumber)
            {
                // 'to' is the parent's implicit-left child, so the separator must describe 'from' instead
                pageNumber = from.PageNumber;
                scope.Dispose();
                scope = GetActualKey(from, 0, out newSeparatorKey);
            }
            AddSeparatorToParentPage(to, parentPage, pageNumber, newSeparatorKey, pos);
        }
        finally
        {
            scope.Dispose();
        }
    }
}
/// <summary>
/// Inserts a separator entry for <paramref name="childPage"/> into
/// <paramref name="parentPage"/> at the given position, delegating the work
/// (including any resulting parent-page handling) to <see cref="ParentPageAction"/>.
/// </summary>
private void AddSeparatorToParentPage(TreePage childPage, TreePage parentPage, long pageNumber, Slice seperatorKey, int separatorKeyPosition)
{
    var action = new ParentPageAction(parentPage, childPage, _tree, _cursor, _tx);
    action.AddSeparator(seperatorKey, pageNumber, separatorKeyPosition);
}
/// <summary>
/// Throw helper: accessing a compressed page's content directly is invalid;
/// callers must decompress it first.
/// </summary>
private static void ThrowOnCompressedPage(TreePage p)
{
    var message = $"Page {p} is compressed. You need to decompress it to be able to access its content.";
    throw new PageCompressedException(message);
}
/// <summary>
/// Attempts to serve a lookup for <paramref name="key"/> from the recently-found-pages
/// cache instead of walking the tree from the root.
/// Returns false (with null outputs) when the cache has no entry for the key.
/// </summary>
private bool TryUseRecentTransactionPage(Slice key, out TreeCursorConstructor cursor, out TreePage page, out TreeNodeHeader* node)
{
    var cached = _recentlyFoundPages?.Find(key);
    if (cached == null)
    {
        page = null;
        node = null;
        cursor = default(TreeCursorConstructor);
        return false;
    }

    var pageNumber = cached.Number;

    // we can't share the same instance, Page instance may be modified by
    // concurrently run iterators — wrap the cached memory in a private TreePage
    page = cached.Page != null
        ? new TreePage(cached.Page.Base, cached.Page.PageSize)
        : GetReadOnlyTreePage(pageNumber);

    if (page.IsLeaf == false)
    {
        VoronUnrecoverableErrorException.Raise(_llt.Environment, "Index points to a non leaf page");
    }

    node = page.Search(_llt, key); // will set the LastSearchPosition
    cursor = new TreeCursorConstructor(_llt, this, page, cached.CursorPath, pageNumber);
    return true;
}
/// <summary>
/// Splits the current page (_page) roughly in half into <paramref name="rightPage"/>,
/// inserts the separator into the parent, inserts the pending new key (_newKey) into
/// whichever side it belongs to, and returns the position of the written value
/// (null when the key was added as an implicit branch ref).
/// Handles splitting of decompressed leaf pages by staging the right side in a
/// decompression buffer and copying it back to the real page afterwards.
/// </summary>
private byte* SplitPageInHalf(TreePage rightPage)
{
    bool toRight;
    var currentIndex = _page.LastSearchPosition;
    var splitIndex = _page.NumberOfEntries / 2;
    if (currentIndex <= splitIndex)
    {
        // new key falls in the left half; keep the even split
        toRight = false;
    }
    else
    {
        toRight = true;
        // the right side also receives the new key (+1); nudge the split point so
        // the right page doesn't end up larger than the left
        var leftPageEntryCount = splitIndex;
        var rightPageEntryCount = _page.NumberOfEntries - leftPageEntryCount + 1;
        if (rightPageEntryCount > leftPageEntryCount)
        {
            splitIndex++;
            Debug.Assert(splitIndex < _page.NumberOfEntries);
        }
    }
    DecompressedLeafPage rightDecompressed = null;
    int? decompressedPageSize = null;
    if (_pageDecompressed != null)
    {
        decompressedPageSize = _pageDecompressed.PageSize;
    }
    else if (_splittingOnDecompressed)
    {
        decompressedPageSize = _page.PageSize;
    }
    if (decompressedPageSize != null)
    {
        // splitting the decompressed page, let's allocate the page of the same size to ensure enough space
        rightDecompressed = _tx.Environment.DecompressionBuffers.GetPage(_tx, decompressedPageSize.Value, DecompressionUsage.Write, rightPage);
        rightPage = rightDecompressed;
    }
    if (_page.IsLeaf)
    {
        // leaf entries have variable sizes; let AdjustSplitPosition pick a byte-balanced split
        splitIndex = AdjustSplitPosition(currentIndex, splitIndex, rightPage, ref toRight);
    }
    Slice currentKey;
    using (_page.GetNodeKey(_tx, splitIndex, out currentKey))
    {
        Slice seperatorKey;
        if (toRight && splitIndex == currentIndex)
        {
            // the new key would become the first key of the right page; the separator
            // must be the smaller of the split key and the new key
            seperatorKey = SliceComparer.Compare(currentKey, _newKey) < 0 ? currentKey : _newKey;
        }
        else
        {
            seperatorKey = currentKey;
        }
        var addedAsImplicitRef = false;
        var parentOfPage = _cursor.CurrentPage;
        TreePage parentOfRight;
        using (rightDecompressed)
        {
            AddSeparatorToParentPage(rightPage.PageNumber, seperatorKey, out parentOfRight);
            if (_page.IsBranch && toRight && SliceComparer.EqualsInline(seperatorKey, _newKey))
            {
                // _newKey needs to be inserted as first key (BeforeAllKeys) to the right page, so we need to add it before we move entries from the current page
                AddNodeToPage(rightPage, 0, Slices.BeforeAllKeys);
                addedAsImplicitRef = true;
            }
            // move the actual entries from page to right page
            ushort nKeys = _page.NumberOfEntries;
            for (int i = splitIndex; i < nKeys; i++)
            {
                TreeNodeHeader* node = _page.GetNode(i);
                if (_page.IsBranch && rightPage.NumberOfEntries == 0)
                {
                    // first entry of a branch page becomes the implicit-left ref (empty key)
                    rightPage.CopyNodeDataToEndOfPage(node, Slices.BeforeAllKeys);
                }
                else
                {
                    Slice instance;
                    using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, node, out instance))
                    {
                        rightPage.CopyNodeDataToEndOfPage(node, instance);
                    }
                }
            }
            if (rightDecompressed != null)
            {
                // flush the staged right page from the decompression buffer into the real page
                rightDecompressed.CopyToOriginal(_tx, defragRequired: false, wasModified: true, _tree);
                rightPage = rightDecompressed.Original;
            }
        }
        _page.Truncate(_tx, splitIndex);
        RecompressPageIfNeeded(wasModified: true);
        byte* pos;
        if (addedAsImplicitRef == false)
        {
            try
            {
                if (toRight && _cursor.CurrentPage.PageNumber != parentOfRight.PageNumber)
                {
                    // modify the cursor if we are going to insert to the right page
                    _cursor.Pop();
                    _cursor.Push(parentOfRight);
                }
                // actually insert the new key
                pos = InsertNewKey(toRight ? rightPage : _page);
            }
            catch (InvalidOperationException e)
            {
                // only wrap the specific "page full" failure with diagnostics; rethrow anything else
                if (e.Message.StartsWith("The page is full and cannot add an entry", StringComparison.Ordinal) == false)
                {
                    throw;
                }
                throw new InvalidOperationException(GatherDetailedDebugInfo(rightPage, currentKey, seperatorKey, currentIndex, splitIndex, toRight), e);
            }
        }
        else
        {
            pos = null;
            _cursor.Push(rightPage);
        }
        if (_page.IsBranch) // remove a branch that has only one entry, the page ref needs to be added to the parent of the current page
        {
            Debug.Assert(_page.NumberOfEntries > 0);
            Debug.Assert(rightPage.NumberOfEntries > 0);
            if (_page.NumberOfEntries == 1)
            {
                RemoveBranchWithOneEntry(_page, parentOfPage);
            }
            if (rightPage.NumberOfEntries == 1)
            {
                RemoveBranchWithOneEntry(rightPage, parentOfRight);
            }
        }
        return (pos);
    }
}
/// <summary>
/// Convenience overload of GetActualKey that discards the node header output
/// and returns only the key slice.
/// </summary>
private Slice GetActualKey(TreePage page, int pos)
{
    TreeNodeHeader* ignored;
    return GetActualKey(page, pos, out ignored);
}
/// <summary>
/// Adds <paramref name="value"/> under <paramref name="key"/> in multi-value mode.
/// Small value sets are stored inline as a nested page inside the key's entry; once the
/// nested page can no longer grow within the max node size, the entry is promoted to a
/// full nested tree (MultiValuePageRef).
/// </summary>
/// <exception cref="ArgumentNullException">value has no backing memory</exception>
/// <exception cref="ArgumentException">value is empty or exceeds the max node size</exception>
public void MultiAdd(Slice key, Slice value)
{
    if (!value.HasValue)
    {
        throw new ArgumentNullException(nameof(value));
    }
    int maxNodeSize = Llt.DataPager.NodeMaxSize;
    if (value.Size > maxNodeSize)
    {
        throw new ArgumentException("Cannot add a value to child tree that is over " + maxNodeSize + " bytes in size", nameof(value));
    }
    if (value.Size == 0)
    {
        throw new ArgumentException("Cannot add empty value to child tree");
    }
    State.IsModified = true;
    State.Flags |= TreeFlags.MultiValueTrees;
    TreeNodeHeader* node;
    var page = FindPageFor(key, out node);
    if (page == null || page.LastMatch != 0)
    {
        // key doesn't exist yet; create a fresh multi-value entry
        MultiAddOnNewValue(key, value, maxNodeSize);
        return;
    }
    page = ModifyPage(page);
    var item = page.GetNode(page.LastSearchPosition);
    byte* _;
    // already was turned into a multi tree, not much to do here
    if (item->Flags == TreeNodeFlags.MultiValuePageRef)
    {
        var existingTree = OpenMultiValueTree(key, item);
        existingTree.DirectAdd(value, 0, out _).Dispose();
        return;
    }
    if (item->Flags == TreeNodeFlags.PageRef)
    {
        throw new InvalidOperationException("Multi trees don't use overflows");
    }
    // still in nested-page mode: the entry's data is itself a small TreePage keyed by value
    var nestedPagePtr = DirectAccessFromHeader(item);
    var nestedPage = new TreePage(nestedPagePtr, (ushort)GetDataSize(item));
    var existingItem = nestedPage.Search(_llt, value);
    if (nestedPage.LastMatch != 0)
    {
        existingItem = null; // not an actual match, just greater than
    }
    if (existingItem != null)
    {
        // maybe same value added twice?
        Slice tmpKey;
        using (TreeNodeHeader.ToSlicePtr(_llt.Allocator, item, out tmpKey))
        {
            if (SliceComparer.Equals(tmpKey, value))
            {
                return; // already there, turning into a no-op
            }
        }
        nestedPage.RemoveNode(nestedPage.LastSearchPosition);
    }
    if (nestedPage.HasSpaceFor(_llt, value, 0))
    {
        // we are now working on top of the modified root page, we can just modify the memory directly
        nestedPage.AddDataNode(nestedPage.LastSearchPosition, value, 0);
        return;
    }
    if (page.HasSpaceFor(_llt, value, 0))
    {
        // page has space for an additional node in nested page ...
        var requiredSpace = nestedPage.PageSize + // existing page
                            nestedPage.GetRequiredSpace(value, 0); // new node
        if (requiredSpace + Constants.Tree.NodeHeaderSize <= maxNodeSize)
        {
            // ... and it won't require to create an overflow, so we can just expand the current value, no need to create a nested tree yet
            EnsureNestedPagePointer(page, item, ref nestedPage, ref nestedPagePtr);
            var newPageSize = (ushort)Math.Min(Bits.NextPowerOf2(requiredSpace), maxNodeSize - Constants.Tree.NodeHeaderSize);
            ExpandMultiTreeNestedPageSize(key, value, nestedPagePtr, newPageSize, nestedPage.PageSize);
            return;
        }
    }
    EnsureNestedPagePointer(page, item, ref nestedPage, ref nestedPagePtr);
    // we now have to convert this into a tree instance, instead of just a nested page
    var tree = Create(_llt, _tx, key, TreeFlags.MultiValue);
    for (int i = 0; i < nestedPage.NumberOfEntries; i++)
    {
        // migrate every existing value from the nested page into the new tree
        Slice existingValue;
        using (nestedPage.GetNodeKey(_llt, i, out existingValue))
        {
            tree.DirectAdd(existingValue, 0, out _).Dispose();
        }
    }
    tree.DirectAdd(value, 0, out _).Dispose();
    _tx.AddMultiValueTree(this, key, tree);
    // we need to record that we switched to tree mode here, so the next call wouldn't also try to create the tree again
    DirectAdd(key, sizeof(TreeRootHeader), TreeNodeFlags.MultiValuePageRef, out _).Dispose();
}
/// <summary>
/// Re-resolves the nested-page pointer after an operation (e.g. HasSpaceFor, which may
/// defragment the page internally) could have relocated the node within the page.
/// If the node at LastSearchPosition is unchanged, this is a no-op.
/// </summary>
private void EnsureNestedPagePointer(TreePage page, TreeNodeHeader* currentItem, ref TreePage nestedPage, ref byte* nestedPagePtr)
{
    var relocatedItem = page.GetNode(page.LastSearchPosition);
    if (relocatedItem != currentItem)
    {
        // the node moved; rebuild the nested page view over its new location
        nestedPagePtr = DirectAccessFromHeader(relocatedItem);
        nestedPage = new TreePage(nestedPagePtr, (ushort)GetDataSize(relocatedItem));
    }
}
/// <summary>
/// Merges the uncompressed nodes of page <paramref name="p"/> into
/// <paramref name="decompressedPage"/>, honoring compression tombstones (which delete
/// entries) and replacing existing entries on key match. Fix: corrected the typo
/// ("copye") in the unsupported-node-type exception message.
/// </summary>
/// <exception cref="InvalidOperationException">decompressed page ran out of space</exception>
/// <exception cref="NotSupportedException">node flags are MultiValuePageRef or unknown</exception>
private void HandleUncompressedNodes(DecompressedLeafPage decompressedPage, TreePage p, DecompressionUsage usage)
{
    int numberOfEntries = p.NumberOfEntries;
    for (var i = 0; i < numberOfEntries; i++)
    {
        var uncompressedNode = p.GetNode(i);
        Slice nodeKey;
        using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, uncompressedNode, out nodeKey))
        {
            if (uncompressedNode->Flags == TreeNodeFlags.CompressionTombstone)
            {
                // a tombstone deletes the matching entry from the decompressed page
                HandleTombstone(decompressedPage, nodeKey, usage);
                continue;
            }
            if (decompressedPage.HasSpaceFor(_llt, TreeSizeOf.NodeEntry(uncompressedNode)) == false)
            {
                throw new InvalidOperationException("Could not add uncompressed node to decompressed page");
            }
            int index;
            if (decompressedPage.NumberOfEntries > 0)
            {
                Slice lastKey;
                using (decompressedPage.GetNodeKey(_llt, decompressedPage.NumberOfEntries - 1, out lastKey))
                {
                    // optimization: it's very likely that uncompressed nodes have greater keys than compressed ones
                    // when we insert sequential keys
                    var cmp = SliceComparer.CompareInline(nodeKey, lastKey);
                    if (cmp > 0)
                    {
                        index = decompressedPage.NumberOfEntries;
                    }
                    else
                    {
                        if (cmp == 0)
                        {
                            // update of the last entry, just decrement NumberOfEntries in the page and
                            // put it at the last position
                            index = decompressedPage.NumberOfEntries - 1;
                            decompressedPage.Lower -= Constants.Tree.NodeOffsetSize;
                        }
                        else
                        {
                            index = decompressedPage.NodePositionFor(_llt, nodeKey);
                            if (decompressedPage.LastMatch == 0) // update
                            {
                                decompressedPage.RemoveNode(index);
                                if (usage == DecompressionUsage.Write)
                                {
                                    State.NumberOfEntries--;
                                }
                            }
                        }
                    }
                }
            }
            else
            {
                // all uncompressed nodes were compression tombstones which deleted all entries from the decompressed page
                index = 0;
            }
            switch (uncompressedNode->Flags)
            {
                case TreeNodeFlags.PageRef:
                    decompressedPage.AddPageRefNode(index, nodeKey, uncompressedNode->PageNumber);
                    break;
                case TreeNodeFlags.Data:
                    var pos = decompressedPage.AddDataNode(index, nodeKey, uncompressedNode->DataSize);
                    var nodeValue = TreeNodeHeader.Reader(_llt, uncompressedNode);
                    Memory.Copy(pos, nodeValue.Base, nodeValue.Length);
                    break;
                case TreeNodeFlags.MultiValuePageRef:
                    throw new NotSupportedException("Multi trees do not support compression");
                default:
                    // fixed typo in the message ("copye" -> "copy")
                    throw new NotSupportedException("Invalid node type to copy: " + uncompressedNode->Flags);
            }
        }
    }
}
/// <summary>
/// Removes <paramref name="value"/> from the multi-value entry stored under
/// <paramref name="key"/>. Handles both storage modes: a full nested tree
/// (MultiValuePageRef) and an inline nested page. When the last value is removed,
/// the key itself is deleted (and a nested tree's root page is freed via the
/// appropriate allocator).
/// </summary>
public void MultiDelete(Slice key, Slice value)
{
    State.IsModified = true;
    TreeNodeHeader* node;
    var page = FindPageFor(key, out node);
    if (page == null || page.LastMatch != 0)
    {
        return; //nothing to delete - key not found
    }
    page = ModifyPage(page);
    var item = page.GetNode(page.LastSearchPosition);
    if (item->Flags == TreeNodeFlags.MultiValuePageRef) //multi-value tree exists
    {
        var tree = OpenMultiValueTree(key, item);
        tree.Delete(value);
        // previously, we would convert back to a simple model if we dropped to a single entry
        // however, it doesn't really make sense, once you got enough values to go to an actual nested
        // tree, you are probably going to remain that way, or be removed completely.
        if (tree.State.NumberOfEntries != 0)
        {
            return;
        }
        // nested tree is now empty: unregister it and free its root page through the
        // same allocator family that created it (index trees vs. regular free space)
        _tx.TryRemoveMultiValueTree(this, key);
        if (_newPageAllocator != null)
        {
            if (IsIndexTree == false)
            {
                ThrowAttemptToFreePageToNewPageAllocator(Name, tree.State.RootPageNumber);
            }
            _newPageAllocator.FreePage(tree.State.RootPageNumber);
        }
        else
        {
            if (IsIndexTree)
            {
                ThrowAttemptToFreeIndexPageToFreeSpaceHandling(Name, tree.State.RootPageNumber);
            }
            _llt.FreePage(tree.State.RootPageNumber);
        }
        Delete(key);
    }
    else // we use a nested page here
    {
        var nestedPage = new TreePage(DirectAccessFromHeader(item), (ushort)GetDataSize(item));
        nestedPage.Search(_llt, value); // need to search the value in the nested page
        if (nestedPage.LastMatch != 0) // value not found
        {
            return;
        }
        if (item->Flags == TreeNodeFlags.PageRef)
        {
            throw new InvalidOperationException("Multi trees don't use overflows");
        }
        // re-resolve the nested page pointer before mutating, carrying over the search position
        var nestedPagePtr = DirectAccessFromHeader(item);
        nestedPage = new TreePage(nestedPagePtr, (ushort)GetDataSize(item))
        {
            LastSearchPosition = nestedPage.LastSearchPosition
        };
        nestedPage.RemoveNode(nestedPage.LastSearchPosition);
        if (nestedPage.NumberOfEntries == 0)
        {
            Delete(key);
        }
    }
}
/// <summary>
/// Resolves the "actual" key for the node at <paramref name="pos"/> on
/// <paramref name="page"/>. Branch pages store an implicit-left entry with an empty
/// key, so while the key is empty this walks down to the referenced child page and takes
/// its smallest key instead — decompressing compressed leaves along the way when needed.
/// Returns a scope that owns both the key memory and any decompressed page; the caller
/// must dispose it.
/// </summary>
private ActualKeyScope GetActualKey(TreePage page, int pos, out TreeNodeHeader* node, out Slice key)
{
    DecompressedLeafPage decompressedLeafPage = null;
    node = page.GetNode(pos);
    var scope = TreeNodeHeader.ToSlicePtr(_tx.Allocator, node, out key);
    while (key.Size == 0)
    {
        // an empty key means this is an implicit ref; descend into the referenced page
        Debug.Assert(page.IsBranch);
        page = _tree.GetReadOnlyTreePage(node->PageNumber);
        if (page.IsCompressed == false)
        {
            node = page.GetNode(0);
        }
        else
        {
            // release any page decompressed on a previous loop iteration before replacing it
            decompressedLeafPage?.Dispose();
            decompressedLeafPage = _tree.DecompressPage(page, skipCache: true);
            if (decompressedLeafPage.NumberOfEntries > 0)
            {
                if (page.NumberOfEntries == 0)
                {
                    node = decompressedLeafPage.GetNode(0);
                }
                else
                {
                    // we want to find the smallest key in compressed page
                    // it can be inside compressed part or not compressed one
                    // in particular, it can be the key of compression tombstone node that we don't see after decompression
                    // so we need to take first keys from decompressed and compressed page and compare them
                    var decompressedNode = decompressedLeafPage.GetNode(0);
                    var compressedNode = page.GetNode(0);
                    using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, decompressedNode, out var firstDecompressedKey))
                    using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, compressedNode, out var firstCompressedKey))
                    {
                        node = SliceComparer.CompareInline(firstDecompressedKey, firstCompressedKey) > 0 ? compressedNode : decompressedNode;
                    }
                }
            }
            else
            {
                // we have empty page after decompression (each compressed entry has a corresponding CompressionTombstone)
                // we can safely use the node key of first tombstone (they have proper order)
                node = page.GetNode(0);
            }
        }
        scope.Dispose();
        scope = TreeNodeHeader.ToSlicePtr(_tx.Allocator, node, out key);
    }
    return (new ActualKeyScope
    {
        DecompressedLeafPage = decompressedLeafPage,
        ExternalScope = scope
    });
}
/// <summary>
/// Entry point for a page split: prepares the page (decompressing it if compressed and
/// removing an entry that is being updated), performs a root split when the cursor is at
/// the root, then either fast-paths an append at the end of the page or splits the page
/// in half. Returns the write position of the new value.
/// </summary>
public byte* Execute()
{
    using (DisableFreeSpaceUsageIfSplittingRootTree())
    {
        if (_page.IsLeaf)
        {
            _tree.ClearPagesCache();
        }
        if (_page.IsCompressed)
        {
            _pageDecompressed = _tree.DecompressPage(_page, WriteDecompressionUsage, skipCache: false);
            _pageDecompressed.Search(_tx, _newKey);
            if (_pageDecompressed.LastMatch == 0)
            {
                // we are going to insert the value in a bit, but it might have
                // been in the compressed portion and not removed by the calling
                // code
                _tree.RemoveLeafNode(_pageDecompressed);
                if (_pageDecompressed.NumberOfEntries == 0)
                {
                    // we have just removed the last node that we wanted to update
                    // there is no need to do any split - copy the value to the current (empty) page
                    using (_pageDecompressed)
                    {
                        RecompressPageIfNeeded(wasModified: true);
                        var pos = InsertNewKey(_page);
                        return (pos);
                    }
                }
            }
            _page = _pageDecompressed;
        }
        TreePage rightPage = _tree.NewPage(_page.TreeFlags, _page.PageNumber);
        if (_cursor.PageCount == 0) // we need to do a root split
        {
            TreePage newRootPage = _tree.NewPage(TreePageFlags.Branch, _page.PageNumber);
            _cursor.Push(newRootPage);
            _tree.State.RootPageNumber = newRootPage.PageNumber;
            _tree.State.Depth++;
            // now add implicit left page
            newRootPage.AddPageRefNode(0, Slices.BeforeAllKeys, _page.PageNumber);
            _parentPage = newRootPage;
            _parentPage.LastSearchPosition++;
        }
        else
        {
            // we already popped the page, so the current one on the stack is the parent of the page
            _parentPage = _tree.ModifyPage(_cursor.CurrentPage);
            _cursor.Update(_cursor.Pages, _parentPage);
        }
        // dispose the decompressed page (if any) once the split work below completes
        using (_pageDecompressed)
        {
            if (_page.LastSearchPosition >= _page.NumberOfEntries)
            {
                // appending past the last entry: move only the new value to the right page
                var pos = OptimizedOnlyMoveNewValueToTheRightPage(rightPage);
                RecompressPageIfNeeded(wasModified: false);
                return (pos);
            }
            return (SplitPageInHalf(rightPage));
        }
    }
}
/// <summary>
/// Adds a separator entry for the page <paramref name="pageRefNumber"/> into the current
/// parent page via <see cref="ParentPageAction"/>, returning the insert position and the
/// page that actually received the page ref (the parent itself may have been split).
/// </summary>
private byte* AddSeparatorToParentPage(long pageRefNumber, Slice separatorKey, out TreePage parentOfPageRef)
{
    var action = new ParentPageAction(_parentPage, _page, _tree, _cursor, _tx);
    var insertPosition = action.AddSeparator(separatorKey, pageRefNumber);
    parentOfPageRef = action.ParentOfAddedPageRef;
    return insertPosition;
}
/// <summary>
/// Rebalances <paramref name="page"/> after a deletion. Depending on the page's state
/// this may: rebalance the root, delete an empty page, collapse a single-entry branch,
/// move one entry from a sibling, or merge with a sibling. Returns the parent page when
/// a structural change was made (so the caller can continue rebalancing upward), or null
/// when no further work is needed.
/// </summary>
public TreePage Execute(TreePage page)
{
    using (DisableFreeSpaceUsageIfSplittingRootTree())
    {
        _tree.ClearPagesCache();
        if (_cursor.PageCount <= 1) // the root page
        {
            RebalanceRoot(page);
            return (null);
        }
        _cursor.Pop();
        var parentPage = _tree.ModifyPage(_cursor.CurrentPage);
        _cursor.Update(_cursor.Pages, parentPage);
        if (page.NumberOfEntries == 0) // empty page, just delete it and fixup parent
        {
            // need to change the implicit left page
            if (parentPage.LastSearchPosition == 0 && parentPage.NumberOfEntries > 2)
            {
                var newImplicit = parentPage.GetNode(1)->PageNumber;
                parentPage.RemoveNode(0);
                parentPage.ChangeImplicitRefPageNode(newImplicit);
            }
            else // will be set to rights by the next rebalance call
            {
                parentPage.RemoveNode(parentPage.LastSearchPositionOrLastEntry);
            }
            _tree.FreePage(page);
            return (parentPage);
        }
        if (page.IsBranch && page.NumberOfEntries == 1)
        {
            // a branch with a single entry is redundant; splice its child into the parent
            RemoveBranchWithOneEntry(page, parentPage);
            return (parentPage);
        }
        var minKeys = page.IsBranch ? 2 : 1;
        if ((page.UseMoreSizeThan(_tx.DataPager.PageMinSpace)) && page.NumberOfEntries >= minKeys)
        {
            return (null); // above space/keys thresholds
        }
        Debug.Assert(parentPage.NumberOfEntries >= 2); // if we have less than 2 entries in the parent, the tree is invalid
        var sibling = SetupMoveOrMerge(page, parentPage);
        Debug.Assert(sibling.PageNumber != page.PageNumber);
        if (page.TreeFlags != sibling.TreeFlags)
        {
            // cannot mix leaf/branch (or differently-flagged) pages in a move/merge
            return (null);
        }
        if (sibling.IsCompressed)
        {
            return (null);
        }
        if (sibling.PageSize != page.PageSize)
        {
            // if the current page is compressed (but already opened), we need to
            // avoid merging it with the right (uncompressed) page
            return (null);
        }
        Debug.Assert(page.IsCompressed == false);
        minKeys = sibling.IsBranch ? 2 : 1; // branch must have at least 2 keys
        if (sibling.UseMoreSizeThan(_tx.DataPager.PageMinSpace) && sibling.NumberOfEntries > minKeys)
        {
            // neighbor is over the min size and has enough key, can move just one key to the current page
            if (page.IsBranch)
            {
                MoveBranchNode(parentPage, sibling, page);
            }
            else
            {
                MoveLeafNode(parentPage, sibling, page);
            }
            return (parentPage);
        }
        if (page.LastSearchPosition == 0) // this is the right page, merge left
        {
            if (TryMergePages(parentPage, sibling, page) == false)
            {
                return (null);
            }
        }
        else // this is the left page, merge right
        {
            if (TryMergePages(parentPage, page, sibling) == false)
            {
                return (null);
            }
        }
        return (parentPage);
    }
}
/// <summary>
/// Moves a single entry from the leaf page <paramref name="from"/> into its sibling leaf
/// page <paramref name="to"/> and fixes up the separator key in
/// <paramref name="parentPage"/>.
/// NOTE(review): this appears to be an older variant of MoveLeafNode (it carries node
/// versions and uses Constants.NodeHeaderSize rather than Constants.Tree.NodeHeaderSize);
/// presumably it belongs to a different class/version than the scope-based variant above
/// — confirm before consolidating.
/// </summary>
private void MoveLeafNode(TreePage parentPage, TreePage from, TreePage to)
{
    Debug.Assert(from.IsBranch == false);
    var originalFromKeyStart = GetActualKey(from, from.LastSearchPositionOrLastEntry);
    var fromNode = from.GetNode(from.LastSearchPosition);
    // raw pointer to the node's value bytes: page base + key offset + header + key size
    byte* val = @from.Base + @from.KeysOffsets[@from.LastSearchPosition] + Constants.NodeHeaderSize + originalFromKeyStart.Size;
    var nodeVersion = fromNode->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
    {
        nodeVersion -= 1;
    }
    byte* dataPos;
    var fromDataSize = fromNode->DataSize;
    switch (fromNode->Flags)
    {
        case TreeNodeFlags.PageRef:
            to.EnsureHasSpaceFor(_tx, originalFromKeyStart, -1);
            dataPos = to.AddPageRefNode(to.LastSearchPosition, originalFromKeyStart, fromNode->PageNumber);
            break;
        case TreeNodeFlags.Data:
            to.EnsureHasSpaceFor(_tx, originalFromKeyStart, fromDataSize);
            dataPos = to.AddDataNode(to.LastSearchPosition, originalFromKeyStart, fromDataSize, nodeVersion);
            break;
        case TreeNodeFlags.MultiValuePageRef:
            to.EnsureHasSpaceFor(_tx, originalFromKeyStart, fromDataSize);
            dataPos = to.AddMultiValueNode(to.LastSearchPosition, originalFromKeyStart, fromDataSize, nodeVersion);
            break;
        default:
            throw new NotSupportedException("Invalid node type to move: " + fromNode->Flags);
    }
    // PageRef nodes carry no inline data; only copy when the destination returned a data position
    if (dataPos != null && fromDataSize > 0)
    {
        Memory.Copy(dataPos, val, fromDataSize);
    }
    from.RemoveNode(from.LastSearchPositionOrLastEntry);
    // replace the old separator in the parent with the new smallest key of the right-hand page
    var pos = parentPage.LastSearchPositionOrLastEntry;
    parentPage.RemoveNode(pos);
    var newSeparatorKey = GetActualKey(to, 0); // get the next smallest key it has now
    var pageNumber = to.PageNumber;
    if (parentPage.GetNode(0)->PageNumber == to.PageNumber)
    {
        // 'to' is the parent's implicit-left child, so the separator must describe 'from' instead
        pageNumber = from.PageNumber;
        newSeparatorKey = GetActualKey(from, 0);
    }
    AddSeparatorToParentPage(to, parentPage, pageNumber, newSeparatorKey, pos);
}
/// <summary>
/// Convenience overload of GetActualKey that discards the node header output
/// and only surfaces the key slice (still wrapped in the returned scope).
/// </summary>
private ActualKeyScope GetActualKey(TreePage page, int pos, out Slice slice)
{
    TreeNodeHeader* ignored;
    return GetActualKey(page, pos, out ignored, out slice);
}
/// <summary>
/// Throw helper: accessing a compressed page's content directly is invalid;
/// callers must decompress it first.
/// </summary>
private static void ThrowOnCompressedPage(TreePage p)
{
    var message = $"Page {p.PageNumber} is compressed. You need to decompress it to be able to access its content.";
    throw new InvalidOperationException(message);
}