public int GetDataSize(Slice key)
{
    TreeNodeHeader* node;
    var p = FindPageFor(key, out node);
    if (p.LastMatch != 0)
        return -1;

    if (node == null)
        return -1;

    Slice nodeKey;
    using (TreeNodeHeader.ToSlicePtr(_llt.Allocator, node, out nodeKey))
    {
        if (!SliceComparer.EqualsInline(nodeKey, key))
            return -1;
    }

    return GetDataSize(node);
}
public ushort ReadVersion(Slice key)
{
    TreeNodeHeader* node;
    var p = FindPageFor(key, out node);
    if (p == null || p.LastMatch != 0)
        return 0;

    if (node == null || !SliceComparer.EqualsInline(TreeNodeHeader.ToSlicePtr(_llt.Allocator, node), key))
        return 0;

    return node->Version;
}
public int GetDataSize(Slice key)
{
    TreeNodeHeader* node;
    var p = FindPageFor(key, out node);
    if (p == null || p.LastMatch != 0)
        return -1;

    if (node == null || !SliceComparer.EqualsInline(TreeNodeHeader.ToSlicePtr(_llt.Allocator, node), key))
        return -1;

    return TreeNodeHeader.GetDataSize(_llt, node);
}
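// Hypothetical usage sketch (not part of the original source): both lookups above use a
// sentinel return value when the key is missing (-1 for GetDataSize, 0 for ReadVersion).
// `tree` and `key` below are illustrative placeholders for a Tree instance and an existing
// Slice obtained inside an open transaction.
public void InspectEntry(Tree tree, Slice key)
{
    int size = tree.GetDataSize(key);       // -1 => key not found
    ushort version = tree.ReadVersion(key); // 0  => key not found
    if (size >= 0)
    {
        // the key exists; `size` is the stored value length in bytes and
        // `version` is the node's current version counter
    }
}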
private byte* SplitPageInHalf(TreePage rightPage)
{
    bool toRight;

    var currentIndex = _page.LastSearchPosition;
    var splitIndex = _page.NumberOfEntries / 2;

    if (currentIndex <= splitIndex)
    {
        toRight = false;
    }
    else
    {
        toRight = true;

        var leftPageEntryCount = splitIndex;
        var rightPageEntryCount = _page.NumberOfEntries - leftPageEntryCount + 1;

        if (rightPageEntryCount > leftPageEntryCount)
        {
            splitIndex++;
            Debug.Assert(splitIndex < _page.NumberOfEntries);
        }
    }

    if (_page.IsLeaf)
    {
        splitIndex = AdjustSplitPosition(currentIndex, splitIndex, ref toRight);
    }

    Slice currentKey;
    using (_page.GetNodeKey(_tx, splitIndex, out currentKey))
    {
        Slice seperatorKey;
        if (toRight && splitIndex == currentIndex)
        {
            seperatorKey = SliceComparer.Compare(currentKey, _newKey) < 0 ? currentKey : _newKey;
        }
        else
        {
            seperatorKey = currentKey;
        }

        var addedAsImplicitRef = false;
        var parentOfPage = _cursor.CurrentPage;
        TreePage parentOfRight;

        DecompressedLeafPage rightDecompressed = null;

        if (_pageDecompressed != null)
        {
            // splitting the decompressed page, let's allocate the page of the same size to ensure enough space
            rightDecompressed = _tx.Environment.DecompressionBuffers.GetPage(_tx, _pageDecompressed.PageSize, DecompressionUsage.Write, rightPage);
            rightPage = rightDecompressed;
        }

        using (rightDecompressed)
        {
            AddSeparatorToParentPage(rightPage.PageNumber, seperatorKey, out parentOfRight);

            if (_page.IsBranch && toRight && SliceComparer.EqualsInline(seperatorKey, _newKey))
            {
                // _newKey needs to be inserted as first key (BeforeAllKeys) to the right page,
                // so we need to add it before we move entries from the current page
                AddNodeToPage(rightPage, 0, Slices.BeforeAllKeys);
                addedAsImplicitRef = true;
            }

            // move the actual entries from page to right page
            ushort nKeys = _page.NumberOfEntries;
            for (int i = splitIndex; i < nKeys; i++)
            {
                TreeNodeHeader* node = _page.GetNode(i);
                if (_page.IsBranch && rightPage.NumberOfEntries == 0)
                {
                    rightPage.CopyNodeDataToEndOfPage(node, Slices.BeforeAllKeys);
                }
                else
                {
                    Slice instance;
                    using (TreeNodeHeader.ToSlicePtr(_tx.Allocator, node, out instance))
                    {
                        rightPage.CopyNodeDataToEndOfPage(node, instance);
                    }
                }
            }

            if (rightDecompressed != null)
            {
                rightDecompressed.CopyToOriginal(_tx, defragRequired: false, wasModified: true);
                rightPage = rightDecompressed.Original;
            }
        }

        _page.Truncate(_tx, splitIndex);

        RecompressPageIfNeeded(wasModified: true);

        byte* pos;

        if (addedAsImplicitRef == false)
        {
            try
            {
                if (toRight && _cursor.CurrentPage.PageNumber != parentOfRight.PageNumber)
                {
                    // modify the cursor if we are going to insert to the right page
                    _cursor.Pop();
                    _cursor.Push(parentOfRight);
                }

                // actually insert the new key
                pos = InsertNewKey(toRight ? rightPage : _page);
            }
            catch (InvalidOperationException e)
            {
                if (e.Message.StartsWith("The page is full and cannot add an entry", StringComparison.Ordinal) == false)
                    throw;

                throw new InvalidOperationException(
                    GatherDetailedDebugInfo(rightPage, currentKey, seperatorKey, currentIndex, splitIndex, toRight), e);
            }
        }
        else
        {
            pos = null;
            _cursor.Push(rightPage);
        }

        if (_page.IsBranch) // remove a branch that has only one entry, the page ref needs to be added to the parent of the current page
        {
            Debug.Assert(_page.NumberOfEntries > 0);
            Debug.Assert(rightPage.NumberOfEntries > 0);

            if (_page.NumberOfEntries == 1)
                RemoveBranchWithOneEntry(_page, parentOfPage);

            if (rightPage.NumberOfEntries == 1)
                RemoveBranchWithOneEntry(rightPage, parentOfRight);
        }

        return pos;
    }
}
public byte* DirectAdd(Slice key, int len, TreeNodeFlags nodeType = TreeNodeFlags.Data, ushort? version = null)
{
    Debug.Assert(nodeType == TreeNodeFlags.Data || nodeType == TreeNodeFlags.MultiValuePageRef);

    if (State.InWriteTransaction)
        State.IsModified = true;

    if (_llt.Flags != TransactionFlags.ReadWrite)
        throw new ArgumentException("Cannot add a value in a read only transaction");

    if (AbstractPager.IsKeySizeValid(key.Size) == false)
        throw new ArgumentException(
            $"Key size is too big, must be at most {AbstractPager.MaxKeySize} bytes, but was {(key.Size + AbstractPager.RequiredSpaceForNewNode)}",
            nameof(key));

    Func<TreeCursor> cursorConstructor;
    TreeNodeHeader* node;
    var foundPage = FindPageFor(key, out node, out cursorConstructor);

    var page = ModifyPage(foundPage);

    ushort nodeVersion = 0;
    bool? shouldGoToOverflowPage = null;
    if (page.LastMatch == 0) // this is an update operation
    {
        node = page.GetNode(page.LastSearchPosition);

        Debug.Assert(SliceComparer.EqualsInline(TreeNodeHeader.ToSlicePtr(_llt.Allocator, node), key));

        shouldGoToOverflowPage = ShouldGoToOverflowPage(len);

        byte* pos;
        if (shouldGoToOverflowPage == false)
        {
            // optimization for Data and MultiValuePageRef - try to overwrite existing node space
            if (TryOverwriteDataOrMultiValuePageRefNode(node, key, len, nodeType, version, out pos))
                return pos;
        }
        else
        {
            // optimization for PageRef - try to overwrite existing overflows
            if (TryOverwriteOverflowPages(node, key, len, version, out pos))
                return pos;
        }

        RemoveLeafNode(page, out nodeVersion);
    }
    else // new item should be recorded
    {
        State.NumberOfEntries++;
    }

    CheckConcurrency(key, version, nodeVersion, TreeActionType.Add);

    var lastSearchPosition = page.LastSearchPosition; // searching for overflow pages might change this
    byte* overFlowPos = null;
    var pageNumber = -1L;
    if (shouldGoToOverflowPage ?? ShouldGoToOverflowPage(len))
    {
        pageNumber = WriteToOverflowPages(len, out overFlowPos);
        len = -1;
        nodeType = TreeNodeFlags.PageRef;
    }

    byte* dataPos;
    if (page.HasSpaceFor(_llt, key, len) == false)
    {
        using (var cursor = cursorConstructor())
        {
            cursor.Update(cursor.Pages.First, page);

            var pageSplitter = new TreePageSplitter(_llt, this, key, len, pageNumber, nodeType, nodeVersion, cursor);
            dataPos = pageSplitter.Execute();
        }

        DebugValidateTree(State.RootPageNumber);
    }
    else
    {
        switch (nodeType)
        {
            case TreeNodeFlags.PageRef:
                dataPos = page.AddPageRefNode(lastSearchPosition, key, pageNumber);
                break;
            case TreeNodeFlags.Data:
                dataPos = page.AddDataNode(lastSearchPosition, key, len, nodeVersion);
                break;
            case TreeNodeFlags.MultiValuePageRef:
                dataPos = page.AddMultiValueNode(lastSearchPosition, key, len, nodeVersion);
                break;
            default:
                throw new NotSupportedException("Unknown node type for direct add operation: " + nodeType);
        }

        page.DebugValidate(_llt, State.RootPageNumber);
    }

    if (overFlowPos != null)
        return overFlowPos;

    return dataPos;
}
private byte* SplitPageInHalf(TreePage rightPage)
{
    bool toRight;

    var currentIndex = _page.LastSearchPosition;
    var splitIndex = _page.NumberOfEntries / 2;

    if (currentIndex <= splitIndex)
    {
        toRight = false;
    }
    else
    {
        toRight = true;

        var leftPageEntryCount = splitIndex;
        var rightPageEntryCount = _page.NumberOfEntries - leftPageEntryCount + 1;

        if (rightPageEntryCount > leftPageEntryCount)
        {
            splitIndex++;
            Debug.Assert(splitIndex < _page.NumberOfEntries);
        }
    }

    if (_page.IsLeaf)
    {
        splitIndex = AdjustSplitPosition(currentIndex, splitIndex, ref toRight);
    }

    var currentKey = _page.GetNodeKey(_tx, splitIndex);

    Slice seperatorKey;
    if (toRight && splitIndex == currentIndex)
    {
        seperatorKey = SliceComparer.Compare(currentKey, _newKey) < 0 ? currentKey : _newKey;
    }
    else
    {
        seperatorKey = currentKey;
    }

    TreePage parentOfRight;
    AddSeparatorToParentPage(rightPage.PageNumber, seperatorKey, out parentOfRight);

    var parentOfPage = _cursor.CurrentPage;

    bool addedAsImplicitRef = false;
    if (_page.IsBranch && toRight && SliceComparer.EqualsInline(seperatorKey, _newKey))
    {
        // _newKey needs to be inserted as first key (BeforeAllKeys) to the right page,
        // so we need to add it before we move entries from the current page
        AddNodeToPage(rightPage, 0, Slices.BeforeAllKeys);
        addedAsImplicitRef = true;
    }

    // move the actual entries from page to right page
    var instance = new Slice();
    ushort nKeys = _page.NumberOfEntries;
    for (int i = splitIndex; i < nKeys; i++)
    {
        TreeNodeHeader* node = _page.GetNode(i);
        if (_page.IsBranch && rightPage.NumberOfEntries == 0)
        {
            rightPage.CopyNodeDataToEndOfPage(node, Slices.BeforeAllKeys);
        }
        else
        {
            instance = TreeNodeHeader.ToSlicePtr(_tx.Allocator, node);
            rightPage.CopyNodeDataToEndOfPage(node, instance);
        }
    }

    _page.Truncate(_tx, splitIndex);

    byte* pos;

    if (addedAsImplicitRef == false)
    {
        try
        {
            if (toRight && _cursor.CurrentPage.PageNumber != parentOfRight.PageNumber)
            {
                // modify the cursor if we are going to insert to the right page
                _cursor.Pop();
                _cursor.Push(parentOfRight);
            }

            // actually insert the new key
            pos = toRight ? InsertNewKey(rightPage) : InsertNewKey(_page);
        }
        catch (InvalidOperationException e)
        {
            if (e.Message.StartsWith("The page is full and cannot add an entry", StringComparison.Ordinal) == false)
                throw;

            throw new InvalidOperationException(
                GatherDetailedDebugInfo(rightPage, currentKey, seperatorKey, currentIndex, splitIndex, toRight), e);
        }
    }
    else
    {
        pos = null;
        _cursor.Push(rightPage);
    }

    if (_page.IsBranch) // remove a branch that has only one entry, the page ref needs to be added to the parent of the current page
    {
        Debug.Assert(_page.NumberOfEntries > 0);
        Debug.Assert(rightPage.NumberOfEntries > 0);

        if (_page.NumberOfEntries == 1)
            RemoveBranchWithOneEntry(_page, parentOfPage);

        if (rightPage.NumberOfEntries == 1)
            RemoveBranchWithOneEntry(rightPage, parentOfRight);
    }

    return pos;
}
public DirectAddScope DirectAdd(Slice key, int len, TreeNodeFlags nodeType, out byte* ptr)
{
    if (_llt.Flags == TransactionFlags.ReadWrite)
        State.IsModified = true;
    else
        ThreadCannotAddInReadTx();

    if (AbstractPager.IsKeySizeValid(key.Size) == false)
        ThrowInvalidKeySize(key);

    var foundPage = FindPageFor(key, node: out TreeNodeHeader* node, cursor: out TreeCursorConstructor cursorConstructor, allowCompressed: true);
    var page = ModifyPage(foundPage);

    bool? shouldGoToOverflowPage = null;
    if (page.LastMatch == 0) // this is an update operation
    {
        if ((nodeType & TreeNodeFlags.NewOnly) == TreeNodeFlags.NewOnly)
            ThrowConcurrencyException();

        node = page.GetNode(page.LastSearchPosition);

#if DEBUG
        using (TreeNodeHeader.ToSlicePtr(_llt.Allocator, node, out Slice nodeCheck))
        {
            Debug.Assert(SliceComparer.EqualsInline(nodeCheck, key));
        }
#endif
        shouldGoToOverflowPage = ShouldGoToOverflowPage(len);

        byte* pos;
        if (shouldGoToOverflowPage == false)
        {
            // optimization for Data and MultiValuePageRef - try to overwrite existing node space
            if (TryOverwriteDataOrMultiValuePageRefNode(node, len, nodeType, out pos))
            {
                ptr = pos;
                return new DirectAddScope(this);
            }
        }
        else
        {
            // optimization for PageRef - try to overwrite existing overflows
            if (TryOverwriteOverflowPages(node, len, out pos))
            {
                ptr = pos;
                return new DirectAddScope(this);
            }
        }

        RemoveLeafNode(page);
    }
    else // new item should be recorded
    {
        State.NumberOfEntries++;
    }

    nodeType &= ~TreeNodeFlags.NewOnly;
    Debug.Assert(nodeType == TreeNodeFlags.Data || nodeType == TreeNodeFlags.MultiValuePageRef);

    var lastSearchPosition = page.LastSearchPosition; // searching for overflow pages might change this
    byte* overFlowPos = null;
    var pageNumber = -1L;
    if (shouldGoToOverflowPage ?? ShouldGoToOverflowPage(len))
    {
        pageNumber = WriteToOverflowPages(len, out overFlowPos);
        len = -1;
        nodeType = TreeNodeFlags.PageRef;
    }

    byte* dataPos;
    if (page.HasSpaceFor(_llt, key, len) == false)
    {
        if (IsLeafCompressionSupported == false || TryCompressPageNodes(key, len, page) == false)
        {
            using (var cursor = cursorConstructor.Build(key))
            {
                cursor.Update(cursor.Pages, page);

                var pageSplitter = new TreePageSplitter(_llt, this, key, len, pageNumber, nodeType, cursor);
                dataPos = pageSplitter.Execute();
            }

            DebugValidateTree(State.RootPageNumber);

            ptr = overFlowPos == null ? dataPos : overFlowPos;
            return new DirectAddScope(this);
        }

        // existing values compressed and put at the end of the page, let's insert from Upper position
        lastSearchPosition = 0;
    }

    switch (nodeType)
    {
        case TreeNodeFlags.PageRef:
            dataPos = page.AddPageRefNode(lastSearchPosition, key, pageNumber);
            break;
        case TreeNodeFlags.Data:
            dataPos = page.AddDataNode(lastSearchPosition, key, len);
            break;
        case TreeNodeFlags.MultiValuePageRef:
            dataPos = page.AddMultiValueNode(lastSearchPosition, key, len);
            break;
        default:
            ThrowUnknownNodeTypeAddOperation(nodeType);
            dataPos = null; // never executed
            break;
    }

    page.DebugValidate(this, State.RootPageNumber);

    ptr = overFlowPos == null ? dataPos : overFlowPos;
    return new DirectAddScope(this);
}
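// Hypothetical usage sketch (not part of the original source): the scope-returning DirectAdd
// hands back a write pointer sized for `len` bytes; the caller copies the value into it and
// then disposes the scope. `tree`, `key`, and `value` are illustrative placeholders, and the
// tree is assumed to have been opened in a read-write transaction.
public unsafe void PutValue(Tree tree, Slice key, byte[] value)
{
    using (tree.DirectAdd(key, value.Length, TreeNodeFlags.Data, out byte* writePos))
    {
        fixed (byte* src = value)
        {
            // copy exactly `value.Length` bytes into the space reserved by DirectAdd
            System.Buffer.MemoryCopy(src, writePos, value.Length, value.Length);
        }
    }
}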