// Renders a branch-page entry (MemorySlice key variant) as a short display string
// for debug output, truncated to 25 characters via MaxString.
private static string GetBranchNodeString(int i, MemorySlice key, Page p, NodeHeader *node)
{
    string text;
    if (i == 0 && key.KeyLength == 0)
    {
        // The first branch entry is the implicit "less than" reference;
        // describe it relative to the key of the following entry.
        key = p.GetNodeKey(1);
        text = "(lt " + key + ")";
    }
    else
    {
        key = p.GetNodeKey(node);
        text = key.ToString();
    }

    return MaxString(text, 25);
}
// Renders a branch-page entry (Slice key variant) as a short display string
// for debug output, truncated to 25 characters via MaxString.
private static unsafe string GetBranchNodeString(int i, Slice key, Page p, NodeHeader *node)
{
    string text;
    if (i == 0 && key.Size == 0)
    {
        // The first branch entry is the implicit "less than" reference;
        // describe it relative to the key of the following entry.
        key.Set(p.GetNode(1));
        text = "(lt " + key + ")";
    }
    else
    {
        key.Set(node);
        text = key.ToString();
    }

    return MaxString(text, 25);
}
/// <summary>
/// Internal method that is used when splitting pages.
/// No need to do any work here, we are always adding at the end.
/// Copies the node from another page to the end of this (prefixed-keys) page,
/// using the supplied key instead of the node's stored key.
/// </summary>
internal void CopyNodeDataToEndOfPage(NodeHeader *other, MemorySlice key)
{
    var index = NumberOfEntries;
    Debug.Assert(HasSpaceFor(SizeOf.NodeEntryWithAnotherKey(other, key) + Constants.NodeOffsetSize + SizeOf.NewPrefix(key)));
    var nodeSize = SizeOf.NodeEntryWithAnotherKey(other, key);
    Debug.Assert(IsBranch == false || index != 0 || key.KeyLength == 0); // branch page's first item must be the implicit ref
    var nodeVersion = other->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
    {
        nodeVersion -= 1;
    }
    // If the key carries a prefix not yet stored on this page, write that prefix
    // to the page before allocating the node that will reference it.
    var prefixedKey = key as PrefixedSlice;
    if (prefixedKey != null && prefixedKey.NewPrefix != null)
    {
        WritePrefix(prefixedKey.NewPrefix, prefixedKey.Header.PrefixId);
    }
    var newNode = AllocateNewNode(index, nodeSize, nodeVersion);
    newNode->KeySize = key.Size;
    newNode->Flags = other->Flags;
    // Copy the key bytes right after the node header (skipped for the implicit ref,
    // which has no key bytes).
    if (key.Options == SliceOptions.Key && key.Size > 0)
    {
        key.CopyTo((byte *)newNode + Constants.NodeHeaderSize);
    }
    if (IsBranch || other->Flags == (NodeFlags.PageRef))
    {
        // Branch entries and page refs carry only the target page number, no inline data.
        newNode->PageNumber = other->PageNumber;
        newNode->Flags = NodeFlags.PageRef;
        return;
    }
    newNode->DataSize = other->DataSize;
    // Copy the value bytes; they follow the key, whose size may differ between
    // the source node and the newly written one.
    MemoryUtils.Copy((byte *)newNode + Constants.NodeHeaderSize + key.Size,
        (byte *)other + Constants.NodeHeaderSize + other->KeySize,
        other->DataSize);
}
// Returns the multi-value tree stored under the given key, caching it on the
// transaction so repeated accesses within the same transaction reuse one instance.
private Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader *item)
{
    // The child tree's root header is stored inline in the node's value,
    // immediately after the key bytes.
    var childTreeHeader = (TreeRootHeader *)((byte *)item + item->KeySize + Constants.NodeHeaderSize);
    Tree tree;
    if (tx.TryGetMultiValueTree(this, key, out tree))
    {
        return (tree);
    }
    // NOTE(review): childTreeHeader is computed by pointer arithmetic from a
    // non-null item and can never be null here, so the Create() branch looks
    // unreachable - confirm intent.
    tree = childTreeHeader != null?
           Open(tx, _cmp, childTreeHeader) :
           Create(tx, _cmp);
    tx.AddMultiValueTree(this, key, tree);
    return (tree);
}
// Populates sliceInstance with the key of the given node, reusing the caller's
// instance when possible to avoid allocations. On prefixed pages the slice is a
// PrefixedSlice and its prefix is bound from this page's prefix section.
public void SetNodeKey(NodeHeader *node, ref MemorySlice sliceInstance)
{
    if (KeysPrefixed == false)
    {
        // Non-prefixed page: the slice points directly at the raw key bytes.
        sliceInstance.Set(node);
        return;
    }
    if (node->KeySize == 0)
    {
        sliceInstance = PrefixedSlice.Empty;
        return;
    }
    PrefixedSlice prefixedSlice;
    if (sliceInstance != null && sliceInstance != PrefixedSlice.Empty)
    {
        // Reuse the caller's instance; the shared Empty singleton must never be mutated.
        sliceInstance.Set(node);
        prefixedSlice = (PrefixedSlice)sliceInstance;
    }
    else
    {
        sliceInstance = prefixedSlice = new PrefixedSlice(node);
    }
    if (prefixedSlice.Header.PrefixId == PrefixedSlice.NonPrefixedId)
    {
        // The key is stored without any prefix; nothing further to resolve.
        Debug.Assert(prefixedSlice.Header.PrefixUsage == 0);
        return;
    }
    Debug.Assert(prefixedSlice.Header.PrefixId < PrefixCount);
    if (prefixedSlice.Prefix == null)
    {
        prefixedSlice.Prefix = new PrefixNode();
    }
    AssertPrefixNode(prefixedSlice.Header.PrefixId);
    // Bind the prefix to its stored location inside this page's prefix section.
    prefixedSlice.Prefix.Set(_base + _prefixSection->PrefixOffsets[prefixedSlice.Header.PrefixId], PageNumber);
}
// Finds and loads the next free-space section holding at least num free pages,
// preferring sections at or after the given key and wrapping around to the
// beginning of the sections range when none is found past it.
private bool SetupNextSection(Transaction tx, int num, Slice key)
{
    // NOTE(review): field name looks like a typo for "_writingFreeSpace".
    if (_writingFreSpace)
    {
        return (false); // can't find next section when we are already writing a section
    }
    NodeHeader *current = null;
    bool hasMatch = key != null && TryFindSection(tx, num, key, key, null, out current);
    if (hasMatch == false) // wrap to the beginning
    {
        if (TryFindSection(tx, num, key, _sectionsPrefix, key, out current) == false)
        {
            return (false);
        }
    }
    Debug.Assert(current != null);
    // Clone so the key remains valid even if the underlying page is later modified.
    _currentKey = new Slice(current).Clone();
    _current = new Section
    {
        Key = _currentKey,
        Sequences = new ConsecutiveSequences(),
    };
    // Section value layout: id (long), largest sequence (int), page count (int),
    // followed by the page numbers (longs).
    using (var stream = NodeHeader.Stream(tx, current))
        using (var reader = new BinaryReader(stream))
        {
            _current.Id = reader.ReadInt64();
            var largestSeq = reader.ReadInt32();
            Debug.Assert(largestSeq >= num);
            var pageCount = reader.ReadInt32();
            // NOTE(review): the loop starts at 1, so only pageCount - 1 entries are
            // read - confirm whether the first entry is intentionally skipped.
            for (var i = 1; i < pageCount; i++)
            {
                _current.Sequences.Add(reader.ReadInt64());
            }
        }
    Debug.Assert(_currentKey != null);
    _currentChanged = false;
    return (true);
}
// Builds a prefixed slice from the key stored in a node. Key layout after the
// node header: a PrefixedSliceHeader followed by the non-prefixed key bytes.
public PrefixedSlice(NodeHeader *node)
{
    if (node->KeySize > 0)
    {
        var prefixHeaderPtr = (PrefixedSliceHeader *)((byte *)node + Constants.NodeHeaderSize);
        Header = *prefixHeaderPtr;
        NonPrefixedData = new Slice((byte *)prefixHeaderPtr + Constants.PrefixedSliceHeaderSize, Header.NonPrefixedDataSize);
        Size = node->KeySize; // bytes physically stored on the page
        KeyLength = (ushort)(Header.PrefixUsage + Header.NonPrefixedDataSize); // logical key length including the prefix
    }
    else
    {
        // Key-less node (e.g. a branch page's implicit ref).
        Size = 0;
        KeyLength = 0;
    }
    Options = SliceOptions.Key;
}
// Repoints this prefixed slice at the key stored in the given node. Key layout
// after the node header: a PrefixedSliceHeader followed by the non-prefixed bytes.
public override void Set(NodeHeader *node)
{
    Debug.Assert(this != Empty, "Cannot call Set() on PrefixedSlice.Empty");
    if (node->KeySize > 0)
    {
        var prefixHeaderPtr = (PrefixedSliceHeader *)((byte *)node + Constants.NodeHeaderSize);
        Header = *prefixHeaderPtr;
        NonPrefixedData.Set((byte *)prefixHeaderPtr + Constants.PrefixedSliceHeaderSize, Header.NonPrefixedDataSize);
        Size = node->KeySize; // bytes physically stored on the page
        KeyLength = (ushort)(Header.PrefixUsage + Header.NonPrefixedDataSize); // logical key length including the prefix
    }
    else
    {
        // Key-less node (e.g. a branch page's implicit ref).
        Size = 0;
        KeyLength = 0;
    }
}
// Returns the multi-value tree stored under the given key, caching it on the
// transaction so repeated accesses within the same transaction reuse one instance.
internal Tree OpenOrCreateMultiValueTree(Transaction tx, Slice key, NodeHeader *item)
{
    Tree tree;
    if (tx.TryGetMultiValueTree(this, key, out tree))
    {
        return (tree);
    }
    // The child tree's root header is stored inline in the node's value,
    // immediately after the key bytes.
    var childTreeHeader = (TreeRootHeader *)((byte *)item + item->KeySize + Constants.NodeHeaderSize);
    Debug.Assert(childTreeHeader->RootPageNumber < tx.State.NextPageNumber);
    // NOTE(review): childTreeHeader is derived from a non-null item by pointer
    // arithmetic and is already dereferenced in the assert above, so the Create()
    // branch looks unreachable - confirm intent.
    tree = childTreeHeader != null?
           Open(tx, _cmp, childTreeHeader) :
           Create(tx, _cmp);
    tx.AddMultiValueTree(this, key, tree);
    return (tree);
}
// Validates the node's key against the iterator's optional constraints:
// it must start with RequiredPrefix (when set) and be strictly below MaxKey
// (when set). Returns false as soon as either constraint is violated.
public unsafe static bool ValidateCurrentKey(this IIterator self, NodeHeader *node, SliceComparer cmp)
{
    if (self.RequiredPrefix != null &&
        new Slice(node).StartsWith(self.RequiredPrefix, cmp) == false)
    {
        return false;
    }

    if (self.MaxKey != null &&
        new Slice(node).Compare(self.MaxKey, cmp) >= 0)
    {
        return false;
    }

    return true;
}
// Opens the existing multi-value tree stored under the given key and caches it on
// the transaction; the asserts document that the tree must already exist.
private Tree OpenMultiValueTree(Transaction tx, MemorySlice key, NodeHeader *item)
{
    Tree tree;
    if (tx.TryGetMultiValueTree(this, key, out tree))
    {
        return (tree);
    }
    // The child tree's root header is stored inline in the node's value,
    // immediately after the key bytes.
    var childTreeHeader = (TreeRootHeader *)((byte *)item + item->KeySize + Constants.NodeHeaderSize);
    Debug.Assert(childTreeHeader->RootPageNumber < tx.State.NextPageNumber);
    Debug.Assert(childTreeHeader->Flags == TreeFlags.MultiValue);
    tree = Open(tx, childTreeHeader);
    tx.AddMultiValueTree(this, key, tree);
    return (tree);
}
// Validates the node's key against the iterator's optional constraints:
// it must start with RequiredPrefix (when set) and be strictly below MaxKey
// (when set). Returns false as soon as either constraint is violated.
public unsafe static bool ValidateCurrentKey(this IIterator self, NodeHeader *node, Page page)
{
    if (self.RequiredPrefix != null &&
        page.GetNodeKey(node).StartsWith(self.RequiredPrefix) == false)
    {
        return false;
    }

    if (self.MaxKey != null &&
        page.GetNodeKey(node).Compare(self.MaxKey) >= 0)
    {
        return false;
    }

    return true;
}
// Tries to reuse the overflow pages of an existing PageRef node for a new value
// of length len. Succeeds only when the new value fits in the already-allocated
// overflow run and no older active transaction could still be reading those
// pages (MVCC). On success pos points at the writable data area.
private bool TryOverwriteOverflowPages(TreeMutableState treeState, NodeHeader *updatedNode, MemorySlice key, int len, ushort?version, out byte *pos)
{
    if (updatedNode->Flags == NodeFlags.PageRef &&
        _tx.Id <= _tx.Environment.OldestTransaction) // ensure MVCC - do not overwrite if there is some older active transaction that might read those overflows
    {
        var overflowPage = _tx.GetReadOnlyPage(updatedNode->PageNumber);
        if (len <= overflowPage.OverflowSize)
        {
            CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add);
            // Wrap the version to 1 instead of overflowing the ushort.
            if (updatedNode->Version == ushort.MaxValue)
            {
                updatedNode->Version = 0;
            }
            updatedNode->Version++;
            // Free the tail pages that the (smaller) new value no longer needs.
            var availableOverflows = _tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize);
            var requestedOverflows = _tx.DataPager.GetNumberOfOverflowPages(len);
            var overflowsToFree = availableOverflows - requestedOverflows;
            for (int i = 0; i < overflowsToFree; i++)
            {
                _tx.FreePage(overflowPage.PageNumber + requestedOverflows + i);
            }
            treeState.OverflowPages -= overflowsToFree;
            treeState.PageCount -= overflowsToFree;
            overflowPage.OverflowSize = len;
            pos = overflowPage.Base + Constants.PageHeaderSize;
            return (true);
        }
    }
    pos = null;
    return (false);
}
/// <summary>
/// Internal method that is used when splitting pages.
/// No need to do any work here, we are always adding at the end.
/// Copies the node from another page to the end of this page; when no explicit
/// key is given the source node's own key is reused.
/// </summary>
internal void CopyNodeDataToEndOfPage(NodeHeader *other, Slice key = null)
{
    var nodeKey = key ?? new Slice(other);
    Debug.Assert(HasSpaceFor(SizeOf.NodeEntryWithAnotherKey(other, nodeKey) + Constants.NodeOffsetSize));
    var index = NumberOfEntries;
    var nodeSize = SizeOf.NodeEntryWithAnotherKey(other, nodeKey);
    if (other->KeySize == 0 && key == null) // when copy first item from branch which is implicit ref
    {
        nodeSize += nodeKey.Size;
    }
    Debug.Assert(IsBranch == false || index != 0 || nodeKey.Size == 0); // branch page's first item must be the implicit ref
    var nodeVersion = other->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
    {
        nodeVersion -= 1;
    }
    var newNode = AllocateNewNode(index, nodeKey, nodeSize, nodeVersion);
    newNode->Flags = other->Flags;
    // Copy the key bytes right after the node header.
    nodeKey.CopyTo((byte *)newNode + Constants.NodeHeaderSize);
    if (IsBranch || other->Flags == (NodeFlags.PageRef))
    {
        // Branch entries and page refs carry only the target page number, no inline data.
        newNode->PageNumber = other->PageNumber;
        newNode->Flags = NodeFlags.PageRef;
        return;
    }
    newNode->DataSize = other->DataSize;
    // Copy the value bytes, which follow the key in both source and destination nodes.
    NativeMethods.memcpy((byte *)newNode + Constants.NodeHeaderSize + other->KeySize,
        (byte *)other + Constants.NodeHeaderSize + other->KeySize,
        other->DataSize);
}
/// <summary>
/// Attempts to create the <see cref="EventWaitHandle"/> handles and initialise the node header and buffers.
/// On Linux builds POSIX semaphores replace the wait handles; the owner creates
/// them while other processes open the existing ones.
/// </summary>
/// <returns>True if the events and nodes were initialised successfully.</returns>
protected override bool DoOpen()
{
    // Create signal events
#if Linux
    if (IsOwnerOfSharedMemory)
    {
        DataExists = PosixSemaphore.Create(Name + "_evt_dataexists");
        NodeAvailable = PosixSemaphore.Create(Name + "_evt_nodeavail");
    }
    else
    {
        DataExists = PosixSemaphore.Open(Name + "_evt_dataexists");
        NodeAvailable = PosixSemaphore.Open(Name + "_evt_nodeavail");
    }
#else
    DataExists = new EventWaitHandle(false, EventResetMode.AutoReset, Name + "_evt_dataexists");
    NodeAvailable = new EventWaitHandle(false, EventResetMode.AutoReset, Name + "_evt_nodeavail");
#endif
    if (IsOwnerOfSharedMemory)
    {
        // Retrieve pointer to node header
        _nodeHeader = (NodeHeader *)(BufferStartPtr + NodeHeaderOffset);
        // Initialise the node header
        InitialiseNodeHeader();
        // Initialise nodes entries
        InitialiseLinkedListNodes();
    }
    else
    {
        // Load the NodeHeader written by the owning process and mirror its
        // settings locally.
        _nodeHeader = (NodeHeader *)(BufferStartPtr + NodeHeaderOffset);
        NodeCount = _nodeHeader->NodeCount;
        NodeBufferSize = _nodeHeader->NodeBufferSize;
    }
    return (true);
}
// Tries to satisfy an overwrite of an existing PageRef node's value in place.
// Succeeds only when the new value (len bytes) fits in the already-allocated
// overflow run; surplus tail pages are freed and a writable copy of the overflow
// page is obtained at the same page number. pos then points at the data area.
private bool TryOverwriteOverflowPages(NodeHeader *updatedNode, MemorySlice key, int len, ushort?version, out byte *pos)
{
    if (updatedNode->Flags == NodeFlags.PageRef)
    {
        var readOnlyOverflowPage = _tx.GetReadOnlyPage(updatedNode->PageNumber);
        if (len <= readOnlyOverflowPage.OverflowSize)
        {
            CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add);
            // Wrap the version to 1 instead of overflowing the ushort.
            if (updatedNode->Version == ushort.MaxValue)
            {
                updatedNode->Version = 0;
            }
            updatedNode->Version++;
            // Free the tail pages that the (smaller) new value no longer needs.
            var availableOverflows = _tx.DataPager.GetNumberOfOverflowPages(readOnlyOverflowPage.OverflowSize);
            var requestedOverflows = _tx.DataPager.GetNumberOfOverflowPages(len);
            var overflowsToFree = availableOverflows - requestedOverflows;
            for (int i = 0; i < overflowsToFree; i++)
            {
                _tx.FreePage(readOnlyOverflowPage.PageNumber + requestedOverflows + i);
            }
            State.RecordFreedPage(readOnlyOverflowPage, overflowsToFree);
            // Allocate at the same page number to obtain a writable copy for this transaction.
            var writtableOverflowPage = _tx.AllocatePage(requestedOverflows, PageFlags.Overflow, updatedNode->PageNumber);
            writtableOverflowPage.OverflowSize = len;
            pos = writtableOverflowPage.Base + Constants.PageHeaderSize;
            return (true);
        }
    }
    pos = null;
    return (false);
}
// Tries to overwrite the value of an existing Data / MultiValuePageRef node in
// place. Succeeds only when the stored value has exactly the requested length and
// the node already holds inline data; pos then points at the writable value bytes.
private bool TryOverwriteDataOrMultiValuePageRefNode(NodeHeader *updatedNode, MemorySlice key, int len, NodeFlags requestedNodeType, ushort?version, out byte *pos)
{
    switch (requestedNodeType)
    {
    case NodeFlags.Data:
    case NodeFlags.MultiValuePageRef:
    {
        // Only an exact size match can be overwritten in place.
        if (updatedNode->DataSize == len &&
            (updatedNode->Flags == NodeFlags.Data || updatedNode->Flags == NodeFlags.MultiValuePageRef))
        {
            CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add);
            // Wrap the version to 1 instead of overflowing the ushort.
            if (updatedNode->Version == ushort.MaxValue)
            {
                updatedNode->Version = 0;
            }
            updatedNode->Version++;
            updatedNode->Flags = requestedNodeType;
            {
                // Value bytes live right after the node header and the key.
                pos = (byte *)updatedNode + Constants.NodeHeaderSize + updatedNode->KeySize;
                return (true);
            }
        }
        break;
    }

    case NodeFlags.PageRef:
        throw new InvalidOperationException("We never add PageRef explicitly");

    default:
        throw new ArgumentOutOfRangeException();
    }
    pos = null;
    return (false);
}
// Collapses a branch page that holds only one entry: the parent's node pointing
// at this page is redirected to the page's single child, and the page is freed.
private void RemoveBranchWithOneEntry(Page page, Page parentPage)
{
    var pageRefNumber = page.GetNode(0)->PageNumber;
    NodeHeader *nodeHeader = null;
    // Locate the parent entry that references this page.
    for (int i = 0; i < parentPage.NumberOfEntries; i++)
    {
        nodeHeader = parentPage.GetNode(i);
        if (nodeHeader->PageNumber == page.PageNumber)
        {
            break;
        }
    }
    // NOTE(review): if the parent held no matching entry (or zero entries),
    // nodeHeader would be stale or null here; the assert documents the expected
    // invariant but is compiled out in release builds.
    Debug.Assert(nodeHeader->PageNumber == page.PageNumber);
    nodeHeader->PageNumber = pageRefNumber;
    _tree.FreePage(page);
}
// Fast path for point lookups: tries to resolve the key using pages found by
// recent searches, avoiding a full root-to-leaf descent. Returns false when no
// cached page covers the key. The cursor is built lazily because most callers
// only need the leaf page and node.
private bool TryUseRecentTransactionPage(MemorySlice key, out Lazy <Cursor> cursor, out Page page, out NodeHeader *node)
{
    node = null;
    page = null;
    cursor = null;
    var recentPages = RecentlyFoundPages;
    if (recentPages == null)
    {
        return (false);
    }
    var foundPage = recentPages.Find(key);
    if (foundPage == null)
    {
        return (false);
    }
    var lastFoundPageNumber = foundPage.Number;
    if (foundPage.Page != null)
    {
        // we can't share the same instance, Page instance may be modified by
        // concurrently run iterators
        page = new Page(foundPage.Page.Base, foundPage.Page.Source, foundPage.Page.PageSize);
    }
    else
    {
        page = _tx.GetReadOnlyPage(lastFoundPageNumber);
    }
    if (page.IsLeaf == false)
    {
        throw new DataException("Index points to a non leaf page");
    }
    node = page.Search(key); // will set the LastSearchPosition
    // Rebuild the cursor from the cached root-to-leaf path only when requested.
    var cursorPath = foundPage.CursorPath;
    var pageCopy = page;
    cursor = new Lazy <Cursor>(() =>
    {
        var c = new Cursor();
        foreach (var p in cursorPath)
        {
            if (p == lastFoundPageNumber)
            {
                c.Push(pageCopy);
            }
            else
            {
                var cursorPage = _tx.GetReadOnlyPage(p);
                if (key.Options == SliceOptions.BeforeAllKeys)
                {
                    cursorPage.LastSearchPosition = 0;
                }
                else if (key.Options == SliceOptions.AfterAllKeys)
                {
                    cursorPage.LastSearchPosition = (ushort)(cursorPage.NumberOfEntries - 1);
                }
                else if (cursorPage.Search(key) != null)
                {
                    // A non-exact match on a branch page means the key belongs
                    // under the previous (smaller) entry.
                    if (cursorPage.LastMatch != 0)
                    {
                        cursorPage.LastSearchPosition--;
                    }
                }
                c.Push(cursorPage);
            }
        }
        return (c);
    });
    return (true);
}
// Descends from the root to the leaf page that should contain the key, recording
// the traversed pages in a cursor and tracking whether the path hugged the
// leftmost/rightmost edge of the tree (used when caching recently found pages).
private Page SearchForPage(MemorySlice key, out Lazy <Cursor> cursor, out NodeHeader *node)
{
    var p = _tx.GetReadOnlyPage(State.RootPageNumber);
    var c = new Cursor();
    c.Push(p);
    bool rightmostPage = true;
    bool leftmostPage = true;
    while ((p.Flags & PageFlags.Branch) == PageFlags.Branch)
    {
        int nodePos;
        if (key.Options == SliceOptions.BeforeAllKeys)
        {
            p.LastSearchPosition = nodePos = 0;
            rightmostPage = false;
        }
        else if (key.Options == SliceOptions.AfterAllKeys)
        {
            p.LastSearchPosition = nodePos = (ushort)(p.NumberOfEntries - 1);
            leftmostPage = false;
        }
        else
        {
            if (p.Search(key) != null)
            {
                nodePos = p.LastSearchPosition;
                // A non-exact match on a branch means the key belongs under the
                // previous (smaller) entry.
                if (p.LastMatch != 0)
                {
                    nodePos--;
                    p.LastSearchPosition--;
                }
                if (nodePos != 0)
                {
                    leftmostPage = false;
                }
                rightmostPage = false;
            }
            else
            {
                // Key is greater than every entry on the page - follow the last one.
                nodePos = (ushort)(p.LastSearchPosition - 1);
                leftmostPage = false;
            }
        }
        var pageNode = p.GetNode(nodePos);
        p = _tx.GetReadOnlyPage(pageNode->PageNumber);
        Debug.Assert(pageNode->PageNumber == p.PageNumber,
            string.Format("Requested Page: #{0}. Got Page: #{1}", pageNode->PageNumber, p.PageNumber));
        c.Push(p);
    }
    if (p.IsLeaf == false)
    {
        throw new DataException("Index points to a non leaf page");
    }
    node = p.Search(key); // will set the LastSearchPosition
    AddToRecentlyFoundPages(c, p, leftmostPage, rightmostPage);
    cursor = new Lazy <Cursor>(() => c);
    return (p);
}
/// <summary>
/// For leaf pages, check the split point based on what
/// fits where, since otherwise adding the node can fail.
/// This check is only needed when the data items are
/// relatively large, such that being off by one will
/// make the difference between success or failure.
/// It's also relevant if a page happens to be laid out
/// such that one half of its nodes are all "small" and
/// the other half of its nodes are "large." If the new
/// item is also "large" and falls on the half with
/// "large" nodes, it also may not fit.
/// </summary>
private int AdjustSplitPosition(int currentIndex, int splitIndex, ref bool newPosition)
{
    MemorySlice keyToInsert;
    if (_tree.KeysPrefixing)
    {
        keyToInsert = new PrefixedSlice(_newKey); // let's assume that _newkey won't be prefixed to ensure the destination page will have enough space
    }
    else
    {
        keyToInsert = _newKey;
    }
    int nodeSize = SizeOf.NodeEntry(AbstractPager.PageMaxSpace, keyToInsert, _len) + Constants.NodeOffsetSize;
    // Small entries on a reasonably full page cannot tip the balance - keep the midpoint.
    if (_page.NumberOfEntries >= 20 && nodeSize <= AbstractPager.PageMaxSpace / 16)
    {
        return (splitIndex);
    }
    int pageSize = nodeSize;
    if (_tree.KeysPrefixing)
    {
        pageSize += (Constants.PrefixNodeHeaderSize + 1); // let's assume that prefix will be created to ensure the destination page will have enough space, + 1 because prefix node might require 2-byte alignment
    }
    if (currentIndex <= splitIndex)
    {
        // New key lands in the left half: accumulate node sizes from the start and
        // stop once the total (kept 2-byte aligned) no longer fits a page.
        newPosition = false;
        for (int i = 0; i < splitIndex; i++)
        {
            NodeHeader *node = _page.GetNode(i);
            pageSize += node->GetNodeSize();
            pageSize += pageSize & 1; // keep 2-byte alignment
            if (pageSize > AbstractPager.PageMaxSpace)
            {
                if (i <= currentIndex)
                {
                    if (i < currentIndex)
                    {
                        newPosition = true;
                    }
                    return (currentIndex);
                }
                return ((ushort)i);
            }
        }
    }
    else
    {
        // New key lands in the right half: accumulate node sizes from the end of
        // the page towards the midpoint.
        for (int i = _page.NumberOfEntries - 1; i >= splitIndex; i--)
        {
            NodeHeader *node = _page.GetNode(i);
            pageSize += node->GetNodeSize();
            pageSize += pageSize & 1; // keep 2-byte alignment
            if (pageSize > AbstractPager.PageMaxSpace)
            {
                if (i >= currentIndex)
                {
                    newPosition = false;
                    return (currentIndex);
                }
                return ((ushort)(i + 1));
            }
        }
    }
    return (splitIndex);
}
/// <summary>
/// Repoints this slice at the key stored in the given node; implementations read
/// the key bytes that follow the node header.
/// </summary>
public abstract void Set(NodeHeader *node);
/// <summary>
/// Scans the free-space sections (keys under _sectionsPrefix, from start up to
/// end) for the section with the most free pages that also satisfies the minimum
/// sequence and minimum free-page requirements. After a suitable section is
/// found, scanning continues for a bounded number of entries looking for a
/// larger one. Returns true and sets current when a section was found.
/// </summary>
private bool TryFindSection(Transaction tx, int minSeq, Slice currentKey, Slice start, Slice end, out NodeHeader *current)
{
    // Minimum number of free pages a section must hold to be worth switching to.
    int minFreeSpace = _minimumFreePagesInSectionSet
        ? _minimumFreePagesInSection
        : Math.Min(256, _lastTransactionPageUsage);

    current = null;
    int currentMax = 0;
    using (var it = _env.FreeSpaceRoot.Iterate(tx))
    {
        it.RequiredPrefix = _sectionsPrefix;
        it.MaxKey = end;
        if (it.Seek(start) == false)
        {
            return (false);
        }

        // Bound how far past the first suitable section we keep looking for a
        // bigger one, instead of walking the whole tree.
        int triesAfterFindingSuitable = 256;
        do
        {
            if (_recordsToSkip.Exists(x => x.Compare(it.CurrentKey, _env.SliceComparer) == 0))
            {
                continue; // if it is marked in memory for either update / delete, we don't want it
            }
            if (current != null)
            {
                triesAfterFindingSuitable--;
            }
            if (currentKey != null)
            {
                // BUGFIX: compare against the currentKey parameter. The previous code
                // compared the _currentKey field here, which may be null (NRE) or stale,
                // defeating the purpose of skipping the section we are already using.
                if (currentKey.Compare(it.CurrentKey, _env.SliceComparer) == 0)
                {
                    continue; // skip current one
                }
            }

            // Section value layout: id (long), largest sequence (int), page count (int).
            using (var stream = it.CreateStreamForCurrent())
            using (var reader = new BinaryReader(stream))
            {
                stream.Position = sizeof(long); // skip the section id
                var largestSeq = reader.ReadInt32();
                if (largestSeq < minSeq)
                {
                    continue;
                }

                var pageCount = reader.ReadInt32();
                if (pageCount < minFreeSpace || pageCount < currentMax)
                {
                    continue;
                }

                current = it.Current;
                currentMax = pageCount;
            }
        } while (it.MoveNext() && triesAfterFindingSuitable >= 0);
    }
    return (current != null);
}
// Splits the current (full) page roughly in half: moves the upper entries to
// rightPage, pushes a separator key up to the parent, and finally inserts the
// new key into whichever half it belongs to. Returns the position of the
// inserted value, or null when the new key was added as a branch implicit ref.
private byte *SplitPageInHalf(Page rightPage)
{
    bool toRight;
    var currentIndex = _page.LastSearchPosition;
    var splitIndex = _page.NumberOfEntries / 2;
    if (currentIndex <= splitIndex)
    {
        toRight = false;
    }
    else
    {
        toRight = true;
        // Bias the split so the half receiving the new entry is not the larger one.
        var leftPageEntryCount = splitIndex;
        var rightPageEntryCount = _page.NumberOfEntries - leftPageEntryCount + 1;
        if (rightPageEntryCount > leftPageEntryCount)
        {
            splitIndex++;
            Debug.Assert(splitIndex < _page.NumberOfEntries);
        }
    }
    PrefixNode[] prefixes = null;
    if (_tree.KeysPrefixing && _page.HasPrefixes)
    {
        prefixes = _page.GetPrefixes();
    }
    if (_page.IsLeaf || prefixes != null)
    {
        // Entry sizes vary on leaves (and prefixed pages), so the midpoint may not
        // actually fit - adjust the split index by accumulated sizes.
        splitIndex = AdjustSplitPosition(currentIndex, splitIndex, prefixes, ref toRight);
    }
    var currentKey = _page.GetNodeKey(splitIndex);
    MemorySlice seperatorKey;
    if (toRight && splitIndex == currentIndex)
    {
        // The new key may become the first entry of the right page; the separator
        // sent to the parent must be the smaller of the two candidates.
        seperatorKey = currentKey.Compare(_newKey) < 0 ? currentKey : _newKey;
    }
    else
    {
        seperatorKey = currentKey;
    }
    Page parentOfRight;
    AddSeparatorToParentPage(rightPage.PageNumber, seperatorKey, out parentOfRight);
    var parentOfPage = _cursor.CurrentPage;
    MemorySlice instance = _page.CreateNewEmptyKey();
    if (prefixes != null)
    {
        // Copy every existing prefix so moved keys keep resolving their prefix ids.
        for (int i = 0; i < prefixes.Length; i++)
        {
            var prefix = prefixes[i];
            rightPage.WritePrefix(new Slice(prefix.ValuePtr, prefix.PrefixLength), i);
        }
    }
    bool addedAsImplicitRef = false;
    if (_page.IsBranch && toRight && seperatorKey == _newKey)
    {
        // _newKey needs to be inserted as first key (BeforeAllKeys) to the right page, so we need to add it before we move entries from the current page
        AddNodeToPage(rightPage, 0, _tree.KeysPrefixing ? (MemorySlice)PrefixedSlice.BeforeAllKeys : Slice.BeforeAllKeys);
        addedAsImplicitRef = true;
    }
    // move the actual entries from page to right page
    ushort nKeys = _page.NumberOfEntries;
    for (int i = splitIndex; i < nKeys; i++)
    {
        NodeHeader *node = _page.GetNode(i);
        if (_page.IsBranch && rightPage.NumberOfEntries == 0)
        {
            // The first entry of a branch page is always the implicit (key-less) ref.
            rightPage.CopyNodeDataToEndOfPage(node, _tree.KeysPrefixing ?
                (MemorySlice)PrefixedSlice.BeforeAllKeys : Slice.BeforeAllKeys);
        }
        else
        {
            _page.SetNodeKey(node, ref instance);
            var key = rightPage.PrepareKeyToInsert(instance, rightPage.NumberOfEntries);
            rightPage.CopyNodeDataToEndOfPage(node, key);
        }
    }
    _page.Truncate(_tx, splitIndex);
    byte *pos;
    if (addedAsImplicitRef == false)
    {
        try
        {
            if (toRight && _cursor.CurrentPage.PageNumber != parentOfRight.PageNumber)
            {
                // modify the cursor if we are going to insert to the right page
                _cursor.Pop();
                _cursor.Push(parentOfRight);
            }
            // actually insert the new key
            pos = toRight ? InsertNewKey(rightPage) : InsertNewKey(_page);
        }
        catch (InvalidOperationException e)
        {
            // Only wrap the known "page is full" failure with diagnostics; rethrow anything else.
            if (e.Message.StartsWith("The page is full and cannot add an entry") == false)
            {
                throw;
            }
            throw new InvalidOperationException(GatherDetailedDebugInfo(rightPage, currentKey, seperatorKey, currentIndex, splitIndex, toRight), e);
        }
    }
    else
    {
        pos = null;
        _cursor.Push(rightPage);
    }
    if (_page.IsBranch) // remove a branch that has only one entry, the page ref needs to be added to the parent of the current page
    {
        Debug.Assert(_page.NumberOfEntries > 0);
        Debug.Assert(rightPage.NumberOfEntries > 0);
        if (_page.NumberOfEntries == 1)
        {
            RemoveBranchWithOneEntry(_page, parentOfPage);
        }
        if (rightPage.NumberOfEntries == 1)
        {
            RemoveBranchWithOneEntry(rightPage, parentOfRight);
        }
    }
    return (pos);
}
// Repoints this slice at the key bytes stored in the given node.
public override void Set(NodeHeader *node)
{
    SetInline(this, node);
}
// Entry point of the page split. Handles three cases: a root split (grows the
// tree by one level), a split at the very end of the page (treated as a hint of
// sequential inserts - the page is kept as-is and a fresh one is started), and
// the general split-in-half case delegated to SplitPageInHalf.
public byte *Execute()
{
    Page rightPage = _tree.NewPage(_page.Flags, 1);
    if (_cursor.PageCount == 0) // we need to do a root split
    {
        Page newRootPage = _tree.NewPage(_tree.KeysPrefixing ? PageFlags.Branch | PageFlags.KeysPrefixed : PageFlags.Branch, 1);
        _cursor.Push(newRootPage);
        _treeState.RootPageNumber = newRootPage.PageNumber;
        _treeState.Depth++;
        // now add implicit left page
        newRootPage.AddPageRefNode(0, _tree.KeysPrefixing ? (MemorySlice)PrefixedSlice.BeforeAllKeys : Slice.BeforeAllKeys, _page.PageNumber);
        _parentPage = newRootPage;
        _parentPage.LastSearchPosition++;
    }
    else
    {
        // we already popped the page, so the current one on the stack is the parent of the page
        if (_tree.Name == Constants.FreeSpaceTreeName)
        {
            // a special case for FreeSpaceTree because the allocation of a new page called above
            // can cause a delete of a free space section resulting in a run of the tree rebalancer
            // and here the parent page that exists in cursor can be outdated
            _parentPage = _tx.ModifyPage(_cursor.CurrentPage.PageNumber, _tree, null); // pass _null_ to make sure we'll get the most updated parent page
            _parentPage.LastSearchPosition = _cursor.CurrentPage.LastSearchPosition;
            _parentPage.LastMatch = _cursor.CurrentPage.LastMatch;
        }
        else
        {
            _parentPage = _tx.ModifyPage(_cursor.CurrentPage.PageNumber, _tree, _cursor.CurrentPage);
        }
        _cursor.Update(_cursor.Pages.First, _parentPage);
    }
    if (_page.IsLeaf)
    {
        // Leaf layout is about to change - invalidate the point-lookup cache.
        _tree.ClearRecentFoundPages();
    }
    if (_tree.Name == Constants.FreeSpaceTreeName)
    {
        // we need to refresh the LastSearchPosition of the split page which is used by the free space handling
        // because the allocation of a new page called above could remove some sections
        // from the page that is being split
        _page.NodePositionFor(_newKey);
    }
    if (_page.LastSearchPosition >= _page.NumberOfEntries)
    {
        // when we get a split at the end of the page, we take that as a hint that the user is doing
        // sequential inserts, at that point, we are going to keep the current page as is and create a new
        // page, this will allow us to do minimal amount of work to get the best density
        Page branchOfSeparator;
        byte *pos;
        if (_page.IsBranch)
        {
            if (_page.NumberOfEntries > 2)
            {
                // here we steal the last entry from the current page so we maintain the implicit null left entry
                NodeHeader *node = _page.GetNode(_page.NumberOfEntries - 1);
                Debug.Assert(node->Flags == NodeFlags.PageRef);
                rightPage.AddPageRefNode(0, _tree.KeysPrefixing ? (MemorySlice)PrefixedSlice.BeforeAllKeys : Slice.BeforeAllKeys, node->PageNumber);
                pos = AddNodeToPage(rightPage, 1);
                var separatorKey = _page.GetNodeKey(node);
                AddSeparatorToParentPage(rightPage.PageNumber, separatorKey, out branchOfSeparator);
                _page.RemoveNode(_page.NumberOfEntries - 1);
            }
            else
            {
                _tree.FreePage(rightPage); // return the unnecessary right page
                pos = AddSeparatorToParentPage(_pageNumber, _newKey, out branchOfSeparator);
                if (_cursor.CurrentPage.PageNumber != branchOfSeparator.PageNumber)
                {
                    _cursor.Push(branchOfSeparator);
                }
                return (pos);
            }
        }
        else
        {
            AddSeparatorToParentPage(rightPage.PageNumber, _newKey, out branchOfSeparator);
            pos = AddNodeToPage(rightPage, 0);
        }
        _cursor.Push(rightPage);
        return (pos);
    }
    return (SplitPageInHalf(rightPage));
}
// Prefix-aware variant of the split-point adjustment: walks the node sizes from
// the appropriate end of the page - accounting for the new entry, the prefix
// info section and every prefix that will be copied to the destination page -
// and moves the split index so the receiving half is guaranteed to fit. May flip
// toRight when the accumulated sizes push the new key onto the other side.
private int AdjustSplitPosition(int currentIndex, int splitIndex, PrefixNode[] prefixes, ref bool toRight)
{
    MemorySlice keyToInsert;
    int pageSize = 0;
    if (_tree.KeysPrefixing)
    {
        keyToInsert = new PrefixedSlice(_newKey); // let's assume that _newkey won't match any of the existing prefixes
        pageSize += Constants.PrefixInfoSectionSize;
        pageSize += Constants.PrefixNodeHeaderSize + 1; // possible new prefix, + 1 because of possible 2-byte alignment
    }
    else
    {
        keyToInsert = _newKey;
    }
    pageSize += SizeOf.NodeEntry(AbstractPager.PageMaxSpace, keyToInsert, _len) + Constants.NodeOffsetSize;
    if (prefixes != null)
    {
        // we are going to copy all existing prefixes so we need to take into account their sizes
        for (var i = 0; i < prefixes.Length; i++)
        {
            var prefixNodeSize = Constants.PrefixNodeHeaderSize + prefixes[i].Header.PrefixLength;
            pageSize += prefixNodeSize + (prefixNodeSize & 1); // & 1 because we need 2-byte alignment
        }
    }
    if (toRight == false)
    {
        // New key goes left: accumulate node sizes from the start of the page.
        for (int i = 0; i < splitIndex; i++)
        {
            NodeHeader *node = _page.GetNode(i);
            pageSize += node->GetNodeSize();
            pageSize += pageSize & 1; // keep 2-byte alignment
            if (pageSize > AbstractPager.PageMaxSpace)
            {
                if (i <= currentIndex)
                {
                    if (i < currentIndex)
                    {
                        toRight = true;
                    }
                    return (currentIndex);
                }
                return (i);
            }
        }
    }
    else
    {
        // New key goes right: accumulate node sizes from the end of the page.
        for (int i = _page.NumberOfEntries - 1; i >= splitIndex; i--)
        {
            NodeHeader *node = _page.GetNode(i);
            pageSize += node->GetNodeSize();
            pageSize += pageSize & 1; // keep 2-byte alignment
            if (pageSize > AbstractPager.PageMaxSpace)
            {
                if (i >= currentIndex)
                {
                    toRight = false;
                    return (currentIndex);
                }
                return (i + 1);
            }
        }
    }
    return (splitIndex);
}
// Creates a key slice that points directly at the key bytes stored in the node.
public Slice(NodeHeader *node)
{
    Options = SliceOptions.Key;
    SetInline(this, node);
}
// Non-prefixed key reader: repoints the slice at the node's key bytes in place.
private void SetNodeKey(NodeHeader *node, ref Slice slice)
{
    Slice.SetInline(slice, node);
}
// Number of free-page entries recorded in the node: the node's total data size
// divided by the size of a single stored page number.
internal int GetNumberOfFreePages(NodeHeader *node)
{
    var dataSize = GetNodeDataSize(node);
    return dataSize / Constants.PageNumberSize;
}
/// <summary>
/// Creates an iterator positioned over exactly one node entry, keeping the
/// comparer, item and transaction needed to read that entry.
/// </summary>
public SingleEntryIterator(SliceComparer cmp, NodeHeader* item, Transaction tx)
{
    _tx = tx;
    _item = item;
    _cmp = cmp;
}