/// <summary>
/// Computes the on-page size of a node entry. A negative <paramref name="len"/>
/// marks a branch (page-reference) entry; otherwise a leaf entry is sized.
/// </summary>
public static int NodeEntry(int pageMaxSpace, MemorySlice key, int len)
{
    return len < 0
        ? BranchEntry(key)
        : LeafEntry(pageMaxSpace, key, len);
}
/// <summary>
/// Scans the page cache for a cached page whose key range contains the given
/// key. Returns null when no cached page matches.
/// </summary>
public FoundPage Find(MemorySlice key)
{
    for (var slot = 0; slot < _cache.Length; slot++)
    {
        var candidate = _cache[slot];
        if (candidate == null)
            continue;

        var firstKey = candidate.FirstKey;
        var lastKey = candidate.LastKey;

        if (key.Options == SliceOptions.BeforeAllKeys)
        {
            // only the left-most page can serve a before-all-keys lookup
            if (firstKey.Options == SliceOptions.BeforeAllKeys)
                return candidate;
        }
        else if (key.Options == SliceOptions.AfterAllKeys)
        {
            // only the right-most page can serve an after-all-keys lookup
            if (lastKey.Options == SliceOptions.AfterAllKeys)
                return candidate;
        }
        else if (key.Options == SliceOptions.Key)
        {
            // reject pages whose range does not cover the key; boundary pages
            // with Before/AfterAllKeys markers act as open endpoints
            if (firstKey.Options != SliceOptions.BeforeAllKeys && key.Compare(firstKey) < 0)
                continue;
            if (lastKey.Options != SliceOptions.AfterAllKeys && key.Compare(lastKey) > 0)
                continue;
            return candidate;
        }
        else
        {
            throw new ArgumentException(key.Options.ToString());
        }
    }
    return null;
}
/// <summary>
/// Clears all section-scan state back to its initial (nothing found) values.
/// </summary>
private static void ResetSections(ref int foundSections, List<Slice> sections, ref MemorySlice startSection, ref long? startSectionId)
{
    sections.Clear();
    foundSections = 0;
    startSectionId = null;
    startSection = null;
}
// Adds a page-ref separator entry to the parent page, splitting the parent
// when it is out of space. Returns the position at which callers may write
// (from the split path) or the parent's AddPageRefNode result.
// After a split, ensures the cursor's top page is the actual parent of the
// page currently being worked on.
public byte* AddSeparator(MemorySlice separator, long pageRefNumber, int? nodePos = null)
{
    var originalLastSearchPositionOfParent = _parentPage.LastSearchPosition;
    if (nodePos == null)
        nodePos = _parentPage.NodePositionFor(separator); // select the appropriate place for this
    var separatorKeyToInsert = _parentPage.PrepareKeyToInsert(separator, nodePos.Value);
    // required space: branch entry + offset slot + any new prefix the key introduces
    if (_parentPage.HasSpaceFor(_tx, SizeOf.BranchEntry(separatorKeyToInsert) + Constants.NodeOffsetSize + SizeOf.NewPrefix(separatorKeyToInsert)) == false)
    {
        // parent is full: split it and let the splitter insert the page ref
        var pageSplitter = new PageSplitter(_tx, _tree, separator, -1, pageRefNumber, NodeFlags.PageRef, 0, _cursor, _tree.State);
        var posToInsert = pageSplitter.Execute();
        ParentOfAddedPageRef = _cursor.CurrentPage;
        // check whether the cursor's current page still references _currentPage;
        // if so just fix its LastSearchPosition, otherwise repoint the cursor
        var adjustParentPageOnCursor = true;
        for (int i = 0; i < _cursor.CurrentPage.NumberOfEntries; i++)
        {
            if (_cursor.CurrentPage.GetNode(i)->PageNumber == _currentPage.PageNumber)
            {
                adjustParentPageOnCursor = false;
                _cursor.CurrentPage.LastSearchPosition = i;
                break;
            }
        }
        if (adjustParentPageOnCursor)
        {
            // the above page split has modified the cursor that its first page points to the parent of the leaf where 'separatorKey' was inserted
            // and it doesn't have the reference to _page, we need to ensure that the actual parent is first at the cursor
            _cursor.Pop();
            _cursor.Push(_parentPage);
            EnsureValidLastSearchPosition(_parentPage, _currentPage.PageNumber, originalLastSearchPositionOfParent);
        }
#if VALIDATE
        Debug.Assert(_cursor.CurrentPage.GetNode(_cursor.CurrentPage.LastSearchPosition)->PageNumber == _currentPage.PageNumber,
            "The parent page is not referencing a page which is being split");
        var parentToValidate = ParentOfAddedPageRef;
        Debug.Assert(Enumerable.Range(0, parentToValidate.NumberOfEntries).Any(i => parentToValidate.GetNode(i)->PageNumber == pageRefNumber),
            "The parent page of a page reference isn't referencing it");
#endif
        return posToInsert;
    }
    // fast path: the parent has room for the separator entry
    ParentOfAddedPageRef = _parentPage;
    var pos = _parentPage.AddPageRefNode(nodePos.Value, separatorKeyToInsert, pageRefNumber);
    EnsureValidLastSearchPosition(_parentPage, _currentPage.PageNumber, originalLastSearchPositionOfParent);
    return pos;
}
/// <summary>
/// Captures a page located during a tree search together with its key range
/// and the cursor path that led to it.
/// </summary>
public FoundPage(long number, Page page, MemorySlice firstKey, MemorySlice lastKey, long[] cursorPath)
{
    CursorPath = cursorPath;
    LastKey = lastKey;
    FirstKey = firstKey;
    Page = page;
    Number = number;
}
/// <summary>
/// Size contribution of a brand-new prefix carried by the key, or 0 when the
/// key is not a prefixed slice introducing one.
/// </summary>
public static int NewPrefix(MemorySlice key)
{
    var asPrefixed = key as PrefixedSlice;
    if (asPrefixed == null || asPrefixed.NewPrefix == null)
        return 0;
    // also need to take into account the size of a new prefix that will be written to the page
    return NewPrefix(asPrefixed);
}
/// <summary>
/// Creates an iterator over the given tree, choosing the internal key type
/// based on whether the tree compresses keys with prefixes.
/// </summary>
public TreeIterator(Tree tree, Transaction tx)
{
    _tree = tree;
    _tx = tx;
    _currentInternalKey = tree.KeysPrefixing
        ? (MemorySlice)new PrefixedSlice(SliceOptions.Key)
        : new Slice(SliceOptions.Key);
}
/// <summary>
/// Size of the node if its key were replaced by <paramref name="key"/>
/// (or kept, when key is null). Data size is included only for data-bearing
/// node kinds; the result is rounded up to 2-byte alignment.
/// </summary>
public static int NodeEntryWithAnotherKey(NodeHeader* other, MemorySlice key)
{
    var size = Constants.NodeHeaderSize + (key == null ? other->KeySize : key.Size);
    if (other->Flags == NodeFlags.Data || other->Flags == NodeFlags.MultiValuePageRef)
        size += other->DataSize;
    size += size & 1; // 2-byte alignment
    return size;
}
/// <summary>
/// Removes the multi-value tree registered under (parentTree, key).
/// Returns false when no multi-value trees exist or the key is not present.
/// </summary>
internal bool TryRemoveMultiValueTree(Tree parentTree, MemorySlice key)
{
    if (_multiValueTrees == null)
        return false;
    var lookupKey = Tuple.Create(parentTree, key);
    if (_multiValueTrees.ContainsKey(lookupKey) == false)
        return false;
    return _multiValueTrees.Remove(lookupKey);
}
/// <summary>
/// Locates the page holding (or that would hold) the key, first consulting
/// the transaction's recently-used page cache before searching the tree.
/// </summary>
internal Page FindPageFor(MemorySlice key, out NodeHeader* node, out Lazy<Cursor> cursor)
{
    Page cachedPage;
    if (TryUseRecentTransactionPage(key, out cursor, out cachedPage, out node))
        return cachedPage;
    return SearchForPage(key, ref cursor, ref node);
}
// Verifies that GetOrAdd invokes the value factory exactly once even when
// eight tasks race on the same key.
public void GetOrAddValueFactoryIsCalledOnyWhenAdded()
{
    try
    {
        CreateHashTable();
        int callCount = 0;   // how many times any value factory actually ran
        int addedCount = 0;  // how many tasks saw their own value win the add
        bool start = false;  // gate so all tasks hit GetOrAdd at (nearly) the same time
        Guid addedGuid = Guid.Empty;
        var seven = 7;
        var sevenSlice = new MemorySlice(&seven, sizeof(int));
        Action action = () =>
        {
            var guidToAdd = Guid.NewGuid();
            // NOTE(review): this slice points at guidToAdd, a local of this lambda;
            // presumably the hash table copies the bytes on add — confirm, otherwise
            // the stored slice dangles once the task finishes.
            var memorySliceToAdd = new MemorySlice(&guidToAdd, sizeof(Guid));
            Func<MemorySlice, MemorySlice> valueFactory = (key) =>
            {
                Interlocked.Increment(ref callCount);
                return (memorySliceToAdd);
            };
            while (Volatile.Read(ref start) == false)
            {
                ;
            }
            var existingOrAdded = hashTable.GetOrAdd(sevenSlice, valueFactory);
            if (existingOrAdded.SequenceEquals(memorySliceToAdd))
            {
                Interlocked.Increment(ref addedCount);
                addedGuid = guidToAdd; // only the single winning task writes this
            }
        };
        var tasks = new Task[8];
        for (int i = 0; i < tasks.Length; i++)
        {
            tasks[i] = Task.Factory.StartNew(action, TaskCreationOptions.LongRunning);
        }
        Thread.Sleep(100); // let all tasks reach the spin-wait before releasing them
        start = true;
        Task.WaitAll(tasks);
        Assert.Equal(1, callCount);
        Assert.Equal(1, addedCount);
        Assert.Equal(1, hashTable.Count);
        Assert.Single(hashTable);
        Assert.True(hashTable.ContainsKey(sevenSlice));
        Guid addedGuidCopy = addedGuid; // copy so we can take its address (addedGuid is captured)
        Assert.True(hashTable[sevenSlice].SequenceEquals(new MemorySlice(&addedGuidCopy, sizeof(Guid))));
    }
    finally
    {
        DeleteHashTable();
    }
}
/// <summary>
/// Creates a data node at the given slot and returns a pointer to the spot
/// where the caller must write the value bytes.
/// </summary>
public byte* AddDataNode(int index, MemorySlice key, int dataSize, ushort previousNodeVersion)
{
    Debug.Assert(dataSize >= 0);
    Debug.Assert(key.Options == SliceOptions.Key);

    var dataNode = CreateNode(index, key, NodeFlags.Data, dataSize, previousNodeVersion);
    dataNode->DataSize = dataSize;
    // the value area starts right after the node header and the key bytes
    return (byte*)dataNode + Constants.NodeHeaderSize + key.Size;
}
/// <summary>
/// Dispatches the key search to the prefixed or plain implementation,
/// depending on how this page stores its keys.
/// </summary>
public NodeHeader* Search(MemorySlice key)
{
    return KeysPrefixed
        ? SearchPrefixed(key)
        : Search((Slice)key);
}
/// <summary>
/// Creates a multi-value page-ref node (its data is always a TreeRootHeader)
/// and returns a pointer to where the caller writes that header.
/// </summary>
public byte* AddMultiValueNode(int index, MemorySlice key, int dataSize, ushort previousNodeVersion)
{
    Debug.Assert(dataSize == sizeof(TreeRootHeader));
    Debug.Assert(key.Options == SliceOptions.Key);

    var mvNode = CreateNode(index, key, NodeFlags.MultiValuePageRef, dataSize, previousNodeVersion);
    mvNode->DataSize = dataSize;
    // the header goes right after the node header and the key bytes
    return (byte*)mvNode + Constants.NodeHeaderSize + key.Size;
}
/// <summary>
/// Size contribution of a brand-new prefix carried by the key; 0 when the key
/// is not a prefixed slice that introduces one.
/// </summary>
public static int NewPrefix(MemorySlice key)
{
    var asPrefixed = key as PrefixedSlice;
    if (asPrefixed == null || asPrefixed.NewPrefix == null)
        return 0;
    // also need to take into account the size of a new prefix that will be written to the page
    return NewPrefix(asPrefixed);
}
// AddOrUpdate under contention: 8 tasks race on key 7; exactly one performs
// the initial add (value 0) and the other 7 each run the update factory once,
// incrementing the stored value, so the final stored value is 7.
public void AddOrUpdateShouldWorkAsExpected()
{
    int callCount = 0;  // number of update-factory invocations across all tasks
    int addedCount = 0; // number of tasks that performed the initial add
    bool start = false; // release gate for the racing tasks
    using (var store = CreateStore(56))
    using (var hashTable = store.GetConcurrentHashTable())
    {
        Action action = () =>
        {
            long valueToReturn;
            // per-task scratch buffer the update factory writes its result into
            var memorySliceToReturn = new MemorySlice(&valueToReturn, sizeof(long));
            MemorySlice updateValueFactory(long key, MemorySlice value)
            {
                Interlocked.Increment(ref callCount);
                // write (current stored value + 1) into this task's buffer
                *(long *)memorySliceToReturn.Pointer = *(long *)value.Pointer + 1;
                return (memorySliceToReturn);
            }
            while (Volatile.Read(ref start) == false)
            {
                ;
            }
            long addValue = 0;
            var memorySliceToAdd = new MemorySlice(&addValue, sizeof(long));
            var newValue = hashTable.AddOrUpdate(7, memorySliceToAdd, updateValueFactory);
            if (*(long *)newValue.Pointer == 0)
            {
                // only the winning add observes the initial value of 0
                Interlocked.Increment(ref addedCount);
            }
        };
        var tasks = new Task[8];
        for (int i = 0; i < tasks.Length; i++)
        {
            tasks[i] = Task.Factory.StartNew(action, TaskCreationOptions.LongRunning);
        }
        Thread.Sleep(100); // let every task reach the spin-wait before releasing
        start = true;
        Task.WaitAll(tasks);
        Assert.Equal(7, callCount);
        Assert.Equal(1, addedCount);
        Assert.Equal(1, hashTable.Count);
        Assert.Single(hashTable);
        Assert.True(hashTable.ContainsKey(7));
        Assert.Equal(7, *(long *)(hashTable[7].Pointer));
    }
}
/// <summary>
/// Bytes required to persist the brand-new prefix a prefixed key carries
/// (prefix node header + prefix bytes, 2-byte aligned); 0 when there is none.
/// </summary>
public static int NewPrefix(MemorySlice key)
{
    var asPrefixed = key as PrefixedSlice;
    if (asPrefixed == null || asPrefixed.NewPrefix == null)
        return 0;
    // a new prefix will also be written to the page; include its aligned size
    var total = Constants.PrefixNodeHeaderSize + asPrefixed.NewPrefix.Size;
    return total + (total & 1);
}
/// <summary>
/// Bytes needed on the page for a new prefix introduced by a prefixed key
/// (header plus prefix bytes, rounded up to an even count); 0 otherwise.
/// </summary>
public static int NewPrefix(MemorySlice key)
{
    var asPrefixed = key as PrefixedSlice;
    if (asPrefixed != null && asPrefixed.NewPrefix != null)
    {
        // a new prefix will also be written to the page
        var total = Constants.PrefixNodeHeaderSize + asPrefixed.NewPrefix.Size;
        return total + (total & 1); // keep 2-byte alignment
    }
    return 0;
}
/// <summary>
/// Parses the PE Optional Header standard fields, reporting a critical error
/// when the slice length differs from the expected header length.
/// </summary>
public OptionalHeaderStandardFields(ExceptionCollector collector, MemorySlice slice)
    : base(slice)
{
    var expectedLength = Data.Header.OptionalHeaderStandardFields.PeOptionalHeaderLength;
    if (Slice.Count > expectedLength)
    {
        collector.Add(new OptionalHeaderStandardFieldsException(ExceptionLevel.Critical, "The PE Optional Headers Standard Fields is too long."));
    }
    if (Slice.Count < expectedLength)
    {
        collector.Add(new OptionalHeaderStandardFieldsException(ExceptionLevel.Critical, "The PE Optional Headers Standard Fields is too short."));
    }
    Validate(collector);
}
/// <summary>
/// Builds an iterator for the tree; the internal key buffer type follows the
/// tree's key-prefixing mode.
/// </summary>
public TreeIterator(Tree tree, Transaction tx)
{
    _tree = tree;
    _tx = tx;
    _currentInternalKey = tree.KeysPrefixing
        ? (MemorySlice)new PrefixedSlice(SliceOptions.Key)
        : new Slice(SliceOptions.Key);
}
/// <summary>
/// Inserts a page-ref separator for <paramref name="pageNumber"/> into the
/// parent page, splitting the parent when it has no room.
/// </summary>
private byte* AddSeparatorToParentPage(long pageNumber, MemorySlice seperatorKey)
{
    // select the appropriate place for this
    var insertPosition = _parentPage.NodePositionFor(seperatorKey);
    var preparedKey = _parentPage.PrepareKeyToInsert(seperatorKey, insertPosition);
    var required = SizeOf.BranchEntry(preparedKey) + Constants.NodeOffsetSize + SizeOf.NewPrefix(preparedKey);
    if (_parentPage.HasSpaceFor(_tx, required))
        return _parentPage.AddPageRefNode(insertPosition, preparedKey, pageNumber);

    // parent is full: split it; the splitter places the page ref itself
    var splitter = new PageSplitter(_tx, _tree, seperatorKey, -1, pageNumber, NodeFlags.PageRef, 0, _cursor, _treeState);
    return splitter.Execute();
}
// REVIEW: Removed forced inlining for now until we can see if we improve without needing it.
// [MethodImpl(MethodImplOptions.AggressiveInlining)]
/// <summary>
/// Reads the node's key into the slice, routing through the strongly-typed
/// overload matching the page's key representation, and writes the (possibly
/// replaced) instance back to the caller.
/// </summary>
public void SetNodeKey(NodeHeader* node, ref MemorySlice sliceInstance)
{
    if (KeysPrefixed)
    {
        var prefixed = (PrefixedSlice)sliceInstance;
        SetNodeKey(node, ref prefixed);
        sliceInstance = prefixed;
    }
    else
    {
        var plain = (Slice)sliceInstance;
        SetNodeKey(node, ref plain);
        sliceInstance = plain;
    }
}
/// <summary>
/// Inserts a page-ref separator at a known position in the given parent page,
/// splitting that parent when it lacks space.
/// </summary>
private void AddSeparatorToParentPage(Page parentPage, long pageNumber, MemorySlice seperatorKey, int separatorKeyPosition)
{
    var preparedKey = parentPage.PrepareKeyToInsert(seperatorKey, separatorKeyPosition);
    var required = SizeOf.BranchEntry(preparedKey) + Constants.NodeOffsetSize + SizeOf.NewPrefix(preparedKey);
    if (parentPage.HasSpaceFor(_tx, required))
    {
        parentPage.AddPageRefNode(separatorKeyPosition, preparedKey, pageNumber);
        return;
    }
    // no room in the parent: split it; the splitter inserts the page ref itself
    var splitter = new PageSplitter(_tx, _tree, seperatorKey, -1, pageNumber, NodeFlags.PageRef, 0, _cursor, _tree.State);
    splitter.Execute();
}
/// <summary>
/// Parses the PE file-header characteristics field, reporting a critical
/// error when the slice length differs from the expected length, then decodes
/// and verifies the 16-bit characteristics value.
/// </summary>
public FileHeaderCharacteristics(ExceptionCollector collector, MemorySlice slice)
    : base(slice)
{
    var expectedLength = Characteristics.CharacteristicsLength;
    if (Slice.Count > expectedLength)
    {
        collector.Add(new CharacteristicsException(ExceptionLevel.Critical, "The PE Characteristics is too long."));
    }
    if (Slice.Count < expectedLength)
    {
        collector.Add(new CharacteristicsException(ExceptionLevel.Critical, "The PE Characteristics is too short."));
    }
    _characteristics = BitConverter.ToUInt16(Slice.Dump(), 0);
    Verify(collector);
}
// TryAdd must return false for a key that was already added via Add.
public void TryAddingExistingKeyShouldFail()
{
    using (var store = CreateStore(56))
    using (var hashTable = store.GetConcurrentHashTable())
    {
        int value = 2;
        var memorySlize = new MemorySlice(&value, sizeof(int));
        hashTable.Add(8, memorySlize);
        Assert.False(hashTable.TryAdd(8, memorySlize));
        // explicit disposes release the file handles before the backing files are
        // deleted; the using blocks dispose again afterwards — presumably Dispose
        // is idempotent here (TODO confirm)
        hashTable.Dispose();
        store.Dispose();
        File.Delete(hashTable.config.HashTableFilePath);
        File.Delete(hashTable.config.DataFilePath);
    }
}
/// <summary>
/// Renders a branch node's key for debug output, truncated to 25 characters.
/// The implicit left-most entry (slot 0, empty key) is shown as an upper
/// bound derived from the next entry's key.
/// </summary>
private static string GetBranchNodeString(int i, MemorySlice key, Page p, NodeHeader* node)
{
    string text;
    if (i == 0 && key.KeyLength == 0)
    {
        // slot 0 on a branch page has no key of its own
        key = p.GetNodeKey(1);
        text = "(lt " + key + ")";
    }
    else
    {
        key = p.GetNodeKey(node);
        text = key.ToString();
    }
    return MaxString(text, 25);
}
/// <summary>
/// Linear scan of the page cache for a page whose [FirstKey, LastKey] range
/// contains the key; returns null when no cached page qualifies.
/// </summary>
public FoundPage Find(MemorySlice key)
{
    for (var slot = 0; slot < _cache.Length; slot++)
    {
        var candidate = _cache[slot];
        if (candidate == null)
            continue;

        var firstKey = candidate.FirstKey;
        var lastKey = candidate.LastKey;

        if (key.Options == SliceOptions.BeforeAllKeys)
        {
            if (firstKey.Options == SliceOptions.BeforeAllKeys)
                return candidate;
        }
        else if (key.Options == SliceOptions.AfterAllKeys)
        {
            if (lastKey.Options == SliceOptions.AfterAllKeys)
                return candidate;
        }
        else if (key.Options == SliceOptions.Key)
        {
            // skip pages whose range does not cover the key; boundary markers
            // act as open endpoints
            if (firstKey.Options != SliceOptions.BeforeAllKeys && key.Compare(firstKey) < 0)
                continue;
            if (lastKey.Options != SliceOptions.AfterAllKeys && key.Compare(lastKey) > 0)
                continue;
            return candidate;
        }
        else
        {
            throw new ArgumentException(key.Options.ToString());
        }
    }
    return null;
}
// Scans the page cache as a ring buffer starting at 'current' (most recently
// used slot), returning the first cached page whose [FirstKey, LastKey] range
// contains the key, or null when none does.
public FoundPage Find(MemorySlice key)
{
    int position = current;       // start at the most recently used slot
    int itemsLeft = _cacheSize;   // visit each slot at most once
    while ( itemsLeft > 0 )
    {
        var page = _cache[position % _cacheSize];
        if (page == null)
        {
            itemsLeft--;
            position++;
            continue;
        }
        var first = page.FirstKey;
        var last = page.LastKey;
        switch (key.Options)
        {
            case SliceOptions.Key:
                // 'break' here rejects this page and falls through to advance
                // the ring position; only a range hit returns
                if ((first.Options != SliceOptions.BeforeAllKeys && key.Compare(first) < 0))
                    break;
                if (last.Options != SliceOptions.AfterAllKeys && key.Compare(last) > 0)
                    break;
                return page;
            case SliceOptions.BeforeAllKeys:
                // only the left-most page can serve this lookup
                if (first.Options == SliceOptions.BeforeAllKeys)
                    return page;
                break;
            case SliceOptions.AfterAllKeys:
                // only the right-most page can serve this lookup
                if (last.Options == SliceOptions.AfterAllKeys)
                    return page;
                break;
            default:
                throw new ArgumentException(key.Options.ToString());
        }
        itemsLeft--;
        position++;
    }
    return null; // not cached
}
/// <summary>
/// Internal method that is used when splitting pages
/// No need to do any work here, we are always adding at the end
/// Copies a node (key, flags, and either a page ref or the data bytes) from
/// another page onto the end of this page, preserving the source version.
/// </summary>
internal void CopyNodeDataToEndOfPage(NodeHeader* other, MemorySlice key)
{
    var index = NumberOfEntries;
    Debug.Assert(HasSpaceFor(SizeOf.NodeEntryWithAnotherKey(other, key) + Constants.NodeOffsetSize + SizeOf.NewPrefix(key)));
    var nodeSize = SizeOf.NodeEntryWithAnotherKey(other, key);
    Debug.Assert(IsBranch == false || index != 0 || key.KeyLength == 0); // branch page's first item must be the implicit ref
    var nodeVersion = other->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
    {
        nodeVersion -= 1;
    }
    // if the key carries a brand-new prefix, persist it on this page first
    var prefixedKey = key as PrefixedSlice;
    if (prefixedKey != null && prefixedKey.NewPrefix != null)
    {
        WritePrefix(prefixedKey.NewPrefix, prefixedKey.Header.PrefixId);
    }
    var newNode = AllocateNewNode(index, nodeSize, nodeVersion);
    newNode->KeySize = key.Size;
    newNode->Flags = other->Flags;
    if (key.Options == SliceOptions.Key && key.Size > 0)
    {
        key.CopyTo((byte *)newNode + Constants.NodeHeaderSize);
    }
    // branch pages only ever hold page refs; in either case no data follows
    if (IsBranch || other->Flags == (NodeFlags.PageRef))
    {
        newNode->PageNumber = other->PageNumber;
        newNode->Flags = NodeFlags.PageRef;
        return;
    }
    // leaf data node: copy the value bytes from right after the source's key
    newNode->DataSize = other->DataSize;
    MemoryUtils.Copy((byte *)newNode + Constants.NodeHeaderSize + key.Size,
        (byte *)other + Constants.NodeHeaderSize + other->KeySize,
        other->DataSize);
}
// Reads the node's key into sliceInstance, reusing the supplied instance when
// possible to avoid allocating. On prefix-compressed pages it also binds the
// slice's prefix object to the prefix bytes stored on this page.
public void SetNodeKey(NodeHeader *node, ref MemorySlice sliceInstance)
{
    if (KeysPrefixed == false)
    {
        // plain keys: just point the slice at the node's key bytes
        sliceInstance.Set(node);
        return;
    }
    if (node->KeySize == 0)
    {
        sliceInstance = PrefixedSlice.Empty;
        return;
    }
    PrefixedSlice prefixedSlice;
    if (sliceInstance != null && sliceInstance != PrefixedSlice.Empty)
    {
        // reuse the caller's instance to avoid an allocation
        sliceInstance.Set(node);
        prefixedSlice = (PrefixedSlice)sliceInstance;
    }
    else
    {
        sliceInstance = prefixedSlice = new PrefixedSlice(node);
    }
    if (prefixedSlice.Header.PrefixId == PrefixedSlice.NonPrefixedId)
    {
        // key is stored without any prefix compression
        Debug.Assert(prefixedSlice.Header.PrefixUsage == 0);
        return;
    }
    Debug.Assert(prefixedSlice.Header.PrefixId < PrefixCount);
    if (prefixedSlice.Prefix == null)
    {
        prefixedSlice.Prefix = new PrefixNode();
    }
    AssertPrefixNode(prefixedSlice.Header.PrefixId);
    // point the prefix at its backing bytes within this page
    prefixedSlice.Prefix.Set(_base + _prefixSection->PrefixOffsets[prefixedSlice.Header.PrefixId], PageNumber);
}
/// <summary>
/// Calculate the size of a leaf node.
/// The size depends on the environment's page size; if a data item
/// is too large it will be put onto an overflow page and the node
/// size will only include the key and not the data. Sizes are always
/// rounded up to an even number of bytes, to guarantee 2-byte alignment.
/// </summary>
public static int LeafEntry(int pageMaxSpace, MemorySlice key, int len)
{
    var size = Constants.NodeHeaderSize;
    if (key.Options == SliceOptions.Key)
        size += key.Size;

    if (len != 0)
    {
        size += len;
        if (size > pageMaxSpace)
        {
            // data moves to an overflow page; only a page number stays inline
            size -= len - Constants.PageNumberSize;
        }
    }
    // len == 0 → page ref node, which takes no additional space

    return size + (size & 1); // round up to even for 2-byte alignment
}
/// <summary>
/// Parses the PE Optional Header: validates its total length, then slices out
/// and parses the standard and NT-specific field groups.
/// </summary>
public OptionalHeader(ExceptionCollector collector, MemorySlice slice)
    : base(slice)
{
    var expectedLength = Data.Header.OptionalHeader.PeOptionalHeaderLength;
    if (Slice.Count > expectedLength)
    {
        collector.Add(new OptionalHeaderException(ExceptionLevel.Critical, "The PE Optional header is too long."));
    }
    if (Slice.Count < expectedLength)
    {
        collector.Add(new OptionalHeaderException(ExceptionLevel.Critical, "The PE Optional header is too short."));
    }
    var standardSlice = slice.GetSlice(
        Data.Header.OptionalHeader.StandardFieldsOffset,
        Data.Header.OptionalHeader.StandardFieldsLength);
    StandardFields = new OptionalHeaderStandardFields(collector, standardSlice);
    var ntSpecificSlice = slice.GetSlice(
        Data.Header.OptionalHeader.NtSpecificFieldsOffset,
        Data.Header.OptionalHeader.NtSpecificFieldsLength);
    NtSpecificFields = new OptionalHeaderNtSpecificFields(collector, ntSpecificSlice);
}
/// <summary>
/// Adds the pending node (_newKey/_len/_pageNumber/_nodeVersion) to the page
/// at the given slot, dispatching on the node type. A pre-prepared key may be
/// supplied to skip PrepareKeyToInsert.
/// </summary>
private byte* AddNodeToPage(Page page, int index, MemorySlice alreadyPreparedNewKey = null)
{
    var keyToInsert = alreadyPreparedNewKey;
    if (keyToInsert == null)
        keyToInsert = page.PrepareKeyToInsert(_newKey, index);

    switch (_nodeType)
    {
        case NodeFlags.PageRef:
            return page.AddPageRefNode(index, keyToInsert, _pageNumber);
        case NodeFlags.Data:
            return page.AddDataNode(index, keyToInsert, _len, _nodeVersion);
        case NodeFlags.MultiValuePageRef:
            return page.AddMultiValueNode(index, keyToInsert, _len, _nodeVersion);
        default:
            throw new NotSupportedException("Unknown node type");
    }
}
// Calculates the on-page size of a leaf node entry: header + key + data,
// rounded up to an even byte count for 2-byte alignment. When the entry would
// exceed the page's max space the data goes to an overflow page, so only a
// page number is counted inline. len == 0 means a page-ref node with no data.
public static int LeafEntry(int pageMaxSpace, MemorySlice key, int len)
{
    var nodeSize = Constants.NodeHeaderSize;
    if (key.Options == SliceOptions.Key)
        nodeSize += key.Size;
    if (len != 0)
    {
        nodeSize += len;
        if (nodeSize > pageMaxSpace)
            nodeSize -= len - Constants.PageNumberSize; // data moves to overflow; keep only the page number inline
    }
    // else - page ref node, take no additional space
    nodeSize += nodeSize & 1; // 2-byte alignment
    return nodeSize;
}
/// <summary>
/// Parses the MS-DOS header: validates its length, runs structural checks,
/// and extracts e_lfanew (the file offset of the PE header).
/// </summary>
public MsDosHeader(ExceptionCollector collector, MemorySlice slice)
    : base(slice)
{
    var expectedLength = Data.Header.MsDosHeader.HeaderLength;
    if (Slice.Count > expectedLength)
    {
        collector.Add(new MsDosHeaderException("The MS-DOS header is too long."));
    }
    if (Slice.Count < expectedLength)
    {
        collector.Add(new MsDosHeaderException("The MS-DOS header is too short."));
    }
    Validate(collector);
    var lfanewBytes = Slice.GetSlice(
        Data.Header.MsDosHeader.LfanewStartingOffset,
        Data.Header.MsDosHeader.LfanewLength).Dump();
    LongFileAddressOfNewExeHeader = BitConverter.ToUInt32(lfanewBytes, 0);
}
// Opens the multi-value tree whose root header is stored inline as the given
// node's value, caching the opened tree on the transaction so repeated
// lookups within the same transaction reuse it.
private Tree OpenMultiValueTree(Transaction tx, MemorySlice key, NodeHeader *item)
{
    Tree tree;
    if (tx.TryGetMultiValueTree(this, key, out tree))
    {
        return (tree);
    }
    // the child tree's root header sits right after the node header and key bytes
    var childTreeHeader = (TreeRootHeader *)((byte *)item + item->KeySize + Constants.NodeHeaderSize);
    Debug.Assert(childTreeHeader->RootPageNumber < tx.State.NextPageNumber);
    Debug.Assert(childTreeHeader->Flags == TreeFlags.MultiValue);
    tree = Open(tx, childTreeHeader);
    tx.AddMultiValueTree(this, key, tree); // cache for subsequent lookups in this tx
    return (tree);
}
// Attempts to reuse the existing overflow chain of an updated value instead
// of allocating a new one. Succeeds only when the new data fits into the
// current overflow allocation and no older active transaction could still be
// reading those pages; surplus trailing overflow pages are freed.
private bool TryOverwriteOverflowPages(TreeMutableState treeState, NodeHeader *updatedNode, MemorySlice key, int len, ushort?version, out byte *pos)
{
    if (updatedNode->Flags == NodeFlags.PageRef &&
        _tx.Id <= _tx.Environment.OldestTransaction) // ensure MVCC - do not overwrite if there is some older active transaction that might read those overflows
    {
        var overflowPage = _tx.GetReadOnlyPage(updatedNode->PageNumber);
        if (len <= overflowPage.OverflowSize)
        {
            CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add);
            // bump the node version, wrapping before ushort overflow
            if (updatedNode->Version == ushort.MaxValue)
            {
                updatedNode->Version = 0;
            }
            updatedNode->Version++;
            var availableOverflows = _tx.DataPager.GetNumberOfOverflowPages(overflowPage.OverflowSize);
            var requestedOverflows = _tx.DataPager.GetNumberOfOverflowPages(len);
            var overflowsToFree = availableOverflows - requestedOverflows;
            // free the tail pages the smaller value no longer needs
            for (int i = 0; i < overflowsToFree; i++)
            {
                _tx.FreePage(overflowPage.PageNumber + requestedOverflows + i);
            }
            treeState.OverflowPages -= overflowsToFree;
            treeState.PageCount -= overflowsToFree;
            overflowPage.OverflowSize = len;
            pos = overflowPage.Base + Constants.PageHeaderSize;
            return (true);
        }
    }
    pos = null;
    return (false);
}
// TryUpdate must fail for a missing key, fail when the comparison value does
// not match the stored value, and succeed (storing the new value) when it does.
public void TryUpdateShouldWorkAsExpectd()
{
    try
    {
        CreateHashTable();
        int seven = 7;
        int six = 6;
        var sevenMemorySlice = new MemorySlice(&seven, sizeof(int));
        var sixMemorySlice = new MemorySlice(&six, sizeof(int));
        // key absent: update must fail
        Assert.False(hashTable.TryUpdate(sevenMemorySlice, sevenMemorySlice, sevenMemorySlice));
        hashTable.Add(sevenMemorySlice, sevenMemorySlice);
        // stored value is 7; comparison value 6 does not match, so this fails
        Assert.False(hashTable.TryUpdate(sevenMemorySlice, sixMemorySlice, sixMemorySlice));
        // comparison value 7 matches: value is updated to 6
        Assert.True(hashTable.TryUpdate(sevenMemorySlice, sixMemorySlice, sevenMemorySlice));
        Assert.Equal(6, *(int *)(hashTable[sevenMemorySlice].Pointer));
        Assert.Single(hashTable);
    }
    finally
    {
        DeleteHashTable();
    }
}
// Attempts to reuse an updated node's existing overflow allocation when the
// new data fits. Frees the surplus trailing overflow pages, records them on
// State, and allocates a writable copy of the overflow chain at the same page
// number for the caller to write into.
private bool TryOverwriteOverflowPages(NodeHeader *updatedNode, MemorySlice key, int len, ushort?version, out byte *pos)
{
    if (updatedNode->Flags == NodeFlags.PageRef)
    {
        var readOnlyOverflowPage = _tx.GetReadOnlyPage(updatedNode->PageNumber);
        if (len <= readOnlyOverflowPage.OverflowSize)
        {
            CheckConcurrency(key, version, updatedNode->Version, TreeActionType.Add);
            // bump the node version, wrapping before ushort overflow
            if (updatedNode->Version == ushort.MaxValue)
            {
                updatedNode->Version = 0;
            }
            updatedNode->Version++;
            var availableOverflows = _tx.DataPager.GetNumberOfOverflowPages(readOnlyOverflowPage.OverflowSize);
            var requestedOverflows = _tx.DataPager.GetNumberOfOverflowPages(len);
            var overflowsToFree = availableOverflows - requestedOverflows;
            // free the tail pages the smaller value no longer needs
            for (int i = 0; i < overflowsToFree; i++)
            {
                _tx.FreePage(readOnlyOverflowPage.PageNumber + requestedOverflows + i);
            }
            State.RecordFreedPage(readOnlyOverflowPage, overflowsToFree);
            // get a writable overflow chain at the same page number
            var writtableOverflowPage = _tx.AllocatePage(requestedOverflows, PageFlags.Overflow, updatedNode->PageNumber);
            writtableOverflowPage.OverflowSize = len;
            pos = writtableOverflowPage.Base + Constants.PageHeaderSize;
            return (true);
        }
    }
    pos = null;
    return (false);
}
/// <summary>
/// Prepares a page split: records the pending node's details, takes a
/// writable copy of the page at the top of the cursor, and pops it so the
/// cursor points at its parent.
/// </summary>
public PageSplitter(Transaction tx, Tree tree, MemorySlice newKey, int len, long pageNumber, NodeFlags nodeType, ushort nodeVersion, Cursor cursor)
{
    _tx = tx;
    _tree = tree;
    _newKey = newKey;
    _len = len;
    _pageNumber = pageNumber;
    _nodeType = nodeType;
    _nodeVersion = nodeVersion;
    _cursor = cursor;

    var topPage = _cursor.Pages.First.Value;
    _page = tx.ModifyPage(topPage.PageNumber, _tree, topPage);
    _cursor.Pop();
}
/// <summary>
/// Prepares a page split against an explicit tree state: records the pending
/// node's details, takes a writable copy of the cursor's top page, and pops
/// it so the cursor points at its parent.
/// </summary>
public PageSplitter(Transaction tx, Tree tree, MemorySlice newKey, int len, long pageNumber, NodeFlags nodeType, ushort nodeVersion, Cursor cursor, TreeMutableState treeState)
{
    _tx = tx;
    _tree = tree;
    _newKey = newKey;
    _len = len;
    _pageNumber = pageNumber;
    _nodeType = nodeType;
    _nodeVersion = nodeVersion;
    _cursor = cursor;
    _treeState = treeState;

    var topPage = _cursor.Pages.First.Value;
    _page = tx.ModifyPage(topPage.PageNumber, _tree, topPage);
    _cursor.Pop();
}
/// <summary>
/// Registers a multi-value tree under (tree, key), lazily creating the
/// backing dictionary, and marks the tree as multi-value.
/// </summary>
internal void AddMultiValueTree(Tree tree, MemorySlice key, Tree mvTree)
{
    if (_multiValueTrees == null)
    {
        _multiValueTrees = new Dictionary<Tuple<Tree, MemorySlice>, Tree>(new TreeAndSliceComparer());
    }
    mvTree.IsMultiValueTree = true;
    var registrationKey = Tuple.Create(tree, key);
    _multiValueTrees.Add(registrationKey, mvTree);
}
/// <summary>
/// Routes the key search to the prefixed or plain-slice implementation based
/// on this page's key storage mode.
/// </summary>
public NodeHeader* Search(MemorySlice key)
{
    return KeysPrefixed
        ? SearchPrefixed(key)
        : Search((Slice)key);
}
// Binary search for the key among this page's (prefix-compressed) entries.
// Sets LastMatch (comparison of key vs the last inspected entry) and
// LastSearchPosition (slot where the key is or would be inserted); returns
// the node at that slot, or null when the key sorts past the last entry.
private NodeHeader* SearchPrefixed( MemorySlice key )
{
    key.PrepareForSearching();

    int numberOfEntries = NumberOfEntries;
    if (numberOfEntries == 0)
    {
        LastSearchPosition = 0;
        LastMatch = 1;
        return null;
    }

    switch (key.Options)
    {
        case SliceOptions.Key:
        {
            var pageKey = CreateNewEmptyKey(); // reusable buffer for entry keys
            if (numberOfEntries == 1)
            {
                var node = GetNode(0);
                SetNodeKey(node, ref pageKey);
                LastMatch = key.Compare(pageKey);
                LastSearchPosition = LastMatch > 0 ? 1 : 0;
                return LastSearchPosition == 0 ? node : null;
            }

            // on a branch page slot 0 is the implicit left-most ref with no key
            int low = IsLeaf ? 0 : 1;
            int high = numberOfEntries - 1;
            int position = 0;
            while (low <= high)
            {
                position = (low + high) >> 1;
                var node = (NodeHeader*)(_base + KeysOffsets[position]);
                SetNodeKey(node, ref pageKey);
                LastMatch = key.Compare(pageKey);
                if (LastMatch == 0)
                    break;
                if (LastMatch > 0)
                    low = position + 1;
                else
                    high = position - 1;
            }

            if (LastMatch > 0) // found entry less than key
            {
                position++; // move to the smallest entry larger than the key
            }
            Debug.Assert(position < ushort.MaxValue);
            LastSearchPosition = position;
            if (position >= numberOfEntries)
                return null;
            return GetNode(position);
        }
        case SliceOptions.BeforeAllKeys:
        {
            LastSearchPosition = 0;
            LastMatch = 1;
            return GetNode(0);
        }
        case SliceOptions.AfterAllKeys:
        {
            LastMatch = -1;
            LastSearchPosition = numberOfEntries - 1;
            return GetNode(LastSearchPosition);
        }
        default:
            throw new NotSupportedException("This SliceOptions is not supported. Make sure you have updated this code when adding a new one.");
    }
}
// REVIEW: Removed forced inlining for now until we can see if we improve without needing it.
// [MethodImpl(MethodImplOptions.AggressiveInlining)]
/// <summary>
/// Reads the node's key into the slice via the strongly-typed overload that
/// matches the page's key representation, writing the result back to the
/// caller's ref parameter.
/// </summary>
public void SetNodeKey(NodeHeader* node, ref MemorySlice sliceInstance)
{
    if (KeysPrefixed)
    {
        var prefixed = (PrefixedSlice)sliceInstance;
        SetNodeKey(node, ref prefixed);
        sliceInstance = prefixed;
    }
    else
    {
        var plain = (Slice)sliceInstance;
        SetNodeKey(node, ref plain);
        sliceInstance = plain;
    }
}
/// <summary>
/// Creates a page-reference node at the given slot pointing at
/// <paramref name="pageNumber"/>. Always returns null: page-ref nodes carry
/// no inline data, so there is nothing for the caller to write.
/// </summary>
public byte* AddPageRefNode(int index, MemorySlice key, long pageNumber)
{
    var refNode = CreateNode(index, key, NodeFlags.PageRef, -1, 0);
    refNode->PageNumber = pageNumber;
    return null;
}
/// <summary>
/// Creates a multi-value page-ref node whose data is always a TreeRootHeader;
/// returns a pointer to where the caller writes that header.
/// </summary>
public byte* AddMultiValueNode(int index, MemorySlice key, int dataSize, ushort previousNodeVersion)
{
    Debug.Assert(dataSize == sizeof(TreeRootHeader));
    Debug.Assert(key.Options == SliceOptions.Key);

    var mvNode = CreateNode(index, key, NodeFlags.MultiValuePageRef, dataSize, previousNodeVersion);
    mvNode->DataSize = dataSize;
    // the header area begins right after the node header and key bytes
    return (byte*)mvNode + Constants.NodeHeaderSize + key.Size;
}
/// <summary>
/// Total page space an entry needs: the node entry itself, its offset slot,
/// and any new prefix the key would introduce.
/// </summary>
public int GetRequiredSpace(MemorySlice key, int len)
{
    var entrySize = SizeOf.NodeEntry(PageMaxSpace, key, len);
    return entrySize + Constants.NodeOffsetSize + SizeOf.NewPrefix(key);
}
/// <summary>
/// Returns the slot at which the key is (or would be inserted), relying on
/// Search's side effect of updating LastSearchPosition.
/// </summary>
public int NodePositionFor(MemorySlice key)
{
    Search(key);
    return LastSearchPosition;
}
/// <summary>
/// True when the page can fit an entry for the key/length, including the
/// offset slot and any new prefix.
/// </summary>
public bool HasSpaceFor(Transaction tx, MemorySlice key, int len)
{
    return HasSpaceFor(tx, GetRequiredSpace(key, len));
}
// Tries to compress the key against a prefix already stored on this page.
// A full prefix match wins immediately; otherwise the longest partial match
// is remembered and used only when it exceeds MinPrefixLength(key).
private bool TryUseExistingPrefix(MemorySlice key, out PrefixedSlice prefixedSlice)
{
    if (_prefixSection->NextPrefixId < 1) // no prefixes stored on this page yet
    {
        prefixedSlice = null;
        return false;
    }
    BestPrefixMatch bestMatch = null;
    for (byte prefixId = 0; prefixId < _prefixSection->NextPrefixId; prefixId++)
    {
        AssertPrefixNode(prefixId);
        var prefix = new PrefixNode();
        // bind the prefix object to its backing bytes within this page
        prefix.Set(_base + _prefixSection->PrefixOffsets[prefixId], PageNumber);
        var length = key.FindPrefixSize(new Slice(prefix.ValuePtr, prefix.PrefixLength));
        if (length == 0)
            continue;
        if (length == prefix.PrefixLength) // full prefix usage
        {
            prefixedSlice = new PrefixedSlice(prefixId, length, key.Skip(length))
            {
                Prefix = prefix
            };
            return true;
        }
        // keep on looking for a better prefix
        if (bestMatch == null)
        {
            bestMatch = new BestPrefixMatch
            {
                PrefixId = prefixId,
                PrefixUsage = length,
                PrefixNode = prefix
            };
        }
        else if (length > bestMatch.PrefixUsage)
        {
            bestMatch.PrefixId = prefixId;
            bestMatch.PrefixUsage = length;
            bestMatch.PrefixNode = prefix;
        }
    }
    // partial matches only pay off past the minimum useful prefix length
    if (bestMatch != null && bestMatch.PrefixUsage > MinPrefixLength(key))
    {
        prefixedSlice = new PrefixedSlice(bestMatch.PrefixId, bestMatch.PrefixUsage, key.Skip(bestMatch.PrefixUsage))
        {
            Prefix = bestMatch.PrefixNode
        };
        return true;
    }
    prefixedSlice = null;
    return false;
}
// Tries to create a brand-new page prefix for the key, derived from its
// common prefix with the neighboring keys at the insertion slot. The longer
// of the left/right matches wins, but only when it exceeds
// MinPrefixLength(key); fails when the page is out of prefix slots or empty.
private bool TryCreateNewPrefix(MemorySlice key, int nodeIndex, out PrefixedSlice prefixedSlice)
{
    if (_prefixSection->NextPrefixId >= PrefixCount || NumberOfEntries == 0)
    {
        // no prefix slots left on this page, or nothing to compare against
        prefixedSlice = null;
        return false;
    }
    MemorySlice left;
    MemorySlice right;
    if (nodeIndex > 0 && nodeIndex < NumberOfEntries) // middle
    {
        left = GetNodeKey(nodeIndex - 1);
        right = GetNodeKey(nodeIndex);
    }
    else if (nodeIndex == 0) // first
    {
        left = null;
        right = GetNodeKey(0);
    }
    else if (nodeIndex == NumberOfEntries) // last
    {
        left = GetNodeKey(nodeIndex - 1);
        right = null;
    }
    else
        throw new NotSupportedException("Invalid node index prefix: " + nodeIndex + ". Number of entries: " + NumberOfEntries);
    ushort leftLength = 0;
    ushort rightLength = 0;
    if (left != null && left.Size > 0) // not before all keys
        leftLength = key.FindPrefixSize(left);
    if (right != null)
        rightLength = key.FindPrefixSize(right);
    var minPrefixLength = MinPrefixLength(key);
    if (left != null && leftLength > minPrefixLength && leftLength > rightLength)
    {
        prefixedSlice = new PrefixedSlice(_prefixSection->NextPrefixId, leftLength, key.Skip(leftLength))
        {
            NewPrefix = new Slice(left.ToSlice(), leftLength)
        };
        return true;
    }
    if (right != null && rightLength > minPrefixLength && rightLength > leftLength)
    {
        prefixedSlice = new PrefixedSlice(_prefixSection->NextPrefixId, rightLength, key.Skip(rightLength))
        {
            NewPrefix = new Slice(right.ToSlice(), rightLength)
        };
        return true;
    }
    prefixedSlice = null;
    return false;
}
/// <summary>
/// Converts a key into the form this page stores: unchanged on non-prefixed
/// pages; on prefixed pages tries an existing prefix, then a new prefix, and
/// finally falls back to an uncompressed prefixed slice.
/// </summary>
public MemorySlice PrepareKeyToInsert(MemorySlice key, int nodeIndex)
{
    if (KeysPrefixed == false)
        return key;

    if (key.KeyLength == 0)
        return PrefixedSlice.Empty;

    PrefixedSlice prefixed;
    if (TryUseExistingPrefix(key, out prefixed))
        return prefixed;
    if (TryCreateNewPrefix(key, nodeIndex, out prefixed))
        return prefixed;
    return new PrefixedSlice(key);
}
/// <summary>
/// Internal method that is used when splitting pages
/// No need to do any work here, we are always adding at the end
/// Copies a node (key, flags, and either a page ref or data bytes) from
/// another page onto the end of this page, preserving the source version.
/// </summary>
internal void CopyNodeDataToEndOfPage(NodeHeader* other, MemorySlice key)
{
    var index = NumberOfEntries;
    Debug.Assert(HasSpaceFor(SizeOf.NodeEntryWithAnotherKey(other, key) + Constants.NodeOffsetSize + SizeOf.NewPrefix(key)));

    var nodeSize = SizeOf.NodeEntryWithAnotherKey(other, key);
    Debug.Assert(IsBranch == false || index != 0 || key.KeyLength == 0);// branch page's first item must be the implicit ref

    var nodeVersion = other->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
        nodeVersion -= 1;

    // if the key carries a brand-new prefix, persist it on this page first
    var prefixedKey = key as PrefixedSlice;
    if (prefixedKey != null && prefixedKey.NewPrefix != null)
        WritePrefix(prefixedKey.NewPrefix, prefixedKey.Header.PrefixId);

    var newNode = AllocateNewNode(index, nodeSize, nodeVersion);
    newNode->KeySize = key.Size;
    newNode->Flags = other->Flags;

    if(key.Options == SliceOptions.Key && key.Size > 0)
        key.CopyTo((byte*)newNode + Constants.NodeHeaderSize);

    // branch pages only ever hold page refs; in either case no data follows
    if (IsBranch || other->Flags == (NodeFlags.PageRef))
    {
        newNode->PageNumber = other->PageNumber;
        newNode->Flags = NodeFlags.PageRef;
        return;
    }

    // leaf data node: copy the value bytes from right after the source's key
    newNode->DataSize = other->DataSize;
    Memory.Copy((byte*)newNode + Constants.NodeHeaderSize + key.Size,
        (byte*)other + Constants.NodeHeaderSize + other->KeySize,
        other->DataSize);
}
// Allocates and initializes a node at the given slot: validates space, writes
// any new prefix the key carries, shifts higher offset slots up, then copies
// the key bytes into the freshly allocated node.
private NodeHeader* CreateNode(int index, MemorySlice key, NodeFlags flags, int len, ushort previousNodeVersion)
{
    Debug.Assert(index <= NumberOfEntries && index >= 0);
    Debug.Assert(IsBranch == false || index != 0 || key.KeyLength == 0);// branch page's first item must be the implicit ref
    if (HasSpaceFor(key, len) == false)
        throw new InvalidOperationException(string.Format("The page is full and cannot add an entry, this is probably a bug. Key: {0}, data length: {1}, size left: {2}", key, len, SizeLeft));

    // persist a brand-new prefix before the node that references it
    var prefixedKey = key as PrefixedSlice;
    if (prefixedKey != null && prefixedKey.NewPrefix != null)
        WritePrefix(prefixedKey.NewPrefix, prefixedKey.Header.PrefixId);

    // move higher pointers up one slot
    for (int i = NumberOfEntries; i > index; i--)
    {
        KeysOffsets[i] = KeysOffsets[i - 1];
    }

    var nodeSize = SizeOf.NodeEntry(PageMaxSpace, key, len);
    var node = AllocateNewNode(index, nodeSize, previousNodeVersion);

    node->KeySize = key.Size;
    if (key.Options == SliceOptions.Key && key.Size > 0)
        key.CopyTo((byte*)node + Constants.NodeHeaderSize);
    node->Flags = flags;
    return node;
}
/// <summary>
/// Looks up the multi-value tree registered under (tree, key); returns false
/// (with mvTree null) when no registry exists or the key is absent.
/// </summary>
internal bool TryGetMultiValueTree(Tree tree, MemorySlice key, out Tree mvTree)
{
    if (_multiValueTrees != null)
        return _multiValueTrees.TryGetValue(Tuple.Create(tree, key), out mvTree);

    mvTree = null;
    return false;
}
/// <summary>
/// True when the page can fit an entry for the key/length, including the
/// offset slot and any new prefix.
/// </summary>
private bool HasSpaceFor(MemorySlice key, int len)
{
    var required = GetRequiredSpace(key, len);
    return HasSpaceFor(required);
}
/// <summary>
/// Removes the multi-value tree registered under (parentTree, key); false
/// when no registry exists or the key is not present.
/// </summary>
internal bool TryRemoveMultiValueTree(Tree parentTree, MemorySlice key)
{
    if (_multiValueTrees == null)
        return false;

    var lookupKey = Tuple.Create(parentTree, key);
    if (_multiValueTrees.ContainsKey(lookupKey) == false)
        return false;
    return _multiValueTrees.Remove(lookupKey);
}
/// <summary>
/// Creates a data node at the given slot and returns a pointer to the spot
/// where the caller must write the value bytes.
/// </summary>
public byte* AddDataNode(int index, MemorySlice key, int dataSize, ushort previousNodeVersion)
{
    Debug.Assert(dataSize >= 0);
    Debug.Assert(key.Options == SliceOptions.Key);

    var dataNode = CreateNode(index, key, NodeFlags.Data, dataSize, previousNodeVersion);
    dataNode->DataSize = dataSize;
    // the value area starts right after the node header and the key bytes
    return (byte*)dataNode + Constants.NodeHeaderSize + key.Size;
}