public IIterator MultiRead(Slice key)
{
    // Returns an iterator over the set of values stored under 'key' in this
    // multi-value tree. Small sets live inline in a nested page embedded in
    // the parent tree's node; larger sets are promoted to a full nested tree
    // (flagged as MultiValuePageRef).
    TreeNodeHeader *node;
    var page = FindPageFor(key, out node);
    if (page == null || page.LastMatch != 0)
    {
        // Key not present (LastMatch != 0 means the search landed on a
        // different key) - nothing to iterate.
        return(new EmptyIterator());
    }

    Debug.Assert(node != null);

    // Sanity check: the node we landed on must hold exactly the requested key.
    Slice fetchedNodeKey;
    using (TreeNodeHeader.ToSlicePtr(_llt.Allocator, node, out fetchedNodeKey))
    {
        if (SliceComparer.Equals(fetchedNodeKey, key) == false)
        {
            VoronUnrecoverableErrorException.Raise(_llt.Environment, "Was unable to retrieve the correct node. Data corruption possible");
        }
    }

    if (node->Flags == TreeNodeFlags.MultiValuePageRef)
    {
        // The values were promoted to a dedicated nested tree - iterate it.
        var tree = OpenMultiValueTree(key, node);

        return(tree.Iterate(true));
    }

    // Values are stored inline: interpret the node's data as a nested page.
    var ptr = DirectAccessFromHeader(node);
    var nestedPage = new TreePage(ptr, (ushort)GetDataSize(node));

    return(new TreePageIterator(_llt, key, this, nestedPage));
}
public void Validate(FixedSizeSchemaIndexDef actual)
{
    // Compares this (expected) fixed-size index definition against 'actual',
    // throwing on the first mismatch. Checks run in a fixed order:
    // null, Name, StartIndex, IsGlobal.
    if (actual == null)
    {
        throw new ArgumentNullException(nameof(actual), "Expected an index but received null");
    }

    if (SliceComparer.Equals(Name, actual.Name) == false)
    {
        throw new ArgumentException(
            $"Expected index to have Name='{Name}', got Name='{actual.Name}' instead",
            nameof(actual));
    }

    if (StartIndex != actual.StartIndex)
    {
        throw new ArgumentException(
            $"Expected index {Name} to have StartIndex='{StartIndex}', got StartIndex='{actual.StartIndex}' instead",
            nameof(actual));
    }

    if (IsGlobal != actual.IsGlobal)
    {
        throw new ArgumentException(
            $"Expected index {Name} to have IsGlobal='{IsGlobal}', got IsGlobal='{actual.IsGlobal}' instead",
            nameof(actual));
    }
}
public void Single_MultiAdd_And_Read_DataStored()
{
    // A single 1000-byte random value stored under one multi-value key must
    // round-trip through MultiAdd/MultiRead unchanged.
    var rng = new Random();
    var payload = new byte[1000];
    rng.NextBytes(payload);

    // Create the tree in its own transaction.
    using (var tx = Env.WriteTransaction())
    {
        tx.CreateTree("foo");
        tx.Commit();
    }

    // Store the random payload as a multi-value under "ChildTreeKey".
    using (var tx = Env.WriteTransaction())
    {
        Slice storedValue;
        Slice.From(Allocator, payload, out storedValue);
        tx.ReadTree("foo").MultiAdd("ChildTreeKey", storedValue);
        tx.Commit();
    }

    // Read it back and verify the bytes match what was written.
    using (var tx = Env.ReadTransaction())
    using (var iterator = tx.ReadTree("foo").MultiRead("ChildTreeKey"))
    {
        iterator.Seek(Slices.BeforeAllKeys);

        Slice expected;
        Slice.From(Allocator, payload, out expected);
        Assert.True(SliceComparer.Equals(iterator.CurrentKey, expected));
    }
}
public TableSchema DefineFixedSizeIndex(FixedSizeSchemaIndexDef index)
{
    // A fixed-size index is keyed by its name, so a missing or empty name
    // cannot be accepted.
    var nameIsMissing = index.Name.HasValue == false || SliceComparer.Equals(Slices.Empty, index.Name);
    if (nameIsMissing)
    {
        throw new ArgumentException("Fixed size index name must be non-empty", nameof(index));
    }

    _fixedSizeIndexes[index.Name] = index;

    // Fluent API: return the schema so definitions can be chained.
    return this;
}
private void FixedSchemaIndexDefEqual(TableSchema.FixedSizeSchemaIndexDef expectedIndex, TableSchema.FixedSizeSchemaIndexDef actualIndex)
{
    // Asserts that 'actualIndex' matches 'expectedIndex' field by field.
    // A null expected index requires a null actual index.
    if (expectedIndex == null)
    {
        // Fixed: use Assert.Null instead of Assert.Equal(null, ...) - the
        // dedicated assertion produces a clearer failure message (xUnit2003).
        Assert.Null(actualIndex);
    }
    else
    {
        // Fail with a proper assertion message instead of a
        // NullReferenceException when the actual index is missing.
        Assert.NotNull(actualIndex);
        Assert.Equal(expectedIndex.IsGlobal, actualIndex.IsGlobal);
        Assert.True(SliceComparer.Equals(expectedIndex.Name, actualIndex.Name));
        Assert.Equal(expectedIndex.StartIndex, actualIndex.StartIndex);
    }
}
public bool Equals(Tuple<Tree, Slice> x, Tuple<Tree, Slice> y)
{
    // Two null tuples compare equal; a null and a non-null tuple do not.
    if (x == null)
    {
        return y == null;
    }

    if (y == null)
    {
        return false;
    }

    // The trees must be the same reference, and the slices must compare equal.
    return x.Item1 == y.Item1 && SliceComparer.Equals(x.Item2, y.Item2);
}
public void DeleteTree(Slice name)
{
    // Deletes the tree named 'name': frees all of its pages, removes its root
    // object, and drops any cached multi-value trees that belong to it.
    // No-op if the tree does not exist.
    //
    // Fixed: the guard used the confusing `x == y == false` chain and threw an
    // error message copy/pasted from tree creation ("Cannot create a new
    // newRootTree ..."); it now names the actual operation.
    if (_lowLevelTransaction.Flags != TransactionFlags.ReadWrite)
    {
        throw new ArgumentException("Cannot delete a tree with a read only transaction");
    }

    Tree tree = ReadTree(name);
    if (tree == null)
    {
        return;
    }

    // Return every page owned by the tree to free space handling.
    foreach (var page in tree.AllPages())
    {
        _lowLevelTransaction.FreePage(page);
    }

    _lowLevelTransaction.RootObjects.Delete(name);

    if (_multiValueTrees != null)
    {
        // Collect first, then remove - the dictionary cannot be mutated while
        // it is being enumerated.
        var toRemove = new List<Tuple<Tree, Slice>>();

        foreach (var valueTree in _multiValueTrees)
        {
            var multiTree = valueTree.Key.Item1;

            if (SliceComparer.Equals(multiTree.Name, name))
            {
                toRemove.Add(valueTree.Key);
            }
        }

        foreach (var recordToRemove in toRemove)
        {
            _multiValueTrees.Remove(recordToRemove);
        }
    }

    // already created in ReadTree
    _trees.Remove(name);
}
public void RenameTree(Slice fromName, Slice toName)
{
    // Renames the tree 'fromName' to 'toName' by moving its root header under
    // the new name and refreshing the in-memory tree cache.
    //
    // Fixed: replaced the confusing `x == y == false` guard with a plain
    // inequality check, and corrected the "ensrued" typo in the trailing
    // comment. Runtime error messages are unchanged.
    if (_lowLevelTransaction.Flags != TransactionFlags.ReadWrite)
    {
        throw new ArgumentException("Cannot rename a new tree with a read only transaction");
    }

    if (SliceComparer.Equals(toName, Constants.RootTreeNameSlice))
    {
        throw new InvalidOperationException("Cannot create a tree with reserved name: " + toName);
    }

    if (ReadTree(toName) != null)
    {
        throw new ArgumentException("Cannot rename a tree with the name of an existing tree: " + toName);
    }

    Tree fromTree = ReadTree(fromName);
    if (fromTree == null)
    {
        throw new ArgumentException("Tree " + fromName + " does not exists");
    }

    // Move the root header from the old name to the new one.
    _lowLevelTransaction.RootObjects.Delete(fromName);

    byte *ptr;
    using (_lowLevelTransaction.RootObjects.DirectAdd(toName, sizeof(TreeRootHeader), out ptr))
        fromTree.State.CopyTo((TreeRootHeader *)ptr);

    fromTree.Rename(toName);
    fromTree.State.IsModified = true;

    // _trees was already ensured to be created in ReadTree
    _trees.Remove(fromName);
    _trees.Remove(toName);

    AddTree(toName, fromTree);
}
public TableSchema DefineKey(SchemaIndexDef index)
{
    // An unnamed (or empty-named) primary key gets the default PK name,
    // unless it is global - a global primary key must be named explicitly.
    var nameIsEmpty = index.Name.HasValue == false || SliceComparer.Equals(Slices.Empty, index.Name);

    if (index.IsGlobal && nameIsEmpty)
    {
        throw new ArgumentException("Name must be non empty for global index as primary key", nameof(index));
    }

    if (nameIsEmpty)
    {
        index.Name = PkSlice;
    }

    if (index.Count > 1)
    {
        throw new InvalidOperationException("Primary key must be a single field");
    }

    _primaryKey = index;

    // Fluent API: return the schema so definitions can be chained.
    return this;
}
protected unsafe Tuple <Slice, Slice> ReadKey(Transaction txh, Tree tree, Slice key)
{
    // Looks up 'key' in 'tree' and returns a (key slice, value slice) pair
    // pointing directly into the tree's node memory, or null when the key is
    // not present.
    TreeNodeHeader *node;
    tree.FindPageFor(key, out node);

    if (node == null)
    {
        return(null);
    }

    Slice item1;
    // NOTE(review): the scope returned by ToSlicePtr is not disposed here -
    // item1 is handed back to the caller inside the tuple, so its lifetime
    // must outlive this method; confirm the caller releases it via the
    // transaction allocator.
    TreeNodeHeader.ToSlicePtr(txh.Allocator, node, out item1);

    // FindPageFor may land on a nearby node when the exact key is absent;
    // verify we actually found the requested key.
    if (!SliceComparer.Equals(item1, key))
    {
        return(null);
    }

    Slice item2;
    // The value bytes follow the node header and the key in memory.
    Slice.External(txh.Allocator, (byte *)node + node->KeySize + Constants.Tree.NodeHeaderSize, (ushort)node->DataSize, out item2);

    return(Tuple.Create(item1, item2));
}
public unsafe void ShouldPreserveTables(int entries, int seed)
{
    // Verifies that storage compaction preserves table data: writes 'entries'
    // random rows, deletes roughly half of them, compacts the environment,
    // and checks that exactly the surviving rows (keys and values) remain.
    // 'seed' makes the random choices reproducible.

    // Create random docs to check everything is preserved
    using (var allocator = new ByteStringContext(SharedMultipleUseFlag.None))
    {
        var create = new Dictionary <Slice, long>();
        var delete = new List <Slice>();
        var r = new Random(seed);

        for (var i = 0; i < entries; i++)
        {
            Slice key;
            Slice.From(allocator, "test" + i, out key);
            create.Add(key, r.Next());

            // ~50% of the keys are scheduled for deletion before compaction.
            if (r.NextDouble() < 0.5)
            {
                delete.Add(key);
            }
        }

        // Create the schema: a single-field, non-global primary key.
        var schema = new TableSchema()
            .DefineKey(new TableSchema.SchemaIndexDef
            {
                StartIndex = 0,
                Count = 1,
                IsGlobal = false
            });

        using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
        {
            // Create table in the environment and populate it.
            using (var tx = env.WriteTransaction())
            {
                schema.Create(tx, "test", 16);
                var table = tx.OpenTable(schema, "test");

                foreach (var entry in create)
                {
                    var value = entry.Value;
                    table.Set(new TableValueBuilder
                    {
                        entry.Key,
                        value
                    });
                }

                tx.Commit();
            }

            // Sanity check: every created entry made it into the table.
            using (var tx = env.ReadTransaction())
            {
                var table = tx.OpenTable(schema, "test");
                Assert.Equal(table.NumberOfEntries, entries);
            }

            // Delete some of the entries (this is so that compaction makes sense)
            using (var tx = env.WriteTransaction())
            {
                var table = tx.OpenTable(schema, "test");
                foreach (var entry in delete)
                {
                    table.DeleteByKey(entry);
                }
                tx.Commit();
            }
        }

        // Compact the original environment into a fresh directory.
        var compactedData = Path.Combine(DataDir, "Compacted");
        StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir),
            (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactedData));

        using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(compactedData)))
        {
            using (var tx = compacted.ReadTransaction())
            {
                var table = tx.OpenTable(schema, "test");
                foreach (var entry in create)
                {
                    TableValueReader reader;
                    var hasValue = table.ReadByKey(entry.Key, out reader);

                    if (delete.Contains(entry.Key))
                    {
                        // This key should not be here
                        Assert.False(hasValue);
                    }
                    else
                    {
                        // This key should be there
                        Assert.True(hasValue);

                        // Data should be the same: column 0 is the key bytes,
                        // column 1 is the stored long value.
                        int size;
                        byte *ptr = reader.Read(0, out size);
                        Slice current;
                        using (Slice.External(allocator, ptr, size, out current))
                            Assert.True(SliceComparer.Equals(current, entry.Key));

                        ptr = reader.Read(1, out size);
                        Assert.Equal(entry.Value, *(long *)ptr);
                    }
                }
                tx.Commit();
            }
        }
    }
}
public void MultiAdd(Slice key, Slice value)
{
    // Adds 'value' to the set of values stored under 'key' in this
    // multi-value tree. Storage escalates through three representations as
    // the set grows: a nested page inline in the node, an expanded nested
    // page, and finally a dedicated nested tree (MultiValuePageRef).
    // Adding a value that is already present is a no-op.
    if (!value.HasValue)
    {
        throw new ArgumentNullException(nameof(value));
    }

    // A value must fit in a single node; multi trees never use overflow pages.
    int maxNodeSize = Llt.DataPager.NodeMaxSize;
    if (value.Size > maxNodeSize)
    {
        throw new ArgumentException("Cannot add a value to child tree that is over " + maxNodeSize + " bytes in size", nameof(value));
    }
    if (value.Size == 0)
    {
        throw new ArgumentException("Cannot add empty value to child tree");
    }

    State.IsModified = true;
    State.Flags |= TreeFlags.MultiValueTrees;

    TreeNodeHeader *node;
    var page = FindPageFor(key, out node);
    if (page == null || page.LastMatch != 0)
    {
        // 'key' does not exist yet - create a fresh multi-value entry for it.
        MultiAddOnNewValue(key, value, maxNodeSize);
        return;
    }

    // We are going to mutate the page, so get a writable copy first.
    page = ModifyPage(page);

    var item = page.GetNode(page.LastSearchPosition);

    byte *_;
    // already was turned into a multi tree, not much to do here
    if (item->Flags == TreeNodeFlags.MultiValuePageRef)
    {
        var existingTree = OpenMultiValueTree(key, item);
        existingTree.DirectAdd(value, 0, out _).Dispose();
        return;
    }

    if (item->Flags == TreeNodeFlags.PageRef)
    {
        throw new InvalidOperationException("Multi trees don't use overflows");
    }

    // Values are currently stored inline: treat the node's data as a nested page.
    var nestedPagePtr = DirectAccessFromHeader(item);

    var nestedPage = new TreePage(nestedPagePtr, (ushort)GetDataSize(item));

    var existingItem = nestedPage.Search(_llt, value);
    if (nestedPage.LastMatch != 0)
    {
        existingItem = null;// not an actual match, just greater than
    }

    if (existingItem != null)
    {
        // maybe same value added twice?
        Slice tmpKey;
        using (TreeNodeHeader.ToSlicePtr(_llt.Allocator, item, out tmpKey))
        {
            if (SliceComparer.Equals(tmpKey, value))
            {
                return;                 // already there, turning into a no-op
            }
        }
        // NOTE(review): a node at the search position that is not an exact
        // match for 'value' is removed before the new value is inserted -
        // presumably to replace a stale entry; confirm against the nested
        // page insertion semantics.
        nestedPage.RemoveNode(nestedPage.LastSearchPosition);
    }

    if (nestedPage.HasSpaceFor(_llt, value, 0))
    {
        // we are now working on top of the modified root page, we can just modify the memory directly
        nestedPage.AddDataNode(nestedPage.LastSearchPosition, value, 0);
        return;
    }

    if (page.HasSpaceFor(_llt, value, 0))
    {
        // page has space for an additional node in nested page ...
        var requiredSpace = nestedPage.PageSize +                  // existing page
                            nestedPage.GetRequiredSpace(value, 0); // new node
        if (requiredSpace + Constants.Tree.NodeHeaderSize <= maxNodeSize)
        {
            // ... and it won't require to create an overflow, so we can just
            // expand the current value, no need to create a nested tree yet
            EnsureNestedPagePointer(page, item, ref nestedPage, ref nestedPagePtr);

            // Grow to the next power of two, capped at the largest size that
            // still fits in a single node.
            var newPageSize = (ushort)Math.Min(Bits.NextPowerOf2(requiredSpace), maxNodeSize - Constants.Tree.NodeHeaderSize);
            ExpandMultiTreeNestedPageSize(key, value, nestedPagePtr, newPageSize, nestedPage.PageSize);

            return;
        }
    }

    EnsureNestedPagePointer(page, item, ref nestedPage, ref nestedPagePtr);

    // we now have to convert this into a tree instance, instead of just a nested page
    var tree = Create(_llt, _tx, key, TreeFlags.MultiValue);
    for (int i = 0; i < nestedPage.NumberOfEntries; i++)
    {
        // Copy every existing value out of the nested page into the new tree.
        Slice existingValue;
        using (nestedPage.GetNodeKey(_llt, i, out existingValue))
        {
            tree.DirectAdd(existingValue, 0, out _).Dispose();
        }
    }
    tree.DirectAdd(value, 0, out _).Dispose();
    _tx.AddMultiValueTree(this, key, tree);
    // we need to record that we switched to tree mode here, so the next call wouldn't also try to create the tree again
    DirectAdd(key, sizeof(TreeRootHeader), TreeNodeFlags.MultiValuePageRef, out _).Dispose();
}