public void CanAllocateEnoughToFillEntireSection()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);
        section.DataMoved += (previousId, newId, data, size) => { };

        int allocationSize = 1020;
        long id;
        var list = new List<long>();

        while (section.TryAllocate(allocationSize, out id))
        {
            list.Add(id);
        }

        Assert.False(section.TryAllocate(allocationSize, out id));

        var idToFree = list[list.Count / 2];
        section.Free(idToFree);

        Assert.True(section.TryAllocate(allocationSize, out id));
    }
}
public void CanReadAndWriteFromSection_AfterFlush()
{
    Env.Options.ManualFlushing = true;

    long pageNumber;
    long id;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        //var section = new RawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        Assert.True(section.TryAllocate(15, out id));
        WriteValue(section, id, "Hello There");
        tx.Commit();
    }

    Env.FlushLogToDataFile();

    using (var tx = Env.ReadTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        AssertValueMatches(section, id, "Hello There");
    }
}
public void ShouldNotReturnMoreIdsThanTotalNumberOfEntriesInSection()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    long newId;
    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        Assert.True(section.TryAllocate(16, out newId));
        WriteValue(section, newId, 1.ToString("0000000000000"));

        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        var ids = section.GetAllIdsInSectionContaining(newId);

        Assert.Equal(section.NumberOfEntries, ids.Count);
        Assert.Equal(1, ids.Count);
        Assert.Equal(newId, ids[0]);

        AssertValueMatches(section, newId, 1.ToString("0000000000000"));
    }
}
public void Breaking_large_allocation_in_scratch_file_has_to_really_create_separate_pages_of_size_one()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        // just to increment the transaction id
        tx.LowLevelTransaction.ModifyPage(0);
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx, pageNumber);
        section.DeleteSection(pageNumber);

        var allocatePage = tx.LowLevelTransaction.AllocatePage(1);

        // the point of this case is that free space handling returns the same page number we just freed by deleting the raw data section
        // if the assertion below fails it means we have changed voron internals and this test might require adjustments
        Assert.Equal(allocatePage.PageNumber, pageNumber);

        tx.Commit();
    }

    Env.FlushLogToDataFile();

    using (var tx = Env.WriteTransaction())
    {
        // just to increment the transaction id
        tx.LowLevelTransaction.ModifyPage(0);
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        // just to increment the transaction id
        tx.LowLevelTransaction.ModifyPage(0);
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        // ensure the call below won't throw 'An item with the same key has already been added'
        // from ScratchBufferFile.BreakLargeAllocationToSeparatePages
        ActiveRawDataSmallSection.Create(tx, "test", (byte)TableType.None);
        tx.Commit();
    }
}
public void CanReadAndWriteFromSection()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    long id;
    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        Assert.True(section.TryAllocate(15, out id));
        WriteValue(section, id, "Hello There");
        tx.Commit();
    }

    using (var tx = Env.ReadTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        AssertValueMatches(section, id, "Hello There");
    }
}
private static void WriteValue(ActiveRawDataSmallSection section, long id, string value)
{
    var bytes = Encoding.UTF8.GetBytes(value);
    fixed (byte* p = bytes)
    {
        Assert.True(section.TryWrite(id, p, bytes.Length));
    }
}
/// <summary>
/// A table is stored inside a tree, and has the following keys in it:
///
/// - active-section -> page number - the page number of the current active small data section
/// - inactive-sections -> fixed size tree with no content where the keys are the page numbers of inactive small raw data sections
/// - large-values -> fixed size tree with no content where the keys are the page numbers of the large values
/// - for each index:
///   - If it can fit into a fixed size tree, use that.
///   - Otherwise, create a tree (whose key would be the indexed field value and the value would
///     be a fixed size tree of the ids of all the matching values)
/// - stats -> header information about the table (number of entries, etc.)
/// </summary>
public void Create(Transaction tx, string name)
{
    if (_pk == null && _indexes.Count == 0 && _fixedSizeIndexes.Count == 0)
    {
        throw new InvalidOperationException($"Cannot create table {name} without a primary key or any indexes");
    }

    var tableTree = tx.CreateTree(name);
    if (tableTree.State.NumberOfEntries > 0)
    {
        return; // this was already created
    }

    var rawDataActiveSection = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, name);
    Slice pageNumber = Slice.From(tx.Allocator, EndianBitConverter.Little.GetBytes(rawDataActiveSection.PageNumber), ByteStringType.Immutable);
    tableTree.Add(ActiveSection, pageNumber);

    var stats = (TableSchemaStats*)tableTree.DirectAdd(Stats, sizeof(TableSchemaStats));
    stats->NumberOfEntries = 0;

    if (_pk != null)
    {
        if (_pk.IsGlobal == false)
        {
            var indexTree = Tree.Create(tx.LowLevelTransaction, tx);
            var treeHeader = tableTree.DirectAdd(_pk.NameAsSlice, sizeof(TreeRootHeader));
            indexTree.State.CopyTo((TreeRootHeader*)treeHeader);
        }
        else
        {
            tx.CreateTree(_pk.Name);
        }
    }

    foreach (var indexDef in _indexes.Values)
    {
        if (indexDef.IsGlobal == false)
        {
            var indexTree = Tree.Create(tx.LowLevelTransaction, tx);
            var treeHeader = tableTree.DirectAdd(indexDef.NameAsSlice, sizeof(TreeRootHeader));
            indexTree.State.CopyTo((TreeRootHeader*)treeHeader);
        }
        else
        {
            tx.CreateTree(indexDef.Name);
        }
    }
}
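A minimal usage sketch of the Create method above, not taken from the original source: it assumes the schema exposes fluent builder members such as DefineKey and a SchemaIndexDef type with StartIndex and IsGlobal properties; those member names are illustrative assumptions rather than confirmed API. The transaction handling mirrors the tests in this file.

// Hypothetical sketch - DefineKey, SchemaIndexDef and StartIndex are assumed names.
var schema = new TableSchema()
    .DefineKey(new TableSchema.SchemaIndexDef
    {
        StartIndex = 0,   // assume the first field of the stored value acts as the primary key
        IsGlobal = false  // keep the primary key tree local to this table's tree
    });

using (var tx = Env.WriteTransaction())
{
    // Creates the table tree, its active raw data section, the stats entry and the index trees described above.
    schema.Create(tx, "docs");
    tx.Commit();
}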
public void ShouldReturnValidIdsOfEntriesInSectionThatAreReadable()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    long idWhichIsGoingToBeDeleted1;
    long idWhichIsGoingToBeDeleted2;
    long existingId;

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        Assert.True(section.TryAllocate(2000, out idWhichIsGoingToBeDeleted1));
        WriteValue(section, idWhichIsGoingToBeDeleted1, 1.ToString("0000000000000"));
        Assert.True(section.TryAllocate(2000, out idWhichIsGoingToBeDeleted2));
        WriteValue(section, idWhichIsGoingToBeDeleted2, 2.ToString("0000000000000"));

        Assert.True(section.TryAllocate(2000, out existingId));
        WriteValue(section, existingId, 3.ToString("0000000000000"));

        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        section.Free(idWhichIsGoingToBeDeleted1);
        section.Free(idWhichIsGoingToBeDeleted2);

        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        var ids = section.GetAllIdsInSectionContaining(existingId);

        Assert.Equal(1, ids.Count);
        Assert.Equal(existingId, ids[0]);

        AssertValueMatches(section, existingId, 3.ToString("0000000000000"));
    }
}
private static void AssertValueMatches(ActiveRawDataSmallSection section, long id, string expected)
{
    int size;
    var p = section.DirectRead(id, out size);
    var buffer = new byte[size];
    fixed (byte* bp = buffer)
    {
        Memory.Copy(bp, p, size);
    }
    var actual = Encoding.UTF8.GetString(buffer, 0, size);
    Assert.Equal(expected, actual);
}
public void WhatShouldWeDoHere()
{
    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    using (var tx = Env.WriteTransaction())
    {
        var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);

        Assert.Throws<InvalidOperationException>(() => section.Free(0));
    }
}
private long AllocateFromSmallActiveSection(int size)
{
    long id;
    if (ActiveDataSmallSection.TryAllocate(size, out id) == false)
    {
        InactiveSections.Add(_activeDataSmallSection.PageNumber);

        using (var it = ActiveCandidateSection.Iterate())
        {
            if (it.Seek(long.MinValue))
            {
                do
                {
                    var sectionPageNumber = it.CurrentKey;
                    _activeDataSmallSection = new ActiveRawDataSmallSection(_tx.LowLevelTransaction, sectionPageNumber);
                    _activeDataSmallSection.DataMoved += OnDataMoved;
                    if (_activeDataSmallSection.TryAllocate(size, out id))
                    {
                        ActiveCandidateSection.Delete(sectionPageNumber);
                        return id;
                    }
                } while (it.MoveNext());
            }
        }

        // grow the next section by doubling its page count, capped so it still fits in a ushort
        // (the original line used Math.Max, which always yielded ushort.MaxValue and defeated the doubling)
        var newNumberOfPages = (ushort)Math.Min(ActiveDataSmallSection.NumberOfPages * 2, ushort.MaxValue);
        _activeDataSmallSection = ActiveRawDataSmallSection.Create(_tx.LowLevelTransaction, Name, newNumberOfPages);
        _activeDataSmallSection.DataMoved += OnDataMoved;

        Slice pageNumber;
        var val = _activeDataSmallSection.PageNumber;
        using (Slice.External(_tx.Allocator, (byte*)&val, sizeof(long), out pageNumber))
        {
            _tableTree.Add(TableSchema.ActiveSectionSlice, pageNumber);
        }

        var allocationResult = _activeDataSmallSection.TryAllocate(size, out id);
        Debug.Assert(allocationResult);
    }
    return id;
}
public void CanReadAndWriteFromSection_SingleTx()
{
    Env.Options.ManualFlushing = true;

    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx, "test", (byte)TableType.None);

        long id;
        Assert.True(section.TryAllocate(15, out id));
        WriteValue(section, id, "Hello There");

        AssertValueMatches(section, id, "Hello There");

        tx.Commit();
    }

    Env.FlushLogToDataFile();
}
public void CanAllocateMultipleValues(int seed)
{
    var random = new Random(seed);

    long pageNumber;
    using (var tx = Env.WriteTransaction())
    {
        var section = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, "test", (byte)TableType.None);
        pageNumber = section.PageNumber;
        tx.Commit();
    }

    var dic = new Dictionary<long, int>();
    for (int i = 0; i < 100; i++)
    {
        long id;
        using (var tx = Env.WriteTransaction())
        {
            var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);
            Assert.True(section.TryAllocate(random.Next(16, 256), out id));
            WriteValue(section, id, i.ToString("0000000000000"));
            dic[id] = i;
            tx.Commit();
        }

        using (var tx = Env.WriteTransaction())
        {
            var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);
            AssertValueMatches(section, id, i.ToString("0000000000000"));
        }
    }

    foreach (var kvp in dic)
    {
        using (var tx = Env.WriteTransaction())
        {
            var section = new ActiveRawDataSmallSection(tx.LowLevelTransaction, pageNumber);
            AssertValueMatches(section, kvp.Key, kvp.Value.ToString("0000000000000"));
        }
    }
}
private long AllocateFromSmallActiveSection(int size)
{
    long id;
    if (ActiveDataSmallSection.TryAllocate(size, out id) == false)
    {
        InactiveSections.Add(_activeDataSmallSection.PageNumber);

        using (var it = ActiveCandidateSection.Iterate())
        {
            if (it.Seek(long.MinValue))
            {
                do
                {
                    var sectionPageNumber = it.CurrentKey;
                    _activeDataSmallSection = new ActiveRawDataSmallSection(_tx.LowLevelTransaction, sectionPageNumber);
                    if (_activeDataSmallSection.TryAllocate(size, out id))
                    {
                        ActiveCandidateSection.Delete(sectionPageNumber);
                        return id;
                    }
                } while (it.MoveNext());
            }
        }

        _activeDataSmallSection = ActiveRawDataSmallSection.Create(_tx.LowLevelTransaction, Name);

        var pageNumber = Slice.From(_tx.Allocator, EndianBitConverter.Little.GetBytes(_activeDataSmallSection.PageNumber), ByteStringType.Immutable);
        _tableTree.Add(TableSchema.ActiveSection, pageNumber);

        var allocationResult = _activeDataSmallSection.TryAllocate(size, out id);
        Debug.Assert(allocationResult);
    }
    return id;
}
/// <summary>
/// A table is stored inside a tree, and has the following keys in it:
///
/// - active-section -> page number - the page number of the current active small data section
/// - inactive-sections -> fixed size tree with no content where the keys are the page numbers of inactive small raw data sections
/// - large-values -> fixed size tree with no content where the keys are the page numbers of the large values
/// - for each index:
///   - If it can fit into a fixed size tree, use that.
///   - Otherwise, create a tree (whose key would be the indexed field value and the value would
///     be a fixed size tree of the ids of all the matching values)
/// - stats -> header information about the table (number of entries, etc.)
/// - schemas -> schema definition for the table
/// </summary>
public void Create(Transaction tx, Slice name, ushort? sizeInPages)
{
    if (_primaryKey == null && _indexes.Count == 0 && _fixedSizeIndexes.Count == 0)
    {
        throw new InvalidOperationException($"Cannot create table {name} without a primary key or any indexes");
    }

    var tableTree = tx.CreateTree(name, RootObjectType.Table);
    if (tableTree.State.NumberOfEntries > 0)
    {
        return; // this was already created
    }

    // Create raw data. This is where we will actually store the documents
    using (var rawDataActiveSection = ActiveRawDataSmallSection.Create(tx.LowLevelTransaction, name, TableType, sizeInPages))
    {
        long val = rawDataActiveSection.PageNumber;
        Slice pageNumber;
        using (Slice.External(tx.Allocator, (byte*)&val, sizeof(long), ByteStringType.Immutable, out pageNumber))
        {
            tableTree.Add(ActiveSectionSlice, pageNumber);
        }

        byte* ptr;
        using (tableTree.DirectAdd(StatsSlice, sizeof(TableSchemaStats), out ptr))
        {
            var stats = (TableSchemaStats*)ptr;
            stats->NumberOfEntries = 0;
        }

        var tablePageAllocator = new NewPageAllocator(tx.LowLevelTransaction, tableTree);
        tablePageAllocator.Create();

        var globalPageAllocator = new NewPageAllocator(tx.LowLevelTransaction, tx.LowLevelTransaction.RootObjects);
        globalPageAllocator.Create();

        if (_primaryKey != null)
        {
            if (_primaryKey.IsGlobal == false)
            {
                using (var indexTree = Tree.Create(tx.LowLevelTransaction, tx, _primaryKey.Name, isIndexTree: true, newPageAllocator: tablePageAllocator))
                {
                    using (tableTree.DirectAdd(_primaryKey.Name, sizeof(TreeRootHeader), out ptr))
                    {
                        indexTree.State.CopyTo((TreeRootHeader*)ptr);
                    }
                }
            }
            else
            {
                tx.CreateTree(_primaryKey.Name.ToString(), isIndexTree: true, newPageAllocator: globalPageAllocator);
            }
        }

        foreach (var indexDef in _indexes.Values)
        {
            if (indexDef.IsGlobal == false)
            {
                using (var indexTree = Tree.Create(tx.LowLevelTransaction, tx, indexDef.Name, isIndexTree: true, newPageAllocator: tablePageAllocator))
                {
                    using (tableTree.DirectAdd(indexDef.Name, sizeof(TreeRootHeader), out ptr))
                    {
                        indexTree.State.CopyTo((TreeRootHeader*)ptr);
                    }
                }
            }
            else
            {
                tx.CreateTree(indexDef.Name.ToString(), isIndexTree: true, newPageAllocator: globalPageAllocator);
            }
        }

        // Serialize the schema into the table's tree
        var serializer = SerializeSchema();
        using (tableTree.DirectAdd(SchemasSlice, serializer.Length, out ptr))
        {
            fixed (byte* source = serializer)
            {
                Memory.Copy(ptr, source, serializer.Length);
            }
        }
    }
}
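The schema serialized under SchemasSlice is read back with TableSchema.ReadFrom, exactly as DeleteTable does further below. The following sketch isolates that step; it assumes the ReadTree helper and the allocator are reachable from the calling context, and the variable names are illustrative.

// Sketch: reading the serialized schema back out of the table's tree (mirrors DeleteTable below).
var tableTree = ReadTree(name, RootObjectType.Table);
var writtenSchemaData = tableTree.DirectRead(TableSchema.SchemasSlice);
var writtenSchemaDataSize = tableTree.GetDataSize(TableSchema.SchemasSlice);
var schema = TableSchema.ReadFrom(Allocator, writtenSchemaData, writtenSchemaDataSize);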
public void DeleteTable(string name)
{
    var tableTree = ReadTree(name, RootObjectType.Table);

    var writtenSchemaData = tableTree.DirectRead(TableSchema.SchemasSlice);
    var writtenSchemaDataSize = tableTree.GetDataSize(TableSchema.SchemasSlice);
    var schema = TableSchema.ReadFrom(Allocator, writtenSchemaData, writtenSchemaDataSize);

    var table = OpenTable(schema, name);

    // delete table data
    table.DeleteByPrimaryKey(Slices.BeforeAllKeys, x =>
    {
        if (schema.Key.IsGlobal)
        {
            return table.IsOwned(x.Reader.Id);
        }

        return true;
    });

    if (schema.Key.IsGlobal == false)
    {
        var pkTree = table.GetTree(schema.Key);
        DeleteTree(pkTree, isInRoot: false);
        tableTree.Delete(pkTree.Name);
    }

    // index trees should be already removed but just in case let's go over them and ensure they're really deleted
    foreach (var indexDef in schema.Indexes.Values)
    {
        if (indexDef.IsGlobal) // must not delete global indexes
        {
            continue;
        }

        if (tableTree.Read(indexDef.Name) == null)
        {
            continue;
        }

        var indexTree = table.GetTree(indexDef);
        DeleteTree(indexTree, isInRoot: false);
        tableTree.Delete(indexTree.Name);
    }

    foreach (var indexDef in schema.FixedSizeIndexes.Values)
    {
        if (indexDef.IsGlobal) // must not delete global indexes
        {
            continue;
        }

        if (tableTree.Read(indexDef.Name) == null)
        {
            continue;
        }

        var index = table.GetFixedSizeTree(indexDef);
        DeleteFixedTree(index, isInRoot: false);
        tableTree.Delete(index.Name);
    }

    // raw data sections
    table.ActiveDataSmallSection.FreeRawDataSectionPages();

    if (tableTree.Read(TableSchema.ActiveCandidateSectionSlice) != null)
    {
        using (var it = table.ActiveCandidateSection.Iterate())
        {
            if (it.Seek(long.MinValue))
            {
                var sectionPageNumber = it.CurrentKey;
                var section = new ActiveRawDataSmallSection(this, sectionPageNumber);
                section.FreeRawDataSectionPages();
            }
        }

        DeleteFixedTree(table.ActiveCandidateSection, isInRoot: false);
    }

    if (tableTree.Read(TableSchema.InactiveSectionSlice) != null)
    {
        DeleteFixedTree(table.InactiveSections, isInRoot: false);
    }

    DeleteTree(name);

    using (Slice.From(Allocator, name, ByteStringType.Immutable, out var nameSlice))
    {
        _tables.Remove(nameSlice);
    }
}
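A minimal usage sketch for DeleteTable, assuming it is exposed on the write transaction object (the member references to ReadTree, OpenTable, Allocator and _tables suggest this, but that placement is an assumption); the table name is illustrative.

using (var tx = Env.WriteTransaction())
{
    // Drops the table's data, its per-table index trees, its raw data sections and the table tree itself.
    tx.DeleteTable("docs");
    tx.Commit();
}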