public void WriteGather(long position, IntPtr[] pages)
{
    _locker.EnterWriteLock();
    try
    {
        if (position != _lastPos)
            throw new InvalidOperationException("Journal writes must be to the next location in the journal");

        var size = pages.Length * AbstractPager.PageSize;
        _lastPos += size;

        var handle = Marshal.AllocHGlobal(size);

        var buffer = new Buffer
        {
            Handle = handle,
            Pointer = (byte*)handle.ToPointer(),
            SizeInPages = pages.Length
        };
        _buffers = _buffers.Append(buffer);

        for (int index = 0; index < pages.Length; index++)
        {
            MemoryUtils.Copy(buffer.Pointer + (index * AbstractPager.PageSize), (byte*)pages[index].ToPointer(), AbstractPager.PageSize);
        }
    }
    finally
    {
        _locker.ExitWriteLock();
    }
}
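Every example on this page funnels raw bytes through MemoryUtils.Copy, whose implementation is not shown here. As a minimal sketch, it can be assumed to behave like a plain memcpy; the stand-in below (including the name MemoryUtilsSketch) is hypothetical and only illustrates the assumed contract:

public static unsafe class MemoryUtilsSketch
{
    // Hypothetical stand-in for Voron's MemoryUtils.Copy, assuming it is a
    // straight byte copy. The real implementation may use a tuned block copy;
    // here we simply forward to Buffer.MemoryCopy (available since .NET 4.6).
    public static void Copy(byte* dest, byte* src, int count)
    {
        System.Buffer.MemoryCopy(src, dest, count, count);
    }
}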
internal Page ModifyPage(long num, Tree tree, Page page)
{
    _env.AssertFlushingNotFailed();

    page = page ?? GetReadOnlyPage(num);

    if (page.Dirty)
        return page;

    if (_dirtyPages.Contains(num))
    {
        page.Dirty = true;
        return page;
    }

    var newPage = AllocatePage(1, PageFlags.None, num); // allocate new page in a log file but with the same number
    MemoryUtils.Copy(newPage.Base, page.Base, AbstractPager.PageSize);
    newPage.LastSearchPosition = page.LastSearchPosition;
    newPage.LastMatch = page.LastMatch;
    tree.RecentlyFoundPages.Reset(num);

    return newPage;
}
public static void CopyTo(Transaction tx, NodeHeader* node, byte* dest)
{
    if (node->Flags == NodeFlags.PageRef)
    {
        var overFlowPage = tx.GetReadOnlyPage(node->PageNumber);
        MemoryUtils.Copy(dest, overFlowPage.Base + Constants.PageHeaderSize, overFlowPage.OverflowSize);
        return; // a page ref node carries no inline data; don't fall through and overwrite the copy
    }
    MemoryUtils.Copy(dest, (byte*)node + node->KeySize + Constants.NodeHeaderSize, node->DataSize);
}
public void CopyTo(byte[] dest)
{
    if (Array == null)
    {
        fixed (byte* p = dest)
            MemoryUtils.Copy(p, Pointer, Size);
        return;
    }
    Buffer.BlockCopy(Array, 0, dest, 0, Size);
}
public void CreatePagesSnapshot()
{
    _copiedPages = new byte[CompressedPages.Length * AbstractPager.PageSize];

    fixed (byte* p = PagesSnapshot) // PagesSnapshot exposes the _copiedPages buffer allocated above
    {
        for (int i = 0; i < CompressedPages.Length; i++)
        {
            MemoryUtils.Copy(p + (i * AbstractPager.PageSize), (byte*)CompressedPages[i].ToPointer(), AbstractPager.PageSize);
        }
    }
}
public void Truncate(Transaction tx, int i)
{
    if (i >= NumberOfEntries)
        return;

    // when truncating, we copy the values to a tmp page
    // this has the effect of compacting the page data and avoiding
    // internal page fragmentation
    TemporaryPage tmp;
    using (tx.Environment.GetTemporaryPage(tx, out tmp))
    {
        var copy = tmp.GetTempPage(KeysPrefixed);
        copy.Flags = Flags;
        copy.ClearPrefixInfo();

        var slice = CreateNewEmptyKey();

        for (int j = 0; j < i; j++)
        {
            var node = GetNode(j);
            SetNodeKey(node, ref slice);
            copy.CopyNodeDataToEndOfPage(node, copy.PrepareKeyToInsert(slice, copy.NumberOfEntries));
        }

        MemoryUtils.Copy(_base + Constants.PageHeaderSize, copy._base + Constants.PageHeaderSize, _pageSize - Constants.PageHeaderSize);

        if (KeysPrefixed)
        {
            ClearPrefixInfo();
            _prefixSection->NextPrefixId = copy._prefixSection->NextPrefixId;

            for (var prefixId = 0; prefixId < _prefixSection->NextPrefixId; prefixId++)
            {
                _prefixSection->PrefixOffsets[prefixId] = copy._prefixSection->PrefixOffsets[prefixId];
            }
        }

        Upper = copy.Upper;
        Lower = copy.Lower;
    }

    if (LastSearchPosition > i)
        LastSearchPosition = i;
}
public override void CopyTo(byte* dest)
{
    if (Array == null)
    {
        MemoryUtils.Copy(dest, Pointer, Size);
        return;
    }
    fixed (byte* a = Array)
    {
        MemoryUtils.Copy(dest, a, Size);
    }
}
public MemorySlice GetNodeKey(NodeHeader* node)
{
    if (KeysPrefixed == false)
    {
        var keySize = node->KeySize;
        var key = new byte[keySize];

        fixed (byte* ptr = key)
            MemoryUtils.Copy(ptr, (byte*)node + Constants.NodeHeaderSize, keySize);

        return new Slice(key);
    }

    if (node->KeySize == 0)
        return new PrefixedSlice(Slice.Empty);

    var prefixHeader = (PrefixedSliceHeader*)((byte*)node + Constants.NodeHeaderSize);

    var nonPrefixedSize = prefixHeader->NonPrefixedDataSize;
    var nonPrefixedData = new byte[nonPrefixedSize];

    fixed (byte* ptr = nonPrefixedData)
        MemoryUtils.Copy(ptr, (byte*)prefixHeader + Constants.PrefixedSliceHeaderSize, nonPrefixedSize);

    var prefixedSlice = new PrefixedSlice(prefixHeader->PrefixId, prefixHeader->PrefixUsage, new Slice(nonPrefixedData));

    if (prefixHeader->PrefixId == PrefixedSlice.NonPrefixedId)
        return prefixedSlice;

    AssertPrefixNode(prefixedSlice.Header.PrefixId);

    var prefixNodePtr = (PrefixNodeHeader*)(_base + _prefixSection->PrefixOffsets[prefixedSlice.Header.PrefixId]);

    var prefixLength = prefixNodePtr->PrefixLength;
    var prefixData = new byte[prefixLength];

    fixed (byte* ptr = prefixData)
        MemoryUtils.Copy(ptr, (byte*)prefixNodePtr + Constants.PrefixNodeHeaderSize, prefixLength);

    prefixedSlice.Prefix = new PrefixNode(new PrefixNodeHeader { PrefixLength = prefixLength }, prefixData, PageNumber);

    return prefixedSlice;
}
public void Add(Slice key, byte[] value, ushort? version = null)
{
    if (value == null)
        throw new ArgumentNullException("value");

    State.IsModified = true;
    var pos = DirectAdd(key, value.Length, version: version);

    fixed (byte* src = value)
    {
        MemoryUtils.Copy(pos, src, value.Length);
    }
}
private void WriteVariableSizeFields(byte* ptr)
{
    if (VariableSizeWrites == null)
        return;

    var fieldOffsetsSize = VariableFieldOffsetSize * VariableSizeWrites.Length;
    var offsetsPointer = ptr + _schema.FixedSize;
    var fieldPointer = offsetsPointer + fieldOffsetsSize;

    var offsets = new uint[VariableSizeWrites.Length];

    for (int i = 0; i < VariableSizeWrites.Length; i++)
    {
        var write = VariableSizeWrites[i];
        int valueLength = write.ValueSize;

        offsets[i] = (uint)(fieldPointer - ptr);

        Write7BitEncodedInt(fieldPointer, valueLength);
        fieldPointer += write.ValueSizeLength;

        if (write.Value != null) // we have an array of bytes
        {
            fixed (byte* valuePtr = write.Value)
            {
                MemoryUtils.Copy(fieldPointer, valuePtr, valueLength);
            }
        }
        else // we have a string
        {
            fixed (char* valuePtr = write.ValueString)
            {
                Encoding.UTF8.GetBytes(valuePtr, write.ValueString.Length, fieldPointer, valueLength);
            }
        }
        fieldPointer += valueLength;
    }

    fixed (uint* p = offsets)
    {
        MemoryUtils.Copy(offsetsPointer, (byte*)p, fieldOffsetsSize);
    }
}
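WriteVariableSizeFields (above, and in the byte-only variant later on this page) length-prefixes each value with Write7BitEncodedInt, which is not shown here. A minimal sketch of that encoding, assuming it follows the standard BinaryWriter varint convention (7 payload bits per byte, high bit as a continuation flag); write.ValueSizeLength would then be the number of bytes this emits:

// Hedged sketch: standard 7-bit varint encoding, assumed (not confirmed by
// this page) to match the Write7BitEncodedInt used above.
private static unsafe void Write7BitEncodedIntSketch(byte* ptr, int value)
{
    var v = (uint)value;
    while (v >= 0x80)
    {
        *ptr++ = (byte)(v | 0x80); // continuation bit set: more bytes follow
        v >>= 7;
    }
    *ptr = (byte)v; // last byte, continuation bit clear
}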
public void CopyTo(int from, byte* dest, int offset, int count)
{
    if (from + count > Size)
        throw new ArgumentOutOfRangeException("from", "Cannot copy data after the end of the slice");

    if (Array == null)
    {
        MemoryUtils.Copy(dest + offset, Pointer + from, count);
        return;
    }

    fixed (byte* p = Array)
        MemoryUtils.Copy(dest + offset, p + from, count);
}
public override unsafe void WriteHeader(string filename, FileHeader* header)
{
    if (Disposed)
        throw new ObjectDisposedException("PureMemoryStorageEnvironmentOptions");

    IntPtr ptr;
    if (_headers.TryGetValue(filename, out ptr) == false)
    {
        ptr = Marshal.AllocHGlobal(sizeof(FileHeader));
        _headers[filename] = ptr;
    }
    MemoryUtils.Copy((byte*)ptr, (byte*)header, sizeof(FileHeader));
}
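The in-memory header store above has an obvious read-side counterpart. Voron's actual ReadHeader is not shown on this page, so the following is only a hypothetical sketch of the symmetric operation against the same _headers dictionary:

// Hypothetical read counterpart to WriteHeader; the real signature may differ.
public unsafe bool TryReadHeaderSketch(string filename, FileHeader* header)
{
    IntPtr ptr;
    if (_headers.TryGetValue(filename, out ptr) == false)
        return false; // no header was ever written for this file

    MemoryUtils.Copy((byte*)header, (byte*)ptr, sizeof(FileHeader));
    return true;
}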
/// <summary>
/// Internal method that is used when splitting pages
/// No need to do any work here, we are always adding at the end
/// </summary>
internal void CopyNodeDataToEndOfPage(NodeHeader* other, MemorySlice key)
{
    var index = NumberOfEntries;

    Debug.Assert(HasSpaceFor(SizeOf.NodeEntryWithAnotherKey(other, key) + Constants.NodeOffsetSize + SizeOf.NewPrefix(key)));

    var nodeSize = SizeOf.NodeEntryWithAnotherKey(other, key);

    Debug.Assert(IsBranch == false || index != 0 || key.KeyLength == 0); // branch page's first item must be the implicit ref

    var nodeVersion = other->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
        nodeVersion -= 1;

    var prefixedKey = key as PrefixedSlice;
    if (prefixedKey != null && prefixedKey.NewPrefix != null)
        WritePrefix(prefixedKey.NewPrefix, prefixedKey.Header.PrefixId);

    var newNode = AllocateNewNode(index, nodeSize, nodeVersion);

    newNode->KeySize = key.Size;
    newNode->Flags = other->Flags;

    if (key.Options == SliceOptions.Key && key.Size > 0)
        key.CopyTo((byte*)newNode + Constants.NodeHeaderSize);

    if (IsBranch || other->Flags == (NodeFlags.PageRef))
    {
        newNode->PageNumber = other->PageNumber;
        newNode->Flags = NodeFlags.PageRef;
        return;
    }

    newNode->DataSize = other->DataSize;
    MemoryUtils.Copy((byte*)newNode + Constants.NodeHeaderSize + key.Size,
        (byte*)other + Constants.NodeHeaderSize + other->KeySize,
        other->DataSize);
}
private void Defrag(Transaction tx)
{
    TemporaryPage tmp;
    using (tx.Environment.GetTemporaryPage(tx, out tmp))
    {
        var tempPage = tmp.GetTempPage(KeysPrefixed);

        MemoryUtils.Copy(tempPage.Base, Base, _pageSize);

        var numberOfEntries = NumberOfEntries;

        Upper = KeysPrefixed ? (ushort)(_pageSize - Constants.PrefixInfoSectionSize) : _pageSize;

        for (int i = 0; i < numberOfEntries; i++)
        {
            var node = tempPage.GetNode(i);
            var size = node->GetNodeSize() - Constants.NodeOffsetSize;
            size += size & 1; // round up to an even size

            MemoryUtils.Copy(Base + Upper - size, (byte*)node, size);

            Upper -= (ushort)size;
            KeysOffsets[i] = Upper;
        }

        if (KeysPrefixed == false)
            return;

        var prefixNode = new PrefixNode();

        for (byte i = 0; i < _prefixSection->NextPrefixId; i++)
        {
            tempPage.AssertPrefixNode(i);

            prefixNode.Set(tempPage._base + tempPage._prefixSection->PrefixOffsets[i], tempPage.PageNumber);

            var prefixNodeSize = Constants.PrefixNodeHeaderSize + prefixNode.PrefixLength;
            prefixNodeSize += prefixNodeSize & 1; // round up to an even size

            MemoryUtils.Copy(Base + Upper - prefixNodeSize, prefixNode.Base, prefixNodeSize);

            Upper -= (ushort)prefixNodeSize;
            _prefixSection->PrefixOffsets[i] = Upper;
        }
    }
}
public Slice Clone()
{
    var buffer = new byte[Size];
    if (Array == null)
    {
        fixed (byte* dest = buffer)
        {
            MemoryUtils.Copy(dest, Pointer, Size);
        }
    }
    else
    {
        Buffer.BlockCopy(Array, 0, buffer, 0, Size);
    }

    return new Slice(buffer);
}
public bool Read(long pageNumber, byte* buffer, int count)
{
    long pos = 0;
    foreach (var current in _buffers)
    {
        // note: only matches reads that start exactly at a buffer boundary;
        // pages in the middle of a gathered write are not addressable here
        if (pos != pageNumber)
        {
            pos += current.SizeInPages;
            continue;
        }
        MemoryUtils.Copy(buffer, current.Pointer, count);
        return true;
    }
    return false;
}
public override Slice Skip(ushort bytesToSkip)
{
    if (Header.PrefixId == NonPrefixedId)
        return NonPrefixedData.Skip(bytesToSkip);

    if (bytesToSkip == Header.PrefixUsage)
        return NonPrefixedData;

    if (bytesToSkip > Header.PrefixUsage)
        return NonPrefixedData.Skip((ushort)(bytesToSkip - Header.PrefixUsage));

    // bytesToSkip < _header.PrefixUsage

    Debug.Assert(Prefix != null);

    var prefixPart = Header.PrefixUsage - bytesToSkip;

    var sliceSize = prefixPart + Header.NonPrefixedDataSize;
    var sliceData = new byte[sliceSize];

    fixed (byte* slicePtr = sliceData)
    {
        if (Prefix.Value == null)
        {
            MemoryUtils.Copy(slicePtr, Prefix.ValuePtr + bytesToSkip, prefixPart);
        }
        else
        {
            fixed (byte* prefixVal = Prefix.Value)
                MemoryUtils.Copy(slicePtr, prefixVal + bytesToSkip, prefixPart);
        }
    }

    NonPrefixedData.CopyTo(0, sliceData, prefixPart, sliceSize - prefixPart);

    return new Slice(sliceData);
}
private static void CopyStreamToPointer(Transaction tx, Stream value, byte* pos)
{
    TemporaryPage tmp;
    using (tx.Environment.GetTemporaryPage(tx, out tmp))
    {
        var tempPageBuffer = tmp.TempPageBuffer;
        var tempPagePointer = tmp.TempPagePointer;
        while (true)
        {
            var read = value.Read(tempPageBuffer, 0, AbstractPager.PageSize);
            if (read == 0)
                break;

            MemoryUtils.Copy(pos, tempPagePointer, read);
            pos += read;
        }
    }
}
public void CopyTo(int from, byte[] dest, int offset, int count)
{
    if (from + count > Size)
        throw new ArgumentOutOfRangeException("from", "Cannot copy data after the end of the slice");
    if (offset + count > dest.Length)
        throw new ArgumentOutOfRangeException("offset", "Cannot copy data after the end of the buffer");

    if (Array == null)
    {
        fixed (byte* p = dest)
            MemoryUtils.Copy(p + offset, Pointer + from, count);
        return;
    }
    Buffer.BlockCopy(Array, from, dest, offset, count);
}
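Together with the CopyTo(byte[]) overload earlier on this page, this gives two ways to drain a Slice into managed memory. A small hypothetical usage sketch (the CopySliceTail helper is not part of Voron):

// Hypothetical caller: copy everything after the first 4 bytes of a slice.
static byte[] CopySliceTail(Slice slice)
{
    var tail = new byte[slice.Size - 4];
    slice.CopyTo(4, tail, 0, tail.Length); // from = 4, so from + count == Size exactly
    return tail;
}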
private void WritePageDirect(Page page, int numberOfPagesIncludingOverflow)
{
    var pageFromScratchBuffer = _env.ScratchBufferPool.Allocate(this, numberOfPagesIncludingOverflow);

    var dest = _env.ScratchBufferPool.AcquirePagePointer(pageFromScratchBuffer.ScratchFileNumber, pageFromScratchBuffer.PositionInScratchBuffer);
    MemoryUtils.Copy(dest, page.Base, numberOfPagesIncludingOverflow * AbstractPager.PageSize);

    _allocatedPagesInTransaction++;

    _dirtyPages.Add(page.PageNumber);
    page.Dirty = true;

    if (numberOfPagesIncludingOverflow > 1)
        _dirtyOverflowPages.Add(page.PageNumber + 1, numberOfPagesIncludingOverflow - 1);

    _scratchPagesTable[page.PageNumber] = pageFromScratchBuffer;
    _transactionPages.Add(pageFromScratchBuffer);
}
public int Read(byte* buffer, int count)
{
    count = Math.Min(count, _len - _pos);

    if (count <= 0)
        return 0;

    if (_val == null)
    {
        fixed (byte* b = _buffer)
            MemoryUtils.Copy(buffer, b + _pos, count);
    }
    else
    {
        MemoryUtils.Copy(buffer, _val + _pos, count);
    }
    _pos += count;

    return count;
}
internal Page ModifyPage(long num, Page page)
{
    _env.AssertFlushingNotFailed();

    if (_dirtyPages.Contains(num))
    {
        page = GetPageForModification(num, page);
        page.Dirty = true;

        return page;
    }

    page = GetPageForModification(num, page);

    var newPage = AllocatePage(1, PageFlags.None, num); // allocate new page in a log file but with the same number
    MemoryUtils.Copy(newPage.Base, page.Base, AbstractPager.PageSize);
    newPage.LastSearchPosition = page.LastSearchPosition;
    newPage.LastMatch = page.LastMatch;

    return newPage;
}
public byte[] ReadBytes(T field)
{
    var fieldIndex = ((VariableSizeField)_schema.Fields[field.GetHashCode()]).Index;
    if (_ptr != null)
    {
        var offset = VariableOffsets[fieldIndex];
        int valueLengthSize;
        var length = Read7BitEncodedInt(_ptr + offset, out valueLengthSize);

        var result = new byte[length];
        fixed (byte* rPtr = result)
        {
            MemoryUtils.Copy(rPtr, _ptr + offset + valueLengthSize, length);
        }
        return result;
    }
    return _value.VariableSizeWrites[fieldIndex].Value;
}
public Stream ToStream()
{
    var ms = new MemoryStream(260);
    var tmpBuffer = new byte[(_inner.Length + 1) * sizeof(int)];
    unsafe
    {
        fixed (int* src = _inner)
        fixed (byte* dest = tmpBuffer)
        {
            *(int*)dest = SetCount;
            // copy the whole _inner array after the leading SetCount int;
            // the count is the buffer size minus the 4-byte header
            MemoryUtils.Copy(dest + sizeof(int), (byte*)src, tmpBuffer.Length - sizeof(int));
        }
    }
    Debug.Assert(BitConverter.ToInt32(tmpBuffer, 0) == SetCount);
    ms.Write(tmpBuffer, 0, tmpBuffer.Length);
    ms.Position = 0;
    return ms;
}
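The stream layout produced by ToStream is a 4-byte SetCount followed by the raw contents of _inner. A hedged sketch of the inverse read, purely for illustration (this deserializer is not shown on this page and the helper name is hypothetical):

// Hypothetical reader for the ToStream layout: [int SetCount][int[] _inner].
static int[] ReadInnerFromStream(Stream ms, out int setCount)
{
    var buffer = new byte[(int)ms.Length];
    var read = 0;
    while (read < buffer.Length) // Stream.Read may return fewer bytes than requested
        read += ms.Read(buffer, read, buffer.Length - read);

    setCount = BitConverter.ToInt32(buffer, 0);
    var inner = new int[(buffer.Length - sizeof(int)) / sizeof(int)];
    Buffer.BlockCopy(buffer, sizeof(int), inner, 0, buffer.Length - sizeof(int));
    return inner;
}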
private void ExpandMultiTreeNestedPageSize(Transaction tx, Slice key, Slice value, byte* nestedPagePtr, ushort newSize, int currentSize)
{
    Debug.Assert(newSize > currentSize);

    TemporaryPage tmp;
    using (tx.Environment.GetTemporaryPage(tx, out tmp))
    {
        var tempPagePointer = tmp.TempPagePointer;
        MemoryUtils.Copy(tempPagePointer, nestedPagePtr, currentSize);
        Delete(key); // release our current page
        Page nestedPage = new Page(tempPagePointer, "multi tree", (ushort)currentSize);

        var ptr = DirectAdd(key, newSize);

        var newNestedPage = new Page(ptr, "multi tree", newSize)
        {
            Lower = (ushort)Constants.PageHeaderSize,
            Upper = KeysPrefixing ? (ushort)(newSize - Constants.PrefixInfoSectionSize) : newSize,
            Flags = KeysPrefixing ? PageFlags.Leaf | PageFlags.KeysPrefixed : PageFlags.Leaf,
            PageNumber = -1L // mark as invalid page number
        };

        newNestedPage.ClearPrefixInfo();

        MemorySlice nodeKey = nestedPage.CreateNewEmptyKey();
        for (int i = 0; i < nestedPage.NumberOfEntries; i++)
        {
            var nodeHeader = nestedPage.GetNode(i);
            nestedPage.SetNodeKey(nodeHeader, ref nodeKey);
            nodeKey = newNestedPage.PrepareKeyToInsert(nodeKey, i);
            newNestedPage.AddDataNode(i, nodeKey, 0,
                (ushort)(nodeHeader->Version - 1)); // we dec by one because AddDataNode will inc by one, and we don't want to change those values
        }

        newNestedPage.Search(value);
        newNestedPage.AddDataNode(newNestedPage.LastSearchPosition, newNestedPage.PrepareKeyToInsert(value, newNestedPage.LastSearchPosition), 0, 0);
    }
}
private void WriteVariableSizeFields(byte* ptr)
{
    if (VariableSizeWrites == null)
        return;

    var fieldOffsetsSize = VariableFieldOffsetSize * VariableSizeWrites.Length;
    var offsetsPointer = ptr + _schema.FixedSize;
    var fieldPointer = offsetsPointer + fieldOffsetsSize;

    var offsets = new uint[VariableSizeWrites.Length];

    for (int i = 0; i < VariableSizeWrites.Length; i++)
    {
        var write = VariableSizeWrites[i];
        var valueLength = write.Value.Length;

        offsets[i] = (uint)(fieldPointer - ptr);

        Write7BitEncodedInt(fieldPointer, valueLength);
        fieldPointer += write.ValueSizeLength;

        fixed (byte* valuePtr = write.Value)
        {
            MemoryUtils.Copy(fieldPointer, valuePtr, valueLength);
        }
        fieldPointer += valueLength;
    }

    fixed (uint* p = offsets)
    {
        MemoryUtils.Copy(offsetsPointer, (byte*)p, fieldOffsetsSize);
    }
}
private bool TryMergePages(Page parentPage, Page left, Page right)
{
    TemporaryPage tmp;
    using (_tx.Environment.GetTemporaryPage(_tx, out tmp))
    {
        var mergedPage = tmp.GetTempPage(left.KeysPrefixed);
        MemoryUtils.Copy(mergedPage.Base, left.Base, left.PageSize);

        var previousSearchPosition = right.LastSearchPosition;

        for (int i = 0; i < right.NumberOfEntries; i++)
        {
            right.LastSearchPosition = i;
            var key = GetActualKey(right, right.LastSearchPositionOrLastEntry);
            var node = right.GetNode(i);

            var prefixedKey = mergedPage.PrepareKeyToInsert(key, mergedPage.NumberOfEntries);

            if (mergedPage.HasSpaceFor(_tx, SizeOf.NodeEntryWithAnotherKey(node, prefixedKey) + Constants.NodeOffsetSize + SizeOf.NewPrefix(prefixedKey)) == false)
            {
                right.LastSearchPosition = previousSearchPosition; // previous position --> prevent mutation of parameter
                return false;
            }

            mergedPage.CopyNodeDataToEndOfPage(node, prefixedKey);
        }

        MemoryUtils.Copy(left.Base, mergedPage.Base, left.PageSize);
    }

    parentPage.RemoveNode(parentPage.LastSearchPositionOrLastEntry); // unlink the right sibling
    _tx.FreePage(right.PageNumber);

    return true;
}
public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
    Action<string> infoNotify = null, Action backupStarted = null)
{
    if (env.Options.IncrementalBackupEnabled == false)
        throw new InvalidOperationException("Incremental backup is disabled for this storage");

    var pageNumberToPageInScratch = new Dictionary<long, long>();
    if (infoNotify == null)
        infoNotify = str => { };
    var toDispose = new List<IDisposable>();
    try
    {
        IncrementalBackupInfo backupInfo;
        long lastWrittenLogPage = -1;
        long lastWrittenLogFile = -1;

        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
            }

            //txw.Commit(); - intentionally not committing
        }

        if (backupStarted != null)
            backupStarted();

        infoNotify("Voron - reading storage journals for snapshot pages");

        var lastBackedUpFile = backupInfo.LastBackedUpJournal;
        var lastBackedUpPage = backupInfo.LastBackedUpJournalPage;
        var firstJournalToBackup = backupInfo.LastBackedUpJournal;

        if (firstJournalToBackup == -1)
            firstJournalToBackup = 0; // first time that we do incremental backup

        var lastTransaction = new TransactionHeader { TransactionId = -1 };

        var recoveryPager = env.Options.CreateScratchPager("min-inc-backup.scratch");
        toDispose.Add(recoveryPager);
        int recoveryPage = 0;
        for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
        {
            lastBackedUpFile = journalNum;
            var journalFile = IncrementalBackup.GetJournalFile(env, journalNum, backupInfo);
            try
            {
                using (var filePager = env.Options.OpenJournalPager(journalNum))
                {
                    var reader = new JournalReader(filePager, recoveryPager, 0, null, recoveryPage);
                    reader.MaxPageToRead = lastBackedUpPage = journalFile.JournalWriter.NumberOfAllocatedPages;
                    if (journalNum == lastWrittenLogFile) // set the last part of the log file we'll be reading
                        reader.MaxPageToRead = lastBackedUpPage = lastWrittenLogPage;

                    if (lastBackedUpPage == journalFile.JournalWriter.NumberOfAllocatedPages) // past the file size
                    {
                        // move to the next
                        lastBackedUpPage = -1;
                        lastBackedUpFile++;
                    }

                    if (journalNum == backupInfo.LastBackedUpJournal) // continue from last backup
                        reader.SetStartPage(backupInfo.LastBackedUpJournalPage);

                    TransactionHeader* lastJournalTxHeader = null;
                    while (reader.ReadOneTransaction(env.Options))
                    {
                        // read all transactions here
                        lastJournalTxHeader = reader.LastTransactionHeader;
                    }

                    if (lastJournalTxHeader != null)
                        lastTransaction = *lastJournalTxHeader;

                    recoveryPage = reader.RecoveryPage;

                    foreach (var pagePosition in reader.TransactionPageTranslation)
                    {
                        var pageInJournal = pagePosition.Value.JournalPos;
                        var page = recoveryPager.Read(pageInJournal);
                        pageNumberToPageInScratch[pagePosition.Key] = pageInJournal;
                        if (page.IsOverflow)
                        {
                            var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                            for (int i = 1; i < numberOfOverflowPages; i++)
                                pageNumberToPageInScratch.Remove(page.PageNumber + i);
                        }
                    }
                }
            }
            finally
            {
                journalFile.Release();
            }
        }

        if (pageNumberToPageInScratch.Count == 0)
        {
            infoNotify("Voron - no changes since last backup, nothing to do");
            return;
        }

        infoNotify("Voron - started writing snapshot file.");

        if (lastTransaction.TransactionId == -1)
            throw new InvalidOperationException("Could not find any transactions in the journals, but found pages to write? That ain't right.");

        // it is possible that we merged enough transactions so the _merged_ output is too large for us.
        // Voron limit transactions to about 4GB each. That means that we can't just merge all transactions
        // blindly, for fear of hitting this limit. So we need to split things.
        // We are also limited to about 8 TB of data in general before we literally can't fit the number of pages into
        // pageNumberToPageInScratch even theoretically.
        // We're fine with saying that you need to run min inc backup before you hit 8 TB in your increment, so that works for now.
        // We are also going to use env.Options.MaxScratchBufferSize to set the actual transaction limit here, to avoid issues
        // down the road and to limit how big a single transaction can be before the theoretical 4GB limit.

        var nextJournalNum = lastBackedUpFile;
        using (var file = new FileStream(backupPath, FileMode.Create))
        {
            using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
            {
                var copier = new DataCopier(AbstractPager.PageSize * 16);
                var finalPager = env.Options.CreateScratchPager("min-inc-backup-final.scratch");
                toDispose.Add(finalPager);
                finalPager.EnsureContinuous(null, 0, 1); //txHeader

                foreach (var partition in Partition(pageNumberToPageInScratch.Values, env.Options.MaxNumberOfPagesInMergedTransaction))
                {
                    int totalNumberOfPages = 0;
                    int overflowPages = 0;
                    int start = 1;
                    foreach (var pageNum in partition)
                    {
                        var p = recoveryPager.Read(pageNum);
                        var size = 1;
                        if (p.IsOverflow)
                        {
                            size = recoveryPager.GetNumberOfOverflowPages(p.OverflowSize);
                            overflowPages += (size - 1);
                        }
                        totalNumberOfPages += size;
                        finalPager.EnsureContinuous(null, start, size); //maybe increase size
                        MemoryUtils.Copy(finalPager.AcquirePagePointer(start), p.Base, size * AbstractPager.PageSize);
                        start += size;
                    }

                    var txPage = finalPager.AcquirePagePointer(0);
                    StdLib.memset(txPage, 0, AbstractPager.PageSize);
                    var txHeader = (TransactionHeader*)txPage;
                    txHeader->HeaderMarker = Constants.TransactionHeaderMarker;
                    txHeader->FreeSpace = lastTransaction.FreeSpace;
                    txHeader->Root = lastTransaction.Root;
                    txHeader->OverflowPageCount = overflowPages;
                    txHeader->PageCount = totalNumberOfPages - overflowPages;
                    txHeader->PreviousTransactionCrc = lastTransaction.PreviousTransactionCrc;
                    txHeader->TransactionId = lastTransaction.TransactionId;
                    txHeader->NextPageNumber = lastTransaction.NextPageNumber;
                    txHeader->LastPageNumber = lastTransaction.LastPageNumber;
                    txHeader->TxMarker = TransactionMarker.Commit | TransactionMarker.Merged;
                    txHeader->Compressed = false;
                    txHeader->UncompressedSize = txHeader->CompressedSize = totalNumberOfPages * AbstractPager.PageSize;
                    txHeader->Crc = Crc.Value(finalPager.AcquirePagePointer(1), 0, totalNumberOfPages * AbstractPager.PageSize);

                    var entry = package.CreateEntry(string.Format("{0:D19}.merged-journal", nextJournalNum), compression);
                    nextJournalNum++;
                    using (var stream = entry.Open())
                    {
                        copier.ToStream(finalPager.AcquirePagePointer(0), (totalNumberOfPages + 1) * AbstractPager.PageSize, stream);
                    }
                }
            }

            file.Flush(true); // make sure we hit the disk and stay there
        }

        env.HeaderAccessor.Modify(header =>
        {
            header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
            header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
        });
    }
    finally
    {
        foreach (var disposable in toDispose)
        {
            disposable.Dispose();
        }
    }
}
private void MoveLeafNode(Page parentPage, Page from, Page to)
{
    Debug.Assert(from.IsBranch == false);
    var originalFromKeyStart = GetActualKey(from, from.LastSearchPositionOrLastEntry);

    var fromNode = from.GetNode(from.LastSearchPosition);
    byte* val = from.Base + from.KeysOffsets[from.LastSearchPosition] + Constants.NodeHeaderSize + originalFromKeyStart.Size;

    var nodeVersion = fromNode->Version; // every time new node is allocated the version is increased, but in this case we do not want to increase it
    if (nodeVersion > 0)
        nodeVersion -= 1;

    var prefixedOriginalFromKey = to.PrepareKeyToInsert(originalFromKeyStart, to.LastSearchPosition);

    byte* dataPos;
    var fromDataSize = fromNode->DataSize;
    switch (fromNode->Flags)
    {
        case NodeFlags.PageRef:
            to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, -1);
            dataPos = to.AddPageRefNode(to.LastSearchPosition, prefixedOriginalFromKey, fromNode->PageNumber);
            break;
        case NodeFlags.Data:
            to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, fromDataSize);
            dataPos = to.AddDataNode(to.LastSearchPosition, prefixedOriginalFromKey, fromDataSize, nodeVersion);
            break;
        case NodeFlags.MultiValuePageRef:
            to.EnsureHasSpaceFor(_tx, prefixedOriginalFromKey, fromDataSize);
            dataPos = to.AddMultiValueNode(to.LastSearchPosition, prefixedOriginalFromKey, fromDataSize, nodeVersion);
            break;
        default:
            throw new NotSupportedException("Invalid node type to move: " + fromNode->Flags);
    }

    if (dataPos != null && fromDataSize > 0)
        MemoryUtils.Copy(dataPos, val, fromDataSize);

    from.RemoveNode(from.LastSearchPositionOrLastEntry);

    var pos = parentPage.LastSearchPositionOrLastEntry;
    parentPage.RemoveNode(pos);

    var newSeparatorKey = GetActualKey(to, 0); // get the next smallest key it has now
    var pageNumber = to.PageNumber;
    if (parentPage.GetNode(0)->PageNumber == to.PageNumber)
    {
        pageNumber = from.PageNumber;
        newSeparatorKey = GetActualKey(from, 0);
    }

    AddSeparatorToParentPage(parentPage, pageNumber, newSeparatorKey, pos);
}
public bool Initialize()
{
    _locker.EnterWriteLock();
    try
    {
        if (_theHeader == null)
            throw new ObjectDisposedException("Cannot access the header after it was disposed");

        var headers = stackalloc FileHeader[2];
        var f1 = &headers[0];
        var f2 = &headers[1];
        var hasHeader1 = _env.Options.ReadHeader(HeaderFileNames[0], f1);
        var hasHeader2 = _env.Options.ReadHeader(HeaderFileNames[1], f2);
        if (hasHeader1 == false && hasHeader2 == false)
        {
            // new database, fill in both headers from scratch
            FillInEmptyHeader(f1);
            FillInEmptyHeader(f2);
            _env.Options.WriteHeader(HeaderFileNames[0], f1);
            _env.Options.WriteHeader(HeaderFileNames[1], f2);

            MemoryUtils.Copy((byte*)_theHeader, (byte*)f1, sizeof(FileHeader));
            return true; // new
        }

        if (f1->MagicMarker != Constants.MagicMarker && f2->MagicMarker != Constants.MagicMarker)
            throw new InvalidDataException("None of the header files start with the magic marker, probably not db files");

        // if one of the files is corrupted, but the other isn't, restore to the valid file
        if (f1->MagicMarker != Constants.MagicMarker)
            *f1 = *f2;
        if (f2->MagicMarker != Constants.MagicMarker)
            *f2 = *f1;

        if (f1->Version != Constants.CurrentVersion)
            throw new InvalidDataException("This is a db file for version " + f1->Version +
                ", which is not compatible with the current version " + Constants.CurrentVersion + Environment.NewLine +
                "Error at " + _env.Options.BasePath);

        if (f1->TransactionId < 0)
            throw new InvalidDataException("The transaction number cannot be negative");

        // keep whichever of the two headers has the most recent revision
        if (f1->HeaderRevision > f2->HeaderRevision)
            MemoryUtils.Copy((byte*)_theHeader, (byte*)f1, sizeof(FileHeader));
        else
            MemoryUtils.Copy((byte*)_theHeader, (byte*)f2, sizeof(FileHeader));

        _revision = _theHeader->HeaderRevision;
        return false;
    }
    finally
    {
        _locker.ExitWriteLock();
    }
}