// Initializes a storage environment from the given options: header access, scratch
// buffers, write-ahead journal, and either a brand-new database or the existing one.
// On any failure the partially constructed environment is disposed before rethrowing.
public StorageEnvironment(StorageEnvironmentOptions options) {
    try {
        _options = options;
        _dataPager = options.DataPager;
        _freeSpaceHandling = new FreeSpaceHandling();
        _headerAccessor = new HeaderAccessor(this);
        // Initialize() reports whether the header was just created, i.e. a new database.
        var isNew = _headerAccessor.Initialize();
        _scratchBufferPool = new ScratchBufferPool(this);
        _journal = new WriteAheadJournal(this);
        if (isNew)
            CreateNewDatabase();
        else // existing db, let us load it
            LoadExistingDatabase();
        State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
        State.Root.Name = Constants.RootTreeName;
        Writer = new TransactionMergingWriter(this, _cancellationTokenSource.Token);
        // Background flushing moves journal data to the data file unless the caller
        // opted into manual flushing.
        if (_options.ManualFlushing == false)
            _flushingTask = FlushWritesToDataFileAsync();
    }
    catch (Exception) {
        // Construction failed: release whatever was created, then surface the error.
        Dispose();
        throw;
    }
}
// Creates a transaction over the environment. Read transactions share the current
// environment state and snapshot the journals; write transactions clone the state
// and prepare a fresh transaction header.
public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFreeSpaceHandling freeSpaceHandling) {
    _dataPager = env.Options.DataPager;
    _env = env;
    _journal = env.Journal;
    _id = id;
    _freeSpaceHandling = freeSpaceHandling;
    Flags = flags;
    // Ref-count the scratch pager state so it stays alive for this transaction's lifetime.
    var scratchPagerState = env.ScratchBufferPool.PagerState;
    scratchPagerState.AddRef();
    _pagerStates.Add(scratchPagerState);
    if (flags.HasFlag(TransactionFlags.ReadWrite) == false) {
        // Read transaction: share the environment state and snapshot the journals.
        _state = env.State;
        _journal.GetSnapshots().ForEach(AddJournalSnapshot);
        return;
    }
    // Write transaction: work against a private clone of the state.
    _state = env.State.Clone();
    InitTransactionHeader();
    MarkTreesForWriteTransaction();
}
// Creates a transaction. Every scratch file's pager state is ref-counted for the
// transaction's lifetime; read transactions additionally freeze those states and
// snapshot the journals, while write transactions initialize a fresh tx header.
public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFreeSpaceHandling freeSpaceHandling) {
    _dataPager = env.Options.DataPager;
    _env = env;
    _journal = env.Journal;
    _id = id;
    _freeSpaceHandling = freeSpaceHandling;
    Flags = flags;
    var scratchPagerStates = env.ScratchBufferPool.GetPagerStatesOfAllScratches();
    foreach (var scratchPagerState in scratchPagerStates.Values) {
        scratchPagerState.AddRef();
        _pagerStates.Add(scratchPagerState);
    }
    if (flags.HasFlag(TransactionFlags.ReadWrite) == false) {
        // for read transactions, we need to keep the pager state frozen
        // for write transactions, we can use the current one (which == null)
        _scratchPagerStates = scratchPagerStates;
        _state = env.State.Clone(this);
        _journal.GetSnapshots().ForEach(AddJournalSnapshot);
        return;
    }
    _state = env.State.Clone(this);
    InitTransactionHeader();
    MarkTreesForWriteTransaction();
}
// Test helper: (re)creates "test.data" from scratch and opens a memory-mapped pager on it.
private void FilePager()
{
    const string dataFile = "test.data";
    // Start from a clean slate so state from earlier runs cannot leak into this one.
    if (File.Exists(dataFile))
    {
        File.Delete(dataFile);
    }
    _pager = new MemoryMapPager(dataFile);
}
/// <summary>
/// Creates a reader for shipped transactions over the given journal pager.
/// </summary>
/// <param name="pager">Source pager to read shipped transaction pages from; must not be null.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="pager"/> is null.</exception>
public ShippedTransactionsReader(IVirtualPager pager)
{
    if (pager == null)
    {
        // nameof is refactor-safe, unlike the original "pager" string literal.
        throw new ArgumentNullException(nameof(pager));
    }
    _pager = pager;
    _pageNumbers = new List<long>();
}
// Legacy transaction constructor: captures the pager, environment and free-space
// repository, and seeds the next page number from the environment.
public Transaction(IVirtualPager pager, StorageEnvironment env, long id, TransactionFlags flags, IFreeSpaceRepository freeSpaceRepository)
{
    Flags = flags;
    _id = id;
    _env = env;
    _pager = pager;
    _freeSpaceRepository = freeSpaceRepository;
    NextPageNumber = env.NextPageNumber;
}
// Copies the transaction's modified pages into a contiguous temp buffer,
// LZ4-compresses them, and returns pointers to the (uncompressed) transaction
// header page followed by the compressed data pages, ready for the journal write.
private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc) {
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    // Round the worst-case compressed size up to whole pages.
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);
    // Layout in the compression pager: [0 .. dataPagesCount) = raw copy,
    // [dataPagesCount ..) = compression output.
    var tempBuffer = compressionPager.AcquirePagePointer(tx, 0);
    var compressionBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    foreach (var txPage in txPages) {
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        Memory.BulkCopy(write, scratchPage, count);
        write += count;
    }
    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var remainder = len % AbstractPager.PageSize;
    var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);
    if (remainder != 0) {
        // zero the remainder of the page
        UnmanagedMemory.Set(compressionBuffer + len, 0, remainder);
    }
    var pages = new IntPtr[compressedPages + 1];
    var txHeaderPage = tx.GetTransactionHeaderPage();
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
    var txHeader = (TransactionHeader *)txHeaderBase;
    // Record compression metadata in the (uncompressed) transaction header.
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;
    txHeader->PreviousTransactionCrc = previousTransactionCrc;
    pages[0] = new IntPtr(txHeaderBase);
    for (int index = 0; index < compressedPages; index++) {
        pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
    }
    // CRC covers the whole page-aligned compressed region, including the zeroed tail.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);
    return(pages);
}
/// <summary>
/// Creates a reader over a journal pager, optionally resuming recovery at
/// <paramref name="recoverPage"/> and continuing after the given last-synced
/// transaction and previous transaction header.
/// </summary>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="pager"/> is null.</exception>
public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous, int recoverPage = 0)
{
    // Guard added for consistency with the other JournalReader overload; a null
    // pager would otherwise fail later with a NullReferenceException.
    if (pager == null)
    {
        throw new ArgumentNullException(nameof(pager));
    }
    RequireHeaderUpdate = false;
    _pager = pager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;
    _readingPage = 0; // reading always starts at the first journal page
    _recoveryPage = recoverPage;
    LastTransactionHeader = previous;
}
// Positions a reader at the start of the journal, continuing after the given
// last-synced transaction and previous transaction header.
public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader* previous)
{
    RequireHeaderUpdate = false;
    LastTransactionHeader = previous;
    _pager = pager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;
    // Both the reading and recovery cursors begin at page zero.
    _readingPage = 0;
    _recoveryPage = 0;
}
// Sizes the next journal file from how large the recovered journal actually was,
// rounded up to a power of two; the configured maximum is never applied here.
private void RecoverCurrentJournalSize(IVirtualPager pager)
{
    var recoveredSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
    // can't set for more than the max log file size
    if (recoveredSize < _env.Options.MaxLogFileSize)
    {
        _currentJournalFileSize = recoveredSize;
    }
}
// Creates in-memory-style options backed by a temporary pager; an atomically
// incremented counter gives each instance a distinct identity.
public PureMemoryStorageEnvironmentOptions()
{
    _instanceId = Interlocked.Increment(ref _counter);
    if (RunningOnPosix == false)
    {
        _dataPager = new Win32PageFileBackedMemoryMappedPager("data.pager", InitialFileSize);
    }
    else
    {
        _dataPager = new PosixTempMemoryMapPager("_data.pager", InitialFileSize);
    }
}
// Older CompressPages variant: txPages[0] is the transaction header page (left
// uncompressed); the remaining pages are copied contiguously, LZ4-compressed,
// and returned as raw page pointers with the header page first.
private byte *[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager) {
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    // Round the worst-case compressed size up to whole pages.
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);
    // [0 .. dataPagesCount) holds the raw copy; compression output starts right after.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    // Index 0 is the tx header page; data pages start at index 1.
    for (int index = 1; index < txPages.Count; index++) {
        var txPage = txPages[index];
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        NativeMethods.memcpy(write, scratchPage, count);
        write += count;
    }
    var sizeAfterCompression = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var compressedPages = (sizeAfterCompression / AbstractPager.PageSize) + (sizeAfterCompression % AbstractPager.PageSize == 0 ? 0 : 1);
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
    var txHeader = (TransactionHeader *)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = sizeAfterCompression;
    txHeader->UncompressedSize = sizeInBytes;
    var pages = new byte *[compressedPages + 1];
    pages[0] = txHeaderBase;
    for (int index = 0; index < compressedPages; index++) {
        pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
    }
    // NOTE(review): the CRC covers compressedPages * PageSize bytes, but unlike later
    // variants this one does not zero the tail of the last page first — confirm the
    // buffer's prior contents make this deterministic.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);
    return(pages);
}
/// <summary>
/// Creates a reader over a journal pager, starting at page zero and continuing
/// after the given last-synced transaction and previous transaction header.
/// </summary>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="pager"/> is null.</exception>
public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous)
{
    if (pager == null)
    {
        // nameof is refactor-safe, unlike the original "pager" string literal.
        throw new ArgumentNullException(nameof(pager));
    }
    RequireHeaderUpdate = false;
    _pager = pager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;
    _readingPage = 0;
    _recoveryPage = 0;
    LastTransactionHeader = previous;
    _previousTransactionCrc = 0;
}
// Wires up the write-ahead journal for the environment: remembers the data pager,
// the initial journal file size, and the header accessor, and prepares the
// delegate used to persist journal bookkeeping into the file header.
public WriteAheadJournal(StorageEnvironment env) {
    _env = env;
    _dataPager = _env.Options.DataPager;
    _currentJournalFileSize = env.Options.InitialLogFileSize;
    _headerAccessor = env.HeaderAccessor;
    _updateLogInfo = header => {
        var journalFilesCount = _files.Count;
        // -1 marks "no current journal" when no journal files exist yet.
        header->Journal.CurrentJournal = journalFilesCount > 0 ? _journalIndex : -1;
        header->Journal.JournalFilesCount = journalFilesCount;
        header->IncrementalBackup.LastCreatedJournal = _journalIndex;
    };
    // Scratch pager reused as the LZ4 compression work area.
    _compressionPager = _env.Options.CreateScratchPager("compression.buffers");
    _journalApplicator = new JournalApplicator(this);
}
// Creates in-memory-style options whose backing pager lives under the given temp
// directory; the file name embeds the process id and an instance counter so
// concurrent instances never collide.
public PureMemoryStorageEnvironmentOptions(string configTempPath)
{
    tempPath = configTempPath;
    _instanceId = Interlocked.Increment(ref _counter);
    var filename = $"ravendb-{Process.GetCurrentProcess().Id}-{_instanceId}-data.pager";
    string path = Path.Combine(tempPath, filename);
    if (RunningOnPosix)
    {
        _dataPager = new PosixTempMemoryMapPager(path, InitialFileSize);
    }
    else
    {
        // Reuse the already-computed path instead of recombining tempPath + filename
        // (the original duplicated the Path.Combine call here).
        _dataPager = new Win32MemoryMapPager(path, InitialFileSize,
            Win32NativeFileAttributes.RandomAccess | Win32NativeFileAttributes.DeleteOnClose | Win32NativeFileAttributes.Temporary);
    }
}
// First-time / load-time setup: for an empty pager, writes the two double-buffered
// header pages and creates the root and free-space trees; for an existing file,
// restores state from the most recent valid file header.
private void Setup(IVirtualPager pager) {
    if (pager.NumberOfAllocatedPages == 0) {
        // Brand-new database: pages 0 and 1 are reserved for double buffering tx commits.
        WriteEmptyHeaderPage(_pager.Get(null, 0));
        WriteEmptyHeaderPage(_pager.Get(null, 1));
        NextPageNumber = 2;
        using (var tx = new Transaction(_pager, this, _transactionsCounter + 1, TransactionFlags.ReadWrite, _freeSpaceRepository)) {
            var root = Tree.Create(tx, _sliceComparer);
            var freeSpace = Tree.Create(tx, _sliceComparer);
            // important to first create the two trees, then set them on the env
            FreeSpaceRoot = freeSpace;
            Root = root;
            tx.UpdateRoots(root, freeSpace);
            tx.Commit();
        }
        return;
    }
    // existing db, let us load it
    // the first two pages are allocated for double buffering tx commits
    FileHeader *entry = FindLatestFileHeadeEntry();
    NextPageNumber = entry->LastPageNumber + 1;
    _transactionsCounter = entry->TransactionId + 1;
    using (var tx = new Transaction(_pager, this, _transactionsCounter + 1, TransactionFlags.ReadWrite, _freeSpaceRepository)) {
        var root = Tree.Open(tx, _sliceComparer, &entry->Root);
        var freeSpace = Tree.Open(tx, _sliceComparer, &entry->FreeSpace);
        // important to first create the two trees, then set them on the env
        FreeSpaceRoot = freeSpace;
        Root = root;
        tx.Commit();
    }
}
// Initializes a storage environment over an explicit pager, optionally taking
// ownership of it, then sets up the root and free-space trees.
public StorageEnvironment(IVirtualPager pager, bool ownsPager = true)
{
    try
    {
        _pager = pager;
        _ownsPager = ownsPager;
        _freeSpaceRepository = new FreeSpaceRepository(this);
        _sliceComparer = NativeMethods.memcmp;
        Setup(pager);
        FreeSpaceRoot.Name = "Free Space";
        Root.Name = "Root";
    }
    catch (Exception)
    {
        Dispose();
        // BUG FIX: the original swallowed the exception after Dispose(), handing the
        // caller a half-initialized environment. Rethrow so construction failures
        // are visible (consistent with the options-based constructor).
        throw;
    }
}
// Initializes a storage environment from options: temporary page, header access,
// scratch buffers, journal, and either a new or an existing database. Disposes
// itself and rethrows if any step of construction fails.
public unsafe StorageEnvironment(StorageEnvironmentOptions options) {
    try {
        TemporaryPage = new TemporaryPage();
        _options = options;
        _dataPager = options.DataPager;
        _freeSpaceHandling = new FreeSpaceHandling(this);
        _sliceComparer = NativeMethods.memcmp;
        _headerAccessor = new HeaderAccessor(this);
        // true when the header file was just created, i.e. a brand-new database
        var isNew = _headerAccessor.Initialize();
        _scratchBufferPool = new ScratchBufferPool(this);
        _journal = new WriteAheadJournal(this);
        if (isNew) {
            CreateNewDatabase();
        }
        else // existing db, let us load it
        {
            LoadExistingDatabase();
        }
        State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
        State.Root.Name = Constants.RootTreeName;
        Writer = new TransactionMergingWriter(this);
        // Background flushing unless the caller opted into manual flushing.
        if (_options.ManualFlushing == false) {
            _flushingTask = FlushWritesToDataFileAsync();
        }
    }
    catch (Exception) {
        // Construction failed: clean up partial state, then surface the error.
        Dispose();
        throw;
    }
}
// Initializes a storage environment from options: header access, scratch buffers,
// journal, then either creates a new database or loads the existing one.
// Disposes itself and rethrows on any construction failure.
public StorageEnvironment(StorageEnvironmentOptions options) {
    try {
        _options = options;
        _dataPager = options.DataPager;
        _freeSpaceHandling = new FreeSpaceHandling();
        _headerAccessor = new HeaderAccessor(this);
        // true when the header file was just created, i.e. a brand-new database
        var isNew = _headerAccessor.Initialize();
        _scratchBufferPool = new ScratchBufferPool(this);
        _journal = new WriteAheadJournal(this);
        if (isNew) {
            CreateNewDatabase();
        }
        else // existing db, let us load it
        {
            LoadExistingDatabase();
        }
        Writer = new TransactionMergingWriter(this, _cancellationTokenSource.Token);
        // Background flushing unless the caller opted into manual flushing.
        if (_options.ManualFlushing == false) {
            _flushingTask = FlushWritesToDataFileAsync();
        }
    }
    catch (Exception) {
        // Construction failed: clean up partial state, then surface the error.
        Dispose();
        throw;
    }
}
// In-memory options: back the data pager with a page-file-backed memory-mapped pager.
public PureMemoryStorageEnvironmentOptions()
{
    var pager = new Win32PageFileBackedMemoryMappedPager("data.pager", InitialFileSize);
    _dataPager = pager;
}
// Creates the scratch pager and pre-allocates it to at least the initial log file
// size, or the configured initial file size when that is larger; records the
// scratch buffer growth limit.
public ScratchBufferPool(StorageEnvironment env)
{
    var options = env.Options;
    _scratchPager = options.CreateScratchPager("scratch.buffers");
    var initialSize = options.InitialLogFileSize;
    if (options.InitialFileSize.HasValue && options.InitialFileSize.Value > initialSize)
    {
        initialSize = options.InitialFileSize.Value;
    }
    _scratchPager.AllocateMorePages(null, initialSize);
    _sizeLimit = options.MaxScratchBufferSize;
}
// Creates the scratch pager and pre-allocates it to the initial log file size.
public ScratchBufferPool(StorageEnvironment env)
{
    var scratchPager = env.Options.CreateScratchPager("scratch.buffers");
    scratchPager.AllocateMorePages(null, env.Options.InitialLogFileSize);
    _scratchPager = scratchPager;
}
// Wraps one scratch pager together with its ordinal within the scratch pool.
public ScratchBufferFile(IVirtualPager scratchPager, int scratchNumber)
{
    _scratchNumber = scratchNumber;
    _scratchPager = scratchPager;
}
// In-memory options. An earlier revision used Win32PureMemoryPager here.
// TODO: once Win32PageFileBackedMemoryMappedPager is finished and works, pass a
// fresh Guid as its memory name instead of using the parameterless constructor.
public PureMemoryStorageEnvironmentOptions()
{
    var pager = new Win32PageFileBackedMemoryMappedPager();
    _dataPager = pager;
}
// Sizes the _next_ journal file from how large the recovered journal actually was,
// rounded up to a power of two; journals at or beyond the configured maximum leave
// the current setting untouched.
private void RecoverCurrentJournalSize(IVirtualPager pager)
{
    var journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
    if (journalSize >= _env.Options.MaxLogFileSize) // can't set for more than the max log file size
        return;
    // The guard above guarantees journalSize < MaxLogFileSize, so the original
    // Math.Min(journalSize, MaxLogFileSize) was redundant and has been dropped.
    _currentJournalFileSize = journalSize;
}
// Copies the transaction's modified pages into a contiguous temp buffer,
// LZ4-compresses them, and returns pointers to the (uncompressed) transaction
// header page followed by the compressed data pages, ready for the journal write.
private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc) {
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    // Round the worst-case compressed size up to whole pages.
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);
    // Layout in the compression pager: [0 .. dataPagesCount) = raw copy,
    // [dataPagesCount ..) = compression output.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    foreach (var txPage in txPages) {
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        MemoryUtils.BulkCopy(write, scratchPage, count);
        write += count;
    }
    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var remainder = len % AbstractPager.PageSize;
    var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);
    if (remainder != 0) {
        // zero the remainder of the page
        StdLib.memset(compressionBuffer + len, 0, remainder);
    }
    var pages = new IntPtr[compressedPages + 1];
    var txHeaderPage = tx.GetTransactionHeaderPage();
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
    var txHeader = (TransactionHeader*)txHeaderBase;
    // Record compression metadata in the (uncompressed) transaction header.
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;
    txHeader->PreviousTransactionCrc = previousTransactionCrc;
    pages[0] = new IntPtr(txHeaderBase);
    for (int index = 0; index < compressedPages; index++) {
        pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
    }
    // CRC covers the whole page-aligned compressed region, including the zeroed tail.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);
    return pages;
}
// Older CompressPages variant: txPages[0] is the transaction header page (left
// uncompressed); the remaining pages are copied contiguously, LZ4-compressed,
// and returned as raw page pointers with the header page first.
private byte*[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager) {
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    // Round the worst-case compressed size up to whole pages.
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);
    // [0 .. dataPagesCount) holds the raw copy; compression output starts right after.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    // Index 0 is the tx header page; data pages start at index 1.
    for (int index = 1; index < txPages.Count; index++) {
        var txPage = txPages[index];
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        NativeMethods.memcpy(write, scratchPage, count);
        write += count;
    }
    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var compressedPages = (len / AbstractPager.PageSize) + (len % AbstractPager.PageSize == 0 ? 0 : 1);
    var pages = new byte*[compressedPages + 1];
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
    var txHeader = (TransactionHeader*)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;
    pages[0] = txHeaderBase;
    for (int index = 0; index < compressedPages; index++) {
        pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
    }
    // NOTE(review): the CRC covers compressedPages * PageSize bytes, but unlike later
    // variants this one does not zero the tail of the last page first — confirm the
    // buffer's prior contents make this deterministic.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);
    return pages;
}
// Wraps one scratch pager together with its ordinal within the scratch pool;
// a fresh file starts with no pages allocated.
public ScratchBufferFile(IVirtualPager scratchPager, int scratchNumber)
{
    _allocatedPagesCount = 0;
    _scratchNumber = scratchNumber;
    _scratchPager = scratchPager;
}
// Creates in-memory-style options backed by a platform-appropriate temp pager;
// an atomically incremented counter keeps each instance's identity distinct.
public PureMemoryStorageEnvironmentOptions()
{
    _instanceId = Interlocked.Increment(ref _counter);
    if (RunningOnPosix)
    {
        _dataPager = new PosixTempMemoryMapPager("_data.pager", InitialFileSize);
    }
    else
    {
        _dataPager = new Win32PageFileBackedMemoryMappedPager("data.pager", InitialFileSize);
    }
}
// Wraps one scratch pager together with its ordinal within the scratch pool;
// a fresh file starts with zero bytes of its allocation in use.
public ScratchBufferFile(IVirtualPager scratchPager, int scratchNumber)
{
    _allocatedPagesUsedSize = 0;
    _scratchNumber = scratchNumber;
    _scratchPager = scratchPager;
}
// Builds a map from page number to its position in the journal for every page the
// given transaction wrote, advancing currentPage past all of the transaction's
// pages (an overflow page and its continuation pages are skipped in one jump).
public static Dictionary<long, JournalFile.PagePosition> GetTransactionToPageTranslation(this TransactionHeader current, IVirtualPager pager, ref int currentPage)
{
    var translation = new Dictionary<long, JournalFile.PagePosition>();
    for (var i = 0; i < current.PageCount; i++)
    {
        Debug.Assert(pager.Disposed == false);
        var page = pager.Read(currentPage);
        translation[page.PageNumber] = new JournalFile.PagePosition
        {
            JournalPos = currentPage,
            TransactionId = current.TransactionId
        };
        // Overflow pages occupy several consecutive journal pages; step over them all.
        currentPage += page.IsOverflow
            ? pager.GetNumberOfOverflowPages(page.OverflowSize)
            : 1;
    }
    return translation;
}