Example #1
        public ScratchBufferFile(AbstractPager scratchPager, int scratchNumber)
        {
            _scratchPager        = scratchPager;
            _scratchNumber       = scratchNumber;
            _allocatedPagesCount = 0;

            scratchPager.AllocatedInBytesFunc = () => AllocatedPagesCount * Constants.Storage.PageSize;
            _strongRefToAllocateInBytesFunc   = new StrongReference <Func <long> > {
                Value = scratchPager.AllocatedInBytesFunc
            };
            MemoryInformation.DirtyMemoryObjects.TryAdd(_strongRefToAllocateInBytesFunc);

            DebugInfo = new ScratchFileDebugInfo(this);

            _disposeOnceRunner = new DisposeOnce <SingleAttempt>(() =>
            {
                _strongRefToAllocateInBytesFunc.Value = null; // drop the ref (clears any leftover reference in DirtyMemoryObjects and also acts as _disposed = true for a racy func invoke)
                MemoryInformation.DirtyMemoryObjects.TryRemove(_strongRefToAllocateInBytesFunc);
                _strongRefToAllocateInBytesFunc = null;

                _scratchPager.PagerState.DiscardOnTxCopy = true;
                _scratchPager.Dispose();
                ClearDictionaries();
            });
        }
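
Note: DisposeOnce<SingleAttempt> is a RavenDB/Voron helper that guarantees the teardown delegate runs at most once. As a rough, self-contained sketch of the same idea (the type and names below are illustrative stand-ins, not RavenDB's API):

    using System;
    using System.Threading;

    // Minimal dispose-once runner: the teardown delegate runs at most once,
    // even if Dispose is called concurrently from multiple threads.
    public sealed class DisposeOnceSketch : IDisposable
    {
        private Action _teardown;

        public DisposeOnceSketch(Action teardown)
        {
            _teardown = teardown ?? throw new ArgumentNullException(nameof(teardown));
        }

        public void Dispose()
        {
            // Atomically take ownership of the delegate; later callers get null and do nothing.
            Interlocked.Exchange(ref _teardown, null)?.Invoke();
        }
    }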
Example #2
        public StorageEnvironment(StorageEnvironmentOptions options, LoggerSetup loggerSetup)
        {
            try
            {
                _loggerSetup       = loggerSetup;
                _options           = options;
                _dataPager         = options.DataPager;
                _freeSpaceHandling = new FreeSpaceHandling();
                _headerAccessor    = new HeaderAccessor(this);
                var isNew = _headerAccessor.Initialize();

                _scratchBufferPool = new ScratchBufferPool(this);

                _journal = new WriteAheadJournal(this);

                if (isNew)
                {
                    CreateNewDatabase();
                }
                else // existing db, let us load it
                {
                    LoadExistingDatabase();
                }

                if (_options.ManualFlushing == false)
                {
                    Task.Run(IdleFlushTimer);
                }
            }
            catch (Exception)
            {
                Dispose();
                throw;
            }
        }
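
The try/catch around the whole constructor body is the usual pattern for types that acquire several disposable resources: if any later step throws, Dispose() releases whatever was already created and the original exception propagates. A minimal, self-contained sketch of that pattern (the resources here are hypothetical placeholders):

    using System;
    using System.IO;

    public sealed class TwoResourceHolder : IDisposable
    {
        private readonly Stream _first;
        private readonly Stream _second;

        public TwoResourceHolder()
        {
            try
            {
                _first  = new MemoryStream(); // succeeds
                _second = new MemoryStream(); // if this threw, _first must not leak
            }
            catch
            {
                Dispose(); // release whatever was already acquired
                throw;     // then let the caller see the original failure
            }
        }

        public void Dispose()
        {
            _second?.Dispose();
            _first?.Dispose();
        }
    }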
Example #3
 protected void AssertKey(Slice key)
 {
     if (AbstractPager.IsKeySizeValid(key.Size) == false)
     {
         throw new ArgumentException(string.Format("The key must be a maximum of 2,000 bytes in UTF8, key is: '{0}'", key), "key");
     }
 }
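
AbstractPager.IsKeySizeValid is Voron's own check; the 2,000-byte figure is taken from the exception message above, so treat the constant below as an assumption. A self-contained sketch of an equivalent guard over a UTF-8 string key:

    using System;
    using System.Text;

    public static class KeyGuard
    {
        private const int MaxKeySizeInBytes = 2000; // figure quoted in the example's message

        public static void AssertKey(string key)
        {
            var size = Encoding.UTF8.GetByteCount(key);
            if (size > MaxKeySizeInBytes)
                throw new ArgumentException(
                    $"The key must be a maximum of {MaxKeySizeInBytes:N0} bytes in UTF8, key is: '{key}'",
                    nameof(key));
        }
    }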
Example #4
 public LazyTransactionBuffer(StorageEnvironmentOptions options)
 {
     _options = options;
     _lazyTransactionPager         = CreateBufferPager();
     _transactionPersistentContext = new TransactionPersistentContext(true);
     _log = LoggingSource.Instance.GetLogger <LazyTransactionBuffer>(options.BasePath.FullPath);
 }
Example #5
        public void ToStream(AbstractPager src, long startPage, long numberOfPages, Stream output)
        {
            // In case of encryption, we don't want to decrypt the data for backup,
            // so let's work directly with the underlying encrypted data (Inner pager).

            if ((_buffer.Length % Constants.Storage.PageSize) != 0)
            {
                throw new ArgumentException("The buffer length must be a multiple of the page size");
            }

            var steps = _buffer.Length / Constants.Storage.PageSize;

            using (var tempTx = new TempPagerTransaction())
                fixed(byte *pBuffer = _buffer)
                {
                    for (long i = startPage; i < startPage + numberOfPages; i += steps)
                    {
                        var pagesToCopy = (int)(i + steps > startPage + numberOfPages ? startPage + numberOfPages - i : steps);
                        src.EnsureMapped(tempTx, i, pagesToCopy);
                        var ptr = src.AcquireRawPagePointer(tempTx, i);
                        Memory.Copy(pBuffer, ptr, pagesToCopy * Constants.Storage.PageSize);
                        output.Write(_buffer, 0, pagesToCopy * Constants.Storage.PageSize);
                    }
                }
        }
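
The method above chunks a large copy through a fixed-size managed buffer: map the next range of pages, grab the raw pointer, memcpy into the pinned buffer, then write that slice to the output stream. The same chunking pattern, self-contained over plain unmanaged memory (no pager types involved):

    using System;
    using System.IO;

    public static unsafe class ChunkedCopy
    {
        // Copies `length` bytes from unmanaged memory into `output`,
        // `buffer.Length` bytes at a time through a pinned managed buffer.
        public static void ToStream(byte* source, long length, byte[] buffer, Stream output)
        {
            fixed (byte* pBuffer = buffer)
            {
                for (long offset = 0; offset < length; offset += buffer.Length)
                {
                    var toCopy = (int)Math.Min(buffer.Length, length - offset);
                    Buffer.MemoryCopy(source + offset, pBuffer, buffer.Length, toCopy);
                    output.Write(buffer, 0, toCopy);
                }
            }
        }
    }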
Example #6
 public LazyTransactionBuffer(StorageEnvironmentOptions options)
 {
     _lazyTransactionPager         = options.CreateTemporaryBufferPager("lazy-transactions.buffer", options.InitialFileSize ?? options.InitialLogFileSize);
     _transactionPersistentContext = new TransactionPersistentContext(true);
     _log     = LoggingSource.Instance.GetLogger <LazyTransactionBuffer>(options.BasePath.FullPath);
     _options = options;
 }
Example #7
 public ScratchBufferFile(AbstractPager scratchPager, int scratchNumber)
 {
     _scratchPager           = scratchPager;
     _scratchNumber          = scratchNumber;
     _allocatedPagesUsedSize = 0;
     _pageSize = scratchPager.PageSize;
 }
Example #8
 public JournalReader(AbstractPager pager, AbstractPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous, int recoverPage = 0)
 {
     RequireHeaderUpdate = false;
     _pager                   = pager;
     _recoveryPager           = recoveryPager;
     _lastSyncedTransactionId = lastSyncedTransactionId;
     _readingPage             = 0;
     _recoveryPage            = recoverPage;
     LastTransactionHeader    = previous;
 }
Example #9
 public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager,
                      long lastSyncedTransactionId, TransactionHeader *previous)
 {
     RequireHeaderUpdate      = false;
     _journalPager            = journalPager;
     _dataPager               = dataPager;
     _recoveryPager           = recoveryPager;
     _lastSyncedTransactionId = lastSyncedTransactionId;
     _readingPage             = 0;
     LastTransactionHeader    = previous;
 }
Example #10
        private void RecoverCurrentJournalSize(AbstractPager pager)
        {
            var journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * pager.PageSize);

            if (journalSize >= _env.Options.MaxLogFileSize) // can't set for more than the max log file size
            {
                return;
            }

            // this sets the size of the _next_ journal file
            _currentJournalFileSize = Math.Min(journalSize, _env.Options.MaxLogFileSize);
        }
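
Bits.NextPowerOf2 here rounds the journal's current byte size up to a power of two, so journal files grow geometrically until they hit MaxLogFileSize. On .NET 6+ the same rounding exists in the BCL; a minimal sketch:

    using System.Numerics;

    public static class JournalSizing
    {
        // Round up to the next power of two, e.g. 3_000_000 -> 4_194_304.
        // BitOperations.RoundUpToPowerOf2 is available from .NET 6.
        public static long NextPowerOf2(long value)
        {
            return (long)BitOperations.RoundUpToPowerOf2((ulong)value);
        }
    }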
Example #11
        public StorageEnvironment(StorageEnvironmentOptions options)
        {
            try
            {
                _log               = LoggingSource.Instance.GetLogger <StorageEnvironment>(options.BasePath);
                _options           = options;
                _dataPager         = options.DataPager;
                _freeSpaceHandling = new FreeSpaceHandling();
                _headerAccessor    = new HeaderAccessor(this);

                _options.DeleteAllTempBuffers();

                _decompressionBuffers = new DecompressionBuffersPool(options);
                var isNew = _headerAccessor.Initialize();

                _scratchBufferPool = new ScratchBufferPool(this);

                if (PlatformDetails.RunningOnPosix &&
                    options.BasePath != null &&
                    IsStorageSupportingO_Direct(options.BasePath) == false)
                {
                    options.SafePosixOpenFlags &= ~PerPlatformValues.OpenFlags.O_DIRECT;
                    var message = "Path " + options.BasePath +
                                  " does not support O_DIRECT writes. As a result, data durability is not guaranteed";
                    _options.InvokeNonDurabaleFileSystemError(this, message, null);
                }

                options.PosixOpenFlags = options.SafePosixOpenFlags;

                _journal = new WriteAheadJournal(this);

                if (isNew)
                {
                    CreateNewDatabase();
                }
                else // existing db, let us load it
                {
                    LoadExistingDatabase();
                }

                if (_options.ManualFlushing == false)
                {
                    Task.Run(IdleFlushTimer);
                }
            }
            catch (Exception)
            {
                Dispose();
                throw;
            }
        }
Example #12
        public ScratchBufferFile(AbstractPager scratchPager, int scratchNumber)
        {
            _scratchPager        = scratchPager;
            _scratchNumber       = scratchNumber;
            _allocatedPagesCount = 0;

            scratchPager.AllocatedInBytesFunc = () => AllocatedPagesCount * Constants.Storage.PageSize;

            _disposeOnceRunner = new DisposeOnce <SingleAttempt>(() =>
            {
                _scratchPager.PagerState.DiscardOnTxCopy = true;
                _scratchPager.Dispose();
            });
        }
Example #13
            public DecompressionBuffer(AbstractPager pager, long position, int size, DecompressionBuffersPool pool, int index, LowLevelTransaction tx)
            {
                _pager    = pager;
                _position = position;
                _size     = size;
                _pool     = pool;
                _index    = index;
                _pager.EnsureMapped(tx, _position, _size / Constants.Storage.PageSize);
                var ptr = _pager.AcquirePagePointer(tx, position);

                TempPage = new TemporaryPage(ptr, size)
                {
                    ReturnTemporaryPageToPool = this
                };
            }
Example #14
        public void EnsureSize(int sizeInPages)
        {
            try
            {
                _lazyTransactionPager.EnsureContinuous(0, sizeInPages);
            }
            catch (InsufficientMemoryException)
            {
                // RavenDB-10830: failed to lock memory of temp buffers in an encrypted db, so create a new file with the initial size

                _lazyTransactionPager.Dispose();
                _lazyTransactionPager = CreateBufferPager();
                throw;
            }
        }
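
On InsufficientMemoryException the buffer pager is thrown away and recreated at its initial size before rethrowing, so the next transaction starts from a small file instead of the oversized one that failed. A generic sketch of this dispose-recreate-rethrow pattern (the types here are stand-ins, not Voron's pager):

    using System;
    using System.IO;

    public sealed class RecreatableBuffer
    {
        private MemoryStream _backing = new MemoryStream();

        public void EnsureCapacity(int sizeInBytes)
        {
            try
            {
                if (_backing.Capacity < sizeInBytes)
                    _backing.Capacity = sizeInBytes;
            }
            catch (OutOfMemoryException)
            {
                // Drop the failed, oversized backing store and start over small;
                // the caller still observes the failure.
                _backing.Dispose();
                _backing = new MemoryStream();
                throw;
            }
        }
    }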
Example #15
        public StorageEnvironment(StorageEnvironmentOptions options)
        {
            try
            {
                _log               = LoggingSource.Instance.GetLogger <StorageEnvironment>(options.BasePath.FullPath);
                _options           = options;
                _dataPager         = options.DataPager;
                _freeSpaceHandling = new FreeSpaceHandling();
                _headerAccessor    = new HeaderAccessor(this);
                NumOfConcurrentSyncsPerPhysDrive = options.NumOfConcurrentSyncsPerPhysDrive;
                TimeToSyncAfterFlashInSec        = options.TimeToSyncAfterFlashInSec;

                Debug.Assert(_dataPager.NumberOfAllocatedPages != 0);

                var remainingBits = _dataPager.NumberOfAllocatedPages % (8 * sizeof(long));

                _validPages = new long[_dataPager.NumberOfAllocatedPages / (8 * sizeof(long)) + (remainingBits == 0 ? 0 : 1)];
                _validPages[_validPages.Length - 1] |= unchecked (((long)ulong.MaxValue << (int)remainingBits));

                _decompressionBuffers = new DecompressionBuffersPool(options);
                var isNew = _headerAccessor.Initialize();

                _scratchBufferPool = new ScratchBufferPool(this);

                options.SetPosixOptions();

                _journal = new WriteAheadJournal(this);

                if (isNew)
                {
                    CreateNewDatabase();
                }
                else // existing db, let us load it
                {
                    LoadExistingDatabase();
                }

                if (_options.ManualFlushing == false)
                {
                    Task.Run(IdleFlushTimer);
                }
            }
            catch (Exception)
            {
                Dispose();
                throw;
            }
        }
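
The _validPages setup above allocates one bit per data page (64 pages per long) and pre-sets the unused tail bits of the last word so scans never treat them as missing pages. The same math as a standalone sketch; note that shifting by 0 leaves the mask all-ones, so when the page count is an exact multiple of 64 the unconditional OR above would set the whole last word, which this sketch guards against:

    public static class PageBitmap
    {
        // One bit per page, 64 pages per long word. Tail bits past the last
        // real page are pre-set so scans never land on them.
        public static long[] Create(long numberOfPages)
        {
            const int bitsPerWord = 8 * sizeof(long); // 64
            var remainingBits = (int)(numberOfPages % bitsPerWord);
            var words = numberOfPages / bitsPerWord + (remainingBits == 0 ? 0 : 1);
            var bitmap = new long[words];

            if (remainingBits != 0)
                bitmap[bitmap.Length - 1] |= unchecked((long)(ulong.MaxValue << remainingBits));

            return bitmap;
        }
    }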
Example #16
        public void ToStream(AbstractPager src, long startPage, long numberOfPages,
                             Stream output, Action <string> infoNotify, CancellationToken cancellationToken)
        {
            // In case of encryption, we don't want to decrypt the data for backup,
            // so let's work directly with the underlying encrypted data (Inner pager).

            if ((_buffer.Length % Constants.Storage.PageSize) != 0)
            {
                throw new ArgumentException("The buffer length must be a multiple of the page size");
            }

            var  steps       = _buffer.Length / Constants.Storage.PageSize;
            long totalCopied = 0;
            var  toBeCopied  = new Size(numberOfPages * Constants.Storage.PageSize, SizeUnit.Bytes).ToString();
            var  totalSw     = Stopwatch.StartNew();
            var  sw          = Stopwatch.StartNew();

            using (var tempTx = new TempPagerTransaction())
                fixed(byte *pBuffer = _buffer)
                {
                    for (var i = startPage; i < startPage + numberOfPages; i += steps)
                    {
                        cancellationToken.ThrowIfCancellationRequested();

                        var pagesToCopy = (int)(i + steps > startPage + numberOfPages ? startPage + numberOfPages - i : steps);
                        src.EnsureMapped(tempTx, i, pagesToCopy);
                        var ptr           = src.AcquireRawPagePointer(tempTx, i);
                        var copiedInBytes = pagesToCopy * Constants.Storage.PageSize;
                        Memory.Copy(pBuffer, ptr, copiedInBytes);
                        output.Write(_buffer, 0, copiedInBytes);

                        totalCopied += copiedInBytes;

                        if (sw.ElapsedMilliseconds > 500)
                        {
                            infoNotify($"Copied: {new Size(totalCopied, SizeUnit.Bytes)} / {toBeCopied}");
                            sw.Restart();
                        }
                    }
                }

            var totalSecElapsed = Math.Max((double)totalSw.ElapsedMilliseconds / 1000, 0.0001);

            infoNotify?.Invoke($"Finshed copying {new Size(totalCopied, SizeUnit.Bytes)}, " +
                               $"{new Size((long)(totalCopied / totalSecElapsed), SizeUnit.Bytes)}/sec");
        }
Example #17
        public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager,
                             long lastSyncedTransactionId, TransactionHeader *previous)
        {
            RequireHeaderUpdate               = false;
            _journalPager                     = journalPager;
            _dataPager                        = dataPager;
            _recoveryPager                    = recoveryPager;
            _lastSyncedTransactionId          = lastSyncedTransactionId;
            _readAt4Kb                        = 0;
            LastTransactionHeader             = previous;
            _journalPagerNumberOfAllocated4Kb =
                _journalPager.TotalAllocationSize / (4 * Constants.Size.Kilobyte);

            if (journalPager.Options.EncryptionEnabled)
            {
                _encryptionBuffers = new List <EncryptionBuffer>();
            }
        }
Example #18
        public WriteAheadJournal(StorageEnvironment env)
        {
            _env       = env;
            _dataPager = _env.Options.DataPager;
            _currentJournalFileSize = env.Options.InitialLogFileSize;
            _headerAccessor         = env.HeaderAccessor;
            _updateLogInfo          = header =>
            {
                var journalFilesCount = _files.Count;
                var currentJournal    = journalFilesCount > 0 ? _journalIndex : -1;
                header->Journal.CurrentJournal               = currentJournal;
                header->Journal.JournalFilesCount            = journalFilesCount;
                header->IncrementalBackup.LastCreatedJournal = _journalIndex;
            };

            _compressionPager  = _env.Options.CreateScratchPager("compression.buffers");
            _journalApplicator = new JournalApplicator(this);
        }
Example #19
        public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager, HashSet <long> modifiedPages, JournalInfo journalInfo, TransactionHeader *previous)
        {
            RequireHeaderUpdate               = false;
            _journalPager                     = journalPager;
            _dataPager                        = dataPager;
            _recoveryPager                    = recoveryPager;
            _modifiedPages                    = modifiedPages;
            _journalInfo                      = journalInfo;
            _readAt4Kb                        = 0;
            LastTransactionHeader             = previous;
            _journalPagerNumberOfAllocated4Kb =
                _journalPager.TotalAllocationSize / (4 * Constants.Size.Kilobyte);

            if (journalPager.Options.Encryption.IsEnabled)
            {
                _encryptionBuffers = new List <EncryptionBuffer>();
            }
        }
Example #20
        private void EnsureInitialized()
        {
            if (_initialized)
            {
                return;
            }

            lock (_decompressionPagerLock)
            {
                if (_initialized)
                {
                    return;
                }

                _pool             = new[] { new ConcurrentQueue <DecompressionBuffer>() };
                _compressionPager = CreateDecompressionPager(DecompressedPagesCache.Size * Constants.Compression.MaxPageSize);
                _oldPagers        = ImmutableAppendOnlyList <AbstractPager> .Empty;
                _initialized      = true;
            }
        }
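
EnsureInitialized is textbook double-checked locking: a cheap read outside the lock, then a re-check inside it. A self-contained sketch of the pattern; the flag is marked volatile so the unlocked fast-path read safely observes the published state:

    public sealed class LazilyInitialized
    {
        private readonly object _lock = new object();
        private volatile bool _initialized; // volatile: the unlocked fast-path read needs it
        private int[] _state;

        public void EnsureInitialized()
        {
            if (_initialized)
                return;

            lock (_lock)
            {
                if (_initialized) // another thread may have won while we waited
                    return;

                _state = new int[16]; // build the state first...
                _initialized = true;  // ...then publish it last
            }
        }
    }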
Example #21
        public async Task <HttpResponseMessage> Put(string name, bool preserveTimestamps = false)
        {
            var metadata = GetFilteredMetadataFromHeaders(ReadInnerHeaders);
            var etag     = GetEtag();

            if (name.Length > AbstractPager.GetMaxKeySize())
            {
                if (Log.IsDebugEnabled)
                {
                    Log.Debug("File '{0}' was not created due to illegal name length", name);
                }

                return(GetMessageWithString(string.Format("File '{0}' was not created due to illegal name length", name), HttpStatusCode.BadRequest));
            }

            var options = new FileActions.PutOperationOptions();

            long contentSize;

            if (long.TryParse(GetHeader(Constants.FileSystem.RavenFsSize), out contentSize))
            {
                options.ContentSize = contentSize;
            }

            DateTimeOffset lastModified;

            if (DateTimeOffset.TryParse(GetHeader(Constants.RavenLastModified), out lastModified))
            {
                options.LastModified = lastModified;
            }

            options.PreserveTimestamps      = preserveTimestamps;
            options.ContentLength           = Request.Content.Headers.ContentLength;
            options.TransferEncodingChunked = Request.Headers.TransferEncodingChunked ?? false;

            await FileSystem.Files.PutAsync(name, etag, metadata, () => Request.Content.ReadAsStreamAsync(), options).ConfigureAwait(false);

            SynchronizationTask.Context.NotifyAboutWork();

            return(GetEmptyMessage(HttpStatusCode.Created));
        }
Example #22
        public void ToStream(AbstractPager src, long startPage, long numberOfPages, Stream output)
        {
            if ((_buffer.Length % src.PageSize) != 0)
            {
                throw new ArgumentException("The buffer length must be a multiple of the page size");
            }

            var steps = _buffer.Length / src.PageSize;

            using (var tempTx = new TempPagerTransaction())
                fixed(byte *pBuffer = _buffer)
                {
                    for (long i = startPage; i < startPage + numberOfPages; i += steps)
                    {
                        var pagesToCopy = (int)(i + steps > startPage + numberOfPages ? startPage + numberOfPages - i : steps);
                        src.EnsureMapped(tempTx, i, pagesToCopy);
                        var ptr = src.AcquirePagePointer(tempTx, i);
                        Memory.Copy(pBuffer, ptr, pagesToCopy * src.PageSize);
                        output.Write(_buffer, 0, pagesToCopy * src.PageSize);
                    }
                }
        }
Example #23
        public HttpResponseMessage Patch(string name, string rename)
        {
            name   = FileHeader.Canonize(name);
            rename = FileHeader.Canonize(rename);
            var etag = GetEtag();

            if (rename.Length > AbstractPager.GetMaxKeySize())
            {
                if (Log.IsDebugEnabled)
                {
                    Log.Debug("File '{0}' was not renamed to '{1}' due to illegal name length", name, rename);
                }
                return(GetMessageWithString(string.Format("File '{0}' was not renamed to '{1}' due to illegal name length", name, rename), HttpStatusCode.BadRequest));
            }

            Storage.Batch(accessor =>
            {
                FileSystem.Synchronizations.AssertFileIsNotBeingSynced(name);

                var existingFile = accessor.ReadFile(name);
                if (existingFile == null || existingFile.Metadata.Value <bool>(SynchronizationConstants.RavenDeleteMarker))
                {
                    throw new FileNotFoundException();
                }

                var renamingFile = accessor.ReadFile(rename);
                if (renamingFile != null && renamingFile.Metadata.Value <bool>(SynchronizationConstants.RavenDeleteMarker) == false)
                {
                    throw new FileExistsException("Cannot rename because file " + rename + " already exists");
                }

                var metadata = existingFile.Metadata;

                if (etag != null && existingFile.Etag != etag)
                {
                    throw new ConcurrencyException("Operation attempted on file '" + name + "' using a non current etag")
                    {
                        ActualETag   = existingFile.Etag,
                        ExpectedETag = etag
                    };
                }

                Historian.UpdateLastModified(metadata);

                var operation = new RenameFileOperation(name, rename, existingFile.Etag, metadata);

                accessor.SetConfig(RavenFileNameHelper.RenameOperationConfigNameForFile(name), JsonExtensions.ToJObject(operation));
                accessor.PulseTransaction(); // commit rename operation config

                Files.ExecuteRenameOperation(operation);
            });

            if (Log.IsDebugEnabled)
            {
                Log.Debug("File '{0}' was renamed to '{1}'", name, rename);
            }

            SynchronizationTask.Context.NotifyAboutWork();

            return(GetEmptyMessage(HttpStatusCode.NoContent));
        }
Example #24
        private IntPtr[] CompressPages(LowLevelTransaction tx, int numberOfPages, AbstractPager compressionPager)
        {
            // numberOfPages includes the tx header page, which we don't compress
            var dataPagesCount = numberOfPages - 1;

            int pageSize    = tx.Environment.Options.PageSize;
            var sizeInBytes = dataPagesCount * pageSize;

            // We want to include the Transaction Header straight into the compression buffer.
            var outputBufferSize    = LZ4.MaximumOutputLength(sizeInBytes) + sizeof(TransactionHeader);
            var outputBufferInPages = outputBufferSize / pageSize +
                                      (outputBufferSize % pageSize == 0 ? 0 : 1);

            // The pages required includes the intermediate pages and the required output pages.
            var pagesRequired = (dataPagesCount + outputBufferInPages);
            var pagerState    = compressionPager.EnsureContinuous(0, pagesRequired);

            tx.EnsurePagerStateReference(pagerState);

            // We get the pointer to the compression buffer, which will be the buffer that will hold the whole thing.
            var outputBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);

            // Where we are going to store the input data contiguously, to compress it afterwards.
            var tempBuffer = compressionPager.AcquirePagePointer(tx, 0);
            var txPages    = tx.GetTransactionPages();
            var write      = tempBuffer;

            foreach (var txPage in txPages)
            {
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
                var count       = txPage.NumberOfPages * pageSize;
                Memory.Copy(write, scratchPage, count);
                write += count;
            }

            var compressionBuffer = outputBuffer + sizeof(TransactionHeader);
            var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBufferSize);

            int totalLength     = len + sizeof(TransactionHeader); // We need to account for the transaction header as part of the total length.
            var remainder       = totalLength % pageSize;
            var compressedPages = (totalLength / pageSize) + (remainder == 0 ? 0 : 1);

            if (remainder != 0)
            {
                // zero the remainder of the page
                UnmanagedMemory.Set(outputBuffer + totalLength, 0, pageSize - remainder);
            }

            var txHeaderPage = tx.GetTransactionHeaderPage();
            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
            var txHeader     = (TransactionHeader *)txHeaderBase;

            txHeader->Compressed       = true;
            txHeader->CompressedSize   = len;
            txHeader->UncompressedSize = sizeInBytes;
            txHeader->Hash             = Hashing.XXHash64.Calculate(compressionBuffer, len);

            // Copy the transaction header to the output buffer.
            Memory.Copy(outputBuffer, txHeaderBase, sizeof(TransactionHeader));

            var pages = new IntPtr[compressedPages];

            for (int index = 0; index < compressedPages; index++)
            {
                pages[index] = new IntPtr(outputBuffer + (index * pageSize));
            }

            return(pages);
        }
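
The page-count math above appears twice: size / pageSize + (size % pageSize == 0 ? 0 : 1) is ceiling division. For positive operands it is equivalent to the branch-free form sketched below (watch for overflow when the sum approaches the type's maximum):

    using System.Diagnostics;

    public static class PageMath
    {
        public static int PagesFor(int sizeInBytes, int pageSize)
        {
            var viaRemainder = sizeInBytes / pageSize + (sizeInBytes % pageSize == 0 ? 0 : 1);
            var viaCeiling   = (sizeInBytes + pageSize - 1) / pageSize; // same result, no branch
            Debug.Assert(viaRemainder == viaCeiling);
            return viaCeiling;
        }
    }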
Example #25
        public byte *DirectAdd(Slice key, int len, TreeNodeFlags nodeType = TreeNodeFlags.Data, ushort?version = null)
        {
            Debug.Assert(nodeType == TreeNodeFlags.Data || nodeType == TreeNodeFlags.MultiValuePageRef);

            if (State.InWriteTransaction)
            {
                State.IsModified = true;
            }

            if (_llt.Flags != TransactionFlags.ReadWrite)
            {
                throw new ArgumentException("Cannot add a value in a read only transaction");
            }

            if (AbstractPager.IsKeySizeValid(key.Size) == false)
            {
                throw new ArgumentException($"Key size is too big, must be at most {AbstractPager.MaxKeySize} bytes, but was {(key.Size + AbstractPager.RequiredSpaceForNewNode)}", nameof(key));
            }

            Func <TreeCursor> cursorConstructor;
            TreeNodeHeader *  node;
            var foundPage = FindPageFor(key, out node, out cursorConstructor);

            var page = ModifyPage(foundPage);

            ushort nodeVersion            = 0;
            bool?  shouldGoToOverflowPage = null;

            if (page.LastMatch == 0) // this is an update operation
            {
                node = page.GetNode(page.LastSearchPosition);

                Debug.Assert(SliceComparer.EqualsInline(TreeNodeHeader.ToSlicePtr(_llt.Allocator, node), key));

                shouldGoToOverflowPage = ShouldGoToOverflowPage(len);

                byte *pos;
                if (shouldGoToOverflowPage == false)
                {
                    // optimization for Data and MultiValuePageRef - try to overwrite existing node space
                    if (TryOverwriteDataOrMultiValuePageRefNode(node, key, len, nodeType, version, out pos))
                    {
                        return(pos);
                    }
                }
                else
                {
                    // optimization for PageRef - try to overwrite existing overflows
                    if (TryOverwriteOverflowPages(node, key, len, version, out pos))
                    {
                        return(pos);
                    }
                }

                RemoveLeafNode(page, out nodeVersion);
            }
            else // new item should be recorded
            {
                State.NumberOfEntries++;
            }

            CheckConcurrency(key, version, nodeVersion, TreeActionType.Add);

            var   lastSearchPosition = page.LastSearchPosition; // searching for overflow pages might change this
            byte *overFlowPos        = null;
            var   pageNumber         = -1L;

            if (shouldGoToOverflowPage ?? ShouldGoToOverflowPage(len))
            {
                pageNumber = WriteToOverflowPages(len, out overFlowPos);
                len        = -1;
                nodeType   = TreeNodeFlags.PageRef;
            }

            byte *dataPos;

            if (page.HasSpaceFor(_llt, key, len) == false)
            {
                using (var cursor = cursorConstructor())
                {
                    cursor.Update(cursor.Pages.First, page);

                    var pageSplitter = new TreePageSplitter(_llt, this, key, len, pageNumber, nodeType, nodeVersion, cursor);
                    dataPos = pageSplitter.Execute();
                }

                DebugValidateTree(State.RootPageNumber);
            }
            else
            {
                switch (nodeType)
                {
                case TreeNodeFlags.PageRef:
                    dataPos = page.AddPageRefNode(lastSearchPosition, key, pageNumber);
                    break;

                case TreeNodeFlags.Data:
                    dataPos = page.AddDataNode(lastSearchPosition, key, len, nodeVersion);
                    break;

                case TreeNodeFlags.MultiValuePageRef:
                    dataPos = page.AddMultiValueNode(lastSearchPosition, key, len, nodeVersion);
                    break;

                default:
                    throw new NotSupportedException("Unknown node type for direct add operation: " + nodeType);
                }
                page.DebugValidate(_llt, State.RootPageNumber);
            }
            if (overFlowPos != null)
            {
                return(overFlowPos);
            }
            return(dataPos);
        }
Example #26
        public IDisposable GetTemporaryPage(LowLevelTransaction tx, int pageSize, out TemporaryPage tmp)
        {
            if (pageSize < Constants.Storage.PageSize)
            {
                ThrowInvalidPageSize(pageSize);
            }

            if (pageSize > Constants.Compression.MaxPageSize)
            {
                ThrowPageSizeTooBig(pageSize);
            }

            Debug.Assert(pageSize == Bits.NextPowerOf2(pageSize));

            EnsureInitialized();

            var index = GetTempPagesPoolIndex(pageSize);

            if (_pool.Length <= index)
            {
                lock (_expandPoolLock)
                {
                    if (_pool.Length <= index) // someone could get the lock and add it meanwhile
                    {
                        var oldSize = _pool.Length;

                        var newPool = new ConcurrentQueue <DecompressionBuffer> [index + 1];
                        Array.Copy(_pool, newPool, _pool.Length);
                        for (var i = oldSize; i < newPool.Length; i++)
                        {
                            newPool[i] = new ConcurrentQueue <DecompressionBuffer>();
                        }
                        _pool = newPool;
                    }
                }
            }

            DecompressionBuffer buffer;

            var queue = _pool[index];

            tmp = null;

            while (queue.TryDequeue(out buffer))
            {
                if (buffer.CanReuse == false)
                {
                    continue;
                }

                try
                {
                    buffer.EnsureValidPointer(tx);
                    tmp = buffer.TempPage;
                    break;
                }
                catch (ObjectDisposedException)
                {
                    // the pager could have been disposed during cleanup
                }
            }

            if (tmp == null)
            {
                var allocationInPages = pageSize / Constants.Storage.PageSize;

                lock (_decompressionPagerLock) // once we fill up the pool we won't be allocating additional pages frequently
                {
                    if (_lastUsedPage + allocationInPages > _maxNumberOfPagesInScratchBufferPool)
                    {
                        CreateNewBuffersPager(_options.MaxScratchBufferSize);
                    }

                    try
                    {
                        _compressionPager.EnsureContinuous(_lastUsedPage, allocationInPages);
                    }
                    catch (InsufficientMemoryException)
                    {
                        // RavenDB-10830: failed to lock memory of temp buffers in an encrypted db, so create a new file with the initial size

                        CreateNewBuffersPager(DecompressedPagesCache.Size * Constants.Compression.MaxPageSize);
                        throw;
                    }

                    buffer = new DecompressionBuffer(_compressionPager, _lastUsedPage, pageSize, this, index, tx);

                    _lastUsedPage += allocationInPages;

                    void CreateNewBuffersPager(long size)
                    {
                        _oldPagers        = _oldPagers.Append(_compressionPager);
                        _compressionPager = CreateDecompressionPager(size);
                        _lastUsedPage     = 0;
                    }
                }

                tmp = buffer.TempPage;
            }

            Interlocked.Add(ref _currentlyUsedBytes, pageSize);

            return(tmp.ReturnTemporaryPageToPool);
        }
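
GetTempPagesPoolIndex isn't shown on this page; given the power-of-two assertion at the top of the method, a plausible bucket index is the log2 of the size ratio. This is an assumption about the helper's shape, not RavenDB's actual code:

    using System.Numerics;

    public static class PoolBuckets
    {
        private const int BasePageSize = 8192; // assumed stand-in for Constants.Storage.PageSize

        // Hypothetical bucket index for a pageSize that is a power of two
        // and at least BasePageSize: 8 KB -> 0, 16 KB -> 1, 32 KB -> 2, ...
        public static int GetIndex(int pageSize)
        {
            return BitOperations.Log2((uint)(pageSize / BasePageSize));
        }
    }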
Example #27
 public ScratchBufferFile(AbstractPager scratchPager, int scratchNumber)
 {
     _scratchPager        = scratchPager;
     _scratchNumber       = scratchNumber;
     _allocatedPagesCount = 0;
 }
Example #28
        private static void Backup(
            StorageEnvironment env, CompressionLevel compression, Action <string> infoNotify,
            Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
        {
            var  usedJournals       = new List <JournalFile>();
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            LowLevelTransaction txr = null;
            var backupSuccess       = false;

            try
            {
                long allocatedPages;
                var  writePersistentContext = new TransactionPersistentContext(true);
                var  readPersistentContext  = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(writePersistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    txr            = env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read);   // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;

                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    infoNotify("Voron copy headers for " + basePath);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

                    // journal files snapshot
                    var files = env.Journal.Files; // thread safety copy

                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                         journalNum <= journalInfo.CurrentJournal;
                         journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                        // first check the journal files currently in use
                        if (journalFile == null)
                        {
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                            }

                            journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }

                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
                    }

                    // txw.Commit(); intentionally not committing
                }

                backupStarted?.Invoke();

                // data file backup
                var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
                Debug.Assert(dataPart != null);

                if (allocatedPages > 0) // false only if the dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now we can copy everything else
                        copier.ToStream(dataPager, 0, allocatedPages, dataStream);
                    }
                }

                try
                {
                    long lastBackedupJournal = 0;
                    foreach (var journalFile in usedJournals)
                    {
                        var entryName   = StorageEnvironmentOptions.JournalName(journalFile.Number);
                        var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);

                        Debug.Assert(journalPart != null);

                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy = lastWrittenLogPage + 1;
                        }

                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0}", entryName));
                        }

                        lastBackedupJournal = journalFile.Number;
                    }

                    if (env.Options.IncrementalBackupEnabled)
                    {
                        env.HeaderAccessor.Modify(header =>
                        {
                            header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;

                            // since we backed up everything, there's no need to start the next incremental backup from the middle
                            header->IncrementalBackup.LastBackedUpJournalPage = -1;
                        });
                    }
                    backupSuccess = true;
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
                    foreach (var journalFile in usedJournals)
                    {
                        if (backupSuccess)                                 // if backup succeeded we can remove journals
                        {
                            if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                journalFile.Number < lastSyncedJournal)    // prevent deletion of journals that aren't synced with the data file
                            {
                                journalFile.DeleteOnClose = true;
                            }
                        }

                        journalFile.Release();
                    }
                }
            }
            finally
            {
                txr?.Dispose();
            }
        }
Example #29
 public LazyTransactionBuffer(StorageEnvironmentOptions options)
 {
     _options = options;
     _lazyTransactionPager = _options.CreateScratchPager("lazy-transactions.buffer");
 }
Example #30
        private static void Backup(
            StorageEnvironment env, CompressionLevel compression, Action <string> infoNotify,
            Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
        {
            var  usedJournals       = new List <JournalFile>();
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            LowLevelTransaction txr = null;

            try
            {
                long allocatedPages;
                var  writePersistentContext = new TransactionPersistentContext(true);
                var  readPersistentContext  = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(writePersistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    txr            = env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read);   // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;

                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    infoNotify("Voron copy headers for " + basePath);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

                    // journal files snapshot
                    var files = env.Journal.Files; // thread safety copy

                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                         journalNum <= journalInfo.CurrentJournal;
                         journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum);
                        // first check the journal files currently in use
                        if (journalFile == null)
                        {
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                            }

                            journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }

                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                    }

                    // txw.Commit(); intentionally not committing
                }

                backupStarted?.Invoke();

                // data file backup
                var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
                Debug.Assert(dataPart != null);

                if (allocatedPages > 0) // false only if the dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now we can copy everything else
                        copier.ToStream(dataPager, 0, allocatedPages, dataStream);
                    }
                }

                try
                {
                    foreach (JournalFile journalFile in usedJournals)
                    {
                        var entryName   = Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalFile.Number));
                        var journalPart = package.CreateEntry(entryName, compression);

                        Debug.Assert(journalPart != null);

                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy = lastWrittenLogPage + 1;
                        }

                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0}", entryName));
                        }
                    }
                }
                finally
                {
                    foreach (var journalFile in usedJournals)
                    {
                        journalFile.Release();
                    }
                }
            }
            finally
            {
                txr?.Dispose();
            }
        }