Example #1
        public byte *Get(int numberOfPages, out int size, out NativeMemory.ThreadStats thread)
        {
            var numberOfPagesPowerOfTwo = Bits.PowerOf2(numberOfPages);

            size = numberOfPagesPowerOfTwo * Constants.Storage.PageSize;

            if (Disabled || numberOfPagesPowerOfTwo > MaxNumberOfPagesToCache)
            {
                // We don't want to pool large buffers
                size = numberOfPages * Constants.Storage.PageSize;
                return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
            }

            var index = Bits.MostSignificantBit(size);

            while (_items[index].TryPull(out var allocation))
            {
                if (allocation.InUse.Raise() == false)
                {
                    continue;
                }

                thread              = NativeMemory.ThreadAllocations.Value;
                thread.Allocations += size;
                return(allocation.Ptr);
            }

            return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
        }
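A note on the bucketing scheme shared by Examples #1, #4, and #9: the requested size is rounded up to a power of two, and the position of that single set bit picks the pool slot, so every slot holds buffers of exactly one size. A minimal self-contained sketch of the index math, using System.Numerics.BitOperations as a stand-in for the Bits helper (which is not shown in these examples):

        using System;
        using System.Numerics;

        public static class SizeClassSketch
        {
            // Round up to the next power of two, as Bits.PowerOf2 presumably does.
            public static long RoundUpToPowerOf2(long size)
                => size <= 1 ? 1 : 1L << (64 - BitOperations.LeadingZeroCount((ulong)(size - 1)));

            // The slot index is the bit position of the rounded size,
            // i.e. what Bits.MostSignificantBit appears to compute.
            public static int SlotIndex(long powerOfTwoSize)
                => BitOperations.Log2((ulong)powerOfTwoSize);

            public static void Main()
            {
                long size = RoundUpToPowerOf2(5 * 4096); // 5 pages of 4KB -> 32768
                Console.WriteLine(size);                 // 32768
                Console.WriteLine(SlotIndex(size));      // 15, so _items[15] holds the 32KB buffers
            }
        }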
Example #2
        private void GrowArena(int requestedSize)
        {
            if (requestedSize >= MaxArenaSize)
            {
                throw new ArgumentOutOfRangeException(nameof(requestedSize));
            }

            long newSize = GetPreferredSize(requestedSize);

            if (newSize > MaxArenaSize)
            {
                newSize = MaxArenaSize;
            }

            var newBuffer = NativeMemory.AllocateMemory(newSize, out var thread);

            // Save the old buffer pointer to be released when the arena is reset
            if (_olderBuffers == null)
            {
                _olderBuffers = new List <Tuple <IntPtr, long, NativeMemory.ThreadStats> >();
            }
            _olderBuffers.Add(Tuple.Create(new IntPtr(_ptrStart), _allocated, _allocatingThread));

            _allocatingThread = thread;

            _allocated = newSize;

            _ptrStart   = newBuffer;
            _ptrCurrent = _ptrStart;
            _used       = 0;
        }
Example #3
        public void Return(byte *ptr, long size, NativeMemory.ThreadStats allocatingThread)
        {
            if (ptr == null)
            {
                return;
            }

            size = Bits.PowerOf2(size);
            Sodium.sodium_memzero(ptr, (UIntPtr)size);

            if (size > Constants.Size.Megabyte * 16 || LowMemoryNotification.Instance.LowMemoryState)
            {
                // We don't want to pool large buffers, nor keep pooling while in a low-memory state
                PlatformSpecific.NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
                return;
            }

            var index = Bits.MostSignificantBit(size);

            _items[index].Push(new NativeAllocation()
            {
                Ptr = (IntPtr)ptr,
                AllocatingThread = allocatingThread,
                Size             = size
            });
        }
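Example #3 scrubs the buffer with sodium_memzero before pooling it, so stale (possibly sensitive) bytes never reach the next caller of Get. A hedged stand-alone illustration of the same scrub-before-pool idea; the single-bucket pool here is hypothetical, and Span<byte>.Clear stands in for libsodium:

        using System;
        using System.Collections.Concurrent;

        public static unsafe class ScrubbingPoolSketch
        {
            // Hypothetical single-bucket pool, for illustration only.
            private static readonly ConcurrentStack<IntPtr> Bucket = new ConcurrentStack<IntPtr>();

            public static void Return(byte* ptr, int size)
            {
                if (ptr == null)
                    return;

                // Zero before pooling. The examples above use Sodium.sodium_memzero,
                // which additionally guarantees the compiler will not elide the wipe.
                new Span<byte>(ptr, size).Clear();

                Bucket.Push((IntPtr)ptr);
            }
        }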
Example #4
        public byte *Get(int size, out NativeMemory.ThreadStats thread)
        {
            size = Bits.PowerOf2(size);

            if (size > _maxBufferSizeToKeepInBytes)
            {
                // We don't want to pool large buffers
                return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
            }

            var index = Bits.MostSignificantBit(size);

            while (_items[index].TryPop(out var allocation))
            {
                if (allocation.InUse.Raise() == false)
                {
                    continue;
                }

                thread              = NativeMemory.ThreadAllocations.Value;
                thread.Allocations += size;
                return(allocation.Ptr);
            }

            return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
        }
Example #5
        public void Return(byte *ptr, long size, NativeMemory.ThreadStats allocatingThread, long generation)
        {
            if (ptr == null)
            {
                return;
            }

            Sodium.sodium_memzero(ptr, (UIntPtr)size);

            if (Disabled || size / Constants.Storage.PageSize > MaxNumberOfPagesToCache ||
                (_isLowMemory.IsRaised() && generation < Generation))
            {
                // - don't want to pool large buffers
                // - release all the buffers that were created before we got the low memory event
                PlatformSpecific.NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
                return;
            }

            // updating the thread allocations since we released the memory back to the pool
            NativeMemory.UpdateMemoryStatsForThread(allocatingThread, size);

            var index = Bits.MostSignificantBit(size);

            _items[index].Push(new NativeAllocation
            {
                Ptr         = ptr,
                Size        = size,
                InPoolSince = DateTime.UtcNow
            });
        }
Example #6
        public void Return(byte *ptr, long size, NativeMemory.ThreadStats allocatingThread, long generation)
        {
            if (ptr == null)
            {
                return;
            }

            size = Bits.PowerOf2(size);
            Interlocked.Add(ref _currentlyInUseBytes, -size);

            Sodium.sodium_memzero(ptr, (UIntPtr)size);

            if (Disabled || size > _maxBufferSizeToKeepInBytes || (_isLowMemory.IsRaised() && generation < Generation))
            {
                // - don't want to pool large buffers
                // - release all the buffers that were created before we got the low memory event
                PlatformSpecific.NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
                return;
            }

            // updating the thread allocations since we released the memory back to the pool
            NativeMemory.UpdateMemoryStatsForThread(allocatingThread, size);

            var index = Bits.MostSignificantBit(size);

            _items[index].Push(new NativeAllocation
            {
                Ptr         = ptr,
                Size        = size,
                InPoolSince = DateTime.UtcNow
            });
        }
Example #7
        public void Return(byte *ptr, int size, NativeMemory.ThreadStats allocatingThread)
        {
            if (ptr == null)
            {
                return;
            }

            size = Bits.NextPowerOf2(size);
            Sodium.ZeroMemory(ptr, size);

            if (size > Constants.Size.Megabyte * 16)
            {
                // We don't want to pool large buffers
                NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
                return;
            }

            var index = Bits.MostSignificantBit(size);

            _items[index].Push(new NativeAllocation()
            {
                Ptr = (IntPtr)ptr,
                AllocatingThread = allocatingThread,
                Size             = size
            });
        }
Example #8
        public void Return(byte *ptr, long size, NativeMemory.ThreadStats allocatingThread, long generation)
        {
            if (ptr == null)
            {
                return;
            }

            Interlocked.Add(ref _currentlyInUseBytes, -size);

            Sodium.sodium_memzero(ptr, (UIntPtr)size);

            var numberOfPages = size / Constants.Storage.PageSize;

            if (Disabled || numberOfPages > MaxNumberOfPagesToCache || (_isLowMemory.IsRaised() && generation < Generation))
            {
                // - don't want to pool large buffers
                // - release all the buffers that were created before we got the low memory event
                ForTestingPurposes?.OnFree4KbAlignedMemory?.Invoke(size);
                PlatformSpecific.NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
                return;
            }

            var index      = Bits.MostSignificantBit(size);
            var allocation = new NativeAllocation
            {
                Ptr         = ptr,
                Size        = size,
                InPoolSince = DateTime.UtcNow
            };

            var addToPerCorePool = ForTestingPurposes == null || ForTestingPurposes.CanAddToPerCorePool;
            var success          = addToPerCorePool ? _items[index].TryPush(allocation) : false;

            if (success)
            {
                // updating the thread allocations since we released the memory back to the pool
                ForTestingPurposes?.OnUpdateMemoryStatsForThread?.Invoke(size);
                NativeMemory.UpdateMemoryStatsForThread(allocatingThread, size);
                return;
            }

            var addToGlobalPool = ForTestingPurposes == null || ForTestingPurposes.CanAddToGlobalPool;

            var currentGlobalStack = _globalStacks[index];

            if (addToGlobalPool && currentGlobalStack.Count < _maxNumberOfAllocationsToKeepInGlobalStackPerSlot)
            {
                // updating the thread allocations since we released the memory back to the pool
                ForTestingPurposes?.OnUpdateMemoryStatsForThread?.Invoke(size);
                NativeMemory.UpdateMemoryStatsForThread(allocatingThread, size);
                currentGlobalStack.Push(allocation);
                return;
            }

            ForTestingPurposes?.OnFree4KbAlignedMemory?.Invoke(size);
            PlatformSpecific.NativeMemory.Free4KbAlignedMemory(ptr, size, allocatingThread);
        }
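Example #8 layers two tiers behind Return: a buffer first tries the per-core slot, then a bounded global stack, and is freed for real only when both are unavailable (with the thread's stats updated only when pooling succeeds). A simplified sketch of that fallback chain; TryPushPerCore and the plain ConcurrentStack are stand-ins for the real per-core and global structures:

        using System;
        using System.Collections.Concurrent;

        public sealed class TwoTierPoolSketch
        {
            private readonly ConcurrentStack<IntPtr> _globalStack = new ConcurrentStack<IntPtr>();
            private readonly int _maxGlobalItems;

            public TwoTierPoolSketch(int maxGlobalItems) => _maxGlobalItems = maxGlobalItems;

            // Stand-in for the per-core tier; the real code uses fixed per-core slots.
            private bool TryPushPerCore(IntPtr buffer) => false; // pretend tier 1 is full

            public void Return(IntPtr buffer, Action<IntPtr> free)
            {
                if (TryPushPerCore(buffer))
                    return;                    // tier 1: per-core slot took it

                if (_globalStack.Count < _maxGlobalItems)
                {
                    _globalStack.Push(buffer); // tier 2: bounded global stack
                    return;
                }

                free(buffer);                  // both tiers full: actually release
            }
        }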
Example #9
        public byte *Get(int numberOfPages, out long size, out NativeMemory.ThreadStats thread)
        {
            var numberOfPagesPowerOfTwo = Bits.PowerOf2(numberOfPages);

            size = numberOfPagesPowerOfTwo * Constants.Storage.PageSize;

            if (Disabled || numberOfPagesPowerOfTwo > MaxNumberOfPagesToCache)
            {
                // We don't want to pool large buffers
                size = numberOfPages * Constants.Storage.PageSize;
                Interlocked.Add(ref _currentlyInUseBytes, size);

                return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
            }

            Interlocked.Add(ref _currentlyInUseBytes, size);

            var index = Bits.MostSignificantBit(size);
            NativeAllocation allocation;

            while (_items[index].TryPull(out allocation))
            {
                if (allocation.InUse.Raise() == false)
                {
                    continue;
                }

                thread              = NativeMemory.ThreadAllocations.Value;
                thread.Allocations += size;

                Debug.Assert(size == allocation.Size, $"size ({size}) == allocation.Size ({allocation.Size})");

                return(allocation.Ptr);
            }

            var currentGlobalStack = _globalStacks[index];

            while (currentGlobalStack.TryPop(out allocation))
            {
                if (allocation.InUse.Raise() == false)
                {
                    continue;
                }

                Debug.Assert(size == allocation.Size, $"size ({size}) == allocation.Size ({allocation.Size})");

                thread              = NativeMemory.ThreadAllocations.Value;
                thread.Allocations += size;

                return(allocation.Ptr);
            }

            return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
        }
Example #10
        internal void TransactionCompleted(LowLevelTransaction tx)
        {
            if (ActiveTransactions.TryRemove(tx) == false)
            {
                return;
            }
            try
            {
                if (tx.Flags != TransactionFlags.ReadWrite)
                {
                    return;
                }

                if (tx.FlushedToJournal)
                {
                    var totalPages = 0;
                    // ReSharper disable once LoopCanBeConvertedToQuery
                    foreach (var page in tx.GetTransactionPages())
                    {
                        totalPages += page.NumberOfPages;
                    }

                    Interlocked.Add(ref SizeOfUnflushedTransactionsInJournalFile, totalPages);

                    if (tx.IsLazyTransaction == false)
                    {
                        GlobalFlushingBehavior.GlobalFlusher.Value.MaybeFlushEnvironment(this);
                    }
                }

                if (tx.AsyncCommit != null)
                {
                    return;
                }

                _currentTransactionHolder = null;
                _writeTransactionRunning.Reset();
                _transactionWriter.Release();

                if (tx.FlushInProgressLockTaken)
                {
                    FlushInProgressLock.ExitReadLock();
                }
            }
            finally
            {
                if (tx.AlreadyAllowedDisposeWithLazyTransactionRunning == false)
                {
                    _envDispose.Signal();
                }
            }
        }
Example #11
        private void GrowArena(int requestedSize)
        {
            if (requestedSize >= MaxArenaSize)
            {
                throw new ArgumentOutOfRangeException(nameof(requestedSize));
            }

            // we need the next allocation to cover at least the next expansion (also doubling),
            // so we'll allocate 3 times as much as was requested, or twice as much as we already have;
            // the idea is that a single allocation can serve for multiple (increasing in size) calls
            long newSize = Math.Max(Bits.NextPowerOf2(requestedSize) * 3, _allocated);

            if (newSize > MaxArenaSize)
            {
                newSize = MaxArenaSize;
            }

            if (Logger.IsInfoEnabled)
            {
                if (newSize > 512 * 1024 * 1024)
                {
                    Logger.Info(
                        $"Arena main buffer reached size of {newSize:#,#;0} bytes (previously {_allocated:#,#;0} bytes), check if you forgot to reset the context. From now on we grow this arena in 1GB chunks.");
                }
                Logger.Info(
                    $"Increased size of buffer from {_allocated:#,#;0} to {newSize:#,#;0} because we need {requestedSize:#,#;0}. _used={_used:#,#;0}");
            }

            NativeMemory.ThreadStats thread;
            var newBuffer = NativeMemory.AllocateMemory(newSize, out thread);

            // Save the old buffer pointer to be released when the arena is reset
            if (_olderBuffers == null)
            {
                _olderBuffers = new List <Tuple <IntPtr, long, NativeMemory.ThreadStats> >();
            }
            _olderBuffers.Add(Tuple.Create(new IntPtr(_ptrStart), _allocated, _allocatingThread));

            _allocatingThread = thread;

            _allocated = newSize;

            _ptrStart   = newBuffer;
            _ptrCurrent = _ptrStart;
            _used       = 0;
        }
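To make the sizing comment above concrete: a request for 100,000 bytes is rounded up to the next power of two (131,072) and tripled to 393,216 bytes, unless the arena is already larger than that. A tiny worked example, approximating Bits.NextPowerOf2 with BitOperations:

        using System;
        using System.Numerics;

        public static class GrowthMathSketch
        {
            public static void Main()
            {
                int requestedSize = 100_000;
                long allocated    = 64 * 1024; // pretend the arena currently holds 64KB

                long nextPow2 = 1L << (64 - BitOperations.LeadingZeroCount((ulong)(requestedSize - 1)));
                long newSize  = Math.Max(nextPow2 * 3, allocated);

                Console.WriteLine(nextPow2); // 131072
                Console.WriteLine(newSize);  // 393216: one allocation covers several growing requests
            }
        }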
Example #12
        private void GrowArena(int requestedSize)
        {
            if (requestedSize >= MaxArenaSize)
            {
                throw new ArgumentOutOfRangeException(nameof(requestedSize), requestedSize,
                                                      $"Requested arena resize to {requestedSize} while current size is {_allocated} and maximum size is {MaxArenaSize}");
            }

            long newSize = GetPreferredSize(requestedSize);

            if (newSize > MaxArenaSize)
            {
                newSize = MaxArenaSize;
            }

            byte *newBuffer;

            NativeMemory.ThreadStats thread;
            try
            {
                newBuffer = NativeMemory.AllocateMemory(newSize, out thread);
            }
            catch (OutOfMemoryException oom)
                when (oom.Data?.Contains("Recoverable") != true) // this can be raised if the commit charge is low
            {
                // we were too eager with memory allocations?
                newBuffer = NativeMemory.AllocateMemory(requestedSize, out thread);
                newSize   = requestedSize;
            }

            // Save the old buffer pointer to be released when the arena is reset
            if (_olderBuffers == null)
            {
                _olderBuffers = new List <Tuple <IntPtr, long, NativeMemory.ThreadStats> >();
            }
            _olderBuffers.Add(Tuple.Create(new IntPtr(_ptrStart), _allocated, _allocatingThread));

            _allocatingThread = thread;

            _allocated = newSize;

            _ptrStart   = newBuffer;
            _ptrCurrent = _ptrStart;
            _used       = 0;
        }
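The interesting part of Example #12 is the exception filter: the eager, oversized allocation may fail when the commit charge is low, and only then does the code retry with exactly the requested size (the original filter also lets "Recoverable" OOMs propagate). A minimal sketch of that retry-smaller pattern, with Marshal.AllocHGlobal standing in for NativeMemory.AllocateMemory:

        using System;
        using System.Runtime.InteropServices;

        public static class RetrySmallerSketch
        {
            public static IntPtr Allocate(long preferredSize, long requestedSize)
            {
                try
                {
                    // Eager allocation: grab room for future growth as well.
                    return Marshal.AllocHGlobal((IntPtr)preferredSize);
                }
                catch (OutOfMemoryException)
                {
                    // We were too eager; fall back to exactly what the caller needs.
                    return Marshal.AllocHGlobal((IntPtr)requestedSize);
                }
            }
        }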
Example #13
        private void GrowArena(int requestedSize)
        {
            if (_lowMemoryFlag)
            {
                throw new LowMemoryException($"Denying request to grow the arena by {requestedSize} bytes because we are under memory pressure");
            }

            if (requestedSize >= MaxArenaSize)
            {
                throw new ArgumentOutOfRangeException(nameof(requestedSize));
            }

            LowMemoryNotification.NotifyAllocationPending();

            // we need the next allocation to cover at least the next expansion (also doubling),
            // so we'll allocate 3 times as much as was requested, or at least the arena's initial size;
            // the idea is that a single allocation can serve for multiple (increasing in size) calls
            long newSize = Math.Max(Bits.NextPowerOf2(requestedSize) * 3, _initialSize);

            if (newSize > MaxArenaSize)
            {
                newSize = MaxArenaSize;
            }


            NativeMemory.ThreadStats thread;
            var newBuffer = NativeMemory.AllocateMemory(newSize, out thread);

            // Save the old buffer pointer to be released when the arena is reset
            if (_olderBuffers == null)
            {
                _olderBuffers = new List <Tuple <IntPtr, long, NativeMemory.ThreadStats> >();
            }
            _olderBuffers.Add(Tuple.Create(new IntPtr(_ptrStart), _allocated, _allocatingThread));

            _allocatingThread = thread;

            _allocated = newSize;

            _ptrStart   = newBuffer;
            _ptrCurrent = _ptrStart;
            _used       = 0;
        }
Example #14
        public byte *Get(int size, out NativeMemory.ThreadStats thread)
        {
            size = Bits.PowerOf2(size);

            if (size > Constants.Size.Megabyte * 16)
            {
                // We don't want to pool large buffers
                return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
            }

            var index = Bits.MostSignificantBit(size);

            if (_items[index].TryPop(out var allocation))
            {
                thread = allocation.AllocatingThread;
                return((byte *)allocation.Ptr);
            }

            return(PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out thread));
        }
Example #15
        public static bool TryIncreasingMemoryUsageForThread(NativeMemory.ThreadStats threadStats,
                                                             ref Size currentMaximumAllowedMemory,
                                                             Size currentlyInUse,
                                                             bool isRunningOn32Bits,
                                                             Logger logger,
                                                             out ProcessMemoryUsage currentUsage)
        {
            if (isRunningOn32Bits)
            {
                currentUsage = null;
                return(false);
            }

            // we ran out of our memory quota, so we need to see if we can increase it or break
            var memoryInfoResult = MemoryInformation.GetMemInfoUsingOneTimeSmapsReader();

            using (GetProcessMemoryUsage(out currentUsage, out var mappedSharedMem))
            {
                var memoryAssumedFreeOrCheapToFree = memoryInfoResult.AvailableWithoutTotalCleanMemory;

                // there isn't enough available memory to try; we want to leave some for other things
                if (memoryAssumedFreeOrCheapToFree <
                    Size.Min(memoryInfoResult.TotalPhysicalMemory / 50, new Size(1, SizeUnit.Gigabytes)))
                {
                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats.Name} which is already using {currentlyInUse}/{currentMaximumAllowedMemory} and the system has " +
                            $"{memoryInfoResult.AvailableWithoutTotalCleanMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{mappedSharedMem} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }

                    return(false);
                }

                // If there isn't enough here to double our current allocation, we won't allocate any more;
                // we do the check this way to prevent multiple indexes from hitting this at the
                // same time and each thinking that they have enough space
                if (memoryAssumedFreeOrCheapToFree < currentMaximumAllowedMemory)
                {
                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats} which is already using {currentlyInUse}/{currentMaximumAllowedMemory} and the system has" +
                            $"{memoryInfoResult.AvailableWithoutTotalCleanMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{mappedSharedMem} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }
                    return(false);
                }

                // even though we have twice as much memory as we currently have allocated, we will
                // only increment by 16MB to avoid over-allocation by multiple indexes. This way,
                // we'll check often as we go along
                var oldBudget = currentMaximumAllowedMemory;
                currentMaximumAllowedMemory = currentlyInUse + new Size(16, SizeUnit.Megabytes);

                if (logger.IsInfoEnabled)
                {
                    logger.Info(
                        $"Increasing memory budget for {threadStats.Name} which is using  {currentlyInUse}/{oldBudget} and the system has" +
                        $"{memoryInfoResult.AvailableWithoutTotalCleanMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM with {mappedSharedMem} in mmap " +
                        $"files that can be cleanly released. Budget increased to {currentMaximumAllowedMemory}");
                }

                return(true);
            }
        }
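The two guards in Examples #15-#17 reduce to simple arithmetic: give up if the memory that is free or cheap to free is below min(totalPhysical / 50, 1GB) or below the current budget; otherwise raise the budget only to currentlyInUse + 16MB, so competing indexes re-check frequently. A worked example with concrete numbers, using plain longs in place of the Size type:

        using System;

        public static class BudgetMathSketch
        {
            public static void Main()
            {
                long gb = 1024L * 1024 * 1024;
                long totalPhysical  = 32 * gb;
                long assumedFree    = 3 * gb;
                long currentBudget  = 2 * gb;
                long currentlyInUse = (long)(1.5 * gb);

                long floor = Math.Min(totalPhysical / 50, gb); // leave headroom for everything else
                if (assumedFree < floor || assumedFree < currentBudget)
                {
                    Console.WriteLine("not enough to proceed in batch");
                    return;
                }

                // Grow in small 16MB steps so several indexes re-check often
                // instead of each grabbing a large slice at once.
                long newBudget = currentlyInUse + 16 * 1024 * 1024;
                Console.WriteLine(newBudget); // 1627389952 bytes, i.e. ~1.52GB
            }
        }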
Example #16
        public static bool TryIncreasingMemoryUsageForThread(NativeMemory.ThreadStats threadStats,
                                                             ref Size currentMaximumAllowedMemory,
                                                             bool isRunningOn32Bits,
                                                             Logger logger,
                                                             out ProcessMemoryUsage currentUsage)
        {
            if (isRunningOn32Bits)
            {
                currentUsage = null;
                return(false);
            }

            var currentlyAllocated = new Size(threadStats.TotalAllocated, SizeUnit.Bytes);

            //TODO: This has to be exposed via debug endpoint

            // we ran out of our memory quota, so we need to see if we can increase it or break
            var memoryInfoResult = MemoryInformation.GetMemoryInfo();

            using (var currentProcess = Process.GetCurrentProcess())
            {
                // a lot of the memory that we use is actually from memory mapped files; as such, we can
                // rely on the OS to page it out (without needing to write, since it is read-only in this case),
                // so we try to calculate how much such memory we can use under this assumption
                var memoryMappedSize = new Size(currentProcess.WorkingSet64 - currentProcess.PrivateMemorySize64, SizeUnit.Bytes);

                currentUsage = new ProcessMemoryUsage(currentProcess.WorkingSet64, currentProcess.PrivateMemorySize64);

                if (memoryMappedSize < Size.Zero)
                {
                    // in this case, we are likely paging; our working set is smaller than the memory we allocated.
                    // it isn't _necessarily_ a bad thing, we might be paged on allocated memory we aren't using, but
                    // at any rate, we'll ignore that and just use the actual physical memory available
                    memoryMappedSize = Size.Zero;
                }
                var minMemoryToLeaveForMemoryMappedFiles = memoryInfoResult.TotalPhysicalMemory / 4;

                var memoryAssumedFreeOrCheapToFree = (memoryInfoResult.AvailableMemory + memoryMappedSize - minMemoryToLeaveForMemoryMappedFiles);

                // there isn't enough available memory to try; we want to leave some for other things
                if (memoryAssumedFreeOrCheapToFree < memoryInfoResult.TotalPhysicalMemory / 10)
                {
                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats.Name} which is already using {currentlyAllocated}/{currentMaximumAllowedMemory} and the system has" +
                            $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{memoryMappedSize} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }

                    return(false);
                }

                // If there isn't enough here to double our current allocation, we won't allocate any more;
                // we do the check this way to prevent multiple indexes from hitting this at the
                // same time and each thinking that they have enough space
                if (memoryAssumedFreeOrCheapToFree < currentMaximumAllowedMemory)
                {
                    // TODO: We probably need to make a note of this in log & expose in stats
                    // TODO: to explain why we aren't increasing the memory in use

                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats} which is already using {currentlyAllocated}/{currentMaximumAllowedMemory} and the system has" +
                            $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{memoryMappedSize} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }
                    return(false);
                }

                // even though we have twice as much memory as we currently have allocated, we will
                // only increment by 16MB to avoid over-allocation by multiple indexes. This way,
                // we'll check often as we go along
                var oldBudget = currentMaximumAllowedMemory;
                currentMaximumAllowedMemory = currentlyAllocated + new Size(16, SizeUnit.Megabytes);

                if (logger.IsInfoEnabled)
                {
                    logger.Info(
                        $"Increasing memory budget for {threadStats.Name} which is using  {currentlyAllocated}/{oldBudget} and the system has" +
                        $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM with {memoryMappedSize} in mmap " +
                        $"files that can be cleanly released. Budget increased to {currentMaximumAllowedMemory}");
                }

                return(true);
            }
        }
Example #17
        public static bool TryIncreasingMemoryUsageForThread(NativeMemory.ThreadStats threadStats,
                                                             ref Size currentMaximumAllowedMemory,
                                                             Size currentlyInUse,
                                                             bool isRunningOn32Bits,
                                                             Logger logger,
                                                             out ProcessMemoryUsage currentUsage)
        {
            if (isRunningOn32Bits)
            {
                currentUsage = null;
                return(false);
            }

            // we ran out of our memory quota, so we need to see if we can increase it or break
            var memoryInfoResult = MemoryInformation.GetMemoryInfo();

            using (var currentProcess = Process.GetCurrentProcess())
            {
                // a lot of the memory that we use is actually from memory mapped files; as such, we can
                // rely on the OS to page it out (without needing to write, since it is read-only in this case),
                // so we try to calculate how much such memory we can use under this assumption
                var mappedSharedMem = LowMemoryNotification.GetCurrentProcessMemoryMappedShared();

                currentUsage = new ProcessMemoryUsage(currentProcess.WorkingSet64,
                                                      Math.Max(0, currentProcess.WorkingSet64 - mappedSharedMem.GetValue(SizeUnit.Bytes)));

                var memoryAssumedFreeOrCheapToFree = memoryInfoResult.AvailableMemory + mappedSharedMem;

                // there isn't enough available memory to try; we want to leave some for other things
                if (memoryAssumedFreeOrCheapToFree <
                    Size.Min(memoryInfoResult.TotalPhysicalMemory / 50, new Size(1, SizeUnit.Gigabytes)))
                {
                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats.Name} which is already using {currentlyInUse}/{currentMaximumAllowedMemory} and the system has " +
                            $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{mappedSharedMem} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }

                    return(false);
                }

                // If there isn't enough here to double our current allocation, we won't allocate any more;
                // we do the check this way to prevent multiple indexes from hitting this at the
                // same time and each thinking that they have enough space
                if (memoryAssumedFreeOrCheapToFree < currentMaximumAllowedMemory)
                {
                    if (logger.IsInfoEnabled)
                    {
                        logger.Info(
                            $"{threadStats} which is already using {currentlyInUse}/{currentMaximumAllowedMemory} and the system has" +
                            $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM. Also have ~{mappedSharedMem} in mmap " +
                            "files that can be cleanly released, not enough to proceed in batch.");
                    }
                    return(false);
                }

                // even though we have twice as much memory as we currently have allocated, we will
                // only increment by 16MB to avoid over-allocation by multiple indexes. This way,
                // we'll check often as we go along
                var oldBudget = currentMaximumAllowedMemory;
                currentMaximumAllowedMemory = currentlyInUse + new Size(16, SizeUnit.Megabytes);

                if (logger.IsInfoEnabled)
                {
                    logger.Info(
                        $"Increasing memory budget for {threadStats.Name} which is using  {currentlyInUse}/{oldBudget} and the system has" +
                        $"{memoryInfoResult.AvailableMemory}/{memoryInfoResult.TotalPhysicalMemory} free RAM with {mappedSharedMem} in mmap " +
                        $"files that can be cleanly released. Budget increased to {currentMaximumAllowedMemory}");
                }

                return(true);
            }
        }
Example #18
        internal LowLevelTransaction NewLowLevelTransaction(TransactionPersistentContext transactionPersistentContext, TransactionFlags flags, ByteStringContext context = null, TimeSpan? timeout = null)
        {
            _cancellationTokenSource.Token.ThrowIfCancellationRequested();

            bool txLockTaken = false;
            bool flushInProgressReadLockTaken = false;

            try
            {
                IncrementUsageOnNewTransaction();

                if (flags == TransactionFlags.ReadWrite)
                {
                    var wait = timeout ?? (Debugger.IsAttached ? TimeSpan.FromMinutes(30) : TimeSpan.FromSeconds(30));

                    if (FlushInProgressLock.IsWriteLockHeld == false)
                    {
                        flushInProgressReadLockTaken = FlushInProgressLock.TryEnterReadLock(wait);
                    }

                    txLockTaken = _transactionWriter.Wait(wait);
                    if (txLockTaken == false || (flushInProgressReadLockTaken == false &&
                                                 FlushInProgressLock.IsWriteLockHeld == false))
                    {
                        GlobalFlushingBehavior.GlobalFlusher.Value.MaybeFlushEnvironment(this);
                        ThrowOnTimeoutWaitingForWriteTxLock(wait);
                    }
                    _cancellationTokenSource.Token.ThrowIfCancellationRequested();

                    _currentTransactionHolder = NativeMemory.ThreadAllocations.Value;
                    WriteTransactionStarted();

                    if (_endOfDiskSpace != null)
                    {
                        _endOfDiskSpace.AssertCanContinueWriting();

                        _endOfDiskSpace = null;
                        Task.Run(IdleFlushTimer);
                        GlobalFlushingBehavior.GlobalFlusher.Value.MaybeFlushEnvironment(this);
                    }
                }

                LowLevelTransaction tx;

                _txCommit.EnterReadLock();
                try
                {
                    _cancellationTokenSource.Token.ThrowIfCancellationRequested();

                    if (_currentTransactionHolder == null)
                    {
                        _currentTransactionHolder = NativeMemory.ThreadAllocations.Value;
                    }

                    long txId = flags == TransactionFlags.ReadWrite ? NextWriteTransactionId : CurrentReadTransactionId;
                    tx = new LowLevelTransaction(this, txId, transactionPersistentContext, flags, _freeSpaceHandling,
                                                 context)
                    {
                        FlushInProgressLockTaken = flushInProgressReadLockTaken,
                        CurrentTransactionHolder = _currentTransactionHolder
                    };
                    ActiveTransactions.Add(tx);
                }
                finally
                {
                    _txCommit.ExitReadLock();
                }

                var state = _dataPager.PagerState;
                tx.EnsurePagerStateReference(state);

                return(tx);
            }
            catch (Exception)
            {
                try
                {
                    if (txLockTaken)
                    {
                        _transactionWriter.Release();
                    }
                    if (flushInProgressReadLockTaken)
                    {
                        FlushInProgressLock.ExitReadLock();
                    }
                }
                finally
                {
                    DecrementUsageOnTransactionCreationFailure();
                }
                throw;
            }
        }
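Example #18's error handling hinges on one invariant: the write semaphore and the flush read lock are released if and only if they were actually taken. A stripped-down sketch of that acquire-with-timeout pattern around SemaphoreSlim (all names here are illustrative, not the engine's API):

        using System;
        using System.Threading;

        public sealed class WriteLockSketch
        {
            private readonly SemaphoreSlim _writer = new SemaphoreSlim(1, 1);

            public IDisposable EnterWrite(TimeSpan timeout)
            {
                bool taken = false;
                try
                {
                    taken = _writer.Wait(timeout);
                    if (!taken)
                        throw new TimeoutException("timed out waiting for the write tx lock");

                    // ... transaction setup would happen here and may throw ...
                    return new Releaser(_writer);
                }
                catch
                {
                    if (taken)
                        _writer.Release(); // release only what was actually acquired
                    throw;
                }
            }

            private sealed class Releaser : IDisposable
            {
                private readonly SemaphoreSlim _sem;
                public Releaser(SemaphoreSlim sem) => _sem = sem;
                public void Dispose() => _sem.Release();
            }
        }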