Example no. 1
        // TODO: Test if adding padding helps under contention
        //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

        protected PoolArena(
            PooledByteBufferAllocator parent,
            int pageSize,
            int maxOrder,
            int pageShifts,
            int chunkSize)
        {
            this.Parent              = parent;
            this.PageSize            = pageSize;
            this.maxOrder            = maxOrder;
            this.PageShifts          = pageShifts;
            this.ChunkSize           = chunkSize;
            this.SubpageOverflowMask = ~(pageSize - 1);
            this.tinySubpagePools    = this.NewSubpagePoolArray(NumTinySubpagePools);
            for (int i = 0; i < this.tinySubpagePools.Length; i++)
            {
                this.tinySubpagePools[i] = this.NewSubpagePoolHead(pageSize);
            }

            this.NumSmallSubpagePools = pageShifts - 9;
            this.smallSubpagePools    = this.NewSubpagePoolArray(this.NumSmallSubpagePools);
            for (int i = 0; i < this.smallSubpagePools.Length; i++)
            {
                this.smallSubpagePools[i] = this.NewSubpagePoolHead(pageSize);
            }

            this.q100  = new PoolChunkList<T>(this, null, 100, int.MaxValue, chunkSize);
            this.q075  = new PoolChunkList<T>(this, this.q100, 75, 100, chunkSize);
            this.q050  = new PoolChunkList<T>(this, this.q075, 50, 100, chunkSize);
            this.q025  = new PoolChunkList<T>(this, this.q050, 25, 75, chunkSize);
            this.q000  = new PoolChunkList<T>(this, this.q025, 1, 50, chunkSize);
            this.qInit = new PoolChunkList<T>(this, this.q000, int.MinValue, 25, chunkSize);

            this.q100.PrevList(this.q075);
            this.q075.PrevList(this.q050);
            this.q050.PrevList(this.q025);
            this.q025.PrevList(this.q000);
            this.q000.PrevList(null);
            this.qInit.PrevList(this.qInit);

            var metrics = new List<IPoolChunkListMetric>(6);

            metrics.Add(this.qInit);
            metrics.Add(this.q000);
            metrics.Add(this.q025);
            metrics.Add(this.q050);
            metrics.Add(this.q075);
            metrics.Add(this.q100);
            this.chunkListMetrics = metrics;
        }
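
For orientation, here is a minimal sketch of what the constructor's derived values work out to under the default configuration resolved in Example no. 6 (pageSize = 8192, maxOrder = 11), assuming pageShifts is the base-2 logarithm of the page size (13 for 8192); the snippet is illustrative only and does not touch DotNetty itself:

    using System;

    static class PoolArenaDefaultsSketch
    {
        static void Main()
        {
            const int pageSize   = 8192;  // default io.netty.allocator.pageSize
            const int pageShifts = 13;    // log2(8192), assumed result of ValidateAndCalculatePageShifts
            const int maxOrder   = 11;    // default io.netty.allocator.maxOrder

            int subpageOverflowMask  = ~(pageSize - 1);      // 0xFFFFE000: clears the low 13 bits
            int numSmallSubpagePools = pageShifts - 9;       // 4 pools: 512, 1024, 2048 and 4096 bytes
            int chunkSize            = pageSize << maxOrder; // 16,777,216 bytes = 16 MiB per chunk

            Console.WriteLine($"subpageOverflowMask  = 0x{subpageOverflowMask:X8}");
            Console.WriteLine($"numSmallSubpagePools = {numSmallSubpagePools}");
            Console.WriteLine($"chunkSize            = {chunkSize}");
        }
    }

The six PoolChunkList instances wired up above partition chunks by how full they are (qInit up to 25%, q000 1-50%, q025 25-75%, q050 50-100%, q075 75-100%, q100 exactly 100%), so a chunk moves between neighbouring lists as its usage rises and falls.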
Example no. 2
        void Init0(PoolChunk<T> chunk, long handle, int offset, int length, int maxLength, PoolThreadCache<T> cache)
        {
            Debug.Assert(handle >= 0);
            Debug.Assert(chunk != null);

            this.Chunk     = chunk;
            this.Memory    = chunk.Memory;
            this.allocator = chunk.Arena.Parent;
            this.Cache     = cache;
            this.Handle    = handle;
            this.Offset    = offset;
            this.Length    = length;
            this.MaxLength = maxLength;
        }
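
The fields assigned in Init0 describe a window into a chunk's backing memory: the buffer owns no storage of its own, it only records which chunk it lives in and at what offset and length. The class below is a simplified, self-contained analogue of that idea; ChunkView and its byte[] parameter are hypothetical stand-ins, not DotNetty types, and PoolChunk<T>.Memory is only mimicked by a plain array:

    // Hypothetical stand-in: a pooled "buffer" is a slice (offset, length) over
    // memory owned by a much larger chunk, so many buffers share one allocation.
    sealed class ChunkView
    {
        readonly byte[] memory;  // mimics PoolChunk<T>.Memory
        readonly int offset;     // start of this buffer's region inside the chunk
        readonly int length;     // usable bytes in the region

        public ChunkView(byte[] memory, int offset, int length)
        {
            this.memory = memory;
            this.offset = offset;
            this.length = length;
        }

        public byte this[int index]
        {
            get
            {
                if ((uint)index >= (uint)this.length)
                    throw new System.IndexOutOfRangeException();
                return this.memory[this.offset + index];
            }
        }
    }

With a 16 MiB chunk array, new ChunkView(chunkMemory, 4096, 256) exposes bytes 4096..4351 of the shared array without copying, which is essentially what Chunk, Offset and Length record above.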
Example no. 3
 public DirectArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
     : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
 {
     this.memoryChunks = new List<MemoryChunk>();
 }
Example no. 4
 public HeapArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
     : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
 {
 }
Example no. 5
 internal PooledByteBufferAllocatorMetric(PooledByteBufferAllocator allocator)
 {
     this.allocator = allocator;
 }
Example no. 6
        static PooledByteBufferAllocator()
        {
            int       defaultPageSize       = SystemPropertyUtil.GetInt("io.netty.allocator.pageSize", 8192);
            Exception pageSizeFallbackCause = null;

            try
            {
                ValidateAndCalculatePageShifts(defaultPageSize);
            }
            catch (Exception t)
            {
                pageSizeFallbackCause = t;
                defaultPageSize       = 8192;
            }
            DefaultPageSize = defaultPageSize;

            int       defaultMaxOrder       = SystemPropertyUtil.GetInt("io.netty.allocator.maxOrder", 11);
            Exception maxOrderFallbackCause = null;

            try
            {
                ValidateAndCalculateChunkSize(DefaultPageSize, defaultMaxOrder);
            }
            catch (Exception t)
            {
                maxOrderFallbackCause = t;
                defaultMaxOrder       = 11;
            }
            DefaultMaxOrder = defaultMaxOrder;

            // todo: Determine reasonable default for heapArenaCount
            // Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.

            // Use 2 * cores by default to reduce contention as we use 2 * cores for the number of EventLoops
            // in NIO and EPOLL as well. If we choose a smaller number we will run into hotspots as allocation and
            // deallocation needs to be synchronized on the PoolArena.
            // See https://github.com/netty/netty/issues/3888
            int defaultMinNumArena = Environment.ProcessorCount * 2;

            DefaultNumHeapArena   = Math.Max(0, SystemPropertyUtil.GetInt("io.netty.allocator.numHeapArenas", defaultMinNumArena));
            DefaultNumDirectArena = Math.Max(0, SystemPropertyUtil.GetInt("io.netty.allocator.numDirectArenas", defaultMinNumArena));

            // cache sizes
            DefaultTinyCacheSize   = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
            DefaultSmallCacheSize  = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
            DefaultNormalCacheSize = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);

            // 32 KB is the default maximum capacity of a cached buffer, similar to what is described in
            // 'Scalable memory allocation using jemalloc'.
            DefaultMaxCachedBufferCapacity = SystemPropertyUtil.GetInt("io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);

            // the threshold number of allocations after which cache entries that are not frequently used are freed
            DefaultCacheTrimInterval = SystemPropertyUtil.GetInt(
                "io.netty.allocator.cacheTrimInterval", 8192);

            if (Logger.DebugEnabled)
            {
                Logger.Debug("-Dio.netty.allocator.numHeapArenas: {}", DefaultNumHeapArena);
                Logger.Debug("-Dio.netty.allocator.numDirectArenas: {}", DefaultNumDirectArena);
                if (pageSizeFallbackCause == null)
                {
                    Logger.Debug("-Dio.netty.allocator.pageSize: {}", DefaultPageSize);
                }
                else
                {
                    Logger.Debug("-Dio.netty.allocator.pageSize: {}", DefaultPageSize, pageSizeFallbackCause);
                }
                if (maxOrderFallbackCause == null)
                {
                    Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DefaultMaxOrder);
                }
                else
                {
                    Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DefaultMaxOrder, maxOrderFallbackCause);
                }
                Logger.Debug("-Dio.netty.allocator.chunkSize: {}", DefaultPageSize << DefaultMaxOrder);
                Logger.Debug("-Dio.netty.allocator.tinyCacheSize: {}", DefaultTinyCacheSize);
                Logger.Debug("-Dio.netty.allocator.smallCacheSize: {}", DefaultSmallCacheSize);
                Logger.Debug("-Dio.netty.allocator.normalCacheSize: {}", DefaultNormalCacheSize);
                Logger.Debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DefaultMaxCachedBufferCapacity);
                Logger.Debug("-Dio.netty.allocator.cacheTrimInterval: {}", DefaultCacheTrimInterval);
            }

            Default = new PooledByteBufferAllocator(PlatformDependent.DirectBufferPreferred);
        }
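
Once this static constructor has run, the shared instance is available through PooledByteBufferAllocator.Default, and with the defaults above each chunk is DefaultPageSize << DefaultMaxOrder = 8192 << 11 = 16 MiB. A minimal usage sketch, assuming the usual DotNetty.Buffers surface (IByteBuffer, Buffer(int), WriteInt/ReadInt and reference counting via Release):

    using System;
    using DotNetty.Buffers;

    static class AllocatorUsageSketch
    {
        static void Main()
        {
            // Allocate a pooled buffer from the shared allocator configured above.
            IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(256);
            try
            {
                buffer.WriteInt(42);
                Console.WriteLine(buffer.ReadInt());  // prints 42
            }
            finally
            {
                // Pooled buffers are reference counted; releasing one returns its
                // memory to the arena or the thread-local cache.
                buffer.Release();
            }
        }
    }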
Example no. 7
 public PoolThreadLocalCache(PooledByteBufferAllocator owner)
 {
     this.owner = owner;
 }