static PooledByteBufferAllocator()
        {
            int       defaultPageSize       = SystemPropertyUtil.GetInt("io.netty.allocator.pageSize", 8192);
            Exception pageSizeFallbackCause = null;

            try
            {
                ValidateAndCalculatePageShifts(defaultPageSize);
            }
            catch (Exception t)
            {
                pageSizeFallbackCause = t;
                defaultPageSize       = 8192;
            }
            DefaultPageSize = defaultPageSize;

            int       defaultMaxOrder       = SystemPropertyUtil.GetInt("io.netty.allocator.maxOrder", 11);
            Exception maxOrderFallbackCause = null;

            try
            {
                ValidateAndCalculateChunkSize(DefaultPageSize, defaultMaxOrder);
            }
            catch (Exception t)
            {
                maxOrderFallbackCause = t;
                defaultMaxOrder       = 11;
            }
            DefaultMaxOrder = defaultMaxOrder;

            // todo: Determine reasonable default for heapArenaCount
            // Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.

            // Use 2 * cores by default to reduce contention as we use 2 * cores for the number of EventLoops
            // in NIO and EPOLL as well. If we choose a smaller number we will run into hotspots as allocation and
            // deallocation need to be synchronized on the PoolArena.
            // See https://github.com/netty/netty/issues/3888
            int defaultMinNumArena = Environment.ProcessorCount * 2;

            DefaultNumHeapArena   = Math.Max(0, SystemPropertyUtil.GetInt("io.netty.allocator.numHeapArenas", defaultMinNumArena));
            DefaultNumDirectArena = Math.Max(0, SystemPropertyUtil.GetInt("io.netty.allocator.numDirectArenas", defaultMinNumArena));

            // cache sizes
            DefaultTinyCacheSize   = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
            DefaultSmallCacheSize  = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
            DefaultNormalCacheSize = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);

            // 32 KB is the default maximum capacity of the cached buffer. Similar to what is explained in
            // 'Scalable memory allocation using jemalloc'
            DefaultMaxCachedBufferCapacity = SystemPropertyUtil.GetInt("io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);

            // number of allocations after which cached entries that are not frequently used will be freed up
            DefaultCacheTrimInterval = SystemPropertyUtil.GetInt(
                "io.netty.allocator.cacheTrimInterval", 8192);

            Default = new PooledByteBufferAllocator(PlatformDependent.DirectBufferPreferred);
        }
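
A quick worked sketch of what these defaults amount to (my own illustration, not part of the source): with pageSize = 8192 and maxOrder = 11 each chunk is pageSize << maxOrder = 16 MiB, and both arena counts default to 2 * Environment.ProcessorCount. Assuming SystemPropertyUtil falls back to environment variables with the same key names, the defaults could be overridden before the type is first used:

    // Hedged sketch: override the values read by the static constructor above.
    // Assumption: SystemPropertyUtil resolves "io.netty.allocator.*" keys from environment variables.
    Environment.SetEnvironmentVariable("io.netty.allocator.pageSize", "16384");
    Environment.SetEnvironmentVariable("io.netty.allocator.maxOrder", "9");

    // With the stock defaults: chunkSize = 8192 << 11 = 16 MiB per chunk,
    // and DefaultNumHeapArena == DefaultNumDirectArena == 2 * Environment.ProcessorCount.
    IByteBufferAllocator allocator = PooledByteBufferAllocator.Default;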
 protected sealed override void Deallocate()
 {
     if (this.Handle >= 0)
     {
         long handle = this.Handle;
         this.Handle = -1;
         this.Memory = default(T);
         this.Chunk.Arena.Free(this.Chunk, handle, this.MaxLength, this.Cache);
         this.Chunk     = null;
         this.allocator = null;
         this.Recycle();
     }
 }
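
For context, a minimal lifecycle sketch (not taken from the source) of when Deallocate runs: releasing the last reference on a pooled buffer hands its handle back to the owning arena or thread-local cache and recycles the wrapper object.

    // Standard IByteBuffer usage; Release() dropping the reference count to zero triggers Deallocate() above.
    IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(256);
    buffer.WriteInt(42);
    // ... read/write the buffer ...
    buffer.Release(); // memory returns to the pool, the wrapper object is recycled for reuse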
        internal IByteBuffer Allocate(PooledByteBufferAllocator allocator)
        {
            Debug.Assert(allocator is object);

#if DEBUG
            if (Log.DebugEnabled)
            {
                Log.Debug("{} allocate, estimated size = {}", nameof(ReceiveBufferSizeEstimate), _receiveBufferSize);
            }
#endif

            return allocator.Buffer(_receiveBufferSize);
        }
Example n. 4
        void Init0(PoolChunk<T> chunk, long handle, int offset, int length, int maxLength, PoolThreadCache<T> cache)
        {
            Debug.Assert(handle >= 0);
            Debug.Assert(chunk != null);

            this.Chunk     = chunk;
            this.Memory    = chunk.Memory;
            this.allocator = chunk.Arena.Parent;
            this.Cache     = cache;
            this.Handle    = handle;
            this.Offset    = offset;
            this.Length    = length;
            this.MaxLength = maxLength;
        }
Example n. 5
        // TODO: Test if adding padding helps under contention
        //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

        protected PoolArena(
            PooledByteBufferAllocator parent,
            int pageSize,
            int maxOrder,
            int pageShifts,
            int chunkSize)
        {
            Parent              = parent;
            PageSize            = pageSize;
            _maxOrder           = maxOrder;
            PageShifts          = pageShifts;
            ChunkSize           = chunkSize;
            SubpageOverflowMask = ~(pageSize - 1);
            _tinySubpagePools   = NewSubpagePoolArray(NumTinySubpagePools);
            for (int i = 0; i < _tinySubpagePools.Length; i++)
            {
                _tinySubpagePools[i] = NewSubpagePoolHead(pageSize);
            }

            NumSmallSubpagePools = pageShifts - 9;
            _smallSubpagePools   = NewSubpagePoolArray(NumSmallSubpagePools);
            for (int i = 0; i < _smallSubpagePools.Length; i++)
            {
                _smallSubpagePools[i] = NewSubpagePoolHead(pageSize);
            }

            _q100  = new PoolChunkList<T>(this, null, 100, int.MaxValue, chunkSize);
            _q075  = new PoolChunkList<T>(this, _q100, 75, 100, chunkSize);
            _q050  = new PoolChunkList<T>(this, _q075, 50, 100, chunkSize);
            _q025  = new PoolChunkList<T>(this, _q050, 25, 75, chunkSize);
            _q000  = new PoolChunkList<T>(this, _q025, 1, 50, chunkSize);
            _qInit = new PoolChunkList<T>(this, _q000, int.MinValue, 25, chunkSize);

            _q100.PrevList(_q075);
            _q075.PrevList(_q050);
            _q050.PrevList(_q025);
            _q025.PrevList(_q000);
            _q000.PrevList(null);
            _qInit.PrevList(_qInit);

            var metrics = new List<IPoolChunkListMetric>(6);

            metrics.Add(_qInit);
            metrics.Add(_q000);
            metrics.Add(_q025);
            metrics.Add(_q050);
            metrics.Add(_q075);
            metrics.Add(_q100);
            _chunkListMetrics = metrics;
        }
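
A small illustration of what SubpageOverflowMask = ~(pageSize - 1) is for (my own example, assuming the same check Netty's isTinyOrSmall performs): a normalized capacity with no bits at or above the page boundary is smaller than a page and is served from the tiny/small subpage pools.

    // Illustration only: with pageSize = 8192, ~(pageSize - 1) keeps bits 13 and up.
    int pageSize = 8192;
    int subpageOverflowMask = ~(pageSize - 1);

    bool IsTinyOrSmall(int normalizedCapacity) => (normalizedCapacity & subpageOverflowMask) == 0;

    Console.WriteLine(IsTinyOrSmall(512));   // True  -> subpage (tiny/small) allocation
    Console.WriteLine(IsTinyOrSmall(16384)); // False -> normal allocation from a page run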
Example n. 6
        unsafe void Init0(PoolChunk<T> chunk, long handle, int offset, int length, int maxLength, PoolThreadCache<T> cache)
        {
            Debug.Assert(handle >= 0);
            Debug.Assert(chunk is object);

            Chunk      = chunk;
            Memory     = chunk.Memory;
            _allocator = chunk.Arena.Parent;
            Origin     = chunk.NativePointer;
            Cache      = cache;
            Handle     = handle;
            Offset     = offset;
            Length     = length;
            MaxLength  = maxLength;
        }
Example n. 7
        // TODO: Test if adding padding helps under contention
        //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

        protected PoolArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize, int maxChunkCount)
        {
            this.Parent              = parent;
            this.PageSize            = pageSize;
            this.maxOrder            = maxOrder;
            this.PageShifts          = pageShifts;
            this.ChunkSize           = chunkSize;
            this.maxChunkCount       = maxChunkCount;
            this.SubpageOverflowMask = ~(pageSize - 1);
            this.tinySubpagePools    = this.NewSubpagePoolArray(NumTinySubpagePools);
            for (int i = 0; i < this.tinySubpagePools.Length; i++)
            {
                this.tinySubpagePools[i] = this.NewSubpagePoolHead(pageSize);
            }

            this.NumSmallSubpagePools = pageShifts - 9;
            this.smallSubpagePools    = this.NewSubpagePoolArray(this.NumSmallSubpagePools);
            for (int i = 0; i < this.smallSubpagePools.Length; i++)
            {
                this.smallSubpagePools[i] = this.NewSubpagePoolHead(pageSize);
            }

            this.q100  = new PoolChunkList<T>(null, 100, int.MaxValue, chunkSize);
            this.q075  = new PoolChunkList<T>(this.q100, 75, 100, chunkSize);
            this.q050  = new PoolChunkList<T>(this.q075, 50, 100, chunkSize);
            this.q025  = new PoolChunkList<T>(this.q050, 25, 75, chunkSize);
            this.q000  = new PoolChunkList<T>(this.q025, 1, 50, chunkSize);
            this.qInit = new PoolChunkList<T>(this.q000, int.MinValue, 25, chunkSize);

            this.q100.PrevList(this.q075);
            this.q075.PrevList(this.q050);
            this.q050.PrevList(this.q025);
            this.q025.PrevList(this.q000);
            this.q000.PrevList(null);
            this.qInit.PrevList(this.qInit);

            var metrics = new List<IPoolChunkListMetric>(6);

            metrics.Add(this.qInit);
            metrics.Add(this.q000);
            metrics.Add(this.q025);
            metrics.Add(this.q050);
            metrics.Add(this.q075);
            metrics.Add(this.q100);
            this.chunkListMetrics = metrics;
        }
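
For orientation, a sketch of the usage bands wired up by both constructors above (my reading of the arguments, not an authoritative description): a fresh chunk starts in qInit and moves forward as it fills and backward as buffers are freed; q000 has no previous list, so a chunk that empties there is released, while qInit points back to itself so new, mostly empty chunks are never destroyed.

    // Minimum/maximum usage (percent) per list, taken from the constructor calls above.
    var usageBands = new (string Name, int MinUsage, int MaxUsage)[]
    {
        ("qInit", int.MinValue, 25),
        ("q000",  1,            50),
        ("q025",  25,           75),
        ("q050",  50,           100),
        ("q075",  75,           100),
        ("q100",  100,          int.MaxValue),
    };

    foreach ((string name, int min, int max) in usageBands)
    {
        Console.WriteLine($"{name}: {min}% .. {max}%");
    }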
Example n. 8
 public PoolThreadLocalCache(PooledByteBufferAllocator owner)
 {
     this.owner = owner;
 }
Example n. 9
 internal PooledByteBufferAllocatorMetric(PooledByteBufferAllocator allocator)
 {
     _allocator = allocator;
 }
Example n. 10
 public DirectArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
     : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
 {
     _memoryChunks = new List<MemoryChunk>();
 }
Example n. 11
 public HeapArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
     : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
 {
 }
        async Task EnsureServerInitializedAsync()
        {
            if (this.ServerAddress != null)
            {
                return;
            }

            int threadCount = Environment.ProcessorCount;
            var executorGroup = new MultithreadEventLoopGroup(threadCount);
            var bufAllocator = new PooledByteBufferAllocator(16 * 1024, 10 * 1024 * 1024 / threadCount); // reserve 10 MB for 64 KB buffers
            BlobSessionStatePersistenceProvider sessionStateProvider = await BlobSessionStatePersistenceProvider.CreateAsync(
                this.settingsProvider.GetSetting("BlobSessionStatePersistenceProvider.StorageConnectionString"),
                this.settingsProvider.GetSetting("BlobSessionStatePersistenceProvider.StorageContainerName"));
            TableQos2StatePersistenceProvider qos2StateProvider = await TableQos2StatePersistenceProvider.CreateAsync(
                this.settingsProvider.GetSetting("TableQos2StatePersistenceProvider.StorageConnectionString"),
                this.settingsProvider.GetSetting("TableQos2StatePersistenceProvider.StorageTableName"));
            var settings = new Settings(this.settingsProvider);
            var authProvider = new SasTokenAuthenticationProvider();
            var topicNameRouter = new TopicNameRouter();

            DeviceClientFactoryFunc deviceClientFactoryFactory = IotHubDeviceClient.PreparePoolFactory(settings.IotHubConnectionString + ";DeviceId=stub", "a", 1);

            ServerBootstrap server = new ServerBootstrap()
                .Group(executorGroup)
                .Channel<TcpServerSocketChannel>()
                .ChildOption(ChannelOption.Allocator, bufAllocator)
                .ChildOption(ChannelOption.AutoRead, false)
                .ChildHandler(new ActionChannelInitializer<IChannel>(ch =>
                {
                    ch.Pipeline.AddLast(TlsHandler.Server(this.tlsCertificate));
                    ch.Pipeline.AddLast(
                        MqttEncoder.Instance,
                        new MqttDecoder(true, 256 * 1024),
                        new MqttIotHubAdapter(
                            settings,
                            deviceClientFactoryFactory,
                            sessionStateProvider,
                            authProvider,
                            topicNameRouter,
                            qos2StateProvider),
                        new XUnitLoggingHandler(this.output));
                }));

            IChannel serverChannel = await server.BindAsync(IPAddress.Any, this.ProtocolGatewayPort);

            this.ScheduleCleanup(async () =>
            {
                await serverChannel.CloseAsync();
                await executorGroup.ShutdownGracefullyAsync();
            });
            this.ServerAddress = IPAddress.Loopback;
        }
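
A hedged companion sketch (not part of the test above) of a matching client-side bootstrap: it uses the standard DotNetty Bootstrap API and attaches a pooled allocator through ChannelOption.Allocator the same way the server does for its child channels; the handler body is a placeholder.

    var clientGroup = new MultithreadEventLoopGroup();
    Bootstrap client = new Bootstrap()
        .Group(clientGroup)
        .Channel<TcpSocketChannel>()
        .Option(ChannelOption.Allocator, PooledByteBufferAllocator.Default)
        .Handler(new ActionChannelInitializer<ISocketChannel>(ch =>
        {
            // TLS, MQTT codec and application handlers would be added here.
        }));

    IChannel clientChannel = await client.ConnectAsync(IPAddress.Loopback, this.ProtocolGatewayPort);
    // ... exercise the connection ...
    await clientChannel.CloseAsync();
    await clientGroup.ShutdownGracefullyAsync();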
        static PooledByteBufferAllocator()
        {
            int       defaultPageSize       = SystemPropertyUtil.GetInt("io.netty.allocator.pageSize", 8192);
            Exception pageSizeFallbackCause = null;

            try
            {
                ValidateAndCalculatePageShifts(defaultPageSize);
            }
            catch (Exception t)
            {
                pageSizeFallbackCause = t;
                defaultPageSize       = 8192;
            }
            DEFAULT_PAGE_SIZE = defaultPageSize;

            int       defaultMaxOrder       = SystemPropertyUtil.GetInt("io.netty.allocator.maxOrder", 11);
            Exception maxOrderFallbackCause = null;

            try
            {
                ValidateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
            }
            catch (Exception t)
            {
                maxOrderFallbackCause = t;
                defaultMaxOrder       = 11;
            }
            DEFAULT_MAX_ORDER = defaultMaxOrder;

            // Determine reasonable default for nHeapArena and nDirectArena.
            // Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.

            // Use 2 * cores by default to reduce contention as we use 2 * cores for the number of EventLoops
            // in NIO and EPOLL as well. If we choose a smaller number we will run into hotspots as allocation and
            // deallocation need to be synchronized on the PoolArena.
            // See https://github.com/netty/netty/issues/3888
            int defaultMinNumArena = Environment.ProcessorCount * 2;
            int defaultChunkSize   = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;

            DEFAULT_NUM_HEAP_ARENA = Math.Max(0, SystemPropertyUtil.GetInt("dotNetty.allocator.numHeapArenas", defaultMinNumArena));

            // cache sizes
            DEFAULT_TINY_CACHE_SIZE   = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
            DEFAULT_SMALL_CACHE_SIZE  = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
            DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);

            // 32 KB is the default maximum capacity of the cached buffer. Similar to what is explained in
            // 'Scalable memory allocation using jemalloc'
            DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.GetInt("io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);

            // number of allocations after which cached entries that are not frequently used will be freed up
            DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.GetInt(
                "io.netty.allocator.cacheTrimInterval", 8192);

            if (Logger.DebugEnabled)
            {
                Logger.Debug("-Dio.netty.allocator.numHeapArenas: {}", DEFAULT_NUM_HEAP_ARENA);
                if (pageSizeFallbackCause == null)
                {
                    Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
                }
                else
                {
                    Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
                }
                if (maxOrderFallbackCause == null)
                {
                    Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
                }
                else
                {
                    Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
                }
                Logger.Debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
                Logger.Debug("-Dio.netty.allocator.tinyCacheSize: {}", DEFAULT_TINY_CACHE_SIZE);
                Logger.Debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
                Logger.Debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
                Logger.Debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
                Logger.Debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
            }

            Default = new PooledByteBufferAllocator();
        }
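
A back-of-the-envelope check of the defaults computed above (my own arithmetic): the chunk size is the page size shifted left by the max order, and the arena count scales with the core count; combined with the "three chunks per arena" assumption in the comment, this bounds how much memory the pool retains.

    int pageSize  = 8192;                           // DEFAULT_PAGE_SIZE
    int maxOrder  = 11;                             // DEFAULT_MAX_ORDER
    int chunkSize = pageSize << maxOrder;           // 8192 << 11 = 16,777,216 bytes = 16 MiB per chunk
    int arenas    = Environment.ProcessorCount * 2; // defaultMinNumArena, e.g. 16 on an 8-core machine

    // Assuming each arena holds about 3 chunks (per the comment above), the pooled memory is roughly:
    long approxCap = (long)arenas * 3 * chunkSize;  // ~768 MiB on an 8-core machine
    Console.WriteLine($"chunkSize = {chunkSize}, arenas = {arenas}, approx. cap = {approxCap}");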