/// <summary>
/// Initializes per-thread state for <paramref name="transportThread"/>: socket tables,
/// send queues, optional Linux AIO buffers, the epoll instance, memory pool and wakeup pipe.
/// </summary>
/// <param name="transportThread">The owning transport thread; supplies options, logger factory and dispatcher.</param>
/// <exception cref="System.Exception">Throws (via ThrowOnError) if io_setup fails when AIO is enabled.</exception>
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _connectionDispatcher = transportThread.ConnectionDispatcher;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    // Use short-circuit '||' (was bitwise '|'), consistent with the identical check below.
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Native buffers for Linux AIO: completion events, control blocks, a pointer
        // table into the control blocks, and iovecs (IoVectorsPerAioSocket per slot).
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size MemoryHandles for the largest receive/AIO/deferred-send pinning requirement.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];

    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    // _aioEventsMemory is non-zero only when AIO was enabled above.
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
}
/// <summary>
/// Creates the configured number of transport threads, optionally distributing them
/// round-robin over the preferred CPU ids when thread affinity is enabled.
/// </summary>
/// <param name="ipEndPoint">Endpoint each transport thread binds/listens on.</param>
/// <param name="acceptThread">Shared accept thread passed to every transport thread.</param>
/// <returns>An array of newly constructed (not yet started) transport threads.</returns>
private TransportThread[] CreateTransportThreads(IPEndPoint ipEndPoint, AcceptThread acceptThread)
{
    int threadCount = _transportOptions.ThreadCount;
    var transportThreads = new TransportThread[threadCount];

    // When affinity is off, cpuIds stays null and each thread gets cpu id -1 (no pinning).
    IList<int> cpuIds = null;
    if (_transportOptions.SetThreadAffinity)
    {
        cpuIds = GetPreferredCpuIds();
    }

    int nextCpuSlot = 0;
    for (int idx = 0; idx < threadCount; idx++)
    {
        int affinityCpuId = -1;
        if (cpuIds != null)
        {
            // Cycle through the preferred cpu list round-robin.
            affinityCpuId = cpuIds[nextCpuSlot++ % cpuIds.Count];
        }
        // Interlocked keeps thread ids unique even if multiple transports start concurrently.
        int threadId = Interlocked.Increment(ref s_threadId);
        transportThreads[idx] = new TransportThread(ipEndPoint, _transportOptions, acceptThread, threadId, affinityCpuId, _loggerFactory);
    }

    return transportThreads;
}
/// <summary>
/// Initializes per-thread state for <paramref name="transportThread"/>: socket tables,
/// send queues, optional Linux AIO buffers, the epoll instance, memory pool, wakeup pipe
/// and the accept queue channel.
/// </summary>
/// <param name="transportThread">The owning transport thread; supplies options and logger factory.</param>
/// <exception cref="System.Exception">Throws (via ThrowOnError) if io_setup fails when AIO is enabled.</exception>
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    // Use short-circuit '||' (was bitwise '|'), consistent with the identical check below.
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Native buffers for Linux AIO: completion events, control blocks, a pointer
        // table into the control blocks, and iovecs (IoVectorsPerAioSocket per slot).
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size MemoryHandles for the largest receive/AIO/deferred-send pinning requirement.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];

    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    // _aioEventsMemory is non-zero only when AIO was enabled above.
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }

    // Single reader, single writer queue since all writes happen from the TransportThread
    // and reads happen sequentially.
    // This channel is unbounded which means there's nothing limiting the number of sockets
    // we're accepting.
    // This is similar to having an unbounded number of thread pool work items queued to
    // invoke a ConnectionHandler which was the previous pattern, but now it's more explicit.
    // TODO: Find a reasonable limit and start applying accept backpressure once the channel
    // reaches that limit.
    _acceptQueue = Channel.CreateUnbounded<TSocket>(new UnboundedChannelOptions
    {
        SingleReader = true,
        SingleWriter = true,
        AllowSynchronousContinuations = _transportOptions.ApplicationSchedulingMode == PipeScheduler.Inline,
    });
}