// Wires a freshly accepted file descriptor into the transport: caches the
// per-thread context, pre-allocates the delegates reused for every flush/read
// callback, and builds the duplex pipe pair connecting socket and application.
public TSocket(ThreadContext threadContext, int fd, SocketFlags flags, LinuxTransportOptions options)
{
    _threadContext = threadContext;
    Fd = fd;
    _flags = flags;

    // Allocate the callback delegates once so scheduling never allocates per operation.
    _onFlushedToApp = new Action(OnFlushedToApp);
    _onReadFromApp = new Action(OnReadFromApp);

    _connectionClosedTokenSource = new CancellationTokenSource();
    ConnectionClosed = _connectionClosedTokenSource.Token;
    _waitForConnectionClosedTcs = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);

    if (!IsDeferSend)
    {
        // Non-deferred sends pin buffers per socket, so this socket needs its own handle array.
        _sendMemoryHandles = new MemoryHandle[MaxIOVectorSendLength];
    }

    // Input pipe: transport writes, application reads. Output pipe: the reverse.
    var receivePipeOptions = new PipeOptions(MemoryPool,
                                             options.ApplicationSchedulingMode,
                                             PipeScheduler.Inline,
                                             PauseInputWriterThreshold,
                                             PauseInputWriterThreshold / 2,
                                             useSynchronizationContext: false);
    var sendPipeOptions = new PipeOptions(MemoryPool,
                                          PipeScheduler.Inline,
                                          options.ApplicationSchedulingMode,
                                          PauseOutputWriterThreshold,
                                          PauseOutputWriterThreshold / 2,
                                          useSynchronizationContext: false);

    var pipePair = DuplexPipe.CreateConnectionPair(receivePipeOptions, sendPipeOptions);
    Transport = pipePair.Transport;
    Application = pipePair.Application;
}
// Creates the transport front-end for a listen endpoint.
// FIX: the original threw ArgumentException(nameof(x)) for three of the four
// null checks — the wrong exception type for a null argument, and it also
// misused nameof() as the exception *message* rather than the parameter name.
// All four guards now throw ArgumentNullException, matching the dispatcher check.
public Transport(IEndPointInformation ipEndPointInformation, IConnectionDispatcher connectionDispatcher, LinuxTransportOptions transportOptions, ILoggerFactory loggerFactory)
{
    if (ipEndPointInformation == null)
    {
        throw new ArgumentNullException(nameof(ipEndPointInformation));
    }
    if (connectionDispatcher == null)
    {
        throw new ArgumentNullException(nameof(connectionDispatcher));
    }
    if (transportOptions == null)
    {
        throw new ArgumentNullException(nameof(transportOptions));
    }
    if (loggerFactory == null)
    {
        throw new ArgumentNullException(nameof(loggerFactory));
    }

    _endPoint = ipEndPointInformation;
    _connectionDispatcher = connectionDispatcher;
    _transportOptions = transportOptions;
    _loggerFactory = loggerFactory;
    _logger = loggerFactory.CreateLogger<Transport>();
    // No threads until Bind/Start; empty array avoids null checks elsewhere.
    _threads = Array.Empty<TransportThread>();
}
// Captures the configuration for a transport thread that attaches to an
// already-running accept thread; no validation beyond field capture.
public TransportThread(IPEndPoint endPoint, LinuxTransportOptions options, AcceptThread acceptThread, int threadId, int cpuId, ILoggerFactory loggerFactory)
{
    EndPoint = endPoint;
    TransportOptions = options;
    AcceptThread = acceptThread;
    ThreadId = threadId;
    CpuId = cpuId;
    LoggerFactory = loggerFactory;
}
// Stores the resolved transport options and the logger factory used when
// constructing transports; both dependencies are mandatory.
public LinuxTransportFactory(IOptions<LinuxTransportOptions> options, ILoggerFactory loggerFactory)
{
    // Guard clauses first: fail fast on missing dependencies.
    if (options == null)
    {
        throw new ArgumentNullException(nameof(options));
    }
    if (loggerFactory == null)
    {
        throw new ArgumentNullException(nameof(loggerFactory));
    }

    // Unwrap IOptions<T> eagerly; the snapshot is fixed at construction time.
    _options = options.Value;
    _loggerFactory = loggerFactory;
}
// Builds all per-thread state for a transport thread: socket bookkeeping,
// optional Linux AIO buffers/tables, the shared MemoryHandle scratch array,
// the epoll instance, the memory pool, and the wake-up pipe pair.
// FIX: the AIO check used the non-short-circuit bitwise '|' on two bools,
// inconsistent with the identical '||' check further down; both now use '||'.
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _connectionDispatcher = transportThread.ConnectionDispatcher;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Native buffers shared by all AIO operations on this thread.
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        // Pre-populate the iocb pointer table passed to io_submit.
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size the shared MemoryHandle scratch array for its largest consumer.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];
    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        // AIO buffers were allocated above, so set up the kernel AIO context too.
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
}
// Captures configuration for a transport thread and validates the connection
// dispatcher, the one dependency this overload dereferences unconditionally.
public TransportThread(IPEndPoint endPoint, IConnectionDispatcher connectionDispatcher, LinuxTransportOptions options, AcceptThread acceptThread, int threadId, int cpuId, ILoggerFactory loggerFactory)
{
    if (connectionDispatcher == null)
    {
        throw new ArgumentNullException(nameof(connectionDispatcher));
    }
    ConnectionDispatcher = connectionDispatcher;

    EndPoint = endPoint;
    TransportOptions = options;
    AcceptThread = acceptThread;
    ThreadId = threadId;
    CpuId = cpuId;
    LoggerFactory = loggerFactory;
}
// Creates the transport front-end for a listen endpoint (EndPoint overload).
// FIX: all three null checks threw ArgumentException(nameof(x)) — the wrong
// exception type for a null argument, and nameof() ended up as the message
// rather than the parameter name. They now throw ArgumentNullException.
public Transport(EndPoint endPoint, LinuxTransportOptions transportOptions, ILoggerFactory loggerFactory)
{
    if (endPoint == null)
    {
        throw new ArgumentNullException(nameof(endPoint));
    }
    if (transportOptions == null)
    {
        throw new ArgumentNullException(nameof(transportOptions));
    }
    if (loggerFactory == null)
    {
        throw new ArgumentNullException(nameof(loggerFactory));
    }

    EndPoint = endPoint;
    _transportOptions = transportOptions;
    _loggerFactory = loggerFactory;
    _logger = loggerFactory.CreateLogger<Transport>();
    // No threads until Bind/Start; empty array avoids null checks elsewhere.
    _threads = Array.Empty<TransportThread>();
}
// Builds all per-thread state for a transport thread (accept-queue variant):
// socket bookkeeping, optional Linux AIO buffers/tables, the shared
// MemoryHandle scratch array, the epoll instance, the memory pool, the
// wake-up pipe pair, and the channel that hands accepted sockets to readers.
// FIX: the AIO check used the non-short-circuit bitwise '|' on two bools,
// inconsistent with the identical '||' check further down; both now use '||'.
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Native buffers shared by all AIO operations on this thread.
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        // Pre-populate the iocb pointer table passed to io_submit.
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size the shared MemoryHandle scratch array for its largest consumer.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];
    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        // AIO buffers were allocated above, so set up the kernel AIO context too.
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
    // Single reader, single writer queue since all writes happen from the TransportThread and reads happen sequentially
    // This channel is unbounded which means there's nothing limiting the number of sockets we're accepting.
    // This is similar to having an unbounded number of thread pool work items queued to invoke a ConnectionHandler
    // which was the previous pattern, but now it's more explicit.
    // TODO: Find a reasonable limit and start applying accept backpressure once the channel reaches that limit.
    _acceptQueue = Channel.CreateUnbounded<TSocket>(new UnboundedChannelOptions
    {
        SingleReader = true,
        SingleWriter = true,
        AllowSynchronousContinuations = _transportOptions.ApplicationSchedulingMode == PipeScheduler.Inline,
    });
}