public Task BindAsync()
{
    lock (_gate)
    {
        if (_state != State.Initial)
        {
            throw new InvalidOperationException($"Invalid operation: {_state}");
        }
        _stoppedTcs = new TaskCompletionSource<object>();
        try
        {
            // Create the wake-up pipe, then hand accepting off to a dedicated thread.
            _pipeEnds = PipeEnd.CreatePair(blocking: false);
            _thread = new Thread(AcceptThreadStart);
            _thread.Start();
            _state = State.Started;
        }
        catch
        {
            // Roll back to a terminal state and release resources when startup fails.
            _state = State.Stopped;
            _stoppedTcs = null;
            _socket.Dispose();
            Cleanup();
            throw;
        }
    }
    return Task.CompletedTask;
}
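BindAsync depends on members not shown in this snippet; the following is a minimal sketch of what they could look like, inferred purely from usage (the field and enum shapes are assumptions, not taken from the original source):

// Hypothetical supporting members, inferred from how BindAsync uses them.
private enum State
{
    Initial,  // BindAsync has not run yet.
    Started,  // The accept thread is running.
    Stopped   // Startup failed or the listener shut down.
}

private readonly object _gate = new object();
private State _state = State.Initial;
private TaskCompletionSource<object> _stoppedTcs; // completed when the accept loop exits
private Thread _thread;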
public static unsafe PosixResult Pipe(out PipeEnd readEnd, out PipeEnd writeEnd, bool blocking)
{
    int* fds = stackalloc int[2];
    int flags = O_CLOEXEC;
    if (!blocking)
    {
        flags |= O_NONBLOCK;
    }
    readEnd = new PipeEnd();
    writeEnd = new PipeEnd();
    int res = pipe2(fds, flags);
    if (res == 0)
    {
        // pipe2 returns the read end in fds[0] and the write end in fds[1].
        readEnd.SetHandle(fds[0]);
        writeEnd.SetHandle(fds[1]);
    }
    else
    {
        readEnd = null;
        writeEnd = null;
    }
    return PosixResult.FromReturnValue(res);
}
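The wrapper above assumes a pipe2 interop binding. A minimal sketch of what that P/Invoke and the flag constants could look like on Linux x86-64 follows; the declaration in the original source may differ, and the flag values vary on some other architectures:

using System.Runtime.InteropServices;

internal static unsafe class PipeInterop
{
    // Flag values as defined on Linux x86-64.
    internal const int O_NONBLOCK = 0x800;
    internal const int O_CLOEXEC = 0x80000;

    // int pipe2(int pipefd[2], int flags); available in glibc since 2.9.
    [DllImport("libc", SetLastError = true)]
    internal static extern int pipe2(int* pipefd, int flags);
}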
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _connectionDispatcher = transportThread.ConnectionDispatcher;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Allocate the native buffers used for Linux AIO (io_submit/io_getevents).
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size the pinned-memory handle table for the largest batch we may pin at once.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];
    // These members need to be Disposed.
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
}
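AllocMemory is not shown in this snippet. A plausible sketch, assuming it wraps Marshal.AllocHGlobal and zero-initializes the block; the real helper may additionally track allocations so Dispose can free them:

// Hypothetical helper: the AIO control blocks are expected to start out zeroed,
// so clear the unmanaged block after allocating it. Callers would release it
// with Marshal.FreeHGlobal during Dispose.
private static unsafe IntPtr AllocMemory(int length)
{
    IntPtr ptr = Marshal.AllocHGlobal(length);
    new Span<byte>((void*)ptr, length).Clear();
    return ptr;
}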
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        // Allocate the native buffers used for Linux AIO (io_submit/io_getevents).
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    // Size the pinned-memory handle table for the largest batch we may pin at once.
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];
    // These members need to be Disposed.
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
    // Single reader, single writer queue since all writes happen from the TransportThread and reads happen sequentially.
    // This channel is unbounded, which means there's nothing limiting the number of sockets we're accepting.
    // This is similar to having an unbounded number of thread pool work items queued to invoke a ConnectionHandler,
    // which was the previous pattern, but now it's more explicit.
    // TODO: Find a reasonable limit and start applying accept backpressure once the channel reaches that limit.
    _acceptQueue = Channel.CreateUnbounded<TSocket>(new UnboundedChannelOptions
    {
        SingleReader = true,
        SingleWriter = true,
        AllowSynchronousContinuations = _transportOptions.ApplicationSchedulingMode == PipeScheduler.Inline,
    });
}
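Only the producer side of _acceptQueue appears above. A minimal sketch of the single consumer it pairs with; the AcceptAsync name and the null-on-shutdown convention are assumptions, only the Channel&lt;TSocket&gt; shape comes from the constructor:

// Hypothetical consumer loop: the one reader permitted by SingleReader = true.
public async ValueTask<TSocket> AcceptAsync(CancellationToken cancellationToken = default)
{
    while (await _acceptQueue.Reader.WaitToReadAsync(cancellationToken))
    {
        if (_acceptQueue.Reader.TryRead(out TSocket socket))
        {
            return socket;
        }
    }
    // The channel was completed: the listener is shutting down.
    return null;
}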
public static extern PosixResult Pipe(out PipeEnd readEnd, out PipeEnd writeEnd, bool blocking);
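A short usage sketch for the binding above, assuming PipeEnd is a disposable handle wrapper such as a SafeHandle; ThrowOnError mirrors the interop-result check used for IoSetup in the ThreadContext constructor:

// Create a non-blocking pipe pair; on failure ThrowOnError surfaces the errno.
PosixResult result = Pipe(out PipeEnd readEnd, out PipeEnd writeEnd, blocking: false);
result.ThrowOnError();
using (readEnd)   // fds[0], the read end (assumed IDisposable)
using (writeEnd)  // fds[1], the write end
{
    // Use the pipe ends, e.g. to wake a thread blocked in epoll_wait.
}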