// private unsafe iovec* IoVectorTable => (iovec*)Align(_ioVectorTableMemory); // TODO

public unsafe LinuxAio() :
    base(supportsPolling: false, isThreadSafe: false)
{
    _memoryHandles = new List<MemoryHandle>();
    try
    {
        // Memory
        _aioEventsMemory = AllocMemory(sizeof(io_event) * IocbLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * IocbLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * IocbLength);
        // _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IovsPerIocb * _ioCbLength);
        _scheduledOperations = new List<Operation>();
        for (int i = 0; i < IocbLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }

        // Aio
        aio_context_t ctx;
        int rv = io_setup(NrEvents, &ctx);
        if (rv == -1)
        {
            PlatformException.Throw();
        }
        _ctx = ctx;
    }
    catch
    {
        FreeResources();
        throw; // don't let the caller observe a half-constructed instance
    }
}
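// FreeResources is referenced above but not shown. A hypothetical sketch of what it
// might look like, assuming io_destroy tears down _ctx, that _ctx.ring stays null until
// io_setup succeeds, and that a FreeMemory helper pairs with AllocMemory; none of these
// names beyond the fields used in the constructor are confirmed by the snippet.
private unsafe void FreeResources()
{
    if (_ctx.ring != null)
    {
        io_destroy(_ctx); // release the kernel AIO context created by io_setup
    }
    foreach (MemoryHandle handle in _memoryHandles)
    {
        handle.Dispose(); // unpin memory pinned for in-flight operations
    }
    _memoryHandles.Clear();
    FreeMemory(_aioEventsMemory);   // hypothetical counterpart to AllocMemory
    FreeMemory(_aioCbsMemory);
    FreeMemory(_aioCbsTableMemory);
}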
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _connectionDispatcher = transportThread.ConnectionDispatcher;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];

    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }
}
private static unsafe int IoGetEvents(aio_context_t ctx, int nr, io_event* events)
{
    Debug.Assert(nr != 0);
    // Check the ring buffer to avoid making a syscall.
    aio_ring* pRing = ctx.ring;
    if (pRing->magic == 0xa10a10a1 && pRing->incompat_features == 0)
    {
        int head = (int)pRing->head;
        int tail = (int)pRing->tail;
        int available = tail - head;
        if (available < 0)
        {
            available += (int)pRing->nr;
        }
        if (available >= nr)
        {
            io_event* ringEvents = (io_event*)((byte*)pRing + pRing->header_length);
            io_event* start = ringEvents + head;
            io_event* end = start + nr;
            if (head + nr > pRing->nr)
            {
                end -= pRing->nr;
            }
            if (end > start)
            {
                Copy(start, end, events);
            }
            else
            {
                // The requested range wraps around the end of the ring: copy in two parts.
                io_event* eventsEnd = Copy(start, ringEvents + pRing->nr, events);
                Copy(ringEvents, end, eventsEnd);
            }
            head += nr;
            if (head >= pRing->nr)
            {
                head -= (int)pRing->nr;
            }
            pRing->head = (uint)head;
            return nr;
        }
    }
    // Not enough completed events in the ring; fall back to the syscall.
    return io_getevents(ctx, nr, nr, events, null);
}
public static unsafe PosixResult IoGetEvents(aio_context_t ctx, int nr, io_event* events)
{
    if (nr <= 0)
    {
        return new PosixResult(PosixResult.EINVAL);
    }
    // Check the ring buffer to avoid making a syscall.
    aio_ring* pRing = ctx.ring;
    if (pRing->magic == 0xa10a10a1 && pRing->incompat_features == 0)
    {
        int head = (int)pRing->head;
        int tail = (int)pRing->tail;
        int available = tail - head;
        if (available < 0)
        {
            available += (int)pRing->nr;
        }
        if (available >= nr)
        {
            io_event* ringEvents = (io_event*)((byte*)pRing + pRing->header_length);
            io_event* start = ringEvents + head;
            io_event* end = start + nr;
            if (head + nr > pRing->nr)
            {
                end -= pRing->nr;
            }
            if (end > start)
            {
                Copy(start, end, events);
            }
            else
            {
                // The requested range wraps around the end of the ring: copy in two parts.
                io_event* eventsEnd = Copy(start, ringEvents + pRing->nr, events);
                Copy(ringEvents, end, eventsEnd);
            }
            head += nr;
            if (head >= pRing->nr)
            {
                head -= (int)pRing->nr;
            }
            pRing->head = (uint)head;
            return new PosixResult(nr);
        }
    }
    // Not enough completed events in the ring; block in the syscall instead.
    return IoGetEvents(ctx, nr, nr, events, -1);
}
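// Both IoGetEvents variants above read completions straight from the completion ring
// that the kernel maps into user space, so the common case needs no syscall. A sketch
// of the ring header they rely on, following the layout of struct aio_ring in the
// kernel's fs/aio.c; the managed definition used by these snippets is assumed to match.
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
internal unsafe struct aio_ring
{
    public uint id;                // kernel-internal identifier
    public uint nr;                // number of io_event slots in the ring
    public uint head;              // consumer index, advanced by user space
    public uint tail;              // producer index, advanced by the kernel
    public uint magic;             // AIO_RING_MAGIC == 0xa10a10a1
    public uint compat_features;
    public uint incompat_features; // must be 0 for the user-space fast path to be valid
    public uint header_length;     // byte offset of the first io_event
    // The io_event array is stored immediately after this header.
}

// The magic and incompat_features checks guard against a kernel whose ring layout
// differs; when either check fails, the code falls back to the io_getevents syscall.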
public static unsafe PosixResult IoGetEvents(aio_context_t ctx, int min_nr, int nr, io_event* events, int timeoutMs)
{
    timespec timeout = default(timespec);
    bool hasTimeout = timeoutMs >= 0;
    if (hasTimeout)
    {
        timeout.tv_sec = timeoutMs / 1000;
        timeout.tv_nsec = 1_000_000 * (timeoutMs % 1000); // milliseconds to nanoseconds
    }
    int rv;
    do
    {
        rv = io_getevents(ctx, min_nr, nr, events, hasTimeout ? &timeout : null);
    } while (rv < 0 && errno == EINTR); // retry when interrupted by a signal
    return PosixResult.FromReturnValue(rv);
}
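// A minimal usage sketch of the timeout overload above, assuming an initialized
// _aioContext and an EventBufferLength constant; HandleCompletion and the Value
// accessor for the event count are assumptions, not part of the snippet.
io_event* events = stackalloc io_event[EventBufferLength];
// Wait up to 10 ms for at least one completion; timeoutMs < 0 would block indefinitely.
PosixResult result = AioInterop.IoGetEvents(_aioContext, 1, EventBufferLength, events, timeoutMs: 10);
result.ThrowOnError();
for (int i = 0; i < result.Value; i++) // Value assumed to carry the returned event count
{
    HandleCompletion(ref events[i]);   // hypothetical per-event dispatch
}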
public unsafe ThreadContext(TransportThread transportThread)
{
    _transportThread = transportThread;
    _sockets = new Dictionary<int, TSocket>();
    _logger = _transportThread.LoggerFactory.CreateLogger($"{nameof(RedHat)}.{nameof(TransportThread)}.{_transportThread.ThreadId}");
    _acceptSockets = new List<TSocket>();
    _transportOptions = transportThread.TransportOptions;
    _scheduledSendAdding = new List<ScheduledSend>(1024);
    _scheduledSendRunning = new List<ScheduledSend>(1024);
    _epollState = EPollBlocked;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        _aioEventsMemory = AllocMemory(sizeof(io_event) * EventBufferLength);
        _aioCbsMemory = AllocMemory(sizeof(iocb) * EventBufferLength);
        _aioCbsTableMemory = AllocMemory(IntPtr.Size * EventBufferLength);
        _ioVectorTableMemory = AllocMemory(SizeOf.iovec * IoVectorsPerAioSocket * EventBufferLength);
        for (int i = 0; i < EventBufferLength; i++)
        {
            AioCbsTable[i] = &AioCbs[i];
        }
        if (_transportOptions.AioSend)
        {
            _aioSendBuffers = new ReadOnlySequence<byte>[EventBufferLength];
        }
    }
    int maxMemoryHandleCount = TSocket.MaxIOVectorReceiveLength;
    if (_transportOptions.AioReceive || _transportOptions.AioSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, EventBufferLength);
    }
    if (_transportOptions.DeferSend)
    {
        maxMemoryHandleCount = Math.Max(maxMemoryHandleCount, TSocket.MaxIOVectorSendLength);
    }
    MemoryHandles = new MemoryHandle[maxMemoryHandleCount];

    // These members need to be Disposed
    _epoll = EPoll.Create();
    _epollFd = _epoll.DangerousGetHandle().ToInt32();
    MemoryPool = CreateMemoryPool();
    _pipeEnds = PipeEnd.CreatePair(blocking: false);
    if (_aioEventsMemory != IntPtr.Zero)
    {
        aio_context_t ctx;
        AioInterop.IoSetup(EventBufferLength, &ctx).ThrowOnError();
        _aioContext = ctx;
    }

    // Single reader, single writer queue since all writes happen from the TransportThread
    // and reads happen sequentially.
    // This channel is unbounded, which means there's nothing limiting the number of sockets we're accepting.
    // This is similar to having an unbounded number of thread pool work items queued to invoke a ConnectionHandler,
    // which was the previous pattern, but now it's more explicit.
    // TODO: Find a reasonable limit and start applying accept backpressure once the channel reaches that limit.
    _acceptQueue = Channel.CreateUnbounded<TSocket>(new UnboundedChannelOptions
    {
        SingleReader = true,
        SingleWriter = true,
        AllowSynchronousContinuations = _transportOptions.ApplicationSchedulingMode == PipeScheduler.Inline,
    });
}
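// For context, a hedged sketch of the single-reader side that would drain _acceptQueue;
// the loop shape follows the channel options above, but HandleAcceptedSocket is a
// hypothetical name, not the codebase's actual consumer.
private async Task AcceptLoopAsync()
{
    ChannelReader<TSocket> reader = _acceptQueue.Reader;
    while (await reader.WaitToReadAsync())
    {
        // Safe to drain without synchronization: the channel was created with SingleReader = true.
        while (reader.TryRead(out TSocket socket))
        {
            HandleAcceptedSocket(socket); // hand off to the connection handler
        }
    }
}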
public static unsafe PosixResult IoSubmit(aio_context_t ctx, int nr, iocb** iocbpp)
{
    int rv = io_submit(ctx, nr, iocbpp);
    return PosixResult.FromReturnValue(rv);
}
public static unsafe PosixResult IoDestroy(aio_context_t ctx)
{
    int rv = io_destroy(ctx);
    return PosixResult.FromReturnValue(rv);
}
public static unsafe int io_getevents(aio_context_t ctx, long_t min_nr, long_t nr, io_event* events, timespec* timeout)
{
    return (int)syscall(SYS_io_getevents, ctx.ring, min_nr, nr, events, timeout);
}
public static unsafe int io_submit(aio_context_t ctx, long_t nr, iocb** iocbpp)
{
    return (int)syscall(SYS_io_submit, ctx.ring, nr, iocbpp);
}
public static int io_destroy(aio_context_t ctx)
{
    return (int)syscall(SYS_io_destroy, ctx.ring);
}
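// Tying the wrappers together: a minimal end-to-end sketch of the lifecycle they
// cover (io_setup, io_submit, io_getevents, io_destroy), submitting a single read.
// The iocb field names follow the kernel's struct iocb; IOCB_CMD_PREAD (0 in the
// kernel ABI) and the managed struct definitions are assumptions about this codebase.
static unsafe void ReadOnce(int fd, byte* buffer, int length)
{
    const ushort IOCB_CMD_PREAD = 0;

    aio_context_t ctx = default;
    AioInterop.IoSetup(1, &ctx).ThrowOnError();            // io_setup(2): room for one event
    try
    {
        iocb cb = default;
        cb.aio_lio_opcode = IOCB_CMD_PREAD;                // positioned read
        cb.aio_fildes = (uint)fd;                          // file descriptor to read from
        cb.aio_buf = (ulong)buffer;                        // destination buffer
        cb.aio_nbytes = (ulong)length;

        iocb* pcb = &cb;
        AioInterop.IoSubmit(ctx, 1, &pcb).ThrowOnError();  // io_submit(2): queue the request

        io_event ev = default;
        AioInterop.IoGetEvents(ctx, 1, 1, &ev, timeoutMs: -1).ThrowOnError(); // block for completion
        // ev.res holds the transferred byte count, or a negative errno on failure.
    }
    finally
    {
        AioInterop.IoDestroy(ctx).ThrowOnError();          // io_destroy(2): always release the context
    }
}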