/// <summary>
/// Binds <paramref name="channel"/> to <paramref name="localAddress"/> in the global
/// registry and returns the effective <see cref="LocalAddress"/> it was registered under.
/// </summary>
/// <exception cref="ChannelException">
/// Thrown (via <c>ThrowHelper</c>) when the channel is already bound, the address type is
/// unsupported, or the address is already claimed by another channel.
/// </exception>
internal static LocalAddress Register(IChannel channel, LocalAddress oldLocalAddress, EndPoint localAddress)
{
    // A channel may only be bound once.
    if (oldLocalAddress is object)
    {
        ThrowHelper.ThrowChannelException_AlreadyBound();
    }

    var addr = localAddress as LocalAddress;
    if (addr is null)
    {
        ThrowHelper.ThrowChannelException_UnsupportedAddrType(localAddress);
    }

    // Binding to the wildcard address: derive an ephemeral address from the channel itself.
    if (LocalAddress.Any.Equals(addr))
    {
        addr = new LocalAddress(channel);
    }

    // Atomically claim the address; if some other channel got there first, fail.
    var existing = BoundChannels.GetOrAdd(addr, channel);
    if (!ReferenceEquals(existing, channel))
    {
        ThrowHelper.ThrowChannelException_AddrAlreadyInUseBy(existing);
    }

    return (addr);
}
/// <summary>
/// Binds <paramref name="channel"/> to <paramref name="localAddress"/> in the global
/// registry and returns the effective <see cref="LocalAddress"/> it was registered under.
/// </summary>
/// <exception cref="ChannelException">
/// Thrown when the channel is already bound, the address type is unsupported, or the
/// address is already claimed by another channel.
/// </exception>
internal static LocalAddress Register(IChannel channel, LocalAddress oldLocalAddress, EndPoint localAddress)
{
    // A channel may only be bound once.
    if (oldLocalAddress != null)
    {
        throw new ChannelException("already bound");
    }

    // Single declaration-pattern test replaces the original "is" check followed by a
    // redundant explicit cast of the same value.
    if (!(localAddress is LocalAddress addr))
    {
        throw new ChannelException($"unsupported address type: {localAddress.GetType()}");
    }

    // Binding to the wildcard address: derive an ephemeral address from the channel itself.
    if (LocalAddress.Any.Equals(addr))
    {
        addr = new LocalAddress(channel);
    }

    // Atomically claim the address; if some other channel got there first, fail.
    var result = BoundChannels.GetOrAdd(addr, channel);
    if (!ReferenceEquals(result, channel))
    {
        throw new ChannelException($"address already in use by: {result}");
    }

    return (addr);
}
/// <summary>
/// Connects this local channel to the server channel registered under
/// <paramref name="remoteAddress"/>, binding to <paramref name="localAddress"/>
/// (or an ephemeral address) first when not already bound.
/// Failures are reported through the returned task, not thrown
/// (except the already-pending-connect case, which throws via ThrowHelper).
/// NOTE(review): the Volatile/Interlocked accesses and their order appear deliberate;
/// do not reorder.
/// </summary>
public override Task ConnectAsync(EndPoint remoteAddress, EndPoint localAddress)
{
    var promise = new TaskCompletionSource();

    // Already connected: fail the promise and surface the cause on the pipeline.
    if (Volatile.Read(ref _channel.v_state) == State.Connected)
    {
        var cause = new AlreadyConnectedException();
        Util.SafeSetFailure(promise, cause, Logger);
        _ = _channel.Pipeline.FireExceptionCaught(cause);
        return (promise.Task);
    }

    // Only one connect attempt may be in flight at a time.
    if (Volatile.Read(ref _channel.v_connectPromise) is object)
    {
        ThrowHelper.ThrowConnectionPendingException();
    }

    _ = Interlocked.Exchange(ref _channel.v_connectPromise, promise);

    if (Volatile.Read(ref _channel.v_state) != State.Bound)
    {
        // Not bound yet and no localAddress specified - get one.
        if (localAddress is null)
        {
            localAddress = new LocalAddress(_channel);
        }
    }

    // Bind (either caller-supplied or the ephemeral address chosen above);
    // a bind failure fails the promise and closes the channel.
    if (localAddress is object)
    {
        try
        {
            _channel.DoBind(localAddress);
        }
        catch (Exception ex)
        {
            Util.SafeSetFailure(promise, ex, Logger);
            _ = _channel.CloseAsync();
            return (promise.Task);
        }
    }

    // The remote end must be a LocalServerChannel registered under remoteAddress;
    // anything else is treated as "connection refused".
    IChannel boundChannel = LocalChannelRegistry.Get(remoteAddress);
    if (!(boundChannel is LocalServerChannel serverChannel))
    {
        Exception cause = new ConnectException($"connection refused: {remoteAddress}", null);
        Util.SafeSetFailure(promise, cause, Logger);
        _ = _channel.CloseAsync();
        return (promise.Task);
    }

    // Hand ourselves to the server; it produces the peer channel for this connection.
    _ = Interlocked.Exchange(ref _channel.v_peer, serverChannel.Serve(_channel));
    return (promise.Task);
}
/// <summary>
/// Connects this local channel to the server channel registered under
/// <paramref name="remoteAddress"/>, binding to <paramref name="localAddress"/>
/// (or an ephemeral address) first when not already bound.
/// Failures are reported through the returned task, not thrown
/// (except the already-pending-connect case, which throws).
/// </summary>
public override Task ConnectAsync(EndPoint remoteAddress, EndPoint localAddress)
{
    var promise = new TaskCompletionSource();

    // Already connected: fail the promise and surface the cause on the pipeline.
    if (this.localChannel.state == State.Connected)
    {
        var cause = new AlreadyConnectedException();
        Util.SafeSetFailure(promise, cause, Logger);
        this.localChannel.Pipeline.FireExceptionCaught(cause);
        return (promise.Task);
    }

    // Only one connect attempt may be in flight at a time.
    if (this.localChannel.connectPromise != null)
    {
        throw new ConnectionPendingException();
    }

    this.localChannel.connectPromise = promise;

    if (this.localChannel.state != State.Bound)
    {
        // Not bound yet and no localAddress specified - get one.
        if (localAddress == null)
        {
            localAddress = new LocalAddress(this.localChannel);
        }
    }

    if (localAddress != null)
    {
        try
        {
            this.localChannel.DoBind(localAddress);
        }
        catch (Exception ex)
        {
            Util.SafeSetFailure(promise, ex, Logger);
            // BUGFIX: was "this.channel.CloseAsync()" - every other reference in this
            // method (including the refused-connection path below) uses "this.localChannel".
            this.localChannel.CloseAsync();
            return (promise.Task);
        }
    }

    // The remote end must be a LocalServerChannel registered under remoteAddress;
    // anything else is treated as "connection refused".
    IChannel boundChannel = LocalChannelRegistry.Get(remoteAddress);
    if (!(boundChannel is LocalServerChannel serverChannel))
    {
        Exception cause = new ConnectException($"connection refused: {remoteAddress}", null);
        Util.SafeSetFailure(promise, cause, Logger);
        this.localChannel.CloseAsync();
        return (promise.Task);
    }

    // Hand ourselves to the server; it produces the peer channel for this connection.
    this.localChannel.peer = serverChannel.Serve(this.localChannel);
    return (promise.Task);
}
/// <summary>
/// Closes this channel: unregisters its local address (if any) and advances the state
/// to 2 (closed). Idempotent - a second call sees state &gt; 1 and does nothing.
/// </summary>
protected override void DoClose()
{
    // Already closed; nothing left to tear down.
    if (this.state > 1)
    {
        return;
    }

    // Update all internal state before the closeFuture is notified.
    var addr = this.localAddress;
    if (addr != null)
    {
        LocalChannelRegistry.Unregister(addr);
        this.localAddress = null;
    }

    this.state = 2;
}
/// <summary>
/// Creates a local channel, optionally attached to a <paramref name="parent"/> server
/// channel and wired to the <paramref name="peer"/> channel at the other end of the pipe.
/// </summary>
internal LocalChannel(LocalServerChannel parent, LocalChannel peer)
    : base(parent)
{
    this.peer = peer;

    // Addresses are derived from whichever ends of the pipe are already known.
    if (parent != null)
    {
        this.localAddress = parent.LocalAddress;
    }

    if (peer != null)
    {
        this.remoteAddress = peer.LocalAddress;
    }

    this.Configuration = new DefaultChannelConfiguration(this);
    this.shutdownHook = () => this.Unsafe.CloseAsync();
}
/// <summary>
/// Creates a local channel, optionally attached to a <paramref name="parent"/> server
/// channel and wired to the <paramref name="peer"/> channel at the other end of the pipe.
/// </summary>
internal LocalChannel(LocalServerChannel parent, LocalChannel peer)
    : base(parent)
{
    _inboundBuffer = PlatformDependent.NewMpscQueue<object>();
    v_peer = peer;

    // Addresses are derived from whichever ends of the pipe are already known.
    if (parent is object)
    {
        v_localAddress = parent.LocalAddress;
    }

    if (peer is object)
    {
        v_remoteAddress = peer.LocalAddress;
    }

    // Wrap the default allocator; PreferHeapByteBufAllocator suggests heap buffers are
    // preferred for the in-process local transport.
    var channelConfig = new DefaultChannelConfiguration(this);
    channelConfig.Allocator = new PreferHeapByteBufAllocator(channelConfig.Allocator);
    Configuration = channelConfig;

    shutdownHook = () => Unsafe.Close(Unsafe.VoidPromise());
}
/// <summary>
/// Registers this channel under <paramref name="localAddress"/> in the global registry
/// and records the resulting effective address and the bound state (1).
/// </summary>
protected override void DoBind(EndPoint localAddress)
{
    var bound = LocalChannelRegistry.Register(this, this.localAddress, localAddress);
    this.localAddress = bound;
    this.state = 1;
}
/// <summary>
/// Closes this channel and triggers the close of its peer.
/// Unregisters the address, flushes pending peer reads, fails any in-flight connect
/// promise, and finally releases inbound buffers.
/// NOTE(review): statement order is load-bearing (see inline comments); do not reorder.
/// </summary>
protected override void DoClose()
{
    var peer = this.peer;
    var oldState = this.state;
    try
    {
        if (oldState != State.Closed)
        {
            // Update all internal state before the closeFuture is notified.
            if (this.localAddress != null)
            {
                // Only a client-created channel owns its registry entry; a channel with a
                // parent leaves unregistration to the server side.
                if (this.Parent == null)
                {
                    LocalChannelRegistry.Unregister(this.localAddress);
                }
                this.localAddress = null;
            }

            // State change must happen before finishPeerRead to ensure writes are released
            // either in doWrite or channelRead.
            this.state = State.Closed;

            // Preserve order of event and force a read operation now before the close
            // operation is processed.
            this.FinishPeerRead(this);

            TaskCompletionSource promise = this.connectPromise;
            if (promise != null)
            {
                // Use tryFailure() instead of setFailure() to avoid the race against cancel().
                promise.TrySetException(DoCloseClosedChannelException);
                this.connectPromise = null;
            }
        }

        if (peer != null)
        {
            this.peer = null;
            // Need to execute the close in the correct EventLoop (see https://github.com/netty/netty/issues/1777).
            // Also check if the registration was not done yet. In this case we submit the close to the EventLoop
            // to make sure its run after the registration completes
            // (see https://github.com/netty/netty/issues/2144).
            IEventLoop peerEventLoop = peer.EventLoop;
            bool peerIsActive = peer.Active;
            if (peerEventLoop.InEventLoop && !this.registerInProgress)
            {
                peer.TryClose(peerIsActive);
            }
            else
            {
                try
                {
                    peerEventLoop.Execute(() => peer.TryClose(peerIsActive));
                }
                catch (Exception cause)
                {
                    Logger.Warn("Releasing Inbound Queues for channels {}-{} because exception occurred!", this, peer, cause);
                    if (peerEventLoop.InEventLoop)
                    {
                        peer.ReleaseInboundBuffers();
                    }
                    else
                    {
                        // inboundBuffers is a SPSC so we may leak if the event loop is shutdown prematurely or
                        // rejects the close Runnable but give a best effort.
                        peer.CloseAsync();
                    }
                    throw;
                }
            }
        }
    }
    finally
    {
        // Release all buffers if the Channel was already registered in the past and if it was not closed before.
        if (oldState != State.Closed)
        {
            // We need to release all the buffers that may be put into our inbound queue since we closed the Channel
            // to ensure we not leak any memory. This is fine as it basically gives the same guarantees as TCP which
            // means even if the promise was notified before its not really guaranteed that the "remote peer" will
            // see the buffer at all.
            this.ReleaseInboundBuffers();
        }
    }
}