private void heartbeat()
{
    Debug.Assert(_state == StateActive);

    if(!_endpoint.datagram())
    {
        //
        // Send a validate-connection message as a heartbeat to keep the connection alive.
        //
        IceInternal.BasicStream os = new IceInternal.BasicStream(_instance, Util.currentProtocolEncoding);
        os.writeBlob(IceInternal.Protocol.magic);
        Ice.Util.currentProtocol.write__(os);
        Ice.Util.currentProtocolEncoding.write__(os);
        os.writeByte(IceInternal.Protocol.validateConnectionMsg);
        os.writeByte((byte)0); // Compression status: not compressed.
        os.writeInt(IceInternal.Protocol.headerSize); // Message size.
        try
        {
            OutgoingMessage message = new OutgoingMessage(os, false, false);
            sendMessage(message);
        }
        catch(Ice.LocalException ex)
        {
            setState(StateClosed, ex);
            Debug.Assert(_exception != null);
        }
    }
}
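//
// For reference, the validate-connection message marshalled in heartbeat()
// consists solely of the 14-byte Ice protocol header. The helper below is a
// hypothetical sketch (not used by the connection code above) laying that
// header out byte by byte; the concrete version and message-type values are
// assumptions based on the Protocol constants referenced above.
//
private static byte[] buildValidateConnectionHeaderSketch()
{
    byte[] header = new byte[14];   // IceInternal.Protocol.headerSize
    header[0] = (byte)'I';          // Magic bytes "IceP".
    header[1] = (byte)'c';
    header[2] = (byte)'e';
    header[3] = (byte)'P';
    header[4] = 1;                  // Protocol version major.
    header[5] = 0;                  // Protocol version minor.
    header[6] = 1;                  // Protocol encoding version major.
    header[7] = 0;                  // Protocol encoding version minor.
    header[8] = 3;                  // Message type: validateConnectionMsg.
    header[9] = 0;                  // Compression status: not compressed.
    header[10] = 14;                // Message size (little-endian int): header only, no body.
    header[11] = 0;
    header[12] = 0;
    header[13] = 0;
    return header;
}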
public void finishBatchRequest(IceInternal.BasicStream os, bool compress)
{
    try
    {
        _m.Lock();
        try
        {
            //
            // Get the batch stream back.
            //
            _batchStream.swap(os);

            if(_exception != null)
            {
                throw _exception;
            }

            bool flush = false;
            if(_batchAutoFlush)
            {
                //
                // Throw a memory limit exception if the first message added causes us to
                // go over the limit. Otherwise put aside the marshalled message that caused
                // the limit to be exceeded and roll back the stream to the marker.
                //
                try
                {
                    _transceiver.checkSendSize(_batchStream.getBuffer(), _instance.messageSizeMax());
                }
                catch(LocalException)
                {
                    if(_batchRequestNum > 0)
                    {
                        flush = true;
                    }
                    else
                    {
                        throw;
                    }
                }
            }

            if(flush)
            {
                //
                // Temporarily save the last request.
                //
                int requestSize = _batchStream.size() - _batchMarker;
                byte[] lastRequest = new byte[requestSize];
                Buffer.BlockCopy(_batchStream.getBuffer().b.rawBytes(), _batchMarker, lastRequest, 0, requestSize);
                _batchStream.resize(_batchMarker, false);

                //
                // Send the batch stream without the last request.
                //
                try
                {
                    //
                    // Fill in the number of requests in the batch.
                    //
                    _batchStream.pos(IceInternal.Protocol.headerSize);
                    _batchStream.writeInt(_batchRequestNum);

                    OutgoingMessage message = new OutgoingMessage(_batchStream, _batchRequestCompress, true);
                    sendMessage(message);
                }
                catch(LocalException ex)
                {
                    setState(StateClosed, ex);
                    Debug.Assert(_exception != null);
                    throw _exception;
                }

                //
                // Reset the batch stream.
                //
                _batchStream = new IceInternal.BasicStream(_instance, Util.currentProtocolEncoding, _batchAutoFlush);
                _batchRequestNum = 0;
                _batchRequestCompress = false;
                _batchMarker = 0;

                //
                // Check that the last request itself does not exceed the maximum message size.
                //
                if(IceInternal.Protocol.requestBatchHdr.Length + lastRequest.Length > _instance.messageSizeMax())
                {
                    IceInternal.Ex.throwMemoryLimitException(
                        IceInternal.Protocol.requestBatchHdr.Length + lastRequest.Length,
                        _instance.messageSizeMax());
                }

                //
                // Start a new batch with the last message that caused us to go over the limit.
                //
                _batchStream.writeBlob(IceInternal.Protocol.requestBatchHdr);
                _batchStream.writeBlob(lastRequest);
            }

            //
            // Increment the number of requests in the batch.
            //
            ++_batchRequestNum;

            //
            // We compress the whole batch if there is at least one compressed
            // message.
            //
            if(compress)
            {
                _batchRequestCompress = true;
            }

            //
            // Notify about the batch stream not being in use anymore.
            //
            Debug.Assert(_batchStreamInUse);
            _batchStreamInUse = false;
            _m.NotifyAll();
        }
        finally
        {
            _m.Unlock();
        }
    }
    catch(LocalException)
    {
        abortBatchRequest();
        throw;
    }
}
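//
// Illustrative sketch of the auto-flush policy applied above, expressed over
// plain collections instead of BasicStream. Every name in this nested class
// (PendingBatchSketch, Add, send) is hypothetical; it only demonstrates the
// "flush the requests queued so far, then start a new batch with the request
// that pushed the size over the limit" behaviour.
//
private sealed class PendingBatchSketch
{
    private readonly System.Collections.Generic.List<byte[]> _requests =
        new System.Collections.Generic.List<byte[]>();
    private int _size = 18; // Batch header: 14-byte protocol header + 4-byte request count.

    public void Add(byte[] request, int maxSize,
                    System.Action<System.Collections.Generic.List<byte[]>> send)
    {
        if(_size + request.Length > maxSize && _requests.Count > 0)
        {
            //
            // Flush everything queued so far and start a fresh batch that
            // contains only the request that exceeded the limit.
            //
            send(new System.Collections.Generic.List<byte[]>(_requests));
            _requests.Clear();
            _size = 18;
        }
        if(_size + request.Length > maxSize)
        {
            //
            // A single request larger than the limit cannot be sent at all.
            //
            throw new System.ArgumentException("request exceeds the maximum message size");
        }
        _requests.Add(request);
        _size += request.Length;
    }
}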
private void initiateShutdown()
{
    Debug.Assert(_state == StateClosing);
    Debug.Assert(_dispatchCount == 0);

    if(_shutdownInitiated)
    {
        return;
    }
    _shutdownInitiated = true;

    if(!_endpoint.datagram())
    {
        //
        // Before we shut down, we send a close connection message.
        //
        IceInternal.BasicStream os = new IceInternal.BasicStream(_instance, Util.currentProtocolEncoding);
        os.writeBlob(IceInternal.Protocol.magic);
        Ice.Util.currentProtocol.write__(os);
        Ice.Util.currentProtocolEncoding.write__(os);
        os.writeByte(IceInternal.Protocol.closeConnectionMsg);
        os.writeByte(_compressionSupported ? (byte)1 : (byte)0);
        os.writeInt(IceInternal.Protocol.headerSize); // Message size.

        if(sendMessage(new OutgoingMessage(os, false, false)))
        {
            setState(StateClosingPending);

            //
            // Notify the transceiver of the graceful connection closure.
            //
            int op = _transceiver.closing(true, _exception);
            if(op != 0)
            {
                scheduleTimeout(op);
                _threadPool.register(this, op);
            }
        }
    }
}
private void initiateShutdown()
{
    Debug.Assert(_state == StateClosing);
    Debug.Assert(_dispatchCount == 0);
    Debug.Assert(!_shutdownInitiated);

    _shutdownInitiated = true;

    if(!_endpoint.datagram())
    {
        //
        // Before we shut down, we send a close connection message.
        //
        IceInternal.BasicStream os = new IceInternal.BasicStream(_instance, Util.currentProtocolEncoding);
        os.writeBlob(IceInternal.Protocol.magic);
        Ice.Util.currentProtocol.write__(os);
        Ice.Util.currentProtocolEncoding.write__(os);
        os.writeByte(IceInternal.Protocol.closeConnectionMsg);
        os.writeByte(_compressionSupported ? (byte)1 : (byte)0);
        os.writeInt(IceInternal.Protocol.headerSize); // Message size.

        if(sendMessage(new OutgoingMessage(os, false, false)))
        {
            //
            // Schedule the close timeout to wait for the peer to close the connection. If
            // the message was queued for sending, sendNextMessage will schedule the timeout
            // once all messages have been sent.
            //
            scheduleTimeout(IceInternal.SocketOperation.Write, closeTimeout());
        }

        //
        // The CloseConnection message should be sufficient. Closing the write
        // end of the socket is probably an artifact of how things were done
        // in IIOP. In fact, shutting down the write end of the socket causes
        // problems on Windows by preventing the peer from using the socket.
        // For example, the peer is no longer able to continue writing a large
        // message after the socket is shut down.
        //
        //_transceiver.shutdownWrite();
    }
}