/// <summary>
/// Completes a batch request that was started with a matching prepare call:
/// takes the request data back from <paramref name="os"/>, auto-flushes the
/// accumulated batch when it exceeds the configured size limit, and marks the
/// batch stream as available again.
/// </summary>
/// <param name="os">The stream holding the encoded request; its buffer is
/// swapped back into the internal batch stream.</param>
/// <exception cref="Ice.LocalException">Rethrown after aborting the batch if
/// any step of finishing the request fails.</exception>
public void finishBatchRequest(BasicStream os)
{
    try
    {
        // NOTE(review): this type uses lock(this)/Monitor.PulseAll(this) as its
        // monitor; waiters elsewhere presumably block on `this` as well.
        lock(this)
        {
            // Reclaim the marshaled request from the caller's stream.
            _batchStream.swap(os);

            if(_batchAutoFlushSize > 0 && (_batchStream.size() > _batchAutoFlushSize))
            {
                //
                // Temporarily save the last request (the one that pushed the
                // batch over the auto-flush limit) so it can seed a new batch.
                //
                byte[] lastRequest = new byte[_batchStream.size() - _batchMarker];
                Buffer buffer = _batchStream.getBuffer();
                buffer.b.position(_batchMarker);
                buffer.b.get(lastRequest);
                _batchStream.resize(_batchMarker, false);

                // Hand the completed batch (everything before the marker) off
                // to the thread pool for asynchronous dispatch.
                int invokeNum = _batchRequestNum;
                BasicStream stream = new BasicStream(_reference.getInstance(),
                                                     Ice.Util.currentProtocolEncoding);
                stream.swap(_batchStream);
                _adapter.getThreadPool().dispatch(() =>
                {
                    invokeAll(stream, 0, invokeNum, true);
                }, null);

                //
                // Reset the batch.
                //
                _batchRequestNum = 0;
                _batchMarker = 0;

                //
                // Start a new batch with the last message that caused us to go
                // over the limit.
                //
                _batchStream.writeBlob(Protocol.requestBatchHdr);
                _batchStream.writeBlob(lastRequest);
            }

            //
            // Increment the number of requests in the batch and release the
            // batch stream for the next prepare call.
            //
            Debug.Assert(_batchStreamInUse);
            ++_batchRequestNum;
            _batchStreamInUse = false;
            Monitor.PulseAll(this);
        }
    }
    catch(Ice.LocalException)
    {
        abortBatchRequest();
        // Fix: was `throw ex;`, which resets the exception's stack trace.
        // A bare `throw;` rethrows while preserving the original trace.
        throw;
    }
}