/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-prof", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_MPRE)
    {
        handleMPRE(client, data);
    }
    else if (type == CHUNK_MPSE)
    {
        handleMPSE(client, data);
    }
    else if (type == CHUNK_MPRQ)
    {
        handleMPRQ(client, data);
    }
    else if (type == CHUNK_FAIL)
    {
        handleFAIL(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/// <summary>
/// Send a DDM packet to the client.
///
/// Ideally, we can do this with a single channel write. If that doesn't
/// happen, we have to prevent anybody else from writing to the channel
/// until this packet completes, so we synchronize on the channel.
///
/// Another goal is to avoid unnecessary buffer copies, so we write
/// directly out of the JdwpPacket's ByteBuffer.
/// </summary>
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
//ORIGINAL LINE: void sendAndConsume(JdwpPacket packet, ChunkHandler replyHandler) throws java.io.IOException
internal virtual void sendAndConsume(JdwpPacket packet, ChunkHandler replyHandler)
{
    if (mChan == null)
    {
        // can happen for e.g. THST packets
        Log.v("ddms", "Not sending packet -- client is closed");
        return;
    }

    if (replyHandler != null)
    {
        /*
         * Add the ID to the list of outstanding requests. We have to do
         * this before sending the packet, in case the response comes back
         * before our thread returns from the packet-send function.
         */
        addRequestId(packet.id, replyHandler);
    }

    lock (mChan)
    {
        try
        {
            packet.writeAndConsume(mChan);
        }
        catch (IOException)
        {
            // The send failed, so no reply will ever arrive; drop the
            // pending request before propagating the error.
            removeRequestId(packet.id);
            throw;
        }
    }
}
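// A minimal, self-contained sketch of the pattern sendAndConsume follows:
// register the request ID before writing, serialize writes with a lock so a
// frame is never interleaved with another writer's, and drop the pending
// entry again if the write throws. The names (SendFramed, PendingReplies)
// and the Stream-based channel are illustrative assumptions, not this
// library's API.
using System;
using System.Collections.Generic;
using System.IO;

static class SendFramed
{
    private static readonly object writeLock = new object();
    private static readonly Dictionary<int, Action<byte[]>> PendingReplies =
        new Dictionary<int, Action<byte[]>>();

    internal static void Send(Stream channel, int requestId, byte[] frame, Action<byte[]> onReply)
    {
        if (onReply != null)
        {
            // Register first: the reply could arrive on another thread
            // before Write() returns.
            lock (PendingReplies) { PendingReplies[requestId] = onReply; }
        }

        lock (writeLock)
        {
            try
            {
                channel.Write(frame, 0, frame.Length);
            }
            catch (IOException)
            {
                // The request never went out, so no reply will come back;
                // forget it before propagating the error.
                lock (PendingReplies) { PendingReplies.Remove(requestId); }
                throw;
            }
        }
    }
}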
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-nativeheap", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_NHGT)
    {
        handleNHGT(client, data);
    }
    else if (type == CHUNK_NHST)
    {
        // start chunk before any NHSG chunk(s)
        client.clientData.nativeHeapData.clearHeapData();
    }
    else if (type == CHUNK_NHEN)
    {
        // end chunk after NHSG chunk(s)
        client.clientData.nativeHeapData.sealHeapData();
    }
    else if (type == CHUNK_NHSG)
    {
        handleNHSG(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }

    client.update(Client.CHANGE_NATIVE_HEAP_DATA);
}
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-thread", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_THCR)
    {
        handleTHCR(client, data);
    }
    else if (type == CHUNK_THDE)
    {
        handleTHDE(client, data);
    }
    else if (type == CHUNK_THST)
    {
        handleTHST(client, data);
    }
    else if (type == CHUNK_THNM)
    {
        handleTHNM(client, data);
    }
    else if (type == CHUNK_STKL)
    {
        handleSTKL(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/*
 * Add the specified ID to the list of request IDs for which we await
 * a response.
 */
private void addRequestId(int id, ChunkHandler handler)
{
    lock (mOutstandingReqs)
    {
        if (Log.Config.LOGV)
        {
            Log.v("ddms", "Adding req 0x" + id.toHexString() + " to set");
        }
        mOutstandingReqs.Add(id, handler);
    }
}
/*
 * Broadcast an event to all message handlers.
 */
private void broadcast(int @event, Client client)
{
    Log.d("ddms", "broadcast " + @event + ": " + client);

    /*
     * The handler objects appear once in mHandlerMap for each message they
     * handle. We want to notify them once each, so we convert the HashMap
     * to a HashSet before we iterate.
     */
    HashSet<ChunkHandler> set;
    lock (mHandlerMap)
    {
        ICollection<ChunkHandler> values = mHandlerMap.Values;
        set = new HashSet<ChunkHandler>(values);
    }

    IEnumerator<ChunkHandler> iter = set.GetEnumerator();
    while (iter.MoveNext())
    {
        ChunkHandler handler = iter.Current;
        switch (@event)
        {
            case CLIENT_READY:
                try
                {
                    handler.clientReady(client);
                }
                catch (IOException)
                {
                    // Something failed with the client. It should
                    // fall out of the list the next time we try to
                    // do something with it, so we discard the
                    // exception here and assume cleanup will happen
                    // later. May need to propagate farther. The
                    // trouble is that not all values for "event" may
                    // actually throw an exception.
                    Log.w("ddms", "Got exception while broadcasting 'ready'");
                    return;
                }
                break;
            case CLIENT_DISCONNECTED:
                handler.clientDisconnected(client);
                break;
            default:
                throw new NotSupportedException();
        }
    }
}
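// A small standalone sketch of the dedupe step in broadcast(): a handler
// registered under several chunk types appears several times among the
// map's values, so copying the values into a HashSet guarantees it is
// notified only once. The string "handlers" and the numeric type codes
// below are illustrative stand-ins, not types from this library.
using System;
using System.Collections.Generic;

static class BroadcastDedupeSketch
{
    internal static void Demo()
    {
        var handlerMap = new Dictionary<int, string>
        {
            { 0x54484352, "threadHandler" }, // "THCR"
            { 0x54484445, "threadHandler" }, // "THDE" -- same handler, second type
            { 0x48454C4F, "helloHandler" }   // "HELO"
        };

        // Values contain threadHandler twice; the set contains it once.
        var unique = new HashSet<string>(handlerMap.Values);

        foreach (var handler in unique)
        {
            Console.WriteLine("notify " + handler); // prints two lines, not three
        }
    }
}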
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-test", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_TEST)
    {
        handleTEST(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-appname", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_APNM)
    {
        Debug.Assert(!isReply);
        handleAPNM(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/// <summary>
/// Determine whether this is a response to a request we sent earlier.
/// If so, return the ChunkHandler responsible.
/// </summary>
internal virtual ChunkHandler isResponseToUs(int id)
{
    lock (mOutstandingReqs)
    {
        ChunkHandler handler = mOutstandingReqs[id];
        if (handler != null)
        {
            if (Log.Config.LOGV)
            {
                Log.v("ddms", "Found 0x" + id.toHexString() + " in request set - " + handler);
            }
            return handler;
        }
    }

    return null;
}
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-hello", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_HELO)
    {
        Debug.Assert(isReply);
        handleHELO(client, data);
    }
    else if (type == CHUNK_FEAT)
    {
        handleFEAT(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/// <summary>
/// Register "handler" as the handler for type "type".
/// </summary>
internal void registerChunkHandler(int type, ChunkHandler handler)
{
    lock (this)
    {
        if (mInstance == null)
        {
            return;
        }

        lock (mHandlerMap)
        {
            if (mHandlerMap[type] == null)
            {
                mHandlerMap.Add(type, handler);
            }
        }
    }
}
/*
 * Process an incoming DDM packet. If this is a reply to an earlier request,
 * "handler" will be set to the handler responsible for the original
 * request. The spec allows a JDWP message to include multiple DDM chunks.
 */
private void callHandler(Client client, JdwpPacket packet, ChunkHandler handler)
{
    // on first DDM packet received, broadcast a "ready" message
    if (!client.ddmSeen())
    {
        broadcast(CLIENT_READY, client);
    }

    ByteBuffer buf = packet.payload;
    int type, length;
    bool reply = true;

    type = buf.getInt();
    length = buf.getInt();

    if (handler == null)
    {
        // not a reply, figure out who wants it
        lock (mHandlerMap)
        {
            handler = mHandlerMap[type];
            reply = false;
        }
    }

    if (handler == null)
    {
        Log.w("ddms", "Received unsupported chunk type " + ChunkHandler.name(type) + " (len=" + length + ")");
    }
    else
    {
        Log.d("ddms", "Calling handler for " + ChunkHandler.name(type) + " [" + handler + "] (len=" + length + ")");
        ByteBuffer ibuf = buf.slice();
        ByteBuffer roBuf = ibuf.asReadOnlyBuffer(); // enforce R/O
        roBuf.order = ChunkHandler.CHUNK_ORDER;

        // do the handling of the chunk synchronized on the client list
        // to be sure there's no concurrency issue when we look for HOME
        // in hasApp()
        lock (mClientList)
        {
            handler.handleChunk(client, type, roBuf, reply, packet.id);
        }
    }
}
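// For reference, a sketch of the chunk framing callHandler relies on: each
// DDM chunk begins with a 4-byte type and a 4-byte length (both read in
// CHUNK_ORDER), and the type is four packed ASCII characters, which is why
// the log lines above show names like "HELO" or "THST". The helpers below
// assume the usual big-endian fourCC packing; they are illustrative, not
// copied from ChunkHandler.
static string ChunkTypeToName(int type)
{
    return new string(new[]
    {
        (char)((type >> 24) & 0xFF),
        (char)((type >> 16) & 0xFF),
        (char)((type >> 8) & 0xFF),
        (char)(type & 0xFF)
    });
}

static int ChunkNameToType(string name) // e.g. ChunkNameToType("HELO")
{
    return (name[0] << 24) | (name[1] << 16) | (name[2] << 8) | name[3];
}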
/// <summary>
/// Chunk handler entry point.
/// </summary>
internal override void handleChunk(Client client, int type, ByteBuffer data, bool isReply, int msgId)
{
    Log.d("ddm-heap", "handling " + ChunkHandler.name(type));

    if (type == CHUNK_HPIF)
    {
        handleHPIF(client, data);
    }
    else if (type == CHUNK_HPST)
    {
        handleHPST(client, data);
    }
    else if (type == CHUNK_HPEN)
    {
        handleHPEN(client, data);
    }
    else if (type == CHUNK_HPSG)
    {
        handleHPSG(client, data);
    }
    else if (type == CHUNK_HPDU)
    {
        handleHPDU(client, data);
    }
    else if (type == CHUNK_HPDS)
    {
        handleHPDS(client, data);
    }
    else if (type == CHUNK_REAQ)
    {
        handleREAQ(client, data);
    }
    else if (type == CHUNK_REAL)
    {
        handleREAL(client, data);
    }
    else
    {
        handleUnknownChunk(client, type, data, isReply, msgId);
    }
}
/*
 * Something happened. Figure out what.
 */
private void processClientActivity(SelectionKey key)
{
    Client client = (Client)key.attachment();

    try
    {
        if (key.readable == false || key.valid == false)
        {
            Log.d("ddms", "Invalid key from " + client + ". Dropping client.");
            dropClient(client, true); // notify
            return;
        }

        client.read();

        /*
         * See if we have a full packet in the buffer. It's possible we have
         * more than one packet, so we have to loop.
         */
        JdwpPacket packet = client.jdwpPacket;
        while (packet != null)
        {
            if (packet.ddmPacket)
            {
                // unsolicited DDM request - hand it off
                Debug.Assert(!packet.reply);
                callHandler(client, packet, null);
                packet.consume();
            }
            else if (packet.reply && client.isResponseToUs(packet.id) != null)
            {
                // reply to earlier DDM request
                ChunkHandler handler = client.isResponseToUs(packet.id);
                if (packet.error)
                {
                    client.packetFailed(packet);
                }
                else if (packet.empty)
                {
                    Log.d("ddms", "Got empty reply for 0x" + packet.id.toHexString() + " from " + client);
                }
                else
                {
                    callHandler(client, packet, handler);
                }
                packet.consume();
                client.removeRequestId(packet.id);
            }
            else
            {
                Log.v("ddms", "Forwarding client " + (packet.reply ? "reply" : "event") + " 0x" + packet.id.toHexString() + " to " + client.debugger);
                client.forwardPacketToDebugger(packet);
            }

            // find next
            packet = client.jdwpPacket;
        }
    }
    catch (CancelledKeyException)
    {
        // key was canceled, probably because the client disconnected before
        // we could read anything from it, so we drop it.
        dropClient(client, true); // notify
    }
    catch (IOException)
    {
        // something closed down, no need to print anything. The client is simply dropped.
        dropClient(client, true); // notify
    }
    catch (Exception ex)
    {
        Log.e("ddms", ex);

        /* close the client; automatically un-registers from selector */
        dropClient(client, true); // notify

        if (ex is OverflowException)
        {
            Log.w("ddms", "Client data packet exceeded maximum buffer size " + client);
        }
        else
        {
            // don't know what this is, display it
            Log.e("ddms", ex);
        }
    }
}
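// A self-contained sketch of the drain loop above: one read() may deliver
// zero, one, or several complete packets, so we keep pulling full packets
// out of the buffer and leave any trailing partial packet for the next
// read. The packet format here (4-byte big-endian length plus payload) is a
// simplified stand-in for JDWP framing, chosen only to show the loop shape.
using System.Collections.Generic;

static class PacketDrainSketch
{
    // Returns the complete payloads found in "buffer" and compacts the
    // buffer so it holds only the unread remainder.
    internal static List<byte[]> Drain(List<byte> buffer)
    {
        var packets = new List<byte[]>();
        int offset = 0;

        while (buffer.Count - offset >= 4)
        {
            int length = (buffer[offset] << 24) | (buffer[offset + 1] << 16)
                       | (buffer[offset + 2] << 8) | buffer[offset + 3];

            if (buffer.Count - offset - 4 < length)
            {
                break; // header seen, body not fully arrived yet
            }

            packets.Add(buffer.GetRange(offset + 4, length).ToArray());
            offset += 4 + length;
        }

        buffer.RemoveRange(0, offset); // keep only the partial tail
        return packets;
    }
}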