/// <summary>
/// Appends a block to the end of the underlying BlockArray.
/// </summary>
/// <param name="block">The block to append.</param>
/// <remarks>
/// <para>
/// The underlying block array's SetExactSize() method will be
/// called before appending the block. The stream position will
/// be set to the end of the stream before the method returns.
/// </para>
/// <para>
/// This method is a performance improvement over writing a
/// buffer to the stream via one of the write methods, since the
/// block is adopted directly rather than copied.
/// </para>
/// </remarks>
public void Append(Block block)
{
    // Trim any slack capacity so the appended block lands exactly
    // at the current logical end of the stream.
    blocks.SetExactSize(length);
    blocks.Append(block);

    // Extend the stream length and leave the position at the new end.
    pos = length = length + block.Length;
}
/// <summary>
/// Handles the parsing of received data.
/// </summary>
/// <param name="data">The received data.</param>
/// <param name="cb">Number of bytes received.</param>
/// <returns><c>true</c> if the data has been completely parsed.</returns>
/// <remarks>
/// <note>
/// The data buffer passed MUST NOT be reused. Ownership
/// will be taken over by this instance.
/// </note>
/// </remarks>
/// <exception cref="HttpBadProtocolException">Badly formatted HTTP message.</exception>
public bool Parse(byte[] data, int cb)
{
    if (isChunked)
    {
        // A zero length block means the remote side closed the socket,
        // which is never legal in the middle of a chunked transfer.
        if (cb == 0)
        {
            throw new HttpBadProtocolException("Client closed socket.");
        }

        return ChunkParser(new BlockArray(new Block(data, 0, cb)), 0);
    }

    if (cb == 0)
    {
        // A zero length block means the remote side closed the socket.
        // That is a protocol error for requests; for responses it simply
        // marks the end of the content data.
        if (isRequest)
        {
            throw new HttpBadProtocolException("Client closed socket.");
        }

        // If a Content-Length header was present, the bytes gathered
        // must match it exactly.
        if (cbContent != -1 && cbContent != content.Size)
        {
            throw new HttpBadProtocolException("Content-Length mismatch.");
        }

        return true;
    }

    // Take ownership of the buffer by wrapping it in a block.
    content.Append(new Block(data, 0, cb));

    if (cbContent != -1 && content.Size > cbContent)
    {
        throw new HttpBadProtocolException();   // More data arrived than Content-Length promised
    }

    if (cbContentMax != -1 && content != null && content.Size >= cbContentMax)
    {
        throw new HttpContentSizeException();
    }

    return content.Size == cbContent;
}
public void AddFromBlockArray()
{
    // Destination starts with a single 5-byte block.
    var dest = new BlockArray();
    dest.Append(new byte[] { 0, 1, 2, 3, 4 });

    // Source holds four 5-byte blocks.
    var source = new BlockArray();
    source.Append(new byte[] { 5, 6, 7, 8, 9 });
    source.Append(new byte[] { 10, 11, 12, 13, 14 });
    source.Append(new byte[] { 15, 16, 17, 18, 19 });
    source.Append(new byte[] { 20, 21, 22, 23, 24 });

    // Appending a sub-range of blocks (index 2, count 2) should copy
    // only the last two source blocks.
    dest.Append(source, 2, 2);
    Assert.Equal(new byte[] { 0, 1, 2, 3, 4, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 }, dest.ToByteArray());

    // Truncate back to the original 5 bytes.
    dest.SetExactSize(5);
    Assert.Equal(new byte[] { 0, 1, 2, 3, 4 }, dest.ToByteArray());

    // Appending the whole source array should copy all four blocks.
    dest.Append(source);
    Assert.Equal(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 }, dest.ToByteArray());
}
public void Indexing()
{
    BlockArray blocks;

    // Indexing within a single pre-sized block.
    blocks = new BlockArray(512, 512);
    blocks[0] = 55;
    blocks[1] = 56;
    blocks[511] = 57;
    Assert.Equal(55, blocks[0]);
    Assert.Equal(56, blocks[1]);
    Assert.Equal(57, blocks[511]);

    for (int i = 0; i < 512; i++)
    {
        blocks[i] = (byte)i;
    }

    for (int i = 0; i < 512; i++)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    // BUGFIX: the original loop condition was "i <= 0", which is false
    // for i == 511 so the backward verification never executed.
    for (int i = 511; i >= 0; i--)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    // Indexing across multiple appended blocks.
    blocks = new BlockArray();
    blocks.Append(new Block(10));
    blocks.Append(new Block(10));
    blocks.Append(new Block(10));
    blocks[0] = 0;
    blocks[10] = 10;
    blocks[20] = 20;
    Assert.Equal(0, blocks[0]);
    Assert.Equal(10, blocks[10]);
    Assert.Equal(20, blocks[20]);

    blocks = new BlockArray();
    blocks.Append(new Block(10));
    blocks.Append(new Block(10));
    blocks.Append(new Block(10));
    Assert.Equal(30, blocks.Size);

    for (int i = 0; i < 30; i++)
    {
        blocks[i] = (byte)i;
    }

    Assert.Equal(0, blocks[0]);

    // Forward and backward reads must agree (exercises the cached
    // last-referenced-index optimization in both directions).
    for (int i = 0; i < 30; i++)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    for (int i = 29; i >= 0; i--)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    // Backward writes followed by forward and backward reads.
    for (int i = 29; i >= 0; i--)
    {
        blocks[i] = (byte)(i + 10);
    }

    for (int i = 0; i < 30; i++)
    {
        Assert.Equal((byte)(i + 10), blocks[i]);
    }

    for (int i = 29; i >= 0; i--)
    {
        Assert.Equal((byte)(i + 10), blocks[i]);
    }

    // Degenerate case: 1000 blocks of one byte each.
    blocks = new BlockArray(1000, 1);
    for (int i = 0; i < 1000; i++)
    {
        blocks[i] = (byte)i;
    }

    for (int i = 0; i < 1000; i++)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    for (int i = 999; i >= 0; i--)
    {
        Assert.Equal((byte)i, blocks[i]);
    }

    // Alternating front/back access defeats the index cache.
    for (int i = 0; i < 1000; i++)
    {
        Assert.Equal((byte)i, blocks[i]);
        Assert.Equal((byte)(999 - i), blocks[999 - i]);
    }

    //-------------------------

    // Indexing must honor a block's internal offset.
    blocks = new BlockArray(new Block(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 5, 5));
    Assert.Equal(5, blocks[0]);
}
/// <summary>
/// Parses the data block passed as chunked data.
/// </summary>
/// <param name="received">The request/response data received so far.</param>
/// <param name="pos">Position of the first byte of content data after the headers.</param>
/// <returns><c>true</c> if the data has been completely parsed.</returns>
/// <exception cref="HttpBadProtocolException">Badly formatted HTTP message.</exception>
/// <exception cref="HttpContentSizeException">The content size exceeds the maximum allowed.</exception>
private bool ChunkParser(BlockArray received, int pos)
{
    int cbRecv = received.Size;

    // $hack(jeff.lill):
    //
    // The string appending is probably not the most efficient way to
    // implement this but it's a lot easier and probably less bug prone.
    // Most chunked transfer text lines will be very small anyway in
    // real life so this isn't likely to be a big performance problem.

    while (pos < cbRecv)
    {
        switch (chunkState)
        {
            case ChunkState.Start:

                content = new BlockArray();
                sizeLine = string.Empty;
                chunkState = ChunkState.Size;
                break;

            case ChunkState.Size:

                // Append characters until the string is terminated with CRLF,
                // then parse the leading hex digits as the chunk size. Any
                // non-hex character (e.g. a ";" chunk extension) terminates
                // the size digits.

                sizeLine += (char)received[pos++];
                if (sizeLine.EndsWith(Helper.CRLF))
                {
                    cbChunk = -1;   // -1 means "no hex digit seen yet"
                    for (int i = 0; i < sizeLine.Length; i++)
                    {
                        char ch = sizeLine[i];
                        int digit;

                        if ('0' <= ch && ch <= '9')
                        {
                            digit = ch - '0';
                        }
                        else if ('a' <= ch && ch <= 'f')
                        {
                            digit = ch - 'a' + 10;
                        }
                        else if ('A' <= ch && ch <= 'F')
                        {
                            digit = ch - 'A' + 10;
                        }
                        else
                        {
                            break;
                        }

                        if (cbChunk == -1)
                        {
                            cbChunk = 0;
                        }
                        else
                        {
                            cbChunk <<= 4;
                        }

                        cbChunk += digit;
                    }

                    if (cbChunk == -1)
                    {
                        throw new HttpBadProtocolException("Invalid chunked transfer size line.");
                    }

                    if (cbChunk == 0)
                    {
                        // A zero-length chunk marks the end of the content;
                        // only the (possibly empty) footers remain. Note that
                        // sizeLine is intentionally NOT reset here: its trailing
                        // CRLF seeds the CRLF CRLF terminator scan below.
                        chunkState = ChunkState.Footers;
                    }
                    else
                    {
                        cbChunkRead = 0;
                        chunkState = ChunkState.Data;
                    }
                }
                else if (sizeLine.Length > HttpHeaderCollection.MaxHeaderChars)
                {
                    throw new HttpBadProtocolException("Chunked transfer size line is too long.");
                }

                break;

            case ChunkState.Data:

                // Copy as much of the current chunk as is available.

                int cbRemain = received.Size - pos;
                int cb;

                cb = cbChunk - cbChunkRead;
                if (cb > cbRemain)
                {
                    cb = cbRemain;
                }

                content.Append(received.Extract(pos, cb));
                pos += cb;
                cbChunkRead += cb;

                if (cbChunk == cbChunkRead)
                {
                    chunkState = ChunkState.DataCR;
                }

                break;

            case ChunkState.DataCR:

                if (received[pos++] != Helper.CR)
                {
                    throw new HttpBadProtocolException("CRLF expected after chunk data.");
                }

                chunkState = ChunkState.DataLF;
                break;

            case ChunkState.DataLF:

                if (received[pos++] != Helper.LF)
                {
                    throw new HttpBadProtocolException("CRLF expected after chunk data.");
                }

                chunkState = ChunkState.Size;
                sizeLine = string.Empty;
                break;

            case ChunkState.Footers:

                // $todo(jeff.lill):
                //
                // I'm not going to worry about parsing chunked transfer
                // footers right now. All I'm going to do is continue
                // accumulating characters into sizeLine until I
                // see a CRLF CRLF sequence terminating the footers.

                // BUGFIX: the original test was EndsWith(Helper.CRLF), which
                // returned true at the end of the FIRST footer line instead of
                // at the blank line terminating the footer section, truncating
                // any transfer that actually carried trailer headers. The
                // trailer section ends with CRLF CRLF (the size line's own
                // trailing CRLF already present in sizeLine supplies the first
                // CRLF when there are no footers at all).

                sizeLine += (char)received[pos++];
                if (sizeLine.EndsWith(Helper.CRLF + Helper.CRLF))
                {
                    return true;
                }

                break;
        }
    }

    if (cbContentMax != -1 && content != null && content.Size >= cbContentMax)
    {
        throw new HttpContentSizeException();
    }

    return false;
}
/// <summary>
/// Adds the data passed to the information to be parsed as
/// headers.
/// </summary>
/// <param name="data">The received data.</param>
/// <param name="cb">Number of bytes received.</param>
/// <returns><c>true</c> if a complete set of headers has been received.</returns>
/// <remarks>
/// <note>
/// Ownership of the data buffer is passed to
/// this instance. The code receiving data from the network
/// MUST allocate a new buffer and not reuse this one.
/// </note>
/// </remarks>
public bool Parse(byte[] data, int cb)
{
    if (blocks == null)
    {
        throw new InvalidOperationException("Parsing not begun.");
    }

    if (cb <= 0)
    {
        return false;
    }

    // The end of the HTTP headers is marked by a 4 byte CR-LF-CR-LF
    // sequence. The new buffer is appended to the block array and then
    // scanned for that sequence. If the buffer begins with a CR or LF
    // the terminator may span the block boundary, so the scan must start
    // 3 logical bytes before the new data; otherwise a scan of just the
    // new buffer is sufficient.

    int appendAt = blocks.Size;

    blocks.Append(new Block(data, 0, cb));

    if (data[0] != Helper.CR && data[0] != Helper.LF)
    {
        // Fast path: the terminator cannot span blocks, so scan the
        // raw buffer directly.

        for (int i = 0; i + 3 < cb; i++)
        {
            if (data[i + 0] == Helper.CR &&
                data[i + 1] == Helper.LF &&
                data[i + 2] == Helper.CR &&
                data[i + 3] == Helper.LF)
            {
                // Convert the buffer-relative offset to a logical
                // position within the block array.
                dataPos = blocks.Size - cb + i + 4;
                return true;
            }
        }

        return false;
    }

    // Slow path: the CR-LF-CR-LF sequence may cross the block boundary,
    // so back up 3 logical bytes and scan through the block array.
    //
    // $todo(jeff.lill):
    //
    // This lookup won't perform that well due to how BlockArray caches
    // the last referenced index. Since it won't be that common for the
    // header termination to cross data blocks I'm not going to worry
    // about this too much right now.

    int scan = appendAt - 3;

    if (scan < 0)
    {
        scan = 0;
    }

    for (; scan + 3 < blocks.Size; scan++)
    {
        if (blocks[scan + 0] == Helper.CR &&
            blocks[scan + 1] == Helper.LF &&
            blocks[scan + 2] == Helper.CR &&
            blocks[scan + 3] == Helper.LF)
        {
            dataPos = scan + 4;
            return true;
        }
    }

    return false;
}