public static string NoTrimReadTo(Stream stream, byte blocker1, byte blocker2)
{
    byte[] readBuf = VariableSizedBufferPool.Get(1024, true);
    try
    {
        int bufpos = 0;

        int ch = stream.ReadByte();
        while (ch != blocker1 && ch != blocker2 && ch != -1)
        {
            // Replace any non-ASCII byte with '?' (replaces the old ASCII-to-string conversion)
            if (ch > 0x7f)
                ch = '?';

            // Make the buffer larger if it's too short
            if (readBuf.Length <= bufpos)
                VariableSizedBufferPool.Resize(ref readBuf, readBuf.Length * 2, true);

            // Skip leading whitespace (TrimStart), keep everything after the first non-whitespace byte
            if (bufpos > 0 || !char.IsWhiteSpace((char)ch))
                readBuf[bufpos++] = (byte)ch;

            ch = stream.ReadByte();
        }

        return System.Text.Encoding.UTF8.GetString(readBuf, 0, bufpos);
    }
    finally
    {
        VariableSizedBufferPool.Release(readBuf);
    }
}
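// Illustrative example (assumed usage, not part of the original source): NoTrimReadTo reads raw bytes
// until it hits either blocker byte or the end of the stream, e.g. to read a token up to a ':' separator
// or the end of a line:
//
//   string token = NoTrimReadTo(stream, (byte)':', LF);
//
// Leading whitespace is skipped and any byte above 0x7F is replaced with '?'; trailing whitespace before
// the blocker is kept, which is presumably why the method is named "NoTrim".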
protected void FlushRemainingFragmentBuffer()
{
    if (fragmentBuffer != null)
    {
        VariableSizedBufferPool.Resize(ref fragmentBuffer, fragmentBufferDataLength, false);
        AddStreamedFragment(fragmentBuffer);
        fragmentBuffer = null;
        fragmentBufferDataLength = 0;
    }

#if !BESTHTTP_DISABLE_CACHING
    if (cacheStream != null)
    {
        cacheStream.Dispose();
        cacheStream = null;

        HTTPCacheService.SetBodyLength(baseRequest.CurrentUri, allFragmentSize);
    }
#endif

    var tmp = fragmentWaitEvent;
    fragmentWaitEvent = null;
    if (tmp != null)
        (tmp as IDisposable).Dispose();
}
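// For reference (added note, not part of the original source): a chunked body (RFC 2616, section 3.6.1)
// looks like this on the wire:
//
//   4\r\n            <- chunk size in hexadecimal
//   Wiki\r\n         <- chunk data, followed by CRLF
//   5\r\n
//   pedia\r\n
//   0\r\n            <- a zero-sized chunk terminates the body
//   \r\n             <- optional trailer headers, then a final CRLF
//
// ReadChunked below parses exactly this framing: ReadChunkLength consumes the hexadecimal size line,
// the chunk data is read into a pooled buffer, and ReadHeaders consumes the optional trailers at the end.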
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
protected void ReadChunked(Stream stream)
{
    BeginReceiveStreamFragments();

    string contentLengthHeader = GetFirstHeaderValue("Content-Length");
    bool hasContentLengthHeader = !string.IsNullOrEmpty(contentLengthHeader);
    int realLength = 0;
    if (hasContentLengthHeader)
        hasContentLengthHeader = int.TryParse(contentLengthHeader, out realLength);

    if (HTTPManager.Logger.Level == Logger.Loglevels.All)
        VerboseLogging(string.Format("ReadChunked - hasContentLengthHeader: {0}, contentLengthHeader: {1} realLength: {2:N0}", hasContentLengthHeader.ToString(), contentLengthHeader, realLength));

    using (var output = new BufferPoolMemoryStream())
    {
        int chunkLength = ReadChunkLength(stream);

        if (HTTPManager.Logger.Level == Logger.Loglevels.All)
            VerboseLogging(string.Format("chunkLength: {0:N0}", chunkLength));

        byte[] buffer = VariableSizedBufferPool.Get(Mathf.NextPowerOfTwo(chunkLength), true);

        int contentLength = 0;

        // Progress report:
        baseRequest.DownloadLength = hasContentLengthHeader ? realLength : chunkLength;
        baseRequest.DownloadProgressChanged = this.IsSuccess
#if !BESTHTTP_DISABLE_CACHING
            || this.IsFromCache
#endif
            ;

        string encoding =
#if !BESTHTTP_DISABLE_CACHING
            IsFromCache ? null :
#endif
            GetFirstHeaderValue("content-encoding");
        bool gzipped = !string.IsNullOrEmpty(encoding) && encoding == "gzip";

        while (chunkLength != 0)
        {
            // To avoid more GC garbage we use only one buffer, and resize it only if the next chunk doesn't fit.
            if (buffer.Length < chunkLength)
                VariableSizedBufferPool.Resize(ref buffer, chunkLength, true);

            int readBytes = 0;

            // Fill up the buffer
            do
            {
                int bytes = stream.Read(buffer, readBytes, chunkLength - readBytes);
                if (bytes <= 0)
                    throw ExceptionHelper.ServerClosedTCPStream();

                readBytes += bytes;

                // Progress report:
                // Placing the report inside this loop makes progress updates much more frequent.
                baseRequest.Downloaded += bytes;
                baseRequest.DownloadProgressChanged = this.IsSuccess
#if !BESTHTTP_DISABLE_CACHING
                    || this.IsFromCache
#endif
                    ;
            } while (readBytes < chunkLength);

            if (baseRequest.UseStreaming)
            {
                if (gzipped)
                {
                    var decompressed = Decompress(buffer, 0, readBytes);
                    if (decompressed != null)
                        FeedStreamFragment(decompressed, 0, decompressed.Length);
                }
                else
                    FeedStreamFragment(buffer, 0, readBytes);
            }
            else
                output.Write(buffer, 0, readBytes);

            // Every chunk's data has a trailing CRLF
            ReadTo(stream, LF);

            contentLength += readBytes;

            // Read the next chunk's length
            chunkLength = ReadChunkLength(stream);

            if (HTTPManager.Logger.Level == Logger.Loglevels.All)
                VerboseLogging(string.Format("chunkLength: {0:N0}", chunkLength));

            if (!hasContentLengthHeader)
                baseRequest.DownloadLength += chunkLength;

            baseRequest.DownloadProgressChanged = this.IsSuccess
#if !BESTHTTP_DISABLE_CACHING
                || this.IsFromCache
#endif
                ;
        }

        VariableSizedBufferPool.Release(buffer);

        if (baseRequest.UseStreaming)
        {
            if (gzipped)
            {
                var decompressed = Decompress(null, 0, 0, true);
                if (decompressed != null)
                    FeedStreamFragment(decompressed, 0, decompressed.Length);
            }

            FlushRemainingFragmentBuffer();
        }

        // Read the trailing headers or the closing CRLF
        ReadHeaders(stream);

        // HTTP servers sometimes use compression (gzip) or deflate methods to optimize transmission.
        // How chunked and gzip encoding interact is dictated by the two-staged encoding of HTTP:
        // first the content stream is encoded (Content-Encoding: gzip), after which the resulting byte stream is
        // encoded for transfer using another encoder (Transfer-Encoding: chunked).
        // This means that when both compression and chunked encoding are enabled, the chunk framing itself is not
        // compressed, and the data in each chunk should not be compressed individually.
        // The remote endpoint decodes the incoming stream by first decoding the Transfer-Encoding, followed by the
        // specified Content-Encoding.
        // A better implementation would decode the chunks on the fly; currently the whole stream must be downloaded
        // and then decoded, which needs more memory.
        if (!baseRequest.UseStreaming)
            this.Data = DecodeStream(output);
    }

    CloseDecompressors();
}
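// Illustrative sketch (added note, not part of the original source) of the decode order described above:
// once the chunk framing has been removed (what the loop in ReadChunked does), the concatenated payload is
// a plain gzip stream that could also be inflated with the BCL, for example:
//
//   using (var gzip = new System.IO.Compression.GZipStream(dechunkedStream, System.IO.Compression.CompressionMode.Decompress))
//       gzip.CopyTo(decodedOutput);
//
// 'dechunkedStream' and 'decodedOutput' are hypothetical placeholders; the plugin uses its own
// Decompress()/DecodeStream() helpers instead so it can keep reusing pooled buffers.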