/// <summary>
/// Initializes the chunk from an already-parsed response header, forwarding the
/// header collection and HTTP version to the base and copying the status fields.
/// </summary>
/// <param name="header">The parsed response header to copy from.</param>
internal HttpResponseDataChunk(HttpResponseHeader header)
    : base(header.Headers, header.Version)
{
    // Status line information.
    Message = header.Message;
    ResponseCode = header.ResponseCode;

    // Record what kind of request produced this response, as HEAD and CONNECT
    // responses are framed differently from ordinary responses.
    ConnectResponse = header.ConnectRequest;
    HeadResponse = header.HeadRequest;
}
/// <summary>
/// Builds the parser configuration used to read the body of a response, based on
/// the layer configuration entry matching this request/response pair.
/// </summary>
/// <param name="response">The response header being parsed.</param>
/// <param name="request">The request header which elicited the response.</param>
/// <returns>The populated parser configuration.</returns>
private HttpParserConfig CreateConfig(HttpResponseHeader response, HttpRequestHeader request)
{
    var layerConfig = _layer._config;
    var entry = layerConfig.GetEntry(request, response);

    var config = new HttpParserConfig
    {
        ConvertToChunked = entry.ConvertToChunked,
        StreamBody = entry.ResponseStreamBody,
        StreamChunkSize = layerConfig.ResponseStreamChunkSize,
        StrictParsing = layerConfig.ResponseStrictParsing,
    };

    // Force streaming when a buffered-response cap is configured (non-zero) and
    // the declared content length exceeds it.
    bool exceedsBufferLimit = layerConfig.BufferedResponseMaxLength != 0
        && response.ContentLength > layerConfig.BufferedResponseMaxLength;
    if (exceedsBufferLimit)
    {
        config.StreamBody = true;
    }

    return config;
}
/// <summary>
/// Reads the next frame of the response stream. When the current frame enumerator
/// is exhausted, parses the next response header (or switches to transparent
/// pass-through framing) before producing frames again.
/// </summary>
/// <returns>The next frame, or null when the underlying stream has ended.</returns>
public override DataFrame Read()
{
    DataFrame frame = null;
    try
    {
        // Only (re)parse when we have no enumerator yet or the current one is drained.
        if (_chunks == null || !_chunks.MoveNext())
        {
            if (_isTransparent)
            {
                // Transparent mode: no HTTP parsing, just raw frames from the reader.
                _chunks = BaseHttpDataAdapter.ReadFrames(null, _reader).GetEnumerator();
            }
            else
            {
                _currentHeader = HttpParser.ReadResponseHeader(_reader, _layer._config.ResponseStrictParsing, _logger);
                HttpRequestHeader request = null;
                lock (_requests)
                {
                    if (_requests.Count > 0)
                    {
                        // If we have a queued request then dequeue and set head response
                        request = _requests.Dequeue();
                    }
                }
                lock (_requestStream)
                {
                    if (request == null)
                    {
                        // Fall back to re-parsing the raw request bytes we captured,
                        // so the response can still be matched to its request type.
                        try
                        {
                            _requestStream.Position = 0;
                            request = HttpParser.ReadRequestHeader(new DataReader(_requestStream), false, _logger);
                        }
                        catch (EndOfStreamException)
                        {
                            // Ignore end of stream, might just mean we sent garbage to the server which we can't parse
                        }
                    }
                    // The captured bytes are consumed either way.
                    _requestStream.SetLength(0);
                }
                if (request != null)
                {
                    // HEAD/CONNECT responses are framed differently (e.g. no body for HEAD).
                    _currentHeader.SetHeadRequest(request.IsHead);
                    _currentHeader.SetConnectRequest(request.IsConnect);
                    if (_currentHeader.Is100Continue)
                    {
                        // If a 100 status response then requeue the request
                        // (put it back at the FRONT: the real response to it is still coming).
                        lock (_requests)
                        {
                            HttpRequestHeader[] headers = _requests.ToArray();
                            _requests.Clear();
                            // Unlikely that another request will come as client is probably waiting for 100 status, but might as well be sure
                            _requests.Enqueue(request);
                            foreach (HttpRequestHeader head in headers)
                            {
                                _requests.Enqueue(head);
                            }
                        }
                    }
                }
                if (_currentHeader.IsUpgradeResponse)
                {
                    // Protocol upgrade (e.g. WebSocket): stop HTTP parsing from here on.
                    _layer._upgrading = true;
                    _isTransparent = true;
                }
                _chunks = _currentHeader.ReadFrames(CreateConfig(_currentHeader, request)).GetEnumerator();
            }
            // A freshly-created enumerator yielding nothing means the stream is done.
            if (!_chunks.MoveNext())
            {
                throw new EndOfStreamException();
            }
        }
        frame = _chunks.Current;
    }
    catch (EndOfStreamException)
    {
        // End of stream is signalled to the caller as a null frame, not an exception.
        frame = null;
    }
    return frame;
}
/// <summary>
/// Determines whether this entry matches a request/response pair. The request must
/// satisfy the request-only match, and the response's first Content-Type header
/// must satisfy the content type filter.
/// </summary>
/// <param name="request">The request header to match.</param>
/// <param name="response">The response header whose Content-Type is checked.</param>
/// <returns>True if both the request and the response content type match;
/// false when the request does not match or the response has no Content-Type header.</returns>
public bool IsMatch(HttpRequestHeader request, HttpResponseHeader response)
{
    if (!IsMatch(request))
    {
        return false;
    }

    foreach (KeyDataPair<string> pair in response.Headers)
    {
        if (pair.Name.Equals("Content-Type", StringComparison.OrdinalIgnoreCase))
        {
            // Only the first Content-Type header is consulted.
            return ContentTypeMatch.IsMatch(pair.Value);
        }
    }

    // No Content-Type header present, so the content type filter cannot match.
    return false;
}
/// <summary>
/// Gets the configuration entry matching the specified request/response pair by
/// delegating to the predicate-based overload with <c>IsMatch(request, response)</c>.
/// </summary>
/// <param name="request">The request header to match against.</param>
/// <param name="response">The response header to match against.</param>
/// <returns>The matching entry; when nothing matches, whatever the predicate
/// overload of GetEntry returns — presumably a default entry or null, not visible here.</returns>
public HttpLayerConfigEntry GetEntry(HttpRequestHeader request, HttpResponseHeader response)
{
    return GetEntry(e => e.IsMatch(request, response));
}