/// <summary>
/// Serves the request by streaming the file at <paramref name="path"/> to the client,
/// honoring conditional headers (If-None-Match, If-Modified-Since) and byte Range requests.
/// </summary>
/// <returns>An awaitable task whose result is <c>true</c> when the request was handled.</returns>
/// <param name="path">The local path to a file to send.</param>
/// <param name="mimetype">The mime type to report.</param>
/// <param name="context">The request context.</param>
protected virtual async Task<bool> ServeRequest(string path, string mimetype, IHttpContext context)
{
    // Tracks whether a failure happened while opening the file; such failures
    // are reported as 403 Forbidden instead of 500 Internal Server Error.
    var permissionissue = false;
    try
    {
        string etag = null;

        // The cache key combines the last-write ticks with the path, so a changed
        // file automatically misses the cache. A negative cache size disables ETags.
        string etagkey = ETagCacheSize < 0
            ? null
            : (await m_vfs.GetLastFileWriteTimeUtcAsync(path)).Ticks + path;

        string[] clientetags = new string[0];
        if (etagkey != null)
        {
            KeyValuePair<string, long> etagcacheddata;
            using (await m_etagLock.LockAsync())
                m_etagCache.TryGetValue(etagkey, out etagcacheddata);
            etag = etagcacheddata.Key;

            // Collect the client-supplied ETags and short-circuit with 304 if one
            // matches the cached value.
            var ce = ETAG_RE.Matches(context.Request.Headers["If-None-Match"] ?? string.Empty);
            if (ce.Count > 0)
            {
                clientetags = new string[ce.Count];
                for (var i = 0; i < clientetags.Length; i++)
                {
                    clientetags[i] = ce[i].Groups["etag"].Value;
                    if (etag != null && string.Equals(clientetags[i], etag, StringComparison.OrdinalIgnoreCase))
                        return SetNotModified(context, etag);
                }
            }
        }

        permissionissue = true;
        using (var fs = await m_vfs.OpenReadAsync(path))
        {
            permissionissue = false;

            var startoffset = 0L;
            var bytecount = fs.Length;
            var endoffset = bytecount - 1;

            var rangerequest = context.Request.Headers["Range"];
            if (!string.IsNullOrWhiteSpace(rangerequest))
            {
                // The regex must consume the entire header value; partial matches
                // indicate a malformed Range header.
                var m = RANGE_MATCHER.Match(rangerequest);
                if (!m.Success || m.Length != rangerequest.Length)
                    return SetInvalidRangeHeader(context, bytecount);

                if (m.Groups["start"].Length != 0)
                {
                    if (!long.TryParse(m.Groups["start"].Value, out startoffset))
                        return SetInvalidRangeHeader(context, bytecount);
                }

                if (m.Groups["end"].Length != 0)
                {
                    if (!long.TryParse(m.Groups["end"].Value, out endoffset))
                        return SetInvalidRangeHeader(context, bytecount);
                }

                if (m.Groups["start"].Length == 0 && m.Groups["end"].Length == 0)
                    return SetInvalidRangeHeader(context, bytecount);

                if (m.Groups["start"].Length == 0 && m.Groups["end"].Length != 0)
                {
                    // Suffix range ("bytes=-N"): serve the last N bytes.
                    // BUGFIX: clamp to zero so an oversized suffix (N > file size)
                    // serves the whole file instead of seeking to a negative offset.
                    startoffset = Math.Max(0, bytecount - endoffset);
                    endoffset = bytecount - 1;
                }

                if (endoffset > bytecount - 1)
                    endoffset = bytecount - 1;

                if (endoffset < startoffset)
                    return SetInvalidRangeHeader(context, bytecount);
            }

            // ETags are enabled but nothing was cached; compute the tag now
            // and store it, evicting the oldest third when the cache overflows.
            if (etagkey != null && etag == null)
            {
                fs.Position = 0;
                etag = await ComputeETag(fs);
                if (ETagCacheSize > 0)
                {
                    using (await m_etagLock.LockAsync())
                    {
                        m_etagCache[etagkey] = new KeyValuePair<string, long>(etag, DateTime.UtcNow.Ticks);
                        if (m_etagCache.Count > ETagCacheSize)
                        {
                            // Don't repeatedly remove items,
                            // but batch up the removal,
                            // as the sorting takes some time
                            var removecount = Math.Max(1, m_etagCache.Count / 3);
                            foreach (var key in m_etagCache.OrderBy(x => x.Value.Value).Select(x => x.Key).Take(removecount).ToArray())
                                m_etagCache.Remove(key);
                        }
                    }
                }
            }

            // Re-check the client ETags against the (possibly freshly computed) value.
            // CONSISTENCY: use the same case-insensitive comparison as the
            // cached-value fast path above (previously this used Ordinal).
            // clientetags is never null — it is initialized to an empty array.
            if (etag != null && clientetags.Any(x => string.Equals(x, etag, StringComparison.OrdinalIgnoreCase)))
                return SetNotModified(context, etag);

            var lastmodified = await m_vfs.GetLastFileWriteTimeUtcAsync(path);
            context.Response.ContentType = mimetype;
            context.Response.StatusCode = HttpStatusCode.OK;
            context.Response.AddHeader("Last-Modified", lastmodified.ToString("R", CultureInfo.InvariantCulture));
            context.Response.AddHeader("Accept-Ranges", "bytes");

            // If the VFS or something else handles cache headers, do not overwrite them here
            if (!context.Response.Headers.ContainsKey("Cache-Control") && !context.Response.Headers.ContainsKey("Expires"))
                context.Response.SetExpires(CacheSeconds);

            // Parse If-Modified-Since. CONSISTENCY: use the invariant culture's
            // RFC1123 pattern to match the invariant-culture parse (the pattern is
            // culture-invariant, so behavior is unchanged, but mixing cultures is a smell).
            DateTime modifiedsincedate;
            DateTime.TryParseExact(
                context.Request.Headers["If-Modified-Since"],
                CultureInfo.InvariantCulture.DateTimeFormat.RFC1123Pattern,
                CultureInfo.InvariantCulture,
                DateTimeStyles.AdjustToUniversal | DateTimeStyles.AssumeUniversal,
                out modifiedsincedate);

            if (modifiedsincedate == lastmodified)
            {
                return SetNotModified(context, etag);
            }
            else
            {
                if (etag != null)
                    context.Response.Headers["ETag"] = $"\"{etag}\"";

                context.Response.ContentLength = endoffset - startoffset + 1;
                if (context.Response.ContentLength != bytecount)
                {
                    context.Response.StatusCode = HttpStatusCode.PartialContent;
                    context.Response.AddHeader("Content-Range", string.Format("bytes {0}-{1}/{2}", startoffset, endoffset, bytecount));
                }
            }

            // Give subclasses/middleware a chance to adjust the response before the body.
            await BeforeResponseAsync(context, fs);
            if (context.Response.StatusCode == HttpStatusCode.NotModified)
                return true;

            // HEAD requests get headers only, never a body.
            if (string.Equals(context.Request.Method, "HEAD", StringComparison.Ordinal))
            {
                if (context.Response.ContentLength != 0)
                {
                    context.Response.KeepAlive = false;
                    await context.Response.FlushHeadersAsync();
                }
                return true;
            }

            fs.Position = startoffset;
            var remain = context.Response.ContentLength;
            var buf = new byte[TransferChunkSize];

            // Since this is a transfer, we do not honor the processing timeout here
            //var ct = context.Request.TimeoutCancellationToken;
            await using (var os = context.Response.GetResponseStream())
            {
                while (remain > 0)
                {
                    var r = await fs.ReadAsync(buf, 0, (int)Math.Min(buf.Length, remain));

                    // BUGFIX: a zero-byte read (file truncated while being served)
                    // previously left `remain` unchanged and spun forever.
                    if (r <= 0)
                        break;

                    // BUGFIX: CancellationTokenSource(int) interprets the value as
                    // MILLISECONDS; a value named "...Seconds" must be converted via
                    // TimeSpan or the write is cancelled ~1000x too early.
                    // TODO(review): confirm ActivityTimeoutSeconds is a numeric second count.
                    using (var ct = new CancellationTokenSource(TimeSpan.FromSeconds(ActivityTimeoutSeconds)))
                        await os.WriteAsync(buf, 0, r, ct.Token);

                    remain -= r;
                }
            }
        }
    }
    catch (Exception ex)
    {
        // Log the error
        await context.LogMessageAsync(LogLevel.Error, $"Failed to process file: {path}", ex);

        // If this happens when we try to open the file, report as permission problem
        if (permissionissue)
            throw new HttpException(HttpStatusCode.Forbidden);

        // Something else has happened
        throw new HttpException(HttpStatusCode.InternalServerError);
    }

    return true;
}