/// <summary>
/// Streams the cached result for the given request to the client. When the result
/// still carries in-memory data, that stream is served directly; otherwise the
/// corresponding blob is opened and streamed.
/// </summary>
/// <param name="context">The HTTP context receiving the response.</param>
/// <param name="e">Request/response arguments supplying the cache key and extension.</param>
public void Process(HttpContext context, IResponseArgs e) {
    CacheResult cached = Process(e);
    context.Items["FinalCachedFile"] = cached.Path;

    if (cached.Data != null) {
        //Bytes are already in memory (e.g. a pending write) - serve them directly.
        HandleResponseStream(context, e, (MemoryStream)cached.Data);
        return;
    }

    // Path to the file in the blob container
    string blobPath = new UrlHasher().Hash(e.RequestKey) + '.' + e.SuggestedExtension;
    var blobContainer = GetBlobContainer();
    using (var blobStream = blobContainer.GetBlockBlobReference(blobPath).OpenRead()) {
        HandleResponseStream(context, e, blobStream);
    }
}
/// <summary>
/// May return either a physical file name or a MemoryStream with the data.
/// Faster than GetCachedFile, as writes are (usually) asynchronous. If the write queue is full, the write is forced to be synchronous again.
/// Identical to GetCachedFile() when asynchronous=false
/// </summary>
/// <param name="keyBasis">Unique basis for the cache key; hashed to produce the blob path.</param>
/// <param name="extension">File extension appended to the hashed key.</param>
/// <param name="writeCallback">Delegate that renders the image to the supplied stream on a cache miss.</param>
/// <param name="timeoutMs">Maximum milliseconds to wait for file and queue locks.</param>
/// <param name="asynchronous">When true, misses are flushed to blob storage on a background thread if queue capacity allows.</param>
/// <returns>A CacheResult; its Data is non-null when the bytes should be served from memory.</returns>
public CacheResult GetCachedFile(string keyBasis, string extension, ResizeImageDelegate writeCallback, int timeoutMs, bool asynchronous) {
    Stopwatch sw = null;
    if (lp.Logger != null) { sw = new Stopwatch(); sw.Start(); }

    // Path to the file in the blob container
    string path = new UrlHasher().Hash(keyBasis) + '.' + extension;

    //Assume a hit until we discover otherwise.
    CacheResult result = new CacheResult(CacheQueryResult.Hit, path);

    //Set to true only when the async queue rejects the job and we fall back to a synchronous write.
    bool asyncFailed = false;

    //2013-apr-25: What happens if the file is still being written to blob storage - it's present but not complete? To handle that, we use mayBeLocked.
    bool mayBeLocked = Locks.MayBeLocked(path.ToUpperInvariant());

    // On the first check, verify the file exists by connecting to the blob directly
    if (!asynchronous) {
        //May throw an IOException if the file cannot be opened, and is locked by an external processes for longer than timeoutMs.
        //This method may take longer than timeoutMs under absolute worst conditions.
        if (!TryWriteFile(result, path, writeCallback, timeoutMs, !mayBeLocked)) {
            //On failure
            result.Result = CacheQueryResult.Failed;
        }
    } else if (!Index.PathExistInIndex(path) || mayBeLocked) {
        //Looks like a miss. Let's enter a lock for the creation of the file. This is a different locking system than for writing to the file - far less contention.
        //This prevents two identical requests from duplicating efforts. Different requests don't lock.
        //Lock execution using relativePath as the sync basis. Ignore casing differences.
        //This prevents duplicate entries in the write queue and wasted CPU/RAM usage.
        if (!QueueLocks.TryExecute(path.ToUpperInvariant(), timeoutMs, delegate() {
            //Now, if the item we seek is in the queue, we have a memcached hit. If not, we should check the index. It's possible the item has been written to disk already.
            //If both are a miss, we should see if there is enough room in the write queue. If not, switch to in-thread writing.
            AsyncWrite t = CurrentWrites.Get(path);

            if (t != null) { result.Data = t.GetReadonlyStream(); }

            //On the second check, use cached data for speed. The cached data should be updated if another thread updated a file (but not if another process did).
            //When t == null, and we're inside QueueLocks, all work on the file must be finished, so we have no need to consult mayBeLocked.
            if (t == null && !Index.PathExistInIndex(path)) {
                result.Result = CacheQueryResult.Miss;

                //Still a miss, we even rechecked the filesystem. Write to memory.
                MemoryStream ms = new MemoryStream(4096); //4K initial capacity is minimal, but this array will get copied around alot, better to underestimate.
                //Read, resize, process, and encode the image. Lots of exceptions thrown here.
                writeCallback(ms);
                ms.Position = 0;

                AsyncWrite w = new AsyncWrite(CurrentWrites, ms, path);
                if (CurrentWrites.Queue(w, delegate(AsyncWrite job) {
                    try {
                        Stopwatch swio = new Stopwatch();
                        swio.Start();
                        //TODO: perhaps a different timeout?
                        if (!TryWriteFile(null, job.Path, delegate(Stream s) { ((MemoryStream)job.GetReadonlyStream()).WriteTo(s); }, timeoutMs, true)) {
                            swio.Stop();
                            //We failed to lock the file.
                            if (lp.Logger != null) {
                                lp.Logger.Warn("Failed to flush async write, timeout exceeded after {1}ms - {0}", result.Path, swio.ElapsedMilliseconds);
                            }
                        } else {
                            swio.Stop();
                            if (lp.Logger != null) {
                                lp.Logger.Trace("{0}ms: Async write started {1}ms after enqueue for {2}", swio.ElapsedMilliseconds.ToString().PadLeft(4), DateTime.UtcNow.Subtract(w.JobCreatedAt).Subtract(swio.Elapsed).TotalMilliseconds, result.Path);
                            }
                        }
                    } catch (Exception ex) {
                        if (lp.Logger != null) {
                            lp.Logger.Error("Failed to flush async write, {0} {1}\n{2}", ex.ToString(), result.Path, ex.StackTrace);
                        }
                    } finally {
                        CurrentWrites.Remove(job); //Remove from the queue, it's done or failed.
                    }
                })) {
                    //We queued it! Send back a read-only memory stream
                    result.Data = w.GetReadonlyStream();
                } else {
                    //BUGFIX: was 'asyncFailed = false;' (a no-op, since the flag starts false),
                    //which suppressed the "Fallback to sync " prefix in the trace log below.
                    asyncFailed = true;
                    //We failed to queue it - either the ThreadPool was exhausted or we exceeded the MB limit for the write queue.
                    //Write the MemoryStream to disk using the normal method.
                    //This is nested inside a queuelock because if we failed here, the next one will also. Better to force it to wait until the file is written to blob storage.
                    //NOTE(review): unlike the synchronous path, a failure here does not set result.Result = Failed - confirm whether that is intentional.
                    if (!TryWriteFile(result, path, delegate(Stream s) { ms.WriteTo(s); }, timeoutMs, false)) {
                        if (lp.Logger != null) {
                            lp.Logger.Warn("Failed to queue async write, also failed to lock for sync writing: {0}", result.Path);
                        }
                    }
                }
            }
        })) {
            //On failure
            result.Result = CacheQueryResult.Failed;
        }
    }

    //Guard on sw as well: it is only allocated when lp.Logger was non-null at method entry,
    //so a logger attached mid-call must not cause a NullReferenceException here.
    if (lp.Logger != null && sw != null) {
        sw.Stop();
        lp.Logger.Trace("{0}ms: {3}{1} for {2}, Key: {4}", sw.ElapsedMilliseconds.ToString(NumberFormatInfo.InvariantInfo).PadLeft(4), result.Result.ToString(), result.Path, asynchronous ? (asyncFailed ? "Fallback to sync " : "Async ") : "", keyBasis);
    }

    //Fire event
    if (CacheResultReturned != null) { CacheResultReturned(this, result); }
    return (result);
}