/// <summary>
/// Caches a given IRP on the way down (pre-read path). Walks the request one
/// ALIGNED_BLOCK_SIZE block at a time: each cached block with data is copied
/// into the IRP buffer (a hit); each absent or in-flight block is inserted as a
/// placeholder and counted as a miss. The whole IRP counts as a hit only if
/// every block hit.
/// </summary>
/// <param name="irp">The read IRP to satisfy from cache; must be IRP_MJ_READ.
/// Its FlowId must refer to an explicitly declared flow (present in FlowStats).</param>
/// <returns>
/// FLT_PREOP_COMPLETE when every block was served from cache;
/// FLT_PREOP_SUCCESS_WITH_CALLBACK when any block missed or the flow's cache
/// quota cannot hold the request, so the IRP must go to storage and be
/// intercepted on the POST path.
/// </returns>
public PreReadReturnCode CacheIRP(IRP irp)
{
    bool anyMisses = false;
    FlowSLA sla = null;
    ulong savedFileOffset = irp.FileOffset;
    uint savedDataLength = irp.DataLength;
    uint dataOffset = irp.DataOffset;
    ulong fileOffset = savedFileOffset;
    bool canSatisfyRequest = false;

    Debug.Assert(irp.IoFlowHeader.MajorFunction == MajorFunction.IRP_MJ_READ);

    if (noisyOutput)
        Console.WriteLine("CacheIRP {0}: Attempting to lock cache on Offset={1} Length={2}",
            Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);

    Monitor.Enter(cacheLock);
    try // try/finally guarantees the cache lock is released even if a lookup or copy throws
    {
        // Only deal with explicitly declared flows; indexer throws for unknown FlowIds.
        sla = FlowStats[irp.FlowId];
        Debug.Assert(sla != null);

        // Do we have enough cache space available to satisfy this request?
        if (sla.FlowCacheSize > irp.DataLength)
        {
            canSatisfyRequest = true;
        }

        if (noisyOutput)
            Console.WriteLine("CacheIRP {0}: Locked cache on Offset={1} Length={2}",
                Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);

        if (canSatisfyRequest)
        {
            // Iterate over all blocks covered by the IRP.
            // It's a hit if all blocks are a hit, otherwise it's a miss for the entire IRP.
            do
            {
                uint blockid = (uint)(fileOffset / ALIGNED_BLOCK_SIZE);
                bool hit = false; // block isn't a hit yet

                // Per-iteration lookups: these must be re-resolved for every block so a
                // failed TryGetValue can never leave a stale element from a previous
                // iteration alive (which would make the miss path skip its insert).
                Dictionary<string, Dictionary<uint, FileCacheElement>> f = null;
                Dictionary<uint, FileCacheElement> b = null;
                FileCacheElement ce = null;

                if (Cache.TryGetValue(irp.FlowId, out f) &&
                    f.TryGetValue(irp.IoFlow.FileName, out b) &&
                    b.TryGetValue(blockid, out ce))
                {
                    // Cache entry exists.
                    if (noisyOutput)
                        Console.WriteLine("CacheIRP {0}: Attempting to lock ce on Offset={1} Length={2}",
                            Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);
                    lock (ce.LockObj)
                    {
                        if (noisyOutput)
                            Console.WriteLine("CacheIRP {0}: Locked ce on Offset={1} Length={2}",
                                Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);
                        if (ce.Data != null)
                        {
                            // Real hit; cache entry has data being read.
                            sla = FlowStats[irp.FlowId];
                            Debug.Assert(sla != null); // We should always have a valid sla entry if we have a hit
                            hit = true;

                            // Copy the cached block into the IRP's buffer at the running offset.
                            byte[] irpData = irp.GetDataReadWrite();
                            Debug.Assert(ce.DataLength == ALIGNED_BLOCK_SIZE);
                            Buffer.BlockCopy(ce.Data, 0, irpData, (int)dataOffset, (int)ALIGNED_BLOCK_SIZE);

                            // LRU maintenance: move this entry's node to the tail of the flow's
                            // recency list. Assumes no duplicate ce's in the list, which should be true...
                            Debug.Assert(ce.nodeInList != null);
                            sla.cacheEntries.Remove(ce.nodeInList);
                            sla.cacheEntries.AddLast(ce.nodeInList);

                            if (sla.FlowSLAHasGhostCache())
                            {
                                // Forward the reference to the ghost cache.
                                sla.GhostCache.CacheReadReference(ce.fileName + Convert.ToString(blockid));
                            }
                        }
                        else
                        {
                            // Cache entry exists, BUT data is still in-flight from storage medium.
                            hit = false;
                        }
                    }
                    if (noisyOutput)
                        Console.WriteLine("CacheIRP {0}: UnLocked ce on Offset={1} Length={2}",
                            Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);
                }

                if (!hit)
                {
                    // Evict first to make room under the flow's quota, then insert a
                    // placeholder entry (Data == null) for the in-flight block.
                    Evict(irp.FlowId);

                    if (f == null)
                    {
                        Cache[irp.FlowId] = new Dictionary<string, Dictionary<uint, FileCacheElement>>();
                        f = Cache[irp.FlowId];
                    }
                    if (b == null)
                    {
                        f[irp.IoFlow.FileName] = new Dictionary<uint, FileCacheElement>();
                        b = f[irp.IoFlow.FileName];
                    }
                    if (ce == null)
                    {
                        b[blockid] = getFileCacheElement(irp.IoFlow, irp.IoFlow.FileName, null,
                            fileOffset, dataOffset, ALIGNED_BLOCK_SIZE /* copying data only */);
                        ce = b[blockid];

                        // Insert element into the flow's LRU list and charge its quota.
                        if (false == FlowStats.TryGetValue(irp.FlowId, out sla))
                        {
                            // XXXIS let's only deal with explicitly declared flows right now
                            Debug.Assert(false, "CacheIRP: miss on undeclared flow");
                        }
                        ce.UpdateNodeList(sla.cacheEntries.AddLast(ce));
                        cacheSizeUsedBytes += ALIGNED_BLOCK_SIZE;
                        sla.FlowCacheSizeUsedBytes += ALIGNED_BLOCK_SIZE;

                        if (sla.FlowSLAHasGhostCache())
                        {
                            // Forward the reference to the ghost cache.
                            sla.GhostCache.CacheReadReference(ce.fileName + Convert.ToString(blockid));
                        }
                    }
                }

                if (noisyOutput)
                    Console.WriteLine("CacheIRP {0}: UnLock cache on Offset={1} Length={2}",
                        Thread.CurrentThread.ManagedThreadId, fileOffset, ALIGNED_BLOCK_SIZE);

                fileOffset += ALIGNED_BLOCK_SIZE;
                dataOffset += ALIGNED_BLOCK_SIZE;
                if (hit == false)
                {
                    anyMisses = true;
                }
            } while (fileOffset < savedFileOffset + savedDataLength);

            if (false == FlowStats.TryGetValue(irp.FlowId, out sla))
            {
                // XXXIS let's only deal with explicitly declared flows right now
                Debug.Assert(false, "CacheIRP: stats lookup failed after block walk");
            }
            if (!anyMisses)
            {
                sla.CacheAccessesHits++; // Increment the number of hits in the cache
            }
        }

        sla.CacheAccessesTotal++; // increment all the accesses to this cache
        sla.FlowBytesAccessed += irp.DataLength;
    }
    finally
    {
        Monitor.Exit(cacheLock);
    }

    // Deal with MISSES: let the IRP go through and intercept the POST operation
    // (with payload) so the placeholder entries can be filled in.
    if (anyMisses == true || !canSatisfyRequest)
    {
        if (noisyOutput)
            Console.WriteLine("CacheIRP {0}: PreRead MISS Offset={1} Length={2}",
                Thread.CurrentThread.ManagedThreadId, irp.FileOffset, irp.DataLength);
        return PreReadReturnCode.FLT_PREOP_SUCCESS_WITH_CALLBACK;
    }
    else
    {
        if (noisyOutput)
            Console.WriteLine("CacheIRP {0}: PreRead HIT Offset={1} Length={2}",
                Thread.CurrentThread.ManagedThreadId, irp.FileOffset, irp.DataLength);
        return PreReadReturnCode.FLT_PREOP_COMPLETE;
    }
}