/// <summary>
/// Returns whether the given block is currently cached and in a state that
/// should be advertised to the NameNode.
/// </summary>
/// <param name="bpid">Block pool id of the block.</param>
/// <param name="blockId">Id of the block.</param>
/// <returns>True when the block has a cache-map entry whose state advertises as cached.</returns>
public virtual bool IsCached(string bpid, long blockId)
{
    lock (this)
    {
        // A missing entry (null) means the block is not cached at all.
        FsDatasetCache.Value entry = mappableBlockMap[new ExtendedBlockId(blockId, bpid)];
        return entry != null && entry.state.ShouldAdvertise();
    }
}
/// <summary>
/// Attempt to uncache a block: revoke short-circuit client anchors, then
/// transition the block's cache-map entry and schedule the actual unmapping
/// work on an executor (deferred when clients still hold anchors).
/// </summary>
/// <param name="bpid">Block pool id of the block to uncache.</param>
/// <param name="blockId">Id of the block to uncache.</param>
internal virtual void UncacheBlock(string bpid, long blockId)
{
    lock (this)
    {
        ExtendedBlockId key = new ExtendedBlockId(blockId, bpid);
        FsDatasetCache.Value prevValue = mappableBlockMap[key];
        bool deferred = false;
        // Ask the short-circuit registry to revoke client anchors first (this is
        // done even if the block turns out not to be in the map, for its side
        // effect).  A false return means anchors remain and uncaching must wait.
        if (!dataset.datanode.GetShortCircuitRegistry().ProcessBlockMunlockRequest(key))
        {
            deferred = true;
        }
        if (prevValue == null)
        {
            // Nothing tracked for this block; record the failed uncache request.
            Log.Debug("Block with id {}, pool {} does not need to be uncached, " + "because it is not currently in the mappableBlockMap."
                , blockId, bpid);
            numBlocksFailedToUncache.IncrementAndGet();
            return;
        }
        switch (prevValue.state)
        {
            case FsDatasetCache.State.Caching:
            {
                // Caching is still in flight: flip the state so the CachingTask will
                // drop the entry when it finishes and observes the cancellation.
                Log.Debug("Cancelling caching for block with id {}, pool {}.", blockId, bpid);
                mappableBlockMap[key] = new FsDatasetCache.Value(prevValue.mappableBlock, FsDatasetCache.State
                    .CachingCancelled);
                break;
            }
            case FsDatasetCache.State.Cached:
            {
                // Mark the entry as Uncaching, then either poll later (anchors still
                // held) or unmap immediately on the uncaching executor.
                mappableBlockMap[key] = new FsDatasetCache.Value(prevValue.mappableBlock, FsDatasetCache.State
                    .Uncaching);
                if (deferred)
                {
                    Log.Debug("{} is anchored, and can't be uncached now. Scheduling it " + "for uncaching in {} "
                        , key, DurationFormatUtils.FormatDurationHMS(revocationPollingMs));
                    deferredUncachingExecutor.Schedule(new FsDatasetCache.UncachingTask(this, key, revocationMs
                        ), revocationPollingMs, TimeUnit.Milliseconds);
                }
                else
                {
                    Log.Debug("{} has been scheduled for immediate uncaching.", key);
                    uncachingExecutor.Execute(new FsDatasetCache.UncachingTask(this, key, 0));
                }
                break;
            }
            default:
            {
                // Already Uncaching or CachingCancelled: nothing more to do here;
                // still counted as a failed uncache request.
                Log.Debug("Block with id {}, pool {} does not need to be uncached, " + "because it is in state {}."
                    , blockId, bpid, prevValue.state);
                numBlocksFailedToUncache.IncrementAndGet();
                break;
            }
        }
    }
}
/// <summary>Attempt to begin caching a block.</summary>
/// <param name="blockId">Id of the block to cache.</param>
/// <param name="bpid">Block pool id of the block.</param>
/// <param name="blockFileName">Path of the backing block file.</param>
/// <param name="length">Visible length of the block.</param>
/// <param name="genstamp">Generation stamp of the block.</param>
/// <param name="volumeExecutor">Executor for the volume holding the block; runs the caching task.</param>
internal virtual void CacheBlock(long blockId, string bpid, string blockFileName, long
    length, long genstamp, Executor volumeExecutor)
{
    lock (this)
    {
        ExtendedBlockId cacheKey = new ExtendedBlockId(blockId, bpid);
        FsDatasetCache.Value existingEntry = mappableBlockMap[cacheKey];
        if (existingEntry != null)
        {
            // Caching already requested (or complete) for this block; count the
            // duplicate request as a failure and bail out.
            Log.Debug("Block with id {}, pool {} already exists in the " + "FsDatasetCache with state {}"
                , blockId, bpid, existingEntry.state);
            numBlocksFailedToCache.IncrementAndGet();
            return;
        }
        // Record the Caching state before handing work to the volume executor so
        // concurrent requests see the in-flight entry.
        mappableBlockMap[cacheKey] = new FsDatasetCache.Value(null, FsDatasetCache.State.Caching);
        volumeExecutor.Execute(new FsDatasetCache.CachingTask(this, cacheKey, blockFileName, length
            , genstamp));
        Log.Debug("Initiating caching for Block with id {}, pool {}", blockId, bpid);
    }
}
/// <summary>
/// Background caching task body: reserves cache space, opens the block and
/// metadata streams, memory-maps and checksum-verifies the replica, then
/// publishes the entry as Cached (unless caching was cancelled meanwhile).
/// On any failure, the finally block rolls back the reservation, closes the
/// mapped block, and removes the map entry.
/// </summary>
public virtual void Run()
{
    bool success = false;
    FileInputStream blockIn = null;
    FileInputStream metaIn = null;
    MappableBlock mappableBlock = null;
    ExtendedBlock extBlk = new ExtendedBlock(this.key.GetBlockPoolId(), this.key.GetBlockId
        (), this.length, this.genstamp);
    // Reserve space up front; a negative result means the reservation would
    // exceed the configured locked-memory limit and was rejected.
    long newUsedBytes = this._enclosing.usedBytesCount.Reserve(this.length);
    bool reservedBytes = false;
    try
    {
        if (newUsedBytes < 0)
        {
            FsDatasetCache.Log.Warn("Failed to cache " + this.key + ": could not reserve " + this.length
                + " more bytes in the cache: " + DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey + " of " + this._enclosing.maxBytes + " exceeded.");
            return;
        }
        reservedBytes = true;
        // Open the block data and metadata streams; each failure mode gets its
        // own log message, and all of them abort via the finally-block rollback.
        try
        {
            blockIn = (FileInputStream)this._enclosing.dataset.GetBlockInputStream(extBlk, 0);
            metaIn = DatanodeUtil.GetMetaDataInputStream(extBlk, this._enclosing.dataset);
        }
        catch (InvalidCastException e)
        {
            // GetBlockInputStream returned a non-file-backed stream; cannot mmap it.
            FsDatasetCache.Log.Warn("Failed to cache " + this.key + ": Underlying blocks are not backed by files."
                , e);
            return;
        }
        catch (FileNotFoundException)
        {
            FsDatasetCache.Log.Info("Failed to cache " + this.key + ": failed to find backing " + "files.");
            return;
        }
        catch (IOException e)
        {
            FsDatasetCache.Log.Warn("Failed to cache " + this.key + ": failed to open file", e);
            return;
        }
        // Map the block into memory and verify its checksums.
        try
        {
            mappableBlock = MappableBlock.Load(this.length, blockIn, metaIn, this.blockFileName
                );
        }
        catch (ChecksumException)
        {
            // Exception message is bogus since this wasn't caused by a file read
            FsDatasetCache.Log.Warn("Failed to cache " + this.key + ": checksum verification failed."
                );
            return;
        }
        catch (IOException e)
        {
            FsDatasetCache.Log.Warn("Failed to cache " + this.key, e);
            return;
        }
        // Publish the result under the cache lock; UncacheBlock may have flipped
        // the state to CachingCancelled while we were loading, in which case the
        // entry is dropped and the finally block rolls everything back.
        lock (this._enclosing)
        {
            FsDatasetCache.Value value = this._enclosing.mappableBlockMap[this.key];
            Preconditions.CheckNotNull(value);
            Preconditions.CheckState(value.state == FsDatasetCache.State.Caching || value.state
                == FsDatasetCache.State.CachingCancelled);
            if (value.state == FsDatasetCache.State.CachingCancelled)
            {
                Sharpen.Collections.Remove(this._enclosing.mappableBlockMap, this.key);
                FsDatasetCache.Log.Warn("Caching of " + this.key + " was cancelled.");
                return;
            }
            this._enclosing.mappableBlockMap[this.key] = new FsDatasetCache.Value(mappableBlock
                , FsDatasetCache.State.Cached);
        }
        FsDatasetCache.Log.Debug("Successfully cached {}. We are now caching {} bytes in" + " total.", this.key, newUsedBytes);
        // Notify short-circuit clients and update metrics now that the block is cached.
        this._enclosing.dataset.datanode.GetShortCircuitRegistry().ProcessBlockMlockEvent
            (this.key);
        this._enclosing.numBlocksCached.AddAndGet(1);
        this._enclosing.dataset.datanode.GetMetrics().IncrBlocksCached(1);
        success = true;
    }
    finally
    {
        // Streams are closed on every path; the remaining cleanup only runs when
        // caching did not complete successfully.
        IOUtils.CloseQuietly(blockIn);
        IOUtils.CloseQuietly(metaIn);
        if (!success)
        {
            if (reservedBytes)
            {
                // Undo the space reservation made at the top of Run().
                this._enclosing.usedBytesCount.Release(this.length);
            }
            FsDatasetCache.Log.Debug("Caching of {} was aborted. We are now caching only {} " + "bytes in total.", this.key, this._enclosing.usedBytesCount.Get());
            if (mappableBlock != null)
            {
                mappableBlock.Close();
            }
            this._enclosing.numBlocksFailedToCache.IncrementAndGet();
            // Drop the (Caching / CachingCancelled) entry so a later request can retry.
            lock (this._enclosing)
            {
                Sharpen.Collections.Remove(this._enclosing.mappableBlockMap, this.key);
            }
        }
    }
}