private string FindReasonForNotCaching(CachedBlock cblock, BlockInfoContiguous blockInfo)
 {
     if (blockInfo == null)
     {
         // Somehow, a cache report with the block arrived, but the block
         // reports from the DataNode haven't (yet?) described such a block.
         // Alternately, the NameNode might have invalidated the block, but the
         // DataNode hasn't caught up.  In any case, we want to tell the DN
         // to uncache this.
         return "not tracked by the BlockManager";
     }
     if (!blockInfo.IsComplete())
     {
         // When a cached block changes state from complete to some other state
         // on the DataNode (perhaps because of append), it will begin the
         // uncaching process.  However, the uncaching process is not
         // instantaneous, especially if clients have pinned the block.  So
         // there may be a period of time when incomplete blocks remain cached
         // on the DataNodes.
         return "not complete";
     }
     if (cblock.GetReplication() == 0)
     {
         // Since 0 is not a valid value for a cache directive's replication
         // field, seeing a replication of 0 on a CachedBlock means that it
         // has never been reached by any sweep.
         return "not needed by any directives";
     }
     if (cblock.GetMark() != mark)
     {
         // Although the block was needed in the past, we didn't reach it during
         // the current sweep.  Therefore, it doesn't need to be cached any more.
         // Set the replication to 0 so it doesn't flip back to cached
         // when the mark flips on the next scan.
         cblock.SetReplicationAndMark((short)0, mark);
         return "no longer needed by any directives";
     }
     return null;
 }
        /// <summary>Add new entries to the PendingCached list.</summary>
        /// <param name="neededCached">The number of replicas that need to be cached.</param>
        /// <param name="cachedBlock">The block which needs to be cached.</param>
        /// <param name="cached">A list of DataNodes currently caching the block.</param>
        /// <param name="pendingCached">
        /// A list of DataNodes that will soon cache the
        /// block.
        /// </param>
        private void AddNewPendingCached(int neededCached, CachedBlock cachedBlock,
                                         IList <DatanodeDescriptor> cached, IList <DatanodeDescriptor> pendingCached)
        {
            // To figure out which replicas can be cached, we consult the
            // blocksMap.  We don't want to try to cache a corrupt replica, though.
            BlockInfoContiguous blockInfo = blockManager.GetStoredBlock(new Block(cachedBlock
                                                                                  .GetBlockId()));

            if (blockInfo == null)
            {
                Log.Debug("Block {}: can't add new cached replicas," + " because there is no record of this block "
                          + "on the NameNode.", cachedBlock.GetBlockId());
                return;
            }
            if (!blockInfo.IsComplete())
            {
                Log.Debug("Block {}: can't cache this block, because it is not yet" + " complete."
                          , cachedBlock.GetBlockId());
                return;
            }
            // Filter the list of replicas to only the valid targets
            IList <DatanodeDescriptor> possibilities = new List <DatanodeDescriptor>();
            int numReplicas = blockInfo.GetCapacity();
            ICollection <DatanodeDescriptor> corrupt = blockManager.GetCorruptReplicas(blockInfo
                                                                                       );
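            // Count of DataNodes skipped because they lack enough free cache space;
            // only used for the debug message at the end of this method.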
            int outOfCapacity = 0;

            for (int i = 0; i < numReplicas; i++)
            {
                DatanodeDescriptor datanode = blockInfo.GetDatanode(i);
                if (datanode == null)
                {
                    continue;
                }
                if (datanode.IsDecommissioned() || datanode.IsDecommissionInProgress())
                {
                    continue;
                }
                if (corrupt != null && corrupt.Contains(datanode))
                {
                    continue;
                }
                if (pendingCached.Contains(datanode) || cached.Contains(datanode))
                {
                    continue;
                }
                long pendingBytes = 0;
                // Subtract pending cached blocks from effective capacity
                IEnumerator <CachedBlock> it = datanode.GetPendingCached().GetEnumerator();
                while (it.HasNext())
                {
                    CachedBlock         cBlock = it.Next();
                    BlockInfoContiguous info   = blockManager.GetStoredBlock(new Block(cBlock.GetBlockId
                                                                                           ()));
                    if (info != null)
                    {
                        pendingBytes -= info.GetNumBytes();
                    }
                }
                // Add pending uncached blocks back to effective capacity
                it = datanode.GetPendingUncached().GetEnumerator();
                while (it.HasNext())
                {
                    CachedBlock         cBlock = it.Next();
                    BlockInfoContiguous info   = blockManager.GetStoredBlock(new Block(cBlock.GetBlockId
                                                                                           ()));
                    if (info != null)
                    {
                        pendingBytes += info.GetNumBytes();
                    }
                }
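                // Effective capacity once pending operations settle: the DataNode's
                // reported cacheRemaining, minus bytes it still has to cache, plus
                // bytes it is about to free by uncaching (both folded into pendingBytes).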
                long pendingCapacity = pendingBytes + datanode.GetCacheRemaining();
                if (pendingCapacity < blockInfo.GetNumBytes())
                {
                    Log.Trace("Block {}: DataNode {} is not a valid possibility " + "because the block has size {}, but the DataNode only has {}"
                              + "bytes of cache remaining ({} pending bytes, {} already cached.", blockInfo.GetBlockId
                                  (), datanode.GetDatanodeUuid(), blockInfo.GetNumBytes(), pendingCapacity, pendingBytes
                              , datanode.GetCacheRemaining());
                    outOfCapacity++;
                    continue;
                }
                possibilities.AddItem(datanode);
            }
            IList <DatanodeDescriptor> chosen = ChooseDatanodesForCaching(possibilities, neededCached
                                                                          , blockManager.GetDatanodeManager().GetStaleInterval());

            foreach (DatanodeDescriptor datanode_1 in chosen)
            {
                Log.Trace("Block {}: added to PENDING_CACHED on DataNode {}", blockInfo.GetBlockId
                              (), datanode_1.GetDatanodeUuid());
                pendingCached.AddItem(datanode_1);
                bool added = datanode_1.GetPendingCached().AddItem(cachedBlock);
                System.Diagnostics.Debug.Assert(added);
            }
            // We were unable to satisfy the requested replication factor
            if (neededCached > chosen.Count)
            {
                Log.Debug("Block {}: we only have {} of {} cached replicas." + " {} DataNodes have insufficient cache capacity."
                          , blockInfo.GetBlockId(), (cachedBlock.GetReplication() - neededCached + chosen.
                                                     Count), cachedBlock.GetReplication(), outOfCapacity);
            }
        }
 /// <summary>Scan through the cached block map.</summary>
 /// <remarks>
 /// Scan through the cached block map.
 /// Any blocks which are under-replicated should be assigned new Datanodes.
 /// Blocks that are over-replicated should be removed from Datanodes.
 /// </remarks>
 private void RescanCachedBlockMap()
 {
     for (IEnumerator <CachedBlock> cbIter = cachedBlocks.GetEnumerator(); cbIter.HasNext
              ();)
     {
         scannedBlocks++;
         CachedBlock cblock = cbIter.Next();
         IList <DatanodeDescriptor> pendingCached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type
                                                                        .PendingCached);
         IList <DatanodeDescriptor> cached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type
                                                                 .Cached);
         IList <DatanodeDescriptor> pendingUncached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type
                                                                          .PendingUncached);
         // Remove nodes from PENDING_UNCACHED if they were actually uncached.
         for (IEnumerator <DatanodeDescriptor> iter = pendingUncached.GetEnumerator(); iter
              .HasNext();)
         {
             DatanodeDescriptor datanode = iter.Next();
             if (!cblock.IsInList(datanode.GetCached()))
             {
                 Log.Trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because the DataNode uncached it."
                           , cblock.GetBlockId(), datanode.GetDatanodeUuid());
                 datanode.GetPendingUncached().Remove(cblock);
                 iter.Remove();
             }
         }
         BlockInfoContiguous blockInfo = blockManager.GetStoredBlock(new Block(cblock.GetBlockId
                                                                                   ()));
         string reason       = FindReasonForNotCaching(cblock, blockInfo);
         int    neededCached = 0;
         if (reason != null)
         {
             Log.Trace("Block {}: can't cache block because it is {}", cblock.GetBlockId(), reason
                       );
         }
         else
         {
             neededCached = cblock.GetReplication();
         }
         int numCached = cached.Count;
         if (numCached >= neededCached)
         {
             // If we have enough replicas, drop all pending cached.
             for (IEnumerator <DatanodeDescriptor> iter_1 = pendingCached.GetEnumerator(); iter_1
                  .HasNext();)
             {
                 DatanodeDescriptor datanode = iter_1.Next();
                 datanode.GetPendingCached().Remove(cblock);
                 iter_1.Remove();
                 Log.Trace("Block {}: removing from PENDING_CACHED for node {}" + "because we already have {} cached replicas and we only"
                           + " need {}", cblock.GetBlockId(), datanode.GetDatanodeUuid(), numCached, neededCached
                           );
             }
         }
         if (numCached < neededCached)
         {
             // If we don't have enough replicas, drop all pending uncached.
             for (IEnumerator <DatanodeDescriptor> iter_1 = pendingUncached.GetEnumerator(); iter_1
                  .HasNext();)
             {
                 DatanodeDescriptor datanode = iter_1.Next();
                 datanode.GetPendingUncached().Remove(cblock);
                 iter_1.Remove();
                 Log.Trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because we only have {} cached replicas and we need "
                           + "{}", cblock.GetBlockId(), datanode.GetDatanodeUuid(), numCached, neededCached
                           );
             }
         }
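          // Replicas cached beyond what the directives need, less those already
          // queued for uncaching, still have to be scheduled for uncaching.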
         int neededUncached = numCached - (pendingUncached.Count + neededCached);
         if (neededUncached > 0)
         {
             AddNewPendingUncached(neededUncached, cblock, cached, pendingUncached);
         }
         else
         {
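              // Replicas still missing after counting both the nodes that cache the
              // block and the nodes already asked to cache it.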
             int additionalCachedNeeded = neededCached - (numCached + pendingCached.Count);
             if (additionalCachedNeeded > 0)
             {
                 AddNewPendingCached(additionalCachedNeeded, cblock, cached, pendingCached);
             }
         }
         if ((neededCached == 0) && pendingUncached.IsEmpty() && pendingCached.IsEmpty())
         {
             // we have nothing more to do with this block.
             Log.Trace("Block {}: removing from cachedBlocks, since neededCached " + "== 0, and pendingUncached and pendingCached are empty."
                       , cblock.GetBlockId());
             cbIter.Remove();
         }
     }
 }
        /// <summary>Apply a CacheDirective to a file.</summary>
        /// <param name="directive">The CacheDirective to apply.</param>
        /// <param name="file">The file.</param>
        private void RescanFile(CacheDirective directive, INodeFile file)
        {
            BlockInfoContiguous[] blockInfos = file.GetBlocks();
            // Increment the "needed" statistics
            directive.AddFilesNeeded(1);
            // We don't cache UC blocks, don't add them to the total here
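            // Each requested replica counts toward bytesNeeded, hence the
            // multiplication by the directive's replication factor.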
            long neededTotal = file.ComputeFileSizeNotIncludingLastUcBlock() * directive.GetReplication
                                   ();

            directive.AddBytesNeeded(neededTotal);
            // The pool's bytesNeeded is incremented as we scan. If the demand
            // thus far plus the demand of this file would exceed the pool's limit,
            // do not cache this file.
            CachePool pool = directive.GetPool();

            if (pool.GetBytesNeeded() > pool.GetLimit())
            {
                Log.Debug("Directive {}: not scanning file {} because " + "bytesNeeded for pool {} is {}, but the pool's limit is {}"
                          , directive.GetId(), file.GetFullPathName(), pool.GetPoolName(), pool.GetBytesNeeded
                              (), pool.GetLimit());
                return;
            }
            long cachedTotal = 0;

            foreach (BlockInfoContiguous blockInfo in blockInfos)
            {
                if (!blockInfo.GetBlockUCState().Equals(HdfsServerConstants.BlockUCState.Complete
                                                        ))
                {
                    // We don't try to cache blocks that are under construction.
                    Log.Trace("Directive {}: can't cache block {} because it is in state " + "{}, not COMPLETE."
                              , directive.GetId(), blockInfo, blockInfo.GetBlockUCState());
                    continue;
                }
                Block       block   = new Block(blockInfo.GetBlockId());
                CachedBlock ncblock = new CachedBlock(block.GetBlockId(), directive.GetReplication
                                                          (), mark);
                CachedBlock ocblock = cachedBlocks.Get(ncblock);
                if (ocblock == null)
                {
                    cachedBlocks.Put(ncblock);
                    ocblock = ncblock;
                }
                else
                {
                    // Update bytesUsed using the current replication levels.
                    // Assumptions: we assume that all the blocks are the same length
                    // on each datanode.  We can assume this because we're only caching
                    // blocks in state COMPLETE.
                    // Note that if two directives are caching the same block(s), they will
                    // both get them added to their bytesCached.
                    IList <DatanodeDescriptor> cachedOn = ocblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type
                                                                               .Cached);
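                    // Credit at most GetReplication() replicas toward bytesCached,
                    // even if more DataNodes happen to cache this block.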
                    long cachedByBlock = Math.Min(cachedOn.Count, directive.GetReplication()) * blockInfo
                                         .GetNumBytes();
                    cachedTotal += cachedByBlock;
                    if ((mark != ocblock.GetMark()) || (ocblock.GetReplication() < directive.GetReplication
                                                            ()))
                    {
                        //
                        // Overwrite the block's replication and mark in two cases:
                        //
                        // 1. If the mark on the CachedBlock is different from the mark for
                        // this scan, that means the block hasn't been updated during this
                        // scan, and we should overwrite whatever is there, since it is no
                        // longer valid.
                        //
                        // 2. If the replication in the CachedBlock is less than what the
                        // directive asks for, we want to increase the block's replication
                        // field to what the directive asks for.
                        //
                        ocblock.SetReplicationAndMark(directive.GetReplication(), mark);
                    }
                }
                Log.Trace("Directive {}: setting replication for block {} to {}", directive.GetId
                              (), blockInfo, ocblock.GetReplication());
            }
            // Increment the "cached" statistics
            directive.AddBytesCached(cachedTotal);
            if (cachedTotal == neededTotal)
            {
                directive.AddFilesCached(1);
            }
            Log.Debug("Directive {}: caching {}: {}/{} bytes", directive.GetId(), file.GetFullPathName
                          (), cachedTotal, neededTotal);
        }