Example #1
        public async ReusableTask WriteAsync(ITorrentFileInfo file, long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
        {
            if (forceWrite)
            {
                await Writer.WriteAsync(file, offset, buffer, bufferOffset, count);
            }
            else
            {
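                // Not enough room left in the cache for this block: flush the oldest cached block first.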
                if (CacheUsed > (Capacity - count))
                {
                    await FlushAsync(0);
                }

                var releaser = DiskManager.BufferPool.Rent(count, out byte[] cacheBuffer);
                Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);

                var block = new CachedBlock {
                    Buffer         = cacheBuffer,
                    BufferReleaser = releaser,
                    Count          = count,
                    Offset         = offset,
                    File           = file
                };
                CachedBlocks.Add(block);
                Interlocked.Add(ref cacheUsed, block.Count);
            }
        }
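
All of the write-cache examples above follow the same pattern: copy each written block into a pooled buffer, track the bytes in use with Interlocked.Add, and flush the oldest block back to the underlying writer once the cache would exceed its capacity. A minimal, self-contained sketch of that pattern is shown below; the MemoryWriteCache class and the writeThrough delegate are hypothetical illustrations, not part of MonoTorrent's API.

using System;
using System.Buffers;
using System.Collections.Generic;
using System.Threading;

class MemoryWriteCache
{
    // Cached blocks in insertion order; index 0 is the oldest and is evicted first.
    readonly List<(long Offset, byte[] Buffer, int Count)> blocks = new List<(long Offset, byte[] Buffer, int Count)>();
    readonly Action<long, byte[], int, int> writeThrough;   // (offset, buffer, bufferOffset, count)
    long used;

    public long Capacity { get; }
    public long Used => Interlocked.Read(ref used);

    public MemoryWriteCache(long capacity, Action<long, byte[], int, int> writeThrough)
    {
        Capacity = capacity;
        this.writeThrough = writeThrough;
    }

    public void Write(long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
    {
        if (forceWrite)
        {
            // Bypass the cache and write straight through to the underlying writer.
            writeThrough(offset, buffer, bufferOffset, count);
            return;
        }

        // Evict the oldest cached block when this one would not fit.
        if (Used > Capacity - count && blocks.Count > 0)
            Flush(0);

        // Copy the caller's data into a pooled buffer so the caller can reuse its own buffer.
        byte[] cacheBuffer = ArrayPool<byte>.Shared.Rent(count);
        Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);
        blocks.Add((offset, cacheBuffer, count));
        Interlocked.Add(ref used, count);
    }

    void Flush(int index)
    {
        var b = blocks[index];
        blocks.RemoveAt(index);
        Interlocked.Add(ref used, -b.Count);
        writeThrough(b.Offset, b.Buffer, 0, b.Count);
        ArrayPool<byte>.Shared.Return(b.Buffer);
    }
}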
Example #2
        public virtual void TestMultipleLists()
        {
            DatanodeDescriptor[] datanodes = new DatanodeDescriptor[] {
                new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003)),
                new DatanodeDescriptor(new DatanodeID("127.0.1.1", "localhost", "efgh", 6000, 6001, 6002, 6003))
            };
            DatanodeDescriptor.CachedBlocksList[] lists = new DatanodeDescriptor.CachedBlocksList[] {
                datanodes[0].GetPendingCached(), datanodes[0].GetCached(),
                datanodes[1].GetPendingCached(), datanodes[1].GetCached(),
                datanodes[1].GetPendingUncached()
            };
            int NumBlocks = 8000;

            CachedBlock[] blocks = new CachedBlock[NumBlocks];
            for (int i = 0; i < NumBlocks; i++)
            {
                blocks[i] = new CachedBlock(i, (short)i, true);
            }
            Random r = new Random(654);

            foreach (DatanodeDescriptor.CachedBlocksList list in lists)
            {
                TestAddElementsToList(list, blocks);
            }
            foreach (DatanodeDescriptor.CachedBlocksList list_1 in lists)
            {
                TestRemoveElementsFromList(r, list_1, blocks);
            }
        }
        public void Write(TorrentFile file, long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
        {
            if (forceWrite)
            {
                writer.Write(file, offset, buffer, bufferOffset, count);
            }
            else
            {
                if (Used > (Capacity - count))
                {
                    Flush(0);
                }

                byte[] cacheBuffer = BufferManager.EmptyBuffer;
                ClientEngine.BufferManager.GetBuffer(ref cacheBuffer, count);
                Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);

                CachedBlock block = new CachedBlock();
                block.Buffer = cacheBuffer;
                block.Count  = count;
                block.Offset = offset;
                block.File   = file;
                cachedBlocks.Add(block);
            }
        }
        public void Write(TorrentFile file, long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
        {
            if (forceWrite)
            {
                Writer.Write(file, offset, buffer, bufferOffset, count);
            }
            else
            {
                if (CacheUsed > (Capacity - count))
                {
                    Flush(0);
                }

                byte[] cacheBuffer = ClientEngine.BufferPool.Rent(count);
                Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);

                CachedBlock block = new CachedBlock();
                block.Buffer = cacheBuffer;
                block.Count  = count;
                block.Offset = offset;
                block.File   = file;
                CachedBlocks.Add(block);
                Interlocked.Add(ref cacheUsed, block.Count);
            }
        }
        /// <summary>Add new entries to the PendingUncached list.</summary>
        /// <param name="neededUncached">The number of replicas that need to be uncached.</param>
        /// <param name="cachedBlock">The block which needs to be uncached.</param>
        /// <param name="cached">A list of DataNodes currently caching the block.</param>
        /// <param name="pendingUncached">
        /// A list of DataNodes that will soon uncache the
        /// block.
        /// </param>
        private void AddNewPendingUncached(int neededUncached, CachedBlock cachedBlock,
                                           IList<DatanodeDescriptor> cached, IList<DatanodeDescriptor> pendingUncached)
        {
            // Figure out which replicas can be uncached.
            List <DatanodeDescriptor> possibilities = new List <DatanodeDescriptor>();

            foreach (DatanodeDescriptor datanode in cached)
            {
                if (!pendingUncached.Contains(datanode))
                {
                    possibilities.AddItem(datanode);
                }
            }
            while (neededUncached > 0)
            {
                if (possibilities.IsEmpty())
                {
                    Log.Warn("Logic error: we're trying to uncache more replicas than " + "actually exist for "
                             + cachedBlock);
                    return;
                }
                DatanodeDescriptor datanode_1 = possibilities.Remove(random.Next(possibilities.Count));
                pendingUncached.AddItem(datanode_1);
                bool added = datanode_1.GetPendingUncached().AddItem(cachedBlock);
                System.Diagnostics.Debug.Assert(added);
                neededUncached--;
            }
        }
Example #6
        public void Write(TorrentFile file, long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
        {
            if (forceWrite)
            {
                Writer.Write(file, offset, buffer, bufferOffset, count);
            }
            else
            {
                if (CacheUsed > (Capacity - count))
                {
                    Flush(0);
                }

                var releaser = ClientEngine.BufferPool.Rent(count, out byte[] cacheBuffer);
                Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);

                var block = new CachedBlock {
                    Buffer         = cacheBuffer,
                    BufferReleaser = releaser,
                    Count          = count,
                    Offset         = offset,
                    File           = file
                };
                CachedBlocks.Add(block);
                Interlocked.Add(ref cacheUsed, block.Count);
            }
        }
Example #7
 void Flush (int index)
 {
     CachedBlock b = CachedBlocks[index];
     CachedBlocks.RemoveAt (index);
     Interlocked.Add (ref cacheUsed, -b.Count);
     Write (b.File, b.Offset, b.Buffer, 0, b.Count, true);
     ClientEngine.BufferPool.Return (b.Buffer);
 }
        public void Flush(int index)
        {
            CachedBlock b = cachedBlocks[index];

            cachedBlocks.RemoveAt(index);
            Write(b.File, b.Offset, b.Buffer, 0, b.Count, true);
            ClientEngine.BufferManager.FreeBuffer(ref b.Buffer);
        }
Example #9
        void Flush(int index)
        {
            CachedBlock b = CachedBlocks[index];

            CachedBlocks.RemoveAt(index);
            Interlocked.Add(ref cacheUsed, -b.Count);
            using (b.BufferReleaser)
                Write(b.File, b.Offset, b.Buffer, 0, b.Count, true);
        }
Example #10
        async void FlushBlockAsync(ITorrentData torrent, List <CachedBlock> blocks, CachedBlock cached)
        {
            // FIXME: How do we handle failures from this?
            using (cached.BufferReleaser) {
                await WriteToFilesAsync(torrent, cached.Block, cached.Buffer);

                Interlocked.Add(ref cacheUsed, -cached.Block.RequestLength);
                blocks.Remove(cached);
            }
        }
Example #11
        async ReusableTask FlushAsync(int index)
        {
            CachedBlock b = CachedBlocks[index];

            CachedBlocks.RemoveAt(index);
            Interlocked.Add(ref cacheUsed, -b.Count);

            using (b.BufferReleaser)
                await WriteAsync(b.File, b.Offset, b.Buffer, 0, b.Count, true);
        }
Example #12
        public async ReusableTask WriteAsync(ITorrentData torrent, BlockInfo block, Memory <byte> buffer, bool preferSkipCache)
        {
            if (preferSkipCache || Capacity < block.RequestLength)
            {
                await WriteToFilesAsync(torrent, block, buffer);
            }
            else
            {
                if (!CachedBlocks.TryGetValue(torrent, out List <CachedBlock> blocks))
                {
                    CachedBlocks[torrent] = blocks = new List <CachedBlock> ();
                }

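                // Cache is full: flush one flushable cached block to disk to make room,
                // or skip the cache entirely if every cached block is already being flushed.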
                if (CacheUsed > (Capacity - block.RequestLength))
                {
                    var firstFlushable = FindFirstFlushable(blocks);
                    if (firstFlushable < 0)
                    {
                        await WriteToFilesAsync(torrent, block, buffer);

                        return;
                    }
                    else
                    {
                        var cached = blocks[firstFlushable];
                        blocks[firstFlushable] = cached.SetFlushing();

                        using (cached.BufferReleaser)
                            await WriteToFilesAsync(torrent, cached.Block, cached.Buffer);

                        Interlocked.Add(ref cacheUsed, -cached.Block.RequestLength);
                        blocks.Remove(cached);
                    }
                }

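                // Reuse an existing cache entry for this block if one exists; otherwise rent a new pooled buffer.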
                CachedBlock? cache = null;
                for (int i = 0; i < blocks.Count && !cache.HasValue; i++)
                {
                    if (blocks[i].Block == block)
                    {
                        cache = blocks[i];
                    }
                }

                if (!cache.HasValue)
                {
                    var releaser = BufferPool.Rent(block.RequestLength, out Memory <byte> memory);
                    cache = new CachedBlock(block, releaser, memory);
                    blocks.Add(cache.Value);
                    Interlocked.Add(ref cacheUsed, block.RequestLength);
                }
                buffer.CopyTo(cache.Value.Buffer);
                WrittenToCache?.Invoke(this, block);
            }
        }
 public override void Flush(TorrentFile file)
 {
     for (int i = 0; i < cachedBlocks.Count; i++)
     {
         if (cachedBlocks[i].File == file)
         {
             CachedBlock b = cachedBlocks[i];
             writer.Write(b.File, b.Offset, b.Buffer, 0, b.Count);
             ClientEngine.BufferManager.FreeBuffer(ref b.Buffer);
         }
     }
     cachedBlocks.RemoveAll(delegate(CachedBlock b) { return(b.File == file); });
 }
 private string FindReasonForNotCaching(CachedBlock cblock, BlockInfoContiguous blockInfo)
 {
     if (blockInfo == null)
     {
         // Somehow, a cache report with the block arrived, but the block
         // reports from the DataNode haven't (yet?) described such a block.
         // Alternately, the NameNode might have invalidated the block, but the
         // DataNode hasn't caught up.  In any case, we want to tell the DN
         // to uncache this.
         return("not tracked by the BlockManager");
     }
     else
     {
         if (!blockInfo.IsComplete())
         {
             // When a cached block changes state from complete to some other state
             // on the DataNode (perhaps because of append), it will begin the
             // uncaching process.  However, the uncaching process is not
             // instantaneous, especially if clients have pinned the block.  So
             // there may be a period of time when incomplete blocks remain cached
             // on the DataNodes.
             return("not complete");
         }
         else
         {
             if (cblock.GetReplication() == 0)
             {
                 // Since 0 is not a valid value for a cache directive's replication
                 // field, seeing a replication of 0 on a CacheBlock means that it
                 // has never been reached by any sweep.
                 return("not needed by any directives");
             }
             else
             {
                 if (cblock.GetMark() != mark)
                 {
                     // Although the block was needed in the past, we didn't reach it during
                     // the current sweep.  Therefore, it doesn't need to be cached any more.
                     // Need to set the replication to 0 so it doesn't flip back to cached
                     // when the mark flips on the next scan
                     cblock.SetReplicationAndMark((short)0, mark);
                     return("no longer needed by any directives");
                 }
             }
         }
     }
     return(null);
 }
Example #15
        public virtual void TestSingleList()
        {
            DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));

            CachedBlock[] blocks = new CachedBlock[] {
                new CachedBlock(0L, (short)1, true),
                new CachedBlock(1L, (short)1, true),
                new CachedBlock(2L, (short)1, true)
            };
            // check that lists are empty
            NUnit.Framework.Assert.IsTrue("expected pending cached list to start off empty.",
                                          !dn.GetPendingCached().GetEnumerator().HasNext());
            NUnit.Framework.Assert.IsTrue("expected cached list to start off empty.", !dn.GetCached
                                              ().GetEnumerator().HasNext());
            NUnit.Framework.Assert.IsTrue("expected pending uncached list to start off empty."
                                          , !dn.GetPendingUncached().GetEnumerator().HasNext());
            // add a block to the back
            NUnit.Framework.Assert.IsTrue(dn.GetCached().AddItem(blocks[0]));
            NUnit.Framework.Assert.IsTrue("expected pending cached list to still be empty.",
                                          !dn.GetPendingCached().GetEnumerator().HasNext());
            NUnit.Framework.Assert.AreEqual("failed to insert blocks[0]", blocks[0], dn.GetCached
                                                ().GetEnumerator().Next());
            NUnit.Framework.Assert.IsTrue("expected pending uncached list to still be empty."
                                          , !dn.GetPendingUncached().GetEnumerator().HasNext());
            // add another block to the back
            NUnit.Framework.Assert.IsTrue(dn.GetCached().AddItem(blocks[1]));
            IEnumerator <CachedBlock> iter = dn.GetCached().GetEnumerator();

            NUnit.Framework.Assert.AreEqual(blocks[0], iter.Next());
            NUnit.Framework.Assert.AreEqual(blocks[1], iter.Next());
            NUnit.Framework.Assert.IsTrue(!iter.HasNext());
            // add a block to the front
            NUnit.Framework.Assert.IsTrue(dn.GetCached().AddFirst(blocks[2]));
            iter = dn.GetCached().GetEnumerator();
            NUnit.Framework.Assert.AreEqual(blocks[2], iter.Next());
            NUnit.Framework.Assert.AreEqual(blocks[0], iter.Next());
            NUnit.Framework.Assert.AreEqual(blocks[1], iter.Next());
            NUnit.Framework.Assert.IsTrue(!iter.HasNext());
            // remove a block from the middle
            NUnit.Framework.Assert.IsTrue(dn.GetCached().Remove(blocks[0]));
            iter = dn.GetCached().GetEnumerator();
            NUnit.Framework.Assert.AreEqual(blocks[2], iter.Next());
            NUnit.Framework.Assert.AreEqual(blocks[1], iter.Next());
            NUnit.Framework.Assert.IsTrue(!iter.HasNext());
            // remove all blocks
            dn.GetCached().Clear();
            NUnit.Framework.Assert.IsTrue("expected cached list to be empty after clear.", !dn
                                          .GetPendingCached().GetEnumerator().HasNext());
        }
        public void Write(TorrentFile file, long offset, byte[] buffer, int bufferOffset, int count, bool forceWrite)
        {
            if (forceWrite)
            {
                writer.Write(file, offset, buffer, bufferOffset, count);
            }
            else
            {
                if (Used > Capacity - count)
                    Flush(0);

                var cacheBuffer = BufferManager.EmptyBuffer;
                ClientEngine.BufferManager.GetBuffer(ref cacheBuffer, count);
                Buffer.BlockCopy(buffer, bufferOffset, cacheBuffer, 0, count);

                var block = new CachedBlock();
                block.Buffer = cacheBuffer;
                block.Count = count;
                block.Offset = offset;
                block.File = file;
                cachedBlocks.Add(block);
            }
        }
        /// <summary>Apply a CacheDirective to a file.</summary>
        /// <param name="directive">The CacheDirective to apply.</param>
        /// <param name="file">The file.</param>
        private void RescanFile(CacheDirective directive, INodeFile file)
        {
            BlockInfoContiguous[] blockInfos = file.GetBlocks();
            // Increment the "needed" statistics
            directive.AddFilesNeeded(1);
            // We don't cache UC blocks, don't add them to the total here
            long neededTotal = file.ComputeFileSizeNotIncludingLastUcBlock() * directive.GetReplication();

            directive.AddBytesNeeded(neededTotal);
            // The pool's bytesNeeded is incremented as we scan. If the demand
            // thus far plus the demand of this file would exceed the pool's limit,
            // do not cache this file.
            CachePool pool = directive.GetPool();

            if (pool.GetBytesNeeded() > pool.GetLimit())
            {
                Log.Debug("Directive {}: not scanning file {} because " + "bytesNeeded for pool {} is {}, but the pool's limit is {}"
                          , directive.GetId(), file.GetFullPathName(), pool.GetPoolName(), pool.GetBytesNeeded
                              (), pool.GetLimit());
                return;
            }
            long cachedTotal = 0;

            foreach (BlockInfoContiguous blockInfo in blockInfos)
            {
                if (!blockInfo.GetBlockUCState().Equals(HdfsServerConstants.BlockUCState.Complete))
                {
                    // We don't try to cache blocks that are under construction.
                    Log.Trace("Directive {}: can't cache block {} because it is in state " + "{}, not COMPLETE."
                              , directive.GetId(), blockInfo, blockInfo.GetBlockUCState());
                    continue;
                }
                Block       block   = new Block(blockInfo.GetBlockId());
                CachedBlock ncblock = new CachedBlock(block.GetBlockId(), directive.GetReplication(), mark);
                CachedBlock ocblock = cachedBlocks.Get(ncblock);
                if (ocblock == null)
                {
                    cachedBlocks.Put(ncblock);
                    ocblock = ncblock;
                }
                else
                {
                    // Update bytesUsed using the current replication levels.
                    // Assumptions: we assume that all the blocks are the same length
                    // on each datanode.  We can assume this because we're only caching
                    // blocks in state COMPLETE.
                    // Note that if two directives are caching the same block(s), they will
                    // both get them added to their bytesCached.
                    IList<DatanodeDescriptor> cachedOn = ocblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type.Cached);
                    long cachedByBlock = Math.Min(cachedOn.Count, directive.GetReplication()) * blockInfo.GetNumBytes();
                    cachedTotal += cachedByBlock;
                    if ((mark != ocblock.GetMark()) || (ocblock.GetReplication() < directive.GetReplication()))
                    {
                        //
                        // Overwrite the block's replication and mark in two cases:
                        //
                        // 1. If the mark on the CachedBlock is different from the mark for
                        // this scan, that means the block hasn't been updated during this
                        // scan, and we should overwrite whatever is there, since it is no
                        // longer valid.
                        //
                        // 2. If the replication in the CachedBlock is less than what the
                        // directive asks for, we want to increase the block's replication
                        // field to what the directive asks for.
                        //
                        ocblock.SetReplicationAndMark(directive.GetReplication(), mark);
                    }
                }
                Log.Trace("Directive {}: setting replication for block {} to {}", directive.GetId
                              (), blockInfo, ocblock.GetReplication());
            }
            // Increment the "cached" statistics
            directive.AddBytesCached(cachedTotal);
            if (cachedTotal == neededTotal)
            {
                directive.AddFilesCached(1);
            }
            Log.Debug("Directive {}: caching {}: {}/{} bytes", directive.GetId(), file.GetFullPathName
                          (), cachedTotal, neededTotal);
        }
 /// <summary>Scan through the cached block map.</summary>
 /// <remarks>
 /// Scan through the cached block map.
 /// Any blocks which are under-replicated should be assigned new Datanodes.
 /// Blocks that are over-replicated should be removed from Datanodes.
 /// </remarks>
 private void RescanCachedBlockMap()
 {
     for (IEnumerator<CachedBlock> cbIter = cachedBlocks.GetEnumerator(); cbIter.HasNext();)
     {
         scannedBlocks++;
         CachedBlock cblock = cbIter.Next();
         IList<DatanodeDescriptor> pendingCached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type.PendingCached);
         IList<DatanodeDescriptor> cached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type.Cached);
         IList<DatanodeDescriptor> pendingUncached = cblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type.PendingUncached);
         // Remove nodes from PENDING_UNCACHED if they were actually uncached.
         for (IEnumerator<DatanodeDescriptor> iter = pendingUncached.GetEnumerator(); iter.HasNext();)
         {
             DatanodeDescriptor datanode = iter.Next();
             if (!cblock.IsInList(datanode.GetCached()))
             {
                 Log.Trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because the DataNode uncached it."
                           , cblock.GetBlockId(), datanode.GetDatanodeUuid());
                 datanode.GetPendingUncached().Remove(cblock);
                 iter.Remove();
             }
         }
         BlockInfoContiguous blockInfo = blockManager.GetStoredBlock(new Block(cblock.GetBlockId()));
         string reason       = FindReasonForNotCaching(cblock, blockInfo);
         int    neededCached = 0;
         if (reason != null)
         {
             Log.Trace("Block {}: can't cache block because it is {}", cblock.GetBlockId(), reason
                       );
         }
         else
         {
             neededCached = cblock.GetReplication();
         }
         int numCached = cached.Count;
         if (numCached >= neededCached)
         {
             // If we have enough replicas, drop all pending cached.
             for (IEnumerator<DatanodeDescriptor> iter_1 = pendingCached.GetEnumerator(); iter_1.HasNext();)
             {
                 DatanodeDescriptor datanode = iter_1.Next();
                 datanode.GetPendingCached().Remove(cblock);
                 iter_1.Remove();
                 Log.Trace("Block {}: removing from PENDING_CACHED for node {}" + "because we already have {} cached replicas and we only"
                           + " need {}", cblock.GetBlockId(), datanode.GetDatanodeUuid(), numCached, neededCached
                           );
             }
         }
         if (numCached < neededCached)
         {
             // If we don't have enough replicas, drop all pending uncached.
             for (IEnumerator<DatanodeDescriptor> iter_1 = pendingUncached.GetEnumerator(); iter_1.HasNext();)
             {
                 DatanodeDescriptor datanode = iter_1.Next();
                 datanode.GetPendingUncached().Remove(cblock);
                 iter_1.Remove();
                 Log.Trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because we only have {} cached replicas and we need "
                           + "{}", cblock.GetBlockId(), datanode.GetDatanodeUuid(), numCached, neededCached
                           );
             }
         }
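         // Additional replicas to uncache: currently cached replicas beyond the needed count, less those already pending uncache.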
         int neededUncached = numCached - (pendingUncached.Count + neededCached);
         if (neededUncached > 0)
         {
             AddNewPendingUncached(neededUncached, cblock, cached, pendingUncached);
         }
         else
         {
             int additionalCachedNeeded = neededCached - (numCached + pendingCached.Count);
             if (additionalCachedNeeded > 0)
             {
                 AddNewPendingCached(additionalCachedNeeded, cblock, cached, pendingCached);
             }
         }
         if ((neededCached == 0) && pendingUncached.IsEmpty() && pendingCached.IsEmpty())
         {
             // we have nothing more to do with this block.
             Log.Trace("Block {}: removing from cachedBlocks, since neededCached " + "== 0, and pendingUncached and pendingCached are empty."
                       , cblock.GetBlockId());
             cbIter.Remove();
         }
     }
 }
        /// <summary>Add new entries to the PendingCached list.</summary>
        /// <param name="neededCached">The number of replicas that need to be cached.</param>
        /// <param name="cachedBlock">The block which needs to be cached.</param>
        /// <param name="cached">A list of DataNodes currently caching the block.</param>
        /// <param name="pendingCached">
        /// A list of DataNodes that will soon cache the
        /// block.
        /// </param>
        private void AddNewPendingCached(int neededCached, CachedBlock cachedBlock,
                                         IList<DatanodeDescriptor> cached, IList<DatanodeDescriptor> pendingCached)
        {
            // To figure out which replicas can be cached, we consult the
            // blocksMap.  We don't want to try to cache a corrupt replica, though.
            BlockInfoContiguous blockInfo = blockManager.GetStoredBlock(new Block(cachedBlock.GetBlockId()));

            if (blockInfo == null)
            {
                Log.Debug("Block {}: can't add new cached replicas," + " because there is no record of this block "
                          + "on the NameNode.", cachedBlock.GetBlockId());
                return;
            }
            if (!blockInfo.IsComplete())
            {
                Log.Debug("Block {}: can't cache this block, because it is not yet" + " complete."
                          , cachedBlock.GetBlockId());
                return;
            }
            // Filter the list of replicas to only the valid targets
            IList <DatanodeDescriptor> possibilities = new List <DatanodeDescriptor>();
            int numReplicas = blockInfo.GetCapacity();
            ICollection<DatanodeDescriptor> corrupt = blockManager.GetCorruptReplicas(blockInfo);
            int outOfCapacity = 0;

            for (int i = 0; i < numReplicas; i++)
            {
                DatanodeDescriptor datanode = blockInfo.GetDatanode(i);
                if (datanode == null)
                {
                    continue;
                }
                if (datanode.IsDecommissioned() || datanode.IsDecommissionInProgress())
                {
                    continue;
                }
                if (corrupt != null && corrupt.Contains(datanode))
                {
                    continue;
                }
                if (pendingCached.Contains(datanode) || cached.Contains(datanode))
                {
                    continue;
                }
                long pendingBytes = 0;
                // Subtract pending cached blocks from effective capacity
                IEnumerator <CachedBlock> it = datanode.GetPendingCached().GetEnumerator();
                while (it.HasNext())
                {
                    CachedBlock         cBlock = it.Next();
                    BlockInfoContiguous info   = blockManager.GetStoredBlock(new Block(cBlock.GetBlockId()));
                    if (info != null)
                    {
                        pendingBytes -= info.GetNumBytes();
                    }
                }
                it = datanode.GetPendingUncached().GetEnumerator();
                // Add pending uncached blocks from effective capacity
                while (it.HasNext())
                {
                    CachedBlock         cBlock = it.Next();
                    BlockInfoContiguous info   = blockManager.GetStoredBlock(new Block(cBlock.GetBlockId()));
                    if (info != null)
                    {
                        pendingBytes += info.GetNumBytes();
                    }
                }
                long pendingCapacity = pendingBytes + datanode.GetCacheRemaining();
                if (pendingCapacity < blockInfo.GetNumBytes())
                {
                    Log.Trace("Block {}: DataNode {} is not a valid possibility " + "because the block has size {}, but the DataNode only has {}"
                              + "bytes of cache remaining ({} pending bytes, {} already cached.", blockInfo.GetBlockId
                                  (), datanode.GetDatanodeUuid(), blockInfo.GetNumBytes(), pendingCapacity, pendingBytes
                              , datanode.GetCacheRemaining());
                    outOfCapacity++;
                    continue;
                }
                possibilities.AddItem(datanode);
            }
            IList<DatanodeDescriptor> chosen = ChooseDatanodesForCaching(possibilities, neededCached,
                                                                         blockManager.GetDatanodeManager().GetStaleInterval());

            foreach (DatanodeDescriptor datanode_1 in chosen)
            {
                Log.Trace("Block {}: added to PENDING_CACHED on DataNode {}", blockInfo.GetBlockId
                              (), datanode_1.GetDatanodeUuid());
                pendingCached.AddItem(datanode_1);
                bool added = datanode_1.GetPendingCached().AddItem(cachedBlock);
                System.Diagnostics.Debug.Assert(added);
            }
            // We were unable to satisfy the requested replication factor
            if (neededCached > chosen.Count)
            {
                Log.Debug("Block {}: we only have {} of {} cached replicas." + " {} DataNodes have insufficient cache capacity."
                          , blockInfo.GetBlockId(), (cachedBlock.GetReplication() - neededCached + chosen.
                                                     Count), cachedBlock.GetReplication(), outOfCapacity);
            }
        }