Example #1
0
        /// <summary>
        /// Create a list of
        /// <see cref="VolumeBlockLocationCallable"/>
        /// corresponding to a set
        /// of datanodes and blocks. The blocks must all correspond to the same
        /// block pool.
        /// </summary>
        /// <param name="datanodeBlocks">Map of datanodes to block replicas at each datanode</param>
        /// <returns>
        /// callables Used to query each datanode for location information on
        /// the block replicas at the datanode
        /// </returns>
        private static IList <BlockStorageLocationUtil.VolumeBlockLocationCallable> CreateVolumeBlockLocationCallables
            (Configuration conf, IDictionary <DatanodeInfo, IList <LocatedBlock> > datanodeBlocks
            , int timeout, bool connectToDnViaHostname, Span parent)
        {
            // Nothing to query: hand back an empty list rather than null.
            if (datanodeBlocks.IsEmpty())
            {
                return Lists.NewArrayList();
            }
            // One callable is built per datanode that has at least one replica.
            IList <BlockStorageLocationUtil.VolumeBlockLocationCallable> result =
                new AList <BlockStorageLocationUtil.VolumeBlockLocationCallable>();
            foreach (KeyValuePair <DatanodeInfo, IList <LocatedBlock> > dnEntry in datanodeBlocks)
            {
                DatanodeInfo dn = dnEntry.Key;
                IList <LocatedBlock> blocks = dnEntry.Value;
                // Skip datanodes with no replicas to report on.
                if (blocks.IsEmpty())
                {
                    continue;
                }
                // Every block must belong to the pool of the first one; a mixed
                // set is a caller error.
                string expectedPoolId = blocks[0].GetBlock().GetBlockPoolId();
                foreach (LocatedBlock candidate in blocks)
                {
                    if (!expectedPoolId.Equals(candidate.GetBlock().GetBlockPoolId()))
                    {
                        throw new ArgumentException("All blocks to be queried must be in the same block pool: "
                                                    + blocks[0].GetBlock() + " and " + candidate + " are from different pools.");
                    }
                }
                // Gather the RPC parameters: the block ids plus the matching
                // access tokens, kept in the same order.
                long[] ids = new long[blocks.Count];
                IList <Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> > tokens =
                    new AList <Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> >(blocks.Count);
                int idx = 0;
                foreach (LocatedBlock block in blocks)
                {
                    ids[idx++] = block.GetBlock().GetBlockId();
                    tokens.AddItem(block.GetBlockToken());
                }
                result.AddItem(new BlockStorageLocationUtil.VolumeBlockLocationCallable
                                   (conf, dn, expectedPoolId, ids, tokens, timeout, connectToDnViaHostname, parent));
            }
            return result;
        }
Example #2
0
        /// <summary>
        /// Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
        /// making one RPC to each datanode.
        /// </summary>
        /// <remarks>
        /// Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
        /// making one RPC to each datanode. These RPCs are made in parallel using a
        /// threadpool. Datanodes whose RPC fails, times out, or is cancelled are
        /// simply absent from the returned map (partial results are acceptable).
        /// </remarks>
        /// <param name="datanodeBlocks">Map of datanodes to the blocks present on the DN</param>
        /// <param name="poolsize">Number of threads used to issue the RPCs in parallel</param>
        /// <param name="timeoutMs">Per-invocation timeout, in milliseconds</param>
        /// <param name="connectToDnViaHostname">Whether to connect to datanodes via hostname</param>
        /// <returns>metadatas Map of datanodes to block metadata of the DN</returns>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Security.Token.Block.InvalidBlockTokenException
        ///     ">if client does not have read access on a requested block</exception>
        internal static IDictionary <DatanodeInfo, HdfsBlocksMetadata> QueryDatanodesForHdfsBlocksMetadata
            (Configuration conf, IDictionary <DatanodeInfo, IList <LocatedBlock> > datanodeBlocks
            , int poolsize, int timeoutMs, bool connectToDnViaHostname)
        {
            IList <BlockStorageLocationUtil.VolumeBlockLocationCallable> callables =
                CreateVolumeBlockLocationCallables(conf, datanodeBlocks, timeoutMs, connectToDnViaHostname,
                                                   Trace.CurrentSpan());
            // Use a thread pool to execute the Callables in parallel
            IList <Future <HdfsBlocksMetadata> > futures = new AList <Future <HdfsBlocksMetadata> >();
            ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
            try
            {
                futures = executor.InvokeAll(callables, timeoutMs, TimeUnit.Milliseconds);
            }
            catch (Exception)
            {
                // Swallow the exception here, because we can return partial results
            }
            finally
            {
                // Always release the pool's threads, even if InvokeAll threw.
                executor.Shutdown();
            }
            IDictionary <DatanodeInfo, HdfsBlocksMetadata> metadatas =
                Maps.NewHashMapWithExpectedSize(datanodeBlocks.Count);
            // Fill in metadatas with results from DN RPCs, where possible.
            // callables[i] and futures[i] refer to the same datanode because
            // InvokeAll preserves the order of the submitted tasks.
            for (int i = 0; i < futures.Count; i++)
            {
                BlockStorageLocationUtil.VolumeBlockLocationCallable callable = callables[i];
                DatanodeInfo datanode = callable.GetDatanodeInfo();
                Future <HdfsBlocksMetadata> future = futures[i];
                try
                {
                    // Get() throws if the RPC failed, timed out, or was cancelled.
                    HdfsBlocksMetadata metadata = future.Get();
                    metadatas[datanode] = metadata;
                }
                catch (CancellationException e)
                {
                    // Timed-out tasks are cancelled by InvokeAll; this datanode is
                    // left out of the results.
                    Log.Info("Cancelled while waiting for datanode " + datanode.GetIpcAddr(false) + ": "
                             + e.ToString());
                }
                catch (ExecutionException e)
                {
                    Exception t = e.InnerException;
                    if (t is InvalidBlockTokenException)
                    {
                        // Propagate access errors: the caller must know its token
                        // was rejected rather than silently get partial results.
                        Log.Warn("Invalid access token when trying to retrieve " + "information from datanode "
                                 + datanode.GetIpcAddr(false));
                        throw (InvalidBlockTokenException)t;
                    }
                    else if (t is NotSupportedException)
                    {
                        // The datanode runs a version without the required API;
                        // the whole query cannot succeed, so propagate.
                        Log.Info("Datanode " + datanode.GetIpcAddr(false) + " does not support" + " required #getHdfsBlocksMetadata() API"
                                 );
                        throw (NotSupportedException)t;
                    }
                    else
                    {
                        // Any other per-datanode failure only costs us that DN's
                        // entry in the result map.
                        Log.Info("Failed to query block locations on datanode " + datanode.GetIpcAddr(false
                                                                                                      ) + ": " + t);
                    }
                    if (Log.IsDebugEnabled())
                    {
                        Log.Debug("Could not fetch information from datanode", t);
                    }
                }
                catch (Exception)
                {
                    // Shouldn't happen, because invokeAll waits for all Futures to be ready
                    Log.Info("Interrupted while fetching HdfsBlocksMetadata");
                }
            }
            return metadatas;
        }