Example #1
 /// <exception cref="Org.Apache.Hadoop.Hdfs.Server.Datanode.BPServiceActorActionException
 ///     "/>
 public virtual void ReportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
                              DatanodeRegistration bpRegistration)
 {
     if (bpRegistration == null)
     {
         return;
     }
     DatanodeInfo[] dnArr        = new DatanodeInfo[] { new DatanodeInfo(bpRegistration) };
     string[]       uuids        = new string[] { storageUuid };
     StorageType[]  types        = new StorageType[] { storageType };
     LocatedBlock[] locatedBlock = new LocatedBlock[] { new LocatedBlock(block, dnArr,
                                                                         uuids, types) };
     try
     {
         bpNamenode.ReportBadBlocks(locatedBlock);
     }
     catch (RemoteException re)
     {
         DataNode.Log.Info("reportBadBlock encountered RemoteException for " + "block:  "
                           + block, re);
     }
     catch (IOException)
     {
         throw new BPServiceActorActionException("Failed to report bad block " + block + " to namenode: ");
     }
 }
Example #2
        /// <summary>Create a file with one block and corrupt some/all of the block replicas.</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        /// <exception cref="System.IO.FileNotFoundException"/>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        private void CreateAFileWithCorruptedBlockReplicas(Path filePath, short repl,
                                                            int corruptBlockCount)
        {
            DFSTestUtil.CreateFile(dfs, filePath, BlockSize, repl, 0);
            DFSTestUtil.WaitReplication(dfs, filePath, repl);
            // Locate the file blocks by asking name node
            LocatedBlocks locatedblocks = dfs.dfs.GetNamenode().GetBlockLocations(
                filePath.ToString(), 0L, BlockSize);

            NUnit.Framework.Assert.AreEqual(repl, locatedblocks.Get(0).GetLocations().Length);
            // The file only has one block
            LocatedBlock lblock = locatedblocks.Get(0);

            DatanodeInfo[] datanodeinfos = lblock.GetLocations();
            ExtendedBlock  block         = lblock.GetBlock();

            // corrupt some /all of the block replicas
            for (int i = 0; i < corruptBlockCount; i++)
            {
                DatanodeInfo dninfo = datanodeinfos[i];
                DataNode     dn     = cluster.GetDataNode(dninfo.GetIpcPort());
                CorruptBlock(block, dn);
                Log.Debug("Corrupted block " + block.GetBlockName() + " on data node " + dninfo);
            }
        }
Example #3
 /// <exception cref="System.IO.IOException"/>
 public override void WriteBlock(ExtendedBlock blk, StorageType storageType,
                                 Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken,
                                 string clientName, DatanodeInfo[] targets, StorageType[] targetStorageTypes,
                                 DatanodeInfo source, BlockConstructionStage stage, int pipelineSize,
                                 long minBytesRcvd, long maxBytesRcvd, long latestGenerationStamp,
                                 DataChecksum requestedChecksum, CachingStrategy cachingStrategy,
                                 bool allowLazyPersist, bool pinning, bool[] targetPinnings)
 {
     DataTransferProtos.ClientOperationHeaderProto header =
         DataTransferProtoUtil.BuildClientHeader(blk, clientName, blockToken);
     DataTransferProtos.ChecksumProto checksumProto = DataTransferProtoUtil.ToProto(requestedChecksum);
     DataTransferProtos.OpWriteBlockProto.Builder proto = DataTransferProtos.OpWriteBlockProto.NewBuilder()
         .SetHeader(header).SetStorageType(PBHelper.ConvertStorageType(storageType))
         .AddAllTargets(PBHelper.Convert(targets, 1))
         .AddAllTargetStorageTypes(PBHelper.ConvertStorageTypes(targetStorageTypes, 1))
         .SetStage(DataTransferProtoUtil.ToProto(stage)).SetPipelineSize(pipelineSize)
         .SetMinBytesRcvd(minBytesRcvd).SetMaxBytesRcvd(maxBytesRcvd)
         .SetLatestGenerationStamp(latestGenerationStamp).SetRequestedChecksum(checksumProto)
         .SetCachingStrategy(GetCachingStrategy(cachingStrategy))
         .SetAllowLazyPersist(allowLazyPersist).SetPinning(pinning)
         .AddAllTargetPinnings(PBHelper.Convert(targetPinnings, 1));
     if (source != null)
     {
         proto.SetSource(PBHelper.ConvertDatanodeInfo(source));
     }
     Send(@out, OP.WriteBlock, ((DataTransferProtos.OpWriteBlockProto)proto.Build()));
 }
Example #4
        /// <summary>Convert a DatanodeInfo to a Json map.</summary>
        internal static IDictionary <string, object> ToJsonMap(DatanodeInfo datanodeinfo)
        {
            if (datanodeinfo == null)
            {
                return(null);
            }
            // TODO: Fix storageID
            IDictionary <string, object> m = new SortedDictionary <string, object>();

            m["ipAddr"] = datanodeinfo.GetIpAddr();
            // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
            // expect this instead of the two fields.
            m["name"]                = datanodeinfo.GetXferAddr();
            m["hostName"]            = datanodeinfo.GetHostName();
            m["storageID"]           = datanodeinfo.GetDatanodeUuid();
            m["xferPort"]            = datanodeinfo.GetXferPort();
            m["infoPort"]            = datanodeinfo.GetInfoPort();
            m["infoSecurePort"]      = datanodeinfo.GetInfoSecurePort();
            m["ipcPort"]             = datanodeinfo.GetIpcPort();
            m["capacity"]            = datanodeinfo.GetCapacity();
            m["dfsUsed"]             = datanodeinfo.GetDfsUsed();
            m["remaining"]           = datanodeinfo.GetRemaining();
            m["blockPoolUsed"]       = datanodeinfo.GetBlockPoolUsed();
            m["cacheCapacity"]       = datanodeinfo.GetCacheCapacity();
            m["cacheUsed"]           = datanodeinfo.GetCacheUsed();
            m["lastUpdate"]          = datanodeinfo.GetLastUpdate();
            m["lastUpdateMonotonic"] = datanodeinfo.GetLastUpdateMonotonic();
            m["xceiverCount"]        = datanodeinfo.GetXceiverCount();
            m["networkLocation"]     = datanodeinfo.GetNetworkLocation();
            m["adminState"]          = datanodeinfo.GetAdminState().ToString();
            return(m);
        }
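
The map returned by ToJsonMap is the encoding counterpart of JsonUtil.ToDatanodeInfo, which Example #23 below exercises from the decoding side. Below is a minimal round-trip sketch; it assumes the DFSTestUtil and JsonUtil helpers used elsewhere on this page are in scope and that ToDatanodeInfo accepts the key set ToJsonMap emits (Example #23 suggests it does). It is illustrative only, not part of the original example.

        // Hedged sketch: encode a DatanodeInfo into a map and decode it back.
        DatanodeInfo di = DFSTestUtil.GetLocalDatanodeInfo();
        IDictionary <string, object> m = JsonUtil.ToJsonMap(di);
        // "name" carries ipAddr:xferPort for older clients, as the comment above notes.
        DatanodeInfo roundTripped = JsonUtil.ToDatanodeInfo(m);
        NUnit.Framework.Assert.AreEqual(di.GetXferAddr(), roundTripped.GetXferAddr());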
Example #5
        public virtual void TestConvertBlockRecoveryCommand()
        {
            DatanodeInfo di1 = DFSTestUtil.GetLocalDatanodeInfo();
            DatanodeInfo di2 = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
            IList <BlockRecoveryCommand.RecoveringBlock> blks = ImmutableList.Of(
                new BlockRecoveryCommand.RecoveringBlock(GetExtendedBlock(1), dnInfo, 3),
                new BlockRecoveryCommand.RecoveringBlock(GetExtendedBlock(2), dnInfo, 3));
            BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);

            DatanodeProtocolProtos.BlockRecoveryCommandProto proto = PBHelper.Convert(cmd);
            NUnit.Framework.Assert.AreEqual(1, proto.GetBlocks(0).GetBlock().GetB().GetBlockId());
            NUnit.Framework.Assert.AreEqual(2, proto.GetBlocks(1).GetBlock().GetB().GetBlockId());
            BlockRecoveryCommand cmd2 = PBHelper.Convert(proto);
            IList <BlockRecoveryCommand.RecoveringBlock> cmd2Blks =
                Lists.NewArrayList(cmd2.GetRecoveringBlocks());

            NUnit.Framework.Assert.AreEqual(blks[0].GetBlock(), cmd2Blks[0].GetBlock());
            NUnit.Framework.Assert.AreEqual(blks[1].GetBlock(), cmd2Blks[1].GetBlock());
            NUnit.Framework.Assert.AreEqual(Joiner.On(",").Join(blks), Joiner.On(",").Join(cmd2Blks));
            NUnit.Framework.Assert.AreEqual(cmd.ToString(), cmd2.ToString());
        }
Example #6
        public virtual void TestShortCircuitReadFromServerWithoutShm()
        {
            TemporarySocketDirectory sockDir    = new TemporarySocketDirectory();
            Configuration            clientConf = CreateShortCircuitConf("testShortCircuitReadFromServerWithoutShm"
                                                                         , sockDir);
            Configuration serverConf = new Configuration(clientConf);

            serverConf.SetInt(DFSConfigKeys.DfsShortCircuitSharedMemoryWatcherInterruptCheckMs
                              , 0);
            DFSInputStream.tcpReadsDisabledForTesting = true;
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).NumDataNodes(1).Build();

            cluster.WaitActive();
            clientConf.Set(DFSConfigKeys.DfsClientContext, "testShortCircuitReadFromServerWithoutShm_clientContext"
                           );
            DistributedFileSystem fs = (DistributedFileSystem)FileSystem.Get(cluster.GetURI(0), clientConf);
            string TestFile    = "/test_file";
            int    TestFileLen = 4000;
            int    Seed        = unchecked ((int)(0xFADEC));

            DFSTestUtil.CreateFile(fs, new Path(TestFile), TestFileLen, (short)1, Seed);
            byte[] contents = DFSTestUtil.ReadFileBuffer(fs, new Path(TestFile));
            byte[] expected = DFSTestUtil.CalculateFileContentsFromSeed(Seed, TestFileLen);
            NUnit.Framework.Assert.IsTrue(Arrays.Equals(contents, expected));
            ShortCircuitCache cache    = fs.dfs.GetClientContext().GetShortCircuitCache();
            DatanodeInfo      datanode = new DatanodeInfo(cluster.GetDataNodes()[0].GetDatanodeId());

            cache.GetDfsClientShmManager().Visit(new _Visitor_334(datanode));
            cluster.Shutdown();
            sockDir.Close();
        }
Example #7
        /*
         * Replace block
         */
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Net.Sockets.SocketException"/>
        private bool ReplaceBlock(ExtendedBlock block, DatanodeInfo source, DatanodeInfo sourceProxy,
                                  DatanodeInfo destination, StorageType targetStorageType)
        {
            Socket sock = new Socket();

            try
            {
                sock.Connect(NetUtils.CreateSocketAddr(destination.GetXferAddr()),
                             HdfsServerConstants.ReadTimeout);
                sock.SetKeepAlive(true);
                // sendRequest
                DataOutputStream @out = new DataOutputStream(sock.GetOutputStream());
                new Sender(@out).ReplaceBlock(block, targetStorageType, BlockTokenSecretManager.DummyToken,
                                              source.GetDatanodeUuid(), sourceProxy);
                @out.Flush();
                // receiveResponse
                DataInputStream reply = new DataInputStream(sock.GetInputStream());
                DataTransferProtos.BlockOpResponseProto proto =
                    DataTransferProtos.BlockOpResponseProto.ParseDelimitedFrom(reply);
                while (proto.GetStatus() == DataTransferProtos.Status.InProgress)
                {
                    proto = DataTransferProtos.BlockOpResponseProto.ParseDelimitedFrom(reply);
                }
                return(proto.GetStatus() == DataTransferProtos.Status.Success);
            }
            finally
            {
                sock.Close();
            }
        }
Example #8
        /// <summary>
        /// Test to verify that InterDatanode RPC timesout as expected when
        /// the server DN does not respond.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestInterDNProtocolTimeout()
        {
            Org.Apache.Hadoop.Ipc.Server server = new TestInterDatanodeProtocol.TestServer(1,
                                                                                           true);
            server.Start();
            IPEndPoint            addr     = NetUtils.GetConnectAddress(server);
            DatanodeID            fakeDnId = DFSTestUtil.GetLocalDatanodeID(addr.Port);
            DatanodeInfo          dInfo    = new DatanodeInfo(fakeDnId);
            InterDatanodeProtocol proxy    = null;

            try
            {
                proxy = DataNode.CreateInterDataNodeProtocolProxy(dInfo, conf, 500, false);
                proxy.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(
                                              new ExtendedBlock("bpid", 1), null, 100));
                NUnit.Framework.Assert.Fail("Expected SocketTimeoutException exception, but did not get.");
            }
            finally
            {
                if (proxy != null)
                {
                    RPC.StopProxy(proxy);
                }
                server.Stop();
            }
        }
Example #9
        public virtual void TestLocatedBlocks2Locations()
        {
            DatanodeInfo d = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] ds = new DatanodeInfo[1];
            ds[0] = d;
            // ok
            ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
            LocatedBlock  l1 = new LocatedBlock(b1, ds, 0, false);
            // corrupt
            ExtendedBlock        b2  = new ExtendedBlock("bpid", 2, 1, 1);
            LocatedBlock         l2  = new LocatedBlock(b2, ds, 0, true);
            IList <LocatedBlock> ls  = Arrays.AsList(l1, l2);
            LocatedBlocks        lbs = new LocatedBlocks(10, false, ls, l2, true, null);

            BlockLocation[] bs = DFSUtil.LocatedBlocks2Locations(lbs);
            NUnit.Framework.Assert.IsTrue("expected 2 blocks but got " + bs.Length, bs.Length
                                          == 2);
            int corruptCount = 0;

            foreach (BlockLocation b in bs)
            {
                if (b.IsCorrupt())
                {
                    corruptCount++;
                }
            }
            NUnit.Framework.Assert.IsTrue("expected 1 corrupt files but got " + corruptCount,
                                          corruptCount == 1);
            // test an empty location
            bs = DFSUtil.LocatedBlocks2Locations(new LocatedBlocks());
            NUnit.Framework.Assert.AreEqual(0, bs.Length);
        }
Example #10
 public _PrivilegedExceptionAction_107(DatanodeInfo node, Configuration conf,
                                       int socketTimeout, bool connectToDnViaHostname)
 {
     this.node                   = node;
     this.conf                   = conf;
     this.socketTimeout          = socketTimeout;
     this.connectToDnViaHostname = connectToDnViaHostname;
 }
Example #11
        public virtual void TestDefaultPolicy()
        {
            Configuration            conf = new HdfsConfiguration();
            ReplaceDatanodeOnFailure p    = ReplaceDatanodeOnFailure.Get(conf);

            DatanodeInfo[]   infos     = new DatanodeInfo[5];
            DatanodeInfo[][] datanodes = new DatanodeInfo[infos.Length + 1][];
            datanodes[0] = new DatanodeInfo[0];
            for (int i = 0; i < infos.Length;)
            {
                infos[i] = DFSTestUtil.GetLocalDatanodeInfo(50020 + i);
                i++;
                datanodes[i] = new DatanodeInfo[i];
                System.Array.Copy(infos, 0, datanodes[i], 0, datanodes[i].Length);
            }
            bool[] isAppend   = new bool[] { true, true, false, false };
            bool[] isHflushed = new bool[] { true, false, true, false };
            for (short replication = 1; replication <= infos.Length; replication++)
            {
                for (int nExistings = 0; nExistings < datanodes.Length; nExistings++)
                {
                    DatanodeInfo[] existings = datanodes[nExistings];
                    NUnit.Framework.Assert.AreEqual(nExistings, existings.Length);
                    for (int i_1 = 0; i_1 < isAppend.Length; i_1++)
                    {
                        for (int j = 0; j < isHflushed.Length; j++)
                        {
                            int  half            = replication / 2;
                            bool enoughReplica   = replication <= nExistings;
                            bool noReplica       = nExistings == 0;
                            bool replicationL3   = replication < 3;
                            bool existingsLEhalf = nExistings <= half;
                            bool isAH            = isAppend[i_1] || isHflushed[j];
                            bool expected;
                            if (enoughReplica || noReplica || replicationL3)
                            {
                                expected = false;
                            }
                            else
                            {
                                expected = isAH || existingsLEhalf;
                            }
                            bool computed = p.Satisfy(replication, existings, isAppend[i_1], isHflushed[j]);
                            try
                            {
                                NUnit.Framework.Assert.AreEqual(expected, computed);
                            }
                            catch (Exception e)
                            {
                                string s = "replication=" + replication + "\nnExistings =" + nExistings + "\nisAppend   ="
                                           + isAppend[i_1] + "\nisHflushed =" + isHflushed[j];
                                throw new RuntimeException(s, e);
                            }
                        }
                    }
                }
            }
        }
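
The nested loops above derive the expected answer from four booleans. Restated as a single predicate it reads as follows; this is a hedged sketch of the condition the test encodes, not the production ReplaceDatanodeOnFailure.Satisfy implementation, and the helper name is invented for illustration.

        // Illustrative restatement of the "expected" value computed in TestDefaultPolicy above.
        private static bool ExpectDatanodeReplacement(short replication, int nExistings,
                                                      bool isAppend, bool isHflushed)
        {
            bool enoughReplica = replication <= nExistings; // pipeline already has enough replicas
            bool noReplica     = nExistings == 0;           // nothing to copy from
            if (enoughReplica || noReplica || replication < 3)
            {
                return false;                               // never replace in these cases
            }
            int half = replication / 2;
            return isAppend || isHflushed || nExistings <= half;
        }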
Example #12
 internal static DatanodeInfo[] ToDatanodeInfos(
     IList <Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.DatanodeStorageInfo> storages)
 {
     DatanodeInfo[] datanodes = new DatanodeInfo[storages.Count];
     for (int i = 0; i < storages.Count; i++)
     {
         datanodes[i] = storages[i].GetDatanodeDescriptor();
     }
     return(datanodes);
 }
Example #13
 /// <summary>Remove a storage from the invalidatesSet</summary>
 internal virtual void Remove(DatanodeInfo dn)
 {
     lock (this)
     {
         LightWeightHashSet <Block> blocks = Sharpen.Collections.Remove(node2blocks, dn);
         if (blocks != null)
         {
             numBlocks -= blocks.Count;
         }
     }
 }
Example #14
        public virtual void TestLocatedBlockConstructorWithNullCachedLocs()
        {
            DatanodeInfo d = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] ds = new DatanodeInfo[1];
            ds[0] = d;
            ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
            LocatedBlock  l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);

            DatanodeInfo[] cachedLocs = l1.GetCachedLocations();
            NUnit.Framework.Assert.IsTrue(cachedLocs.Length == 0);
        }
Example #15
 internal VolumeBlockLocationCallable(Configuration configuration, DatanodeInfo datanode,
                                      string poolId, long[] blockIds,
                                      IList <Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> > dnTokens,
                                      int timeout, bool connectToDnViaHostname, Span parentSpan)
 {
     this.configuration          = configuration;
     this.timeout                = timeout;
     this.datanode               = datanode;
     this.poolId                 = poolId;
     this.blockIds               = blockIds;
     this.dnTokens               = dnTokens;
     this.connectToDnViaHostname = connectToDnViaHostname;
     this.parentSpan             = parentSpan;
 }
        /// <exception cref="System.IO.IOException"/>
        private ICollection <BlockRecoveryCommand.RecoveringBlock> InitRecoveringBlocks()
        {
            ICollection <BlockRecoveryCommand.RecoveringBlock> blocks =
                new AList <BlockRecoveryCommand.RecoveringBlock>(1);
            DatanodeInfo mockOtherDN = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] locs = new DatanodeInfo[] {
                new DatanodeInfo(dn.GetDNRegistrationForBP(block.GetBlockPoolId())), mockOtherDN };
            BlockRecoveryCommand.RecoveringBlock rBlock =
                new BlockRecoveryCommand.RecoveringBlock(block, locs, RecoveryId);
            blocks.AddItem(rBlock);
            return(blocks);
        }
 /// <exception cref="System.IO.IOException"/>
 public virtual BlocksWithLocations GetBlocks(DatanodeInfo datanode, long size)
 {
     NamenodeProtocolProtos.GetBlocksRequestProto req =
         ((NamenodeProtocolProtos.GetBlocksRequestProto)NamenodeProtocolProtos.GetBlocksRequestProto
              .NewBuilder().SetDatanode(PBHelper.Convert((DatanodeID)datanode)).SetSize(size).Build());
     try
     {
         return(PBHelper.Convert(rpcProxy.GetBlocks(NullController, req).GetBlocks()));
     }
     catch (ServiceException e)
     {
         throw ProtobufHelper.GetRemoteException(e);
     }
 }
Example #18
        private LocatedBlock CreateLocatedBlockNoStorageMedia()
        {
            DatanodeInfo[] dnInfos = new DatanodeInfo[] {
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h1", DatanodeInfo.AdminStates.DecommissionInprogress),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h2", DatanodeInfo.AdminStates.Decommissioned),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h3", DatanodeInfo.AdminStates.Normal) };
            LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);

            lb.SetBlockToken(new Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier>(
                                 Sharpen.Runtime.GetBytesForString("identifier"),
                                 Sharpen.Runtime.GetBytesForString("password"),
                                 new Text("kind"), new Text("service")));
            return(lb);
        }
Example #19
 /// <summary>Ask each of the actors to report a bad block hosted on another DN.</summary>
 internal virtual void ReportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block)
 {
     foreach (BPServiceActor actor in bpServices)
     {
         try
         {
             actor.ReportRemoteBadBlock(dnInfo, block);
         }
         catch (IOException e)
         {
             Log.Warn("Couldn't report bad block " + block + " to " + actor, e);
         }
     }
 }
Example #20
 /// <returns>
 /// true if the given storage has the given block listed for
 /// invalidation. Blocks are compared including their generation stamps:
 /// if a block is pending invalidation but with a different generation stamp,
 /// returns false.
 /// </returns>
 internal virtual bool Contains(DatanodeInfo dn, Block block)
 {
     lock (this)
     {
         LightWeightHashSet <Block> s = node2blocks[dn];
         if (s == null)
         {
             return(false);
         }
         // no invalidate blocks for this storage ID
         Block blockInSet = s.GetElement(block);
         return(blockInSet != null &&
                block.GetGenerationStamp() == blockInSet.GetGenerationStamp());
     }
 }
Example #21
 /// <summary>Remove the block from the specified storage.</summary>
 internal virtual void Remove(DatanodeInfo dn, Block block)
 {
     lock (this)
     {
         LightWeightHashSet <Block> v = node2blocks[dn];
         if (v != null && v.Remove(block))
         {
             numBlocks--;
             if (v.IsEmpty())
             {
                 Sharpen.Collections.Remove(node2blocks, dn);
             }
         }
     }
 }
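
Examples #13, #20 and #21 all manipulate the same node2blocks structure: a lock-guarded map from a datanode to the set of its blocks pending invalidation, with a numBlocks counter kept in step. The following is a simplified, self-contained sketch of that bookkeeping using plain .NET collections and a stand-in block type instead of the Hadoop LightWeightHashSet; it illustrates the pattern and is not the original class.

 using System.Collections.Generic;

 // Stand-in for the real Block type (only the fields the checks below need).
 internal sealed class SimpleBlock
 {
     public long BlockId;
     public long GenerationStamp;
 }

 internal sealed class InvalidateBlocksSketch
 {
     private readonly object gate = new object();
     private readonly Dictionary<string, Dictionary<long, SimpleBlock>> node2blocks =
         new Dictionary<string, Dictionary<long, SimpleBlock>>();
     private long numBlocks;

     // Mirrors Example #13: drop every pending block for one datanode.
     public void Remove(string datanodeUuid)
     {
         lock (gate)
         {
             Dictionary<long, SimpleBlock> blocks;
             if (node2blocks.TryGetValue(datanodeUuid, out blocks))
             {
                 node2blocks.Remove(datanodeUuid);
                 numBlocks -= blocks.Count;
             }
         }
     }

     // Mirrors Example #20: a block counts only if its generation stamp matches exactly.
     public bool Contains(string datanodeUuid, SimpleBlock block)
     {
         lock (gate)
         {
             Dictionary<long, SimpleBlock> blocks;
             if (!node2blocks.TryGetValue(datanodeUuid, out blocks))
             {
                 return false; // no pending invalidations for this datanode
             }
             SimpleBlock inSet;
             return blocks.TryGetValue(block.BlockId, out inSet)
                    && inSet.GenerationStamp == block.GenerationStamp;
         }
     }

     // Mirrors Example #21: remove one block and prune the per-node set when it empties.
     public void Remove(string datanodeUuid, SimpleBlock block)
     {
         lock (gate)
         {
             Dictionary<long, SimpleBlock> blocks;
             if (node2blocks.TryGetValue(datanodeUuid, out blocks) && blocks.Remove(block.BlockId))
             {
                 numBlocks--;
                 if (blocks.Count == 0)
                 {
                     node2blocks.Remove(datanodeUuid);
                 }
             }
         }
     }
 }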
Example #22
        /// <summary>
        /// Create a list of
        /// <see cref="VolumeBlockLocationCallable"/>
        /// corresponding to a set
        /// of datanodes and blocks. The blocks must all correspond to the same
        /// block pool.
        /// </summary>
        /// <param name="datanodeBlocks">Map of datanodes to block replicas at each datanode</param>
        /// <returns>
        /// callables Used to query each datanode for location information on
        /// the block replicas at the datanode
        /// </returns>
        private static IList <BlockStorageLocationUtil.VolumeBlockLocationCallable> CreateVolumeBlockLocationCallables(
            Configuration conf, IDictionary <DatanodeInfo, IList <LocatedBlock> > datanodeBlocks,
            int timeout, bool connectToDnViaHostname, Span parent)
        {
            if (datanodeBlocks.IsEmpty())
            {
                return(Lists.NewArrayList());
            }
            // Construct the callables, one per datanode
            IList <BlockStorageLocationUtil.VolumeBlockLocationCallable> callables =
                new AList <BlockStorageLocationUtil.VolumeBlockLocationCallable>();

            foreach (KeyValuePair <DatanodeInfo, IList <LocatedBlock> > entry in datanodeBlocks)
            {
                // Construct RPC parameters
                DatanodeInfo         datanode      = entry.Key;
                IList <LocatedBlock> locatedBlocks = entry.Value;
                if (locatedBlocks.IsEmpty())
                {
                    continue;
                }
                // Ensure that the blocks all are from the same block pool.
                string poolId = locatedBlocks[0].GetBlock().GetBlockPoolId();
                foreach (LocatedBlock lb in locatedBlocks)
                {
                    if (!poolId.Equals(lb.GetBlock().GetBlockPoolId()))
                    {
                        throw new ArgumentException("All blocks to be queried must be in the same block pool: "
                                                    + locatedBlocks[0].GetBlock() + " and " + lb + " are from different pools.");
                    }
                }
                long[] blockIds = new long[locatedBlocks.Count];
                int    i        = 0;
                IList <Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> > dnTokens =
                    new AList <Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> >(locatedBlocks.Count);
                foreach (LocatedBlock b in locatedBlocks)
                {
                    blockIds[i++] = b.GetBlock().GetBlockId();
                    dnTokens.AddItem(b.GetBlockToken());
                }
                BlockStorageLocationUtil.VolumeBlockLocationCallable callable =
                    new BlockStorageLocationUtil.VolumeBlockLocationCallable(
                        conf, datanode, poolId, blockIds, dnTokens, timeout, connectToDnViaHostname, parent);
                callables.AddItem(callable);
            }
            return(callables);
        }
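
The inner foreach above enforces that every LocatedBlock in a group names the same block pool. The same guard can be factored into a small helper; this is a hedged sketch assuming System.Linq is available, with an invented method name, shown purely for illustration.

        // Illustrative helper: true when every block in the list belongs to the
        // same block pool as the first one (equivalent to the loop check above).
        private static bool AllFromSamePool(IList <LocatedBlock> locatedBlocks)
        {
            string poolId = locatedBlocks[0].GetBlock().GetBlockPoolId();
            return locatedBlocks.All(lb => poolId.Equals(lb.GetBlock().GetBlockPoolId()));
        }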
Example #23
        public virtual void TestToDatanodeInfoWithName()
        {
            IDictionary <string, object> response = new Dictionary <string, object>();
            // Older servers (1.x, 0.23, etc.) send 'name' instead of ipAddr
            // and xferPort.
            string name = "127.0.0.1:1004";

            response["name"]            = name;
            response["hostName"]        = "localhost";
            response["storageID"]       = "fake-id";
            response["infoPort"]        = 1338l;
            response["ipcPort"]         = 1339l;
            response["capacity"]        = 1024l;
            response["dfsUsed"]         = 512l;
            response["remaining"]       = 512l;
            response["blockPoolUsed"]   = 512l;
            response["lastUpdate"]      = 0l;
            response["xceiverCount"]    = 4096l;
            response["networkLocation"] = "foo.bar.baz";
            response["adminState"]      = "NORMAL";
            response["cacheCapacity"]   = 123l;
            response["cacheUsed"]       = 321l;
            DatanodeInfo di = JsonUtil.ToDatanodeInfo(response);

            NUnit.Framework.Assert.AreEqual(name, di.GetXferAddr());
            // The encoded result should contain name, ipAddr and xferPort.
            IDictionary <string, object> r = JsonUtil.ToJsonMap(di);

            NUnit.Framework.Assert.AreEqual(name, r["name"]);
            NUnit.Framework.Assert.AreEqual("127.0.0.1", r["ipAddr"]);
            // In this test, it is Integer instead of Long since json was not actually
            // involved in constructing the map.
            NUnit.Framework.Assert.AreEqual(1004, (int)r["xferPort"]);
            // Invalid names
            string[] badNames = new string[] { "127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet"
                                               , ":123" };
            foreach (string badName in badNames)
            {
                response["name"] = badName;
                CheckDecodeFailure(response);
            }
            // Missing both name and ipAddr
            Sharpen.Collections.Remove(response, "name");
            CheckDecodeFailure(response);
            // Only missing xferPort
            response["ipAddr"] = "127.0.0.1";
            CheckDecodeFailure(response);
        }
Example #24
        /// <summary>try to access a block on a data node.</summary>
        /// <remarks>try to access a block on a data node. If fails - throws exception</remarks>
        /// <param name="datanode"/>
        /// <param name="lblock"/>
        /// <exception cref="System.IO.IOException"/>
        private void AccessBlock(DatanodeInfo datanode, LocatedBlock lblock)
        {
            IPEndPoint    targetAddr = null;
            ExtendedBlock block      = lblock.GetBlock();

            targetAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
            BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf))
                .SetInetSocketAddress(targetAddr).SetBlock(block)
                .SetFileName(BlockReaderFactory.GetFileName(targetAddr, "test-blockpoolid", block.GetBlockId()))
                .SetBlockToken(lblock.GetBlockToken()).SetStartOffset(0).SetLength(-1).SetVerifyChecksum(true)
                .SetClientName("TestDataNodeVolumeFailure").SetDatanodeInfo(datanode)
                .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
                .SetClientCacheContext(ClientContext.GetFromConf(conf)).SetConfiguration(conf)
                .SetRemotePeerFactory(new _RemotePeerFactory_422(this)).Build();

            blockReader.Close();
        }
Example #25
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, i == RoNodeIndex ? DatanodeStorage.State
                                        .ReadOnlyShared : DatanodeStorage.State.Normal);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes)
                          .DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()),
                                            cluster.GetConfiguration(0));
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(
                    BlockManagerTestUtil.GetStorageReportsForDatanode(
                        datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                    i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
Example #26
        /// <exception cref="Com.Google.Protobuf.ServiceException"/>
        public virtual NamenodeProtocolProtos.GetBlocksResponseProto GetBlocks(
            RpcController unused, NamenodeProtocolProtos.GetBlocksRequestProto request)
        {
            DatanodeInfo        dnInfo = new DatanodeInfo(PBHelper.Convert(request.GetDatanode()));
            BlocksWithLocations blocks;

            try
            {
                blocks = impl.GetBlocks(dnInfo, request.GetSize());
            }
            catch (IOException e)
            {
                throw new ServiceException(e);
            }
            return((NamenodeProtocolProtos.GetBlocksResponseProto)NamenodeProtocolProtos.GetBlocksResponseProto
                   .NewBuilder().SetBlocks(PBHelper.Convert(blocks)).Build());
        }
Example #27
        /// <exception cref="System.IO.IOException"/>
        private void GetBlocksWithException(NamenodeProtocol namenode, DatanodeInfo datanode, long size)
        {
            bool getException = false;

            try
            {
                namenode.GetBlocks(DFSTestUtil.GetLocalDatanodeInfo(), 2);
            }
            catch (RemoteException e)
            {
                getException = true;
                NUnit.Framework.Assert.IsTrue(e.GetClassName().Contains("HadoopIllegalArgumentException"));
            }
            NUnit.Framework.Assert.IsTrue(getException);
        }
Example #28
        public virtual void TestConvertRecoveringBlock()
        {
            DatanodeInfo di1 = DFSTestUtil.GetLocalDatanodeInfo();
            DatanodeInfo di2 = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
            BlockRecoveryCommand.RecoveringBlock b =
                new BlockRecoveryCommand.RecoveringBlock(GetExtendedBlock(), dnInfo, 3);
            HdfsProtos.RecoveringBlockProto      bProto = PBHelper.Convert(b);
            BlockRecoveryCommand.RecoveringBlock b1     = PBHelper.Convert(bProto);
            NUnit.Framework.Assert.AreEqual(b.GetBlock(), b1.GetBlock());
            DatanodeInfo[] dnInfo1 = b1.GetLocations();
            NUnit.Framework.Assert.AreEqual(dnInfo.Length, dnInfo1.Length);
            for (int i = 0; i < dnInfo.Length; i++)
            {
                Compare(dnInfo[i], dnInfo1[i]);
            }
        }
Example #29
        private LocatedBlock CreateLocatedBlock()
        {
            DatanodeInfo[] dnInfos = new DatanodeInfo[] {
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h1", DatanodeInfo.AdminStates.DecommissionInprogress),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h2", DatanodeInfo.AdminStates.Decommissioned),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h3", DatanodeInfo.AdminStates.Normal),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h4", DatanodeInfo.AdminStates.Normal) };
            string[]      storageIDs = new string[] { "s1", "s2", "s3", "s4" };
            StorageType[] media      = new StorageType[] { StorageType.Disk, StorageType.Ssd,
                                                           StorageType.Disk, StorageType.RamDisk };
            LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53), dnInfos,
                                               storageIDs, media, 5, false, new DatanodeInfo[] {  });

            lb.SetBlockToken(new Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier>(
                                 Sharpen.Runtime.GetBytesForString("identifier"),
                                 Sharpen.Runtime.GetBytesForString("password"),
                                 new Text("kind"), new Text("service")));
            return(lb);
        }
Example #30
        public virtual void TestBlockMoveAcrossStorageInSameNode()
        {
            Configuration conf = new HdfsConfiguration();
            // create only one datanode in the cluster to verify movement within
            // datanode.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                                         .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive })
                                         .Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
                DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
                LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current
                LocatedBlock   locatedBlock = locatedBlocks.Get(0);
                ExtendedBlock  block        = locatedBlock.GetBlock();
                DatanodeInfo[] locations    = locatedBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(1, locations.Length);
                StorageType[] storageTypes = locatedBlock.GetStorageTypes();
                // current block should be written to DISK
                NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
                DatanodeInfo source = locations[0];
                // move block to ARCHIVE by using same DataNodeInfo for source, proxy and
                // destination so that movement happens within datanode
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
                // wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current
                locatedBlock = locatedBlocks.Get(0);
                NUnit.Framework.Assert.AreEqual("Storage should be only one", 1, locatedBlock.GetLocations
                                                    ().Length);
                NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE", locatedBlock.GetStorageTypes
                                                  ()[0] == StorageType.Archive);
            }
            finally
            {
                cluster.Shutdown();
            }
        }