Example #1
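This test exercises BlockPlacementPolicyDefault.ChooseTarget() against a partially decommissioned cluster: it reports xceiver counts of 2, 4 and 4 through heartbeats, checks the in-service xceiver average before and after decommissioning the first three datanodes, and then verifies that all three chosen targets come from the nodes that are still in service.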
 public virtual void TestChooseTargetWithDecomNodes()
 {
     namenode.GetNamesystem().WriteLock();
     try
     {
         string blockPoolId = namenode.GetNamesystem().GetBlockPoolId();
         dnManager.HandleHeartbeat(dnrList[3],
             BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[3]), blockPoolId,
             dataNodes[3].GetCacheCapacity(), dataNodes[3].GetCacheRemaining(), 2, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[4],
             BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[4]), blockPoolId,
             dataNodes[4].GetCacheCapacity(), dataNodes[4].GetCacheRemaining(), 4, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[5],
             BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[5]), blockPoolId,
             dataNodes[5].GetCacheCapacity(), dataNodes[5].GetCacheRemaining(), 4, 0, 0, null);
         // sum of the xceiver counts reported in the heartbeats above
         int          load = 2 + 4 + 4;
         FSNamesystem fsn  = namenode.GetNamesystem();
         NUnit.Framework.Assert.AreEqual((double)load / 6,
             dnManager.GetFSClusterStats().GetInServiceXceiverAverage(), Epsilon);
         // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
         // returns false
         for (int i = 0; i < 3; i++)
         {
             DatanodeDescriptor d = dnManager.GetDatanode(dnrList[i]);
             dnManager.GetDecomManager().StartDecommission(d);
             d.SetDecommissioned();
         }
         NUnit.Framework.Assert.AreEqual((double)load / 3,
             dnManager.GetFSClusterStats().GetInServiceXceiverAverage(), Epsilon);
         // refresh the reference to the writer DN so it reflects the decommissioned state
         IList<DatanodeDescriptor> liveNodes = new AList<DatanodeDescriptor>();
         dnManager.FetchDatanodes(liveNodes, null, false);
         DatanodeDescriptor writerDn = null;
         if (liveNodes.Contains(dataNodes[0]))
         {
             writerDn = liveNodes[liveNodes.IndexOf(dataNodes[0])];
         }
         // Call chooseTarget()
         DatanodeStorageInfo[] targets = namenode.GetNamesystem().GetBlockManager()
             .GetBlockPlacementPolicy().ChooseTarget("testFile.txt", 3, writerDn,
                 new AList<DatanodeStorageInfo>(), false, null, 1024,
                 TestBlockStoragePolicy.DefaultStoragePolicy);
         NUnit.Framework.Assert.AreEqual(3, targets.Length);
         ICollection<DatanodeStorageInfo> targetSet =
             new HashSet<DatanodeStorageInfo>(Arrays.AsList(targets));
         for (int i_1 = 3; i_1 < storages.Length; i_1++)
         {
             NUnit.Framework.Assert.IsTrue(targetSet.Contains(storages[i_1]));
         }
     }
     finally
     {
         dataNodes[0].StopDecommission();
         dataNodes[1].StopDecommission();
         dataNodes[2].StopDecommission();
         namenode.GetNamesystem().WriteUnlock();
     }
 }
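 // Helper: pushes a synthetic storage report and heartbeat for the given datanode using the
 // supplied capacity, usage, cache and xceiver/volume-failure figures.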
 private static void UpdateHeartbeatWithUsage(DatanodeDescriptor dn, long capacity,
                                              long dfsUsed, long remaining, long blockPoolUsed,
                                              long dnCacheCapacity, long dnCacheUsed,
                                              int xceiverCount, int volFailures)
 {
     dn.GetStorageInfos()[0].SetUtilizationForTesting(capacity, dfsUsed, remaining, blockPoolUsed);
     dn.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(dn),
                        dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
 }
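        // Helper: adds each datanode to the network topology, seeds it with enough free space
        // for writes, sends an initial heartbeat, and refreshes the multi-rack state.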
        private void AddNodes(IEnumerable <DatanodeDescriptor> nodesToAdd)
        {
            NetworkTopology cluster = bm.GetDatanodeManager().GetNetworkTopology();

            // construct network topology
            foreach (DatanodeDescriptor dn in nodesToAdd)
            {
                cluster.Add(dn);
                dn.GetStorageInfos()[0].SetUtilizationForTesting(
                    2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L,
                    2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L);
                dn.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(dn),
                    0L, 0L, 0, 0, null);
                bm.GetDatanodeManager().CheckIfClusterIsNowMultiRack(dn);
            }
        }
Example #4
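This setup fixture builds a six-node, two-rack cluster entirely in-process: it creates datanode storage descriptors, starts a NameNode configured to avoid stale datanodes and to consider load when placing replicas, registers each datanode with the DatanodeManager, and seeds every node with enough free space for writes.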
        public static void SetupCluster()
        {
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
                                            "/rack2" };
            storages  = DFSTestUtil.CreateDatanodeStorageInfos(racks);
            dataNodes = DFSTestUtil.ToDatanodeDescriptor(storages);
            FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
            conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
            FilePath baseDir = PathUtils.GetTestDir(typeof(TestReplicationPolicy));

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
                     new FilePath(baseDir, "name").GetPath());
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForWriteKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, true);
            DFSTestUtil.FormatNameNode(conf);
            namenode = new NameNode(conf);
            int blockSize = 1024;

            dnrList   = new AList <DatanodeRegistration>();
            dnManager = namenode.GetNamesystem().GetBlockManager().GetDatanodeManager();
            // Register DNs
            for (int i = 0; i < 6; i++)
            {
                DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
                    new StorageInfo(HdfsServerConstants.NodeType.DataNode),
                    new ExportedBlockKeys(), VersionInfo.GetVersion());
                dnrList.AddItem(dnr);
                dnManager.RegisterDatanode(dnr);
                dataNodes[i].GetStorageInfos()[0].SetUtilizationForTesting(
                    2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L,
                    2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L);
                dataNodes[i].UpdateHeartbeat(
                    BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[i]),
                    0L, 0L, 0, 0, null);
            }
        }
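The following test corrupts a replica on a three-node MiniDFSCluster, restarts the affected datanode so the corruption is detected, zeroes the remaining space of the healthy datanodes, and then lowers the replication factor to 1 to confirm that the corrupt replica is not the one kept: exactly one live replica must remain.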
        public virtual void TestProcesOverReplicateBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, 100L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey,
                     Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                // corrupt the block on datanode 0
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // remove block scanner log to trigger block scanning
                FilePath scanCursor = new FilePath(
                    new FilePath(MiniDFSCluster.GetFinalizedDir(cluster.GetInstanceStorageDir(0, 0),
                        cluster.GetNamesystem().GetBlockPoolId()).GetParent()).GetParent(),
                    "scanner.cursor");
                // wait up to one minute for the deletion to succeed
                for (int i = 0; !scanCursor.Delete(); i++)
                {
                    NUnit.Framework.Assert.IsTrue(
                        "Could not delete " + scanCursor.GetAbsolutePath() + " in one minute", i < 60);
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
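                        // ignore interruptions and keep retrying the delete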
                    }
                }
                // restart the datanode so the corrupt replica will be detected
                cluster.RestartDataNode(dnProps);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                string     blockPoolId     = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeID corruptDataNode = DataNodeTestUtils.GetDNRegistrationForBP(
                    cluster.GetDataNodes()[2], blockPoolId);
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        // set each live datanode's remaining space to 0 so that, when the block
                        // becomes over-replicated, those replicas are the ones chosen for deletion
                        string corruptMachineName = corruptDataNode.GetXferAddr();
                        foreach (DatanodeDescriptor datanode in hm.GetDatanodes())
                        {
                            if (!corruptMachineName.Equals(datanode.GetXferAddr()))
                            {
                                datanode.GetStorageInfos()[0].SetUtilizationForTesting(100L, 100L, 0, 100L);
                                datanode.UpdateHeartbeat(
                                    BlockManagerTestUtil.GetStorageReportsForDatanode(datanode),
                                    0L, 0L, 0, 0, null);
                            }
                        }
                        // decrease the replication factor to 1
                        NameNodeAdapter.SetReplication(namesystem, fileName.ToString(), (short)1);
                        // the corrupt replica must not be chosen as the excess one to delete;
                        // without the 4910 fix the live replica count would drop to 0 and the block would be lost
                        NUnit.Framework.Assert.AreEqual(1,
                            bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }