Code Example #1
        public virtual void TestReduceReplFactorRespectsRackPolicy()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

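            // Two racks, with two datanodes on each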
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack2", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(racks.Length)
                .Racks(racks)
                .Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
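                // Wait until the block has 3 replicas spread across the 2 racks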
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Decrease the replication factor, make sure the deleted replica
                // was not the one that lived on the rack with only one replica,
                // ie we should still have 2 racks after reducing the repl factor.
                ReplicationFactor = 2;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #2
        public virtual void TestSufficientlySingleReplBlockUsesNewRack()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 1;
            Path          filePath          = new Path("/testFile");

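            // Three datanodes on /rack1 and a single datanode on /rack2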
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack1", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(racks.Length)
                .Racks(racks)
                .Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block with a replication factor of 1
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 0);
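                // Bump the replication factor; the new replica should be placed on
                // /rack2 so the block ends up spread across both racks.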
                ReplicationFactor = 2;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #3
        public virtual void TestUnderReplicatedUsesNewRacks()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

            // All datanodes are on the same rack
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack1", "/rack1", "/rack1" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(racks.Length)
                .Racks(racks)
                .Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
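                // All three replicas end up on the single rack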
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 0);
                // Add new datanodes on a different rack and increase the
                // replication factor so the block is underreplicated and make
                // sure at least one of the hosts on the new rack is used.
                string[] newRacks = new string[] { "/rack2", "/rack2" };
                cluster.StartDataNodes(conf, 2, true, null, newRacks);
                ReplicationFactor = 5;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #4
        public virtual void TestExcessBlocks()
        {
            Path file = GetTestPath("testExcessBlocks");

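            // Create the file with replication 2, then drop it to 1 so one replica
            // becomes excess and the ExcessBlocks gauge goes to 1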
            CreateFile(file, 100, (short)2);
            NameNodeAdapter.SetReplication(namesystem, file.ToString(), (short)1);
            UpdateMetrics();
            MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);

            MetricsAsserts.AssertGauge("ExcessBlocks", 1L, rb);
            // verify ExcessBlocks metric is decremented and
            // excessReplicateMap is cleared after deleting a file
            fs.Delete(file, true);
            rb = MetricsAsserts.GetMetrics(NsMetrics);
            MetricsAsserts.AssertGauge("ExcessBlocks", 0L, rb);
            NUnit.Framework.Assert.IsTrue(bm.excessReplicateMap.IsEmpty());
        }
Code Example #5
        public virtual void TestProcesOverReplicateBlock()
        {
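            // Corrupt one replica, restart its datanode so the corruption is detected,
            // then drop the replication factor to 1 and check that processing the
            // over-replicated block still leaves one live (non-corrupt) replica.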
            Configuration conf = new HdfsConfiguration();

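            // Long datanode scan period, frequent (1s) block reports, and a short (2s)
            // pending-replication timeout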
            conf.SetLong(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, 100L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey,
                     Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                // corrupt the block on datanode 0
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // remove block scanner log to trigger block scanning
                FilePath scanCursor = new FilePath(
                    new FilePath(
                        MiniDFSCluster.GetFinalizedDir(cluster.GetInstanceStorageDir(0, 0),
                            cluster.GetNamesystem().GetBlockPoolId()).GetParent()).GetParent(),
                    "scanner.cursor");
                // wait for one minute for deletion to succeed
                for (int i = 0; !scanCursor.Delete(); i++)
                {
                    NUnit.Framework.Assert.IsTrue(
                        "Could not delete " + scanCursor.GetAbsolutePath() + " in one minute", i < 60);
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
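                        // Ignore interruptions and keep retrying the delete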
                    }
                }
                // restart the datanode so the corrupt replica will be detected
                cluster.RestartDataNode(dnProps);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                string     blockPoolId     = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeID corruptDataNode = DataNodeTestUtils.GetDNRegistrationForBP(
                    cluster.GetDataNodes()[2], blockPoolId);
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        // set live datanode's remaining space to be 0
                        // so they will be chosen to be deleted when over-replication occurs
                        string corruptMachineName = corruptDataNode.GetXferAddr();
                        foreach (DatanodeDescriptor datanode in hm.GetDatanodes())
                        {
                            if (!corruptMachineName.Equals(datanode.GetXferAddr()))
                            {
                                datanode.GetStorageInfos()[0].SetUtilizationForTesting(100L, 100L, 0, 100L);
                                datanode.UpdateHeartbeat(
                                    BlockManagerTestUtil.GetStorageReportsForDatanode(datanode),
                                    0L, 0L, 0, 0, null);
                            }
                        }
                        // decrease the replication factor to 1;
                        NameNodeAdapter.SetReplication(namesystem, fileName.ToString(), (short)1);
                        // corrupt one won't be chosen to be excess one
                        // without 4910 the number of live replicas would be 0: block gets lost
                        NUnit.Framework.Assert.AreEqual(
                            1, bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }