/// <summary>
        /// Verifies that the balancer terminates when no block movement is possible.
        /// </summary>
        /// <remarks>
        /// Builds a 4-datanode cluster: n0 and n1 on RACK0/NODEGROUP0, n2 on
        /// RACK1/NODEGROUP1 and n3 on RACK1/NODEGROUP2, then fills it to 60% with
        /// 3 replicas per block. Under the node-group placement policy, n2 and n3
        /// each hold a replica of every block (about 80% = 60% x 4 / 3 full), and
        /// the node-group balancer policy forbids moving those replicas to n0 or
        /// n1, so the balancer is expected to stop after 5 iterations without
        /// moving any block.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestBalancerEndInNoMoveProgress()
        {
            Configuration conf = CreateConf();
            long[] nodeCapacities = new long[] { Capacity, Capacity, Capacity, Capacity };
            string[] nodeRacks = new string[] { Rack0, Rack0, Rack1, Rack1 };
            string[] groups = new string[] { Nodegroup0, Nodegroup0, Nodegroup1, Nodegroup2 };
            int datanodeCount = nodeCapacities.Length;

            NUnit.Framework.Assert.AreEqual(datanodeCount, nodeRacks.Length);
            NUnit.Framework.Assert.AreEqual(datanodeCount, groups.Length);
            // Bring up the cluster with the node-group topology configured above.
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(datanodeCount)
                .Racks(nodeRacks)
                .SimulatedCapacities(nodeCapacities);
            MiniDFSClusterWithNodeGroup.SetNodeGroups(groups);
            cluster = new MiniDFSClusterWithNodeGroup(builder);
            try
            {
                cluster.WaitActive();
                client = NameNodeProxies.CreateProxy<ClientProtocol>(
                    conf, cluster.GetFileSystem(0).GetUri()).GetProxy();
                long totalCapacity = TestBalancer.Sum(nodeCapacities);
                // Fill the cluster to 60% of its total capacity, replication = 3.
                long totalUsedSpace = totalCapacity * 6 / 10;
                TestBalancer.CreateFile(cluster, filePath, totalUsedSpace / 3, (short)(3), 0);
                // The balancer should finish within 5 iterations, moving no blocks.
                RunBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
 static TestBalancerWithNodeGroup()
 {
     // NOTE(review): the stray "msec", "0.5%" and "10%+delta" comments below look
     // like leftovers from field initializers stripped out of this chunk; the
     // constants they described are not visible here — confirm against the full file.
     //msec
     // 0.5%
     // 10%+delta
     // Register the shared balancer test setup before any test in this fixture runs.
     TestBalancer.InitTestSetup();
 }
// Exemplo n.º 3 (scraper artifact separator — not part of the original source)
// 0
        /// <summary>
        /// Builds the HDFS configuration used by the balancer tests in this chunk.
        /// </summary>
        /// <returns>A fresh HdfsConfiguration initialized via TestBalancer.InitConf.</returns>
        private static Configuration CreateConf()
        {
            Configuration configuration = new HdfsConfiguration();
            TestBalancer.InitConf(configuration);
            return configuration;
        }
// Exemplo n.º 4 (scraper artifact separator — not part of the original source)
// 0
        /// <summary>
        /// Tests that the balancer works against an HA NameNode pair.
        /// </summary>
        /// <remarks>
        /// Starts a 2-datanode cluster with a simple HA topology, transitions nn1 to
        /// active, fills the cluster to 30% (a single file replicated to all
        /// datanodes), adds one empty datanode on a new rack, then runs the balancer
        /// and waits until the cluster is balanced.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestBalancerWithHANameNodes()
        {
            Configuration conf = new HdfsConfiguration();

            TestBalancer.InitConf(conf);
            long newNodeCapacity = TestBalancer.Capacity;
            // capacity of the new (initially empty) datanode
            string newNodeRack = TestBalancer.Rack2;

            // rack of the new datanode (a rack not used by the original nodes)
            // array of racks for original nodes in cluster
            string[] racks = new string[] { TestBalancer.Rack0, TestBalancer.Rack1 };
            // array of capacities of original nodes in cluster
            long[] capacities = new long[] { TestBalancer.Capacity, TestBalancer.Capacity };
            NUnit.Framework.Assert.AreEqual(capacities.Length, racks.Length);
            int numOfDatanodes = capacities.Length;

            MiniDFSNNTopology.NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
            nn1Conf.SetIpcPort(NameNode.DefaultPort);
            Configuration copiedConf = new Configuration(conf);

            cluster = new MiniDFSCluster.Builder(copiedConf).NnTopology(MiniDFSNNTopology.SimpleHATopology
                                                                            ()).NumDataNodes(capacities.Length).Racks(racks).SimulatedCapacities(capacities)
                      .Build();
            HATestUtil.SetFailoverConfigurations(cluster, conf);
            try
            {
                cluster.WaitActive();
                // Make nn1 the active NameNode, then give failover a moment to settle.
                cluster.TransitionToActive(1);
                Sharpen.Thread.Sleep(500);
                client = NameNodeProxies.CreateProxy <ClientProtocol>(conf, FileSystem.GetDefaultUri
                                                                          (conf)).GetProxy();
                long totalCapacity = TestBalancer.Sum(capacities);
                // fill up the cluster to be 30% full
                long totalUsedSpace = totalCapacity * 3 / 10;
                TestBalancer.CreateFile(cluster, TestBalancer.filePath, totalUsedSpace / numOfDatanodes
                                        , (short)numOfDatanodes, 1);
                // start up an empty node with the same capacity, on a new rack
                // (newNodeRack is Rack2, not one of the original racks)
                cluster.StartDataNodes(conf, 1, true, null, new string[] { newNodeRack }, new long
                                       [] { newNodeCapacity });
                totalCapacity += newNodeCapacity;
                TestBalancer.WaitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
                // In HA mode exactly one logical nameservice URI should be visible.
                ICollection <URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
                NUnit.Framework.Assert.AreEqual(1, namenodes.Count);
                NUnit.Framework.Assert.IsTrue(namenodes.Contains(HATestUtil.GetLogicalUri(cluster
                                                                                          )));
                int r = Org.Apache.Hadoop.Hdfs.Server.Balancer.Balancer.Run(namenodes, Balancer.Parameters
                                                                            .Default, conf);
                NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), r);
                TestBalancer.WaitForBalancer(totalUsedSpace, totalCapacity, client, cluster, Balancer.Parameters
                                             .Default);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        /// <summary>
        /// Builds an HDFS configuration tuned for node-group-aware balancer tests:
        /// fixed default block size, the node-group network topology implementation,
        /// and the node-group block placement policy.
        /// </summary>
        internal static Configuration CreateConf()
        {
            Configuration configuration = new HdfsConfiguration();
            TestBalancer.InitConf(configuration);
            configuration.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Switch both topology and placement policy to the node-group-aware variants.
            configuration.Set(
                CommonConfigurationKeysPublic.NetTopologyImplKey,
                typeof(NetworkTopologyWithNodeGroup).FullName);
            configuration.Set(
                DFSConfigKeys.DfsBlockReplicatorClassnameKey,
                typeof(BlockPlacementPolicyWithNodeGroup).FullName);
            return configuration;
        }
// Exemplo n.º 6 (scraper artifact separator — not part of the original source)
// 0
 static TestStorageMover()
 {
     // Shared test configuration: small blocks plus fast heartbeat/replication
     // intervals and a short mover "moved blocks" window so tests run quickly.
     DefaultConf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
     DefaultConf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
     DefaultConf.SetLong(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 2L);
     DefaultConf.SetLong(DFSConfigKeys.DfsMoverMovedwinwidthKey, 2000L);
     // Cache the built-in storage policy suite and the three tiers used by tests.
     DefaultPolicies = BlockStoragePolicySuite.CreateDefaultSuite();
     Hot             = DefaultPolicies.GetPolicy(HdfsConstants.HotStoragePolicyName);
     Warm            = DefaultPolicies.GetPolicy(HdfsConstants.WarmStoragePolicyName);
     Cold            = DefaultPolicies.GetPolicy(HdfsConstants.ColdStoragePolicyName);
     TestBalancer.InitTestSetup();
     // Retry soon after dispatcher errors to keep overall test runtime down.
     Dispatcher.SetDelayAfterErrors(1000L);
 }
        /// <summary>
        /// Create a cluster with even distribution, add a new empty node, and verify
        /// that the balancer's rack-locality policy leaves the set of blocks on
        /// RACK0 unchanged after balancing.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestBalancerWithRackLocality()
        {
            Configuration conf = CreateConf();

            long[]   capacities     = new long[] { Capacity, Capacity };
            string[] racks          = new string[] { Rack0, Rack1 };
            string[] nodeGroups     = new string[] { Nodegroup0, Nodegroup1 };
            int      numOfDatanodes = capacities.Length;

            NUnit.Framework.Assert.AreEqual(numOfDatanodes, racks.Length);
            // Consistency fix: the sibling node-group tests also verify the nodeGroups
            // array length; this test previously skipped that sanity check.
            NUnit.Framework.Assert.AreEqual(numOfDatanodes, nodeGroups.Length);
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf).NumDataNodes(capacities
                                                                                           .Length).Racks(racks).SimulatedCapacities(capacities);
            MiniDFSClusterWithNodeGroup.SetNodeGroups(nodeGroups);
            cluster = new MiniDFSClusterWithNodeGroup(builder);
            try
            {
                cluster.WaitActive();
                client = NameNodeProxies.CreateProxy <ClientProtocol>(conf, cluster.GetFileSystem(
                                                                          0).GetUri()).GetProxy();
                long totalCapacity = TestBalancer.Sum(capacities);
                // fill up the cluster to be 30% full
                long totalUsedSpace = totalCapacity * 3 / 10;
                long length         = totalUsedSpace / numOfDatanodes;
                TestBalancer.CreateFile(cluster, filePath, length, (short)numOfDatanodes, 0);
                LocatedBlocks lbs = client.GetBlockLocations(filePath.ToUri().GetPath(), 0, length
                                                             );
                // Record which blocks live on RACK0 before balancing.
                ICollection <ExtendedBlock> before = GetBlocksOnRack(lbs.GetLocatedBlocks(), Rack0
                                                                     );
                long   newCapacity  = Capacity;
                string newNodeGroup = Nodegroup2;
                string newRack      = Rack1;
                // start up an empty node with the same capacity on RACK1, in a new node group
                cluster.StartDataNodes(conf, 1, true, null, new string[] { newRack }, new long[]
                                       { newCapacity }, new string[] { newNodeGroup });
                totalCapacity += newCapacity;
                // run balancer and validate results
                RunBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
                lbs = client.GetBlockLocations(filePath.ToUri().GetPath(), 0, length);
                // Rack locality must be preserved: the blocks on RACK0 are unchanged.
                ICollection <ExtendedBlock> after = GetBlocksOnRack(lbs.GetLocatedBlocks(), Rack0);
                NUnit.Framework.Assert.AreEqual(before, after);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
// Exemplo n.º 8 (scraper artifact separator — not part of the original source)
// 0
        /// <summary>
        /// This test start a cluster, fill the DataNodes to be 30% full;
        /// It then adds an empty node and start balancing.
        /// </summary>
        /// <param name="nNameNodes">Number of NameNodes</param>
        /// <param name="capacities">Capacities of the datanodes</param>
        /// <param name="racks">Rack names</param>
        /// <param name="newCapacity">the capacity of the new DataNode</param>
        /// <param name="newRack">the rack for the new DataNode</param>
        /// <param name="conf">Configuration</param>
        /// <exception cref="System.Exception"/>
        private void RunTest(int nNameNodes, long[] capacities, string[] racks, long newCapacity
                             , string newRack, Configuration conf)
        {
            int nDataNodes = capacities.Length;

            Log.Info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
            NUnit.Framework.Assert.AreEqual(nDataNodes, racks.Length);
            Log.Info("RUN_TEST -1");
            // Federated cluster: nNameNodes namenodes sharing the same datanodes.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf)).NnTopology
                                         (MiniDFSNNTopology.SimpleFederatedTopology(nNameNodes)).NumDataNodes(nDataNodes)
                                     .Racks(racks).SimulatedCapacities(capacities).Build();

            Log.Info("RUN_TEST 0");
            DFSTestUtil.SetFederatedConfiguration(cluster, conf);
            try
            {
                cluster.WaitActive();
                Log.Info("RUN_TEST 1");
                TestBalancerWithMultipleNameNodes.Suite s = new TestBalancerWithMultipleNameNodes.Suite
                                                                (cluster, nNameNodes, nDataNodes, conf);
                long totalCapacity = TestBalancer.Sum(capacities);
                Log.Info("RUN_TEST 2");
                // fill up the cluster to be 30% full
                long totalUsed = totalCapacity * 3 / 10;
                // each namenode writes an equal share, divided by the replication factor
                long size      = (totalUsed / nNameNodes) / s.replication;
                for (int n = 0; n < nNameNodes; n++)
                {
                    CreateFile(s, n, size);
                }
                Log.Info("RUN_TEST 3");
                // start up an empty node with the same capacity and on the same rack
                cluster.StartDataNodes(conf, 1, true, null, new string[] { newRack }, new long[]
                                       { newCapacity });
                totalCapacity += newCapacity;
                Log.Info("RUN_TEST 4");
                // run the balancer and validate results
                RunBalancer(s, totalUsed, totalCapacity);
                Log.Info("RUN_TEST 5");
            }
            finally
            {
                cluster.Shutdown();
            }
            Log.Info("RUN_TEST 6");
        }
        /// <summary>
        /// Starts an evenly loaded node-group cluster, adds one empty datanode on
        /// NODEGROUP2, and checks that the balancer redistributes blocks while
        /// honoring node-group locality.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestBalancerWithNodeGroup()
        {
            Configuration conf = CreateConf();
            long[] nodeCapacities = new long[] { Capacity, Capacity, Capacity, Capacity };
            string[] nodeRacks = new string[] { Rack0, Rack0, Rack1, Rack1 };
            string[] groups = new string[] { Nodegroup0, Nodegroup0, Nodegroup1, Nodegroup2 };
            int datanodeCount = nodeCapacities.Length;

            NUnit.Framework.Assert.AreEqual(datanodeCount, nodeRacks.Length);
            NUnit.Framework.Assert.AreEqual(datanodeCount, groups.Length);
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(datanodeCount)
                .Racks(nodeRacks)
                .SimulatedCapacities(nodeCapacities);
            MiniDFSClusterWithNodeGroup.SetNodeGroups(groups);
            cluster = new MiniDFSClusterWithNodeGroup(builder);
            try
            {
                cluster.WaitActive();
                client = NameNodeProxies.CreateProxy<ClientProtocol>(
                    conf, cluster.GetFileSystem(0).GetUri()).GetProxy();
                long totalCapacity = TestBalancer.Sum(nodeCapacities);
                // Fill the cluster to 20% of capacity, replicating to half the nodes.
                long totalUsedSpace = totalCapacity * 2 / 10;
                TestBalancer.CreateFile(cluster, filePath, totalUsedSpace / (datanodeCount / 2),
                                        (short)(datanodeCount / 2), 0);
                long addedCapacity = Capacity;
                string addedRack = Rack1;
                string addedGroup = Nodegroup2;
                // Bring up one empty datanode with the same capacity on NODEGROUP2.
                cluster.StartDataNodes(conf, 1, true, null, new string[] { addedRack },
                                       new long[] { addedCapacity }, new string[] { addedGroup });
                totalCapacity += addedCapacity;
                // Run the balancer and verify the cluster ends up balanced.
                RunBalancer(conf, totalUsedSpace, totalCapacity);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
 static TestFsckWithMultipleNameNodes()
 {
     // Register the shared balancer test setup before any test in this fixture runs.
     TestBalancer.InitTestSetup();
 }
// Exemplo n.º 11 (scraper artifact separator — not part of the original source)
// 0
 static TestBalancerWithHANameNodes()
 {
     // Register the shared balancer test setup before any test in this fixture runs.
     TestBalancer.InitTestSetup();
 }
// Exemplo n.º 12 (scraper artifact separator — not part of the original source)
// 0
        /// <summary>First start a cluster and fill the cluster up to a certain size.</summary>
        /// <remarks>
        /// First start a cluster and fill the cluster up to a certain size.
        /// Then redistribute blocks according the required distribution.
        /// Finally, balance the cluster.
        /// </remarks>
        /// <param name="nNameNodes">Number of NameNodes</param>
        /// <param name="distributionPerNN">The distribution for each NameNode.</param>
        /// <param name="capacities">Capacities of the datanodes</param>
        /// <param name="racks">Rack names</param>
        /// <param name="conf">Configuration</param>
        /// <exception cref="System.Exception"/>
        private void UnevenDistribution(int nNameNodes, long[] distributionPerNN, long[]
                                        capacities, string[] racks, Configuration conf)
        {
            Log.Info("UNEVEN 0");
            int nDataNodes = distributionPerNN.Length;

            if (capacities.Length != nDataNodes || racks.Length != nDataNodes)
            {
                throw new ArgumentException("Array length is not the same");
            }
            // calculate total space that need to be filled
            long usedSpacePerNN = TestBalancer.Sum(distributionPerNN);

            // Phase 1: bring up a temporary cluster only to generate the blocks.
            ExtendedBlock[][] blocks;
            {
                Log.Info("UNEVEN 1");
                // BUGFIX: the topology must use nNameNodes (it was hard-coded to 2);
                // the suite below is created for nNameNodes namenodes and would not
                // match the cluster whenever nNameNodes != 2.
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf)).NnTopology
                                             (MiniDFSNNTopology.SimpleFederatedTopology(nNameNodes)).NumDataNodes(nDataNodes).Racks(racks
                                                                                                                           ).SimulatedCapacities(capacities).Build();
                Log.Info("UNEVEN 2");
                try
                {
                    cluster.WaitActive();
                    DFSTestUtil.SetFederatedConfiguration(cluster, conf);
                    Log.Info("UNEVEN 3");
                    TestBalancerWithMultipleNameNodes.Suite s = new TestBalancerWithMultipleNameNodes.Suite
                                                                    (cluster, nNameNodes, nDataNodes, conf);
                    blocks = GenerateBlocks(s, usedSpacePerNN);
                    Log.Info("UNEVEN 4");
                }
                finally
                {
                    cluster.Shutdown();
                }
            }
            // Disable the safemode threshold so the restarted cluster leaves safemode
            // immediately, even before all injected blocks are reported.
            conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.0f");
            // Phase 2: restart without formatting, inject the uneven distribution,
            // then run the balancer.
            {
                Log.Info("UNEVEN 10");
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
                                                                                     .SimpleFederatedTopology(nNameNodes)).NumDataNodes(nDataNodes).Racks(racks).SimulatedCapacities
                                             (capacities).Format(false).Build();
                Log.Info("UNEVEN 11");
                try
                {
                    cluster.WaitActive();
                    Log.Info("UNEVEN 12");
                    TestBalancerWithMultipleNameNodes.Suite s = new TestBalancerWithMultipleNameNodes.Suite
                                                                    (cluster, nNameNodes, nDataNodes, conf);
                    for (int n = 0; n < nNameNodes; n++)
                    {
                        // redistribute blocks
                        Block[][] blocksDN = TestBalancer.DistributeBlocks(blocks[n], s.replication, distributionPerNN
                                                                           );
                        for (int d = 0; d < blocksDN.Length; d++)
                        {
                            cluster.InjectBlocks(n, d, Arrays.AsList(blocksDN[d]));
                        }
                        Log.Info("UNEVEN 13: n=" + n);
                    }
                    long totalCapacity = TestBalancer.Sum(capacities);
                    long totalUsed     = nNameNodes * usedSpacePerNN;
                    Log.Info("UNEVEN 14");
                    RunBalancer(s, totalUsed, totalCapacity);
                    Log.Info("UNEVEN 15");
                }
                finally
                {
                    cluster.Shutdown();
                }
                Log.Info("UNEVEN 16");
            }
        }
// Exemplo n.º 13 (scraper artifact separator — not part of the original source)
// 0
        /// <summary>
        /// Runs the balancer across all nameservices, then polls the datanode
        /// reports until every datanode's utilization is within the default
        /// threshold above the cluster-wide average.
        /// </summary>
        /// <param name="s">Test suite holding the cluster, clients and configuration.</param>
        /// <param name="totalUsed">Total used space across the cluster, in bytes.</param>
        /// <param name="totalCapacity">Total cluster capacity, in bytes.</param>
        /// <exception cref="System.Exception"/>
        internal static void RunBalancer(TestBalancerWithMultipleNameNodes.Suite s, long
                                         totalUsed, long totalCapacity)
        {
            // Target average utilization, as a percentage of capacity.
            double avg = totalUsed * 100.0 / totalCapacity;

            Log.Info("BALANCER 0: totalUsed=" + totalUsed + ", totalCapacity=" + totalCapacity
                     + ", avg=" + avg);
            Wait(s.clients, totalUsed, totalCapacity);
            Log.Info("BALANCER 1");
            // start rebalancing
            ICollection <URI> namenodes = DFSUtil.GetNsServiceRpcUris(s.conf);
            int r = Org.Apache.Hadoop.Hdfs.Server.Balancer.Balancer.Run(namenodes, Balancer.Parameters
                                                                        .Default, s.conf);

            NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), r);
            Log.Info("BALANCER 2");
            Wait(s.clients, totalUsed, totalCapacity);
            Log.Info("BALANCER 3");
            int i = 0;

            // Poll until balanced. Along the way, cross-check that every namenode's
            // client reports identical per-datanode usage and capacity.
            for (bool balanced = false; !balanced; i++)
            {
                long[] used = new long[s.cluster.GetDataNodes().Count];
                long[] cap  = new long[used.Length];
                for (int n = 0; n < s.clients.Length; n++)
                {
                    DatanodeInfo[] datanodes = s.clients[n].GetDatanodeReport(HdfsConstants.DatanodeReportType
                                                                              .All);
                    NUnit.Framework.Assert.AreEqual(datanodes.Length, used.Length);
                    for (int d = 0; d < datanodes.Length; d++)
                    {
                        if (n == 0)
                        {
                            // First client: record the reported usage and capacity.
                            used[d] = datanodes[d].GetDfsUsed();
                            cap[d]  = datanodes[d].GetCapacity();
                            // Log only every 100th iteration to avoid flooding the log.
                            if (i % 100 == 0)
                            {
                                Log.Warn("datanodes[" + d + "]: getDfsUsed()=" + datanodes[d].GetDfsUsed() + ", getCapacity()="
                                         + datanodes[d].GetCapacity());
                            }
                        }
                        else
                        {
                            // Remaining clients must agree with the first one's report.
                            NUnit.Framework.Assert.AreEqual(used[d], datanodes[d].GetDfsUsed());
                            NUnit.Framework.Assert.AreEqual(cap[d], datanodes[d].GetCapacity());
                        }
                    }
                }
                balanced = true;
                for (int d_1 = 0; d_1 < used.Length; d_1++)
                {
                    double p = used[d_1] * 100.0 / cap[d_1];
                    // NOTE(review): only over-utilization (p > avg + threshold) fails the
                    // check here; under-utilized nodes never keep the loop going.
                    balanced = p <= avg + Balancer.Parameters.Default.threshold;
                    if (!balanced)
                    {
                        if (i % 100 == 0)
                        {
                            Log.Warn("datanodes " + d_1 + " is not yet balanced: " + "used=" + used[d_1] + ", cap="
                                     + cap[d_1] + ", avg=" + avg);
                            Log.Warn("TestBalancer.sum(used)=" + TestBalancer.Sum(used) + ", TestBalancer.sum(cap)="
                                     + TestBalancer.Sum(cap));
                        }
                        // Back off briefly before re-polling the datanode reports.
                        Sleep(100);
                        break;
                    }
                }
            }
            Log.Info("BALANCER 6");
        }