static TestReplicationPolicyWithNodeGroup()
 {
     string[] racks = new string[] { "/d1/r1/n1", "/d1/r1/n1", "/d1/r1/n2", "/d1/r2/n3",
                                     "/d1/r2/n3", "/d1/r2/n4", "/d2/r3/n5", "/d2/r3/n6" };
     storages  = DFSTestUtil.CreateDatanodeStorageInfos(racks);
     dataNodes = DFSTestUtil.ToDatanodeDescriptor(storages);
 }
 static TestReplicationPolicyWithNodeGroup()
 {
     string[] racksInMoreTargetsCase = new string[] { "/r1/n1", "/r1/n1", "/r1/n2", "/r1/n2",
                                                      "/r1/n3", "/r1/n3", "/r2/n4", "/r2/n4",
                                                      "/r2/n5", "/r2/n5", "/r2/n6", "/r2/n6" };
     storagesInMoreTargetsCase  = DFSTestUtil.CreateDatanodeStorageInfos(racksInMoreTargetsCase);
     dataNodesInMoreTargetsCase = DFSTestUtil.ToDatanodeDescriptor(storagesInMoreTargetsCase);
 }
 static TestReplicationPolicyWithNodeGroup()
 {
     string[] racksInBoundaryCase = new string[] { "/d1/r1/n1", "/d1/r1/n1", "/d1/r1/n1",
                                                   "/d1/r1/n2", "/d1/r2/n3", "/d1/r2/n3" };
     storagesInBoundaryCase  = DFSTestUtil.CreateDatanodeStorageInfos(racksInBoundaryCase);
     dataNodesInBoundaryCase = DFSTestUtil.ToDatanodeDescriptor(storagesInBoundaryCase);
 }
 static TestReplicationPolicyWithNodeGroup()
 {
     string[] racksForDependencies = new string[] { "/d1/r1/n1", "/d1/r1/n1", "/d1/r1/n2",
                                                    "/d1/r1/n2", "/d1/r1/n3", "/d1/r1/n4" };
     string[] hostNamesForDependencies = new string[] { "h1", "h2", "h3", "h4", "h5", "h6" };
     storagesForDependencies  = DFSTestUtil.CreateDatanodeStorageInfos(racksForDependencies,
                                                                       hostNamesForDependencies);
     dataNodesForDependencies = DFSTestUtil.ToDatanodeDescriptor(storagesForDependencies);
 }
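
The rack strings above follow HDFS's node-group topology: /data-center/rack/node-group (or just /rack/node-group in the "more targets" case). Each initializer pairs a DatanodeStorageInfo array with the DatanodeDescriptor array extracted from it. A minimal sanity-check sketch, assuming the same DFSTestUtil helpers and an NUnit context as in the snippets above, is that the two arrays line up index for index:

static void SanityCheckTopologyArrays()
{
    // Each storage built from racks[i] should wrap the descriptor stored at dataNodes[i];
    // GetDatanodeDescriptor() is the accessor already used later in these tests.
    for (int i = 0; i < storages.Length; i++)
    {
        NUnit.Framework.Assert.AreSame(dataNodes[i], storages[i].GetDatanodeDescriptor());
    }
}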
        public virtual void SetupMockCluster()
        {
            Configuration conf = new HdfsConfiguration();

             conf.Set(DFSConfigKeys.NetTopologyScriptFileNameKey,
                      "need to set a dummy value here so it assumes a multi-rack cluster");
             fsn = Org.Mockito.Mockito.Mock<FSNamesystem>();
            Org.Mockito.Mockito.DoReturn(true).When(fsn).HasWriteLock();
            bm = new BlockManager(fsn, conf);
            string[] racks = new string[] { "/rackA", "/rackA", "/rackA", "/rackB", "/rackB",
                                            "/rackB" };
            storages = DFSTestUtil.CreateDatanodeStorageInfos(racks);
            nodes    = Arrays.AsList(DFSTestUtil.ToDatanodeDescriptor(storages));
            rackA    = nodes.SubList(0, 3);
            rackB    = nodes.SubList(3, 6);
        }
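
Setting NetTopologyScriptFileNameKey to any non-empty value makes the BlockManager treat the cluster as multi-rack, and mocking FSNamesystem.HasWriteLock() to return true satisfies the write-lock assertions inside BlockManager calls. A hedged follow-up check, assuming GetNetworkLocation() is the usual accessor on DatanodeDescriptor in this port (it mirrors the Java DatanodeInfo method), would confirm that the two sublists really split the nodes by rack:

         public virtual void CheckRackSublists()
         {
             // Sketch only: rackA holds the first three descriptors, rackB the last three.
             foreach (DatanodeDescriptor node in rackA)
             {
                 NUnit.Framework.Assert.AreEqual("/rackA", node.GetNetworkLocation());
             }
             foreach (DatanodeDescriptor node in rackB)
             {
                 NUnit.Framework.Assert.AreEqual("/rackB", node.GetNetworkLocation());
             }
         }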
        public static void SetupCluster()
        {
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
                                            "/rack2" };
            storages  = DFSTestUtil.CreateDatanodeStorageInfos(racks);
            dataNodes = DFSTestUtil.ToDatanodeDescriptor(storages);
            FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
            conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
            FilePath baseDir = PathUtils.GetTestDir(typeof(TestReplicationPolicy));

             conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(baseDir, "name").GetPath());
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForWriteKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, true);
            DFSTestUtil.FormatNameNode(conf);
            namenode = new NameNode(conf);
            int blockSize = 1024;

             dnrList   = new AList<DatanodeRegistration>();
            dnManager = namenode.GetNamesystem().GetBlockManager().GetDatanodeManager();
            // Register DNs
            for (int i = 0; i < 6; i++)
            {
                 DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
                     new StorageInfo(HdfsServerConstants.NodeType.DataNode),
                     new ExportedBlockKeys(), VersionInfo.GetVersion());
                 dnrList.AddItem(dnr);
                 dnManager.RegisterDatanode(dnr);
                 dataNodes[i].GetStorageInfos()[0].SetUtilizationForTesting(
                     2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L,
                     2 * HdfsConstants.MinBlocksForWrite * blockSize, 0L);
                 dataNodes[i].UpdateHeartbeat(
                     BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[i]), 0L, 0L, 0, 0, null);
            }
        }
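
SetupCluster formats and starts an in-process NameNode, registers six datanodes, and gives each one a heartbeat reporting 2 * MinBlocksForWrite * blockSize of both capacity and remaining space, so every node initially looks writable and equally loaded. Test classes written this way normally pair the setup with a teardown that stops the NameNode; a sketch of that counterpart, assuming the port keeps the Java Stop() method on NameNode, is:

         public static void TeardownCluster()
         {
             // Assumption: NameNode.Stop() is the ported equivalent of the Java namenode.stop().
             if (namenode != null)
             {
                 namenode.Stop();
             }
         }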
        public virtual void TestPendingReplication()
        {
            PendingReplicationBlocks pendingReplications;

            pendingReplications = new PendingReplicationBlocks(Timeout * 1000);
            pendingReplications.Start();
            //
            // Add 10 blocks to pendingReplications.
            //
            DatanodeStorageInfo[] storages = DFSTestUtil.CreateDatanodeStorageInfos(10);
            for (int i = 0; i < storages.Length; i++)
            {
                Block block = new Block(i, i, 0);
                DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
                System.Array.Copy(storages, 0, targets, 0, i);
                 pendingReplications.Increment(block,
                     DatanodeStorageInfo.ToDatanodeDescriptors(targets));
            }
            NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 10, pendingReplications
                                            .Size());
            //
            // remove one item and reinsert it
            //
            Block blk = new Block(8, 8, 0);

            pendingReplications.Decrement(blk, storages[7].GetDatanodeDescriptor());
            // removes one replica
            NUnit.Framework.Assert.AreEqual("pendingReplications.getNumReplicas ", 7, pendingReplications
                                            .GetNumReplicas(blk));
            for (int i_1 = 0; i_1 < 7; i_1++)
            {
                // removes all replicas
                pendingReplications.Decrement(blk, storages[i_1].GetDatanodeDescriptor());
            }
            NUnit.Framework.Assert.IsTrue(pendingReplications.Size() == 9);
             pendingReplications.Increment(blk,
                 DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil.CreateDatanodeStorageInfos(8)));
            NUnit.Framework.Assert.IsTrue(pendingReplications.Size() == 10);
            //
            // verify that the number of replicas returned
            // are sane.
            //
            for (int i_2 = 0; i_2 < 10; i_2++)
            {
                Block block       = new Block(i_2, i_2, 0);
                int   numReplicas = pendingReplications.GetNumReplicas(block);
                NUnit.Framework.Assert.IsTrue(numReplicas == i_2);
            }
            //
            // verify that nothing has timed out so far
            //
            NUnit.Framework.Assert.IsTrue(pendingReplications.GetTimedOutBlocks() == null);
            //
            // Wait for one second and then insert some more items.
            //
            try
            {
                Sharpen.Thread.Sleep(1000);
            }
            catch (Exception)
            {
            }
            for (int i_3 = 10; i_3 < 15; i_3++)
            {
                Block block = new Block(i_3, i_3, 0);
                 pendingReplications.Increment(block,
                     DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil.CreateDatanodeStorageInfos(i_3)));
            }
            NUnit.Framework.Assert.IsTrue(pendingReplications.Size() == 15);
            //
            // Wait for everything to timeout.
            //
            int loop = 0;

            while (pendingReplications.Size() > 0)
            {
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                }
                loop++;
            }
             System.Console.Out.WriteLine("Had to wait for " + loop + " seconds for the lot to timeout");
            //
            // Verify that everything has timed out.
            //
            NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 0, pendingReplications
                                            .Size());
            Block[] timedOut = pendingReplications.GetTimedOutBlocks();
            NUnit.Framework.Assert.IsTrue(timedOut != null && timedOut.Length == 15);
            for (int i_4 = 0; i_4 < timedOut.Length; i_4++)
            {
                NUnit.Framework.Assert.IsTrue(timedOut[i_4].GetBlockId() < 15);
            }
            pendingReplications.Stop();
        }
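
TestPendingReplication drives PendingReplicationBlocks directly: Increment records how many replicas of a block are still in flight, Decrement acknowledges one arriving replica, and entries that are never fully acknowledged expire after the timeout passed to the constructor (milliseconds, hence Timeout * 1000). The unbounded polling loop above can be made safer with a bounded wait; a sketch using only the calls already shown, with illustrative names, is:

         private static bool WaitForPendingToDrain(PendingReplicationBlocks pending, int maxSeconds)
         {
             // Poll once per second, but give up after maxSeconds instead of spinning forever.
             for (int waited = 0; waited < maxSeconds && pending.Size() > 0; waited++)
             {
                 try
                 {
                     Sharpen.Thread.Sleep(1000);
                 }
                 catch (Exception)
                 {
                 }
             }
             return pending.Size() == 0;
         }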
        public virtual void TestProcessPendingReplications()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Timeout);
            MiniDFSCluster      cluster = null;
            Block               block;
            BlockInfoContiguous blockInfo;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
                cluster.WaitActive();
                FSNamesystem             fsn                 = cluster.GetNamesystem();
                BlockManager             blkManager          = fsn.GetBlockManager();
                PendingReplicationBlocks pendingReplications = blkManager.pendingReplications;
                UnderReplicatedBlocks    neededReplications  = blkManager.neededReplications;
                BlocksMap blocksMap = blkManager.blocksMap;
                //
                // Add 1 block to pendingReplications with GenerationStamp = 0.
                //
                block     = new Block(1, 1, 0);
                blockInfo = new BlockInfoContiguous(block, (short)3);
                 pendingReplications.Increment(block,
                     DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil.CreateDatanodeStorageInfos(1)));
                 BlockCollection bc = Org.Mockito.Mockito.Mock<BlockCollection>();
                 Org.Mockito.Mockito.DoReturn((short)3).When(bc).GetBlockReplication();
                // Place into blocksmap with GenerationStamp = 1
                blockInfo.SetGenerationStamp(1);
                blocksMap.AddBlockCollection(blockInfo, bc);
                NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 1, pendingReplications
                                                .Size());
                // Add a second block to pendingReplications that has no
                // corresponding entry in blocksmap
                block = new Block(2, 2, 0);
                 pendingReplications.Increment(block,
                     DatanodeStorageInfo.ToDatanodeDescriptors(DFSTestUtil.CreateDatanodeStorageInfos(1)));
                // verify 2 blocks in pendingReplications
                NUnit.Framework.Assert.AreEqual("Size of pendingReplications ", 2, pendingReplications
                                                .Size());
                //
                // Wait for everything to timeout.
                //
                while (pendingReplications.Size() > 0)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(100);
                    }
                    catch (Exception)
                    {
                    }
                }
                //
                // Verify that block moves to neededReplications
                //
                while (neededReplications.Size() == 0)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(100);
                    }
                    catch (Exception)
                    {
                    }
                }
                // Verify that the generation stamp we will try to replicate
                // is now 1
                foreach (Block b in neededReplications)
                {
                    NUnit.Framework.Assert.AreEqual("Generation stamp is 1 ", 1, b.GetGenerationStamp
                                                        ());
                }
                // Verify size of neededReplications is exactly 1.
                NUnit.Framework.Assert.AreEqual("size of neededReplications is 1 ", 1, neededReplications
                                                .Size());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
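
Both tests reference a Timeout constant (in seconds) and, in the second one, a DatanodeCount constant that are declared elsewhere in the class. A hedged sketch of what those declarations typically look like in this kind of test, with the exact values being an assumption rather than taken from this section, is:

         // Illustrative only: small values keep the timeout-driven tests fast.
         private const int Timeout = 3;        // pending-replication timeout, in seconds
         private const int DatanodeCount = 5;  // datanodes started by the MiniDFSCluster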