Example 1
        public virtual void TestBlocksScheduledCounter()
        {
            cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
            // Open a file and write a few bytes to it.
            FSDataOutputStream output = fs.Create(new Path("/testBlockScheduledCounter"));
            for (int b = 0; b < 1024; b++)
            {
                output.Write(b);
            }
            // Flush so the namenode allocates a block for the pending data.
            output.Hflush();
            // Grab the descriptor of the (single) datanode; the scheduled-block
            // counter lives there.
            AList<DatanodeDescriptor> nodes = new AList<DatanodeDescriptor>();
            DatanodeManager manager =
                cluster.GetNamesystem().GetBlockManager().GetDatanodeManager();
            manager.FetchDatanodes(nodes, nodes, false);
            DatanodeDescriptor datanode = nodes[0];
            // While the stream is open, exactly one block is scheduled...
            NUnit.Framework.Assert.AreEqual(1, datanode.GetBlocksScheduled());
            // ...and closing the file must drop the counter back to zero.
            output.Close();
            NUnit.Framework.Assert.AreEqual(0, datanode.GetBlocksScheduled());
        }
Example 2
 public virtual void TestChooseTargetWithDecomNodes()
 {
     namenode.GetNamesystem().WriteLock();
     try
     {
         string blockPoolId = namenode.GetNamesystem().GetBlockPoolId();
         // Report xceiver loads of 2, 4 and 4 from datanodes 3..5 via heartbeats.
         dnManager.HandleHeartbeat(dnrList[3],
                                   BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[3]),
                                   blockPoolId, dataNodes[3].GetCacheCapacity(),
                                   dataNodes[3].GetCacheRemaining(), 2, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[4],
                                   BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[4]),
                                   blockPoolId, dataNodes[4].GetCacheCapacity(),
                                   dataNodes[4].GetCacheRemaining(), 4, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[5],
                                   BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[5]),
                                   blockPoolId, dataNodes[5].GetCacheCapacity(),
                                   dataNodes[5].GetCacheRemaining(), 4, 0, 0, null);
         // Sum of the xceiver counts reported in the heartbeats above.
         int load = 2 + 4 + 4;
         // With all 6 nodes in service the in-service average is load / 6.
         NUnit.Framework.Assert.AreEqual((double)load / 6,
                                         dnManager.GetFSClusterStats().GetInServiceXceiverAverage(),
                                         Epsilon);
         // Decommission DNs 0..2 so BlockPlacementPolicyDefault.isGoodTarget()
         // returns false for them.
         for (int i = 0; i < 3; i++)
         {
             DatanodeDescriptor d = dnManager.GetDatanode(dnrList[i]);
             dnManager.GetDecomManager().StartDecommission(d);
             d.SetDecommissioned();
         }
         // Only 3 nodes remain in service, so the average becomes load / 3.
         NUnit.Framework.Assert.AreEqual((double)load / 3,
                                         dnManager.GetFSClusterStats().GetInServiceXceiverAverage(),
                                         Epsilon);
         // Re-fetch the writer DN so its reference reflects the updated
         // decommission state.
         IList<DatanodeDescriptor> liveNodes = new AList<DatanodeDescriptor>();
         dnManager.FetchDatanodes(liveNodes, null, false);
         DatanodeDescriptor writerDn = null;
         if (liveNodes.Contains(dataNodes[0]))
         {
             writerDn = liveNodes[liveNodes.IndexOf(dataNodes[0])];
         }
         // chooseTarget() must skip the decommissioned nodes and return the
         // three in-service ones.
         DatanodeStorageInfo[] targets = namenode.GetNamesystem().GetBlockManager()
                                                 .GetBlockPlacementPolicy()
                                                 .ChooseTarget("testFile.txt", 3, writerDn,
                                                               new AList<DatanodeStorageInfo>(), false,
                                                               null, 1024,
                                                               TestBlockStoragePolicy.DefaultStoragePolicy);
         NUnit.Framework.Assert.AreEqual(3, targets.Length);
         ICollection<DatanodeStorageInfo> targetSet =
             new HashSet<DatanodeStorageInfo>(Arrays.AsList(targets));
         for (int i = 3; i < storages.Length; i++)
         {
             NUnit.Framework.Assert.IsTrue(targetSet.Contains(storages[i]));
         }
     }
     finally
     {
         // Restore the decommissioned nodes so later tests start from a fully
         // in-service cluster.
         for (int i = 0; i < 3; i++)
         {
             dataNodes[i].StopDecommission();
         }
         namenode.GetNamesystem().WriteUnlock();
     }
 }
Example 3
        /// <summary>
        /// Checks that a datanode stays in DECOMMISSION_INPROGRESS state when it
        /// is marked dead before its decommission has completed.
        /// </summary>
        /// <remarks>
        /// Checks that a datanode stays in DECOMMISSION_INPROGRESS state when it
        /// is marked dead before its decommission has completed, which lets the
        /// node resume the replication process once it rejoins the cluster.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDecommissionStatusAfterDNRestart()
        {
            DistributedFileSystem dfs = (DistributedFileSystem)cluster.GetFileSystem();
            // Single-block file with exactly one replica.
            Path testFile = new Path("decommission.dat");
            DFSTestUtil.CreateFile(dfs, testFile, fileSize, fileSize, fileSize, (short)1, seed);
            // Locate the DN that holds that only replica.
            RemoteIterator<LocatedFileStatus> located = dfs.ListLocatedStatus(testFile);
            BlockLocation[] locations = located.Next().GetBlockLocations();
            string replicaHolder = locations[0].GetNames()[0];
            // Start decommissioning that DN.
            FSNamesystem namesystem = cluster.GetNamesystem();
            DatanodeManager datanodeManager = namesystem.GetBlockManager().GetDatanodeManager();
            DecommissionNode(namesystem, localFileSys, replicaHolder);
            datanodeManager.RefreshNodes(conf);
            // Stop the DN while the decommission is still running. With
            // DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY set to 1 and this block
            // size, the decommission takes far longer than the test timeout, so
            // it is guaranteed to be in progress when stopDataNode is called.
            MiniDFSCluster.DataNodeProperties stoppedDn = cluster.StopDataNode(replicaHolder);
            // Poll until the namenode reports the stopped DN as dead.
            IList<DatanodeDescriptor> deadNodes = new AList<DatanodeDescriptor>();
            datanodeManager.FetchDatanodes(null, deadNodes, false);
            while (deadNodes.Count != 1)
            {
                Sharpen.Thread.Sleep(1000);
                datanodeManager.FetchDatanodes(null, deadNodes, false);
            }
            // Force removal of the dead node's blocks.
            BlockManagerTestUtil.CheckHeartbeat(namesystem.GetBlockManager());
            // Force the DatanodeManager to re-evaluate decommission state.
            BlockManagerTestUtil.RecheckDecommissionState(datanodeManager);
            // The dead DN must still be in DECOMMISSION_INPROGRESS state.
            NUnit.Framework.Assert.IsTrue("the node should be DECOMMISSION_IN_PROGRESSS",
                                          deadNodes[0].IsDecommissionInProgress());
            // DatanodeManager#getDecommissioningNodes must list the node as
            // decommissioning even though it is dead.
            IList<DatanodeDescriptor> decommissioning = datanodeManager.GetDecommissioningNodes();
            NUnit.Framework.Assert.IsTrue("The node should be be decommissioning",
                                          decommissioning.Count == 1);
            // Deleting the under-replicated file should let the node move from
            // DECOMMISSION_IN_PROGRESS to DECOMMISSIONED.
            CleanupFile(dfs, testFile);
            BlockManagerTestUtil.RecheckDecommissionState(datanodeManager);
            NUnit.Framework.Assert.IsTrue("the node should be decommissioned",
                                          deadNodes[0].IsDecommissioned());
            // Bring the node back.
            cluster.RestartDataNode(stoppedDn, true);
            cluster.WaitActive();
            // Refresh with an empty exclude file so the node leaves the
            // decommissioning list and becomes available again.
            WriteConfigFile(localFileSys, excludeFile, null);
            datanodeManager.RefreshNodes(conf);
        }
Example 4
        /// <summary>
        /// Verifies that the capacity/used/non-DFS-used/remaining figures
        /// reported per datanode and cluster-wide are internally consistent,
        /// and that the configured reserved space is excluded from the total
        /// capacity the namenode reports.
        /// </summary>
        public virtual void TestVolumeSize()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            // Set aside part of the total capacity as reserved space.
            long reserved = 10000;
            conf.SetLong(DFSConfigKeys.DfsDatanodeDuReservedKey, reserved);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem    namesystem = cluster.GetNamesystem();
                DatanodeManager dm         = cluster.GetNamesystem().GetBlockManager()
                                                    .GetDatanodeManager();
                // Ensure the data reported for each data node is right.
                IList<DatanodeDescriptor> live = new AList<DatanodeDescriptor>();
                IList<DatanodeDescriptor> dead = new AList<DatanodeDescriptor>();
                dm.FetchDatanodes(live, dead, false);
                NUnit.Framework.Assert.IsTrue(live.Count == 1);
                long  used;
                long  remaining;
                long  configCapacity;
                long  nonDFSUsed;
                long  bpUsed;
                float percentUsed;
                float percentRemaining;
                float percentBpUsed;
                foreach (DatanodeDescriptor datanode in live)
                {
                    used             = datanode.GetDfsUsed();
                    remaining        = datanode.GetRemaining();
                    nonDFSUsed       = datanode.GetNonDfsUsed();
                    configCapacity   = datanode.GetCapacity();
                    percentUsed      = datanode.GetDfsUsedPercent();
                    percentRemaining = datanode.GetRemainingPercent();
                    bpUsed           = datanode.GetBlockPoolUsed();
                    percentBpUsed    = datanode.GetBlockPoolUsedPercent();
                    // FIX: log label typo "perentUsed" -> "percentUsed".
                    Log.Info("Datanode configCapacity " + configCapacity + " used " + used
                             + " non DFS used " + nonDFSUsed + " remaining " + remaining
                             + " percentUsed " + percentUsed + " percentRemaining "
                             + percentRemaining);
                    // Capacity must equal the sum of its parts.
                    NUnit.Framework.Assert.IsTrue(configCapacity == (used + remaining + nonDFSUsed));
                    // NOTE: exact float equality is intentional here — both sides
                    // are produced by the same formula on the same inputs.
                    NUnit.Framework.Assert.IsTrue(percentUsed ==
                                                  DFSUtil.GetPercentUsed(used, configCapacity));
                    NUnit.Framework.Assert.IsTrue(percentRemaining ==
                                                  DFSUtil.GetPercentRemaining(remaining, configCapacity));
                    NUnit.Framework.Assert.IsTrue(percentBpUsed ==
                                                  DFSUtil.GetPercentUsed(bpUsed, configCapacity));
                }
                DF df = new DF(new FilePath(cluster.GetDataDirectory()), conf);
                //
                // Currently two data directories are created by the data node
                // in the MiniDFSCluster. This results in each data directory having
                // capacity equals to the disk capacity of the data directory.
                // Hence the capacity reported by the data node is twice the disk
                // capacity.
                //
                // So multiply the disk capacity and reserved space by two
                // for accommodating it.
                //
                int  numOfDataDirs = 2;
                long diskCapacity  = numOfDataDirs * df.GetCapacity();
                reserved        *= numOfDataDirs;
                configCapacity   = namesystem.GetCapacityTotal();
                used             = namesystem.GetCapacityUsed();
                nonDFSUsed       = namesystem.GetNonDfsUsedSpace();
                remaining        = namesystem.GetCapacityRemaining();
                percentUsed      = namesystem.GetPercentUsed();
                percentRemaining = namesystem.GetPercentRemaining();
                bpUsed           = namesystem.GetBlockPoolUsedSpace();
                percentBpUsed    = namesystem.GetPercentBlockPoolUsed();
                Log.Info("Data node directory " + cluster.GetDataDirectory());
                // FIX: "remaining" was logged twice; the duplicate is dropped.
                Log.Info("Name node diskCapacity " + diskCapacity + " configCapacity "
                         + configCapacity + " reserved " + reserved + " used " + used
                         + " remaining " + remaining + " nonDFSUsed " + nonDFSUsed
                         + " percentUsed " + percentUsed + " percentRemaining "
                         + percentRemaining + " bpUsed " + bpUsed
                         + " percentBpUsed " + percentBpUsed);
                // Ensure new total capacity reported excludes the reserved space.
                NUnit.Framework.Assert.IsTrue(configCapacity == diskCapacity - reserved);
                // Cluster-wide capacity must also equal the sum of its parts.
                NUnit.Framework.Assert.IsTrue(configCapacity == (used + remaining + nonDFSUsed));
                // Percentages are recomputed from used and present capacity; exact
                // equality is again intentional (same formula, same inputs).
                NUnit.Framework.Assert.IsTrue(percentUsed ==
                                              DFSUtil.GetPercentUsed(used, configCapacity));
                NUnit.Framework.Assert.IsTrue(percentBpUsed ==
                                              DFSUtil.GetPercentUsed(bpUsed, configCapacity));
                NUnit.Framework.Assert.IsTrue(percentRemaining ==
                                              ((float)remaining * 100.0f) / (float)configCapacity);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }