/// <summary>
/// Blocks deleted while every DataNode is down must appear as pending
/// deletions after a NameNode restart, and drain once the DataNodes come
/// back, report their blocks, and the NameNode restarts again.
/// </summary>
public virtual void TestPendingDeleteUnknownBlocks()
        {
            const int fileNum = 5;

            // Create a group of files; each file contains exactly 1 block.
            Path[] files = new Path[fileNum];
            MiniDFSCluster.DataNodeProperties[] dnprops =
                new MiniDFSCluster.DataNodeProperties[Replication];
            for (int i = 0; i < fileNum; i++)
            {
                files[i] = new Path("/file" + i);
                DFSTestUtil.CreateFile(dfs, files[i], Blocksize, Replication, i);
            }
            // Wait until every DataNode holds its replicas, then stop them all.
            WaitForReplication();
            for (int dn = Replication - 1; dn >= 0; dn--)
            {
                dnprops[dn] = cluster.StopDataNode(dn);
            }
            Sharpen.Thread.Sleep(2000);
            // Delete 2 files; 3 files remain so that every DN storage is still
            // covered by at least one block.
            for (int idx = 0; idx < 2; idx++)
            {
                dfs.Delete(files[idx], true);
            }
            // Restart the NameNode only; the DataNodes are still down.
            cluster.RestartNameNode(false);
            // Replace the block manager's invalidateBlocks with a spy whose
            // invalidation delay is stubbed to 1.
            InvalidateBlocks invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(
                cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
            InvalidateBlocks mockIb = Org.Mockito.Mockito.Spy(invalidateBlocks);
            Org.Mockito.Mockito.DoReturn(1L).When(mockIb).GetInvalidationDelay();
            Whitebox.SetInternalState(cluster.GetNamesystem().GetBlockManager(),
                                      "invalidateBlocks", mockIb);
            // Nothing can be pending yet: no DataNode has reported.
            NUnit.Framework.Assert.AreEqual(0L, cluster.GetNamesystem().GetPendingDeletionBlocks());
            // Bring the DataNodes back and force block reports.
            for (int dn = 0; dn < Replication; dn++)
            {
                cluster.RestartDataNode(dnprops[dn], true);
            }
            cluster.WaitActive();
            for (int dn = 0; dn < Replication; dn++)
            {
                DataNodeTestUtils.TriggerBlockReport(cluster.GetDataNodes()[dn]);
            }
            Sharpen.Thread.Sleep(2000);
            // Confirm the block reports arrived by checking the total block count,
            // then check the replicas of the deleted files are pending deletion.
            NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
            NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetPendingDeletionBlocks());
            // After one more NameNode restart the pending deletions should drain.
            cluster.RestartNameNode(true);
            Sharpen.Thread.Sleep(6000);
            NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
            NUnit.Framework.Assert.AreEqual(0, cluster.GetNamesystem().GetPendingDeletionBlocks());
        }
// Example #2
 // let the NN finish deletion
 /// <summary>Verify block locations after running the migration tool.</summary>
 /// <param name="verifyAll">when true, also verify the namespace contents.</param>
 /// <exception cref="System.Exception"/>
 internal virtual void Verify(bool verifyAll)
 {
     // Push a fresh block report from every DataNode before verifying.
     foreach (DataNode node in this.cluster.GetDataNodes())
     {
         DataNodeTestUtils.TriggerBlockReport(node);
     }
     if (!verifyAll)
     {
         return;
     }
     this.VerifyNamespace();
 }
// Example #3
        /// <summary>
        /// Start a one-DataNode cluster, send a fake heartbeat that omits one
        /// storage, and check how many storages the NameNode keeps for the node.
        /// </summary>
        /// <param name="testCaseName">used as the file name when a file is created.</param>
        /// <param name="createFiles">whether to write a file before pruning.</param>
        /// <param name="numInitialStorages">storages configured on the DataNode.</param>
        /// <param name="expectedStoragesAfterTest">storages expected after the heartbeat.</param>
        /// <exception cref="System.IO.IOException"/>
        private static void RunTest(string testCaseName, bool createFiles, int numInitialStorages
                                    , int expectedStoragesAfterTest)
        {
            Configuration conf = new HdfsConfiguration();
            MiniDFSCluster miniCluster = null;
            try
            {
                miniCluster = new MiniDFSCluster.Builder(conf)
                    .NumDataNodes(1)
                    .StoragesPerDatanode(numInitialStorages)
                    .Build();
                miniCluster.WaitActive();
                DataNode dataNode = miniCluster.GetDataNodes()[0];
                // The NameNode must already know about every configured storage.
                DatanodeID nodeId = dataNode.GetDatanodeId();
                DatanodeDescriptor descriptor = miniCluster.GetNamesystem().GetBlockManager()
                    .GetDatanodeManager().GetDatanode(nodeId);
                Assert.AssertThat(descriptor.GetStorageInfos().Length, IS.Is(numInitialStorages));
                string bpid = miniCluster.GetNamesystem().GetBlockPoolId();
                DatanodeRegistration registration = dataNode.GetDNRegistrationForBP(bpid);
                DataNodeTestUtils.TriggerBlockReport(dataNode);
                if (createFiles)
                {
                    Path path = new Path("/", testCaseName);
                    DFSTestUtil.CreateFile(miniCluster.GetFileSystem(), path, 1024, (short)1,
                                           unchecked ((int)(0x1BAD5EED)));
                    DataNodeTestUtils.TriggerBlockReport(dataNode);
                }
                // Build a fake StorageReport array with the last storage dropped.
                StorageReport[] allReports = dataNode.GetFSDataset().GetStorageReports(bpid);
                StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
                System.Array.Copy(allReports, 0, prunedReports, 0, prunedReports.Length);
                // Stop the DataNode, then send the heartbeat with the missing storage.
                miniCluster.StopDataNode(0);
                miniCluster.GetNameNodeRpc().SendHeartbeat(registration, prunedReports,
                                                           0L, 0L, 0, 0, 0, null);
                // The omitted storage should now be pruned from the descriptor.
                Assert.AssertThat(descriptor.GetStorageInfos().Length, IS.Is(expectedStoragesAfterTest));
            }
            finally
            {
                if (miniCluster != null)
                {
                    miniCluster.Shutdown();
                }
            }
        }
// Example #4
        /// <summary>
        /// Regression test for HDFS-7960.<p/>
        /// Shutting down a datanode, removing a storage directory, and restarting
        /// the DataNode should not produce zombie storages.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRemovingStorageDoesNotProduceZombies()
        {
            Configuration conf = new HdfsConfiguration();

            // Tolerate one failed volume so the DataNode can still restart after a
            // volume directory is replaced with a plain file below.
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            int            NumStoragesPerDn = 2;
            MiniDFSCluster cluster          = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StoragesPerDatanode
                                                  (NumStoragesPerDn).Build();

            try
            {
                cluster.WaitActive();
                // Sanity check: the NameNode sees every configured storage on each DN.
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, cluster.GetNamesystem().GetBlockManager
                                                        ().GetDatanodeManager().GetDatanode(dn.GetDatanodeId()).GetStorageInfos().Length
                                                    );
                }
                // Create a file which will end up on all 3 datanodes.
                Path TestPath            = new Path("/foo1");
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, 1024, (short)3, unchecked ((int)(0xcafecafe))
                                       );
                foreach (DataNode dn_1 in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.TriggerBlockReport(dn_1);
                }
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path("/foo1"));
                // Hold the FSNamesystem write lock while iterating the block's
                // storages; released in the finally below.
                cluster.GetNamesystem().WriteLock();
                string storageIdToRemove;
                string datanodeUuid;
                // Find the first storage which this block is in.
                try
                {
                    IEnumerator <DatanodeStorageInfo> storageInfoIter = cluster.GetNamesystem().GetBlockManager
                                                                            ().GetStorages(block.GetLocalBlock()).GetEnumerator();
                    NUnit.Framework.Assert.IsTrue(storageInfoIter.HasNext());
                    DatanodeStorageInfo info = storageInfoIter.Next();
                    storageIdToRemove = info.GetStorageID();
                    datanodeUuid      = info.GetDatanodeDescriptor().GetDatanodeUuid();
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
                // Find the DataNode which holds that first storage.
                DataNode datanodeToRemoveStorageFrom;
                int      datanodeToRemoveStorageFromIdx = 0;
                while (true)
                {
                    if (datanodeToRemoveStorageFromIdx >= cluster.GetDataNodes().Count)
                    {
                        // Assert.Fail throws; the assignment and break below exist only
                        // to satisfy the compiler's definite-assignment analysis.
                        NUnit.Framework.Assert.Fail("failed to find datanode with uuid " + datanodeUuid);
                        datanodeToRemoveStorageFrom = null;
                        break;
                    }
                    DataNode dn_2 = cluster.GetDataNodes()[datanodeToRemoveStorageFromIdx];
                    if (dn_2.GetDatanodeUuid().Equals(datanodeUuid))
                    {
                        datanodeToRemoveStorageFrom = dn_2;
                        break;
                    }
                    datanodeToRemoveStorageFromIdx++;
                }
                // Find the volume within the datanode which holds that first storage.
                IList <FsVolumeSpi> volumes = datanodeToRemoveStorageFrom.GetFSDataset().GetVolumes
                                                  ();
                NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, volumes.Count);
                string volumeDirectoryToRemove = null;
                foreach (FsVolumeSpi volume in volumes)
                {
                    if (volume.GetStorageID().Equals(storageIdToRemove))
                    {
                        volumeDirectoryToRemove = volume.GetBasePath();
                    }
                }
                // Shut down the datanode and remove the volume.
                // Replace the volume directory with a regular file, which will
                // cause a volume failure.  (If we merely removed the directory,
                // it would be re-initialized with a new storage ID.)
                NUnit.Framework.Assert.IsNotNull(volumeDirectoryToRemove);
                datanodeToRemoveStorageFrom.Shutdown();
                FileUtil.FullyDelete(new FilePath(volumeDirectoryToRemove));
                FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
                try
                {
                    fos.Write(1);
                }
                finally
                {
                    fos.Close();
                }
                cluster.RestartDataNode(datanodeToRemoveStorageFromIdx);
                // Wait for the NameNode to remove the storage.
                // NOTE(review): _Supplier_227 is an anonymous-class translation
                // defined elsewhere in this file; presumably it polls until the
                // storage disappears from the descriptor — verify there.
                Log.Info("waiting for the datanode to remove " + storageIdToRemove);
                GenericTestUtils.WaitFor(new _Supplier_227(cluster, datanodeToRemoveStorageFrom,
                                                           storageIdToRemove, NumStoragesPerDn), 10, 30000);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
// Example #5
 /// <summary>Force the first DataNode to send a block report to the NN, then wait.</summary>
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.Exception"/>
 protected internal void TriggerBlockReport()
 {
     // Trigger the report, then give the NameNode time to process it.
     DataNode firstNode = cluster.GetDataNodes()[0];
     DataNodeTestUtils.TriggerBlockReport(firstNode);
     Sharpen.Thread.Sleep(10 * 1000);
 }
// Example #6
        /// <summary>
        /// Decommission the datanodes one at a time and verify the reported
        /// decommissioning status after each step, then re-commission them.
        /// </summary>
        /// <remarks>
        /// NOTE(review): conf, cluster, numDatanodes, localFileSys, excludeFile and
        /// decommissionedNodes are fixture members defined elsewhere in this class.
        /// </remarks>
        public virtual void TestDecommissionStatus()
        {
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            // Sanity check: both datanodes are live before decommissioning starts.
            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live
                                                        );
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", 2, info.Length);
            DistributedFileSystem fileSys = cluster.GetFileSystem();
            DFSAdmin admin    = new DFSAdmin(cluster.GetConfiguration(0));
            short    replicas = numDatanodes;
            //
            // Decommission one node. Verify the decommission status
            //
            Path file1 = new Path("decommission.dat");

            WriteFile(fileSys, file1, replicas);
            // file2 is left open (st1 is not closed until the end), so it
            // contributes an under-construction block to the status counts below.
            Path file2             = new Path("decommission1.dat");
            FSDataOutputStream st1 = WriteIncompleteFile(fileSys, file2, replicas);

            foreach (DataNode d in cluster.GetDataNodes())
            {
                DataNodeTestUtils.TriggerBlockReport(d);
            }
            FSNamesystem    fsn = cluster.GetNamesystem();
            DatanodeManager dm  = fsn.GetBlockManager().GetDatanodeManager();

            // Decommission one node per iteration and check the accumulated state.
            for (int iteration = 0; iteration < numDatanodes; iteration++)
            {
                string downnode = DecommissionNode(fsn, client, localFileSys, iteration);
                dm.RefreshNodes(conf);
                decommissionedNodes.AddItem(downnode);
                BlockManagerTestUtil.RecheckDecommissionState(dm);
                IList <DatanodeDescriptor> decommissioningNodes = dm.GetDecommissioningNodes();
                if (iteration == 0)
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 1);
                    DatanodeDescriptor decommNode = decommissioningNodes[0];
                    CheckDecommissionStatus(decommNode, 3, 0, 1);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 1), fileSys, admin
                                                    );
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 2);
                    DatanodeDescriptor decommNode1 = decommissioningNodes[0];
                    DatanodeDescriptor decommNode2 = decommissioningNodes[1];
                    // This one is still 3,3,1 since it passed over the UC block
                    // earlier, before node 2 was decommed
                    CheckDecommissionStatus(decommNode1, 3, 3, 1);
                    // This one is 4,4,2 since it has the full state
                    CheckDecommissionStatus(decommNode2, 4, 4, 2);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 2), fileSys, admin
                                                    );
                }
            }
            // Call refreshNodes on FSNamesystem with empty exclude file.
            // This will remove the datanodes from decommissioning list and
            // make them available again.
            WriteConfigFile(localFileSys, excludeFile, null);
            dm.RefreshNodes(conf);
            st1.Close();
            CleanupFile(fileSys, file1);
            CleanupFile(fileSys, file2);
        }