public virtual void TestNameEditsConfigsFailure()
        {
            Path           file1           = new Path("TestNameEditsConfigs1");
            Path           file2           = new Path("TestNameEditsConfigs2");
            Path           file3           = new Path("TestNameEditsConfigs3");
            MiniDFSCluster cluster         = null;
            Configuration  conf            = null;
            FileSystem     fileSys         = null;
            FilePath       nameOnlyDir     = new FilePath(base_dir, "name");
            FilePath       editsOnlyDir    = new FilePath(base_dir, "edits");
            FilePath       nameAndEditsDir = new FilePath(base_dir, "name_and_edits");

            // 1
            // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                // Manage our own dfs directories
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                // Check that the dir has a VERSION file
                NUnit.Framework.Assert.IsTrue(new FilePath(nameAndEditsDir, "current/VERSION").Exists());
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                WriteFile(fileSys, file1, replication);
                CheckFile(fileSys, file1, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 2
            // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            NUnit.Framework.Assert.IsTrue(nameOnlyDir.Mkdir());
            NUnit.Framework.Assert.IsTrue(editsOnlyDir.Mkdir());
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath() + "," + nameOnlyDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath() + "," + editsOnlyDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                // Manage our own dfs directories. Do not format.
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                // Check that the dirs have a VERSION file
                NUnit.Framework.Assert.IsTrue(new FilePath(nameAndEditsDir, "current/VERSION").Exists());
                NUnit.Framework.Assert.IsTrue(new FilePath(nameOnlyDir, "current/VERSION").Exists());
                NUnit.Framework.Assert.IsTrue(new FilePath(editsOnlyDir, "current/VERSION").Exists());
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                CheckFile(fileSys, file1, replication);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file2, replication);
                CheckFile(fileSys, file2, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 3
            // Now remove common directory both have and start namenode with
            // separate name and edits dirs
            try
            {
                conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameOnlyDir.GetPath());
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, editsOnlyDir.GetPath());
                replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
                CheckFile(fileSys, file2, replication);
                CleanupFile(fileSys, file2);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 4
            // Add old shared directory for name and edits along with latest name
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameOnlyDir.GetPath() + "," + nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
                NUnit.Framework.Assert.Fail("Successfully started cluster but should not have been able to.");
            }
            catch (IOException e)
            {
                // expect to fail
                Log.Info("EXPECTED: cluster start failed due to missing " + "latest edits dir", e
                         );
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                cluster = null;
            }
            // 5
            // Add old shared directory for name and edits along with latest edits.
            // This is OK, since the latest edits will have segments leading all
            // the way from the image in name_and_edits.
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, editsOnlyDir.GetPath() + "," + nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file1));
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
                CheckFile(fileSys, file3, replication);
                CleanupFile(fileSys, file3);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
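The failure scenarios above all build on one pattern: point dfs.namenode.name.dir and dfs.namenode.edits.dir at explicit directories, then build a cluster that does not manage those directories itself. A minimal sketch of just that setup, using only the APIs shown above (the directory names are hypothetical; base_dir is the test class's base directory field):

    // Minimal sketch: explicit, self-managed name and edits storage.
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(base_dir, "myName").GetPath());
    conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, new FilePath(base_dir, "myEdits").GetPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(1)
        .ManageNameDfsDirs(false) // we own the dirs; the builder still formats them by default
        .Build();
    try
    {
        cluster.WaitActive();
    }
    finally
    {
        cluster.Shutdown();
    }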
Example #2
        /// <exception cref="System.Exception"/>
        private void TestTriggerBlockReport(bool incremental)
        {
            Configuration conf = new HdfsConfiguration();

            // Set a really long value for dfs.blockreport.intervalMsec and
            // dfs.heartbeat.interval, so that incremental block reports and heartbeats
            // won't be sent during this test unless they're triggered
            // manually.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10800000L);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1080L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();
            DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(
                cluster.GetDataNodes()[0], cluster.GetNameNode());

            DFSTestUtil.CreateFile(fs, new Path("/abc"), 16, (short)1, 1L);
            // We should get 1 incremental block report.
            Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(1)).BlockReceivedAndDeleted(
                Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
                Matchers.Any<StorageReceivedDeletedBlocks[]>());
            // We should not receive any further full or incremental block reports,
            // since the interval we configured is so long.
            for (int i = 0; i < 3; i++)
            {
                Sharpen.Thread.Sleep(10);
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Times(0)).BlockReport(
                    Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
                    Matchers.Any<StorageBlockReport[]>(), Org.Mockito.Mockito.AnyObject<BlockReportContext>());
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Times(1)).BlockReceivedAndDeleted(
                    Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
                    Matchers.Any<StorageReceivedDeletedBlocks[]>());
            }
            // Create a fake block deletion notification on the DataNode.
            // This will be sent with the next incremental block report.
            ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
                new Block(5678, 512, 1000), ReceivedDeletedBlockInfo.BlockStatus.DeletedBlock, null);
            DataNode       datanode    = cluster.GetDataNodes()[0];
            BPServiceActor actor       = datanode.GetAllBpOs()[0].GetBPServiceActors()[0];
            string         storageUuid = datanode.GetFSDataset().GetVolumes()[0].GetStorageID();

            actor.NotifyNamenodeDeletedBlock(rdbi, storageUuid);
            // Manually trigger a block report.
            datanode.TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental(incremental).Build());
            // triggerBlockReport returns before the block report is
            // actually sent.  Wait for it to be sent here.
            if (incremental)
            {
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(2)).BlockReceivedAndDeleted(
                    Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
                    Matchers.Any<StorageReceivedDeletedBlocks[]>());
            }
            else
            {
                Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000)).BlockReport(
                    Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
                    Matchers.Any<StorageBlockReport[]>(), Org.Mockito.Mockito.AnyObject<BlockReportContext>());
            }
            cluster.Shutdown();
        }
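The API this test revolves around is DataNode.TriggerBlockReport, configured through a BlockReportOptions.Factory. A hedged sketch of the trigger step alone, assuming a running MiniDFSCluster as above:

    // Sketch: force an immediate block report from the first DataNode.
    DataNode dn = cluster.GetDataNodes()[0];
    dn.TriggerBlockReport(new BlockReportOptions.Factory()
        .SetIncremental(false) // false requests a full report; true an incremental one
        .Build());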
        /// <summary>check if nn.getCorruptFiles() returns a file that has corrupted blocks</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFilesCorruptedBlock()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // datanode sends block reports
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testCorruptFilesCorruptedBlock")
                                   .SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                NameNode namenode = cluster.GetNameNode();
                ICollection<FSNamesystem.CorruptFileBlockInfo> badFiles =
                    namenode.GetNamesystem().ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None.",
                                              badFiles.Count == 0);
                // Now deliberately corrupt one block
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 1);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList <FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                util.Cleanup(fs, "/srcdat10");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
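The corrupt-file query itself is a single namesystem call. A minimal sketch, assuming a running cluster:

    // Sketch: ask the NameNode for files with corrupt blocks under "/".
    ICollection<FSNamesystem.CorruptFileBlockInfo> bad =
        cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
    Log.Info("Found " + bad.Count + " files with corrupt blocks");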
        public virtual void TestNameEditsConfigs()
        {
            Path                     file1                  = new Path("TestNameEditsConfigs1");
            Path                     file2                  = new Path("TestNameEditsConfigs2");
            Path                     file3                  = new Path("TestNameEditsConfigs3");
            MiniDFSCluster           cluster                = null;
            SecondaryNameNode        secondary              = null;
            Configuration            conf                   = null;
            FileSystem               fileSys                = null;
            FilePath                 newNameDir             = new FilePath(base_dir, "name");
            FilePath                 newEditsDir            = new FilePath(base_dir, "edits");
            FilePath                 nameAndEdits           = new FilePath(base_dir, "name_and_edits");
            FilePath                 checkpointNameDir      = new FilePath(base_dir, "secondname");
            FilePath                 checkpointEditsDir     = new FilePath(base_dir, "secondedits");
            FilePath                 checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
            ImmutableList<FilePath> allCurrentDirs = ImmutableList.Of(
                new FilePath(nameAndEdits, "current"), new FilePath(newNameDir, "current"),
                new FilePath(newEditsDir, "current"), new FilePath(checkpointNameAndEdits, "current"),
                new FilePath(checkpointNameDir, "current"), new FilePath(checkpointEditsDir, "current"));
            ImmutableList<FilePath> imageCurrentDirs = ImmutableList.Of(
                new FilePath(nameAndEdits, "current"), new FilePath(newNameDir, "current"),
                new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir, "current"));

            // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                WriteFile(fileSys, file1, replication);
                CheckFile(fileSys, file1, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
            NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey,
                     checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey,
                     checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories. Do not format.
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                      .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                CheckFile(fileSys, file1, replication);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file2, replication);
                CheckFile(fileSys, file2, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"));
            FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
            // Now remove common directory both have and start namenode with
            // separate name and edits dirs
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                      .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
                CheckFile(fileSys, file2, replication);
                CleanupFile(fileSys, file2);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // No edit logs in new name dir
            CheckImageAndEditsFilesExistence(newNameDir, true, false);
            CheckImageAndEditsFilesExistence(newEditsDir, false, true);
            CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
            CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
            // Add old name_and_edits dir. File system should not read image or edits
            // from old dir
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current")));
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits, "current")));
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey,
                     checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey,
                     checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                      .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
            CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
        }
        public virtual void TestHeartbeat()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                cluster.WaitActive();
                FSNamesystem     namesystem = cluster.GetNamesystem();
                HeartbeatManager hm = namesystem.GetBlockManager().GetDatanodeManager().GetHeartbeatManager();
                string poolId = namesystem.GetBlockPoolId();
                DatanodeRegistration nodeReg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);
                DatanodeDescriptor dd        = NameNodeAdapter.GetDatanode(namesystem, nodeReg);
                string             storageID = DatanodeStorage.GenerateUuid();
                dd.UpdateStorage(new DatanodeStorage(storageID));
                int RemainingBlocks   = 1;
                int MaxReplicateLimit = conf.GetInt(DFSConfigKeys.DfsNamenodeReplicationMaxStreamsKey, 2);
                int MaxInvalidateLimit          = DFSConfigKeys.DfsBlockInvalidateLimitDefault;
                int MaxInvalidateBlocks         = 2 * MaxInvalidateLimit + RemainingBlocks;
                int MaxReplicateBlocks          = 2 * MaxReplicateLimit + RemainingBlocks;
                DatanodeStorageInfo[] OneTarget = new DatanodeStorageInfo[] { dd.GetStorageInfo(storageID) };
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        for (int i = 0; i < MaxReplicateBlocks; i++)
                        {
                            dd.AddBlockToBeReplicated(new Block(i, 0, GenerationStamp.LastReservedStamp), OneTarget);
                        }
                        DatanodeCommand[] cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxReplicateLimit, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        AList <Block> blockList = new AList <Block>(MaxInvalidateBlocks);
                        for (int i_1 = 0; i_1 < MaxInvalidateBlocks; i_1++)
                        {
                            blockList.AddItem(new Block(i_1, 0, GenerationStamp.LastReservedStamp));
                        }
                        dd.AddBlocksToBeInvalidated(blockList);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(2, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxReplicateLimit, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[1].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxInvalidateLimit, ((BlockCommand)cmds[1]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(2, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(RemainingBlocks, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[1].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxInvalidateLimit, ((BlockCommand)cmds[1]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(RemainingBlocks, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(0, cmds.Length);
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
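The heartbeat exchanges above follow one idiom: send a heartbeat, inspect the returned DatanodeCommands, repeat. A hedged sketch of that idiom in isolation, assuming nodeReg, dd and namesystem are prepared as in the test (and, as there, that the caller holds namesystem.WriteLock()):

    // Sketch: heartbeat until the NameNode has no more commands queued.
    DatanodeCommand[] cmds;
    do
    {
        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
        foreach (DatanodeCommand cmd in cmds)
        {
            Log.Info("heartbeat returned action " + cmd.GetAction());
        }
    }
    while (cmds.Length > 0);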
Example #6
        public virtual void TestTransferRbw()
        {
            HdfsConfiguration conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                //create a file, write some data and leave it open.
                Path p    = new Path("/foo");
                int  size = (1 << 16) + Ran.Next(1 << 16);
                Log.Info("size = " + size);
                FSDataOutputStream @out  = fs.Create(p, Replication);
                byte[]             bytes = new byte[1024];
                for (int remaining = size; remaining > 0;)
                {
                    Ran.NextBytes(bytes);
                    int len = bytes.Length < remaining ? bytes.Length : remaining;
                    @out.Write(bytes, 0, len);
                    @out.Hflush();
                    remaining -= len;
                }
                //get the RBW
                ReplicaBeingWritten oldrbw;
                DataNode            newnode;
                DatanodeInfo        newnodeinfo;
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                {
                    DataNode oldnode = cluster.GetDataNodes()[0];
                    oldrbw = GetRbw(oldnode, bpid);
                    Log.Info("oldrbw = " + oldrbw);
                    //add a datanode
                    cluster.StartDataNodes(conf, 1, true, null, null);
                    newnode = cluster.GetDataNodes()[Replication];
                    DatanodeInfo oldnodeinfo;
                    {
                        DatanodeInfo[] datanodeInfos = cluster.GetNameNodeRpc()
                            .GetDatanodeReport(HdfsConstants.DatanodeReportType.Live);
                        NUnit.Framework.Assert.AreEqual(2, datanodeInfos.Length);
                        // Walk to the entry matching the new node's registration;
                        // the other entry then belongs to the old node.
                        int i = 0;
                        for (DatanodeRegistration dnReg = newnode.GetDNRegistrationForBP(bpid);
                             i < datanodeInfos.Length && !datanodeInfos[i].Equals(dnReg); i++)
                        {
                        }
                        NUnit.Framework.Assert.IsTrue(i < datanodeInfos.Length);
                        newnodeinfo = datanodeInfos[i];
                        oldnodeinfo = datanodeInfos[1 - i];
                    }
                    //transfer RBW
                    ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.GetBlockId(), oldrbw.GetBytesAcked(),
                                                        oldrbw.GetGenerationStamp());
                    DataTransferProtos.BlockOpResponseProto s = DFSTestUtil.TransferRbw(
                        b, DFSClientAdapter.GetDFSClient(fs), oldnodeinfo, newnodeinfo);
                    NUnit.Framework.Assert.AreEqual(DataTransferProtos.Status.Success, s.GetStatus());
                }
                //check new rbw
                ReplicaBeingWritten newrbw = GetRbw(newnode, bpid);
                Log.Info("newrbw = " + newrbw);
                NUnit.Framework.Assert.AreEqual(oldrbw.GetBlockId(), newrbw.GetBlockId());
                NUnit.Framework.Assert.AreEqual(oldrbw.GetGenerationStamp(), newrbw.GetGenerationStamp());
                NUnit.Framework.Assert.AreEqual(oldrbw.GetVisibleLength(), newrbw.GetVisibleLength());
                Log.Info("DONE");
            }
            finally
            {
                cluster.Shutdown();
            }
        }
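The write loop above demonstrates how a replica is kept in the RBW (replica being written) state: write and Hflush without closing the stream. A hedged sketch of that pattern alone, assuming fs comes from cluster.GetFileSystem() (the path and sizes are arbitrary):

    // Sketch: flush data while leaving the stream open, so the last
    // block stays in the replica-being-written (RBW) state.
    FSDataOutputStream @out = fs.Create(new Path("/rbw-demo"), (short)3);
    byte[] bytes = new byte[1024];
    new Random().NextBytes(bytes);
    @out.Write(bytes, 0, bytes.Length);
    @out.Hflush(); // visible to readers, but the block is not finalized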
        public virtual void TestPurgingWithNameEditsDirAfterFailure()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeNumExtraEditsRetainedKey, 0);
            FilePath sd0 = new FilePath(TestRootDir, "nn0");
            FilePath sd1 = new FilePath(TestRootDir, "nn1");
            FilePath cd0 = new FilePath(sd0, "current");
            FilePath cd1 = new FilePath(sd1, "current");

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Joiner.On(",").Join(sd0, sd1));
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                          .ManageNameDfsDirs(false).Format(true).Build();
                NameNode nn = cluster.GetNameNode();
                DoSaveNamespace(nn);
                Log.Info("After first save, images 0 and 2 should exist in both dirs");
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(0), NNStorage.GetImageFileName(2));
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(0), NNStorage.GetImageFileName(2));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(1, 2), NNStorage.GetInProgressEditsFileName(3));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(1, 2), NNStorage.GetInProgressEditsFileName(3));
                DoSaveNamespace(nn);
                Log.Info("After second save, image 0 should be purged, " + "and image 4 should exist in both."
                         );
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(3, 4), NNStorage.GetInProgressEditsFileName(5));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(3, 4), NNStorage.GetInProgressEditsFileName(5));
                Log.Info("Failing first storage dir by chmodding it");
                NUnit.Framework.Assert.AreEqual(0, FileUtil.Chmod(cd0.GetAbsolutePath(), "000"));
                DoSaveNamespace(nn);
                Log.Info("Restoring accessibility of first storage dir");
                NUnit.Framework.Assert.AreEqual(0, FileUtil.Chmod(cd0.GetAbsolutePath(), "755"));
                Log.Info("nothing should have been purged in first storage dir");
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(3, 4), NNStorage.GetInProgressEditsFileName(5));
                Log.Info("fsimage_2 should be purged in second storage dir");
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(4), NNStorage.GetImageFileName(6));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(5, 6), NNStorage.GetInProgressEditsFileName(7));
                Log.Info("On next save, we should purge logs from the failed dir," + " but not images, since the image directory is in failed state."
                         );
                DoSaveNamespace(nn);
                GenericTestUtils.AssertGlobEquals(cd1, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(6), NNStorage.GetImageFileName(8));
                GenericTestUtils.AssertGlobEquals(cd1, "edits_.*",
                                                  NNStorage.GetFinalizedEditsFileName(7, 8), NNStorage.GetInProgressEditsFileName(9));
                GenericTestUtils.AssertGlobEquals(cd0, "fsimage_\\d*",
                                                  NNStorage.GetImageFileName(2), NNStorage.GetImageFileName(4));
                GenericTestUtils.AssertGlobEquals(cd0, "edits_.*",
                                                  NNStorage.GetInProgressEditsFileName(9));
            }
            finally
            {
                FileUtil.Chmod(cd0.GetAbsolutePath(), "755");
                Log.Info("Shutting down...");
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
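The glob assertions above lean on NNStorage's deterministic file naming: an fsimage file is named for the transaction it covers, finalized edits segments for their first and last transaction, and the open segment for its first transaction. A sketch of how those expected names are produced:

    // Sketch: the storage-file names the assertions above are built from.
    string image      = NNStorage.GetImageFileName(2);             // image at txid 2
    string finalized  = NNStorage.GetFinalizedEditsFileName(1, 2); // edits segment for txids 1-2
    string inProgress = NNStorage.GetInProgressEditsFileName(3);   // open segment starting at txid 3
    Log.Info(image + " / " + finalized + " / " + inProgress);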
Example #8
 public Hdfs()
 {
     Configuration = new HdfsConfiguration();
 }
Example #9
 static Hdfs()
 {
     HdfsConfiguration.Init();
 }
        public virtual void TestMiniDFSClusterWithMultipleNN()
        {
            Configuration conf = new HdfsConfiguration();
            // start Federated cluster and add a node.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2)).Build();

            // add a node
            try
            {
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual("(1)Should be 2 namenodes", 2, cluster.GetNumNameNodes
                                                    ());
                cluster.AddNameNode(conf, 0);
                NUnit.Framework.Assert.AreEqual("(1)Should be 3 namenodes", 3, cluster.GetNumNameNodes
                                                    ());
            }
            catch (IOException ioe)
            {
                NUnit.Framework.Assert.Fail("Failed to add NN to cluster:" + StringUtils.StringifyException
                                                (ioe));
            }
            finally
            {
                cluster.Shutdown();
            }
            // 2. start with Federation flag set
            conf    = new HdfsConfiguration();
            cluster = new MiniDFSCluster.Builder(conf)
                      .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(1)).Build();
            try
            {
                NUnit.Framework.Assert.IsNotNull(cluster);
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual("(2)Should be 1 namenodes", 1, cluster.GetNumNameNodes
                                                    ());
                // add a node
                cluster.AddNameNode(conf, 0);
                NUnit.Framework.Assert.AreEqual("(2)Should be 2 namenodes", 2, cluster.GetNumNameNodes
                                                    ());
            }
            catch (IOException ioe)
            {
                NUnit.Framework.Assert.Fail("Failed to add NN to cluster:" + StringUtils.StringifyException
                                                (ioe));
            }
            finally
            {
                cluster.Shutdown();
            }
            // 3. start non-federated
            conf    = new HdfsConfiguration();
            cluster = new MiniDFSCluster.Builder(conf).Build();
            // add a node
            try
            {
                cluster.WaitActive();
                NUnit.Framework.Assert.IsNotNull(cluster);
                NUnit.Framework.Assert.AreEqual("(2)Should be 1 namenodes", 1, cluster.GetNumNameNodes
                                                    ());
                cluster.AddNameNode(conf, 9929);
                NUnit.Framework.Assert.Fail("shouldn't be able to add another NN to non federated cluster"
                                            );
            }
            catch (IOException e)
            {
                // correct
                NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("cannot add namenode"));
                NUnit.Framework.Assert.AreEqual("(3)Should be 1 namenodes", 1, cluster.GetNumNameNodes
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
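Federated startup reduces to a single builder option: NnTopology with a SimpleFederatedTopology. A minimal hedged sketch of starting a two-NameNode cluster and growing it, using only the calls shown above:

    // Sketch: two federated NameNodes, then add a third.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
        .Build();
    try
    {
        cluster.WaitActive();
        cluster.AddNameNode(conf, 0); // 0 as the port, as in the test above
    }
    finally
    {
        cluster.Shutdown();
    }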
Example #11
 static DFSck()
 {
     HdfsConfiguration.Init();
 }
Example #12
 static JournalNode()
 {
     HdfsConfiguration.Init();
 }
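Examples #9, #11 and #12 are the same one-line idiom: a static constructor that calls HdfsConfiguration.Init(), ensuring HDFS configuration resources and key mappings are registered before any Configuration is read. A hedged sketch of the idiom in a hypothetical class of your own (MyTool is not part of the library):

public class MyTool
{
    static MyTool()
    {
        // Make sure HDFS config defaults are loaded before first use.
        HdfsConfiguration.Init();
    }

    public static void Main(string[] args)
    {
        Configuration conf = new HdfsConfiguration();
        // ... read HDFS keys from conf ...
    }
}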
Example #13
 public virtual void Setup()
 {
     conf = new HdfsConfiguration(baseConf);
 }
Example #14
        /// <exception cref="System.Exception"/>
        public virtual void TestInvalidNetworkTopologiesNotCachedInHdfs()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                // bad rack topology
                string[] racks = new string[] { "/a/b", "/c" };
                string[] hosts = new string[] { "foo1.example.com", "foo2.example.com" };
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Racks(racks).Hosts(hosts).Build();
                cluster.WaitActive();
                NamenodeProtocols nn = cluster.GetNameNodeRpc();
                NUnit.Framework.Assert.IsNotNull(nn);
                // Wait for one DataNode to register.
                // The other DataNode will not be able to register because of the rack mismatch.
                DatanodeInfo[] info;
                while (true)
                {
                    info = nn.GetDatanodeReport(HdfsConstants.DatanodeReportType.Live);
                    NUnit.Framework.Assert.IsFalse(info.Length == 2);
                    if (info.Length == 1)
                    {
                        break;
                    }
                    Sharpen.Thread.Sleep(1000);
                }
                // Set the network topology of the other node to the match the network
                // topology of the node that came up.
                int validIdx   = info[0].GetHostName().Equals(hosts[0]) ? 0 : 1;
                int invalidIdx = validIdx == 1 ? 0 : 1;
                StaticMapping.AddNodeToRack(hosts[invalidIdx], racks[validIdx]);
                Log.Info("datanode " + validIdx + " came up with network location " + info[0].GetNetworkLocation
                             ());
                // Restart the DN with the invalid topology and wait for it to register.
                cluster.RestartDataNode(invalidIdx);
                Sharpen.Thread.Sleep(5000);
                while (true)
                {
                    info = nn.GetDatanodeReport(HdfsConstants.DatanodeReportType.Live);
                    if (info.Length == 2)
                    {
                        break;
                    }
                    if (info.Length == 0)
                    {
                        Log.Info("got no valid DNs");
                    }
                    else
                    {
                        if (info.Length == 1)
                        {
                            Log.Info("got one valid DN: " + info[0].GetHostName() + " (at " + info[0].GetNetworkLocation
                                         () + ")");
                        }
                    }
                    Sharpen.Thread.Sleep(1000);
                }
                NUnit.Framework.Assert.AreEqual(info[0].GetNetworkLocation(), info[1].GetNetworkLocation());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
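The rack assignments here come from StaticMapping, the static DNS-to-switch mapping used by the tests: AddNodeToRack fixes a host's network location before its DataNode (re)starts. A minimal hedged sketch of the builder-plus-mapping combination (rack and host names are hypothetical):

    // Sketch: start two DataNodes on explicit racks, then remap one host.
    Configuration conf = new HdfsConfiguration();
    string[] racks = new string[] { "/rackA", "/rackB" };
    string[] hosts = new string[] { "h1.example.com", "h2.example.com" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(2).Racks(racks).Hosts(hosts).Build();
    StaticMapping.AddNodeToRack(hosts[1], racks[0]); // move h2 onto /rackA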
Example #15
 public virtual void SetUpNameNode()
 {
     conf    = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
 }
Example #16
        public virtual void TestBlockReplacement()
        {
            Configuration Conf = new HdfsConfiguration();

            string[] InitialRacks      = new string[] { "/RACK0", "/RACK1", "/RACK2" };
            string[] NewRacks          = new string[] { "/RACK2" };
            short    ReplicationFactor = (short)3;
            int      DefaultBlockSize  = 1024;
            Random   r = new Random();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            Conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            Conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 500);
            cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor).Racks(InitialRacks).Build();
            try
            {
                cluster.WaitActive();
                FileSystem fs       = cluster.GetFileSystem();
                Path       fileName = new Path("/tmp.txt");
                // create a file with one block
                DFSTestUtil.CreateFile(fs, fileName, DefaultBlockSize, ReplicationFactor, r.NextLong());
                DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
                // get all datanodes
                IPEndPoint           addr          = new IPEndPoint("localhost", cluster.GetNameNodePort());
                DFSClient            client        = new DFSClient(addr, Conf);
                IList<LocatedBlock> locatedBlocks = client.GetNamenode()
                    .GetBlockLocations("/tmp.txt", 0, DefaultBlockSize).GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, locatedBlocks.Count);
                LocatedBlock   block    = locatedBlocks[0];
                DatanodeInfo[] oldNodes = block.GetLocations();
                NUnit.Framework.Assert.AreEqual(3, oldNodes.Length);
                ExtendedBlock b = block.GetBlock();
                // add a fourth datanode to the cluster
                cluster.StartDataNodes(Conf, 1, true, null, NewRacks);
                cluster.WaitActive();
                DatanodeInfo[] datanodes = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
                // find out the new node
                DatanodeInfo newNode = null;
                foreach (DatanodeInfo node in datanodes)
                {
                    bool isNewNode = true;
                    foreach (DatanodeInfo oldNode in oldNodes)
                    {
                        if (node.Equals(oldNode))
                        {
                            isNewNode = false;
                            break;
                        }
                    }
                    if (isNewNode)
                    {
                        newNode = node;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsTrue(newNode != null);
                DatanodeInfo         source  = null;
                AList <DatanodeInfo> proxies = new AList <DatanodeInfo>(2);
                foreach (DatanodeInfo node_1 in datanodes)
                {
                    if (node_1 != newNode)
                    {
                        if (node_1.GetNetworkLocation().Equals(newNode.GetNetworkLocation()))
                        {
                            source = node_1;
                        }
                        else
                        {
                            proxies.AddItem(node_1);
                        }
                    }
                }
                //current state: the newNode is on RACK2, and "source" is the other dn on RACK2.
                //the two datanodes on RACK0 and RACK1 are in "proxies".
                //"source" and both "proxies" all contain the block, while newNode doesn't yet.
                NUnit.Framework.Assert.IsTrue(source != null && proxies.Count == 2);
                // start to replace the block
                // case 1: proxySource does not contain the block
                Log.Info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, newNode, proxies[0]));
                // case 2: destination already contains the block
                Log.Info("Testcase 2: Destination " + proxies[1] + " contains the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, proxies[0], proxies[1]));
                // case 3: correct case
                Log.Info("Testcase 3: Source=" + source + " Proxy=" + proxies[0] + " Destination="
                         + newNode);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, source, proxies[0], newNode));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies and newNode
                // but not source
                CheckBlocks(new DatanodeInfo[] { newNode, proxies[0], proxies[1] },
                            fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
                // case 4: proxies.get(0) is not a valid del hint
                // expect either source or newNode replica to be deleted instead
                Log.Info("Testcase 4: invalid del hint " + proxies[0]);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, proxies[0], proxies[1], source));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies,
                // and either source or newNode, but not both.
                CheckBlocks(Sharpen.Collections.ToArray(proxies, new DatanodeInfo[proxies.Count]),
                            fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
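Fetching a file's block locations through the client protocol, as done at the start of this test, is worth isolating. A hedged sketch, assuming a running cluster and its Configuration:

    // Sketch: locate the blocks of a file via DFSClient.
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    IList<LocatedBlock> blocks = client.GetNamenode()
        .GetBlockLocations("/tmp.txt", 0, 1024).GetLocatedBlocks();
    foreach (LocatedBlock blk in blocks)
    {
        Log.Info("block " + blk.GetBlock() + " on " + blk.GetLocations().Length + " nodes");
    }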
        public virtual void TestSaveNamespace()
        {
            DistributedFileSystem fs = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                string       renewer    = UserGroupInformation.GetLoginUser().GetUserName();
                Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token1 =
                    namesystem.GetDelegationToken(new Text(renewer));
                Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token2 =
                    namesystem.GetDelegationToken(new Text(renewer));
                // Saving image without safe mode should fail
                DFSAdmin admin = new DFSAdmin(conf);
                string[] args  = new string[] { "-saveNamespace" };
                // verify that the edits file is NOT empty
                NameNode nn = cluster.GetNameNode();
                foreach (Storage.StorageDirectory sd in nn.GetFSImage().GetStorage().DirIterable(
                             null))
                {
                    FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd);
                    NUnit.Framework.Assert.IsTrue(log.IsInProgress());
                    log.ValidateLog();
                    long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
                    NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should have 5 transactions"
                                                    , 5, numTransactions);
                }
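                // Worked example of the count above: an in-progress segment with
                // firstTxId = 1 and lastTxId = 5 holds (5 - 1) + 1 = 5 transactions,
                // the first of which is the START_LOG_SEGMENT entry that opened it.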
                // Saving image in safe mode should succeed
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                try
                {
                    admin.Run(args);
                }
                catch (Exception e)
                {
                    throw new IOException(e.Message);
                }
                // verify that the edits file is empty except for the START txn
                foreach (Storage.StorageDirectory sd_1 in nn.GetFSImage().GetStorage().DirIterable
                             (null))
                {
                    FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd_1);
                    NUnit.Framework.Assert.IsTrue(log.IsInProgress());
                    log.ValidateLog();
                    long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
                    NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should only have START txn"
                                                    , 1, numTransactions);
                }
                // restart cluster
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                //Should be able to renew & cancel the delegation token after cluster restart
                try
                {
                    RenewToken(token1);
                    RenewToken(token2);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
                namesystem = cluster.GetNamesystem();
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token3 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token4 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                // restart cluster again
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                namesystem = cluster.GetNamesystem();
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token5 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                try
                {
                    RenewToken(token1);
                    RenewToken(token2);
                    RenewToken(token3);
                    RenewToken(token4);
                    RenewToken(token5);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
                // restart cluster again
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                namesystem = cluster.GetNamesystem();
                try
                {
                    RenewToken(token1);
                    CancelToken(token1);
                    RenewToken(token2);
                    CancelToken(token2);
                    RenewToken(token3);
                    CancelToken(token3);
                    RenewToken(token4);
                    CancelToken(token4);
                    RenewToken(token5);
                    CancelToken(token5);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
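        // A minimal sketch (not part of the original test; the helper name is an
        // assumption) of the save-namespace sequence driven above through DFSAdmin:
        // the namenode rejects -saveNamespace unless it is in safe mode.
        private static void SaveNamespaceSketch(Configuration conf, DistributedFileSystem fs)
        {
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            // persists the namespace to a new fsimage and rolls the edit log
            new DFSAdmin(conf).Run(new string[] { "-saveNamespace" });
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
        }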
Example #18
        public virtual void TestHeartbeatBlockRecovery()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                FSNamesystem     namesystem = cluster.GetNamesystem();
                HeartbeatManager hm         = namesystem.GetBlockManager().GetDatanodeManager().GetHeartbeatManager
                                                  ();
                string poolId = namesystem.GetBlockPoolId();
                DatanodeRegistration nodeReg1 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.
                                                                                         GetDataNodes()[0], poolId);
                DatanodeDescriptor dd1 = NameNodeAdapter.GetDatanode(namesystem, nodeReg1);
                dd1.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                DatanodeRegistration nodeReg2 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.
                                                                                         GetDataNodes()[1], poolId);
                DatanodeDescriptor dd2 = NameNodeAdapter.GetDatanode(namesystem, nodeReg2);
                dd2.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                DatanodeRegistration nodeReg3 = DataNodeTestUtils.GetDNRegistrationForBP(cluster.
                                                                                         GetDataNodes()[2], poolId);
                DatanodeDescriptor dd3 = NameNodeAdapter.GetDatanode(namesystem, nodeReg3);
                dd3.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem);
                        NameNodeAdapter.SendHeartBeat(nodeReg2, dd2, namesystem);
                        NameNodeAdapter.SendHeartBeat(nodeReg3, dd3, namesystem);
                        // Test with all alive nodes.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, 0);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                        DatanodeStorageInfo[] storages = new DatanodeStorageInfo[] { dd1.GetStorageInfos(
                                                                                         )[0], dd2.GetStorageInfos()[0], dd3.GetStorageInfos()[0] };
                        BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction
                                                                             (new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3, HdfsServerConstants.BlockUCState
                                                                             .UnderRecovery, storages);
                        dd1.AddBlockToBeRecovered(blockInfo);
                        DatanodeCommand[] cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem)
                                                 .GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction
                                                            ());
                        BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        DatanodeInfo[] recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks
                                                                                         (), new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
                        // Test with one stale node.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, 0);
                        // More than the default stale interval of 30 seconds.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
                        blockInfo = new BlockInfoContiguousUnderConstruction(new Block(0, 0, GenerationStamp
                                                                                       .LastReservedStamp), (short)3, HdfsServerConstants.BlockUCState.UnderRecovery, storages
                                                                             );
                        dd1.AddBlockToBeRecovered(blockInfo);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction
                                                            ());
                        recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks
                                                                          (), new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        NUnit.Framework.Assert.AreEqual(2, recoveringNodes.Length);
                        // dd2 is skipped.
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd3);
                        // Test with all stale node.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -60 * 1000);
                        // More than the default stale interval of 30 seconds.
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -40 * 1000);
                        DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -80 * 1000);
                        blockInfo = new BlockInfoContiguousUnderConstruction(new Block(0, 0, GenerationStamp
                                                                                       .LastReservedStamp), (short)3, HdfsServerConstants.BlockUCState.UnderRecovery, storages
                                                                             );
                        dd1.AddBlockToBeRecovered(blockInfo);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg1, dd1, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaRecoverblock, cmds[0].GetAction
                                                            ());
                        recoveryCommand = (BlockRecoveryCommand)cmds[0];
                        NUnit.Framework.Assert.AreEqual(1, recoveryCommand.GetRecoveringBlocks().Count);
                        recoveringNodes = Sharpen.Collections.ToArray(recoveryCommand.GetRecoveringBlocks
                                                                          (), new BlockRecoveryCommand.RecoveringBlock[0])[0].GetLocations();
                        // dd1 just heartbeated, so it is the only non-stale replica when the
                        // recovery list is constructed; with fewer than two non-stale replicas
                        // available, the command falls back to all three locations.
                        NUnit.Framework.Assert.AreEqual(3, recoveringNodes.Length);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[0], dd1);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[1], dd2);
                        NUnit.Framework.Assert.AreEqual(recoveringNodes[2], dd3);
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
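        // A minimal sketch (assumed from the assertions above, not code from this
        // file) of the staleness rule for recovery locations: stale replicas are
        // filtered out, but when fewer than two non-stale replicas remain, the
        // command falls back to every known location so recovery can still proceed.
        private static DatanodeDescriptor[] ChooseRecoveryLocations(DatanodeDescriptor[] replicas, long staleIntervalMs)
        {
            IList<DatanodeDescriptor> fresh = new AList<DatanodeDescriptor>();
            foreach (DatanodeDescriptor replica in replicas)
            {
                if (!replica.IsStale(staleIntervalMs))
                {
                    fresh.AddItem(replica);
                }
            }
            // with zero or one fresh replica, use all locations instead
            if (fresh.Count > 1)
            {
                return Sharpen.Collections.ToArray(fresh, new DatanodeDescriptor[fresh.Count]);
            }
            return replicas;
        }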
Example #19
 /// <summary>Starts a cluster with the given configuration.</summary>
 /// <param name="conf">cluster configuration</param>
 /// <exception cref="System.IO.IOException">if there is an I/O error</exception>
 private void StartCluster(HdfsConfiguration conf)
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
     cluster.WaitActive();
 }
        public virtual void TestRestartDFS()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            FSNamesystem   fsn     = null;
            int            numNamenodeDirs;
            DFSTestUtil    files = new DFSTestUtil.Builder().SetName("TestRestartDFS").SetNumFiles
                                       (200).Build();
            string     dir      = "/srcdat";
            Path       rootpath = new Path("/");
            Path       dirpath  = new Path(dir);
            long       rootmtime;
            FileStatus rootstatus;
            FileStatus dirstatus;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Format(true).NumDataNodes(NumDatanodes
                                                                                     ).Build();
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey, new
                                                        string[] {  });
                numNamenodeDirs = nameNodeDirs.Length;
                NUnit.Framework.Assert.IsTrue("failed to get number of Namenode StorageDirs", numNamenodeDirs
                                              != 0);
                FileSystem fs = cluster.GetFileSystem();
                files.CreateFiles(fs, dir);
                rootmtime  = fs.GetFileStatus(rootpath).GetModificationTime();
                rootstatus = fs.GetFileStatus(rootpath);
                dirstatus  = fs.GetFileStatus(dirpath);
                fs.SetOwner(rootpath, rootstatus.GetOwner() + "_XXX", null);
                fs.SetOwner(dirpath, null, dirstatus.GetGroup() + "_XXX");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            try
            {
                // Force the NN to save its images on startup so long as
                // there are any uncheckpointed txns
                conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
                // Here we restart the MiniDFScluster without formatting namenode
                cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(NumDatanodes
                                                                                      ).Build();
                fsn = cluster.GetNamesystem();
                FileSystem fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue("Filesystem corrupted after restart.", files.CheckFiles
                                                  (fs, dir));
                FileStatus newrootstatus = fs.GetFileStatus(rootpath);
                NUnit.Framework.Assert.AreEqual(rootmtime, newrootstatus.GetModificationTime());
                NUnit.Framework.Assert.AreEqual(rootstatus.GetOwner() + "_XXX", newrootstatus.GetOwner
                                                    ());
                NUnit.Framework.Assert.AreEqual(rootstatus.GetGroup(), newrootstatus.GetGroup());
                FileStatus newdirstatus = fs.GetFileStatus(dirpath);
                NUnit.Framework.Assert.AreEqual(dirstatus.GetOwner(), newdirstatus.GetOwner());
                NUnit.Framework.Assert.AreEqual(dirstatus.GetGroup() + "_XXX", newdirstatus.GetGroup
                                                    ());
                rootmtime = fs.GetFileStatus(rootpath).GetModificationTime();
                string checkAfterRestart = CheckImages(fsn, numNamenodeDirs);
                // Modify the system and then perform saveNamespace
                files.Cleanup(fs, dir);
                files.CreateFiles(fs, dir);
                fsn.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                cluster.GetNameNodeRpc().SaveNamespace();
                string checkAfterModify = CheckImages(fsn, numNamenodeDirs);
                NUnit.Framework.Assert.IsFalse("Modified namespace should change fsimage contents. "
                                               + "was: " + checkAfterRestart + " now: " + checkAfterModify, checkAfterRestart.
                                               Equals(checkAfterModify));
                fsn.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                files.Cleanup(fs, dir);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
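        // Illustrative helper (name assumed, not in the original): the restart idiom
        // used above. The first Build() ran with Format(true) to create fresh storage;
        // a later Build() with Format(false) reuses the on-disk fsimage and edit log,
        // which is why owners and modification times must survive the restart.
        private static MiniDFSCluster RestartWithoutFormat(Configuration conf, int numDataNodes)
        {
            return new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(numDataNodes).Build();
        }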
Example #21
        /// <exception cref="System.Exception"/>
        public virtual void TestXattrConfiguration()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, -1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                NUnit.Framework.Assert.Fail("Expected exception with negative xattr size");
            }
            catch (ArgumentException e)
            {
                GenericTestUtils.AssertExceptionContains("Cannot set a negative value for the maximum size of an xattr"
                                                         , e);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, DFSConfigKeys.DfsNamenodeMaxXattrSizeDefault
                            );
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            try
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, -1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                NUnit.Framework.Assert.Fail("Expected exception with negative # xattrs per inode"
                                            );
            }
            catch (ArgumentException e)
            {
                GenericTestUtils.AssertExceptionContains("Cannot set a negative limit on the number of xattrs per inode"
                                                         , e);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeDefault
                            );
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            try
            {
                // Set up a logger to check log message
                LogVerificationAppender appender = new LogVerificationAppender();
                Logger logger = Logger.GetRootLogger();
                logger.AddAppender(appender);
                int count = appender.CountLinesWithMessage("Maximum size of an xattr: 0 (unlimited)"
                                                           );
                NUnit.Framework.Assert.AreEqual("Expected no messages about unlimited xattr size"
                                                , 0, count);
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, 0);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                count   = appender.CountLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
                // the message is logged twice: once during format, once during namenode startup
                NUnit.Framework.Assert.AreEqual("Expected unlimited xattr size", 2, count);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, DFSConfigKeys.DfsNamenodeMaxXattrSizeDefault
                            );
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
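        // Hypothetical helper (not in the original) summarizing the validation the
        // three cases above exercise: both limits must be non-negative, and a size
        // limit of 0 means "unlimited".
        private static void SetXattrLimits(Configuration conf, int maxSize, int maxPerInode)
        {
            if (maxSize < 0)
            {
                throw new ArgumentException("Cannot set a negative value for the maximum size of an xattr");
            }
            if (maxPerInode < 0)
            {
                throw new ArgumentException("Cannot set a negative limit on the number of xattrs per inode");
            }
            conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, maxSize);
            conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, maxPerInode);
        }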
        /// <summary>Check that listCorruptFileBlocks works while the namenode is still in safemode.
        ///     </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFileBlocksInSafeMode()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode sends block reports
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // never leave safemode automatically
                conf.SetFloat(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, 1.5f);
                // start populating repl queues immediately
                conf.SetFloat(DFSConfigKeys.DfsNamenodeReplQueueThresholdPctKey, 0f);
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).WaitSafeMode(false).Build();
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                     false);
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testListCorruptFileBlocksInSafeMode"
                                                                     ).SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                ICollection <FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.GetNameNode().GetNamesystem
                                                                               ().ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None."
                                              , badFiles.Count == 0);
                // Now deliberately corrupt one block
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 0);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, cluster.GetNamesystem
                                                                         ().GetBlockPoolId());
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList <FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // restart namenode
                cluster.RestartNameNode(0);
                fs = cluster.GetFileSystem();
                // wait until replication queues have been initialized
                while (!cluster.GetNameNode().namesystem.IsPopulatingReplQueues())
                {
                    try
                    {
                        Log.Info("waiting for replication queues");
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                }
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // check that we are still in safe mode
                NUnit.Framework.Assert.IsTrue("Namenode is not in safe mode", cluster.GetNameNode
                                                  ().IsInSafeMode());
                // now leave safe mode so that we can clean up
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                     false);
                util.Cleanup(fs, "/srcdat10");
            }
            catch (Exception e)
            {
                Log.Error(StringUtils.StringifyException(e));
                throw;
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #23
        public virtual void TestBackupNodeTailsEdits()
        {
            Configuration conf = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                backup  = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                BackupImage bnImage = (BackupImage)backup.GetFSImage();
                TestBNInSync(cluster, backup, 1);
                // Force a roll -- BN should roll with NN.
                NameNode          nn    = cluster.GetNameNode();
                NamenodeProtocols nnRpc = nn.GetRpcServer();
                nnRpc.RollEditLog();
                NUnit.Framework.Assert.AreEqual(bnImage.GetEditLog().GetCurSegmentTxId(), nn.GetFSImage
                                                    ().GetEditLog().GetCurSegmentTxId());
                // BN should stay in sync after roll
                TestBNInSync(cluster, backup, 2);
                long nnImageBefore = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                // BN checkpoint
                backup.DoCheckpoint();
                // NN should have received a new image
                long nnImageAfter = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                NUnit.Framework.Assert.IsTrue("nn should have received new checkpoint. before: "
                                              + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
                // BN should stay in sync after checkpoint
                TestBNInSync(cluster, backup, 3);
                // Stop BN
                Storage.StorageDirectory sd = bnImage.GetStorage().GetStorageDir(0);
                backup.Stop();
                backup = null;
                // When shutting down the BN, it shouldn't finalize logs that are
                // still open on the NN
                FileJournalManager.EditLogFile editsLog = FSImageTestUtil.FindLatestEditsLog(sd);
                NUnit.Framework.Assert.AreEqual(editsLog.GetFirstTxId(), nn.GetFSImage().GetEditLog
                                                    ().GetCurSegmentTxId());
                NUnit.Framework.Assert.IsTrue("Should not have finalized " + editsLog, editsLog.IsInProgress
                                                  ());
                // do some edits
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(new Path("/edit-while-bn-down")));
                // start a new backup node
                backup = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                TestBNInSync(cluster, backup, 4);
                NUnit.Framework.Assert.IsNotNull(backup.GetNamesystem().GetFileInfo("/edit-while-bn-down"
                                                                                    , false));
            }
            finally
            {
                Log.Info("Shutting down...");
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            AssertStorageDirsMatch(cluster.GetNameNode(), backup);
        }
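        // A minimal sketch (helper name assumed) of the checkpoint check performed
        // above: a successful BackupNode checkpoint must advance the namenode's most
        // recent checkpoint transaction id.
        private static void AssertCheckpointAdvances(NameNode nn, BackupNode backup)
        {
            long before = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
            backup.DoCheckpoint();
            long after = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
            NUnit.Framework.Assert.IsTrue("checkpoint txid should advance, before: " + before
                                          + " after: " + after, after > before);
        }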
        /// <summary>Test if NN.listCorruptFiles() returns the right number of results.</summary>
        /// <remarks>
        /// Test if NN.listCorruptFiles() returns the right number of results.
        /// The corrupt blocks are detected by the BlockPoolSliceScanner.
        /// Also, test that DFS.listCorruptFileBlocks can make multiple successive
        /// calls.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestMaxCorruptFiles()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // datanode sends block reports
                cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem fs = cluster.GetFileSystem();
                int        maxCorruptFileBlocks = FSNamesystem.DefaultMaxCorruptFileblocksReturned;
                // create 3 * maxCorruptFileBlocks files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testMaxCorruptFiles").SetNumFiles
                                       (maxCorruptFileBlocks * 3).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat2", (short)1);
                util.WaitReplication(fs, "/srcdat2", (short)1);
                // verify that there are no bad blocks.
                NameNode namenode = cluster.GetNameNode();
                ICollection <FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.GetNamesystem(
                    ).ListCorruptFileBlocks("/srcdat2", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting none."
                                              , badFiles.Count == 0);
                // Now deliberately remove blocks from all files
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j <= 1; j++)
                    {
                        FilePath storageDir = cluster.GetInstanceStorageDir(i, j);
                        FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                        Log.Info("Removing files from " + data_dir);
                        IList <FilePath> metadataFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                        if (metadataFiles == null)
                        {
                            continue;
                        }
                        foreach (FilePath metadataFile in metadataFiles)
                        {
                            FilePath blockFile = Block.MetaToBlockFile(metadataFile);
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blockFile.Delete());
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", metadataFile.Delete());
                        }
                    }
                }
                // Occasionally the BlockPoolSliceScanner can run before we have removed
                // the blocks. Restart the Datanode to trigger the scanner into running
                // once more.
                Log.Info("Restarting Datanode to trigger BlockPoolSliceScanner");
                cluster.RestartDataNodes();
                cluster.WaitActive();
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
                while (badFiles.Count < maxCorruptFileBlocks)
                {
                    Log.Info("# of corrupt files is: " + badFiles.Count);
                    Sharpen.Thread.Sleep(10000);
                    badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
                }
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting "
                                              + maxCorruptFileBlocks + ".", badFiles.Count == maxCorruptFileBlocks);
                CorruptFileBlockIterator iter = (CorruptFileBlockIterator)fs.ListCorruptFileBlocks
                                                    (new Path("/srcdat2"));
                int corruptPaths = CountPaths(iter);
                NUnit.Framework.Assert.IsTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got "
                                              + corruptPaths, corruptPaths > maxCorruptFileBlocks);
                NUnit.Framework.Assert.IsTrue("Iterator should have made more than 1 call but made "
                                              + iter.GetCallsMade(), iter.GetCallsMade() > 1);
                util.Cleanup(fs, "/srcdat2");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
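        // Probable shape of the CountPaths helper used above (a sketch; the helper is
        // defined elsewhere in the suite): the iterator pages through the namenode's
        // capped listCorruptFileBlocks responses, so counting every path forces more
        // than one RPC once the total exceeds the per-call maximum.
        private static int CountPathsSketch(CorruptFileBlockIterator iter)
        {
            int count = 0;
            while (iter.HasNext())
            {
                iter.Next();
                count++;
            }
            return count;
        }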
Example #25
        /// <exception cref="System.Exception"/>
        internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
        {
            Path          file1 = new Path("/checkpoint.dat");
            Path          file2 = new Path("/checkpoint2.dat");
            Path          file3 = new Path("/backup.dat");
            Configuration conf  = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            short replication  = (short)conf.GetInt("dfs.replication", 3);
            int   numDatanodes = Math.Max(3, replication);

            conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            // disable block scanner
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                //
                // verify that 'format' really blew away all pre-existing files
                //
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                //
                // Create file1
                //
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
            FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");

            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir
                                                                             ), ImmutableSet.Of <string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file1 still exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                fileSys = cluster.GetFileSystem();
                // check that file1 still exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                fileSys.Delete(file1, true);
                // create new file file2
                fileSys.Mkdirs(file2);
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
                for (int i = 0; i < 10; i++)
                {
                    fileSys.Mkdirs(new Path("file_" + i));
                }
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                // Try BackupNode operations
                IPEndPoint add = backup.GetNameNodeAddress();
                // Write to BN
                FileSystem bnFS = FileSystem.Get(new Path("hdfs://" + NetUtils.GetHostPortString(
                                                              add)).ToUri(), conf);
                bool canWrite = true;
                try
                {
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(bnFS, file3, replication
                                                                                    );
                }
                catch (IOException eio)
                {
                    Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
                    canWrite = false;
                }
                NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite
                                               );
                // Reads are allowed for BackupNode, but not for CheckpointNode
                bool canRead = true;
                try
                {
                    bnFS.Exists(file2);
                }
                catch (IOException eio)
                {
                    Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
                    canRead = false;
                }
                NUnit.Framework.Assert.AreEqual("Reads to BackupNode are allowed, but not CheckpointNode."
                                                , canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication
                                                                                );
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication
                                                                                );
                // should also be on BN right away
                NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode", op != HdfsServerConstants.StartupOption
                                              .Backup || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) !=
                                              null);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                throw new Exception(e.Message, e);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir
                                                                             ), ImmutableSet.Of <string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file2 exists and
                // file1 does not exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                // verify that file2 exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode: ", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
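        // Note the role split verified above: writes through a BackupNode are always
        // rejected, while reads succeed only when the node runs in the BACKUP role
        // (a CHECKPOINT node serves neither reads nor writes).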
Example #26
        public virtual void TestNameEditsRequiredConfigs()
        {
            MiniDFSCluster cluster          = null;
            FilePath       nameAndEditsDir  = new FilePath(base_dir, "name_and_edits");
            FilePath       nameAndEditsDir2 = new FilePath(base_dir, "name_and_edits2");
            FilePath       nameDir          = new FilePath(base_dir, "name");

            // 1
            // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
            // without adding it to dfs.namenode.edits.dir.
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirRequiredKey, nameAndEditsDir2.ToURI().ToString
                             ());
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.ToURI().ToString()
                         );
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs
                              (false).Build();
                NUnit.Framework.Assert.Fail("Successfully started cluster but should not have been able to."
                                            );
            }
            catch (ArgumentException iae)
            {
                // expect to fail
                Log.Info("EXPECTED: cluster start failed due to bad configuration" + iae);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                cluster = null;
            }
            // 2
            // Good configuration. Add a directory to both dfs.namenode.edits.dir.required
            // and dfs.namenode.edits.dir.
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
                conf.SetStrings(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.ToURI().ToString
                                    (), nameAndEditsDir2.ToURI().ToString());
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirRequiredKey, nameAndEditsDir2.ToURI().ToString
                             ());
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs
                              (false).Build();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            // 3
            // Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
            // dfs.namenode.edits.dir.required.
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
                conf.SetStrings(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.ToURI().ToString
                                    (), nameAndEditsDir2.ToURI().ToString());
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs
                              (false).Build();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
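        // Hypothetical validation helper (not in the original) capturing the invariant
        // the three cases above establish: every directory listed in
        // dfs.namenode.edits.dir.required must also appear in dfs.namenode.edits.dir,
        // otherwise namenode startup fails.
        private static void CheckRequiredEditsDirs(Configuration conf)
        {
            ICollection<string> editsDirs = new HashSet<string>(conf.GetStrings(DFSConfigKeys
                                                                                .DfsNamenodeEditsDirKey, new string[] {  }));
            foreach (string required in conf.GetStrings(DFSConfigKeys.DfsNamenodeEditsDirRequiredKey
                                                        , new string[] {  }))
            {
                if (!editsDirs.Contains(required))
                {
                    throw new ArgumentException("Required edits directory " + required
                                                + " is not listed in " + DFSConfigKeys.DfsNamenodeEditsDirKey);
                }
            }
        }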
Example #27
        /// <exception cref="System.IO.IOException"/>
        internal static void TestNameNodeRecoveryImpl(TestNameNodeRecovery.Corruptor corruptor
                                                      , bool finalize)
        {
            string TestPath     = "/test/path/dir";
            string TestPath2    = "/second/dir";
            bool   needRecovery = corruptor.NeedRecovery(finalize);
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            SetupRecoveryTestConf(conf);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;

            Storage.StorageDirectory sd = null;
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs(false
                                                                                             ).Build();
                cluster.WaitActive();
                if (!finalize)
                {
                    // Normally, the in-progress edit log would be finalized by
                    // FSEditLog#endCurrentLogSegment.  For testing purposes, we
                    // disable that here.
                    FSEditLog spyLog = Org.Mockito.Mockito.Spy(cluster.GetNameNode().GetFSImage().GetEditLog
                                                                   ());
                    Org.Mockito.Mockito.DoNothing().When(spyLog).EndCurrentLogSegment(true);
                    DFSTestUtil.SetEditLogForTesting(cluster.GetNamesystem(), spyLog);
                }
                fileSys = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                FSImage      fsimage    = namesystem.GetFSImage();
                fileSys.Mkdirs(new Path(TestPath));
                fileSys.Mkdirs(new Path(TestPath2));
                sd = fsimage.GetStorage().DirIterator(NNStorage.NameNodeDirType.Edits).Next();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath editFile = FSImageTestUtil.FindLatestEditsLog(sd).GetFile();

            NUnit.Framework.Assert.IsTrue("Should exist: " + editFile, editFile.Exists());
            // Corrupt the edit log
            Log.Info("corrupting edit log file '" + editFile + "'");
            corruptor.Corrupt(editFile);
            // If needRecovery == true, make sure that we can't start the
            // cluster normally before recovery
            cluster = null;
            try
            {
                Log.Debug("trying to start normally (this should fail)...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).EnableManagedDfsDirsRedundancy
                              (false).Format(false).Build();
                cluster.WaitActive();
                cluster.Shutdown();
                if (needRecovery)
                {
                    NUnit.Framework.Assert.Fail("expected the corrupted edit log to prevent normal startup"
                                                );
                }
            }
            catch (IOException e)
            {
                if (!needRecovery)
                {
                    Log.Error("Got unexpected failure with " + corruptor.GetName() + corruptor, e);
                    NUnit.Framework.Assert.Fail("got unexpected exception " + e.Message);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            // Perform NameNode recovery.
            // Even if there was nothing wrong previously (needRecovery == false),
            // this should still work fine.
            cluster = null;
            try
            {
                Log.Debug("running recovery...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).EnableManagedDfsDirsRedundancy
                              (false).Format(false).StartupOption(recoverStartOpt).Build();
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.Fail("caught IOException while trying to recover. " + "message was "
                                            + e.Message + "\nstack trace\n" + StringUtils.StringifyException(e));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            // Make sure that we can start the cluster normally after recovery
            cluster = null;
            try
            {
                Log.Debug("starting cluster normally after recovery...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).EnableManagedDfsDirsRedundancy
                              (false).Format(false).Build();
                Log.Debug("successfully recovered the " + corruptor.GetName() + " corrupted edit log"
                          );
                cluster.WaitActive();
                NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Exists(new Path(TestPath)));
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.Fail("failed to recover.  Error message: " + e.Message);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
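        // The phases above mirror the operator workflow for a damaged edit log:
        // reproduce the corruption, confirm that a normal restart fails when recovery
        // is required, start once with the RECOVER startup option (the programmatic
        // equivalent of "hdfs namenode -recover"), then verify that a normal restart
        // succeeds with the namespace intact.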
        public virtual void TestProcesOverReplicateBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, 100L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString
                         (2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                // corrupt the block on datanode 0
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // remove the block scanner cursor file to trigger a block rescan
                FilePath scanCursor = new FilePath(new FilePath(MiniDFSCluster.GetFinalizedDir(cluster
                                                                                               .GetInstanceStorageDir(0, 0), cluster.GetNamesystem().GetBlockPoolId()).GetParent
                                                                    ()).GetParent(), "scanner.cursor");
                // wait up to one minute for the deletion to succeed
                for (int i = 0; !scanCursor.Delete(); i++)
                {
                    NUnit.Framework.Assert.IsTrue("Could not delete " + scanCursor.GetAbsolutePath()
                                                  + " in one minute", i < 60);
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
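                        // interrupted while sleeping; ignore and retry the delete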
                    }
                }
                // restart the datanode so the corrupt replica will be detected
                cluster.RestartDataNode(dnProps);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                string     blockPoolId     = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeID corruptDataNode = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes
                                                                                          ()[2], blockPoolId);
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
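                // Take the FSNamesystem write lock and the HeartbeatManager
                // monitor together so the utilization updates and the
                // replication change below appear atomic to the block manager.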
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        // set each live datanode's remaining space to 0 so its
                        // replica will be chosen for deletion when the block
                        // becomes over-replicated
                        string corruptMachineName = corruptDataNode.GetXferAddr();
                        foreach (DatanodeDescriptor datanode in hm.GetDatanodes())
                        {
                            if (!corruptMachineName.Equals(datanode.GetXferAddr()))
                            {
                                datanode.GetStorageInfos()[0].SetUtilizationForTesting(100L, 100L, 0, 100L);
                                datanode.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(datanode
                                                                                                           ), 0L, 0L, 0, 0, null);
                            }
                        }
                        // decrease the replication factor to 1
                        NameNodeAdapter.SetReplication(namesystem, fileName.ToString(), (short)1);
                        // the corrupt replica won't be chosen as the excess one;
                        // without 4910 the number of live replicas would drop to 0
                        // and the block would be lost
                        NUnit.Framework.Assert.AreEqual(1, bm.CountNodes(block.GetLocalBlock()).LiveReplicas
                                                            ());
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
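        // The one-minute delete loop above is a hand-rolled polling pattern.
        // A reusable helper makes the intent explicit; this is a sketch only,
        // not part of the original suite:
        public static void WaitFor(System.Func<bool> condition, int checkEveryMs, int timeoutMs)
        {
            System.DateTime deadline = System.DateTime.UtcNow.AddMilliseconds(timeoutMs);
            while (!condition())
            {
                if (System.DateTime.UtcNow > deadline)
                {
                    throw new System.TimeoutException("condition not met within " + timeoutMs + " ms");
                }
                System.Threading.Thread.Sleep(checkEveryMs);
            }
        }
        // Usage sketch for the scanner-cursor wait above:
        //   WaitFor(() => scanCursor.Delete(), 1000, 60000);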
Example No. 29
        public virtual void TestDeletedBlockWhenAddBlockIsInEdit()
        {
            Configuration conf = new HdfsConfiguration();

            cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology
                                                                      ()).NumDataNodes(1).Build();
            DFSClient client = null;

            try
            {
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual("Number of namenodes is not 2", 2, cluster.GetNumNameNodes
                                                    ());
                // Transitioning the namenode 0 to active.
                cluster.TransitionToActive(0);
                NUnit.Framework.Assert.IsTrue("Namenode 0 should be in active state", cluster.GetNameNode
                                                  (0).IsActiveState());
                NUnit.Framework.Assert.IsTrue("Namenode 1 should be in standby state", cluster.GetNameNode
                                                  (1).IsStandbyState());
                // Trigger a heartbeat so DatanodeStorageInfo#heartbeatedSinceFailover
                // is set to true.
                DataNodeTestUtils.TriggerHeartbeat(cluster.GetDataNodes()[0]);
                FileSystem fs = cluster.GetFileSystem(0);
                // Trigger a full block report so DatanodeStorageInfo#blockContentsStale
                // is reset to false.
                cluster.GetDataNodes()[0].TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental
                                                                 (false).Build());
                Path fileName = new Path("/tmp.txt");
                // create a file with one block
                DFSTestUtil.CreateFile(fs, fileName, 10L, (short)1, 1234L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)1);
                client = new DFSClient(cluster.GetFileSystem(0).GetUri(), conf);
                IList <LocatedBlock> locatedBlocks = client.GetNamenode().GetBlockLocations("/tmp.txt"
                                                                                            , 0, 10L).GetLocatedBlocks();
                NUnit.Framework.Assert.IsTrue(locatedBlocks.Count == 1);
                NUnit.Framework.Assert.IsTrue(locatedBlocks[0].GetLocations().Length == 1);
                // add a second datanode to the cluster
                cluster.StartDataNodes(conf, 1, true, null, null, null, null);
                NUnit.Framework.Assert.AreEqual("Number of datanodes should be 2", 2, cluster.GetDataNodes
                                                    ().Count);
                DataNode           dn0          = cluster.GetDataNodes()[0];
                DataNode           dn1          = cluster.GetDataNodes()[1];
                string             activeNNBPId = cluster.GetNamesystem(0).GetBlockPoolId();
                DatanodeDescriptor sourceDnDesc = NameNodeAdapter.GetDatanode(cluster.GetNamesystem
                                                                                  (0), dn0.GetDNRegistrationForBP(activeNNBPId));
                DatanodeDescriptor destDnDesc = NameNodeAdapter.GetDatanode(cluster.GetNamesystem
                                                                                (0), dn1.GetDNRegistrationForBP(activeNNBPId));
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
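                // Move the block's replica from dn0 to dn1; once the copy
                // succeeds the source replica is scheduled for asynchronous
                // deletion, which is the event this test exercises.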
                Log.Info("replaceBlock:  " + ReplaceBlock(block, (DatanodeInfo)sourceDnDesc, (DatanodeInfo
                                                                                              )sourceDnDesc, (DatanodeInfo)destDnDesc));
                // Wait for the FsDatasetAsyncDiskService to delete the replaced block
                Sharpen.Thread.Sleep(3000);
                // Trigger an incremental block report so the deleted block is
                // reported to the namenode
                cluster.GetDataNodes()[0].TriggerBlockReport(new BlockReportOptions.Factory().SetIncremental
                                                                 (true).Build());
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue("Namenode 1 should be in active state", cluster.GetNameNode
                                                  (1).IsActiveState());
                NUnit.Framework.Assert.IsTrue("Namenode 0 should be in standby state", cluster.GetNameNode
                                                  (0).IsStandbyState());
                client.Close();
                // Open a new client against the new active namenode
                client = new DFSClient(cluster.GetFileSystem(1).GetUri(), conf);
                IList <LocatedBlock> locatedBlocks1 = client.GetNamenode().GetBlockLocations("/tmp.txt"
                                                                                             , 0, 10L).GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, locatedBlocks1.Count);
                NUnit.Framework.Assert.AreEqual("The block should be only on 1 datanode ", 1, locatedBlocks1
                                                [0].GetLocations().Length);
            }
            finally
            {
                IOUtils.Cleanup(null, client);
                cluster.Shutdown();
            }
        }
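        // The fixed three-second sleep above is fragile on slow machines. A
        // hedged alternative (a sketch, reusing only the DFSClient calls the
        // test itself makes) polls the namenode until one replica remains:
        private static void WaitForSingleReplica(DFSClient client, string path, int timeoutMs)
        {
            for (int waited = 0; waited < timeoutMs; waited += 500)
            {
                IList<LocatedBlock> blocks = client.GetNamenode().GetBlockLocations(path, 0, 10L).GetLocatedBlocks();
                if (blocks.Count == 1 && blocks[0].GetLocations().Length == 1)
                {
                    return;
                }
                Sharpen.Thread.Sleep(500);
            }
            NUnit.Framework.Assert.Fail("replica was not deleted within " + timeoutMs + " ms");
        }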
Example No. 30
        public virtual void TestFileLimit()
        {
            Configuration conf       = new HdfsConfiguration();
            int           maxObjects = 5;

            conf.SetLong(DFSConfigKeys.DfsNamenodeMaxObjectsKey, maxObjects);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            int currentNodes = 0;

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            FSNamesystem   namesys = cluster.GetNamesystem();

            try
            {
                //
                // check that / exists
                //
                Path path = new Path("/");
                NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory
                                                  ());
                currentNodes = 1;   // the root inode
                //
                // verify that we can create the specified number of files, leaving
                // one slot for "/". Each file takes an inode and a block.
                //
                for (int i = 0; i < maxObjects / 2; i++)
                {
                    Path file = new Path("/filestatus" + i);
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                    // two more objects for this creation
                    currentNodes += 2;
                }
                // verify that creating another file fails
                bool hitException = false;
                try
                {
                    Path file = new Path("/filestatus");
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed file limit", hitException);
                // delete one file
                Path file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0);
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // now we should be able to create a new file
                CreateFile(fs, file0);
                System.Console.Out.WriteLine("Created file " + file0 + " again.");
                currentNodes += 2;
                // delete the file again
                file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0 + " again.");
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // create two directories in place of the file that we deleted
                Path dir = new Path("/dir0/dir1");
                fs.Mkdirs(dir);
                System.Console.Out.WriteLine("Created directories " + dir);
                currentNodes += 2;
                WaitForLimit(namesys, currentNodes);
                // verify that creating another directory fails
                hitException = false;
                try
                {
                    fs.Mkdirs(new Path("dir.fail"));
                    System.Console.Out.WriteLine("Created directory should not have succeeded.");
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed dir limit", hitException);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
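        // WaitForLimit is defined elsewhere in this test class. A plausible
        // sketch: poll the namesystem until the total object count (files plus
        // blocks) reaches the expected value. GetFilesTotal/GetBlocksTotal are
        // assumptions about the converted FSNamesystem surface:
        private static void WaitForLimitSketch(FSNamesystem namesys, long expected)
        {
            for (int i = 0; i < 30; i++)
            {
                long total = namesys.GetFilesTotal() + namesys.GetBlocksTotal();
                if (total == expected)
                {
                    return;
                }
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.Fail("timed out waiting for object count to reach " + expected);
        }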
Example No. 31
        public Hdfs()
        {
            Configuration = new HdfsConfiguration();
        }
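        // Usage sketch: the parameterless constructor simply seeds the instance
        // with a fresh HdfsConfiguration; callers can then override individual
        // keys before use, e.g.:
        //   var hdfs = new Hdfs();
        //   hdfs.Configuration.SetInt(DFSConfigKeys.DfsReplicationKey, 2);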