Example #1
 private static void SetTimeout(HttpURLConnection connection)
 {
     if (timeout <= 0)
     {
         Configuration conf = new HdfsConfiguration();
         timeout = conf.GetInt(DFSConfigKeys.DfsImageTransferTimeoutKey, DFSConfigKeys.DfsImageTransferTimeoutDefault);
         Log.Info("Image Transfer timeout configured to " + timeout + " milliseconds");
     }
     if (timeout > 0)
     {
         connection.SetConnectTimeout(timeout);
         connection.SetReadTimeout(timeout);
     }
 }
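
Example #1 resolves the image-transfer timeout from HdfsConfiguration on first use, caches it in the static timeout field, and then applies it as both the connect and read timeout. Below is a minimal standalone sketch of the same lazy, config-driven timeout pattern; the getSetting delegate and the use of HttpClient are assumptions for illustration, not part of the Hadoop port:

using System;
using System.Net.Http;

static class TimeoutExample
{
    // Cached after the first lookup, mirroring the static 'timeout' field above.
    private static int timeoutMs;

    // 'getSetting' stands in for a configuration lookup (key, default) -> value;
    // it is an assumption for this sketch, not a Hadoop API.
    public static HttpClient CreateClient(Func<string, int, int> getSetting)
    {
        if (timeoutMs <= 0)
        {
            // First call: resolve from configuration, then reuse the cached value.
            timeoutMs = getSetting("image.transfer.timeout", 60000);
            Console.WriteLine($"Timeout configured to {timeoutMs} milliseconds");
        }
        var client = new HttpClient();
        if (timeoutMs > 0)
        {
            // HttpClient has a single timeout covering connect and read phases.
            client.Timeout = TimeSpan.FromMilliseconds(timeoutMs);
        }
        return client;
    }
}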
Example #2
        public virtual void TestCreate()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, true);
            conf.Set(FsPermission.UmaskLabel, "000");
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                cluster.WaitActive();
                fs = FileSystem.Get(conf);
                FsPermission rootPerm    = CheckPermission(fs, "/", null);
                FsPermission inheritPerm = FsPermission.CreateImmutable((short)(rootPerm.ToShort() | 0xc0));
                FsPermission dirPerm = new FsPermission((short)0x1ff);
                fs.Mkdirs(new Path("/a1/a2/a3"), dirPerm);
                CheckPermission(fs, "/a1", dirPerm);
                CheckPermission(fs, "/a1/a2", dirPerm);
                CheckPermission(fs, "/a1/a2/a3", dirPerm);
                dirPerm = new FsPermission((short)0x53);
                FsPermission permission = FsPermission.CreateImmutable((short)(dirPerm.ToShort() | 0xc0));
                fs.Mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
                CheckPermission(fs, "/aa/1", permission);
                CheckPermission(fs, "/aa/1/aa/2", permission);
                CheckPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
                FsPermission       filePerm = new FsPermission((short)0x124);
                Path               p        = new Path("/b1/b2/b3.txt");
                FSDataOutputStream @out = fs.Create(p, filePerm, true,
                                                    conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
                                                    fs.GetDefaultReplication(p), fs.GetDefaultBlockSize(p), null);
                @out.Write(123);
                @out.Close();
                CheckPermission(fs, "/b1", inheritPerm);
                CheckPermission(fs, "/b1/b2", inheritPerm);
                CheckPermission(fs, "/b1/b2/b3.txt", filePerm);
                conf.Set(FsPermission.UmaskLabel, "022");
                permission = FsPermission.CreateImmutable((short)0x1b6);
                FileSystem.Mkdirs(fs, new Path("/c1"), new FsPermission(permission));
                FileSystem.Create(fs, new Path("/c1/c2.txt"), new FsPermission(permission));
                CheckPermission(fs, "/c1", permission);
                CheckPermission(fs, "/c1/c2.txt", permission);
            }
            finally
            {
                try
                {
                    if (fs != null)
                    {
                        fs.Close();
                    }
                }
                catch (Exception e)
                {
                    Log.Error(StringUtils.StringifyException(e));
                }
                try
                {
                    if (cluster != null)
                    {
                        cluster.Shutdown();
                    }
                }
                catch (Exception e)
                {
                    Log.Error(StringUtils.StringifyException(e));
                }
            }
        }
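
TestCreate leans on the HDFS umask rule: an inline create or mkdirs call applies requested & ~umask, which is why the test first sets the umask to "000" so the requested modes (0x1ff = 0777, 0x53 = 0123, 0x124 = 0444) survive unchanged; the static FileSystem.Mkdirs/FileSystem.Create helpers used at the end appear to set the permission explicitly after creation, which is why 0x1b6 (0666) is still expected under umask "022". A small standalone sketch of the masking arithmetic itself (plain C#, not Hadoop API):

using System;

static class UmaskDemo
{
    // requested & ~umask: the rule applied to permissions passed to
    // inline create/mkdirs calls.
    static short ApplyUmask(short requested, short umask) =>
        (short)(requested & ~umask);

    static void Main()
    {
        // 0666 (0x1b6) under umask 022 (0x12) -> 0644
        Console.WriteLine(Convert.ToString(ApplyUmask(0x1b6, 0x12), 8));  // prints 644
    }
}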
Example #3
        public virtual void TestNameEditsConfigsFailure()
        {
            Path           file1           = new Path("TestNameEditsConfigs1");
            Path           file2           = new Path("TestNameEditsConfigs2");
            Path           file3           = new Path("TestNameEditsConfigs3");
            MiniDFSCluster cluster         = null;
            Configuration  conf            = null;
            FileSystem     fileSys         = null;
            FilePath       nameOnlyDir     = new FilePath(base_dir, "name");
            FilePath       editsOnlyDir    = new FilePath(base_dir, "edits");
            FilePath       nameAndEditsDir = new FilePath(base_dir, "name_and_edits");

            // 1
            // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                // Manage our own dfs directories
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                              .ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                // Check that the dir has a VERSION file
                NUnit.Framework.Assert.IsTrue(new FilePath(nameAndEditsDir, "current/VERSION").Exists());
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                WriteFile(fileSys, file1, replication);
                CheckFile(fileSys, file1, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 2
            // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            NUnit.Framework.Assert.IsTrue(nameOnlyDir.Mkdir());
            NUnit.Framework.Assert.IsTrue(editsOnlyDir.Mkdir());
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath() + "," + nameOnlyDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath() + "," + editsOnlyDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                // Manage our own dfs directories. Do not format.
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                              .Format(false).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                // Check that the dirs have a VERSION file
                NUnit.Framework.Assert.IsTrue(new FilePath(nameAndEditsDir, "current/VERSION").Exists());
                NUnit.Framework.Assert.IsTrue(new FilePath(nameOnlyDir, "current/VERSION").Exists());
                NUnit.Framework.Assert.IsTrue(new FilePath(editsOnlyDir, "current/VERSION").Exists());
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                CheckFile(fileSys, file1, replication);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file2, replication);
                CheckFile(fileSys, file2, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 3
            // Now remove common directory both have and start namenode with
            // separate name and edits dirs
            try
            {
                conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameOnlyDir.GetPath());
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, editsOnlyDir.GetPath());
                replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                              .Format(false).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
                CheckFile(fileSys, file2, replication);
                CleanupFile(fileSys, file2);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // 4
            // Add old shared directory for name and edits along with latest name
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameOnlyDir.GetPath() + "," + nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                              .Format(false).ManageNameDfsDirs(false).Build();
                NUnit.Framework.Assert.Fail("Successfully started cluster but should not have been able to.");
            }
            catch (IOException e)
            {
                // expect to fail
                Log.Info("EXPECTED: cluster start failed due to missing " + "latest edits dir", e
                         );
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                cluster = null;
            }
            // 5
            // Add old shared directory for name and edits along with latest edits.
            // This is OK, since the latest edits will have segments leading all
            // the way from the image in name_and_edits.
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, editsOnlyDir.GetPath() + "," + nameAndEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                              .Format(false).ManageNameDfsDirs(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file1));
                NUnit.Framework.Assert.IsFalse(fileSys.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
                CheckFile(fileSys, file3, replication);
                CleanupFile(fileSys, file3);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
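
Each phase of this test points dfs.namenode.name.dir and dfs.namenode.edits.dir at a comma-separated list of redundant storage directories, and phase 4 verifies that the NameNode refuses to start when no configured name directory is consistent with the newest edits. A tiny sketch of building and splitting such a comma-separated value, the convention the conf.Set calls above follow (plain C#; the paths are illustrative):

using System;

static class DirListDemo
{
    static void Main()
    {
        // Build the value the test writes into keys like DfsNamenodeNameDirKey.
        string[] dirs = { "/data/name_and_edits", "/data/name" };
        string value = string.Join(",", dirs);
        Console.WriteLine(value);            // /data/name_and_edits,/data/name

        // A consumer splits it back into the redundant directory list.
        foreach (string dir in value.Split(','))
            Console.WriteLine(dir.Trim());
    }
}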
Example #4
        public virtual void TestNameEditsConfigs()
        {
            Path                     file1                  = new Path("TestNameEditsConfigs1");
            Path                     file2                  = new Path("TestNameEditsConfigs2");
            Path                     file3                  = new Path("TestNameEditsConfigs3");
            MiniDFSCluster           cluster                = null;
            SecondaryNameNode        secondary              = null;
            Configuration            conf                   = null;
            FileSystem               fileSys                = null;
            FilePath                 newNameDir             = new FilePath(base_dir, "name");
            FilePath                 newEditsDir            = new FilePath(base_dir, "edits");
            FilePath                 nameAndEdits           = new FilePath(base_dir, "name_and_edits");
            FilePath                 checkpointNameDir      = new FilePath(base_dir, "secondname");
            FilePath                 checkpointEditsDir     = new FilePath(base_dir, "secondedits");
            FilePath                 checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
            ImmutableList <FilePath> allCurrentDirs         = ImmutableList.Of(new FilePath(nameAndEdits
                                                                                            , "current"), new FilePath(newNameDir, "current"), new FilePath(newEditsDir, "current"
                                                                                                                                                            ), new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir
                                                                                                                                                                                                                             , "current"), new FilePath(checkpointEditsDir, "current"));
            ImmutableList <FilePath> imageCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits
                                                                                      , "current"), new FilePath(newNameDir, "current"), new FilePath(checkpointNameAndEdits
                                                                                                                                                      , "current"), new FilePath(checkpointNameDir, "current"));

            // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                WriteFile(fileSys, file1, replication);
                CheckFile(fileSys, file1, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
            NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories. Do not format.
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                CheckFile(fileSys, file1, replication);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file2, replication);
                CheckFile(fileSys, file2, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"));
            FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
            // Now remove common directory both have and start namenode with
            // separate name and edits dirs
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
                CheckFile(fileSys, file2, replication);
                CleanupFile(fileSys, file2);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // No edit logs in new name dir
            CheckImageAndEditsFilesExistence(newNameDir, true, false);
            CheckImageAndEditsFilesExistence(newEditsDir, false, true);
            CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
            CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
            // Add old name_and_edits dir. File system should not read image or edits
            // from old dir
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current")));
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits, "current")));
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
            CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
        }
Example #5
        public virtual void TestHeartbeat()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                cluster.WaitActive();
                FSNamesystem     namesystem = cluster.GetNamesystem();
                HeartbeatManager hm = namesystem.GetBlockManager().GetDatanodeManager().GetHeartbeatManager();
                string poolId = namesystem.GetBlockPoolId();
                DatanodeRegistration nodeReg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);
                DatanodeDescriptor dd        = NameNodeAdapter.GetDatanode(namesystem, nodeReg);
                string             storageID = DatanodeStorage.GenerateUuid();
                dd.UpdateStorage(new DatanodeStorage(storageID));
                int RemainingBlocks   = 1;
                int MaxReplicateLimit = conf.GetInt(DFSConfigKeys.DfsNamenodeReplicationMaxStreamsKey, 2);
                int MaxInvalidateLimit          = DFSConfigKeys.DfsBlockInvalidateLimitDefault;
                int MaxInvalidateBlocks         = 2 * MaxInvalidateLimit + RemainingBlocks;
                int MaxReplicateBlocks          = 2 * MaxReplicateLimit + RemainingBlocks;
                DatanodeStorageInfo[] OneTarget = new DatanodeStorageInfo[] { dd.GetStorageInfo(storageID) };
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        for (int i = 0; i < MaxReplicateBlocks; i++)
                        {
                            dd.AddBlockToBeReplicated(new Block(i, 0, GenerationStamp.LastReservedStamp), OneTarget);
                        }
                        DatanodeCommand[] cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxReplicateLimit, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        AList <Block> blockList = new AList <Block>(MaxInvalidateBlocks);
                        for (int i_1 = 0; i_1 < MaxInvalidateBlocks; i_1++)
                        {
                            blockList.AddItem(new Block(i_1, 0, GenerationStamp.LastReservedStamp));
                        }
                        dd.AddBlocksToBeInvalidated(blockList);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(2, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxReplicateLimit, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[1].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxInvalidateLimit, ((BlockCommand)cmds[1]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(2, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaTransfer, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(RemainingBlocks, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[1].GetAction());
                        NUnit.Framework.Assert.AreEqual(MaxInvalidateLimit, ((BlockCommand)cmds[1]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(1, cmds.Length);
                        NUnit.Framework.Assert.AreEqual(DatanodeProtocol.DnaInvalidate, cmds[0].GetAction());
                        NUnit.Framework.Assert.AreEqual(RemainingBlocks, ((BlockCommand)cmds[0]).GetBlocks().Length);
                        cmds = NameNodeAdapter.SendHeartBeat(nodeReg, dd, namesystem).GetCommands();
                        NUnit.Framework.Assert.AreEqual(0, cmds.Length);
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
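
The test seeds exactly 2 * limit + 1 pending replications and invalidations, so successive heartbeats drain each queue in batches of at most the per-heartbeat limit: limit, limit, then the single remaining block. A small sketch of that batching arithmetic, which is all the assertion sequence above relies on (plain C#, not the Hadoop scheduler):

using System;
using System.Collections.Generic;

static class HeartbeatBatching
{
    // Batch sizes a queue of 'pending' items yields when each heartbeat
    // may carry at most 'limit' items.
    static IEnumerable<int> BatchSizes(int pending, int limit)
    {
        while (pending > 0)
        {
            int batch = Math.Min(limit, pending);
            pending -= batch;
            yield return batch;
        }
    }

    static void Main()
    {
        int limit = 2;  // the MaxReplicateLimit default used in the test
        foreach (int b in BatchSizes(2 * limit + 1, limit))
            Console.Write(b + " ");  // prints: 2 2 1
    }
}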
Example #6
        /// <exception cref="System.Exception"/>
        internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
        {
            Path          file1 = new Path("/checkpoint.dat");
            Path          file2 = new Path("/checkpoint2.dat");
            Path          file3 = new Path("/backup.dat");
            Configuration conf  = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            short replication  = (short)conf.GetInt("dfs.replication", 3);
            int   numDatanodes = Math.Max(3, replication);

            conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);  // disable block scanner
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                //
                // verify that 'format' really blew away all pre-existing files
                //
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                //
                // Create file1
                //
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
            FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");

            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file1 still exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes)
                              .Format(false).Build();
                fileSys = cluster.GetFileSystem();
                // check that file1 still exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                fileSys.Delete(file1, true);
                // create new file file2
                fileSys.Mkdirs(file2);
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
                for (int i = 0; i < 10; i++)
                {
                    fileSys.Mkdirs(new Path("file_" + i));
                }
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                // Try BackupNode operations
                IPEndPoint add = backup.GetNameNodeAddress();
                // Write to BN
                FileSystem bnFS = FileSystem.Get(new Path("hdfs://" + NetUtils.GetHostPortString(add)).ToUri(), conf);
                bool canWrite = true;
                try
                {
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(bnFS, file3, replication);
                }
                catch (IOException eio)
                {
                    Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
                    canWrite = false;
                }
                NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite);
                // Reads are allowed for BackupNode, but not for CheckpointNode
                bool canRead = true;
                try
                {
                    bnFS.Exists(file2);
                }
                catch (IOException eio)
                {
                    Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
                    canRead = false;
                }
                NUnit.Framework.Assert.AreEqual("Reads to BackupNode are allowed, but not CheckpointNode."
                                                , canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication);
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication);
                // should also be on BN right away
                NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode", op != HdfsServerConstants.StartupOption
                                              .Backup || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) !=
                                              null);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                throw new Exception(e.Message, e);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file2 exists and
                // file1 does not exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                // verify that file2 exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode: ", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
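
After each checkpoint the test records the NameNode's current transaction id and blocks in WaitCheckpointDone until the backup node has caught up to it. A minimal sketch of that wait-until-txid polling pattern (the getLastCheckpointTxId supplier is a stand-in assumption, not the actual Hadoop helper):

using System;
using System.Threading;

static class CheckpointWait
{
    // Polls until the observed checkpoint txid reaches 'targetTxId' or the
    // deadline expires, mirroring the WaitCheckpointDone(...) calls above.
    public static void WaitCheckpointDone(Func<long> getLastCheckpointTxId, long targetTxId, TimeSpan timeout)
    {
        DateTime deadline = DateTime.UtcNow + timeout;
        while (getLastCheckpointTxId() < targetTxId)
        {
            if (DateTime.UtcNow > deadline)
                throw new TimeoutException($"checkpoint did not reach txid {targetTxId}");
            Thread.Sleep(100);  // back off briefly between polls
        }
    }
}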