        public virtual void SetUpCluster()
        {
            conf = new Configuration();
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointCheckPeriodKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeNumCheckpointsRetainedKey, 10);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            if (clusterType == TestFailureToReadEdits.TestType.SharedDirHa)
            {
                MiniDFSNNTopology topology = MiniQJMHACluster.CreateDefaultTopology(10000);
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology)
                              .NumDataNodes(0).CheckExitOnShutdown(false).Build();
            }
            else
            {
                MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
                builder.GetDfsBuilder().NumDataNodes(0).CheckExitOnShutdown(false);
                miniQjmHaCluster = builder.Build();
                cluster = miniQjmHaCluster.GetDfsCluster();
            }
            cluster.WaitActive();
            nn0 = cluster.GetNameNode(0);
            nn1 = cluster.GetNameNode(1);
            cluster.TransitionToActive(0);
            fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        }
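The setup above leaves conf, cluster, miniQjmHaCluster, nn0, nn1, and fs for the tests to use, but the matching cleanup is not part of the listing. A minimal teardown sketch under those field names (the method name ShutDownCluster is hypothetical; the real fixture may differ):

        public virtual void ShutDownCluster()
        {
            // Close the client filesystem first so no RPCs race with shutdown.
            if (fs != null)
            {
                fs.Close();
            }
            // When the QJM wrapper built the DFS cluster, let it drive shutdown.
            if (miniQjmHaCluster != null)
            {
                miniQjmHaCluster.Shutdown();
            }
            else if (cluster != null)
            {
                cluster.Shutdown();
            }
        }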
        public virtual void TestStartup()
        {
            Configuration conf = new Configuration();

            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();

            try
            {
                // During HA startup, both nodes should be in
                // standby and we shouldn't have any edits files
                // in any edits directory!
                IList<URI> allDirs = Lists.NewArrayList();
                Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(0));
                Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(1));
                allDirs.AddItem(cluster.GetSharedEditsDir(0, 1));
                AssertNoEditFiles(allDirs);
                // Set the first NN to active, make sure it creates edits
                // in its own dirs and the shared dir. The standby
                // should still have no edits!
                cluster.TransitionToActive(0);
                AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
                AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
                                NNStorage.GetInProgressEditsFileName(1));
                AssertNoEditFiles(cluster.GetNameDirs(1));
                cluster.GetNameNode(0).GetRpcServer().Mkdirs(
                    "/test", FsPermission.CreateImmutable((short)0x1ed), true);
                // Restarting the standby should not finalize any edits files
                // in the shared directory when it starts up!
                cluster.RestartNameNode(1);
                AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
                AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
                                NNStorage.GetInProgressEditsFileName(1));
                AssertNoEditFiles(cluster.GetNameDirs(1));
                // Additionally it should not have applied any in-progress logs
                // at start-up -- otherwise, it would have read half-way into
                // the current log segment, and on the next roll, it would have to
                // either replay starting in the middle of the segment (not allowed)
                // or double-replay the edits (incorrect).
                NUnit.Framework.Assert.IsNull(
                    NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
                cluster.GetNameNode(0).GetRpcServer().Mkdirs(
                    "/test2", FsPermission.CreateImmutable((short)0x1ed), true);
                // If we restart NN0, it'll come back as standby, and we can
                // transition NN1 to active and make sure it reads edits correctly at this point.
                cluster.RestartNameNode(0);
                cluster.TransitionToActive(1);
                // NN1 should have both the edits that came before its restart, and the edits that
                // came after its restart.
                NUnit.Framework.Assert.IsNotNull(
                    NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
                NUnit.Framework.Assert.IsNotNull(
                    NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test2", true));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
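TestStartup leans on two helpers that the listing omits. A plausible sketch under the Sharpen conventions used here, assuming FilePath mirrors java.io.File and that edits files live under each storage directory's current/ with an edits_ name prefix (the bodies are assumptions; only the names come from the calls above):

        private static void AssertNoEditFiles(ICollection<URI> dirs)
        {
            AssertEditFiles(dirs);
        }

        /// <summary>Check that each dir's current/ holds exactly the named edits files.</summary>
        private static void AssertEditFiles(ICollection<URI> dirs, params string[] files)
        {
            foreach (URI u in dirs)
            {
                FilePath editDir = new FilePath(new FilePath(u.GetPath()), "current");
                IList<string> found = new AList<string>();
                FilePath[] children = editDir.ListFiles();
                foreach (FilePath f in children ?? new FilePath[0])
                {
                    if (f.GetName().StartsWith("edits_"))
                    {
                        found.AddItem(f.GetName());
                    }
                }
                NUnit.Framework.Assert.AreEqual(files.Length, found.Count);
                foreach (string expected in files)
                {
                    NUnit.Framework.Assert.IsTrue("Missing " + expected + " in " + editDir,
                                                  found.Contains(expected));
                }
            }
        }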
        /// <summary>Regression test for HDFS-2795.</summary>
        /// <remarks>
        /// Regression test for HDFS-2795:
        /// - Start an HA cluster with a DN.
        /// - Write several blocks to the FS with replication 1.
        /// - Shutdown the DN
        /// - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
        /// - Restart the DN.
        /// In the bug, the standby node would only very slowly notice the blocks returning
        /// to the cluster.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDatanodeRestarts()
        {
            Configuration conf = new Configuration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 1024);
            // We read from the standby to watch block locations
            HAUtil.SetAllowStandbyReads(conf, true);
            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 0);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(1).Build();

            try
            {
                NameNode nn0 = cluster.GetNameNode(0);
                NameNode nn1 = cluster.GetNameNode(1);
                cluster.TransitionToActive(0);
                // Create 5 blocks.
                DFSTestUtil.CreateFile(cluster.GetFileSystem(0), TestFilePath, 5 * 1024,
                                       (short)1, 1L);
                HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
                // Stop the DN.
                DataNode dn     = cluster.GetDataNodes()[0];
                string   dnName = dn.GetDatanodeId().GetXferAddr();
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // Make sure both NNs register it as dead.
                BlockManagerTestUtil.NoticeDeadDatanode(nn0, dnName);
                BlockManagerTestUtil.NoticeDeadDatanode(nn1, dnName);
                BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
                BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
                NUnit.Framework.Assert.AreEqual(5, nn0.GetNamesystem().GetUnderReplicatedBlocks());
                // The SBN will not have any blocks in its neededReplication queue
                // since the SBN doesn't process replication.
                NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
                LocatedBlocks locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
                NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has no replicas",
                                                0, locs.Get(0).GetLocations().Length);
                cluster.RestartDataNode(dnProps);
                // Wait for both NNs to re-register the DN.
                cluster.WaitActive(0);
                cluster.WaitActive(1);
                BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
                BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
                NUnit.Framework.Assert.AreEqual(0, nn0.GetNamesystem().GetUnderReplicatedBlocks());
                NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
                locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
                NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has replicas again",
                                                1, locs.Get(0).GetLocations().Length);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
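TestDatanodeRestarts, and several examples below, reference TestFile, TestFilePath, and TestFileData without showing their declarations. Plausible definitions; the concrete values are assumptions:

        private const string TestFile = "/testStandbyIsHot";                  // assumed value
        private static readonly Path TestFilePath = new Path(TestFile);
        private const string TestFileData = "hello highly available world";  // assumed value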
        /// <summary>
        /// Test that getContentSummary on the standby throws a
        /// StandbyException.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestgetContentSummaryOnStandby()
        {
            Configuration nn1conf = cluster.GetConfiguration(1);

            // Reset standby reads to the default (false) on the standby.
            HAUtil.SetAllowStandbyReads(nn1conf, false);
            cluster.RestartNameNode(1);
            cluster.GetNameNodeRpc(1).GetContentSummary("/");
        }
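As scraped, the method has no visible assertion; the aggregator has presumably dropped a test attribute that declares the expected exception. An explicit equivalent, sketched with Hadoop's StandbyException:

            try
            {
                cluster.GetNameNodeRpc(1).GetContentSummary("/");
                NUnit.Framework.Assert.Fail("getContentSummary on the standby should have thrown");
            }
            catch (StandbyException)
            {
                // Expected: standby reads were disabled again above.
            }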
Example #5
        public virtual void SetupCluster()
        {
            conf = new Configuration();
            conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, 1);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSNNTopology topology = MiniDFSNNTopology.SimpleHATopology();

            cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology)
                          .NumDataNodes(0).Build();
            cluster.WaitActive();
            ShutdownClusterAndRemoveSharedEditsDir();
        }
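The ShutdownClusterAndRemoveSharedEditsDir helper called above is not listed. A sketch consistent with how other examples here manipulate the shared dir (the body, including the per-NN shutdown calls, is an assumption):

        private void ShutdownClusterAndRemoveSharedEditsDir()
        {
            cluster.ShutdownNameNode(0);
            cluster.ShutdownNameNode(1);
            FilePath sharedEditsDir = new FilePath(cluster.GetSharedEditsDir(0, 1));
            NUnit.Framework.Assert.IsTrue("Removing shared edits dir failed",
                                          FileUtil.FullyDelete(sharedEditsDir));
        }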
        /// <exception cref="System.Exception"/>
        public virtual void TestStandbyIsHot()
        {
            Configuration conf = new Configuration();

            // We read from the standby to watch block locations
            HAUtil.SetAllowStandbyReads(conf, true);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                NameNode   nn1 = cluster.GetNameNode(0);
                NameNode   nn2 = cluster.GetNameNode(1);
                FileSystem fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
                Sharpen.Thread.Sleep(1000);
                System.Console.Error.WriteLine("==================================");
                DFSTestUtil.WriteFile(fs, TestFilePath, TestFileData);
                // Have to force an edit log roll so that the standby catches up
                nn1.GetRpcServer().RollEditLog();
                System.Console.Error.WriteLine("==================================");
                // Block locations should show up on standby.
                Log.Info("Waiting for block locations to appear on standby node");
                WaitForBlockLocations(cluster, nn2, TestFile, 3);
                // Trigger immediate heartbeats and block reports so
                // that the active "trusts" all of the DNs
                cluster.TriggerHeartbeats();
                cluster.TriggerBlockReports();
                // Change replication
                Log.Info("Changing replication to 1");
                fs.SetReplication(TestFilePath, (short)1);
                BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
                WaitForBlockLocations(cluster, nn1, TestFile, 1);
                nn1.GetRpcServer().RollEditLog();
                Log.Info("Waiting for lowered replication to show up on standby");
                WaitForBlockLocations(cluster, nn2, TestFile, 1);
                // Change back to 3
                Log.Info("Changing replication to 3");
                fs.SetReplication(TestFilePath, (short)3);
                BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
                nn1.GetRpcServer().RollEditLog();
                Log.Info("Waiting for higher replication to show up on standby");
                WaitForBlockLocations(cluster, nn2, TestFile, 3);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
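WaitForBlockLocations is used in this example but never listed. A polling sketch built only from calls that appear elsewhere on this page; the timeout and the DatanodeInfo element type are assumptions:

        internal static void WaitForBlockLocations(MiniDFSCluster cluster, NameNode nn,
                                                   string path, int expectedReplicas)
        {
            // cluster is kept to match the call sites above, even though this
            // sketch only polls the given NameNode.
            for (int tries = 0; ; tries++)
            {
                LocatedBlocks locs = nn.GetRpcServer().GetBlockLocations(path, 0, 1);
                DatanodeInfo[] dnis = locs.Get(0).GetLocations();
                if (dnis.Length == expectedReplicas)
                {
                    return;
                }
                if (tries >= 40)
                {
                    NUnit.Framework.Assert.Fail("Expected " + expectedReplicas + " replicas for "
                                                + path + ", found " + dnis.Length);
                }
                Sharpen.Thread.Sleep(500);  // ~20s total at 500ms per poll
            }
        }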
Example #7
        public virtual void SetupCluster()
        {
            Configuration conf = new Configuration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                          .NumDataNodes(1).WaitSafeMode(false).Build();
            cluster.WaitActive();
            nn0 = cluster.GetNameNode(0);
            nn1 = cluster.GetNameNode(1);
            fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
            cluster.TransitionToActive(0);
        }
        public virtual void TestTailer()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();

            cluster.WaitActive();
            cluster.TransitionToActive(0);
            NameNode nn1 = cluster.GetNameNode(0);
            NameNode nn2 = cluster.GetNameNode(1);

            try
            {
                for (int i = 0; i < DirsToMake / 2; i++)
                {
                    NameNodeAdapter.Mkdirs(nn1, GetDirPath(i),
                                           new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
                }
                HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
                for (int i = 0; i < DirsToMake / 2; i++)
                {
                    NUnit.Framework.Assert.IsTrue(
                        NameNodeAdapter.GetFileInfo(nn2, GetDirPath(i), false).IsDir());
                }
                for (int i = DirsToMake / 2; i < DirsToMake; i++)
                {
                    NameNodeAdapter.Mkdirs(nn1, GetDirPath(i),
                                           new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
                }
                HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
                for (int i = DirsToMake / 2; i < DirsToMake; i++)
                {
                    NUnit.Framework.Assert.IsTrue(
                        NameNodeAdapter.GetFileInfo(nn2, GetDirPath(i), false).IsDir());
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
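TestTailer depends on DirsToMake and GetDirPath, neither of which appears in the listing. Plausible definitions; the count and the /dir prefix are assumptions:

        private const int DirsToMake = 20;  // assumed value

        private static string GetDirPath(int suffix)
        {
            return "/dir" + suffix;
        }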
Example #9
        public virtual void TestInitializeBKSharedEdits()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                HAUtil.SetAllowStandbyReads(conf, true);
                conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                MiniDFSNNTopology topology = MiniDFSNNTopology.SimpleHATopology();
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology)
                              .NumDataNodes(0).Build();
                cluster.WaitActive();
                // Shutdown and clear the current filebased shared dir.
                cluster.ShutdownNameNodes();
                FilePath sharedDir = new FilePath(cluster.GetSharedEditsDir(0, 1));
                NUnit.Framework.Assert.IsTrue("Initial Shared edits dir not fully deleted",
                                              FileUtil.FullyDelete(sharedDir));
                // Check namenodes should not start without shared dir.
                AssertCanNotStartNamenode(cluster, 0);
                AssertCanNotStartNamenode(cluster, 1);
                // Configure bkjm as new shared edits dir in both namenodes
                Configuration nn1Conf = cluster.GetConfiguration(0);
                Configuration nn2Conf = cluster.GetConfiguration(1);
                nn1Conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                            BKJMUtil.CreateJournalURI("/initializeSharedEdits").ToString());
                nn2Conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                            BKJMUtil.CreateJournalURI("/initializeSharedEdits").ToString());
                BKJMUtil.AddJournalManagerDefinition(nn1Conf);
                BKJMUtil.AddJournalManagerDefinition(nn2Conf);
                // Initialize the BKJM shared edits.
                NUnit.Framework.Assert.IsFalse(NameNode.InitializeSharedEdits(nn1Conf));
                // NameNode should be able to start and should be in sync with BKJM as
                // shared dir
                AssertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
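AssertCanNotStartNamenode is referenced but not shown. A sketch assuming a MiniDFSCluster.RestartNameNode overload that skips waiting for the cluster to settle; both the overload and the failure handling are assumptions:

        private void AssertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex)
        {
            try
            {
                cluster.RestartNameNode(nnIndex, false);
                NUnit.Framework.Assert.Fail("NN" + nnIndex
                                            + " should not have started without a shared edits dir");
            }
            catch (IOException ioe)
            {
                // Expected: the NN refuses to start once its shared storage is gone.
                Log.Info("Got expected exception", ioe);
            }
        }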
        /// <exception cref="System.Exception"/>
        public virtual void TestInvalidateBlock()
        {
            Configuration conf = new Configuration();

            HAUtil.SetAllowStandbyReads(conf, true);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                NameNode   nn1 = cluster.GetNameNode(0);
                NameNode   nn2 = cluster.GetNameNode(1);
                FileSystem fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
                Sharpen.Thread.Sleep(1000);
                Log.Info("==================================");
                DFSTestUtil.WriteFile(fs, TestFilePath, TestFileData);
                // Have to force an edit log roll so that the standby catches up
                nn1.GetRpcServer().RollEditLog();
                Log.Info("==================================");
                // delete the file
                fs.Delete(TestFilePath, false);
                BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
                nn1.GetRpcServer().RollEditLog();
                // standby nn doesn't need to invalidate blocks.
                NUnit.Framework.Assert.AreEqual(0,
                    nn2.GetNamesystem().GetBlockManager().GetPendingDeletionBlocksCount());
                cluster.TriggerHeartbeats();
                cluster.TriggerBlockReports();
                // standby nn doesn't need to invalidate blocks.
                NUnit.Framework.Assert.AreEqual(0,
                    nn2.GetNamesystem().GetBlockManager().GetPendingDeletionBlocksCount());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #11
        /// <exception cref="System.Exception"/>
        internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
        {
            Path          file1 = new Path("/checkpoint.dat");
            Path          file2 = new Path("/checkpoint2.dat");
            Path          file3 = new Path("/backup.dat");
            Configuration conf  = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            short replication  = (short)conf.GetInt("dfs.replication", 3);
            int   numDatanodes = Math.Max(3, replication);

            conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
            // disable block scanner
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                //
                // verify that 'format' really blew away all pre-existing files
                //
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                //
                // Create file1
                //
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
            FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");

            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir),
                                                            ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file1 still exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes)
                              .Format(false).Build();
                fileSys = cluster.GetFileSystem();
                // check that file1 still exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                fileSys.Delete(file1, true);
                // create new file file2
                fileSys.Mkdirs(file2);
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
                for (int i = 0; i < 10; i++)
                {
                    fileSys.Mkdirs(new Path("file_" + i));
                }
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                // Try BackupNode operations
                IPEndPoint addr = backup.GetNameNodeAddress();
                // Write to BN
                FileSystem bnFS = FileSystem.Get(
                    new Path("hdfs://" + NetUtils.GetHostPortString(addr)).ToUri(), conf);
                bool canWrite = true;
                try
                {
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(
                        bnFS, file3, replication);
                }
                catch (IOException eio)
                {
                    Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
                    canWrite = false;
                }
                NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite
                                               );
                // Reads are allowed for BackupNode, but not for CheckpointNode
                bool canRead = true;
                try
                {
                    bnFS.Exists(file2);
                }
                catch (IOException eio)
                {
                    Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
                    canRead = false;
                }
                NUnit.Framework.Assert.AreEqual("Reads to BackupNode are allowed, but not CheckpointNode."
                                                , canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication
                                                                                );
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication
                                                                                );
                // should also be on BN right away
                NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode", op != HdfsServerConstants.StartupOption
                                              .Backup || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) !=
                                              null);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                throw new Exception("Error in TestBackupNode", e);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir),
                                                            ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file2 exists and
                // file1 does not exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                // verify that file2 exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode: ", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
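WaitCheckpointDone is called repeatedly above but not listed. A polling sketch reusing the storage accessors seen in the next example; the 1s interval and the simple unbounded loop are assumptions:

        internal virtual void WaitCheckpointDone(MiniDFSCluster cluster, long txid)
        {
            long checkpointTxId;
            do
            {
                Sharpen.Thread.Sleep(1000);
                // The NN storage records the txid of the last checkpoint it accepted.
                checkpointTxId = cluster.GetNameNode().GetFSImage().GetStorage()
                                     .GetMostRecentCheckpointTxId();
            }
            while (checkpointTxId < txid);
        }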
Example #12
        public virtual void TestBackupNodeTailsEdits()
        {
            Configuration conf = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                backup  = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                BackupImage bnImage = (BackupImage)backup.GetFSImage();
                TestBNInSync(cluster, backup, 1);
                // Force a roll -- BN should roll with NN.
                NameNode          nn    = cluster.GetNameNode();
                NamenodeProtocols nnRpc = nn.GetRpcServer();
                nnRpc.RollEditLog();
                NUnit.Framework.Assert.AreEqual(bnImage.GetEditLog().GetCurSegmentTxId(),
                                                nn.GetFSImage().GetEditLog().GetCurSegmentTxId());
                // BN should stay in sync after roll
                TestBNInSync(cluster, backup, 2);
                long nnImageBefore = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                // BN checkpoint
                backup.DoCheckpoint();
                // NN should have received a new image
                long nnImageAfter = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                NUnit.Framework.Assert.IsTrue("nn should have received new checkpoint. before: "
                                              + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
                // BN should stay in sync after checkpoint
                TestBNInSync(cluster, backup, 3);
                // Stop BN
                Storage.StorageDirectory sd = bnImage.GetStorage().GetStorageDir(0);
                backup.Stop();
                backup = null;
                // When shutting down the BN, it shouldn't finalize logs that are
                // still open on the NN
                FileJournalManager.EditLogFile editsLog = FSImageTestUtil.FindLatestEditsLog(sd);
                NUnit.Framework.Assert.AreEqual(editsLog.GetFirstTxId(),
                                                nn.GetFSImage().GetEditLog().GetCurSegmentTxId());
                NUnit.Framework.Assert.IsTrue("Should not have finalized " + editsLog,
                                              editsLog.IsInProgress());
                // do some edits
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(new Path("/edit-while-bn-down")));
                // start a new backup node
                backup = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                TestBNInSync(cluster, backup, 4);
                NUnit.Framework.Assert.IsNotNull(
                    backup.GetNamesystem().GetFileInfo("/edit-while-bn-down", false));
            }
            finally
            {
                Log.Info("Shutting down...");
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            AssertStorageDirsMatch(cluster.GetNameNode(), backup);
        }
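TestBNInSync is the remaining helper this example relies on. A sketch: create a marker directory through the active NN, then poll the BackupNode's namesystem (which tails the NN's edits) until it appears. The marker path and timing are assumptions:

        private void TestBNInSync(MiniDFSCluster cluster, BackupNode backup, int testIdx)
        {
            string marker = "/bn-in-sync-" + testIdx;  // hypothetical marker path
            NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Mkdirs(new Path(marker)));
            for (int tries = 0; ; tries++)
            {
                if (backup.GetNamesystem().GetFileInfo(marker, false) != null)
                {
                    return;  // the BN has caught up
                }
                if (tries >= 30)
                {
                    NUnit.Framework.Assert.Fail(marker + " never appeared on the BackupNode");
                }
                Sharpen.Thread.Sleep(500);
            }
        }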