/// <summary>
/// Upgrading from a release-0.22 image whose VERSION files carry a bogus
/// image MD5 must fail, and exactly one corrupt-MD5 error must be logged.
/// </summary>
public virtual void TestUpgradeFromCorruptRel22Image()
        {
            UnpackStorage(Hadoop22Image, HadoopDfsDirTxt);
            // Overwrite the md5 stored in the VERSION files
            FilePath baseDir = new FilePath(MiniDFSCluster.GetBaseDirectory());

            FSImageTestUtil.CorruptVersionFile(new FilePath(baseDir, "name1/current/VERSION")
                                               , "imageMD5Digest", "22222222222222222222222222222222");
            FSImageTestUtil.CorruptVersionFile(new FilePath(baseDir, "name2/current/VERSION")
                                               , "imageMD5Digest", "22222222222222222222222222222222");
            // Attach our own log appender so we can verify output
            LogVerificationAppender appender = new LogVerificationAppender();
            Logger logger = Logger.GetRootLogger();

            logger.AddAppender(appender);
            try
            {
                // Upgrade should now fail
                UpgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).NumDataNodes(4), null);
                NUnit.Framework.Assert.Fail("Upgrade did not fail with bad MD5");
            }
            catch (IOException ioe)
            {
                string msg = StringUtils.StringifyException(ioe);
                if (!msg.Contains("Failed to load an FSImage file"))
                {
                    // Not the failure we were provoking -- surface it unchanged.
                    throw;
                }
                int md5failures = appender.CountExceptionsWithMessage(" is corrupt with MD5 checksum of "
                                                                      );
                NUnit.Framework.Assert.AreEqual("Upgrade did not fail with bad MD5", 1, md5failures
                                                );
            }
            finally
            {
                // Detach our appender so it cannot leak into (or double-count
                // events in) later tests that share the root logger.
                logger.RemoveAppender(appender);
            }
        }
 /// <summary>
 /// Builds the cluster from the builder's settings and starts every
 /// journal node, recording each in <c>nodes</c>.
 /// </summary>
 /// <exception cref="System.IO.IOException"/>
 private MiniJournalCluster(MiniJournalCluster.Builder b)
 {
     Log.Info("Starting MiniJournalCluster with " + b.numJournalNodes + " journal nodes"
              );
     // Fall back to the shared MiniDFSCluster base dir when no explicit
     // base dir was configured on the builder.
     this.baseDir = b.baseDir != null
         ? new FilePath(b.baseDir)
         : new FilePath(MiniDFSCluster.GetBaseDirectory());
     nodes = new MiniJournalCluster.JNInfo[b.numJournalNodes];
     for (int idx = 0; idx < b.numJournalNodes; idx++)
     {
         if (b.format)
         {
             // Wipe leftover state so the node starts from a clean slate.
             FilePath storageDir = GetStorageDir(idx);
             Log.Debug("Fully deleting JN directory " + storageDir);
             FileUtil.FullyDelete(storageDir);
         }
         JournalNode node = new JournalNode();
         node.SetConf(CreateConfForNode(b, idx));
         node.Start();
         nodes[idx] = new MiniJournalCluster.JNInfo(node);
     }
 }
// Beispiel #3 ("Example #3" -- snippet separator from the source listing)
        /// <summary>
        /// A NameNode whose on-disk storage has been reformatted (and thus has
        /// a new namespace ID) must be rejected when it tries to write to
        /// quorum journals still formatted under the old namespace.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestMismatchedNNIsRejected()
        {
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, MiniDFSCluster.GetBaseDirectory() +
                     "/TestNNWithQJM/image");
            // Remember the local default so the reformat below can target the
            // on-disk edits dir only, leaving the quorum journal untouched.
            string defaultEditsDir = conf.Get(DFSConfigKeys.DfsNamenodeEditsDirKey);

            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI("myjournal"
                                                                                   ).ToString());
            // Start a NN, so the storage is formatted -- both on-disk
            // and QJM.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs
                                         (false).Build();

            cluster.Shutdown();
            // Reformat just the on-disk portion
            Configuration onDiskOnly = new Configuration(conf);

            onDiskOnly.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, defaultEditsDir);
            NameNode.Format(onDiskOnly);
            // Start the NN - should fail because the JNs are still formatted
            // with the old namespace ID.
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs(false
                                                                                             ).Format(false).Build();
                NUnit.Framework.Assert.Fail("New NN with different namespace should have been rejected"
                                            );
            }
            catch (IOException ioe)
            {
                GenericTestUtils.AssertExceptionContains("Unable to start log segment 1: too few journals"
                                                         , ioe);
            }
        }
// Beispiel #4 ("Example #4" -- snippet separator from the source listing)
        /// <summary>
        /// Writes edits through the quorum journal and checks that they
        /// survive two consecutive NameNode restarts.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestLogAndRestart()
        {
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, MiniDFSCluster.GetBaseDirectory() +
                     "/TestNNWithQJM/image");
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI("myjournal"
                                                                                   ).ToString());
            MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
                                            .NumDataNodes(0)
                                            .ManageNameDfsDirs(false)
                                            .Build();
            try
            {
                dfsCluster.GetFileSystem().Mkdirs(TestPath);
                // First restart: the single edit must be persisted and replayed.
                dfsCluster.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetFileSystem().Exists(TestPath));
                dfsCluster.GetFileSystem().Mkdirs(TestPath2);
                // Second restart: both edits must still be visible.
                dfsCluster.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetFileSystem().Exists(TestPath));
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetFileSystem().Exists(TestPath2));
            }
            finally
            {
                dfsCluster.Shutdown();
            }
        }
        /// <summary>
        /// Tests that configuring the rpc port to the same value as the web
        /// port causes an exception when the NameNode starts (the test method
        /// is expected to fail inside the try block; only cleanup runs after).
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestThatMatchingRPCandHttpPortsThrowException()
        {
            NameNode nameNode = null;

            try
            {
                Configuration config = new HdfsConfiguration();
                FilePath nameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");
                config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
                // Pick a random port and deliberately assign it to BOTH the RPC
                // and the HTTP endpoint; NameNode construction should fail.
                Random random = new Random();
                int sharedPort = 30000 + random.Next(30000);
                FileSystem.SetDefaultUri(config, "hdfs://localhost:" + sharedPort);
                config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:" + sharedPort);
                DFSTestUtil.FormatNameNode(config);
                nameNode = new NameNode(config);
            }
            finally
            {
                if (nameNode != null)
                {
                    nameNode.Stop();
                }
            }
        }
        /// <summary>
        /// Regression test for HDFS-3119: when a file created with replication 2
        /// has its replication lowered to 1 before being closed, the block
        /// manager must end up with exactly one live replica for the block.
        /// </summary>
        public virtual void TestInvalidateOverReplicatedBlock()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                FSNamesystem       namesystem = cluster.GetNamesystem();
                BlockManager       bm         = namesystem.GetBlockManager();
                FileSystem         fs         = cluster.GetFileSystem();
                // NOTE(review): the child "/foo1" is absolute, so the base-dir
                // parent is likely discarded by Path resolution -- confirm
                // against Path(parent, child) semantics.
                Path               p          = new Path(MiniDFSCluster.GetBaseDirectory(), "/foo1");
                FSDataOutputStream @out       = fs.Create(p, (short)2);
                @out.WriteBytes("HDFS-3119: " + p);
                // Flush the written bytes to the pipeline before lowering the
                // replication, then close, leaving an over-replicated block.
                @out.Hsync();
                fs.SetReplication(p, (short)1);
                @out.Close();
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, p);
                NUnit.Framework.Assert.AreEqual("Expected only one live replica for the block", 1
                                                , bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        /// <summary>
        /// HDFS-3013: NameNode format command doesn't pick up
        /// dfs.namenode.name.dir.NameServiceId configuration.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestGenericKeysForNameNodeFormat()
        {
            Configuration config = new HdfsConfiguration();

            // Set ephemeral ports
            config.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "127.0.0.1:0");
            config.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:0");
            config.Set(DFSConfigKeys.DfsNameservices, "ns1");
            // Point the nameservice-scoped name-dir key at a clean directory.
            FilePath nameServiceDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "testGenericKeysForNameNodeFormat"
                                        );
            if (nameServiceDir.Exists())
            {
                FileUtil.FullyDelete(nameServiceDir);
            }
            config.Set(DFSConfigKeys.DfsNamenodeNameDirKey + ".ns1", nameServiceDir.GetAbsolutePath());
            // Format and verify the right dir is formatted.
            DFSTestUtil.FormatNameNode(config);
            GenericTestUtils.AssertExists(nameServiceDir);
            // Ensure that the same dir is picked up by the running NN
            NameNode nn = new NameNode(config);

            nn.Stop();
        }
        /// <summary>
        /// Creates a fresh base directory holding three name dirs (name1..3),
        /// then configures the NameNode to keep its image in name1+name2, its
        /// edits in all three, and enables the storage-dir restore feature.
        /// </summary>
        /// <exception cref="System.IO.IOException">if the dirs cannot be (re)created</exception>
        public virtual void SetUpNameDirs()
        {
            config  = new HdfsConfiguration();
            hdfsDir = new FilePath(MiniDFSCluster.GetBaseDirectory()).GetCanonicalFile();
            if (hdfsDir.Exists() && !FileUtil.FullyDelete(hdfsDir))
            {
                throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
            }
            hdfsDir.Mkdirs();
            path1 = new FilePath(hdfsDir, "name1");
            path2 = new FilePath(hdfsDir, "name2");
            path3 = new FilePath(hdfsDir, "name3");
            path1.Mkdir();
            path2.Mkdir();
            path3.Mkdir();
            if (!path2.Exists() || !path3.Exists() || !path1.Exists())
            {
                throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.GetAbsolutePath
                                          ());
            }
            // Plain concatenation; the original wrapped this in a string
            // constructor, but C# has no string(string) overload, so the
            // redundant copy was invalid anyway.
            string dfs_name_dir = path1.GetPath() + "," + path2.GetPath();

            System.Console.Out.WriteLine("configuring hdfsdir is " + hdfsDir.GetAbsolutePath(
                                             ) + "; dfs_name_dir = " + dfs_name_dir + ";dfs_name_edits_dir(only)=" + path3.GetPath
                                             ());
            config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, dfs_name_dir);
            config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, dfs_name_dir + "," + path3.GetPath
                           ());
            config.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, new FilePath(hdfsDir, "secondary"
                                                                               ).GetPath());
            FileSystem.SetDefaultUri(config, "hdfs://" + NameNodeHost + "0");
            config.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
            // set the restore feature on
            config.SetBoolean(DFSConfigKeys.DfsNamenodeNameDirRestoreKey, true);
        }
// Beispiel #9 ("Example #9" -- snippet separator from the source listing)
 /// <summary>
 /// Configures a single-NN HA namespace (ns1/nn1) whose name dir is set
 /// only through the nameservice+NN-scoped key; the generic key is
 /// removed so the scoped one must be resolved.
 /// </summary>
 private void InitConfig()
 {
     // One nameservice "ns1" containing a single NameNode "nn1".
     conf.Set(DFSConfigKeys.DfsNameserviceId, "ns1");
     conf.Set(DFSConfigKeys.DfsHaNamenodesKeyPrefix + ".ns1", "nn1");
     conf.Set(DFSConfigKeys.DfsHaNamenodeIdKey, "nn1");
     string scopedNameDir = MiniDFSCluster.GetBaseDirectory() + "1";
     conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey + ".ns1.nn1", scopedNameDir);
     conf.Unset(DFSConfigKeys.DfsNamenodeNameDirKey);
 }
// Beispiel #10 ("Example #10" -- snippet separator from the source listing)
        /// <summary>Recursively removes the shared MiniDFSCluster base directory.</summary>
        /// <exception cref="System.IO.IOException">if deletion fails</exception>
        public virtual void CleanupCluster()
        {
            FilePath clusterBaseDir = new FilePath(MiniDFSCluster.GetBaseDirectory()).GetCanonicalFile();

            System.Console.Out.WriteLine("cleanupCluster deleting " + clusterBaseDir);
            if (clusterBaseDir.Exists() && !FileUtil.FullyDelete(clusterBaseDir))
            {
                throw new IOException("Could not delete hdfs directory '" + clusterBaseDir + "'");
            }
        }
        /// <summary>
        /// Formats a local name dir and runs every NNThroughputBenchmark
        /// operation ("-op all") against it.
        /// </summary>
        public virtual void TestNNThroughput()
        {
            Configuration benchConf = new HdfsConfiguration();
            FilePath imageDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");

            benchConf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, imageDir.GetAbsolutePath());
            FileSystem.SetDefaultUri(benchConf, "hdfs://localhost:" + 0);
            benchConf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
            DFSTestUtil.FormatNameNode(benchConf);
            string[] benchArgs = new string[] { "-op", "all" };
            NNThroughputBenchmark.RunBenchmark(benchConf, Arrays.AsList(benchArgs));
        }
        /// <summary>
        /// Builds a configuration with two local dirs (name1,name2) serving as
        /// both image and edits dirs, ephemeral HTTP ports, and permission
        /// checking disabled.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        private Configuration GetConf()
        {
            string storageRoot = MiniDFSCluster.GetBaseDirectory();
            string nameDirUris = Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(storageRoot, "name1"))
                                 + ","
                                 + Org.Apache.Hadoop.Hdfs.Server.Common.Util.FileAsURI(new FilePath(storageRoot, "name2"));
            Configuration result = new HdfsConfiguration();

            FileSystem.SetDefaultUri(result, "hdfs://localhost:0");
            result.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
            result.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDirUris);
            result.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameDirUris);
            result.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
            result.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
            return(result);
        }
// Beispiel #13 ("Example #13" -- snippet separator from the source listing)
        /// <summary>
        /// Starts a fresh JournalNode on a clean edits dir, formats one
        /// uniquely named journal on it, and opens an IPC logger channel.
        /// </summary>
        public virtual void Setup()
        {
            FilePath jnEditsDir = new FilePath(MiniDFSCluster.GetBaseDirectory() + FilePath.separator
                                               + "TestJournalNode");

            // Start from a clean slate so earlier runs cannot interfere.
            FileUtil.FullyDelete(jnEditsDir);
            conf.Set(DFSConfigKeys.DfsJournalnodeEditsDirKey, jnEditsDir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsJournalnodeRpcAddressKey, "0.0.0.0:0");
            jn = new JournalNode();
            jn.SetConf(conf);
            jn.Start();
            // A unique journal id keeps runs from colliding with one another.
            journalId = "test-journalid-" + GenericTestUtils.UniqueSequenceId();
            journal = jn.GetOrCreateJournal(journalId);
            journal.Format(FakeNsinfo);
            ch = new IPCLoggerChannel(conf, FakeNsinfo, journalId, jn.GetBoundIpcAddress());
        }
// Beispiel #14 ("Example #14" -- snippet separator from the source listing)
        /// <summary>
        /// Verifies that FSNamesystem.Clear() drops all outstanding leases.
        /// </summary>
        public virtual void TestFSNamespaceClearLeases()
        {
            Configuration config = new HdfsConfiguration();
            FilePath imageDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");

            config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, imageDir.GetAbsolutePath());
            NameNode.InitMetrics(config, HdfsServerConstants.NamenodeRole.Namenode);
            DFSTestUtil.FormatNameNode(config);
            FSNamesystem namesystem = FSNamesystem.LoadFromDisk(config);
            LeaseManager leases = namesystem.GetLeaseManager();

            leases.AddLease("client1", "importantFile");
            NUnit.Framework.Assert.AreEqual(1, leases.CountLease());
            // Clearing the namespace must also wipe the lease state.
            namesystem.Clear();
            leases = namesystem.GetLeaseManager();
            NUnit.Framework.Assert.AreEqual(0, leases.CountLease());
        }
        /// <summary>
        /// A DataNode configured with two data dirs, one of which is made
        /// inaccessible, should still start (one failed volume is tolerated)
        /// and register storage only for the healthy directory.
        /// </summary>
        public virtual void TestValidVolumesAtStartup()
        {
            // Relies on POSIX permission failures; skipped on Windows.
            Assume.AssumeTrue(!Runtime.GetProperty("os.name").StartsWith("Windows"));
            // Make sure no DNs are running.
            cluster.ShutdownDataNodes();
            // Bring up a datanode with two default data dirs, but with one bad one.
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            // We use subdirectories 0 and 1 in order to have only a single
            // data dir's parent inject a failure.
            FilePath tld            = new FilePath(MiniDFSCluster.GetBaseDirectory(), "badData");
            FilePath dataDir1       = new FilePath(tld, "data1");
            FilePath dataDir1Actual = new FilePath(dataDir1, "1");

            dataDir1Actual.Mkdirs();
            // Force an IOE to occur on one of the dfs.data.dir.
            FilePath dataDir2 = new FilePath(tld, "data2");

            PrepareDirToFail(dataDir2);
            FilePath dataDir2Actual = new FilePath(dataDir2, "2");

            // Start one DN, with manually managed DN dir
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir1Actual.GetPath() + "," + dataDir2Actual
                     .GetPath());
            cluster.StartDataNodes(conf, 1, false, null, null);
            cluster.WaitActive();
            try
            {
                NUnit.Framework.Assert.IsTrue("The DN should have started up fine.", cluster.IsDataNodeUp
                                                  ());
                DataNode dn = cluster.GetDataNodes()[0];
                string   si = DataNodeTestUtils.GetFSDataset(dn).GetStorageInfo();
                NUnit.Framework.Assert.IsTrue("The DN should have started with this directory", si
                                              .Contains(dataDir1Actual.GetPath()));
                NUnit.Framework.Assert.IsFalse("The DN shouldn't have a bad directory.", si.Contains
                                                   (dataDir2Actual.GetPath()));
            }
            finally
            {
                cluster.ShutdownDataNodes();
                // Restore permissions so the failed dir can be cleaned up later
                // (PrepareDirToFail presumably revoked them -- confirm).
                FileUtil.Chmod(dataDir2.ToString(), "755");
            }
        }
        /// <summary>
        /// Starts a default (3-node) MiniJournalCluster and sanity-checks its
        /// quorum URI and the first node's configured edits directory.
        /// </summary>
        public virtual void TestStartStop()
        {
            Configuration conf = new Configuration();
            MiniJournalCluster journalCluster = new MiniJournalCluster.Builder(conf).Build();

            try
            {
                URI journalUri = journalCluster.GetQuorumJournalURI("myjournal");
                string[] hosts = journalUri.GetAuthority().Split(";");
                NUnit.Framework.Assert.AreEqual(3, hosts.Length);
                // The first node's edits dir should live under the shared base dir.
                JournalNode firstNode = journalCluster.GetJournalNode(0);
                string editsDir = firstNode.GetConf().Get(DFSConfigKeys.DfsJournalnodeEditsDirKey);
                NUnit.Framework.Assert.AreEqual(new FilePath(MiniDFSCluster.GetBaseDirectory() +
                                                             "journalnode-0").GetAbsolutePath(), editsDir);
            }
            finally
            {
                journalCluster.Shutdown();
            }
        }
// Beispiel #17 ("Example #17" -- snippet separator from the source listing)
 /// <summary>
 /// Builds a fresh HdfsConfiguration rooted in a clean base directory:
 /// name/data/secondary dirs underneath it, with all daemon addresses
 /// bound to ephemeral ports.
 /// </summary>
 /// <exception cref="System.IO.IOException">if the old base dir cannot be removed</exception>
 public virtual void SetUp()
 {
     config  = new HdfsConfiguration();
     hdfsDir = new FilePath(MiniDFSCluster.GetBaseDirectory());
     if (hdfsDir.Exists() && !FileUtil.FullyDelete(hdfsDir))
     {
         throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
     }
     Log.Info("--hdfsdir is " + hdfsDir.GetAbsolutePath());
     // Name dir must be a URI; the data dir takes a plain path.
     config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Util.FileAsURI(new FilePath(hdfsDir
                                                                                 , "name")).ToString());
     config.Set(DFSConfigKeys.DfsDatanodeDataDirKey, new FilePath(hdfsDir, "data").GetPath
                    ());
     // Port 0 -> let the OS pick free ports for every datanode endpoint.
     config.Set(DFSConfigKeys.DfsDatanodeAddressKey, "0.0.0.0:0");
     config.Set(DFSConfigKeys.DfsDatanodeHttpAddressKey, "0.0.0.0:0");
     config.Set(DFSConfigKeys.DfsDatanodeIpcAddressKey, "0.0.0.0:0");
     config.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, Util.FileAsURI(new FilePath
                                                                              (hdfsDir, "secondary")).ToString());
     config.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, WildcardHttpHost + "0"
                );
     FileSystem.SetDefaultUri(config, "hdfs://" + NameNodeHost + "0");
 }
        /// <summary>
        /// Tests that setting the rpc port to a different value than the web
        /// port does NOT throw an exception.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestThatDifferentRPCandHttpPortsAreOK()
        {
            Configuration conf    = new HdfsConfiguration();
            FilePath      nameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "name");

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            Random rand = new Random();

            // A few retries in case the ports we choose are in use.
            for (int i = 0; i < 5; ++i)
            {
                // port2 is always strictly greater than port1, so the two
                // endpoints can never collide.
                int port1 = 30000 + rand.Next(10000);
                int port2 = port1 + 1 + rand.Next(10000);
                FileSystem.SetDefaultUri(conf, "hdfs://localhost:" + port1);
                conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:" + port2);
                DFSTestUtil.FormatNameNode(conf);
                NameNode nameNode = null;
                try
                {
                    nameNode = new NameNode(conf);
                    // should be OK!
                    break;
                }
                catch (BindException)
                {
                    // Port in use? Try another.
                    continue;
                }
                finally
                {
                    // Stop the NN whether we broke out or are retrying.
                    if (nameNode != null)
                    {
                        nameNode.Stop();
                    }
                }
            }
        }
// Beispiel #19 ("Example #19" -- snippet separator from the source listing)
        /// <summary>
        /// Test cancellation of ongoing checkpoints when failover happens
        /// mid-checkpoint.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestCheckpointCancellation()
        {
            cluster.TransitionToStandby(0);
            // Create an edit log in the shared edits dir with a lot
            // of mkdirs operations. This is solely so that the image is
            // large enough to take a non-trivial amount of time to load.
            // (only ~15MB)
            URI      sharedUri = cluster.GetSharedEditsDir(0, 1);
            FilePath sharedDir = new FilePath(sharedUri.GetPath(), "current");
            FilePath tmpDir    = new FilePath(MiniDFSCluster.GetBaseDirectory(), "testCheckpointCancellation-tmp"
                                              );
            FSNamesystem fsn = cluster.GetNamesystem(0);

            FSImageTestUtil.CreateAbortedLogWithMkdirs(tmpDir, NumDirsInLog, 3, fsn.GetFSDirectory
                                                           ().GetLastInodeId() + 1);
            string fname = NNStorage.GetInProgressEditsFileName(3);

            // Move the synthetic log into the shared dir so the standby replays it.
            new FilePath(tmpDir, fname).RenameTo(new FilePath(sharedDir, fname));
            // Checkpoint as fast as we can, in a tight loop.
            cluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey,
                                               0);
            cluster.RestartNameNode(1);
            nn1 = cluster.GetNameNode(1);
            cluster.TransitionToActive(0);
            bool canceledOne = false;

            // Bounce the active/standby roles (up to 10 rounds) until at least
            // one in-flight checkpoint gets canceled by a failover.
            for (int i = 0; i < 10 && !canceledOne; i++)
            {
                DoEdits(i * 10, i * 10 + 10);
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
                cluster.TransitionToStandby(1);
                cluster.TransitionToActive(0);
                canceledOne = StandbyCheckpointer.GetCanceledCount() > 0;
            }
            NUnit.Framework.Assert.IsTrue(canceledOne);
        }
// Beispiel #20 ("Example #20" -- snippet separator from the source listing)
 /// <summary>
 /// Empties the MiniDFSCluster base directory, keeping the directory
 /// itself in place.
 /// </summary>
 public virtual void CleanUp()
 {
     FilePath clusterBaseDir = new FilePath(MiniDFSCluster.GetBaseDirectory());
     FileUtil.FullyDeleteContents(clusterBaseDir);
 }
// Beispiel #21 ("Example #21" -- snippet separator from the source listing)
 /// <summary>
 /// Points the NameNode image at a scratch "namenode" dir, initializes
 /// NN metrics, and resets the per-test filesystem state.
 /// </summary>
 public virtual void SetUp()
 {
     conf = new Configuration();
     FilePath nameDir = new FilePath(MiniDFSCluster.GetBaseDirectory(), "namenode");
     conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Util.FileAsURI(nameDir).ToString());
     NameNode.InitMetrics(conf, HdfsServerConstants.NamenodeRole.Namenode);
     fs = null;
     fsIsReady = true;
 }
        /// <summary>
        /// Exercises a rolling upgrade across two NameNodes sharing one quorum
        /// journal: prepare on cluster 1, take over with cluster 2, survive
        /// restarts (including a rejected "-upgrade" restart), then finalize.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRollingUpgradeWithQJM()
        {
            string   nnDirPrefix = MiniDFSCluster.GetBaseDirectory() + "/nn/";
            FilePath nn1Dir      = new FilePath(nnDirPrefix + "image1");
            FilePath nn2Dir      = new FilePath(nnDirPrefix + "image2");

            Log.Info("nn1Dir=" + nn1Dir);
            Log.Info("nn2Dir=" + nn2Dir);
            Configuration      conf = new HdfsConfiguration();
            MiniJournalCluster mjc  = new MiniJournalCluster.Builder(conf).Build();

            SetConf(conf, nn1Dir, mjc);
            {
                // Start the cluster once to generate the dfs dirs
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs
                                             (false).CheckExitOnShutdown(false).Build();
                // Shutdown the cluster before making a copy of the namenode dir to release
                // all file locks, otherwise, the copy will fail on some platforms.
                cluster.Shutdown();
            }
            MiniDFSCluster cluster2 = null;

            try
            {
                // Start a second NN pointed to the same quorum.
                // We need to copy the image dir from the first NN -- or else
                // the new NN will just be rejected because of Namespace mismatch.
                FileUtil.FullyDelete(nn2Dir);
                FileUtil.Copy(nn1Dir, FileSystem.GetLocal(conf).GetRaw(), new Path(nn2Dir.GetAbsolutePath
                                                                                       ()), false, conf);
                // Start the cluster again
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                    false).ManageNameDfsDirs(false).CheckExitOnShutdown(false).Build();
                Path foo = new Path("/foo");
                Path bar = new Path("/bar");
                Path baz = new Path("/baz");
                RollingUpgradeInfo info1;
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.Mkdirs(foo);
                    //start rolling upgrade
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    info1 = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                    Log.Info("START\n" + info1);
                    //query rolling upgrade
                    NUnit.Framework.Assert.AreEqual(info1, dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                              .Query));
                    dfs.Mkdirs(bar);
                    cluster.Shutdown();
                }
                // cluster2 takes over QJM
                Configuration conf2 = SetConf(new Configuration(), nn2Dir, mjc);
                cluster2 = new MiniDFSCluster.Builder(conf2).NumDataNodes(0).Format(false).ManageNameDfsDirs
                               (false).Build();
                DistributedFileSystem dfs2 = cluster2.GetFileSystem();
                // Check that cluster2 sees the edits made on cluster1
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsFalse(dfs2.Exists(baz));
                //query rolling upgrade in cluster2
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                dfs2.Mkdirs(baz);
                Log.Info("RESTART cluster 2");
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
                //restart cluster with -upgrade should fail.
                try
                {
                    cluster2.RestartNameNode("-upgrade");
                }
                catch (IOException e)
                {
                    // A classic upgrade is rejected while a rolling upgrade is
                    // still in progress.
                    Log.Info("The exception is expected.", e);
                }
                Log.Info("RESTART cluster 2 again");
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
                //finalize rolling upgrade
                RollingUpgradeInfo finalize = dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                  .Finalize);
                NUnit.Framework.Assert.IsTrue(finalize.IsFinalized());
                Log.Info("RESTART cluster 2 with regular startup option");
                cluster2.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Regular
                                                           );
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
            }
            finally
            {
                // NOTE(review): the local MiniJournalCluster (mjc) is never
                // shut down here -- confirm whether that is intentional.
                if (cluster2 != null)
                {
                    cluster2.Shutdown();
                }
            }
        }
// Beispiel #23 ("Example #23" -- snippet separator from the source listing)
        /// <summary>
        /// Starts a second NameNode against the same journal quorum (after
        /// copying the first NN's image dir) and verifies it takes over as
        /// writer: the new NN sees the old edits, and the fenced old NN
        /// aborts on its next write attempt.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestNewNamenodeTakesOverWriter()
        {
            FilePath nn1Dir = new FilePath(MiniDFSCluster.GetBaseDirectory() + "/TestNNWithQJM/image-nn1"
                                           );
            FilePath nn2Dir = new FilePath(MiniDFSCluster.GetBaseDirectory() + "/TestNNWithQJM/image-nn2"
                                           );

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nn1Dir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI("myjournal"
                                                                                   ).ToString());
            // Start the cluster once to generate the dfs dirs
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs
                                         (false).CheckExitOnShutdown(false).Build();

            // Shutdown the cluster before making a copy of the namenode dir
            // to release all file locks, otherwise, the copy will fail on
            // some platforms.
            cluster.Shutdown();
            try
            {
                // Start a second NN pointed to the same quorum.
                // We need to copy the image dir from the first NN -- or else
                // the new NN will just be rejected because of Namespace mismatch.
                FileUtil.FullyDelete(nn2Dir);
                FileUtil.Copy(nn1Dir, FileSystem.GetLocal(conf).GetRaw(), new Path(nn2Dir.GetAbsolutePath
                                                                                       ()), false, conf);
                // Start the cluster again
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageNameDfsDirs
                              (false).CheckExitOnShutdown(false).Build();
                cluster.GetFileSystem().Mkdirs(TestPath);
                Configuration conf2 = new Configuration();
                conf2.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nn2Dir.GetAbsolutePath());
                conf2.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI("myjournal"
                                                                                        ).ToString());
                MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2).NumDataNodes(0).Format
                                              (false).ManageNameDfsDirs(false).Build();
                // Check that the new cluster sees the edits made on the old cluster
                try
                {
                    NUnit.Framework.Assert.IsTrue(cluster2.GetFileSystem().Exists(TestPath));
                }
                finally
                {
                    cluster2.Shutdown();
                }
                // Check that, if we try to write to the old NN
                // that it aborts.
                try
                {
                    cluster.GetFileSystem().Mkdirs(new Path("/x"));
                    NUnit.Framework.Assert.Fail("Did not abort trying to write to a fenced NN");
                }
                catch (RemoteException re)
                {
                    GenericTestUtils.AssertExceptionContains("Could not sync enough journals to persistent storage"
                                                             , re);
                }
            }
            finally
            {
                // Always tear the (restarted) first cluster down; the original
                // code left this finally block empty and leaked the cluster.
                cluster.Shutdown();
            }
        }