Example #1
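        /// <summary>
        /// Restarts both NameNodes, transitions nn0 to active, and verifies that a
        /// directory created through the failover FileSystem is visible on the standby.
        /// </summary>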
        /// <exception cref="Org.Apache.Hadoop.HA.ServiceFailedException"/>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.URISyntaxException"/>
        /// <exception cref="System.Exception"/>
        private void AssertCanStartHANameNodes(MiniDFSCluster cluster, Configuration conf,
                                               string path)
        {
            // Now should be able to start both NNs. Pass "false" here so that we don't
            // try to waitActive on all NNs, since the second NN doesn't exist yet.
            cluster.RestartNameNode(0, false);
            cluster.RestartNameNode(1, true);
            // Make sure HA is working.
            cluster.GetNameNode(0).GetRpcServer().TransitionToActive(
                new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.RequestByUser));
            FileSystem fs = null;

            try
            {
                Path newPath = new Path(path);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(newPath));
                HATestUtil.WaitForStandbyToCatchUp(cluster.GetNameNode(0), cluster.GetNameNode(1));
                NUnit.Framework.Assert.IsTrue(NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1),
                                                                          newPath.ToString(), false).IsDir());
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
            }
        }
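        /// <summary>
        /// Runs BootstrapStandby against a live active NameNode: the first run (without
        /// -skipSharedEditsCheck) is expected to fail for BKJM right after saveNamespace,
        /// the run with the flag should succeed, after which the restarted standby must
        /// catch up to the active and checkpoint at the expected transaction id.
        /// </summary>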
        public virtual void TestBootstrapStandbyWithActiveNN()
        {
            // make nn0 active
            cluster.TransitionToActive(0);
            // do ops and generate in-progress edit log data
            Configuration         confNN1 = cluster.GetConfiguration(1);
            DistributedFileSystem dfs     = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, confNN1);

            for (int i = 1; i <= 10; i++)
            {
                dfs.Mkdirs(new Path("/test" + i));
            }
            dfs.Close();
            // shutdown nn1 and delete its edit log files
            cluster.ShutdownNameNode(1);
            DeleteEditLogIfExists(confNN1);
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                  true);
            cluster.GetNameNodeRpc(0).SaveNamespace();
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                  true);
            // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
            // immediately after saveNamespace
            int rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive" }, confNN1);

            NUnit.Framework.Assert.AreEqual("Mismatches return code", 6, rc);
            // check with -skipSharedEditsCheck
            rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive", "-skipSharedEditsCheck" }, confNN1);
            NUnit.Framework.Assert.AreEqual("Mismatches return code", 0, rc);
            // Checkpoint as fast as we can, in a tight loop.
            confNN1.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            cluster.RestartNameNode(1);
            cluster.TransitionToStandby(1);
            NameNode nn0 = cluster.GetNameNode(0);

            HATestUtil.WaitForStandbyToCatchUp(nn0, cluster.GetNameNode(1));
            long expectedCheckpointTxId = NameNodeAdapter.GetNamesystem(nn0).GetFSImage().GetMostRecentCheckpointTxId();

            HATestUtil.WaitForCheckpoint(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId));
            // Should have copied over the namespace
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
        /// <summary>Test that encryption zones are properly tracked by the standby.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestEncryptionZonesTrackedOnStandby()
        {
            int  len      = 8196;
            Path dir      = new Path("/enc");
            Path dirChild = new Path(dir, "child");
            Path dirFile  = new Path(dir, "file");

            fs.Mkdir(dir, FsPermission.GetDirDefault());
            dfsAdmin0.CreateEncryptionZone(dir, TestKey);
            fs.Mkdir(dirChild, FsPermission.GetDirDefault());
            DFSTestUtil.CreateFile(fs, dirFile, len, (short)1, unchecked ((int)(0xFEED)));
            string contents = DFSTestUtil.ReadFile(fs, dirFile);

            // Failover the current standby to active.
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cluster.ShutdownNameNode(0);
            cluster.TransitionToActive(1);
            NUnit.Framework.Assert.AreEqual("Got unexpected ez path", dir.ToString(), dfsAdmin1
                                            .GetEncryptionZoneForPath(dir).GetPath().ToString());
            NUnit.Framework.Assert.AreEqual("Got unexpected ez path", dir.ToString(), dfsAdmin1
                                            .GetEncryptionZoneForPath(dirChild).GetPath().ToString());
            NUnit.Framework.Assert.AreEqual("File contents after failover were changed", contents
                                            , DFSTestUtil.ReadFile(fs, dirFile));
        }
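
The methods above reference test-fixture members (cluster, conf, fs, nn0, nn1, dfsAdmin0, dfsAdmin1, TestKey) that are defined elsewhere in the test class. Below is a minimal sketch of the kind of setup and teardown such an HA test typically uses; the builder and method names (MiniDFSCluster.Builder, MiniDFSNNTopology.SimpleHATopology, and so on) are assumptions based on the Sharpen-style naming seen above and are illustrative, not taken from this example.

        // Hypothetical fixture sketch -- names are assumptions, not part of the example above.
        private MiniDFSCluster cluster;
        private Configuration  conf;
        private FileSystem     fs;

        public virtual void SetUp()
        {
            conf = new Configuration();
            // Two NameNodes in an HA pair plus a single DataNode.
            cluster = new MiniDFSCluster.Builder(conf)
                          .NnTopology(MiniDFSNNTopology.SimpleHATopology())
                          .NumDataNodes(1)
                          .Build();
            cluster.WaitActive();
            // Make nn0 active and obtain a client-side failover FileSystem,
            // mirroring the calls used in the methods above.
            cluster.TransitionToActive(0);
            fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        }

        public virtual void TearDown()
        {
            if (fs != null)
            {
                fs.Close();
            }
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }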