Example #1
        /// <exception cref="Org.Apache.Hadoop.HA.ServiceFailedException"/>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.URISyntaxException"/>
        /// <exception cref="System.Exception"/>
        private void AssertCanStartHANameNodes(MiniDFSCluster cluster, Configuration conf, string path)
        {
            // Now should be able to start both NNs. Pass "false" here so that we don't
            // try to waitActive on all NNs, since the second NN doesn't exist yet.
            cluster.RestartNameNode(0, false);
            cluster.RestartNameNode(1, true);
            // Make sure HA is working.
            cluster.GetNameNode(0).GetRpcServer().TransitionToActive(
                new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.RequestByUser));
            FileSystem fs = null;

            try
            {
                Path newPath = new Path(path);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(newPath));
                HATestUtil.WaitForStandbyToCatchUp(cluster.GetNameNode(0), cluster.GetNameNode(1));
                NUnit.Framework.Assert.IsTrue(NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1),
                                              newPath.ToString(), false).IsDir());
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
            }
        }
Example #2
        public virtual void TestHA()
        {
            Configuration  conf    = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                fs = FileSystem.Get(WebhdfsUri, conf);
                cluster.TransitionToActive(0);
                Path dir = new Path("/test");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
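                // Fail over: stop the active NN and promote the standby; the same
                // client handle should keep working against the new active.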
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                Path dir2 = new Path("/test2");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir2));
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #3
        public virtual void SetupCluster()
        {
            conf = new Configuration();
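            // Tail edits every second and allow reads on the standby so it stays
            // closely in sync with the active.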
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            fsHelper = new FileSystemTestHelper();
            string testRoot = fsHelper.GetTestRootDir();

            testRootDir = new FilePath(testRoot).GetAbsoluteFile();
            conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri,
                     JavaKeyStoreProvider.SchemeName + "://file" + new Path(testRootDir.ToString(), "test.jks").ToUri());
            cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(1).Build();
            cluster.WaitActive();
            cluster.TransitionToActive(0);
            fs = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, conf);
            DFSTestUtil.CreateKey(TestKey, cluster, 0, conf);
            DFSTestUtil.CreateKey(TestKey, cluster, 1, conf);
            nn0       = cluster.GetNameNode(0);
            nn1       = cluster.GetNameNode(1);
            dfsAdmin0 = new HdfsAdmin(cluster.GetURI(0), conf);
            dfsAdmin1 = new HdfsAdmin(cluster.GetURI(1), conf);
            KeyProviderCryptoExtension nn0Provider = cluster.GetNameNode(0).GetNamesystem().GetProvider();

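            // Hand NN0's key provider to the DFS client so encryption operations
            // in the tests go through the same keystore.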
            fs.GetClient().SetKeyProvider(nn0Provider);
        }
Example #4
        public virtual void TestFailoverWithBK()
        {
            MiniDFSCluster cluster = null;

            try
            {
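                // Point the shared edits dir at a BookKeeper journal rather than a
                // file-based directory.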
                Configuration conf = new Configuration();
                conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey, BKJMUtil.CreateJournalURI("/hotfailover").ToString());
                BKJMUtil.AddJournalManagerDefinition(conf);
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).ManageNameDfsSharedDirs(false).Build();
                NameNode nn1 = cluster.GetNameNode(0);
                NameNode nn2 = cluster.GetNameNode(1);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                Path       p  = new Path("/testBKJMfailover");
                FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                fs.Mkdirs(p);
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue(fs.Exists(p));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #5
        public virtual void TestMultipleNamespacesConfigured()
        {
            Configuration     conf    = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster    cluster = null;
            WebHdfsFileSystem fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
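                // Register a second nameservice with fake HTTP addresses so the
                // client sees two HA namespaces.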
                DFSTestUtil.AddHAConfiguration(conf, LogicalName + "remote");
                DFSTestUtil.SetFakeHttpAddresses(conf, LogicalName + "remote");
                fs = (WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf);
                NUnit.Framework.Assert.AreEqual(2, fs.GetResolvedNNAddr().Length);
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #6
        public virtual void TestSecureHAToken()
        {
            Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
            MiniDFSCluster    cluster = null;
            WebHdfsFileSystem fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
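                // Spy on the WebHDFS filesystem so the delegation-token renew and
                // cancel calls can be verified below.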
                fs = Org.Mockito.Mockito.Spy((WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf));
                FileSystemTestHelper.AddFileSystemForTesting(WebhdfsUri, conf, fs);
                cluster.TransitionToActive(0);
                Org.Apache.Hadoop.Security.Token.Token<object> token =
                    ((Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)fs.GetDelegationToken(null));
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                token.Renew(conf);
                token.Cancel(conf);
                Org.Mockito.Mockito.Verify(fs).RenewDelegationToken(token);
                Org.Mockito.Mockito.Verify(fs).CancelDelegationToken(token);
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #7
        /// <summary>
        /// Test a cluster with even distribution, then a new empty node is added to
        /// the cluster.
        /// </summary>
        /// <remarks>
        /// Test a cluster with even distribution, then a new empty node added to
        /// the cluster. The test starts a cluster with the specified number of
        /// nodes and fills it to 30% full (with a single file replicated
        /// identically to all datanodes); it then adds one new empty node and
        /// starts balancing.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestBalancerWithHANameNodes()
        {
            Configuration conf = new HdfsConfiguration();

            TestBalancer.InitConf(conf);
            long newNodeCapacity = TestBalancer.Capacity;  // new node's capacity
            string newNodeRack = TestBalancer.Rack2;       // new node's rack

            // array of racks for original nodes in cluster
            string[] racks = new string[] { TestBalancer.Rack0, TestBalancer.Rack1 };
            // array of capacities of original nodes in cluster
            long[] capacities = new long[] { TestBalancer.Capacity, TestBalancer.Capacity };
            NUnit.Framework.Assert.AreEqual(capacities.Length, racks.Length);
            int numOfDatanodes = capacities.Length;

            MiniDFSNNTopology.NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
            nn1Conf.SetIpcPort(NameNode.DefaultPort);
            Configuration copiedConf = new Configuration(conf);

            cluster = new MiniDFSCluster.Builder(copiedConf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                      .NumDataNodes(capacities.Length).Racks(racks).SimulatedCapacities(capacities).Build();
            HATestUtil.SetFailoverConfigurations(cluster, conf);
            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(1);
                Sharpen.Thread.Sleep(500);
                client = NameNodeProxies.CreateProxy<ClientProtocol>(conf, FileSystem.GetDefaultUri(conf)).GetProxy();
                long totalCapacity = TestBalancer.Sum(capacities);
                // fill up the cluster to be 30% full
                long totalUsedSpace = totalCapacity * 3 / 10;
                TestBalancer.CreateFile(cluster, TestBalancer.filePath, totalUsedSpace / numOfDatanodes,
                                        (short)numOfDatanodes, 1);
                // start up an empty node with the same capacity and on the same rack
                cluster.StartDataNodes(conf, 1, true, null, new string[] { newNodeRack },
                                       new long[] { newNodeCapacity });
                totalCapacity += newNodeCapacity;
                TestBalancer.WaitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
                ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
                NUnit.Framework.Assert.AreEqual(1, namenodes.Count);
                NUnit.Framework.Assert.IsTrue(namenodes.Contains(HATestUtil.GetLogicalUri(cluster)));
                int r = Org.Apache.Hadoop.Hdfs.Server.Balancer.Balancer.Run(namenodes, Balancer.Parameters.Default, conf);
                NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), r);
                TestBalancer.WaitForBalancer(totalUsedSpace, totalCapacity, client, cluster, Balancer.Parameters.Default);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #8
        public virtual void TestDoesntDnsResolveLogicalURI()
        {
            FileSystem  fs            = HATestUtil.ConfigureFailoverFs(cluster, conf);
            NameService spyNS         = SpyOnNameService();
            string      logicalHost   = fs.GetUri().GetHost();
            Path        qualifiedRoot = fs.MakeQualified(new Path("/"));

            // Make a few calls against the filesystem.
            fs.GetCanonicalServiceName();
            fs.ListStatus(qualifiedRoot);
            // Ensure that the logical hostname was never resolved.
            Org.Mockito.Mockito.Verify(spyNS, Org.Mockito.Mockito.Never()).LookupAllHostAddr(
                Org.Mockito.Mockito.Eq(logicalHost));
        }
Example #9
 public virtual void Setup()
 {
     conf = new HdfsConfiguration();
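     // Enable the NN retry cache and drop the first two NN responses so that
     // retried calls exercise the cache.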
     conf.SetBoolean(DFSConfigKeys.DfsNamenodeEnableRetryCacheKey, true);
     conf.SetInt(DFSConfigKeys.DfsClientTestDropNamenodeResponseNumKey, 2);
     cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();
     cluster.WaitActive();
     cluster.TransitionToActive(namenodeId);
     HATestUtil.SetFailoverConfigurations(cluster, conf);
     filesystem = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, conf);
     namesystem = cluster.GetNamesystem(namenodeId);
     metrics    = namesystem.GetRetryCache().GetMetricsForTests();
 }
Example #10
        public virtual void TestBootstrapStandbyWithActiveNN()
        {
            // make nn0 active
            cluster.TransitionToActive(0);
            // do ops and generate in-progress edit log data
            Configuration confNN1 = cluster.GetConfiguration(1);
            DistributedFileSystem dfs = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, confNN1);

            for (int i = 1; i <= 10; i++)
            {
                dfs.Mkdirs(new Path("/test" + i));
            }
            dfs.Close();
            // shutdown nn1 and delete its edit log files
            cluster.ShutdownNameNode(1);
            DeleteEditLogIfExists(confNN1);
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                  true);
            cluster.GetNameNodeRpc(0).SaveNamespace();
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                  true);
            // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
            // immediately after saveNamespace
            int rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive" }, confNN1);

            NUnit.Framework.Assert.AreEqual("Mismatches return code", 6, rc);
            // check with -skipSharedEditsCheck
            rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive", "-skipSharedEditsCheck" }, confNN1);
            NUnit.Framework.Assert.AreEqual("Mismatches return code", 0, rc);
            // Checkpoint as fast as we can, in a tight loop.
            confNN1.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            cluster.RestartNameNode(1);
            cluster.TransitionToStandby(1);
            NameNode nn0 = cluster.GetNameNode(0);

            HATestUtil.WaitForStandbyToCatchUp(nn0, cluster.GetNameNode(1));
            long expectedCheckpointTxId = NameNodeAdapter.GetNamesystem(nn0).GetFSImage().GetMostRecentCheckpointTxId();

            HATestUtil.WaitForCheckpoint(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId));
            // Should have copied over the namespace
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
Example #11
        public virtual void TestFileContextDoesntDnsResolveLogicalURI()
        {
            FileSystem    fs           = HATestUtil.ConfigureFailoverFs(cluster, conf);
            NameService   spyNS        = SpyOnNameService();
            string        logicalHost  = fs.GetUri().GetHost();
            Configuration haClientConf = fs.GetConf();
            FileContext   fc           = FileContext.GetFileContext(haClientConf);
            Path          root         = new Path("/");

            fc.ListStatus(root);
            fc.ListStatus(fc.MakeQualified(root));
            fc.GetDefaultFileSystem().GetCanonicalServiceName();
            // Ensure that the logical hostname was never resolved.
            Org.Mockito.Mockito.Verify(spyNS, Org.Mockito.Mockito.Never()).LookupAllHostAddr(
                Org.Mockito.Mockito.Eq(logicalHost));
        }
Example #12
        public virtual void TestFailoverOnConnectTimeout()
        {
            conf.SetClass(CommonConfigurationKeysPublic.HadoopRpcSocketFactoryClassDefaultKey,
                          typeof(TestDFSClientFailover.InjectingSocketFactory), typeof(SocketFactory));
            // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
            // when connecting to the first NN.
            TestDFSClientFailover.InjectingSocketFactory.portToInjectOn = cluster.GetNameNodePort(0);
            FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);

            // Make the second NN the active one.
            cluster.ShutdownNameNode(0);
            cluster.TransitionToActive(1);
            // Call a non-idempotent method, and ensure the failover of the call proceeds
            // successfully.
            IOUtils.CloseStream(fs.Create(TestFile));
        }
Example #13
        public virtual void TestWrappedFailoverProxyProvider()
        {
            // setup the config with the dummy provider class
            Configuration config      = new HdfsConfiguration(conf);
            string        logicalName = HATestUtil.GetLogicalHostname(cluster);

            HATestUtil.SetFailoverConfigurations(cluster, config, logicalName);
            config.Set(DFSConfigKeys.DfsClientFailoverProxyProviderKeyPrefix + "." + logicalName,
                       typeof(TestDFSClientFailover.DummyLegacyFailoverProxyProvider).FullName);
            Path p = new Path("hdfs://" + logicalName + "/");

            // not to use IP address for token service
            SecurityUtil.SetTokenServiceUseIp(false);
            // Logical URI should be used.
            NUnit.Framework.Assert.IsTrue("Legacy proxy providers should use logical URI.", HAUtil
                                          .UseLogicalUri(config, p.ToUri()));
        }
Example #14
        public virtual void TestLogicalUriShouldNotHavePorts()
        {
            Configuration config      = new HdfsConfiguration(conf);
            string        logicalName = HATestUtil.GetLogicalHostname(cluster);

            HATestUtil.SetFailoverConfigurations(cluster, config, logicalName);
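            // A logical (HA) URI carries no port information, so appending a port
            // should make filesystem access fail.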
            Path p = new Path("hdfs://" + logicalName + ":12345/");

            try
            {
                p.GetFileSystem(config).Exists(p);
                NUnit.Framework.Assert.Fail("Did not fail with fake FS");
            }
            catch (IOException ioe)
            {
                GenericTestUtils.AssertExceptionContains("does not use port information", ioe);
            }
        }
Example #15
        public override void SetupCluster()
        {
            Configuration conf = SetupCommonConfig();

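            // Put the shared edits on a BookKeeper journal, using a fresh URI for
            // each run.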
            conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                     BKJMUtil.CreateJournalURI("/checkpointing" + journalCount++).ToString());
            BKJMUtil.AddJournalManagerDefinition(conf);
            MiniDFSNNTopology topology = new MiniDFSNNTopology().AddNameservice(
                new MiniDFSNNTopology.NSConf("ns1")
                    .AddNN(new MiniDFSNNTopology.NNConf("nn1").SetHttpPort(10001))
                    .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetHttpPort(10002)));

            cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(1)
                          .ManageNameDfsSharedDirs(false).Build();
            cluster.WaitActive();
            nn0 = cluster.GetNameNode(0);
            nn1 = cluster.GetNameNode(1);
            fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
            cluster.TransitionToActive(0);
        }
Example #16
        public virtual void TestMultiplePrimariesStarted()
        {
            Path           p1      = new Path("/testBKJMMultiplePrimary");
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                         BKJMUtil.CreateJournalURI("/hotfailoverMultiple").ToString());
                BKJMUtil.AddJournalManagerDefinition(conf);
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).ManageNameDfsSharedDirs(false).CheckExitOnShutdown(false).Build();
                NameNode nn1 = cluster.GetNameNode(0);
                NameNode nn2 = cluster.GetNameNode(1);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                fs.Mkdirs(p1);
                nn1.GetRpcServer().RollEditLog();
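                // Promote NN1 without shutting down NN0, leaving two NNs that both
                // believe they are active.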
                cluster.TransitionToActive(1);
                // get the older active server.
                fs = cluster.GetFileSystem(0);
                try
                {
                    fs.Delete(p1, true);
                    NUnit.Framework.Assert.Fail("Log update on older active should cause it to exit");
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.IsTrue(re.GetClassName().Contains("ExitException"));
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        public virtual void TestDfsClientFailover()
        {
            FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);

            DFSTestUtil.CreateFile(fs, TestFile, FileLengthToVerify, (short)1, 1L);
            NUnit.Framework.Assert.AreEqual(fs.GetFileStatus(TestFile).GetLen(), FileLengthToVerify);
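            // Fail over to the standby; the client should transparently retry
            // against NN1.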
            cluster.ShutdownNameNode(0);
            cluster.TransitionToActive(1);
            NUnit.Framework.Assert.AreEqual(fs.GetFileStatus(TestFile).GetLen(), FileLengthToVerify);
            // Check that it functions even if the URL becomes canonicalized
            // to include a port number.
            Path withPort = new Path("hdfs://" + HATestUtil.GetLogicalHostname(cluster) + ":"
                                     + NameNode.DefaultPort + "/" + TestFile.ToUri().GetPath());
            FileSystem fs2 = withPort.GetFileSystem(fs.GetConf());

            NUnit.Framework.Assert.IsTrue(fs2.Exists(withPort));
            fs.Close();
        }
Example #18
        public virtual void TestFailoverAfterOpen()
        {
            Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);

            conf.Set(CommonConfigurationKeysPublic.FsDefaultNameKey,
                     HdfsConstants.HdfsUriScheme + "://" + LogicalName);
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;
            Path           p       = new Path("/test");

            byte[] data = Sharpen.Runtime.GetBytesForString("Hello");
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                fs = FileSystem.Get(WebhdfsUri, conf);
                cluster.TransitionToActive(1);
                FSDataOutputStream @out = fs.Create(p);
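                // Fail over while the stream is open; writes after the switch
                // should reach the new active NN.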
                cluster.ShutdownNameNode(1);
                cluster.TransitionToActive(0);
                @out.Write(data);
                @out.Close();
                FSDataInputStream @in = fs.Open(p);
                byte[]            buf = new byte[data.Length];
                IOUtils.ReadFully(@in, buf, 0, buf.Length);
                Assert.AssertArrayEquals(data, buf);
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #19
        public virtual void TestFormatShouldBeIgnoredForNonFileBasedDirs()
        {
            Configuration conf        = new HdfsConfiguration();
            string        logicalName = "mycluster";
            // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
            // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
            // considered.
            string     localhost = "127.0.0.1";
            IPEndPoint nnAddr1   = new IPEndPoint(localhost, 8020);
            IPEndPoint nnAddr2   = new IPEndPoint(localhost, 9020);

            HATestUtil.SetFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(DfsBaseDir, "name").GetAbsolutePath());
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeSupportAllowFormatKey, true);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeEditsPluginPrefix, "dummy"),
                     typeof(TestGenericJournalConf.DummyJournalManager).FullName);
            conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey, "dummy://" + localhost + ":2181/ledgers");
            conf.Set(DFSConfigKeys.DfsHaNamenodeIdKey, "nn1");
            // An internal assert verifies that the format call behaves as expected.
            NameNode.Format(conf);
        }
Example #20
        public virtual void SetUp()
        {
            conf = new Configuration();
            // Stall the standby checkpointer in two ways
            conf.SetLong(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, long.MaxValue);
            conf.SetLong(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 20);
            // Make it autoroll after 10 edits
            conf.SetFloat(DFSConfigKeys.DfsNamenodeEditLogAutorollMultiplierThreshold, 0.5f);
            conf.SetInt(DFSConfigKeys.DfsNamenodeEditLogAutorollCheckIntervalMs, 100);
            int retryCount = 0;

            while (true)
            {
                try
                {
                    int basePort = 10060 + random.Next(100) * 2;
                    MiniDFSNNTopology topology = new MiniDFSNNTopology().AddNameservice(
                        new MiniDFSNNTopology.NSConf("ns1")
                            .AddNN(new MiniDFSNNTopology.NNConf("nn1").SetHttpPort(basePort))
                            .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetHttpPort(basePort + 1)));
                    cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(0).Build();
                    cluster.WaitActive();
                    nn0 = cluster.GetNameNode(0);
                    fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
                    cluster.TransitionToActive(0);
                    fs      = cluster.GetFileSystem(0);
                    editLog = nn0.GetNamesystem().GetEditLog();
                    ++retryCount;
                    break;
                }
                catch (BindException)
                {
                    Log.Info("Set up MiniDFSCluster failed due to port conflicts, retry " + retryCount
                             + " times");
                }
            }
        }
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.URISyntaxException"/>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Inotify.MissingEventsException"/>
        public virtual void TestNNFailover()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).Build();

            try
            {
                cluster.GetDfsCluster().WaitActive();
                cluster.GetDfsCluster().TransitionToActive(0);
                DFSClient client = ((DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster.GetDfsCluster(), conf)).dfs;
                DFSInotifyEventInputStream eis = client.GetInotifyEventStream();
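                // Generate edits on the current active; they are read back through
                // the inotify stream after the failover below.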
                for (int i = 0; i < 10; i++)
                {
                    client.Mkdirs("/dir" + i, null, false);
                }
                cluster.GetDfsCluster().ShutdownNameNode(0);
                cluster.GetDfsCluster().TransitionToActive(1);
                EventBatch batch = null;
                // we can read all of the edits logged by the old active from the new
                // active
                for (int i_1 = 0; i_1 < 10; i_1++)
                {
                    batch = WaitForNextEvents(eis);
                    NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                    NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Create);
                    NUnit.Framework.Assert.IsTrue(((Event.CreateEvent)batch.GetEvents()[0]).GetPath().Equals("/dir" + i_1));
                }
                NUnit.Framework.Assert.IsTrue(eis.Poll() == null);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #22
        /// <summary>Test that encryption zones are properly tracked by the standby.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestEncryptionZonesTrackedOnStandby()
        {
            int  len      = 8196;
            Path dir      = new Path("/enc");
            Path dirChild = new Path(dir, "child");
            Path dirFile  = new Path(dir, "file");

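            // Create an encryption zone with a child directory and file while NN0
            // is active.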
            fs.Mkdir(dir, FsPermission.GetDirDefault());
            dfsAdmin0.CreateEncryptionZone(dir, TestKey);
            fs.Mkdir(dirChild, FsPermission.GetDirDefault());
            DFSTestUtil.CreateFile(fs, dirFile, len, (short)1, unchecked ((int)(0xFEED)));
            string contents = DFSTestUtil.ReadFile(fs, dirFile);

            // Failover the current standby to active.
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cluster.ShutdownNameNode(0);
            cluster.TransitionToActive(1);
            NUnit.Framework.Assert.AreEqual("Got unexpected ez path", dir.ToString(), dfsAdmin1
                                            .GetEncryptionZoneForPath(dir).GetPath().ToString());
            NUnit.Framework.Assert.AreEqual("Got unexpected ez path", dir.ToString(), dfsAdmin1
                                            .GetEncryptionZoneForPath(dirChild).GetPath().ToString());
            NUnit.Framework.Assert.AreEqual("File contents after failover were changed", contents
                                            , DFSTestUtil.ReadFile(fs, dirFile));
        }
Example #23
        public virtual void TestMoverCliWithHAConf()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
                                         .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();

            HATestUtil.SetFailoverConfigurations(cluster, conf, "MyCluster");
            try
            {
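                // The Mover CLI should resolve its paths against the single logical
                // nameservice URI rather than per-NameNode URIs.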
                IDictionary<URI, IList<Path>> movePaths = Mover.Cli.GetNameNodePathsToMove(conf, "-p", "/foo", "/bar");
                ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
                NUnit.Framework.Assert.AreEqual(1, namenodes.Count);
                NUnit.Framework.Assert.AreEqual(1, movePaths.Count);
                URI nn = namenodes.GetEnumerator().Next();
                NUnit.Framework.Assert.AreEqual(new URI("hdfs://MyCluster"), nn);
                NUnit.Framework.Assert.IsTrue(movePaths.Contains(nn));
                CheckMovePaths(movePaths[nn], new Path("/foo"), new Path("/bar"));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #24
        /// <summary>
        /// Make sure the WebHdfsFileSystem retries on RetriableException when
        /// rpcServer is null in NamenodeWebHdfsMethods while the NameNode starts up.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRetryWhileNNStartup()
        {
            Configuration              conf      = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster             cluster   = null;
            IDictionary<string, bool> resultMap = new Dictionary<string, bool>();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                NameNode          namenode  = cluster.GetNameNode(0);
                NamenodeProtocols rpcServer = namenode.GetRpcServer();
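                // Null out the RPC server to simulate a NameNode that is still
                // starting up; the background thread's mkdirs should be retried
                // until the server is restored.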
                Whitebox.SetInternalState(namenode, "rpcServer", null);
                new _Thread_212(this, conf, resultMap).Start();
                Sharpen.Thread.Sleep(1000);
                Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
                lock (this)
                {
                    while (!resultMap.Contains("mkdirs"))
                    {
                        Sharpen.Runtime.Wait(this);
                    }
                    NUnit.Framework.Assert.IsTrue(resultMap["mkdirs"]);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #25
        public virtual void TestFailoverWithFailingBKCluster()
        {
            int          ensembleSize = numBookies + 1;
            BookieServer newBookie    = bkutil.NewBookie();

            NUnit.Framework.Assert.AreEqual("New bookie didn't start", ensembleSize, bkutil.CheckBookiesUp
                                                (ensembleSize, 10));
            BookieServer   replacementBookie = null;
            MiniDFSCluster cluster           = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                         BKJMUtil.CreateJournalURI("/hotfailoverWithFail").ToString());
                conf.SetInt(BookKeeperJournalManager.BkjmBookkeeperEnsembleSize, ensembleSize);
                conf.SetInt(BookKeeperJournalManager.BkjmBookkeeperQuorumSize, ensembleSize);
                BKJMUtil.AddJournalManagerDefinition(conf);
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).ManageNameDfsSharedDirs(false).CheckExitOnShutdown(false).Build();
                NameNode nn1 = cluster.GetNameNode(0);
                NameNode nn2 = cluster.GetNameNode(1);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                Path       p1 = new Path("/testBKJMFailingBKCluster1");
                Path       p2 = new Path("/testBKJMFailingBKCluster2");
                FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                fs.Mkdirs(p1);
                // will take down shared storage
                newBookie.Shutdown();
                NUnit.Framework.Assert.AreEqual("New bookie didn't stop", numBookies,
                                                bkutil.CheckBookiesUp(numBookies, 10));
                try
                {
                    fs.Mkdirs(p2);
                    NUnit.Framework.Assert.Fail("mkdirs should result in the NN exiting");
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.IsTrue(re.GetClassName().Contains("ExitException"));
                }
                cluster.ShutdownNameNode(0);
                try
                {
                    cluster.TransitionToActive(1);
                    NUnit.Framework.Assert.Fail("Shouldn't have been able to transition with bookies down"
                                                );
                }
                catch (ExitUtil.ExitException ee)
                {
                    NUnit.Framework.Assert.IsTrue("Should shutdown due to required journal failure",
                                                  ee.Message.Contains("starting log segment 3 failed for required journal"));
                }
                replacementBookie = bkutil.NewBookie();
                NUnit.Framework.Assert.AreEqual("Replacement bookie didn't start", ensembleSize,
                                                bkutil.CheckBookiesUp(ensembleSize, 10));
                // should work fine now
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue(fs.Exists(p1));
                NUnit.Framework.Assert.IsFalse(fs.Exists(p2));
            }
            finally
            {
                newBookie.Shutdown();
                if (replacementBookie != null)
                {
                    replacementBookie.Shutdown();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #26
        /// <summary>
        /// Regression test for HDFS-4799, a case where, upon restart, if there
        /// were RWR replicas with out-of-date genstamps, the NN could accidentally
        /// delete good replicas instead of the bad replicas.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRWRInvalidation()
        {
            Configuration conf = new HdfsConfiguration();

            // Set the deletion policy to be randomized rather than the default.
            // The default is based on disk space, which isn't controllable
            // in the context of the test, whereas a random one is more accurate
            // to what is seen in real clusters (nodes have random amounts of free
            // space)
            conf.SetClass(DFSConfigKeys.DfsBlockReplicatorClassnameKey,
                          typeof(TestDNFencing.RandomDeleterPolicy), typeof(BlockPlacementPolicy));
            // Speed up the test a bit with faster heartbeats.
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // Test with a bunch of separate files, since otherwise the test may
            // fail just due to "good luck", even if a bug is present.
            IList<Path> testPaths = Lists.NewArrayList();

            for (int i = 0; i < 10; i++)
            {
                testPaths.AddItem(new Path("/test" + i));
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                IList<FSDataOutputStream> streams = Lists.NewArrayList();
                try
                {
                    // Open the test files and write some data to each
                    foreach (Path path in testPaths)
                    {
                        FSDataOutputStream @out = cluster.GetFileSystem().Create(path, (short)2);
                        streams.AddItem(@out);
                        @out.WriteBytes("old gs data\n");
                        @out.Hflush();
                    }
                    // Shutdown one of the nodes in the pipeline
                    MiniDFSCluster.DataNodeProperties oldGenstampNode = cluster.StopDataNode(0);
                    // Write some more data and flush again. This data will only
                    // be in the latter genstamp copy of the blocks.
                    for (int i_1 = 0; i_1 < streams.Count; i_1++)
                    {
                        Path path_1             = testPaths[i_1];
                        FSDataOutputStream @out = streams[i_1];
                        @out.WriteBytes("new gs data\n");
                        @out.Hflush();
                        // Set replication so that only one node is necessary for this block,
                        // and close it.
                        cluster.GetFileSystem().SetReplication(path_1, (short)1);
                        @out.Close();
                    }
                    // Upon restart, there will be two replicas, one with an old genstamp
                    // and one current copy. This test wants to ensure that the old genstamp
                    // copy is the one that is deleted.
                    Log.Info("=========================== restarting cluster");
                    MiniDFSCluster.DataNodeProperties otherNode = cluster.StopDataNode(0);
                    cluster.RestartNameNode();
                    // Restart the datanode with the corrupt replica first.
                    cluster.RestartDataNode(oldGenstampNode);
                    cluster.WaitActive();
                    // Then the other node
                    cluster.RestartDataNode(otherNode);
                    cluster.WaitActive();
                    // Compute and send invalidations, waiting until they're fully processed.
                    cluster.GetNameNode().GetNamesystem().GetBlockManager().ComputeInvalidateWork(2);
                    cluster.TriggerHeartbeats();
                    HATestUtil.WaitForDNDeletions(cluster);
                    cluster.TriggerDeletionReports();
                    // Make sure we can still read the blocks.
                    foreach (Path path_2 in testPaths)
                    {
                        string ret = DFSTestUtil.ReadFile(cluster.GetFileSystem(), path_2);
                        NUnit.Framework.Assert.AreEqual("old gs data\n" + "new gs data\n", ret);
                    }
                }
                finally
                {
                    IOUtils.Cleanup(Log, Sharpen.Collections.ToArray(streams, new IDisposable[0]));
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }