public virtual void SetupCluster()
{
    Configuration conf = SetupCommonConfig();
    // Dial down the retention of extra edits and checkpoints. This is to
    // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
    conf.SetInt(DFSConfigKeys.DfsNamenodeNumCheckpointsRetainedKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeNumExtraEditsRetainedKey, 0);
    int retryCount = 0;
    // Keep retrying with fresh random ports until the cluster binds cleanly.
    while (true)
    {
        try
        {
            int basePort = 10060 + random.Next(100) * 2;
            MiniDFSNNTopology topology = new MiniDFSNNTopology().AddNameservice(
                new MiniDFSNNTopology.NSConf("ns1")
                    .AddNN(new MiniDFSNNTopology.NNConf("nn1").SetHttpPort(basePort))
                    .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetHttpPort(basePort + 1)));
            cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(1).Build();
            cluster.WaitActive();
            nn0 = cluster.GetNameNode(0);
            nn1 = cluster.GetNameNode(1);
            fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
            cluster.TransitionToActive(0);
            break;
        }
        catch (BindException)
        {
            // FIX: increment on the failure path. Previously retryCount was
            // incremented just before the successful break, so this log line
            // always reported "retry 0 times" no matter how many BindExceptions
            // had occurred.
            ++retryCount;
            Log.Info("Set up MiniDFSCluster failed due to port conflicts, retry " + retryCount + " times");
        }
    }
}
public virtual void TestFailureToTransitionCausesShutdown()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        // An illegal (negative) trash emptier interval makes the NN fail
        // while transitioning to the active state.
        conf.SetLong(CommonConfigurationKeys.FsTrashIntervalKey, -1);
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(MiniDFSNNTopology.SimpleHATopology())
            .NumDataNodes(0)
            .CheckExitOnShutdown(false)
            .Build();
        cluster.WaitActive();
        try
        {
            cluster.TransitionToActive(0);
            NUnit.Framework.Assert.Fail("Transitioned to active but should not have been able to.");
        }
        catch (ExitUtil.ExitException ee)
        {
            // The failed transition should surface as an exit with this cause.
            GenericTestUtils.AssertExceptionContains("Cannot start trash emptier with negative interval", ee);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
// Verifies HA startup semantics: both NNs begin in standby with no edit
// files; only the active NN creates edit segments (in its own dirs and the
// shared dir); a restarted standby neither finalizes shared edits nor
// applies in-progress segments.
public virtual void TestStartup()
{
    Configuration conf = new Configuration();
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();
    try
    {
        // During HA startup, both nodes should be in
        // standby and we shouldn't have any edits files
        // in any edits directory!
        IList<URI> allDirs = Lists.NewArrayList();
        Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(0));
        Sharpen.Collections.AddAll(allDirs, cluster.GetNameDirs(1));
        allDirs.AddItem(cluster.GetSharedEditsDir(0, 1));
        AssertNoEditFiles(allDirs);
        // Set the first NN to active, make sure it creates edits
        // in its own dirs and the shared dir. The standby
        // should still have no edits!
        cluster.TransitionToActive(0);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)), NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        // 0x1ed == octal 0755 (rwxr-xr-x)
        cluster.GetNameNode(0).GetRpcServer().Mkdirs("/test", FsPermission.CreateImmutable((short)0x1ed), true);
        // Restarting the standby should not finalize any edits files
        // in the shared directory when it starts up!
        cluster.RestartNameNode(1);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)), NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        // Additionally it should not have applied any in-progress logs
        // at start-up -- otherwise, it would have read half-way into
        // the current log segment, and on the next roll, it would have to
        // either replay starting in the middle of the segment (not allowed)
        // or double-replay the edits (incorrect).
        NUnit.Framework.Assert.IsNull(NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        cluster.GetNameNode(0).GetRpcServer().Mkdirs("/test2", FsPermission.CreateImmutable((short)0x1ed), true);
        // If we restart NN0, it'll come back as standby, and we can
        // transition NN1 to active and make sure it reads edits correctly at this point.
        cluster.RestartNameNode(0);
        cluster.TransitionToActive(1);
        // NN1 should have both the edits that came before its restart, and the edits that
        // came after its restart.
        NUnit.Framework.Assert.IsNotNull(NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        NUnit.Framework.Assert.IsNotNull(NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test2", true));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Regression test for HDFS-2693: when doing state transitions, we need to
/// lock the FSNamesystem so that we don't end up doing any writes while it's
/// "in between" states.
/// </summary>
/// <remarks>
/// Regression test for HDFS-2693. Starts several client threads that perform
/// mutation operations while another thread flips a NN back and forth
/// between active and standby.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestTransitionSynchronization()
{
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        cluster.WaitActive();
        // Inject a sleep into every FS write-lock acquisition to widen the
        // race window between client mutations and state transitions.
        ReentrantReadWriteLock spyLock = NameNodeAdapter.SpyOnFsLock(cluster.GetNameNode(0).GetNamesystem());
        Org.Mockito.Mockito.DoAnswer(new GenericTestUtils.SleepAnswer(50)).When(spyLock).WriteLock();
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
        for (int i = 0; i < 50; i++)
        {
            int finalI = i;
            ctx.AddThread(new _RepeatingTestThread_256(finalI, fs, ctx));
        }
        // One extra thread keeps flipping the NN's HA state.
        ctx.AddThread(new _RepeatingTestThread_266(cluster, ctx));
        ctx.StartThreads();
        ctx.WaitFor(20000);
        ctx.Stop();
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestFailoverWithBK()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
        // Shared edits live in a BookKeeper journal rather than a local dir.
        conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey, BKJMUtil.CreateJournalURI("/hotfailover").ToString());
        BKJMUtil.AddJournalManagerDefinition(conf);
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(MiniDFSNNTopology.SimpleHATopology())
            .NumDataNodes(0)
            .ManageNameDfsSharedDirs(false)
            .Build();
        // FIX: removed the unused locals nn1/nn2 (assigned from
        // GetNameNode(0)/GetNameNode(1) but never read).
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        Path p = new Path("/testBKJMfailover");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        fs.Mkdirs(p);
        // Kill the active NN; the standby must pick up the mkdir from the
        // BookKeeper journal after failover.
        cluster.ShutdownNameNode(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue(fs.Exists(p));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void Setup()
{
    conf = new Configuration();
    // Specify the quorum per-nameservice, to ensure that these configs
    // can be nameservice-scoped.
    conf.Set(ZKFailoverController.ZkQuorumKey + ".ns1", hostPort);
    conf.Set(DFSConfigKeys.DfsHaFenceMethodsKey, typeof(TestNodeFencer.AlwaysSucceedFencer).FullName);
    conf.SetBoolean(DFSConfigKeys.DfsHaAutoFailoverEnabledKey, true);
    // Turn off IPC client caching so the suite can handle the restart of
    // the daemons between test cases.
    conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
    conf.SetInt(DFSConfigKeys.DfsHaZkfcPortKey + ".ns1.nn1", 10023);
    conf.SetInt(DFSConfigKeys.DfsHaZkfcPortKey + ".ns1.nn2", 10024);
    MiniDFSNNTopology topology = new MiniDFSNNTopology().AddNameservice(
        new MiniDFSNNTopology.NSConf("ns1")
            .AddNN(new MiniDFSNNTopology.NNConf("nn1").SetIpcPort(10021))
            .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetIpcPort(10022)));
    cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(0).Build();
    cluster.WaitActive();
    ctx = new MultithreadedTestUtil.TestContext();
    // First ZKFC formats ZooKeeper, then both are started and must report
    // healthy before the test proceeds.
    ctx.AddThread(thr1 = new TestDFSZKFailoverController.ZKFCThread(this, ctx, 0));
    NUnit.Framework.Assert.AreEqual(0, thr1.zkfc.Run(new string[] { "-formatZK" }));
    thr1.Start();
    WaitForHAState(0, HAServiceProtocol.HAServiceState.Active);
    ctx.AddThread(thr2 = new TestDFSZKFailoverController.ZKFCThread(this, ctx, 1));
    thr2.Start();
    // Wait for the ZKFCs to fully start up
    ZKFCTestUtil.WaitForHealthState(thr1.zkfc, HealthMonitor.State.ServiceHealthy, ctx);
    ZKFCTestUtil.WaitForHealthState(thr2.zkfc, HealthMonitor.State.ServiceHealthy, ctx);
    fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
}
/// <summary>
/// This test also serves to test
/// <see cref="Org.Apache.Hadoop.Hdfs.HAUtil.GetProxiesForAllNameNodesInNameservice(Org.Apache.Hadoop.Conf.Configuration, string)"/>
/// and
/// <see cref="Org.Apache.Hadoop.Hdfs.DFSUtil.GetRpcAddressesForNameserviceId(Org.Apache.Hadoop.Conf.Configuration, string, string)"/>
/// by virtue of the fact that it wouldn't work properly if the proxies
/// returned were not for the correct NNs.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestIsAtLeastOneActive()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        Configuration conf = new HdfsConfiguration();
        HATestUtil.SetFailoverConfigurations(cluster, conf);
        IList<ClientProtocol> namenodes = HAUtil.GetProxiesForAllNameNodesInNameservice(conf, HATestUtil.GetLogicalHostname(cluster));
        NUnit.Framework.Assert.AreEqual(2, namenodes.Count);
        // Walk each NN through active/standby and verify the predicate
        // tracks whether any NN is currently active.
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToActive(0);
        NUnit.Framework.Assert.IsTrue(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToStandby(0);
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue(HAUtil.IsAtLeastOneActive(namenodes));
        cluster.TransitionToStandby(1);
        NUnit.Framework.Assert.IsFalse(HAUtil.IsAtLeastOneActive(namenodes));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>Test that delegation tokens continue to work after the failover.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestDelegationTokensAfterFailover()
{
    Configuration conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode nn1 = cluster.GetNameNode(0);
        NameNode nn2 = cluster.GetNameNode(1);
        string renewer = UserGroupInformation.GetLoginUser().GetUserName();
        // Obtain a token from the original active NN.
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
            nn1.GetRpcServer().GetDelegationToken(new Text(renewer));
        Log.Info("Failing over to NN 1");
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        // The new active must accept renew/cancel of the pre-failover token
        // and be able to issue a fresh one.
        nn2.GetRpcServer().RenewDelegationToken(token);
        nn2.GetRpcServer().CancelDelegationToken(token);
        token = nn2.GetRpcServer().GetDelegationToken(new Text(renewer));
        NUnit.Framework.Assert.IsTrue(token != null);
    }
    finally
    {
        cluster.Shutdown();
    }
}
// Exercises an HA upgrade with an NFS-style shared edits dir: NN1 is shut
// down, NN0 restarts with -upgrade (creating "previous" dirs), then NN1 is
// re-bootstrapped from the upgraded state.
public virtual void TestNfsUpgrade()
{
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();
        FilePath sharedDir = new FilePath(cluster.GetSharedEditsDir(0, 1));
        // No upgrade is in progress at the moment.
        CheckClusterPreviousDirExistence(cluster, false);
        AssertCTimesEqual(cluster);
        CheckPreviousDirExistence(sharedDir, false);
        // Transition NN0 to active and do some FS ops.
        cluster.TransitionToActive(0);
        fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo1")));
        // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
        // flag.
        cluster.ShutdownNameNode(1);
        cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Upgrade);
        cluster.RestartNameNode(0, false);
        // Only NN0 and the shared dir should have "previous" dirs now.
        CheckNnPreviousDirExistence(cluster, 0, true);
        CheckNnPreviousDirExistence(cluster, 1, false);
        CheckPreviousDirExistence(sharedDir, true);
        // NN0 should come up in the active state when given the -upgrade option,
        // so no need to transition it to active.
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo2")));
        // Restart NN0 without the -upgrade flag, to make sure that works.
        cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Regular);
        cluster.RestartNameNode(0, false);
        // Make sure we can still do FS ops after upgrading.
        cluster.TransitionToActive(0);
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo3")));
        // Now bootstrap the standby with the upgraded info.
        int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
        NUnit.Framework.Assert.AreEqual(0, rc);
        // Now restart NN1 and make sure that we can do ops against that as well.
        cluster.RestartNameNode(1);
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo4")));
        AssertCTimesEqual(cluster);
    }
    finally
    {
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestStartingWithUpgradeInProgressSucceeds()
{
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(MiniDFSNNTopology.SimpleHATopology())
            .NumDataNodes(0)
            .Build();
        // Simulate an upgrade having started by planting a "previous tmp"
        // directory inside every name dir of both NNs.
        for (int nnIndex = 0; nnIndex < 2; nnIndex++)
        {
            foreach (URI uri in cluster.GetNameDirs(nnIndex))
            {
                FilePath prevTmp = new FilePath(new FilePath(uri), Storage.StorageTmpPrevious);
                Log.Info("creating previous tmp dir: " + prevTmp);
                NUnit.Framework.Assert.IsTrue(prevTmp.Mkdirs());
            }
        }
        // Restart must succeed even with the half-finished upgrade present.
        cluster.RestartNameNodes();
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void SetUpNameNode()
{
    conf = new HdfsConfiguration();
    // Two-NN HA topology with no datanodes; the failover configuration is
    // then derived from the running cluster.
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    HATestUtil.SetFailoverConfigurations(cluster, conf);
}
public virtual void TestMoverCliWithFederationHA()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NnTopology(MiniDFSNNTopology.SimpleHAFederatedTopology(3))
        .NumDataNodes(0)
        .Build();
    Configuration conf = new HdfsConfiguration();
    DFSTestUtil.SetFederatedHAConfiguration(cluster, conf);
    try
    {
        ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
        NUnit.Framework.Assert.AreEqual(3, namenodes.Count);
        IEnumerator<URI> iter = namenodes.GetEnumerator();
        URI nn1 = iter.Next();
        URI nn2 = iter.Next();
        URI nn3 = iter.Next();
        // The Mover CLI should bucket the requested paths under the
        // namenode that owns each of them.
        IDictionary<URI, IList<Path>> movePaths = Mover.Cli.GetNameNodePathsToMove(conf, "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
        NUnit.Framework.Assert.AreEqual(3, movePaths.Count);
        CheckMovePaths(movePaths[nn1], new Path("/foo"), new Path("/bar"));
        CheckMovePaths(movePaths[nn2], new Path("/foo/bar"));
        CheckMovePaths(movePaths[nn3], new Path("/foobar"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <exception cref="System.Exception"/>
internal TestContext(Configuration conf, int numNameServices)
{
    this.numNameServices = numNameServices;
    MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StoragesPerDatanode(1);
    // A federated topology is only needed when more than one nameservice
    // was requested.
    if (numNameServices > 1)
    {
        bld.NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(numNameServices));
    }
    cluster = bld.Build();
    cluster.WaitActive();
    // One DistributedFileSystem handle per nameservice.
    dfs = new DistributedFileSystem[numNameServices];
    for (int ns = 0; ns < numNameServices; ns++)
    {
        dfs[ns] = cluster.GetFileSystem(ns);
    }
    // Record each nameservice's block pool id.
    bpids = new string[numNameServices];
    for (int ns = 0; ns < numNameServices; ns++)
    {
        bpids[ns] = cluster.GetNamesystem(ns).GetBlockPoolId();
    }
    datanode = cluster.GetDataNodes()[0];
    blockScanner = datanode.GetBlockScanner();
    // Create a working directory in every nameservice.
    for (int ns = 0; ns < numNameServices; ns++)
    {
        dfs[ns].Mkdirs(new Path("/test"));
    }
    data = datanode.GetFSDataset();
    volumes = data.GetVolumes();
}
public virtual void SetUpCluster()
{
    conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointCheckPeriodKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeNumCheckpointsRetainedKey, 10);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    // Build either a shared-edits-dir HA cluster or a QJM-backed one,
    // depending on the parameterized cluster type.
    if (clusterType == TestFailureToReadEdits.TestType.SharedDirHa)
    {
        MiniDFSNNTopology topology = MiniQJMHACluster.CreateDefaultTopology(10000);
        cluster = new MiniDFSCluster.Builder(conf)
            .NnTopology(topology)
            .NumDataNodes(0)
            .CheckExitOnShutdown(false)
            .Build();
    }
    else
    {
        MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.GetDfsBuilder().NumDataNodes(0).CheckExitOnShutdown(false);
        miniQjmHaCluster = builder.Build();
        cluster = miniQjmHaCluster.GetDfsCluster();
    }
    cluster.WaitActive();
    nn0 = cluster.GetNameNode(0);
    nn1 = cluster.GetNameNode(1);
    cluster.TransitionToActive(0);
    fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
}
/// <summary>
/// Test that transitioning a service to the state that it is already
/// in is a nop, specifically, an exception is not thrown.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestTransitionToCurrentStateIsANop()
{
    Configuration conf = new Configuration();
    conf.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(1)
        .Build();
    List<Sharpen.Thread> crmThreads = new List<Sharpen.Thread>();
    try
    {
        cluster.WaitActive();
        AddCrmThreads(cluster, crmThreads);
        // Repeating each transition must be a no-op rather than an error.
        cluster.TransitionToActive(0);
        AddCrmThreads(cluster, crmThreads);
        cluster.TransitionToActive(0);
        AddCrmThreads(cluster, crmThreads);
        cluster.TransitionToStandby(0);
        AddCrmThreads(cluster, crmThreads);
        cluster.TransitionToStandby(0);
        AddCrmThreads(cluster, crmThreads);
    }
    finally
    {
        cluster.Shutdown();
    }
    // Verify that all cacheReplicationMonitor threads shut down
    foreach (Sharpen.Thread thread in crmThreads)
    {
        Uninterruptibles.JoinUninterruptibly(thread);
    }
}
/// <exception cref="System.Exception"/>
public virtual void TestClusterIdMismatchAtStartupWithHA()
{
    // ns1 uses the cluster's real id while ns2's NNs carry a bogus
    // cluster id, so the datanode should only register one BPOfferService.
    MiniDFSNNTopology top = new MiniDFSNNTopology()
        .AddNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .AddNN(new MiniDFSNNTopology.NNConf("nn0"))
            .AddNN(new MiniDFSNNTopology.NNConf("nn1")))
        .AddNameservice(new MiniDFSNNTopology.NSConf("ns2")
            .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetClusterId("bad-cid"))
            .AddNN(new MiniDFSNNTopology.NNConf("nn3").SetClusterId("bad-cid")));
    top.SetFederation(true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(top).NumDataNodes(0).Build();
    try
    {
        cluster.StartDataNodes(conf, 1, true, null, null);
        // let the initialization be complete
        Sharpen.Thread.Sleep(10000);
        DataNode dn = cluster.GetDataNodes()[0];
        NUnit.Framework.Assert.IsTrue("Datanode should be running", dn.IsDatanodeUp());
        NUnit.Framework.Assert.AreEqual("Only one BPOfferService should be running", 1, dn.GetAllBpOs().Length);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public static void ClusterSetupAtBeginning()
{
    // Two federated nameservices backed by two datanodes; one FileContext
    // handle per namenode.
    cluster = new MiniDFSCluster.Builder(clusterConf)
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
        .NumDataNodes(2)
        .Build();
    cluster.WaitClusterUp();
    fc = FileContext.GetFileContext(cluster.GetURI(0), clusterConf);
    fc2 = FileContext.GetFileContext(cluster.GetURI(1), clusterConf);
}
public static void ClusterSetupAtBeginning()
{
    // Two federated nameservices backed by two datanodes; one FileSystem
    // handle per namenode.
    cluster = new MiniDFSCluster.Builder(clusterConf)
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
        .NumDataNodes(2)
        .Build();
    cluster.WaitClusterUp();
    fHdfs = cluster.GetFileSystem(0);
    fHdfs2 = cluster.GetFileSystem(1);
}
/// <summary>
/// Regression test for HDFS-2795:
/// - Start an HA cluster with a DN.
/// </summary>
/// <remarks>
/// Regression test for HDFS-2795:
/// - Start an HA cluster with a DN.
/// - Write several blocks to the FS with replication 1.
/// - Shutdown the DN
/// - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
/// - Restart the DN.
/// In the bug, the standby node would only very slowly notice the blocks returning
/// to the cluster.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestDatanodeRestarts()
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 1024);
    // We read from the standby to watch block locations
    HAUtil.SetAllowStandbyReads(conf, true);
    conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 0);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(1).Build();
    try
    {
        NameNode nn0 = cluster.GetNameNode(0);
        NameNode nn1 = cluster.GetNameNode(1);
        cluster.TransitionToActive(0);
        // Create 5 blocks.
        DFSTestUtil.CreateFile(cluster.GetFileSystem(0), TestFilePath, 5 * 1024, (short)1, 1L);
        HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
        // Stop the DN.
        DataNode dn = cluster.GetDataNodes()[0];
        string dnName = dn.GetDatanodeId().GetXferAddr();
        MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
        // Make sure both NNs register it as dead.
        BlockManagerTestUtil.NoticeDeadDatanode(nn0, dnName);
        BlockManagerTestUtil.NoticeDeadDatanode(nn1, dnName);
        BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
        BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
        NUnit.Framework.Assert.AreEqual(5, nn0.GetNamesystem().GetUnderReplicatedBlocks());
        // The SBN will not have any blocks in its neededReplication queue
        // since the SBN doesn't process replication.
        NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
        LocatedBlocks locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
        NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has no replicas", 0, locs.Get(0).GetLocations().Length);
        cluster.RestartDataNode(dnProps);
        // Wait for both NNs to re-register the DN.
        cluster.WaitActive(0);
        cluster.WaitActive(1);
        BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
        BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
        // After the DN returns, neither NN should see under-replication and
        // the standby should promptly see the replicas again (HDFS-2795).
        NUnit.Framework.Assert.AreEqual(0, nn0.GetNamesystem().GetUnderReplicatedBlocks());
        NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks());
        locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
        NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has replicas again", 1, locs.Get(0).GetLocations().Length);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public static void ClusterSetupAtBeginning()
{
    // ACLs must be enabled for this suite's tests.
    clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
    cluster = new MiniDFSCluster.Builder(clusterConf)
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
        .NumDataNodes(2)
        .Build();
    cluster.WaitClusterUp();
    // One FileContext handle per namenode.
    fc = FileContext.GetFileContext(cluster.GetURI(0), clusterConf);
    fc2 = FileContext.GetFileContext(cluster.GetURI(1), clusterConf);
}
public virtual void StartCluster()
{
    conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
    // Set short retry timeouts so this test runs faster
    conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(1)
        .Build();
}
/// <summary>
/// Test a cluster with even distribution, then a new empty node is added to
/// the cluster.
/// </summary>
/// <remarks>
/// Test a cluster with even distribution, then a new empty node is added to
/// the cluster. Test start a cluster with specified number of nodes, and fills
/// it to be 30% full (with a single file replicated identically to all
/// datanodes); It then adds one new empty node and starts balancing.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestBalancerWithHANameNodes()
{
    Configuration conf = new HdfsConfiguration();
    TestBalancer.InitConf(conf);
    long newNodeCapacity = TestBalancer.Capacity;
    // new node's capacity
    string newNodeRack = TestBalancer.Rack2;
    // new node's rack
    // array of racks for original nodes in cluster
    string[] racks = new string[] { TestBalancer.Rack0, TestBalancer.Rack1 };
    // array of capacities of original nodes in cluster
    long[] capacities = new long[] { TestBalancer.Capacity, TestBalancer.Capacity };
    NUnit.Framework.Assert.AreEqual(capacities.Length, racks.Length);
    int numOfDatanodes = capacities.Length;
    MiniDFSNNTopology.NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
    nn1Conf.SetIpcPort(NameNode.DefaultPort);
    Configuration copiedConf = new Configuration(conf);
    cluster = new MiniDFSCluster.Builder(copiedConf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(capacities.Length).Racks(racks).SimulatedCapacities(capacities).Build();
    HATestUtil.SetFailoverConfigurations(cluster, conf);
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(1);
        Sharpen.Thread.Sleep(500);
        client = NameNodeProxies.CreateProxy<ClientProtocol>(conf, FileSystem.GetDefaultUri(conf)).GetProxy();
        long totalCapacity = TestBalancer.Sum(capacities);
        // fill up the cluster to be 30% full
        long totalUsedSpace = totalCapacity * 3 / 10;
        TestBalancer.CreateFile(cluster, TestBalancer.filePath, totalUsedSpace / numOfDatanodes, (short)numOfDatanodes, 1);
        // start up an empty node with the same capacity and on the same rack
        cluster.StartDataNodes(conf, 1, true, null, new string[] { newNodeRack }, new long[] { newNodeCapacity });
        totalCapacity += newNodeCapacity;
        TestBalancer.WaitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
        ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
        NUnit.Framework.Assert.AreEqual(1, namenodes.Count);
        NUnit.Framework.Assert.IsTrue(namenodes.Contains(HATestUtil.GetLogicalUri(cluster)));
        // Run the balancer and expect a clean exit, then wait for the
        // cluster to actually reach a balanced state.
        int r = Org.Apache.Hadoop.Hdfs.Server.Balancer.Balancer.Run(namenodes, Balancer.Parameters.Default, conf);
        NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), r);
        TestBalancer.WaitForBalancer(totalUsedSpace, totalCapacity, client, cluster, Balancer.Parameters.Default);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <exception cref="System.Exception"/>
private void DoWriteOverFailoverTest(TestPipelinesFailover.TestScenario scenario, TestPipelinesFailover.MethodToTestIdempotence methodToTest)
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    // Don't check replication periodically.
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
    FSDataOutputStream stm = null;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();
    try
    {
        int sizeWritten = 0;
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        Sharpen.Thread.Sleep(500);
        Log.Info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        stm = fs.Create(TestPath);
        // write a block and a half
        AppendTestUtil.Write(stm, 0, BlockAndAHalf);
        sizeWritten += BlockAndAHalf;
        // Make sure all of the blocks are written out before failover.
        stm.Hflush();
        Log.Info("Failing over to NN 1");
        scenario.Run(cluster);
        // NOTE: explicitly do *not* make any further metadata calls
        // to the NN here. The next IPC call should be to allocate the next
        // block. Any other call would notice the failover and not test
        // idempotence of the operation (HDFS-3031)
        FSNamesystem ns1 = cluster.GetNameNode(1).GetNamesystem();
        BlockManagerTestUtil.UpdateState(ns1.GetBlockManager());
        NUnit.Framework.Assert.AreEqual(0, ns1.GetPendingReplicationBlocks());
        NUnit.Framework.Assert.AreEqual(0, ns1.GetCorruptReplicaBlocks());
        NUnit.Framework.Assert.AreEqual(0, ns1.GetMissingBlocksCount());
        // If we're testing allocateBlock()'s idempotence, write another
        // block and a half, so we have to allocate a new block.
        // Otherise, don't write anything, so our next RPC will be
        // completeFile() if we're testing idempotence of that operation.
        if (methodToTest == TestPipelinesFailover.MethodToTestIdempotence.AllocateBlock)
        {
            // write another block and a half
            AppendTestUtil.Write(stm, sizeWritten, BlockAndAHalf);
            sizeWritten += BlockAndAHalf;
        }
        stm.Close();
        stm = null;
        // Verify every byte written (across the failover) is readable.
        AppendTestUtil.Check(fs, TestPath, sizeWritten);
    }
    finally
    {
        IOUtils.CloseStream(stm);
        cluster.Shutdown();
    }
}
public virtual void Setup()
{
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    // Route the HA admin tool's error output into a buffer so the tests
    // can inspect it.
    tool = new DFSHAAdmin();
    tool.SetConf(conf);
    tool.SetErrOut(new TextWriter(errOutBytes));
    cluster.WaitActive();
    nn1Port = cluster.GetNameNodePort(0);
}
public virtual void TestMoverCliWithFederation()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(3))
        .NumDataNodes(0)
        .Build();
    Configuration conf = new HdfsConfiguration();
    DFSTestUtil.SetFederatedConfiguration(cluster, conf);
    try
    {
        ICollection<URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf);
        NUnit.Framework.Assert.AreEqual(3, namenodes.Count);
        // In a federated cluster a bare path is ambiguous — it must be
        // rejected.
        try
        {
            Mover.Cli.GetNameNodePathsToMove(conf, "-p", "/foo");
            NUnit.Framework.Assert.Fail("Expect exception for missing authority information");
        }
        catch (ArgumentException e)
        {
            GenericTestUtils.AssertExceptionContains("does not contain scheme and authority", e);
        }
        // Same for a scheme with no authority.
        try
        {
            Mover.Cli.GetNameNodePathsToMove(conf, "-p", "hdfs:///foo");
            NUnit.Framework.Assert.Fail("Expect exception for missing authority information");
        }
        catch (ArgumentException e)
        {
            GenericTestUtils.AssertExceptionContains("does not contain scheme and authority", e);
        }
        // And for an unknown scheme.
        try
        {
            Mover.Cli.GetNameNodePathsToMove(conf, "-p", "wrong-hdfs://ns1/foo");
            NUnit.Framework.Assert.Fail("Expect exception for wrong scheme");
        }
        catch (ArgumentException e)
        {
            GenericTestUtils.AssertExceptionContains("Cannot resolve the path", e);
        }
        IEnumerator<URI> iter = namenodes.GetEnumerator();
        URI nn1 = iter.Next();
        URI nn2 = iter.Next();
        // Fully-qualified paths should be bucketed under their owning NNs.
        IDictionary<URI, IList<Path>> movePaths = Mover.Cli.GetNameNodePathsToMove(conf, "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar");
        NUnit.Framework.Assert.AreEqual(2, movePaths.Count);
        CheckMovePaths(movePaths[nn1], new Path("/foo"), new Path("/bar"));
        CheckMovePaths(movePaths[nn2], new Path("/foo/bar"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
// Runs fsck against every namenode of a federated cluster (via direct hdfs
// URLs and again via viewfs mount links) and expects a HEALTHY status each
// time.
/// <exception cref="System.Exception"/>
private void RunTest(int nNameNodes, int nDataNodes, Configuration conf)
{
    Log.Info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
    Log.Info("RUN_TEST -1");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(nNameNodes)).NumDataNodes(nDataNodes).Build();
    Log.Info("RUN_TEST 0");
    DFSTestUtil.SetFederatedConfiguration(cluster, conf);
    try
    {
        cluster.WaitActive();
        Log.Info("RUN_TEST 1");
        TestFsckWithMultipleNameNodes.Suite s = new TestFsckWithMultipleNameNodes.Suite(cluster, nNameNodes, nDataNodes);
        // One file per namespace so every namenode has something to check.
        for (int i = 0; i < nNameNodes; i++)
        {
            s.CreateFile(i, 1024);
        }
        Log.Info("RUN_TEST 2");
        // fsck through the direct hdfs:// URL of each namenode.
        string[] urls = new string[nNameNodes];
        for (int i_1 = 0; i_1 < urls.Length; i_1++)
        {
            urls[i_1] = cluster.GetFileSystem(i_1).GetUri() + FileName;
            Log.Info("urls[" + i_1 + "]=" + urls[i_1]);
            string result = TestFsck.RunFsck(conf, 0, false, urls[i_1]);
            Log.Info("result=" + result);
            NUnit.Framework.Assert.IsTrue(result.Contains("Status: HEALTHY"));
        }
        // Test viewfs
        //
        Log.Info("RUN_TEST 3");
        // fsck again, this time through viewfs mount links.
        string[] vurls = new string[nNameNodes];
        for (int i_2 = 0; i_2 < vurls.Length; i_2++)
        {
            string link = "/mount/nn_" + i_2 + FileName;
            ConfigUtil.AddLink(conf, link, new URI(urls[i_2]));
            vurls[i_2] = "viewfs:" + link;
        }
        for (int i_3 = 0; i_3 < vurls.Length; i_3++)
        {
            Log.Info("vurls[" + i_3 + "]=" + vurls[i_3]);
            string result = TestFsck.RunFsck(conf, 0, false, vurls[i_3]);
            Log.Info("result=" + result);
            NUnit.Framework.Assert.IsTrue(result.Contains("Status: HEALTHY"));
        }
    }
    finally
    {
        cluster.Shutdown();
    }
    Log.Info("RUN_TEST 6");
}
public virtual void SetUp()
{
    conf = new HdfsConfiguration();
    // Tiny block/checksum sizes so tests exercise multi-block files cheaply.
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 100);
    conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 100);
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(3))
        .Build();
    // Wait until every nameservice is up.
    for (int ns = 0; ns < 3; ns++)
    {
        cluster.WaitActive(ns);
    }
}
public virtual void SetupCluster()
{
    conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, 1);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSNNTopology topology = MiniDFSNNTopology.SimpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(0).Build();
    cluster.WaitActive();
    // The tests manage the shared edits dir themselves, so tear the
    // freshly built cluster back down right away.
    ShutdownClusterAndRemoveSharedEditsDir();
}
// Verifies the standby is "hot": after each edit-log roll, block location
// changes (file creation, replication lowered to 1, raised back to 3) made
// on the active become visible on the standby.
/// <exception cref="System.Exception"/>
public virtual void TestStandbyIsHot()
{
    Configuration conf = new Configuration();
    // We read from the standby to watch block locations
    HAUtil.SetAllowStandbyReads(conf, true);
    conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode nn1 = cluster.GetNameNode(0);
        NameNode nn2 = cluster.GetNameNode(1);
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        Sharpen.Thread.Sleep(1000);
        System.Console.Error.WriteLine("==================================");
        DFSTestUtil.WriteFile(fs, TestFilePath, TestFileData);
        // Have to force an edit log roll so that the standby catches up
        nn1.GetRpcServer().RollEditLog();
        System.Console.Error.WriteLine("==================================");
        // Block locations should show up on standby.
        Log.Info("Waiting for block locations to appear on standby node");
        WaitForBlockLocations(cluster, nn2, TestFile, 3);
        // Trigger immediate heartbeats and block reports so
        // that the active "trusts" all of the DNs
        cluster.TriggerHeartbeats();
        cluster.TriggerBlockReports();
        // Change replication
        Log.Info("Changing replication to 1");
        fs.SetReplication(TestFilePath, (short)1);
        BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
        WaitForBlockLocations(cluster, nn1, TestFile, 1);
        nn1.GetRpcServer().RollEditLog();
        Log.Info("Waiting for lowered replication to show up on standby");
        WaitForBlockLocations(cluster, nn2, TestFile, 1);
        // Change back to 3
        Log.Info("Changing replication to 3");
        fs.SetReplication(TestFilePath, (short)3);
        BlockManagerTestUtil.ComputeAllPendingWork(nn1.GetNamesystem().GetBlockManager());
        nn1.GetRpcServer().RollEditLog();
        Log.Info("Waiting for higher replication to show up on standby");
        WaitForBlockLocations(cluster, nn2, TestFile, 3);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void SetupCluster()
{
    Configuration conf = new Configuration();
    MiniDFSNNTopology topology = new MiniDFSNNTopology().AddNameservice(
        new MiniDFSNNTopology.NSConf("ns1")
            .AddNN(new MiniDFSNNTopology.NNConf("nn1").SetHttpPort(20001))
            .AddNN(new MiniDFSNNTopology.NNConf("nn2").SetHttpPort(20002)));
    cluster = new MiniDFSCluster.Builder(conf).NnTopology(topology).NumDataNodes(0).Build();
    cluster.WaitActive();
    nn0 = cluster.GetNameNode(0);
    cluster.TransitionToActive(0);
    // Only NN0 stays up; NN1 is shut down for these tests.
    cluster.ShutdownNameNode(1);
}