/// <summary>Verify BackupNode port usage.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestBackupNodePorts()
{
    NameNode nn = null;
    try
    {
        nn = StartNameNode();
        // Derive the backup node's configuration from the running name-node's.
        Configuration backupConf = new HdfsConfiguration(config);
        backupConf.Set(DFSConfigKeys.DfsNamenodeBackupAddressKey, ThisHost);
        // Attempt 1: point the backup http server at the name-node's own http
        // port -- the backup node must refuse to start.
        backupConf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey,
            backupConf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey));
        Log.Info("= Starting 1 on: " + backupConf.Get(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey));
        NUnit.Framework.Assert.IsFalse("Backup started on same port as Namenode",
            CanStartBackupNode(backupConf));
        // Attempt 2: move the http server to a different (free) port -- the
        // backup node must now come up.
        backupConf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, ThisHost);
        Log.Info("= Starting 2 on: " + backupConf.Get(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey));
        bool started = CanStartBackupNode(backupConf);
        NUnit.Framework.Assert.IsTrue("Backup Namenode should've started", started);
    }
    finally
    {
        // Always tear the name-node down, even when an assertion failed.
        StopNameNode(nn);
    }
}
/// <summary>Verify secondary namenode port usage.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestSecondaryNodePorts()
{
    NameNode nn = null;
    try
    {
        nn = StartNameNode();
        // Clone the primary's configuration and first bind the secondary's
        // http server to the name-node's http port -- startup must fail.
        Configuration secondaryConf = new HdfsConfiguration(config);
        secondaryConf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey,
            config.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey));
        Log.Info("= Starting 1 on: " + secondaryConf.Get(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey));
        bool started = CanStartSecondaryNode(secondaryConf);
        NUnit.Framework.Assert.IsFalse(started);
        // Rebind the http server to a different port; the secondary
        // name-node must now start successfully.
        secondaryConf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, ThisHost);
        Log.Info("= Starting 2 on: " + secondaryConf.Get(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey));
        started = CanStartSecondaryNode(secondaryConf);
        NUnit.Framework.Assert.IsTrue(started);
    }
    finally
    {
        // Always stop the name-node started at the top of the test.
        StopNameNode(nn);
    }
}
/// <summary>
/// Verifies that NameNode.InitializeGenericKeys derives fs.defaultFS from the
/// namenode RPC address when neither federation (no nameservice id) nor HA
/// (no namenode id) is configured.
/// </summary>
public virtual void TestConfModificationNoFederationOrHa()
{
    HdfsConfiguration conf = new HdfsConfiguration();
    // Plain single-namenode deployment: no nameservice id, no namenode id.
    string nsId = null;
    string nnId = null;
    conf.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "localhost:1234");
    // Sanity check: before initialization the default FS has not yet been
    // derived from the RPC address. (AreNotEqual replaces the original
    // IsFalse(Equals(...)) so a failure reports the actual value.)
    NUnit.Framework.Assert.AreNotEqual("hdfs://localhost:1234",
        conf.Get(CommonConfigurationKeysPublic.FsDefaultNameKey));
    NameNode.InitializeGenericKeys(conf, nsId, nnId);
    // After initialization the default FS must point at the RPC address.
    NUnit.Framework.Assert.AreEqual("hdfs://localhost:1234",
        conf.Get(CommonConfigurationKeysPublic.FsDefaultNameKey));
}
/// <summary>
/// Verifies that deprecated configuration keys are transparently mapped onto
/// their current replacements by the Configuration deprecation machinery.
/// </summary>
public virtual void TestDeprecatedKeys()
{
    Configuration conf = new HdfsConfiguration();
    // A value written under the deprecated topology key must be readable
    // through the current key. AreEqual (instead of IsTrue(Equals)) avoids
    // an NRE when Get returns null and reports both values on failure.
    conf.Set("topology.script.file.name", "xyz");
    string scriptFile = conf.Get(DFSConfigKeys.NetTopologyScriptFileNameKey);
    NUnit.Framework.Assert.AreEqual("xyz", scriptFile);
    // Same round-trip for the deprecated replication-interval key; the
    // default (3) must NOT be returned. (The unused local `alpha` from the
    // original was removed.)
    conf.SetInt("dfs.replication.interval", 1);
    int repInterval = conf.GetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 3);
    NUnit.Framework.Assert.AreEqual(1, repInterval);
}
/// <summary>
/// Verifies that with federation configured (nameservice id only, no HA),
/// NameNode.InitializeGenericKeys copies every nameservice-suffixed
/// namenode key onto its generic (un-suffixed) name.
/// </summary>
public virtual void TestConfModificationFederationOnly()
{
    HdfsConfiguration conf = new HdfsConfiguration();
    string nsId = "ns1";
    conf.Set(DFSConfigKeys.DfsNameservices, nsId);
    conf.Set(DFSConfigKeys.DfsNameserviceId, nsId);
    // Populate each namenode-specific key under its nameservice-suffixed
    // name; the stored value is the key itself so verification is trivial.
    foreach (string specificKey in NameNode.NamenodeSpecificKeys)
    {
        conf.Set(DFSUtil.AddKeySuffixes(specificKey, nsId), specificKey);
    }
    // Promote the suffixed keys to their generic names.
    NameNode.InitializeGenericKeys(conf, nsId, null);
    // Every generic key must now carry the value stored under its suffix.
    foreach (string genericKey in NameNode.NamenodeSpecificKeys)
    {
        NUnit.Framework.Assert.AreEqual(genericKey, conf.Get(genericKey));
    }
}
/// <summary>
/// Quota accounting for many files each smaller than one block: fills a
/// directory with 59 small files under a 192kb space quota, checks the
/// consumed space (via both DFS and WebHDFS content summaries), and then
/// verifies that one more file trips QuotaExceededException.
/// </summary>
public virtual void TestMultipleFilesSmallerThanOneBlock()
{
    Configuration conf = new HdfsConfiguration();
    int BlockSize = 6 * 1024;
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
    // Make it relinquish locks. When run serially, the result should
    // be identical.
    conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);
    // Build a WebHDFS filesystem against the name-node http address so the
    // content summary can be cross-checked over both access paths.
    string nnAddr = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
    string webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;
    System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
    FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);
    try
    {
        Path dir = new Path("/test");
        bool exceededQuota = false;
        ContentSummary c;
        // 1kb file
        // 6kb block
        // 192kb quota
        int FileSize = 1024;
        int QuotaSize = 32 * (int)fs.GetDefaultBlockSize(dir);
        NUnit.Framework.Assert.AreEqual(6 * 1024, fs.GetDefaultBlockSize(dir));
        NUnit.Framework.Assert.AreEqual(192 * 1024, QuotaSize);
        // Create the dir and set the quota. We need to enable the quota before
        // writing the files as setting the quota afterwards will over-write
        // the cached disk space used for quota verification with the actual
        // amount used as calculated by INode#spaceConsumedInTree.
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
        RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(QuotaSize)
            , dir.ToString());
        // We can create at most 59 files because block allocation is
        // conservative and initially assumes a full block is used, so we
        // need to leave at least 3 * BLOCK_SIZE free space when allocating
        // the last block: (58 * 3 * 1024) (3 * 6 * 1024) = 192kb
        for (int i = 0; i < 59; i++)
        {
            Path file = new Path("/test/test" + i);
            DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
            DFSTestUtil.WaitReplication(fs, file, (short)3);
        }
        // Should account for all 59 files (almost QUOTA_SIZE)
        c = fs.GetContentSummary(dir);
        CheckContentSummary(c, webhdfs.GetContentSummary(dir));
        NUnit.Framework.Assert.AreEqual("Invalid space consumed", 59 * FileSize * 3,
            c.GetSpaceConsumed());
        // The remaining quota headroom equals three (replicas) times the
        // unused tail of one block.
        NUnit.Framework.Assert.AreEqual("Invalid space consumed", QuotaSize - (59 * FileSize
            * 3), 3 * (fs.GetDefaultBlockSize(dir) - FileSize));
        // Now check that trying to create another file violates the quota
        try
        {
            Path file = new Path("/test/test59");
            DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
            DFSTestUtil.WaitReplication(fs, file, (short)3);
        }
        catch (QuotaExceededException)
        {
            exceededQuota = true;
        }
        NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
        // With the content-summary limit set to 2 above, computing the
        // summary is expected to have yielded the lock exactly twice.
        NUnit.Framework.Assert.AreEqual(2, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
            ());
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Verifies that block allocation charges a full block (times replication)
/// against the space quota up front, so a second half-block file is rejected
/// even though its final size would have fit under the quota.
/// </summary>
public virtual void TestBlockAllocationAdjustsUsageConservatively()
{
    Configuration conf = new HdfsConfiguration();
    int blockSize = 6 * 1024;
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, blockSize);
    conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    DFSAdmin admin = new DFSAdmin(conf);
    // Cross-check content summaries over WebHDFS as well as plain DFS.
    string nnAddr = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
    string webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;
    System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
    FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);
    try
    {
        Path dir = new Path("/test");
        Path file1 = new Path("/test/test1");
        Path file2 = new Path("/test/test2");
        bool exceededQuota = false;
        // Quota covers exactly one fully replicated block (replication = 3);
        // each test file occupies half a block.
        int quotaSize = 3 * blockSize;
        int fileSize = blockSize / 2;
        ContentSummary c;
        // Create the directory and set the quota
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
        RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(quotaSize)
            , dir.ToString());
        // Creating a file should use half the quota
        DFSTestUtil.CreateFile(fs, file1, fileSize, (short)3, 1L);
        DFSTestUtil.WaitReplication(fs, file1, (short)3);
        c = fs.GetContentSummary(dir);
        CheckContentSummary(c, webhdfs.GetContentSummary(dir));
        NUnit.Framework.Assert.AreEqual("Quota is half consumed", quotaSize / 2,
            c.GetSpaceConsumed());
        // We can not create the 2nd file because even though the total spaced
        // used by two files (2 * 3 * 512/2) would fit within the quota (3 * 512)
        // when a block for a file is created the space used is adjusted
        // conservatively (3 * block size, ie assumes a full block is written)
        // which will violate the quota (3 * block size) since we've already
        // used half the quota for the first file.
        try
        {
            DFSTestUtil.CreateFile(fs, file2, fileSize, (short)3, 1L);
        }
        catch (QuotaExceededException)
        {
            exceededQuota = true;
        }
        NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Starts <paramref name="numDataNodes"/> additional data nodes in this
/// mini-cluster, wiring up per-node directories, racks, node groups,
/// hostnames, and (simulated or per-storage) capacities as requested.
/// Synchronized on the cluster instance.
/// </summary>
/// <param name="conf">base configuration cloned for each new datanode (also mutated for shared keys)</param>
/// <param name="numDataNodes">number of datanodes to start</param>
/// <param name="storageTypes">optional per-node storage types; length must equal numDataNodes</param>
/// <param name="manageDfsDirs">if true, data directories are created and configured here</param>
/// <param name="operation">startup option; Recover is a no-op, Rollback is passed to the datanodes</param>
/// <param name="racks">optional rack per new datanode</param>
/// <param name="nodeGroups">optional node group per new datanode</param>
/// <param name="hosts">optional hostname per new datanode (generated when racks are set but hosts are not)</param>
/// <param name="storageCapacities">optional per-node, per-volume capacities; mutually exclusive with simulatedCapacities</param>
/// <param name="simulatedCapacities">optional simulated dataset capacity per new datanode</param>
/// <param name="setupHostsFile">whether to register the node in the hosts file</param>
/// <param name="checkDataNodeAddrConfig">whether to respect pre-configured datanode addresses</param>
/// <param name="checkDataNodeHostConfig">whether to respect a pre-configured datanode hostname</param>
/// <exception cref="System.IO.IOException"/>
public virtual void StartDataNodes(Configuration conf, int numDataNodes, StorageType
    [][] storageTypes, bool manageDfsDirs, HdfsServerConstants.StartupOption operation
    , string[] racks, string[] nodeGroups, string[] hosts, long[][] storageCapacities
    , long[] simulatedCapacities, bool setupHostsFile, bool checkDataNodeAddrConfig,
    bool checkDataNodeHostConfig)
{
    lock (this)
    {
        // Per-storage capacities and a simulated dataset are mutually exclusive,
        // and every per-node array must cover each new datanode.
        System.Diagnostics.Debug.Assert(storageCapacities == null || simulatedCapacities
            == null);
        System.Diagnostics.Debug.Assert(storageTypes == null || storageTypes.Length == numDataNodes
            );
        System.Diagnostics.Debug.Assert(storageCapacities == null || storageCapacities.Length
            == numDataNodes);
        if (operation == HdfsServerConstants.StartupOption.Recover)
        {
            return;
        }
        if (checkDataNodeHostConfig)
        {
            // Respect a pre-configured hostname; only supply the default if unset.
            conf.SetIfUnset(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
        }
        else
        {
            conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
        }
        // BUG FIX: removed an unconditional conf.Set of the hostname key that
        // followed this if/else and silently overwrote the SetIfUnset branch,
        // defeating checkDataNodeHostConfig.
        int curDatanodesNum = dataNodes.Count;
        // for mincluster's the default initialDelay for BRs is 0
        if (conf.Get(DFSConfigKeys.DfsBlockreportInitialDelayKey) == null)
        {
            conf.SetLong(DFSConfigKeys.DfsBlockreportInitialDelayKey, 0);
        }
        // If minicluster's name node is null assume that the conf has been
        // set with the right address:port of the name node.
        //
        if (racks != null && numDataNodes > racks.Length)
        {
            throw new ArgumentException("The length of racks [" + racks.Length + "] is less than the number of datanodes ["
                + numDataNodes + "].");
        }
        if (nodeGroups != null && numDataNodes > nodeGroups.Length)
        {
            throw new ArgumentException("The length of nodeGroups [" + nodeGroups.Length + "] is less than the number of datanodes ["
                + numDataNodes + "].");
        }
        if (hosts != null && numDataNodes > hosts.Length)
        {
            throw new ArgumentException("The length of hosts [" + hosts.Length + "] is less than the number of datanodes ["
                + numDataNodes + "].");
        }
        //Generate some hostnames if required
        if (racks != null && hosts == null)
        {
            hosts = new string[numDataNodes];
            for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
            {
                hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
            }
        }
        if (simulatedCapacities != null && numDataNodes > simulatedCapacities.Length)
        {
            throw new ArgumentException("The length of simulatedCapacities [" + simulatedCapacities
                .Length + "] is less than the number of datanodes [" + numDataNodes + "].");
        }
        // Only Rollback is forwarded as a datanode argument.
        string[] dnArgs = (operation == null || operation != HdfsServerConstants.StartupOption
            .Rollback) ? null : new string[] { operation.GetName() };
        DataNode[] dns = new DataNode[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
        {
            Configuration dnConf = new HdfsConfiguration(conf);
            // Set up datanode address
            SetupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
            if (manageDfsDirs)
            {
                string dirs = MakeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
                dnConf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
                conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
            }
            if (simulatedCapacities != null)
            {
                SimulatedFSDataset.SetFactory(dnConf);
                dnConf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity, simulatedCapacities[i -
                    curDatanodesNum]);
            }
            Log.Info("Starting DataNode " + i + " with " + DFSConfigKeys.DfsDatanodeDataDirKey
                + ": " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey));
            if (hosts != null)
            {
                dnConf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, hosts[i - curDatanodesNum]);
                Log.Info("Starting DataNode " + i + " with hostname set to: " + dnConf.Get(DFSConfigKeys
                    .DfsDatanodeHostNameKey));
            }
            if (racks != null)
            {
                // Register the node's hostname with the static topology mapping.
                string name = hosts[i - curDatanodesNum];
                if (nodeGroups == null)
                {
                    Log.Info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum
                        ]);
                    StaticMapping.AddNodeToRack(name, racks[i - curDatanodesNum]);
                }
                else
                {
                    Log.Info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[
                        i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                    StaticMapping.AddNodeToRack(name, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum
                        ]);
                }
            }
            Configuration newconf = new HdfsConfiguration(dnConf);
            // save config
            if (hosts != null)
            {
                NetUtils.AddStaticResolution(hosts[i - curDatanodesNum], "localhost");
            }
            SecureDataNodeStarter.SecureResources secureResources = null;
            if (UserGroupInformation.IsSecurityEnabled())
            {
                try
                {
                    secureResources = SecureDataNodeStarter.GetSecureResources(dnConf);
                }
                catch (Exception ex)
                {
                    // Best effort: continue unsecured if secure resources fail.
                    Sharpen.Runtime.PrintStackTrace(ex);
                }
            }
            DataNode dn = DataNode.InstantiateDataNode(dnArgs, dnConf, secureResources);
            if (dn == null)
            {
                throw new IOException("Cannot start DataNode in " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey
                    ));
            }
            //since the HDFS does things based on IP:port, we need to add the mapping
            //for IP:port to rackId
            string ipAddr = dn.GetXferAddress().Address.GetHostAddress();
            if (racks != null)
            {
                int port = dn.GetXferAddress().Port;
                if (nodeGroups == null)
                {
                    Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks
                        [i - curDatanodesNum]);
                    StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum]);
                }
                else
                {
                    Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
                        nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                    StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum] + nodeGroups
                        [i - curDatanodesNum]);
                }
            }
            dn.RunDatanodeDaemon();
            dataNodes.AddItem(new MiniDFSCluster.DataNodeProperties(this, dn, newconf, dnArgs
                , secureResources, dn.GetIpcPort()));
            dns[i - curDatanodesNum] = dn;
        }
        curDatanodesNum += numDataNodes;
        this.numDataNodes += numDataNodes;
        WaitActive();
        if (storageCapacities != null)
        {
            // BUG FIX: the original loop declared `i` but tested and incremented
            // `i_1` (out of scope here) and, after curDatanodesNum was bumped,
            // would have indexed past dns/storageCapacities. Iterate the newly
            // started nodes directly: dns and storageCapacities are both
            // indexed 0..numDataNodes-1.
            for (int i = 0; i < numDataNodes; ++i)
            {
                IList<FsVolumeSpi> volumes = dns[i].GetFSDataset().GetVolumes();
                System.Diagnostics.Debug.Assert(volumes.Count == storagesPerDatanode);
                for (int j = 0; j < volumes.Count; ++j)
                {
                    FsVolumeImpl volume = (FsVolumeImpl)volumes[j];
                    volume.SetCapacityForTesting(storageCapacities[i][j]);
                }
            }
        }
    }
}