/// <summary>
/// Verifies block placement when the topology is rebuilt from the
/// boundary-case datanode set: for replication factors 0-3 the policy must
/// return the requested number of targets, spread replicas across racks,
/// and (for 3 replicas) across node groups.
/// </summary>
public virtual void TestChooseTargetsOnBoundaryTopology()
{
    // Swap the regular datanodes out of the topology for the boundary-case set.
    for (int i = 0; i < NumOfDatanodes; i++)
    {
        cluster.Remove(dataNodes[i]);
    }
    for (int i = 0; i < NumOfDatanodesBoundary; i++)
    {
        cluster.Add(dataNodesInBoundaryCase[i]);
    }
    for (int i = 0; i < NumOfDatanodesBoundary; i++)
    {
        // NOTE(review): dataNodes[0] (just removed from the topology) is given
        // insufficient remaining space (MinBlocksForWrite - 1 blocks) so it can
        // never be chosen — this appears intentional; confirm against the
        // upstream test before changing.
        UpdateHeartbeatWithUsage(dataNodes[0],
            2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L,
            (HdfsConstants.MinBlocksForWrite - 1) * BlockSize, 0L, 0L, 0L, 0, 0);
        // Every boundary-case node reports ample remaining capacity.
        UpdateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
            2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L,
            2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L, 0L, 0L, 0, 0);
    }
    DatanodeStorageInfo[] targets;
    // Fixed: NUnit's Assert.AreEqual signature is (expected, actual); the
    // arguments were previously swapped, which yields misleading failure text.
    targets = ChooseTarget(0, dataNodesInBoundaryCase[0]);
    NUnit.Framework.Assert.AreEqual(0, targets.Length);
    targets = ChooseTarget(1, dataNodesInBoundaryCase[0]);
    NUnit.Framework.Assert.AreEqual(1, targets.Length);
    targets = ChooseTarget(2, dataNodesInBoundaryCase[0]);
    NUnit.Framework.Assert.AreEqual(2, targets.Length);
    // Two replicas must land on different racks.
    NUnit.Framework.Assert.IsFalse(IsOnSameRack(targets[0], targets[1]));
    targets = ChooseTarget(3, dataNodesInBoundaryCase[0]);
    NUnit.Framework.Assert.AreEqual(3, targets.Length);
    // Three replicas must each land on a different node group.
    NUnit.Framework.Assert.IsTrue(CheckTargetsOnDifferentNodeGroup(targets));
}
/// <summary>
/// Builds a NodeGroup-aware mini namenode environment: configures topology
/// classes, formats and starts a NameNode, then registers every datanode
/// with the resulting network topology.
/// </summary>
public virtual void SetUp()
{
    // Point HDFS at a throwaway local URI with ephemeral ports.
    FileSystem.SetDefaultUri(Conf, "hdfs://localhost:0");
    Conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "0.0.0.0:0");
    // Make HDFS aware of the NodeGroup layer: NodeGroup-aware placement
    // policy plus the matching topology implementation.
    Conf.Set(DFSConfigKeys.DfsBlockReplicatorClassnameKey,
        typeof(BlockPlacementPolicyWithNodeGroup).FullName);
    Conf.Set(CommonConfigurationKeysPublic.NetTopologyImplKey,
        typeof(NetworkTopologyWithNodeGroup).FullName);
    Conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForWriteKey, true);
    // Keep the name directory under the per-test scratch directory.
    FilePath baseDir = PathUtils.GetTestDir(typeof(TestReplicationPolicyWithNodeGroup));
    Conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
        new FilePath(baseDir, "name").GetPath());
    DFSTestUtil.FormatNameNode(Conf);
    namenode = new NameNode(Conf);
    BlockManager blockManager = namenode.GetNamesystem().GetBlockManager();
    replicator = blockManager.GetBlockPlacementPolicy();
    cluster = blockManager.GetDatanodeManager().GetNetworkTopology();
    // Register each datanode with the freshly built topology.
    for (int idx = 0; idx < NumOfDatanodes; idx++)
    {
        cluster.Add(dataNodes[idx]);
    }
    SetupDataNodeCapacity();
}
/// <summary>
/// Registers the given datanodes with the block manager's network topology
/// and seeds each one with a heartbeat reporting ample free capacity.
/// </summary>
/// <param name="nodesToAdd">Datanode descriptors to add to the topology.</param>
private void AddNodes(IEnumerable<DatanodeDescriptor> nodesToAdd)
{
    NetworkTopology topology = bm.GetDatanodeManager().GetNetworkTopology();
    foreach (DatanodeDescriptor node in nodesToAdd)
    {
        // Insert into the topology tree first, then publish storage usage.
        topology.Add(node);
        // Capacity == remaining: the node is empty and fully writable.
        node.GetStorageInfos()[0].SetUtilizationForTesting(
            2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L,
            2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L);
        node.UpdateHeartbeat(
            BlockManagerTestUtil.GetStorageReportsForDatanode(node), 0L, 0L, 0, 0, null);
        // Let the datanode manager notice if the cluster just became multi-rack.
        bm.GetDatanodeManager().CheckIfClusterIsNowMultiRack(node);
    }
}
/// <summary>
/// Entry point: configures Log4Net, loads devices/edges from the OMS Oracle
/// database, energizes the resulting network topology, and serializes it to
/// the configured snapshot file.
/// </summary>
static void Main(string[] args)
{
    // Wire up Log4Net from its XML config before anything else logs.
    var repository = LogManager.GetRepository(Assembly.GetEntryAssembly());
    XmlConfigurator.Configure(repository, new FileInfo("log4net.config"));
    log.Info("NetworkManager Topology Builder Starting");
    using (var oracle = new OracleConnection(
        ConfigurationManager.ConnectionStrings["OMS"].ConnectionString))
    {
        oracle.Open();
        // Build the in-memory model: device types, devices, then edges.
        var network = new NetworkTopology();
        var typeLookup = LoadDeviceTypes(oracle);
        network.Add(LoadDevices(oracle, typeLookup));
        network.Connect(LoadEdges(oracle));
        network.EnergizeNetwork();
        // Persist the energized topology to the configured snapshot path.
        using (var output = new FileStream(
            ConfigurationManager.AppSettings["NetworkTopologyPath"], FileMode.Create))
        {
            network.Save(output);
        }
    }
}
/// <summary>
/// This function identifies and returns the hosts that contribute
/// most for a given split.
/// </summary>
/// <remarks>
/// This function identifies and returns the hosts that contribute
/// most for a given split. For calculating the contribution, rack
/// locality is treated on par with host locality, so hosts from racks
/// that contribute the most are preferred over hosts on racks that
/// contribute less.
/// </remarks>
/// <param name="blkLocations">The list of block locations</param>
/// <param name="offset">Byte offset of the split within the file</param>
/// <param name="splitSize">Size in bytes of the split</param>
/// <param name="clusterMap">Topology used to resolve/register host nodes</param>
/// <returns>
/// two arrays - one of hosts that contribute most to this split, and
/// one of hosts that contribute most to this split that have the data
/// cached on them
/// </returns>
/// <exception cref="System.IO.IOException"/>
private string[][] GetSplitHostsAndCachedHosts(BlockLocation[] blkLocations, long
    offset, long splitSize, NetworkTopology clusterMap)
{
    // Locate the block containing the split's starting offset and how many of
    // its bytes fall inside the split.
    int startIndex = GetBlockIndex(blkLocations, offset);
    long bytesInThisBlock = blkLocations[startIndex].GetOffset() + blkLocations[startIndex
        ].GetLength() - offset;
    //If this is the only block, just return
    if (bytesInThisBlock >= splitSize)
    {
        return (new string[][] { blkLocations[startIndex].GetHosts(), blkLocations[startIndex
            ].GetCachedHosts() });
    }
    long bytesInFirstBlock = bytesInThisBlock;
    int index = startIndex + 1;
    splitSize -= bytesInThisBlock;
    // Walk forward through whole blocks until the split is exhausted; the last
    // iteration leaves bytesInThisBlock = bytes of the final (partial) block.
    while (splitSize > 0)
    {
        bytesInThisBlock = Math.Min(splitSize, blkLocations[index++].GetLength());
        splitSize -= bytesInThisBlock;
    }
    long bytesInLastBlock = bytesInThisBlock;
    int endIndex = index - 1;
    // Identity-keyed maps: host node -> aggregated bytes, rack node -> aggregated
    // bytes (with the host nodes attached as leaves).
    // NOTE(review): the indexer is assumed to return null for a missing key
    // (Java IdentityHashMap semantics via the Sharpen shim) rather than throw
    // like the BCL IDictionary indexer — confirm against the shim.
    IDictionary<Node, FileInputFormat.NodeInfo> hostsMap = new IdentityHashMap<Node,
        FileInputFormat.NodeInfo>();
    IDictionary<Node, FileInputFormat.NodeInfo> racksMap = new IdentityHashMap<Node,
        FileInputFormat.NodeInfo>();
    string[] allTopos = new string[0];
    // Build the hierarchy and aggregate the contribution of
    // bytes at each level. See TestGetSplitHosts.java
    for (index = startIndex; index <= endIndex; index++)
    {
        // Establish the bytes in this block: the first and last blocks are
        // partial; every interior block contributes its full length.
        if (index == startIndex)
        {
            bytesInThisBlock = bytesInFirstBlock;
        }
        else
        {
            if (index == endIndex)
            {
                bytesInThisBlock = bytesInLastBlock;
            }
            else
            {
                bytesInThisBlock = blkLocations[index].GetLength();
            }
        }
        allTopos = blkLocations[index].GetTopologyPaths();
        // If no topology information is available, just
        // prefix a fakeRack
        if (allTopos.Length == 0)
        {
            allTopos = FakeRacks(blkLocations, index);
        }
        // NOTE: This code currently works only for one level of
        // hierarchy (rack/host). However, it is relatively easy
        // to extend this to support aggregation at different
        // levels
        foreach (string topo in allTopos)
        {
            Node node;
            Node parentNode;
            FileInputFormat.NodeInfo nodeInfo;
            FileInputFormat.NodeInfo parentNodeInfo;
            // Resolve the host in the topology, registering it on first sight.
            node = clusterMap.GetNode(topo);
            if (node == null)
            {
                node = new NodeBase(topo);
                clusterMap.Add(node);
            }
            nodeInfo = hostsMap[node];
            if (nodeInfo == null)
            {
                // First time we see this host: create its entry and link it
                // under its rack (creating the rack entry if needed).
                nodeInfo = new FileInputFormat.NodeInfo(node);
                hostsMap[node] = nodeInfo;
                parentNode = node.GetParent();
                parentNodeInfo = racksMap[parentNode];
                if (parentNodeInfo == null)
                {
                    parentNodeInfo = new FileInputFormat.NodeInfo(parentNode);
                    racksMap[parentNode] = parentNodeInfo;
                }
                parentNodeInfo.AddLeaf(nodeInfo);
            }
            else
            {
                // Known host: look up its rack entry. (The re-read of
                // hostsMap[node] is redundant but harmless.)
                nodeInfo = hostsMap[node];
                parentNode = node.GetParent();
                parentNodeInfo = racksMap[parentNode];
            }
            // Credit this block's bytes to both the host and its rack.
            nodeInfo.AddValue(index, bytesInThisBlock);
            parentNodeInfo.AddValue(index, bytesInThisBlock);
        }
    }
    // for all topos
    // for all indices
    // We don't yet support cached hosts when bytesInThisBlock > splitSize
    return (new string[][] { IdentifyHosts(allTopos.Length, racksMap), new string[0] });
}