public virtual void TestEncryptionProbe() {
    Configuration conf = new Configuration(false);
    conf.Unset(DFSConfigKeys.DfsEncryptionKeyProviderUri);
    NUnit.Framework.Assert.IsFalse("encryption enabled on no provider key",
        DFSUtil.IsHDFSEncryptionEnabled(conf));
    conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, string.Empty);
    NUnit.Framework.Assert.IsFalse("encryption enabled on empty provider key",
        DFSUtil.IsHDFSEncryptionEnabled(conf));
    conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, "\n\t\n");
    NUnit.Framework.Assert.IsFalse("encryption enabled on whitespace provider key",
        DFSUtil.IsHDFSEncryptionEnabled(conf));
    conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, "http://hadoop.apache.org");
    NUnit.Framework.Assert.IsTrue("encryption disabled on valid provider key",
        DFSUtil.IsHDFSEncryptionEnabled(conf));
}
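// Illustrative sketch (not part of the test above): how client code might use
// the same probe before attempting encryption-zone work. The KMS URI shown is
// a hypothetical example value, not taken from the test.
Configuration probeConf = new Configuration();
probeConf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, "kms://http@kms.example.com:9600/kms");
if (DFSUtil.IsHDFSEncryptionEnabled(probeConf)) {
    // Safe to resolve the key provider and operate on encryption zones.
}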
/// <exception cref="System.IO.IOException"/> public DatanodeHttpServer(Configuration conf, DataNode datanode, ServerSocketChannel externalHttpChannel) { this.conf = conf; Configuration confForInfoServer = new Configuration(conf); confForInfoServer.SetInt(HttpServer2.HttpMaxThreads, 10); HttpServer2.Builder builder = new HttpServer2.Builder().SetName("datanode").SetConf (confForInfoServer).SetACL(new AccessControlList(conf.Get(DFSConfigKeys.DfsAdmin , " "))).HostName(GetHostnameForSpnegoPrincipal(confForInfoServer)).AddEndpoint( URI.Create("http://localhost:0")).SetFindPort(true); this.infoServer = builder.Build(); this.infoServer.AddInternalServlet(null, "/streamFile/*", typeof(StreamFile)); this.infoServer.AddInternalServlet(null, "/getFileChecksum/*", typeof(FileChecksumServlets.GetServlet )); this.infoServer.SetAttribute("datanode", datanode); this.infoServer.SetAttribute(JspHelper.CurrentConf, conf); this.infoServer.AddServlet(null, "/blockScannerReport", typeof(BlockScanner.Servlet )); this.infoServer.Start(); IPEndPoint jettyAddr = infoServer.GetConnectorAddress(0); this.confForCreate = new Configuration(conf); confForCreate.Set(FsPermission.UmaskLabel, "000"); this.bossGroup = new NioEventLoopGroup(); this.workerGroup = new NioEventLoopGroup(); this.externalHttpChannel = externalHttpChannel; HttpConfig.Policy policy = DFSUtil.GetHttpPolicy(conf); if (policy.IsHttpEnabled()) { this.httpServer = new ServerBootstrap().Group(bossGroup, workerGroup).ChildHandler (new _ChannelInitializer_117(this, jettyAddr, conf)); if (externalHttpChannel == null) { httpServer.Channel(typeof(NioServerSocketChannel)); } else { httpServer.ChannelFactory(new _ChannelFactory_130(externalHttpChannel)); } } else { // The channel has been bounded externally via JSVC, // thus bind() becomes a no-op. this.httpServer = null; } if (policy.IsHttpsEnabled()) { this.sslFactory = new SSLFactory(SSLFactory.Mode.Server, conf); try { sslFactory.Init(); } catch (GeneralSecurityException e) { throw new IOException(e); } this.httpsServer = new ServerBootstrap().Group(bossGroup, workerGroup).Channel(typeof( NioServerSocketChannel)).ChildHandler(new _ChannelInitializer_155(this, jettyAddr , conf)); } else { this.httpsServer = null; this.sslFactory = null; } }
/// <returns>null if the local name is null; otherwise, return the local name.</returns>
public string GetLocalName() {
    byte[] name = GetLocalNameBytes();
    return name == null ? null : DFSUtil.Bytes2String(name);
}
/// <summary>A formatted string for reporting the status of the DataNode.</summary>
public virtual string GetDatanodeReport() {
    StringBuilder buffer = new StringBuilder();
    long c = GetCapacity();
    long r = GetRemaining();
    long u = GetDfsUsed();
    long nonDFSUsed = GetNonDfsUsed();
    float usedPercent = GetDfsUsedPercent();
    float remainingPercent = GetRemainingPercent();
    long cc = GetCacheCapacity();
    long cr = GetCacheRemaining();
    long cu = GetCacheUsed();
    float cacheUsedPercent = GetCacheUsedPercent();
    float cacheRemainingPercent = GetCacheRemainingPercent();
    string lookupName = NetUtils.GetHostNameOfIP(GetName());
    buffer.Append("Name: " + GetName());
    if (lookupName != null) {
        buffer.Append(" (" + lookupName + ")");
    }
    buffer.Append("\n");
    buffer.Append("Hostname: " + GetHostName() + "\n");
    if (!NetworkTopology.DefaultRack.Equals(location)) {
        buffer.Append("Rack: " + location + "\n");
    }
    buffer.Append("Decommission Status : ");
    if (IsDecommissioned()) {
        buffer.Append("Decommissioned\n");
    } else if (IsDecommissionInProgress()) {
        buffer.Append("Decommission in progress\n");
    } else {
        buffer.Append("Normal\n");
    }
    buffer.Append("Configured Capacity: " + c + " (" + StringUtils.ByteDesc(c) + ")" + "\n");
    buffer.Append("DFS Used: " + u + " (" + StringUtils.ByteDesc(u) + ")" + "\n");
    buffer.Append("Non DFS Used: " + nonDFSUsed + " (" + StringUtils.ByteDesc(nonDFSUsed) + ")" + "\n");
    buffer.Append("DFS Remaining: " + r + " (" + StringUtils.ByteDesc(r) + ")" + "\n");
    buffer.Append("DFS Used%: " + DFSUtil.Percent2String(usedPercent) + "\n");
    buffer.Append("DFS Remaining%: " + DFSUtil.Percent2String(remainingPercent) + "\n");
    buffer.Append("Configured Cache Capacity: " + cc + " (" + StringUtils.ByteDesc(cc) + ")" + "\n");
    buffer.Append("Cache Used: " + cu + " (" + StringUtils.ByteDesc(cu) + ")" + "\n");
    buffer.Append("Cache Remaining: " + cr + " (" + StringUtils.ByteDesc(cr) + ")" + "\n");
    buffer.Append("Cache Used%: " + DFSUtil.Percent2String(cacheUsedPercent) + "\n");
    buffer.Append("Cache Remaining%: " + DFSUtil.Percent2String(cacheRemainingPercent) + "\n");
    buffer.Append("Xceivers: " + GetXceiverCount() + "\n");
    buffer.Append("Last contact: " + Sharpen.Extensions.CreateDate(lastUpdate) + "\n");
    return buffer.ToString();
}
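// Shape of the report produced above (illustrative values only; the line
// labels come from the method, the numbers are fabricated):
// Name: 127.0.0.1:50010 (localhost)
// Hostname: localhost
// Decommission Status : Normal
// Configured Capacity: 1000000000 (953.67 MB)
// DFS Used: 24576 (24 KB)
// ...remaining capacity, cache, and last-contact lines follow the same pattern.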
public virtual string GetSymlinkString() {
    return DFSUtil.Bytes2String(symlink);
}
/// <summary>Used space by the block pool as percentage of present capacity</summary>
public virtual float GetBlockPoolUsedPercent() {
    return DFSUtil.GetPercentUsed(blockPoolUsed, capacity);
}
/// <returns>Cache used as a percentage of the datanode's total cache capacity</returns>
public virtual float GetCacheUsedPercent() {
    return DFSUtil.GetPercentUsed(cacheUsed, cacheCapacity);
}
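// Hedged sketch of the helper contract behind the percentage accessors in this
// section: for a positive capacity, GetPercentUsed(used, capacity) is
// used * 100.0f / capacity, and GetPercentRemaining is the analogous ratio.
// The expected values in the comments are assumptions for illustration.
float quarterUsed = DFSUtil.GetPercentUsed(256L, 1024L);            // 25.0f
float threeQuartersLeft = DFSUtil.GetPercentRemaining(768L, 1024L); // 75.0f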
/// <exception cref="System.IO.IOException"/> public virtual void ToXML(XMLOutputter doc) { if (error != null) { // general exception, only print exception message onto web page. CreateGeneralException(doc, clusterid, StringUtils.StringifyException(error)); doc.GetWriter().Flush(); return; } int size = nnList.Count; long total = 0L; long free = 0L; long nonDfsUsed = 0l; float dfsUsedPercent = 0.0f; float dfsRemainingPercent = 0.0f; if (size > 0) { total = total_sum / size; free = free_sum / size; nonDfsUsed = nonDfsUsed_sum / size; dfsUsedPercent = DFSUtil.GetPercentUsed(clusterDfsUsed, total); dfsRemainingPercent = DFSUtil.GetPercentRemaining(free, total); } doc.StartTag("cluster"); doc.Attribute("clusterId", clusterid); doc.StartTag("storage"); ToXmlItemBlock(doc, "Total Files And Directories", System.Convert.ToString(totalFilesAndDirectories )); ToXmlItemBlock(doc, "Configured Capacity", StringUtils.ByteDesc(total)); ToXmlItemBlock(doc, "DFS Used", StringUtils.ByteDesc(clusterDfsUsed)); ToXmlItemBlock(doc, "Non DFS Used", StringUtils.ByteDesc(nonDfsUsed)); ToXmlItemBlock(doc, "DFS Remaining", StringUtils.ByteDesc(free)); // dfsUsedPercent ToXmlItemBlock(doc, "DFS Used%", DFSUtil.Percent2String(dfsUsedPercent)); // dfsRemainingPercent ToXmlItemBlock(doc, "DFS Remaining%", DFSUtil.Percent2String(dfsRemainingPercent) ); doc.EndTag(); // storage doc.StartTag("namenodes"); // number of namenodes ToXmlItemBlock(doc, "NamenodesCount", Sharpen.Extensions.ToString(size)); foreach (ClusterJspHelper.NamenodeStatus nn in nnList) { doc.StartTag("node"); ToXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode"); ToXmlItemBlock(doc, "Blockpool Used", StringUtils.ByteDesc(nn.bpUsed)); ToXmlItemBlock(doc, "Blockpool Used%", DFSUtil.Percent2String(DFSUtil.GetPercentUsed (nn.bpUsed, total))); ToXmlItemBlock(doc, "Files And Directories", System.Convert.ToString(nn.filesAndDirectories )); ToXmlItemBlock(doc, "Blocks", System.Convert.ToString(nn.blocksCount)); ToXmlItemBlock(doc, "Missing Blocks", System.Convert.ToString(nn.missingBlocksCount )); ToXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " (" + nn.liveDecomCount + ")" , new Uri(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=LIVE"), "Live Datanode (Decommissioned)" ); ToXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" + nn.deadDecomCount + ")" , new Uri(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=DEAD"), "Dead Datanode (Decommissioned)" ); ToXmlItemBlock(doc, "Software Version", nn.softwareVersion); doc.EndTag(); } // node doc.EndTag(); // namenodes CreateNamenodeExceptionMsg(doc, nnExceptions); doc.EndTag(); // cluster doc.GetWriter().Flush(); }
/// <summary>Get the string representation of the local name</summary>
/// <returns>the local name as a string</returns>
public string GetLocalName() {
    return DFSUtil.Bytes2String(path);
}
private string ToString(bool validateObject) {
    if (validateObject) {
        Validate();
    }
    StringBuilder b = new StringBuilder(GetType().Name)
        .Append(": path = ").Append(DFSUtil.ByteArray2PathString(path))
        .Append("\n inodes = ");
    if (inodes == null) {
        b.Append("null");
    } else if (inodes.Length == 0) {
        b.Append("[]");
    } else {
        b.Append("[").Append(ToString(inodes[0]));
        for (int i = 1; i < inodes.Length; i++) {
            b.Append(", ").Append(ToString(inodes[i]));
        }
        b.Append("], length=").Append(inodes.Length);
    }
    b.Append("\n isSnapshot = ").Append(isSnapshot)
     .Append("\n snapshotId = ").Append(snapshotId);
    return b.ToString();
}
/// <exception cref="System.IO.IOException"/> /// <exception cref="Javax.Management.MalformedObjectNameException"/> internal NamenodeMXBeanHelper(IPEndPoint addr, Configuration conf) { this.host = addr.GetHostName(); this.httpAddress = DFSUtil.GetInfoServer(addr, conf, DFSUtil.GetHttpClientScheme( conf)); }
public virtual string GetPath(int pos) {
    return DFSUtil.ByteArray2PathString(path, 0, pos);
}
/// <returns>the full path in string form</returns>
public virtual string GetPath() {
    return DFSUtil.ByteArray2PathString(path);
}
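// Hedged sketch of the component-array convention the two accessors above rely
// on: a path is stored as byte[][] components whose first entry is the empty
// root component. The helper names are real; the values and the expected
// results in the comments are assumptions for illustration.
byte[][] components = new byte[][] {
    DFSUtil.String2Bytes(string.Empty), // root
    DFSUtil.String2Bytes("foo"),
    DFSUtil.String2Bytes("bar")
};
string full = DFSUtil.ByteArray2PathString(components);         // "/foo/bar"
string prefix = DFSUtil.ByteArray2PathString(components, 0, 2); // "/foo"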
/// <summary>Retrieve existing INodes from a path.</summary>
/// <remarks>
/// Retrieve existing INodes from a path. For a non-snapshot path,
/// the number of INodes is equal to the number of path components. For
/// a snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
/// (number_of_path_components - 1).
/// An UnresolvedPathException is always thrown when an intermediate path
/// component refers to a symbolic link. If the final path component refers
/// to a symbolic link then an UnresolvedPathException is only thrown if
/// resolveLink is true.
/// <p>
/// Example: <br />
/// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
/// following path components: ["","c1","c2","c3"]
/// <p>
/// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
/// the array with [rootINode,c1,c2], <br />
/// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
/// fill the array with [rootINode,c1,c2,null]
/// </remarks>
/// <param name="startingDir">the starting directory</param>
/// <param name="components">array of path component names</param>
/// <param name="resolveLink">
/// indicates whether an UnresolvedLinkException should
/// be thrown when the path refers to a symbolic link.
/// </param>
/// <returns>the specified number of existing INodes in the path</returns>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(INodeDirectory startingDir,
    byte[][] components, bool resolveLink) {
    Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
    INode curNode = startingDir;
    int count = 0;
    int inodeNum = 0;
    INode[] inodes = new INode[components.Length];
    bool isSnapshot = false;
    int snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    while (count < components.Length && curNode != null) {
        bool lastComp = (count == components.Length - 1);
        inodes[inodeNum++] = curNode;
        bool isRef = curNode.IsReference();
        bool isDir = curNode.IsDirectory();
        INodeDirectory dir = isDir ? curNode.AsDirectory() : null;
        if (!isRef && isDir && dir.IsWithSnapshot()) {
            // If the path is a non-snapshot path, update the latest snapshot.
            if (!isSnapshot && ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(), snapshotId)) {
                snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
            }
        } else if (isRef && isDir && !lastComp) {
            // If curNode is a reference node, we need to check its dstSnapshot:
            // 1. if the existing snapshot is no later than the dstSnapshot (which
            //    is the latest snapshot in dst before the rename), the changes
            //    should be recorded in previous snapshots (belonging to src).
            // 2. however, if the ref node is already the last component, we still
            //    need to know the latest snapshot among the ref node's ancestors,
            //    in case of processing a deletion operation. Thus we do not
            //    overwrite the latest snapshot if lastComp is true. In case the
            //    operation is a modification operation, we do a similar check in
            //    the corresponding recordModification method.
            if (!isSnapshot) {
                int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                    || (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                        && dstSnapshotId >= snapshotId)) {
                    // No snapshot in the dst tree of the rename (the scenario above).
                    int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                    DirectoryWithSnapshotFeature sf;
                    if (curNode.IsDirectory()
                        && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null) {
                        lastSnapshot = sf.GetLastSnapshotId();
                    }
                    snapshotId = lastSnapshot;
                }
            }
        }
        if (curNode.IsSymlink() && (!lastComp || resolveLink)) {
            string path = ConstructPath(components, 0, components.Length);
            string preceding = ConstructPath(components, 0, count);
            string remainder = ConstructPath(components, count + 1, components.Length);
            string link = DFSUtil.Bytes2String(components[count]);
            string target = curNode.AsSymlink().GetSymlinkString();
            if (Log.IsDebugEnabled()) {
                Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                    + " count: " + count + " link: " + link + " target: " + target
                    + " remainder: " + remainder);
            }
            throw new UnresolvedPathException(path, preceding, remainder, target);
        }
        if (lastComp || !isDir) {
            break;
        }
        byte[] childName = components[count + 1];
        // Check if the next byte[] in components is for ".snapshot".
        if (IsDotSnapshotDir(childName) && dir.IsSnapshottable()) {
            // Skip the ".snapshot" in components.
            count++;
            isSnapshot = true;
            // Check if ".snapshot" is the last element of components.
            if (count == components.Length - 1) {
                break;
            }
            // Resolve the snapshot root.
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components[count + 1]);
            if (s == null) {
                // Snapshot not found.
                curNode = null;
            } else {
                curNode = s.GetRoot();
                snapshotId = s.GetId();
            }
        } else {
            // Normal case, and also for resolving a file/dir under a snapshot root.
            curNode = dir.GetChild(childName,
                isSnapshot ? snapshotId : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
        }
        count++;
    }
    if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1])) {
        // For a snapshot path, shrink the inode array. However, for a path ending
        // with .snapshot, still keep the last (null) inode in the array.
        INode[] newNodes = new INode[components.Length - 1];
        System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
        inodes = newNodes;
    }
    return new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components, isSnapshot, snapshotId);
}
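// Hedged call sketch mirroring the /foo/.snapshot/s1/bar example from the
// remarks above. "rootDir" stands for the namesystem root directory and is a
// hypothetical placeholder obtained elsewhere. Because ".snapshot" itself is
// skipped during resolution, the returned INodesInPath carries one fewer
// inode than there are components.
byte[][] snapshotPath = new byte[][] {
    DFSUtil.String2Bytes(string.Empty),
    DFSUtil.String2Bytes("foo"),
    DFSUtil.String2Bytes(".snapshot"),
    DFSUtil.String2Bytes("s1"),
    DFSUtil.String2Bytes("bar")
};
INodesInPath iip = INodesInPath.Resolve(rootDir, snapshotPath, false);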
/// <exception cref="System.Exception"/> internal static void RunBalancer(TestBalancerWithMultipleNameNodes.Suite s, long totalUsed, long totalCapacity) { double avg = totalUsed * 100.0 / totalCapacity; Log.Info("BALANCER 0: totalUsed=" + totalUsed + ", totalCapacity=" + totalCapacity + ", avg=" + avg); Wait(s.clients, totalUsed, totalCapacity); Log.Info("BALANCER 1"); // start rebalancing ICollection <URI> namenodes = DFSUtil.GetNsServiceRpcUris(s.conf); int r = Org.Apache.Hadoop.Hdfs.Server.Balancer.Balancer.Run(namenodes, Balancer.Parameters .Default, s.conf); NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), r); Log.Info("BALANCER 2"); Wait(s.clients, totalUsed, totalCapacity); Log.Info("BALANCER 3"); int i = 0; for (bool balanced = false; !balanced; i++) { long[] used = new long[s.cluster.GetDataNodes().Count]; long[] cap = new long[used.Length]; for (int n = 0; n < s.clients.Length; n++) { DatanodeInfo[] datanodes = s.clients[n].GetDatanodeReport(HdfsConstants.DatanodeReportType .All); NUnit.Framework.Assert.AreEqual(datanodes.Length, used.Length); for (int d = 0; d < datanodes.Length; d++) { if (n == 0) { used[d] = datanodes[d].GetDfsUsed(); cap[d] = datanodes[d].GetCapacity(); if (i % 100 == 0) { Log.Warn("datanodes[" + d + "]: getDfsUsed()=" + datanodes[d].GetDfsUsed() + ", getCapacity()=" + datanodes[d].GetCapacity()); } } else { NUnit.Framework.Assert.AreEqual(used[d], datanodes[d].GetDfsUsed()); NUnit.Framework.Assert.AreEqual(cap[d], datanodes[d].GetCapacity()); } } } balanced = true; for (int d_1 = 0; d_1 < used.Length; d_1++) { double p = used[d_1] * 100.0 / cap[d_1]; balanced = p <= avg + Balancer.Parameters.Default.threshold; if (!balanced) { if (i % 100 == 0) { Log.Warn("datanodes " + d_1 + " is not yet balanced: " + "used=" + used[d_1] + ", cap=" + cap[d_1] + ", avg=" + avg); Log.Warn("TestBalancer.sum(used)=" + TestBalancer.Sum(used) + ", TestBalancer.sum(cap)=" + TestBalancer.Sum(cap)); } Sleep(100); break; } } } Log.Info("BALANCER 6"); }
/// <summary>Get the string representation of the symlink.</summary>
/// <returns>the symlink as a string.</returns>
public string GetSymlink() {
    return DFSUtil.Bytes2String(symlink);
}
/// <summary>The used space by the data node as percentage of present capacity</summary>
public virtual float GetDfsUsedPercent() {
    return DFSUtil.GetPercentUsed(dfsUsed, capacity);
}
public LocatedFileStatus MakeQualifiedLocated(URI defaultUri, Path path) {
    return new LocatedFileStatus(GetLen(), IsDir(), GetReplication(), GetBlockSize(),
        GetModificationTime(), GetAccessTime(), GetPermission(), GetOwner(), GetGroup(),
        IsSymlink() ? new Path(GetSymlink()) : null,
        GetFullPath(path).MakeQualified(defaultUri, null),
        DFSUtil.LocatedBlocks2Locations(GetBlockLocations()));
}
/// <summary>The remaining space as percentage of configured capacity.</summary>
public virtual float GetRemainingPercent() {
    return DFSUtil.GetPercentRemaining(remaining, capacity);
}
/// <summary>
/// List all the snapshottable directories that are owned by the current user.
/// </summary>
/// <param name="userName">Current user name.</param>
/// <returns>
/// Snapshottable directories that are owned by the current user,
/// represented as an array of
/// <see cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshottableDirectoryStatus"/>.
/// If <paramref name="userName"/> is null, return all the snapshottable dirs.
/// </returns>
public virtual SnapshottableDirectoryStatus[] GetSnapshottableDirListing(string userName) {
    if (snapshottables.IsEmpty()) {
        return null;
    }
    IList<SnapshottableDirectoryStatus> statusList = new AList<SnapshottableDirectoryStatus>();
    foreach (INodeDirectory dir in snapshottables.Values) {
        if (userName == null || userName.Equals(dir.GetUserName())) {
            SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
                dir.GetModificationTime(), dir.GetAccessTime(), dir.GetFsPermission(),
                dir.GetUserName(), dir.GetGroupName(), dir.GetLocalNameBytes(), dir.GetId(),
                dir.GetChildrenNum(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId),
                dir.GetDirectorySnapshottableFeature().GetNumSnapshots(),
                dir.GetDirectorySnapshottableFeature().GetSnapshotQuota(),
                dir.GetParent() == null ? DFSUtil.EmptyBytes
                    : DFSUtil.String2Bytes(dir.GetParent().GetFullPathName()));
            statusList.AddItem(status);
        }
    }
    statusList.Sort(SnapshottableDirectoryStatus.Comparator);
    return Sharpen.Collections.ToArray(statusList, new SnapshottableDirectoryStatus[statusList.Count]);
}
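// Hedged usage sketch ("manager" is a hypothetical instance of the class that
// declares the method above, and "alice" a fabricated user): a null userName
// lists every snapshottable directory, per the contract documented above.
SnapshottableDirectoryStatus[] mine = manager.GetSnapshottableDirListing("alice");
SnapshottableDirectoryStatus[] all = manager.GetSnapshottableDirListing(null);
// Note: an empty registry yields null rather than an empty array.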
/// <returns>
/// Cache remaining as a percentage of the datanode's total cache
/// capacity
/// </returns>
public virtual float GetCacheRemainingPercent() {
    return DFSUtil.GetPercentRemaining(GetCacheRemaining(), cacheCapacity);
}
/// <summary>Choose the local node of localMachine as the target.</summary>
/// <remarks>
/// Choose the local node of localMachine as the target.
/// If localMachine is not available, choose a node on the same node group or
/// rack instead.
/// </remarks>
/// <returns>the chosen node</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockPlacementPolicy.NotEnoughReplicasException"/>
protected internal override DatanodeStorageInfo ChooseLocalStorage(Node localMachine,
    ICollection<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    IList<DatanodeStorageInfo> results, bool avoidStaleNodes,
    EnumMap<StorageType, int> storageTypes, bool fallbackToLocalRack) {
    // If there is no local machine, randomly choose one node.
    if (localMachine == null) {
        return ChooseRandom(NodeBase.Root, excludedNodes, blocksize, maxNodesPerRack,
            results, avoidStaleNodes, storageTypes);
    }
    // Otherwise try the local machine first.
    if (localMachine is DatanodeDescriptor) {
        DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine;
        if (excludedNodes.AddItem(localMachine)) {
            // The local machine was not in the excluded list.
            for (IEnumerator<KeyValuePair<StorageType, int>> iter = storageTypes.GetEnumerator(); iter.HasNext(); ) {
                KeyValuePair<StorageType, int> entry = iter.Next();
                foreach (DatanodeStorageInfo localStorage in DFSUtil.Shuffle(localDataNode.GetStorageInfos())) {
                    StorageType type = entry.Key;
                    if (AddIfIsGoodTarget(localStorage, excludedNodes, blocksize, maxNodesPerRack,
                        false, results, avoidStaleNodes, type) >= 0) {
                        int num = entry.Value;
                        if (num == 1) {
                            iter.Remove();
                        } else {
                            entry.SetValue(num - 1);
                        }
                        return localStorage;
                    }
                }
            }
        }
    }
    // Try a node on the local node group.
    DatanodeStorageInfo chosenStorage = ChooseLocalNodeGroup((NetworkTopologyWithNodeGroup)clusterMap,
        localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    if (chosenStorage != null) {
        return chosenStorage;
    }
    if (!fallbackToLocalRack) {
        return null;
    }
    // Try a node on the local rack.
    return ChooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
}
/// <exception cref="System.IO.IOException"/> internal static HdfsFileStatus Mkdirs(FSNamesystem fsn, string src, PermissionStatus permissions, bool createParent) { FSDirectory fsd = fsn.GetFSDirectory(); if (NameNode.stateChangeLog.IsDebugEnabled()) { NameNode.stateChangeLog.Debug("DIR* NameSystem.mkdirs: " + src); } if (!DFSUtil.IsValidName(src)) { throw new InvalidPathException(src); } FSPermissionChecker pc = fsd.GetPermissionChecker(); byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(src); fsd.WriteLock(); try { src = fsd.ResolvePath(pc, src, pathComponents); INodesInPath iip = fsd.GetINodesInPath4Write(src); if (fsd.IsPermissionEnabled()) { fsd.CheckTraverse(pc, iip); } INode lastINode = iip.GetLastINode(); if (lastINode != null && lastINode.IsFile()) { throw new FileAlreadyExistsException("Path is not a directory: " + src); } INodesInPath existing = lastINode != null ? iip : iip.GetExistingINodes(); if (lastINode == null) { if (fsd.IsPermissionEnabled()) { fsd.CheckAncestorAccess(pc, iip, FsAction.Write); } if (!createParent) { fsd.VerifyParentDir(iip, src); } // validate that we have enough inodes. This is, at best, a // heuristic because the mkdirs() operation might need to // create multiple inodes. fsn.CheckFsObjectLimit(); IList <string> nonExisting = iip.GetPath(existing.Length(), iip.Length() - existing .Length()); int length = nonExisting.Count; if (length > 1) { IList <string> ancestors = nonExisting.SubList(0, length - 1); // Ensure that the user can traversal the path by adding implicit // u+wx permission to all ancestor directories existing = CreateChildrenDirectories(fsd, existing, ancestors, AddImplicitUwx(permissions , permissions)); if (existing == null) { throw new IOException("Failed to create directory: " + src); } } if ((existing = CreateChildrenDirectories(fsd, existing, nonExisting.SubList(length - 1, length), permissions)) == null) { throw new IOException("Failed to create directory: " + src); } } return(fsd.GetAuditFileInfo(existing)); } finally { fsd.WriteUnlock(); } }
/// <exception cref="System.Exception"/> private static IDictionary <URI, IList <Path> > GetNameNodePaths(CommandLine line, Configuration conf) { IDictionary <URI, IList <Path> > map = Maps.NewHashMap(); string[] paths = null; if (line.HasOption("f")) { paths = ReadPathFile(line.GetOptionValue("f")); } else { if (line.HasOption("p")) { paths = line.GetOptionValues("p"); } } ICollection <URI> namenodes = DFSUtil.GetNsServiceRpcUris(conf); if (paths == null || paths.Length == 0) { foreach (URI namenode in namenodes) { map[namenode] = null; } return(map); } URI singleNs = namenodes.Count == 1 ? namenodes.GetEnumerator().Next() : null; foreach (string path in paths) { Path target = new Path(path); if (!target.IsUriPathAbsolute()) { throw new ArgumentException("The path " + target + " is not absolute"); } URI targetUri = target.ToUri(); if ((targetUri.GetAuthority() == null || targetUri.GetScheme() == null) && singleNs == null) { // each path must contains both scheme and authority information // unless there is only one name service specified in the // configuration throw new ArgumentException("The path " + target + " does not contain scheme and authority thus cannot identify" + " its name service"); } URI key = singleNs; if (singleNs == null) { key = new URI(targetUri.GetScheme(), targetUri.GetAuthority(), null, null, null); if (!namenodes.Contains(key)) { throw new ArgumentException("Cannot resolve the path " + target + ". The namenode services specified in the " + "configuration: " + namenodes); } } IList <Path> targets = map[key]; if (targets == null) { targets = Lists.NewArrayList(); map[key] = targets; } targets.AddItem(Path.GetPathWithoutSchemeAndAuthority(target)); } return(map); }
public virtual void TestHdfsFileStatus() {
    long now = Time.Now();
    string parent = "/dir";
    HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, now, now + 10,
        new FsPermission((short)0x1a4), // 0x1a4 == 420 == octal 0644 (rw-r--r--)
        "user", "group",
        DFSUtil.String2Bytes("bar"), DFSUtil.String2Bytes("foo"),
        INodeId.GrandfatherInodeId, 0, null, unchecked((byte)0));
    FileStatus fstatus = ToFileStatus(status, parent);
    System.Console.Out.WriteLine("status = " + status);
    System.Console.Out.WriteLine("fstatus = " + fstatus);
    string json = JsonUtil.ToJsonString(status, true);
    System.Console.Out.WriteLine("json = " + json.Replace(",", ",\n "));
    ObjectReader reader = new ObjectMapper().Reader(typeof(IDictionary));
    HdfsFileStatus s2 = JsonUtil.ToFileStatus((IDictionary<object, object>)reader.ReadValue(json), true);
    FileStatus fs2 = ToFileStatus(s2, parent);
    System.Console.Out.WriteLine("s2 = " + s2);
    System.Console.Out.WriteLine("fs2 = " + fs2);
    NUnit.Framework.Assert.AreEqual(fstatus, fs2);
}
internal Snapshot(int id, string name, INodeDirectory dir)
    : this(id, dir, dir) {
    this.root.SetLocalName(DFSUtil.String2Bytes(name));
}
/// <returns>When this directive expires, as an ISO-8601 formatted string.</returns>
public string GetExpiryTimeString() {
    return DFSUtil.DateToIso8601String(Sharpen.Extensions.CreateDate(expiryTime));
}
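// Hedged note: expiryTime is epoch milliseconds, so a value of 0L would render
// roughly as "1970-01-01T00:00:00+0000"; the exact zone-offset formatting is
// an assumption about DFSUtil.DateToIso8601String, not verified here.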
public int CompareTo(byte[] bytes) {
    return DFSUtil.CompareBytes(GetLocalNameBytes(), bytes);
}
public virtual void GetBackupNameServiceId() {
    Configuration conf = SetupAddress(DFSConfigKeys.DfsNamenodeBackupAddressKey);
    NUnit.Framework.Assert.AreEqual("nn1", DFSUtil.GetBackupNameServiceId(conf));
}