/// <summary>Save one inode's attributes to the image.</summary>
/// <exception cref="System.IO.IOException"/>
public static void SaveINode2Image(INode node, DataOutput @out, bool writeUnderConstruction,
    SnapshotFSImageFormat.ReferenceMap referenceMap)
{
    // Check IsReference() first: a reference wraps a file or directory and would
    // otherwise be caught by the concrete-type branches below.
    if (node.IsReference())
    {
        WriteINodeReference(node.AsReference(), @out, writeUnderConstruction, referenceMap);
    }
    else if (node.IsDirectory())
    {
        WriteINodeDirectory(node.AsDirectory(), @out);
    }
    else if (node.IsSymlink())
    {
        WriteINodeSymlink(node.AsSymlink(), @out);
    }
    else if (node.IsFile())
    {
        WriteINodeFile(node.AsFile(), @out, writeUnderConstruction);
    }
}
/// <summary>Get the file info for a specific file.</summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="path">the string representation of the path to the file</param>
/// <param name="src">the INodesInPath resolved from the path</param>
/// <param name="isRawPath">true if a /.reserved/raw pathname was passed by the user</param>
/// <param name="includeStoragePolicy">whether to include the storage policy</param>
/// <returns>
/// object containing information regarding the file,
/// or null if the file is not found
/// </returns>
/// <exception cref="System.IO.IOException"/>
internal static HdfsFileStatus GetFileInfo(FSDirectory fsd, string path, INodesInPath src,
    bool isRawPath, bool includeStoragePolicy)
{
    fsd.ReadLock();
    try
    {
        INode i = src.GetLastINode();
        byte policyId = includeStoragePolicy && i != null && !i.IsSymlink()
            ? i.GetStoragePolicyID()
            : BlockStoragePolicySuite.IdUnspecified;
        return i == null
            ? null
            : CreateFileStatus(fsd, path, HdfsFileStatus.EmptyName, i, policyId,
                src.GetPathSnapshotId(), isRawPath, src);
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
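// A minimal usage sketch (the Exists helper is illustrative, not part of this
// class): GetFileInfo returns null rather than throwing when the path does not
// resolve to an inode, so callers distinguish "missing" from an I/O failure by
// testing the result.
private static bool Exists(FSDirectory fsd, string path, INodesInPath iip)
{
    // Skip the storage-policy lookup; existence is all we care about here.
    return GetFileInfo(fsd, path, iip, FSDirectory.IsReservedRawName(path), false) != null;
}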
/// <exception cref="System.IO.IOException"/>
private static void ValidateDestination(string src, string dst, INode srcInode)
{
    string error;
    if (srcInode.IsSymlink() && dst.Equals(srcInode.AsSymlink().GetSymlinkString()))
    {
        throw new FileAlreadyExistsException("Cannot rename symlink " + src
            + " to its target " + dst);
    }
    // dst cannot be a directory or a file under src
    if (dst.StartsWith(src) && dst[src.Length] == Path.SeparatorChar)
    {
        error = "Rename destination " + dst + " is a directory or file under source " + src;
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new IOException(error);
    }
}
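// A minimal sketch of the prefix check above, assuming '/' as the separator
// (the helper name is illustrative, not part of this class): renaming "/a/b"
// to "/a/b/c" is rejected because the destination sits under the source, while
// "/a/b" to "/a/bc" passes even though the strings share a prefix.
private static bool IsUnderSource(string src, string dst)
{
    // Require a separator right after the prefix so sibling names that merely
    // share leading characters ("/a/bc") are not mistaken for descendants.
    return dst.Length > src.Length && dst.StartsWith(src) && dst[src.Length] == '/';
}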
/// <exception cref="System.IO.IOException"/>
private void Save(OutputStream @out, INode n)
{
    if (n.IsDirectory())
    {
        Save(@out, n.AsDirectory());
    }
    else if (n.IsFile())
    {
        Save(@out, n.AsFile());
    }
    else if (n.IsSymlink())
    {
        Save(@out, n.AsSymlink());
    }
}
/// <summary>Retrieve existing INodes from a path.</summary>
/// <remarks>
/// Retrieve existing INodes from a path. For a non-snapshot path,
/// the number of INodes is equal to the number of path components. For a
/// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
/// (number_of_path_components - 1).
/// An UnresolvedPathException is always thrown when an intermediate path
/// component refers to a symbolic link. If the final path component refers
/// to a symbolic link, then an UnresolvedPathException is only thrown if
/// resolveLink is true.
/// <p>
/// Example: <br />
/// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
/// following path components: ["","c1","c2","c3"]
/// <p>
/// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
/// the array with [rootINode,c1,c2], <br />
/// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
/// fill the array with [rootINode,c1,c2,null]
/// </remarks>
/// <param name="startingDir">the starting directory</param>
/// <param name="components">array of path component names</param>
/// <param name="resolveLink">
/// indicates whether an UnresolvedLinkException should
/// be thrown when the path refers to a symbolic link.
/// </param>
/// <returns>the specified number of existing INodes in the path</returns>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(
    INodeDirectory startingDir, byte[][] components, bool resolveLink)
{
    Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
    INode curNode = startingDir;
    int count = 0;
    int inodeNum = 0;
    INode[] inodes = new INode[components.Length];
    bool isSnapshot = false;
    int snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    while (count < components.Length && curNode != null)
    {
        bool lastComp = (count == components.Length - 1);
        inodes[inodeNum++] = curNode;
        bool isRef = curNode.IsReference();
        bool isDir = curNode.IsDirectory();
        INodeDirectory dir = isDir ? curNode.AsDirectory() : null;
        if (!isRef && isDir && dir.IsWithSnapshot())
        {
            // If the path is a non-snapshot path, update the latest snapshot.
            if (!isSnapshot && ShouldUpdateLatestId(
                dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(), snapshotId))
            {
                snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
            }
        }
        else if (isRef && isDir && !lastComp)
        {
            // If curNode is a reference node, we need to check its dstSnapshot:
            // 1. if the existing snapshot is no later than the dstSnapshot (which
            //    is the latest snapshot in dst before the rename), the changes
            //    should be recorded in previous snapshots (belonging to src).
            // 2. however, if the ref node is already the last component, we still
            //    need to know the latest snapshot among the ref node's ancestors,
            //    in case we are processing a deletion operation. Thus we do not
            //    overwrite the latest snapshot if lastComp is true. If the
            //    operation is a modification operation, we do a similar check in
            //    the corresponding recordModification method.
            if (!isSnapshot)
            {
                int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                    || (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                        && dstSnapshotId >= snapshotId))
                {
                    // no snapshot in the dst tree of the rename, or the scenario
                    // described above
                    int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                    DirectoryWithSnapshotFeature sf;
                    if (curNode.IsDirectory()
                        && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null)
                    {
                        lastSnapshot = sf.GetLastSnapshotId();
                    }
                    snapshotId = lastSnapshot;
                }
            }
        }
        if (curNode.IsSymlink() && (!lastComp || resolveLink))
        {
            string path = ConstructPath(components, 0, components.Length);
            string preceding = ConstructPath(components, 0, count);
            string remainder = ConstructPath(components, count + 1, components.Length);
            string link = DFSUtil.Bytes2String(components[count]);
            string target = curNode.AsSymlink().GetSymlinkString();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: "
                    + preceding + " count: " + count + " link: " + link + " target: "
                    + target + " remainder: " + remainder);
            }
            throw new UnresolvedPathException(path, preceding, remainder, target);
        }
        if (lastComp || !isDir)
        {
            break;
        }
        byte[] childName = components[count + 1];
        // check if the next byte[] in components is for ".snapshot"
        if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
        {
            // skip the ".snapshot" in components
            count++;
            isSnapshot = true;
            // check if ".snapshot" is the last element of components
            if (count == components.Length - 1)
            {
                break;
            }
            // resolve the snapshot root
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s =
                dir.GetSnapshot(components[count + 1]);
            if (s == null)
            {
                // snapshot not found
                curNode = null;
            }
            else
            {
                curNode = s.GetRoot();
                snapshotId = s.GetId();
            }
        }
        else
        {
            // normal case, and also for resolving a file/dir under a snapshot root
            curNode = dir.GetChild(childName, isSnapshot ? snapshotId
                : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
        }
        count++;
    }
    if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
    {
        // For a snapshot path, shrink the inode array. However, for a path ending
        // with ".snapshot", still keep the last (null) inode in the array.
        INode[] newNodes = new INode[components.Length - 1];
        System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
        inodes = newNodes;
    }
    return new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components,
        isSnapshot, snapshotId);
}
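// A hedged usage sketch (INode.GetPathComponents and the rootDir parameter are
// assumed from the surrounding codebase; the method itself is illustrative):
// resolving a snapshot path yields one fewer INode than path components,
// because the ".snapshot" component is consumed while selecting the snapshot id.
private static void ResolveExamples(INodeDirectory rootDir)
{
    // Non-snapshot path: one INode per component, with a trailing null for any
    // component that does not exist yet.
    byte[][] comps = INode.GetPathComponents("/c1/c2/c3");
    INodesInPath iip = Resolve(rootDir, comps, true);
    // Snapshot path: resolves bar inside snapshot s1; GetPathSnapshotId() then
    // carries s1's id instead of CurrentStateId.
    byte[][] snapComps = INode.GetPathComponents("/foo/.snapshot/s1/bar");
    INodesInPath snapIip = Resolve(rootDir, snapComps, false);
}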
/// <summary>Create FileStatus with location info by file INode</summary>
/// <exception cref="System.IO.IOException"/>
private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd, string fullPath,
    byte[] path, INode node, byte storagePolicy, int snapshot, bool isRawPath, INodesInPath iip)
{
    System.Diagnostics.Debug.Assert(fsd.HasReadLock());
    long size = 0;  // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
    bool isEncrypted;
    FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);
    if (node.IsFile())
    {
        INodeFile fileNode = node.AsFile();
        size = fileNode.ComputeFileSize(snapshot);
        replication = fileNode.GetFileReplication(snapshot);
        blocksize = fileNode.GetPreferredBlockSize();
        bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
        bool isUc = !inSnapshot && fileNode.IsUnderConstruction();
        long fileSize = !inSnapshot && isUc
            ? fileNode.ComputeFileSizeNotIncludingLastUcBlock()
            : size;
        loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(
            fileNode.GetBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
        if (loc == null)
        {
            loc = new LocatedBlocks();
        }
        isEncrypted = (feInfo != null)
            || (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
    }
    else
    {
        isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
    }
    int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
    HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory(),
        replication, blocksize, node.GetModificationTime(snapshot),
        node.GetAccessTime(snapshot), GetPermissionForFileStatus(nodeAttrs, isEncrypted),
        nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
        node.IsSymlink() ? node.AsSymlink().GetSymlink() : null, path, node.GetId(),
        loc, childrenNum, feInfo, storagePolicy);
    // Set caching information for the located blocks.
    if (loc != null)
    {
        CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
        foreach (LocatedBlock lb in loc.GetLocatedBlocks())
        {
            cacheManager.SetCachedLocations(lb);
        }
    }
    return status;
}
/// <summary>Create FileStatus by file INode</summary>
/// <exception cref="System.IO.IOException"/>
internal static HdfsFileStatus CreateFileStatus(FSDirectory fsd, string fullPath, byte[] path,
    INode node, byte storagePolicy, int snapshot, bool isRawPath, INodesInPath iip)
{
    long size = 0;  // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    bool isEncrypted;
    FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);
    if (node.IsFile())
    {
        INodeFile fileNode = node.AsFile();
        size = fileNode.ComputeFileSize(snapshot);
        replication = fileNode.GetFileReplication(snapshot);
        blocksize = fileNode.GetPreferredBlockSize();
        isEncrypted = (feInfo != null)
            || (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
    }
    else
    {
        isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
    }
    int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
    return new HdfsFileStatus(size, node.IsDirectory(), replication, blocksize,
        node.GetModificationTime(snapshot), node.GetAccessTime(snapshot),
        GetPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.GetUserName(),
        nodeAttrs.GetGroupName(), node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
        path, node.GetId(), childrenNum, feInfo, storagePolicy);
}
/// <summary>
/// Get a partial listing of the indicated directory.
/// We will stop when any of the following conditions is met:
/// 1) this.lsLimit files have been added
/// 2) needLocation is true AND enough files have been added such
/// that at least this.lsLimit block locations are in the response
/// </summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="iip">
/// the INodesInPath instance containing all the INodes along the path
/// </param>
/// <param name="src">the directory name</param>
/// <param name="startAfter">the name to start listing after</param>
/// <param name="needLocation">if block locations are returned</param>
/// <returns>a partial listing starting after startAfter</returns>
/// <exception cref="System.IO.IOException"/>
private static DirectoryListing GetListing(FSDirectory fsd, INodesInPath iip, string src,
    byte[] startAfter, bool needLocation, bool isSuperUser)
{
    string srcs = FSDirectory.NormalizePath(src);
    bool isRawPath = FSDirectory.IsReservedRawName(src);
    fsd.ReadLock();
    try
    {
        if (srcs.EndsWith(HdfsConstants.SeparatorDotSnapshotDir))
        {
            return GetSnapshotsListing(fsd, srcs, startAfter);
        }
        int snapshot = iip.GetPathSnapshotId();
        INode targetNode = iip.GetLastINode();
        if (targetNode == null)
        {
            return null;
        }
        byte parentStoragePolicy = isSuperUser
            ? targetNode.GetStoragePolicyID()
            : BlockStoragePolicySuite.IdUnspecified;
        if (!targetNode.IsDirectory())
        {
            return new DirectoryListing(new HdfsFileStatus[] { CreateFileStatus(fsd, src,
                HdfsFileStatus.EmptyName, targetNode, needLocation, parentStoragePolicy,
                snapshot, isRawPath, iip) }, 0);
        }
        INodeDirectory dirInode = targetNode.AsDirectory();
        ReadOnlyList<INode> contents = dirInode.GetChildrenList(snapshot);
        int startChild = INodeDirectory.NextChild(contents, startAfter);
        int totalNumChildren = contents.Size();
        int numOfListing = Math.Min(totalNumChildren - startChild, fsd.GetLsLimit());
        int locationBudget = fsd.GetLsLimit();
        int listingCnt = 0;
        HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
        for (int i = 0; i < numOfListing && locationBudget > 0; i++)
        {
            INode cur = contents.Get(startChild + i);
            byte curPolicy = isSuperUser && !cur.IsSymlink()
                ? cur.GetLocalStoragePolicyID()
                : BlockStoragePolicySuite.IdUnspecified;
            listing[i] = CreateFileStatus(fsd, src, cur.GetLocalNameBytes(), cur, needLocation,
                GetStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, iip);
            listingCnt++;
            if (needLocation)
            {
                // Once we hit lsLimit locations, stop.
                // This helps to prevent excessively large response payloads.
                // Approximate #locations with locatedBlockCount() * repl_factor.
                LocatedBlocks blks = ((HdfsLocatedFileStatus)listing[i]).GetBlockLocations();
                locationBudget -= (blks == null)
                    ? 0
                    : blks.LocatedBlockCount() * listing[i].GetReplication();
            }
        }
        // truncate the return array if necessary
        if (listingCnt < numOfListing)
        {
            listing = Arrays.CopyOf(listing, listingCnt);
        }
        return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
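// A hedged sketch of how a caller pages through a large directory (the
// DirectoryListing accessors GetPartialListing/GetLastName/HasMore are assumed
// from the surrounding codebase; CountEntries itself is illustrative):
private static int CountEntries(FSDirectory fsd, INodesInPath iip, string src, bool isSuperUser)
{
    int total = 0;
    byte[] startAfter = HdfsFileStatus.EmptyName;
    DirectoryListing page;
    do
    {
        page = GetListing(fsd, iip, src, startAfter, false, isSuperUser);
        if (page == null)
        {
            break;  // the path does not exist
        }
        total += page.GetPartialListing().Length;
        // Resume after the last entry the previous page returned.
        startAfter = page.GetLastName();
    }
    while (page.HasMore());
    return total;
}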