/// <summary>Convert an HdfsFileStatus and its parent path into a FileStatus.</summary>
internal static FileStatus ToFileStatus(HdfsFileStatus f, string parent)
{
	return new FileStatus(f.GetLen(), f.IsDir(), f.GetReplication(), f.GetBlockSize(),
		f.GetModificationTime(), f.GetAccessTime(), f.GetPermission(), f.GetOwner(),
		f.GetGroup(), f.IsSymlink() ? new Path(f.GetSymlink()) : null,
		new Path(f.GetFullName(parent)));
}
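// A minimal usage sketch (not part of the original source): converting the entries of a
// DirectoryListing into FileStatus objects with the helper above. The method name and the
// "listing"/"parentDir" parameters are hypothetical; GetPartialListing() is the accessor
// already used elsewhere in this section.
internal static FileStatus[] ToFileStatuses(DirectoryListing listing, string parentDir)
{
	HdfsFileStatus[] partial = listing.GetPartialListing();
	FileStatus[] result = new FileStatus[partial.Length];
	for (int i = 0; i < partial.Length; i++)
	{
		result[i] = ToFileStatus(partial[i], parentDir);
	}
	return result;
}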
/// <summary>Convert an HdfsFileStatus into NFSv3 file attributes.</summary>
public static Nfs3FileAttributes GetNfs3FileAttrFromFileStatus(HdfsFileStatus fs,
	IdMappingServiceProvider iug)
{
	NfsFileType fileType = fs.IsDir() ? NfsFileType.Nfsdir : NfsFileType.Nfsreg;
	fileType = fs.IsSymlink() ? NfsFileType.Nfslnk : fileType;
	// A directory's link count is its child count plus the "." and ".." entries.
	int nlink = (fileType == NfsFileType.Nfsdir) ? fs.GetChildrenNum() + 2 : 1;
	long size = (fileType == NfsFileType.Nfsdir) ? GetDirSize(fs.GetChildrenNum()) : fs.GetLen();
	return new Nfs3FileAttributes(fileType, nlink, fs.GetPermission().ToShort(),
		iug.GetUidAllowingUnknown(fs.GetOwner()), iug.GetGidAllowingUnknown(fs.GetGroup()),
		size, 0, fs.GetFileId(), fs.GetModificationTime(), fs.GetAccessTime(),
		new Nfs3FileAttributes.Specdata3());
}
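// A hedged usage sketch (not from the original source): fetching a status through a DFSClient
// and mapping it to NFSv3 attributes. "client", "path", and the method name are assumptions;
// DFSClient.GetFileInfo is assumed to follow the usual HDFS client API and may differ here.
public static Nfs3FileAttributes GetAttributes(DFSClient client, string path,
	IdMappingServiceProvider iug)
{
	HdfsFileStatus fs = client.GetFileInfo(path);
	// A null status means the path does not exist; propagate that to the caller.
	return fs == null ? null : GetNfs3FileAttrFromFileStatus(fs, iug);
}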
/// <summary>Convert an HdfsFileStatus object to a Json string.</summary>
public static string ToJsonString(HdfsFileStatus status, bool includeType)
{
	if (status == null)
	{
		return null;
	}
	IDictionary<string, object> m = new SortedDictionary<string, object>();
	m["pathSuffix"] = status.GetLocalName();
	m["type"] = JsonUtil.PathType.ValueOf(status);
	if (status.IsSymlink())
	{
		m["symlink"] = status.GetSymlink();
	}
	m["length"] = status.GetLen();
	m["owner"] = status.GetOwner();
	m["group"] = status.GetGroup();
	FsPermission perm = status.GetPermission();
	m["permission"] = ToString(perm);
	if (perm.GetAclBit())
	{
		m["aclBit"] = true;
	}
	if (perm.GetEncryptedBit())
	{
		m["encBit"] = true;
	}
	m["accessTime"] = status.GetAccessTime();
	m["modificationTime"] = status.GetModificationTime();
	m["blockSize"] = status.GetBlockSize();
	m["replication"] = status.GetReplication();
	m["fileId"] = status.GetFileId();
	m["childrenNum"] = status.GetChildrenNum();
	m["storagePolicy"] = status.GetStoragePolicy();
	ObjectMapper mapper = new ObjectMapper();
	try
	{
		// Either wrap the map under its type when the caller asks for it, or serialize the bare map.
		return includeType ? ToJsonString(typeof(FileStatus), m) : mapper.WriteValueAsString(m);
	}
	catch (IOException)
	{
		// Serialization failed; fall through and report no result.
	}
	return null;
}
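// A minimal usage sketch (assumption, not in the original source): producing the two JSON
// shapes this helper supports. The method name and "status" parameter are illustrative only.
internal static void PrintStatusJson(HdfsFileStatus status)
{
	// With the type wrapper, e.g. for a standalone status response.
	string wrapped = ToJsonString(status, true);
	// Without the wrapper, e.g. when the object is embedded in a larger document.
	string bare = ToJsonString(status, false);
	System.Console.WriteLine(wrapped);
	System.Console.WriteLine(bare);
}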
/// <returns>whether the migration requires another round</returns>
private bool ProcessRecursively(string parent, HdfsFileStatus status)
{
	string fullPath = status.GetFullName(parent);
	bool hasRemaining = false;
	if (status.IsDir())
	{
		if (!fullPath.EndsWith(Path.Separator))
		{
			fullPath = fullPath + Path.Separator;
		}
		hasRemaining = this.ProcessPath(fullPath);
		// process snapshots if this is a snapshottable directory
		if (this.snapshottableDirs.Contains(fullPath))
		{
			string dirSnapshot = fullPath + HdfsConstants.DotSnapshotDir;
			hasRemaining |= this.ProcessPath(dirSnapshot);
		}
	}
	else if (!status.IsSymlink())
	{
		// regular file
		try
		{
			// Skip a snapshot path that is also reachable through the current directory
			// tree; it is handled when the current tree itself is processed.
			if (!this.IsSnapshotPathInCurrent(fullPath))
			{
				hasRemaining = this.ProcessFile(fullPath, (HdfsLocatedFileStatus)status);
			}
		}
		catch (IOException e)
		{
			Org.Apache.Hadoop.Hdfs.Server.Mover.Mover.Log.Warn("Failed to check the status of "
				+ parent + ". Ignore it and continue.", e);
			return false;
		}
	}
	return hasRemaining;
}
/// <exception cref="System.Exception"/>
private void VerifyRecursively(Path parent, HdfsFileStatus status)
{
	if (status.IsDir())
	{
		Path fullPath = parent == null ? new Path("/") : status.GetFullPath(parent);
		DirectoryListing children = this.dfs.GetClient().ListPaths(fullPath.ToString(),
			HdfsFileStatus.EmptyName, true);
		foreach (HdfsFileStatus child in children.GetPartialListing())
		{
			this.VerifyRecursively(fullPath, child);
		}
	}
	else if (!status.IsSymlink())
	{
		// regular file
		this.VerifyFile(parent, status, null);
	}
}
internal static JsonUtil.PathType ValueOf(HdfsFileStatus status)
{
	return status.IsDir() ? JsonUtil.PathType.Directory
		: status.IsSymlink() ? JsonUtil.PathType.Symlink
		: JsonUtil.PathType.File;
}