internal static FileStatus ToFileStatus(HdfsFileStatus f, string parent)
{
    return new FileStatus(f.GetLen(), f.IsDir(), f.GetReplication(), f.GetBlockSize(),
        f.GetModificationTime(), f.GetAccessTime(), f.GetPermission(), f.GetOwner(),
        f.GetGroup(), f.IsSymlink() ? new Path(f.GetSymlink()) : null,
        new Path(f.GetFullName(parent)));
}
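// A minimal usage sketch (hypothetical demo method, not part of the original
// class; assumes an open DFSClient `client` and an existing file under
// "/user/data"): the HDFS-internal status is converted to the public
// FileStatus type, with the child name resolved against its parent directory.
internal static FileStatus ToFileStatusDemo(DFSClient client)
{
    HdfsFileStatus hdfsStat = client.GetFileInfo("/user/data/part-0");
    return ToFileStatus(hdfsStat, "/user/data");
}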
/// <summary>Select a datanode to service this request.</summary>
/// <remarks>
/// Select a datanode to service this request.
/// Currently, this looks at no more than the first five blocks of a file,
/// selecting a datanode randomly from the most represented.
/// </remarks>
/// <param name="blks">located blocks of the file to serve</param>
/// <param name="i">status of the file to serve</param>
/// <param name="conf">configuration used to select the best node</param>
/// <exception cref="System.IO.IOException"/>
private DatanodeID PickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i, Configuration conf)
{
    if (i.GetLen() == 0 || blks.GetLocatedBlocks().Count <= 0)
    {
        // Zero-length file or no located blocks: pick a random datanode.
        NameNode nn = NameNodeHttpServer.GetNameNodeFromContext(GetServletContext());
        return NamenodeJspHelper.GetRandomDatanode(nn);
    }
    return JspHelper.BestNode(blks, conf);
}
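// Usage sketch (hypothetical demo method; assumes a NamenodeProtocols proxy
// `np`, such as the surrounding servlet would obtain, plus a file `path` and
// its Configuration): fetch the file's status and block locations, then pick
// the datanode that should serve the request.
private DatanodeID PickSrcDatanodeDemo(NamenodeProtocols np, string path, Configuration conf)
{
    HdfsFileStatus status = np.GetFileInfo(path);
    LocatedBlocks blocks = np.GetBlockLocations(path, 0, status.GetLen());
    return PickSrcDatanode(blocks, status, conf);
}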
/// <exception cref="System.IO.IOException"/>
public static WccAttr GetWccAttr(DFSClient client, string fileIdPath)
{
    HdfsFileStatus fstat = GetFileStatus(client, fileIdPath);
    if (fstat == null)
    {
        return null;
    }
    long size = fstat.IsDir() ? GetDirSize(fstat.GetChildrenNum()) : fstat.GetLen();
    // WccAttr carries size, mtime and ctime; HDFS does not track a separate
    // ctime, so the modification time is used for both fields.
    return new WccAttr(size, new NfsTime(fstat.GetModificationTime()),
        new NfsTime(fstat.GetModificationTime()));
}
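// GetDirSize is a helper defined elsewhere in this utility class; a plausible
// sketch of it, under the assumption that NFS reports a nominal directory size
// of a fixed number of bytes per entry, counting "." and ".." (the 32-byte
// entry size is an assumption for illustration):
private static long GetDirSizeSketch(int childNum)
{
    return (childNum + 2) * 32;
}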
public static Nfs3FileAttributes GetNfs3FileAttrFromFileStatus(HdfsFileStatus fs,
    IdMappingServiceProvider iug)
{
    NfsFileType fileType = fs.IsDir() ? NfsFileType.Nfsdir : NfsFileType.Nfsreg;
    fileType = fs.IsSymlink() ? NfsFileType.Nfslnk : fileType;
    // A directory's link count is its child count plus 2 (for "." and "..").
    int nlink = (fileType == NfsFileType.Nfsdir) ? fs.GetChildrenNum() + 2 : 1;
    long size = (fileType == NfsFileType.Nfsdir) ? GetDirSize(fs.GetChildrenNum())
        : fs.GetLen();
    return new Nfs3FileAttributes(fileType, nlink, fs.GetPermission().ToShort(),
        iug.GetUidAllowingUnknown(fs.GetOwner()), iug.GetGidAllowingUnknown(fs.GetGroup()),
        size, 0, fs.GetFileId(), fs.GetModificationTime(), fs.GetAccessTime(),
        new Nfs3FileAttributes.Specdata3());
}
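// Usage sketch (hypothetical demo method; assumes an open DFSClient `client`,
// an exported path "/export/data", and an IdMappingServiceProvider `iug` for
// owner/group id resolution):
internal static Nfs3FileAttributes AttrsForExportDemo(DFSClient client,
    IdMappingServiceProvider iug)
{
    HdfsFileStatus fs = client.GetFileInfo("/export/data");
    return GetNfs3FileAttrFromFileStatus(fs, iug);
}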
/// <summary>Convert an HdfsFileStatus object to a Json string.</summary>
public static string ToJsonString(HdfsFileStatus status, bool includeType)
{
    if (status == null)
    {
        return null;
    }
    IDictionary<string, object> m = new SortedDictionary<string, object>();
    m["pathSuffix"] = status.GetLocalName();
    m["type"] = JsonUtil.PathType.ValueOf(status);
    if (status.IsSymlink())
    {
        m["symlink"] = status.GetSymlink();
    }
    m["length"] = status.GetLen();
    m["owner"] = status.GetOwner();
    m["group"] = status.GetGroup();
    FsPermission perm = status.GetPermission();
    m["permission"] = ToString(perm);
    if (perm.GetAclBit())
    {
        m["aclBit"] = true;
    }
    if (perm.GetEncryptedBit())
    {
        m["encBit"] = true;
    }
    m["accessTime"] = status.GetAccessTime();
    m["modificationTime"] = status.GetModificationTime();
    m["blockSize"] = status.GetBlockSize();
    m["replication"] = status.GetReplication();
    m["fileId"] = status.GetFileId();
    m["childrenNum"] = status.GetChildrenNum();
    m["storagePolicy"] = status.GetStoragePolicy();
    ObjectMapper mapper = new ObjectMapper();
    try
    {
        return includeType ? ToJsonString(typeof(FileStatus), m)
            : mapper.WriteValueAsString(m);
    }
    catch (IOException)
    {
        // Serialization failed; fall through and report no result.
    }
    return null;
}
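// Usage sketch (illustrative output): with includeType = true the map is
// wrapped under a "FileStatus" key, producing JSON roughly of the form
//   {"FileStatus":{"accessTime":...,"blockSize":...,"group":"supergroup",
//    "length":...,"owner":"hdfs","pathSuffix":"...","permission":"644",
//    "replication":3,"type":"FILE",...}}
// With includeType = false only the bare attribute map is serialized. The
// SortedDictionary keeps the keys in alphabetical order.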
/// <summary>Write a node to output.</summary>
/// <remarks>
/// Write a node to output.
/// Node information includes path, modification, permission, owner and group.
/// For files, it also includes size, replication and block-size.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal static void WriteInfo(Path fullpath, HdfsFileStatus i, XMLOutputter doc)
{
    SimpleDateFormat ldf = df.Get();
    doc.StartTag(i.IsDir() ? "directory" : "file");
    doc.Attribute("path", fullpath.ToUri().GetPath());
    doc.Attribute("modified", ldf.Format(Sharpen.Extensions.CreateDate(i.GetModificationTime())));
    doc.Attribute("accesstime", ldf.Format(Sharpen.Extensions.CreateDate(i.GetAccessTime())));
    if (!i.IsDir())
    {
        doc.Attribute("size", i.GetLen().ToString());
        doc.Attribute("replication", i.GetReplication().ToString());
        doc.Attribute("blocksize", i.GetBlockSize().ToString());
    }
    doc.Attribute("permission", (i.IsDir() ? "d" : "-") + i.GetPermission());
    doc.Attribute("owner", i.GetOwner());
    doc.Attribute("group", i.GetGroup());
    doc.EndTag();
}
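// For a regular file the method emits a single element, roughly of the form
// (attribute order follows the calls above; values are illustrative):
//   <file path="/user/data/part-0" modified="..." accesstime="..."
//         size="1024" replication="3" blocksize="134217728"
//         permission="-rw-r--r--" owner="hdfs" group="supergroup"/>
// Directories use the <directory> tag and omit size, replication and blocksize.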
public virtual void TestWriteStableHow()
{
    NfsConfiguration config = new NfsConfiguration();
    DFSClient client = null;
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    SecurityHandler securityHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(securityHandler.GetUser())
        .ThenReturn(Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        client = new DFSClient(NameNode.GetAddress(config), config);
        // Use an ephemeral port in case tests are running in parallel.
        config.SetInt("nfs3.mountd.port", 0);
        config.SetInt("nfs3.server.port", 0);
        // Start the NFS gateway.
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 =
            new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        HdfsFileStatus status = client.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.GetFileId());
        // Create file1.
        CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.Serialize(createXdr);
        CREATE3Response createRsp = nfsd.Create(createXdr.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle = createRsp.GetObjHandle();
        // Test DATA_SYNC.
        byte[] buffer = new byte[10];
        for (int i = 0; i < 10; i++)
        {
            buffer[i] = unchecked((byte)i);
        }
        WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
            Nfs3Constant.WriteStableHow.DataSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr = new XDR();
        writeReq.Serialize(writeXdr);
        nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1, securityHandler,
            new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle, 60000);
        // Read back and verify the DATA_SYNC write.
        READ3Request readReq = new READ3Request(handle, 0, 10);
        XDR readXdr = new XDR();
        readReq.Serialize(readXdr);
        READ3Response readRsp = nfsd.Read(readXdr.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer,
            ((byte[])readRsp.GetData().Array())));
        // Test FILE_SYNC.
        // Create file2.
        CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr2 = new XDR();
        createReq2.Serialize(createXdr2);
        CREATE3Response createRsp2 = nfsd.Create(createXdr2.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle2 = createRsp2.GetObjHandle();
        WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
            Nfs3Constant.WriteStableHow.FileSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr2 = new XDR();
        writeReq2.Serialize(writeXdr2);
        nfsd.Write(writeXdr2.AsReadOnlyWrap(), null, 1, securityHandler,
            new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle2, 60000);
        // Read back and verify the FILE_SYNC write.
        READ3Request readReq2 = new READ3Request(handle2, 0, 10);
        XDR readXdr2 = new XDR();
        readReq2.Serialize(readXdr2);
        READ3Response readRsp2 = nfsd.Read(readXdr2.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer,
            ((byte[])readRsp2.GetData().Array())));
        // FILE_SYNC should also sync the file size to the namenode.
        status = client.GetFileInfo("/file2");
        NUnit.Framework.Assert.IsTrue(status.GetLen() == 10);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestConcatNotCompleteBlock()
{
    long trgFileLen = blockSize * 3;
    long srcFileLen = blockSize * 3 + 20;  // block at the end - not full
    // Create the target file.
    string name1 = "/trg";
    string name2 = "/src";
    Path filePath1 = new Path(name1);
    DFSTestUtil.CreateFile(dfs, filePath1, trgFileLen, ReplFactor, 1);
    HdfsFileStatus fStatus = nn.GetFileInfo(name1);
    long fileLen = fStatus.GetLen();
    NUnit.Framework.Assert.AreEqual(fileLen, trgFileLen);
    // Read the target file.
    FSDataInputStream stm = dfs.Open(filePath1);
    byte[] byteFile1 = new byte[(int)trgFileLen];
    stm.ReadFully(0, byteFile1);
    stm.Close();
    LocatedBlocks lb1 = nn.GetBlockLocations(name1, 0, trgFileLen);
    // Create the source file, whose last block is not full.
    Path filePath2 = new Path(name2);
    DFSTestUtil.CreateFile(dfs, filePath2, srcFileLen, ReplFactor, 1);
    fStatus = nn.GetFileInfo(name2);
    fileLen = fStatus.GetLen();
    NUnit.Framework.Assert.AreEqual(srcFileLen, fileLen);
    // Read the source file.
    stm = dfs.Open(filePath2);
    byte[] byteFile2 = new byte[(int)srcFileLen];
    stm.ReadFully(0, byteFile2);
    stm.Close();
    LocatedBlocks lb2 = nn.GetBlockLocations(name2, 0, srcFileLen);
    System.Console.Out.WriteLine("trg len=" + trgFileLen + "; src len=" + srcFileLen);
    // Move the blocks.
    dfs.Concat(filePath1, new Path[] { filePath2 });
    long totalLen = trgFileLen + srcFileLen;
    fStatus = nn.GetFileInfo(name1);
    fileLen = fStatus.GetLen();
    // Read the resulting file.
    stm = dfs.Open(filePath1);
    byte[] byteFileConcat = new byte[(int)fileLen];
    stm.ReadFully(0, byteFileConcat);
    stm.Close();
    LocatedBlocks lbConcat = nn.GetBlockLocations(name1, 0, fileLen);
    // Verifications:
    // 1. Number of blocks.
    NUnit.Framework.Assert.AreEqual(lbConcat.LocatedBlockCount(),
        lb1.LocatedBlockCount() + lb2.LocatedBlockCount());
    // 2. File lengths.
    System.Console.Out.WriteLine("file1 len=" + fileLen + "; total len=" + totalLen);
    NUnit.Framework.Assert.AreEqual(fileLen, totalLen);
    // 3. Removal of the src file.
    fStatus = nn.GetFileInfo(name2);
    NUnit.Framework.Assert.IsNull("File " + name2 + " still exists", fStatus);
    // 4. Content.
    CheckFileContent(byteFileConcat, new byte[][] { byteFile1, byteFile2 });
}
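// A minimal sketch of the content check used above (hypothetical
// re-implementation; the actual CheckFileContent helper lives elsewhere in
// this test class): verify that the concatenated bytes are exactly the
// source arrays laid end to end, in order.
private static void CheckFileContentSketch(byte[] concat, byte[][] parts)
{
    int offset = 0;
    foreach (byte[] part in parts)
    {
        foreach (byte b in part)
        {
            NUnit.Framework.Assert.AreEqual(b, concat[offset++]);
        }
    }
    // No trailing bytes beyond the concatenated sources.
    NUnit.Framework.Assert.AreEqual(offset, concat.Length);
}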