/// <summary>
/// Test READDIRPLUS: a full listing, a paged listing starting from the f2
/// cookie, and a listing after the cookie's entry has been deleted.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestReaddirPlus()
{
    // Get inodeId of /tmp
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    // Create related part of the XDR request
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.Serialize(xdr_req);
    xdr_req.WriteLongAsHyper(0); // cookie
    xdr_req.WriteLongAsHyper(0); // verifier
    xdr_req.WriteInt(100); // dirCount
    xdr_req.WriteInt(1000); // maxCount
    READDIRPLUS3Response responsePlus = nfsd.Readdirplus(xdr_req.AsReadOnlyWrap(),
        securityHandler, new IPEndPoint("localhost", 1234));
    IList<READDIRPLUS3Response.EntryPlus3> direntPlus =
        responsePlus.GetDirListPlus().GetEntries();
    // Use AreEqual instead of IsTrue(a == b) so a failure reports the actual
    // entry count. Five entries expected, including dot and dotdot.
    NUnit.Framework.Assert.AreEqual(5, direntPlus.Count);
    // Test start listing from f2
    status = nn.GetRpcServer().GetFileInfo(testdir + "/f2");
    long f2Id = status.GetFileId();
    // Create related part of the XDR request
    xdr_req = new XDR();
    handle = new FileHandle(dirId);
    handle.Serialize(xdr_req);
    xdr_req.WriteLongAsHyper(f2Id); // cookie
    xdr_req.WriteLongAsHyper(0); // verifier
    xdr_req.WriteInt(100); // dirCount
    xdr_req.WriteInt(1000); // maxCount
    responsePlus = nfsd.Readdirplus(xdr_req.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    direntPlus = responsePlus.GetDirListPlus().GetEntries();
    NUnit.Framework.Assert.AreEqual(1, direntPlus.Count);
    READDIRPLUS3Response.EntryPlus3 entryPlus = direntPlus[0];
    NUnit.Framework.Assert.AreEqual("f3", entryPlus.GetName());
    // When the cookie's entry is deleted, the listing starts over, not
    // including dot and dotdot.
    hdfs.Delete(new Path(testdir + "/f2"), false);
    responsePlus = nfsd.Readdirplus(xdr_req.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    direntPlus = responsePlus.GetDirListPlus().GetEntries();
    NUnit.Framework.Assert.AreEqual(2, direntPlus.Count);
}
/// <exception cref="System.Exception"/>
public virtual void TestWrite()
{
    // Resolve the file id of /tmp/bar and build its NFS handle.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo("/tmp/bar");
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    // Ten bytes of sequential payload.
    byte[] payload = new byte[10];
    for (int i = 0; i < 10; i++)
    {
        payload[i] = unchecked((byte)i);
    }
    WRITE3Request writeReq = new WRITE3Request(fileHandle, 0, 10,
        Nfs3Constant.WriteStableHow.DataSync, ByteBuffer.Wrap(payload));
    XDR writeXdr = new XDR();
    writeReq.Serialize(writeXdr);
    // An unprivileged caller must be rejected with NFS3ERR_ACCES.
    WRITE3Response deniedRsp = nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1,
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds; a null response is expected here.
    WRITE3Response okRsp = nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1,
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect response:", null, okRsp);
}
/// <exception cref="System.Exception"/>
public virtual void TestMkdir()
{
    // Resolve the test directory and build its NFS handle.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    FileHandle dirHandle = new FileHandle(dirId);
    // An unprivileged caller must not be able to create a directory.
    XDR deniedXdr = new XDR();
    MKDIR3Request deniedReq = new MKDIR3Request(dirHandle, "fubar1", new SetAttr3());
    deniedReq.Serialize(deniedXdr);
    MKDIR3Response deniedRsp = nfsd.Mkdir(deniedXdr.AsReadOnlyWrap(),
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds.
    XDR okXdr = new XDR();
    MKDIR3Request okReq = new MKDIR3Request(dirHandle, "fubar2", new SetAttr3());
    okReq.Serialize(okXdr);
    MKDIR3Response okRsp = nfsd.Mkdir(okXdr.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3Ok,
        okRsp.GetStatus());
}
/// <exception cref="System.Exception"/>
public virtual void TestReadlink()
{
    // First create a symlink "fubar" -> "bar" under the test directory.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    FileHandle dirHandle = new FileHandle(dirId);
    XDR symlinkXdr = new XDR();
    SYMLINK3Request symlinkReq = new SYMLINK3Request(dirHandle, "fubar",
        new SetAttr3(), "bar");
    symlinkReq.Serialize(symlinkXdr);
    SYMLINK3Response symlinkRsp = nfsd.Symlink(symlinkXdr.AsReadOnlyWrap(),
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3Ok,
        symlinkRsp.GetStatus());
    // Now perform readlink operations on the freshly created link.
    FileHandle linkHandle = symlinkRsp.GetObjFileHandle();
    XDR readlinkXdr = new XDR();
    READLINK3Request readlinkReq = new READLINK3Request(linkHandle);
    readlinkReq.Serialize(readlinkXdr);
    // An unprivileged caller is rejected.
    READLINK3Response deniedRsp = nfsd.Readlink(readlinkXdr.AsReadOnlyWrap(),
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds.
    READLINK3Response okRsp = nfsd.Readlink(readlinkXdr.AsReadOnlyWrap(),
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3Ok,
        okRsp.GetStatus());
}
/// <exception cref="System.Exception"/>
public virtual void TestClientAccessPrivilegeForRemove()
{
    // Export the filesystem read-only so that mutating operations are denied.
    config.Set("dfs.nfs.exports.allowed.hosts", "* ro");
    // Start the NFS gateway against that configuration.
    Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs =
        new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
    nfs.StartServiceInternal(false);
    RpcProgramNfs3 nfsd = (RpcProgramNfs3)nfs.GetRpcProgram();
    // Build a REMOVE request for "f1" inside the test directory.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    XDR removeXdr = new XDR();
    FileHandle dirHandle = new FileHandle(dirId);
    dirHandle.Serialize(removeXdr);
    removeXdr.WriteString("f1");
    // Issue the remove; the read-only export must yield NFS3ERR_ACCES.
    REMOVE3Response response = nfsd.Remove(removeXdr.AsReadOnlyWrap(),
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code", Nfs3Status.Nfs3errAcces,
        response.GetStatus());
}
/// <exception cref="System.IO.IOException"/>
private void TestPlacement(string clientMachine, string clientRack)
{
    // Write five files and verify block placement for each allocation.
    for (int i = 0; i < 5; i++)
    {
        string src = "/test-" + i;
        // Create the file on behalf of the given client machine.
        HdfsFileStatus fileStatus = namesystem.StartFile(src, perm, clientMachine,
            clientMachine, EnumSet.Of(CreateFlag.Create), true, ReplicationFactor,
            DefaultBlockSize, null, false);
        LocatedBlock locatedBlock = nameNodeRpc.AddBlock(src, clientMachine, null,
            null, fileStatus.GetFileId(), null);
        NUnit.Framework.Assert.AreEqual("Block should be allocated sufficient locations",
            ReplicationFactor, locatedBlock.GetLocations().Length);
        if (clientRack != null)
        {
            NUnit.Framework.Assert.AreEqual("First datanode should be rack local",
                clientRack, locatedBlock.GetLocations()[0].GetNetworkLocation());
        }
        // Abandon the block so the next iteration starts clean.
        nameNodeRpc.AbandonBlock(locatedBlock.GetBlock(), fileStatus.GetFileId(),
            src, clientMachine);
    }
}
/// <summary>
/// Handle the MOUNT protocol MNT procedure: validate the client's access
/// privilege, resolve the requested export path to an HDFS file handle, and
/// record the mount. Writes the MNT response into <paramref name="out"/>.
/// </summary>
public override XDR Mnt(XDR xdr, XDR @out, int xid, IPAddress client)
{
    // Deny the mount outright when no export-host matcher is configured.
    if (hostsMatcher == null)
    {
        return MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errAcces, @out, xid, null);
    }
    AccessPrivilege accessPrivilege = hostsMatcher.GetAccessPrivilege(client);
    if (accessPrivilege == AccessPrivilege.None)
    {
        return MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errAcces, @out, xid, null);
    }
    string path = xdr.ReadString();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("MOUNT MNT path: " + path + " client: " + client);
    }
    string host = client.GetHostName();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Got host: " + host + " path: " + path);
    }
    if (!exports.Contains(path))
    {
        Log.Info("Path " + path + " is not shared.");
        MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
        return @out;
    }
    FileHandle handle = null;
    try
    {
        HdfsFileStatus exFileStatus = dfsClient.GetFileInfo(path);
        // GetFileInfo returns null (rather than throwing) when the path does
        // not exist; guard against dereferencing it, which would otherwise
        // escape as a NullReferenceException instead of an NFS error.
        if (exFileStatus == null)
        {
            Log.Error("Can't get handle for export:" + path);
            MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
            return @out;
        }
        handle = new FileHandle(exFileStatus.GetFileId());
    }
    catch (IOException e)
    {
        Log.Error("Can't get handle for export:" + path, e);
        MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
        return @out;
    }
    System.Diagnostics.Debug.Assert((handle != null));
    Log.Info("Giving handle (fileId:" + handle.GetFileId() + ") to client for export "
        + path);
    mounts.AddItem(new MountEntry(host, path));
    MountResponse.WriteMNTResponse(Nfs3Status.Nfs3Ok, @out, xid, handle.GetContent());
    return @out;
}
/// <summary>
/// Build an NFSv3 attribute record from an HDFS file status, mapping the
/// HDFS owner/group names to numeric ids via the supplied id mapper.
/// </summary>
public static Nfs3FileAttributes GetNfs3FileAttrFromFileStatus(HdfsFileStatus fs,
    IdMappingServiceProvider iug)
{
    // Map onto an NFS file type; a symlink takes precedence over dir/regular.
    NfsFileType fileType;
    if (fs.IsSymlink())
    {
        fileType = NfsFileType.Nfslnk;
    }
    else if (fs.IsDir())
    {
        fileType = NfsFileType.Nfsdir;
    }
    else
    {
        fileType = NfsFileType.Nfsreg;
    }
    bool isDir = fileType == NfsFileType.Nfsdir;
    // Directories report child count + 2 links (dot and dotdot); others 1.
    int nlink = isDir ? fs.GetChildrenNum() + 2 : 1;
    long size = isDir ? GetDirSize(fs.GetChildrenNum()) : fs.GetLen();
    return new Nfs3FileAttributes(fileType, nlink, fs.GetPermission().ToShort(),
        iug.GetUidAllowingUnknown(fs.GetOwner()),
        iug.GetGidAllowingUnknown(fs.GetGroup()), size, 0, fs.GetFileId(),
        fs.GetModificationTime(), fs.GetAccessTime(),
        new Nfs3FileAttributes.Specdata3());
}
/// <exception cref="System.Exception"/>
private void CreateFileUsingNfs(string fileName, byte[] buffer)
{
    // Create an empty file through HDFS first, then push the payload via NFS.
    DFSTestUtil.CreateFile(hdfs, new Path(fileName), 0, (short)1, 0);
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(fileName);
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    WRITE3Request writeReq = new WRITE3Request(fileHandle, 0, buffer.Length,
        Nfs3Constant.WriteStableHow.DataSync, ByteBuffer.Wrap(buffer));
    XDR writeXdr = new XDR();
    writeReq.Serialize(writeXdr);
    WRITE3Response response = nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1,
        securityHandler, new IPEndPoint("localhost", 1234));
    // A null response is expected for an accepted write.
    NUnit.Framework.Assert.AreEqual("Incorrect response: ", null, response);
}
/// <exception cref="System.Exception"/>
private void Commit(string fileName, int len)
{
    // Build a COMMIT request covering the first len bytes of the file.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(fileName);
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    XDR commitXdr = new XDR();
    COMMIT3Request commitReq = new COMMIT3Request(fileHandle, 0, len);
    commitReq.Serialize(commitXdr);
    Org.Jboss.Netty.Channel.Channel channel =
        Org.Mockito.Mockito.Mock<Org.Jboss.Netty.Channel.Channel>();
    COMMIT3Response response = nfsd.Commit(commitXdr.AsReadOnlyWrap(), channel, 1,
        securityHandler, new IPEndPoint("localhost", 1234));
    // A null response is expected for an accepted commit.
    NUnit.Framework.Assert.AreEqual("Incorrect COMMIT3Response:", null, response);
}
/// <summary>Convert a HdfsFileStatus object to a Json string.</summary>
public static string ToJsonString(HdfsFileStatus status, bool includeType)
{
    if (status == null)
    {
        return null;
    }
    // A sorted map keeps the emitted JSON keys in a deterministic order.
    IDictionary<string, object> json = new SortedDictionary<string, object>();
    json["pathSuffix"] = status.GetLocalName();
    json["type"] = JsonUtil.PathType.ValueOf(status);
    if (status.IsSymlink())
    {
        json["symlink"] = status.GetSymlink();
    }
    json["length"] = status.GetLen();
    json["owner"] = status.GetOwner();
    json["group"] = status.GetGroup();
    FsPermission perm = status.GetPermission();
    json["permission"] = ToString(perm);
    // The ACL and encryption bits are only emitted when set.
    if (perm.GetAclBit())
    {
        json["aclBit"] = true;
    }
    if (perm.GetEncryptedBit())
    {
        json["encBit"] = true;
    }
    json["accessTime"] = status.GetAccessTime();
    json["modificationTime"] = status.GetModificationTime();
    json["blockSize"] = status.GetBlockSize();
    json["replication"] = status.GetReplication();
    json["fileId"] = status.GetFileId();
    json["childrenNum"] = status.GetChildrenNum();
    json["storagePolicy"] = status.GetStoragePolicy();
    ObjectMapper mapper = new ObjectMapper();
    try
    {
        if (includeType)
        {
            return ToJsonString(typeof(FileStatus), json);
        }
        return mapper.WriteValueAsString(json);
    }
    catch (IOException)
    {
        // Serialization failure deliberately falls through to a null result.
    }
    return null;
}
/// <exception cref="System.Exception"/>
private byte[] GetFileContentsUsingNfs(string fileName, int len)
{
    // Resolve the file handle and issue a READ for the first len bytes.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(fileName);
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    READ3Request readReq = new READ3Request(fileHandle, 0, len);
    XDR readXdr = new XDR();
    readReq.Serialize(readXdr);
    READ3Response response = nfsd.Read(readXdr.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code: ", Nfs3Status.Nfs3Ok,
        response.GetStatus());
    NUnit.Framework.Assert.IsTrue("expected full read", response.IsEof());
    return (byte[])response.GetData().Array();
}
/// <exception cref="System.Exception"/>
public virtual void TestPathconf()
{
    // Build a PATHCONF request for /tmp/bar.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo("/tmp/bar");
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    XDR reqXdr = new XDR();
    PATHCONF3Request req = new PATHCONF3Request(fileHandle);
    req.Serialize(reqXdr);
    // An unprivileged caller is rejected.
    PATHCONF3Response deniedRsp = nfsd.Pathconf(reqXdr.AsReadOnlyWrap(),
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds.
    PATHCONF3Response okRsp = nfsd.Pathconf(reqXdr.AsReadOnlyWrap(),
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3Ok,
        okRsp.GetStatus());
}
/// <exception cref="System.Exception"/>
public virtual void TestCommit()
{
    // Build a COMMIT request for the first 5 bytes of /tmp/bar.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo("/tmp/bar");
    long fileId = status.GetFileId();
    FileHandle fileHandle = new FileHandle(fileId);
    XDR reqXdr = new XDR();
    COMMIT3Request req = new COMMIT3Request(fileHandle, 0, 5);
    req.Serialize(reqXdr);
    Org.Jboss.Netty.Channel.Channel channel =
        Org.Mockito.Mockito.Mock<Org.Jboss.Netty.Channel.Channel>();
    // An unprivileged caller is rejected.
    COMMIT3Response deniedRsp = nfsd.Commit(reqXdr.AsReadOnlyWrap(), channel, 1,
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds; a null response is expected here.
    COMMIT3Response okRsp = nfsd.Commit(reqXdr.AsReadOnlyWrap(), channel, 1,
        securityHandler, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect COMMIT3Response:", null, okRsp);
}
/// <exception cref="System.Exception"/>
public virtual void TestCreate()
{
    // Build a CREATE request for "fubar" under the test directory.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    XDR reqXdr = new XDR();
    FileHandle dirHandle = new FileHandle(dirId);
    CREATE3Request req = new CREATE3Request(dirHandle, "fubar",
        Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
    req.Serialize(reqXdr);
    // An unprivileged caller is rejected.
    CREATE3Response deniedRsp = nfsd.Create(reqXdr.AsReadOnlyWrap(),
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds.
    CREATE3Response okRsp = nfsd.Create(reqXdr.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code:", Nfs3Status.Nfs3Ok,
        okRsp.GetStatus());
}
/// <exception cref="System.Exception"/>
public virtual void TestSetattr()
{
    // Build a SETATTR request that changes only the uid of the test directory.
    HdfsFileStatus status = nn.GetRpcServer().GetFileInfo(testdir);
    long dirId = status.GetFileId();
    XDR reqXdr = new XDR();
    FileHandle dirHandle = new FileHandle(dirId);
    SetAttr3 attrs = new SetAttr3(0, 1, 0, 0, null, null,
        EnumSet.Of(SetAttr3.SetAttrField.Uid));
    SETATTR3Request req = new SETATTR3Request(dirHandle, attrs, false, null);
    req.Serialize(reqXdr);
    // An unprivileged caller is rejected.
    SETATTR3Response deniedRsp = nfsd.Setattr(reqXdr.AsReadOnlyWrap(),
        securityHandlerUnpriviledged, new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code", Nfs3Status.Nfs3errAcces,
        deniedRsp.GetStatus());
    // A privileged caller succeeds.
    SETATTR3Response okRsp = nfsd.Setattr(reqXdr.AsReadOnlyWrap(), securityHandler,
        new IPEndPoint("localhost", 1234));
    NUnit.Framework.Assert.AreEqual("Incorrect return code", Nfs3Status.Nfs3Ok,
        okRsp.GetStatus());
}
/// <summary>
/// Exercise WRITE with DATA_SYNC and FILE_SYNC stability levels against a
/// single-datanode mini cluster, verifying the data reads back intact.
/// </summary>
public virtual void TestWriteStableHow()
{
    NfsConfiguration config = new NfsConfiguration();
    DFSClient client = null;
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    SecurityHandler securityHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(securityHandler.GetUser()).ThenReturn(
        Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        client = new DFSClient(NameNode.GetAddress(config), config);
        // Use ephemeral ports in case tests are running in parallel.
        config.SetInt("nfs3.mountd.port", 0);
        config.SetInt("nfs3.server.port", 0);
        // Start the NFS gateway.
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 =
            new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        HdfsFileStatus status = client.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.GetFileId());
        // Create file1 for the DATA_SYNC case.
        CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.Serialize(createXdr);
        CREATE3Response createRsp = nfsd.Create(createXdr.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle = createRsp.GetObjHandle();
        // Ten bytes of sequential payload.
        byte[] buffer = new byte[10];
        for (int i = 0; i < 10; i++)
        {
            buffer[i] = unchecked((byte)i);
        }
        WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
            Nfs3Constant.WriteStableHow.DataSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr = new XDR();
        writeReq.Serialize(writeXdr);
        nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1, securityHandler,
            new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle, 60000);
        // Read back and verify the DATA_SYNC write.
        READ3Request readReq = new READ3Request(handle, 0, 10);
        XDR readXdr = new XDR();
        readReq.Serialize(readXdr);
        READ3Response readRsp = nfsd.Read(readXdr.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer,
            (byte[])readRsp.GetData().Array()));
        // Create file2 for the FILE_SYNC case.
        CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr2 = new XDR();
        createReq2.Serialize(createXdr2);
        CREATE3Response createRsp2 = nfsd.Create(createXdr2.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle2 = createRsp2.GetObjHandle();
        WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
            Nfs3Constant.WriteStableHow.FileSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr2 = new XDR();
        writeReq2.Serialize(writeXdr2);
        nfsd.Write(writeXdr2.AsReadOnlyWrap(), null, 1, securityHandler,
            new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle2, 60000);
        // Read back and verify the FILE_SYNC write.
        READ3Request readReq2 = new READ3Request(handle2, 0, 10);
        XDR readXdr2 = new XDR();
        readReq2.Serialize(readXdr2);
        READ3Response readRsp2 = nfsd.Read(readXdr2.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer,
            (byte[])readRsp2.GetData().Array()));
        // FILE_SYNC should sync the file size.
        status = client.GetFileInfo("/file2");
        NUnit.Framework.Assert.IsTrue(status.GetLen() == 10);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Write a multi-block file, abandon its last block, restart the NameNode
/// without closing the file, and verify the remaining blocks survived.
/// </summary>
public virtual void TestRestartDfsWithAbandonedBlock()
{
    Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        FileSystem fs = cluster.GetFileSystem();
        // Create the file with a small block size so it spans several blocks.
        stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
        stream.Write(DataBeforeRestart);
        stream.Hflush();
        // Wait until all but the last block have been flushed through.
        while (len < BlockSize * (NumBlocks - 1))
        {
            FileStatus status = fs.GetFileStatus(FilePath);
            len = status.GetLen();
            Sharpen.Thread.Sleep(100);
        }
        // Abandon the last block.
        DFSClient dfsclient = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
        HdfsFileStatus fileStatus = dfsclient.GetNamenode().GetFileInfo(FileName);
        LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(FileName,
            0, BlockSize * NumBlocks);
        NUnit.Framework.Assert.AreEqual(NumBlocks, blocks.GetLocatedBlocks().Count);
        LocatedBlock lastBlock = blocks.GetLastLocatedBlock();
        dfsclient.GetNamenode().AbandonBlock(lastBlock.GetBlock(),
            fileStatus.GetFileId(), FileName, dfsclient.clientName);
        // Explicitly do NOT close the file before restarting the NameNode.
        cluster.RestartNameNode();
        // The file must keep its pre-restart length minus the abandoned block,
        // which proves the blocks were persisted to the edit log.
        FileStatus status_1 = fs.GetFileStatus(FilePath);
        NUnit.Framework.Assert.IsTrue("Length incorrect: " + status_1.GetLen(),
            status_1.GetLen() == len - BlockSize);
        // Verify the data written before the restart, sans the abandoned block.
        FSDataInputStream readStream = fs.Open(FilePath);
        try
        {
            byte[] verifyBuf = new byte[DataBeforeRestart.Length - BlockSize];
            IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
            byte[] expectedBuf = new byte[DataBeforeRestart.Length - BlockSize];
            System.Array.Copy(DataBeforeRestart, 0, expectedBuf, 0, expectedBuf.Length);
            Assert.AssertArrayEquals(expectedBuf, verifyBuf);
        }
        finally
        {
            IOUtils.CloseStream(readStream);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Send UNSTABLE writes in reverse offset order and verify the NFS gateway
/// reassembles them correctly by reading the middle chunk back.
/// </summary>
public virtual void TestOOOWrites()
{
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    int bufSize = 32;
    int numOOO = 3;
    SecurityHandler securityHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(securityHandler.GetUser()).ThenReturn(
        Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider()
        .GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    // Use ephemeral ports in case tests are running in parallel.
    config.SetInt("nfs3.mountd.port", 0);
    config.SetInt("nfs3.server.port", 0);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 =
            new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        DFSClient dfsClient = new DFSClient(NameNode.GetAddress(config), config);
        HdfsFileStatus status = dfsClient.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.GetFileId());
        // Create a uniquely named target file under the root.
        CREATE3Request createReq = new CREATE3Request(rootHandle,
            "out-of-order-write" + Runtime.CurrentTimeMillis(),
            Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.Serialize(createXdr);
        CREATE3Response createRsp = nfsd.Create(createXdr.AsReadOnlyWrap(),
            securityHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle = createRsp.GetObjHandle();
        // Three buffers, each filled with its own index value.
        byte[][] oooBuf = new byte[][] { new byte[bufSize], new byte[bufSize],
            new byte[bufSize] };
        for (int i = 0; i < numOOO; i++)
        {
            Arrays.Fill(oooBuf[i], unchecked((byte)i));
        }
        // Issue the writes from the highest offset down to offset zero.
        for (int i_1 = 0; i_1 < numOOO; i_1++)
        {
            long offset = (numOOO - 1 - i_1) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
                Nfs3Constant.WriteStableHow.Unstable, ByteBuffer.Wrap(oooBuf[i_1]));
            XDR writeXdr = new XDR();
            writeReq.Serialize(writeXdr);
            nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1, securityHandler,
                new IPEndPoint("localhost", 1234));
        }
        WaitWrite(nfsd, handle, 60000);
        // Read the middle chunk back; it must contain the second buffer.
        READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.Serialize(readXdr);
        READ3Response readRsp = nfsd.Read(readXdr.AsReadOnlyWrap(), securityHandler,
            new IPEndPoint("localhost",
                config.GetInt(NfsConfigKeys.DfsNfsServerPortKey,
                    NfsConfigKeys.DfsNfsServerPortDefault)));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(oooBuf[1],
            (byte[])readRsp.GetData().Array()));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}