/// <summary>
/// Races two concurrent appenders against a DFS client whose getFileInfo is
/// artificially delayed, so that append() can be issued with a stale file stat.
/// The append is expected to fail only if the stale stat is actually used once
/// the file size crosses a checksum chunk boundary.
/// </summary>
public virtual void TestSmallAppendRace()
{
    Path target = new Path("/testSmallAppendRace");
    string targetPath = target.ToUri().GetPath();
    // Seed the file with a small amount of data.
    FSDataOutputStream outStream = fs.Create(target);
    AppendTestUtil.Write(outStream, 0, 123);
    outStream.Close();
    // Wrap the DFS client in a spy that introduces a delay between
    // getFileInfo and the append() call against the NameNode.
    DFSClient realClient = DFSClientAdapter.GetDFSClient(fs);
    DFSClient delayedClient = Org.Mockito.Mockito.Spy(realClient);
    Org.Mockito.Mockito.When(delayedClient.GetFileInfo(targetPath)).ThenAnswer(new _Answer_548(realClient, targetPath));
    DFSClientAdapter.SetDFSClient(fs, delayedClient);
    // Two worker threads appending to the same file concurrently.
    Sharpen.Thread appender1 = new _Thread_564(this, target);
    Sharpen.Thread appender2 = new _Thread_574(this, target);
    appender1.Start();
    appender2.Start();
    // The append fails when the file size crosses the checksum chunk
    // boundary, if append was called with a stale file stat.
    DoSmallAppends(target, fs, 20);
}
/// <summary>
/// Returns the qualified status of the file at <paramref name="f"/>, or
/// throws if the underlying client reports the path as nonexistent
/// (GetFileInfo returns null in that case rather than throwing).
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
public override FileStatus GetFileStatus(Path f)
{
    HdfsFileStatus info = dfs.GetFileInfo(GetUriPath(f));
    // Guard clause: a null status means the path does not exist.
    if (info == null)
    {
        throw new FileNotFoundException("File does not exist: " + f.ToString());
    }
    return info.MakeQualified(GetUri(), f);
}
/// <summary>
/// Handles the MOUNT protocol MNT request: validates the client's access
/// privilege, checks that the requested path is exported, resolves the
/// path to a file handle, records the mount, and writes the MNT response.
/// </summary>
/// <param name="xdr">request XDR; the export path is read from it</param>
/// <param name="out">response XDR the reply is serialized into</param>
/// <param name="xid">transaction id echoed in the reply</param>
/// <param name="client">address of the requesting client</param>
/// <returns>the response XDR (same object as <paramref name="out"/>)</returns>
public override XDR Mnt(XDR xdr, XDR @out, int xid, IPAddress client)
{
    // No host matcher configured at all => deny access.
    if (hostsMatcher == null)
    {
        return MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errAcces, @out, xid, null);
    }
    AccessPrivilege accessPrivilege = hostsMatcher.GetAccessPrivilege(client);
    if (accessPrivilege == AccessPrivilege.None)
    {
        return MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errAcces, @out, xid, null);
    }
    string path = xdr.ReadString();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("MOUNT MNT path: " + path + " client: " + client);
    }
    string host = client.GetHostName();
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Got host: " + host + " path: " + path);
    }
    if (!exports.Contains(path))
    {
        Log.Info("Path " + path + " is not shared.");
        MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
        return @out;
    }
    FileHandle handle = null;
    try
    {
        HdfsFileStatus exFileStatus = dfsClient.GetFileInfo(path);
        if (exFileStatus == null)
        {
            // FIX: GetFileInfo returns null (it does not throw) when the
            // export path does not exist; the previous code dereferenced the
            // null status and crashed with a NullReferenceException instead
            // of replying NFS3ERR_NOENT to the client.
            Log.Error("Can't get handle for export:" + path);
            MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
            return @out;
        }
        handle = new FileHandle(exFileStatus.GetFileId());
    }
    catch (IOException e)
    {
        Log.Error("Can't get handle for export:" + path, e);
        MountResponse.WriteMNTResponse(Nfs3Status.Nfs3errNoent, @out, xid, null);
        return @out;
    }
    System.Diagnostics.Debug.Assert((handle != null));
    Log.Info("Giving handle (fileId:" + handle.GetFileId() + ") to client for export " + path);
    mounts.AddItem(new MountEntry(host, path));
    MountResponse.WriteMNTResponse(Nfs3Status.Nfs3Ok, @out, xid, handle.GetContent());
    return @out;
}
/// <summary>
/// Exercises DFSClient.GetFileInfo: the root is a directory, a missing path
/// yields null, children counts are reported for a directory and a file, and
/// a non-absolute path is rejected with an "Invalid file name" RemoteException.
/// </summary>
public virtual void TestGetFileInfo()
{
    // "/" must exist and be a directory.
    Path root = new Path("/");
    NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(root).IsDirectory());
    // getFileInfo returns null (does not throw) for a nonexistent path.
    HdfsFileStatus status = dfsClient.GetFileInfo("/noSuchFile");
    NUnit.Framework.Assert.AreEqual("Non-existant file should result in null", null, status);
    // Build /name1/name2 and check children counts at each level.
    Path dir = new Path("/name1");
    Path child = new Path("/name1/name2");
    NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
    FSDataOutputStream stream = fs.Create(child, false);
    stream.Close();
    status = dfsClient.GetFileInfo(dir.ToString());
    NUnit.Framework.Assert.AreEqual(1, status.GetChildrenNum());
    status = dfsClient.GetFileInfo(child.ToString());
    NUnit.Framework.Assert.AreEqual(0, status.GetChildrenNum());
    // A non-absolute path must be rejected with the right exception.
    try
    {
        dfsClient.GetFileInfo("non-absolute");
        NUnit.Framework.Assert.Fail("getFileInfo for a non-absolute path did not throw IOException");
    }
    catch (RemoteException re)
    {
        NUnit.Framework.Assert.IsTrue("Wrong exception for invalid file name", re.ToString().Contains("Invalid file name"));
    }
}
/// <summary>
/// Verifies that every located block of <paramref name="path"/> reports
/// <paramref name="storageType"/> as its first storage type, and returns
/// the located blocks for further inspection by the caller.
/// </summary>
/// <param name="path">file whose block placement is checked</param>
/// <param name="storageType">expected storage type of each block</param>
/// <returns>the file's located blocks</returns>
/// <exception cref="System.IO.IOException"/>
protected internal LocatedBlocks EnsureFileReplicasOnStorageType(Path path, StorageType storageType)
{
    Log.Info("Ensure path: " + path + " is on StorageType: " + storageType);
    // The file must exist before we ask for its block locations.
    Assert.AssertThat(fs.Exists(path), IS.Is(true));
    long len = client.GetFileInfo(path.ToString()).GetLen();
    LocatedBlocks blocks = client.GetLocatedBlocks(path.ToString(), 0, len);
    foreach (LocatedBlock blk in blocks.GetLocatedBlocks())
    {
        Assert.AssertThat(blk.GetStorageTypes()[0], IS.Is(storageType));
    }
    return blocks;
}
/// <summary>
/// Recursively recovers the lease of every file under <paramref name="path"/>:
/// a plain file gets RecoverLease called directly; a directory is walked with
/// paged ListPaths calls and each entry is recursed into.
/// </summary>
/// <param name="dfs">client used to query and recover leases</param>
/// <param name="path">file or directory root to recover</param>
/// <exception cref="System.IO.IOException"/>
internal static void RecoverAllLeases(DFSClient dfs, Path path)
{
    string pathStr = path.ToString();
    HdfsFileStatus status = dfs.GetFileInfo(pathStr);
    if (status == null)
    {
        // FIX: GetFileInfo returns null for a nonexistent path; the old code
        // dereferenced it (status.IsDir()) and threw NullReferenceException.
        // Throw the conventional IOException subtype instead.
        throw new FileNotFoundException("File does not exist: " + pathStr);
    }
    if (!status.IsDir())
    {
        dfs.RecoverLease(pathStr);
        return;
    }
    // Walk the directory listing page by page; prev carries the pagination
    // cursor (last name of the previous page).
    byte[] prev = HdfsFileStatus.EmptyName;
    DirectoryListing dirList;
    do
    {
        dirList = dfs.ListPaths(pathStr, prev);
        HdfsFileStatus[] files = dirList.GetPartialListing();
        foreach (HdfsFileStatus f in files)
        {
            RecoverAllLeases(dfs, f.GetFullPath(path));
        }
        prev = dirList.GetLastName();
    }
    while (dirList.HasMore());
}
/// <summary>
/// Sends three WRITE3 requests for the same file in reverse offset order
/// (out-of-order) through the NFS3 gateway, then reads the middle chunk back
/// and verifies its contents, proving the gateway reassembles OOO writes.
/// </summary>
public virtual void TestOOOWrites()
{
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    int bufSize = 32;
    int numOOO = 3;
    // Security handler that reports the current OS user.
    SecurityHandler secHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(secHandler.GetUser()).ThenReturn(Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    // Allow the current user to proxy from any group / any host.
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    // Use emphral port in case tests are running in parallel
    config.SetInt("nfs3.mountd.port", 0);
    config.SetInt("nfs3.server.port", 0);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 = new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        DFSClient dfsClient = new DFSClient(NameNode.GetAddress(config), config);
        HdfsFileStatus rootStatus = dfsClient.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(rootStatus.GetFileId());
        // Create the target file under the root.
        CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + Runtime.CurrentTimeMillis(), Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.Serialize(createXdr);
        CREATE3Response createRsp = nfsd.Create(createXdr.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", 1234));
        FileHandle fileHandle = createRsp.GetObjHandle();
        // Three distinct chunks, each filled with its own index byte.
        byte[][] oooBuf = new byte[][] { new byte[bufSize], new byte[bufSize], new byte[bufSize] };
        for (int i = 0; i < numOOO; i++)
        {
            Arrays.Fill(oooBuf[i], unchecked((byte)i));
        }
        // Issue the writes in reverse offset order: chunk 0 lands at the
        // highest offset, chunk numOOO-1 at offset 0.
        for (int k = 0; k < numOOO; k++)
        {
            long offset = (numOOO - 1 - k) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(fileHandle, offset, bufSize, Nfs3Constant.WriteStableHow.Unstable, ByteBuffer.Wrap(oooBuf[k]));
            XDR writeXdr = new XDR();
            writeReq.Serialize(writeXdr);
            nfsd.Write(writeXdr.AsReadOnlyWrap(), null, 1, secHandler, new IPEndPoint("localhost", 1234));
        }
        WaitWrite(nfsd, fileHandle, 60000);
        // Read back the middle chunk (offset bufSize) and check contents.
        READ3Request readReq = new READ3Request(fileHandle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.Serialize(readXdr);
        READ3Response readRsp = nfsd.Read(readXdr.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", config.GetInt(NfsConfigKeys.DfsNfsServerPortKey, NfsConfigKeys.DfsNfsServerPortDefault)));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(oooBuf[1], ((byte[])readRsp.GetData().Array())));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Exercises the two stable-write modes of the NFS3 gateway: DATA_SYNC
/// (file1) and FILE_SYNC (file2). Each write is read back and compared, and
/// FILE_SYNC is additionally required to have synced the file length.
/// </summary>
public virtual void TestWriteStableHow()
{
    NfsConfiguration config = new NfsConfiguration();
    DFSClient client = null;
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    // Security handler that reports the current OS user.
    SecurityHandler secHandler = Org.Mockito.Mockito.Mock<SecurityHandler>();
    Org.Mockito.Mockito.When(secHandler.GetUser()).ThenReturn(Runtime.GetProperty("user.name"));
    string currentUser = Runtime.GetProperty("user.name");
    // Allow the current user to proxy from any group / any host.
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey(currentUser), "*");
    config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
    try
    {
        cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
        cluster.WaitActive();
        client = new DFSClient(NameNode.GetAddress(config), config);
        // Use emphral port in case tests are running in parallel
        config.SetInt("nfs3.mountd.port", 0);
        config.SetInt("nfs3.server.port", 0);
        // Start nfs
        Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3 nfs3 = new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
        nfs3.StartServiceInternal(false);
        nfsd = (RpcProgramNfs3)nfs3.GetRpcProgram();
        HdfsFileStatus status = client.GetFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.GetFileId());
        // --- DATA_SYNC path: create file1 and write with DataSync ---
        CREATE3Request create1 = new CREATE3Request(rootHandle, "file1", Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr1 = new XDR();
        create1.Serialize(createXdr1);
        CREATE3Response createRsp1 = nfsd.Create(createXdr1.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle1 = createRsp1.GetObjHandle();
        // Ten bytes 0..9 used as the payload for both files.
        byte[] buffer = new byte[10];
        for (int i = 0; i < 10; i++)
        {
            buffer[i] = unchecked((byte)i);
        }
        WRITE3Request write1 = new WRITE3Request(handle1, 0, 10, Nfs3Constant.WriteStableHow.DataSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr1 = new XDR();
        write1.Serialize(writeXdr1);
        nfsd.Write(writeXdr1.AsReadOnlyWrap(), null, 1, secHandler, new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle1, 60000);
        // Readback
        READ3Request read1 = new READ3Request(handle1, 0, 10);
        XDR readXdr1 = new XDR();
        read1.Serialize(readXdr1);
        READ3Response readRsp1 = nfsd.Read(readXdr1.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer, ((byte[])readRsp1.GetData().Array())));
        // --- FILE_SYNC path: create file2 and write with FileSync ---
        CREATE3Request create2 = new CREATE3Request(rootHandle, "file2", Nfs3Constant.CreateUnchecked, new SetAttr3(), 0);
        XDR createXdr2 = new XDR();
        create2.Serialize(createXdr2);
        CREATE3Response createRsp2 = nfsd.Create(createXdr2.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", 1234));
        FileHandle handle2 = createRsp2.GetObjHandle();
        WRITE3Request write2 = new WRITE3Request(handle2, 0, 10, Nfs3Constant.WriteStableHow.FileSync, ByteBuffer.Wrap(buffer));
        XDR writeXdr2 = new XDR();
        write2.Serialize(writeXdr2);
        nfsd.Write(writeXdr2.AsReadOnlyWrap(), null, 1, secHandler, new IPEndPoint("localhost", 1234));
        WaitWrite(nfsd, handle2, 60000);
        // Readback
        READ3Request read2 = new READ3Request(handle2, 0, 10);
        XDR readXdr2 = new XDR();
        read2.Serialize(readXdr2);
        READ3Response readRsp2 = nfsd.Read(readXdr2.AsReadOnlyWrap(), secHandler, new IPEndPoint("localhost", 1234));
        NUnit.Framework.Assert.IsTrue(Arrays.Equals(buffer, ((byte[])readRsp2.GetData().Array())));
        // FILE_SYNC should sync the file size
        status = client.GetFileInfo("/file2");
        NUnit.Framework.Assert.IsTrue(status.GetLen() == 10);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}