/// <summary>
/// With replication 3, hsync must bump the sync metric on every datanode,
/// while hflush must not bump it on any of them.
/// </summary>
public virtual void TestHSyncWithReplication()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    FileSystem fs = cluster.GetFileSystem();
    Path path = new Path("/testHSyncWithReplication/foo");
    int len = 1 << 16;
    FSDataOutputStream stream = fs.Create(path, FsPermission.GetDefault(),
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
        4096, (short)3, len, null);
    stream.Write(1);
    stream.Hflush();
    // hflush alone: no replica has synced yet
    for (int dn = 0; dn < 3; dn++)
    {
        CheckSyncMetric(cluster, dn, 0);
    }
    // first hsync: every replica syncs once
    stream.Hsync();
    for (int dn = 0; dn < 3; dn++)
    {
        CheckSyncMetric(cluster, dn, 1);
    }
    // second hsync: every replica syncs again
    stream.Hsync();
    for (int dn = 0; dn < 3; dn++)
    {
        CheckSyncMetric(cluster, dn, 2);
    }
    cluster.Shutdown();
}
/// <summary>
/// SequenceFile writer over an HDFS stream: hsync syncs, hflush does not,
/// closing the writer does not, closing the SYNC_BLOCK stream does.
/// </summary>
public virtual void TestSequenceFileSync()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    Path path = new Path("/testSequenceFileSync/foo");
    int len = 1 << 16;
    FSDataOutputStream stream = fs.Create(path, FsPermission.GetDefault(),
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
        4096, (short)1, len, null);
    SequenceFile.Writer writer = SequenceFile.CreateWriter(new Configuration(),
        SequenceFile.Writer.Stream(stream),
        SequenceFile.Writer.KeyClass(typeof(RandomDatum)),
        SequenceFile.Writer.ValueClass(typeof(RandomDatum)),
        SequenceFile.Writer.Compression(SequenceFile.CompressionType.None, new DefaultCodec()));
    // hflush through the writer does not sync ...
    writer.Hflush();
    CheckSyncMetric(cluster, 0);
    // ... but hsync does
    writer.Hsync();
    CheckSyncMetric(cluster, 1);
    // append one random record and hsync again
    int seed = new Random().Next();
    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
    generator.Next();
    writer.Append(generator.GetKey(), generator.GetValue());
    writer.Hsync();
    CheckSyncMetric(cluster, 2);
    // closing the writer does not sync ...
    writer.Close();
    CheckSyncMetric(cluster, 2);
    // ... closing the underlying SYNC_BLOCK stream does
    stream.Close();
    CheckSyncMetric(cluster, 3);
    cluster.Shutdown();
}
/// <summary>Appended blocks must land on the favored nodes passed to Append.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestFavoredNodesEndToEndForAppend()
{
    // create 10 files, each with its own randomly chosen preferred nodes
    for (int i = 0; i < NumFiles; i++)
    {
        // a freshly seeded Random per file gives a uniform distribution
        // without too many collisions (see the do-while loop in GetDatanodes)
        Random rand = new Random(Runtime.CurrentTimeMillis() + i);
        IPEndPoint[] favored = GetDatanodes(rand);
        Path p = new Path("/filename" + i);
        // create and close the file, then re-open for append with favored nodes
        dfs.Create(p, FsPermission.GetDefault(), true, 4096, (short)3, 4096L, null, null).Close();
        FSDataOutputStream appendStream = dfs.Append(p, EnumSet.Of(CreateFlag.Append), 4096,
            null, favored);
        appendStream.Write(SomeBytes);
        appendStream.Close();
        // verify every block's replicas are on the requested nodes
        BlockLocation[] locations = GetBlockLocations(p);
        foreach (BlockLocation loc in locations)
        {
            string[] actualHosts = loc.GetNames();
            string[] expectedHosts = GetStringForInetSocketAddrs(favored);
            NUnit.Framework.Assert.IsTrue(CompareNodes(actualHosts, expectedHosts));
        }
    }
}
/// <summary>
/// Deprecated path-based implementation of getFileLinkStatus: readlink f,
/// then stat it, and build a symlink-aware FileStatus from the two results.
/// </summary>
private FileStatus DeprecatedGetFileLinkStatusInternal(Path f)
{
    // An empty readlink target means f is not a symlink.
    string target = FileUtil.ReadLink(new FilePath(f.ToString()));
    try
    {
        FileStatus fs = GetFileStatus(f);
        // If f refers to a regular file or directory
        if (target.IsEmpty())
        {
            return(fs);
        }
        // Otherwise f refers to a symlink: keep the stat'ed attributes but
        // record the link target, and f itself as the path.
        return(new FileStatus(fs.GetLen(), false, fs.GetReplication(), fs.GetBlockSize(),
            fs.GetModificationTime(), fs.GetAccessTime(), fs.GetPermission(), fs.GetOwner(),
            fs.GetGroup(), new Path(target), f));
    }
    catch (FileNotFoundException e)
    {
        /* The exists method in the File class returns false for dangling
         * links so we can get a FileNotFoundException for links that exist.
         * It's also possible that we raced with a delete of the link. Use
         * the readBasicFileAttributes method in java.nio.file.attributes
         * when available. */
        if (!target.IsEmpty())
        {
            // Dangling link: synthesize a zero-length symlink status with
            // default permissions and empty owner/group.
            return(new FileStatus(0, false, 0, 0, 0, 0, FsPermission.GetDefault(),
                string.Empty, string.Empty, new Path(target), f));
        }
        // f refers to a file or directory that does not exist
        throw;
    }
}
/// <summary>
/// Mkdir must throw IOException when an ancestor of the target path is an
/// existing file, whether the file is the direct parent or further up.
/// </summary>
public virtual void TestMkdirsFailsForSubdirectoryOfExistingFile()
{
    Path testDir = QualifiedPath("test/hadoop", fc2);
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testDir));
    fc2.Mkdir(testDir, FsPermission.GetDefault(), true);
    Assert.True(FileContextTestHelper.Exists(fc2, testDir));
    // Create a file on fc1 using fc2's context, then try to mkdir right under it.
    FileContextTestHelper.CreateFile(fc1, QualifiedPath("test/hadoop/file", fc2));
    Path testSubDir = QualifiedPath("test/hadoop/file/subdir", fc2);
    try
    {
        fc1.Mkdir(testSubDir, FsPermission.GetDefault(), true);
        NUnit.Framework.Assert.Fail("Should throw IOException.");
    }
    catch (IOException)
    {
        // expected
    }
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc1, testSubDir));
    // Same failure when the file is several levels above the target directory.
    Path testDeepSubDir = QualifiedPath("test/hadoop/file/deep/sub/dir", fc1);
    try
    {
        fc2.Mkdir(testDeepSubDir, FsPermission.GetDefault(), true);
        NUnit.Framework.Assert.Fail("Should throw IOException.");
    }
    catch (IOException)
    {
        // expected
    }
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc1, testDeepSubDir));
}
/// <summary>
/// DirectoryCollection.CreateNonExistentDirs creates missing dirs (and their
/// parents) with the umask-derived permission, and leaves existing dirs alone.
/// </summary>
public virtual void TestCreateDirectories()
{
    conf.Set(CommonConfigurationKeys.FsPermissionsUmaskKey, "077");
    string dirA = new FilePath(testDir, "dirA").GetPath();
    string dirB = new FilePath(dirA, "dirB").GetPath();
    string dirC = new FilePath(testDir, "dirC").GetPath();
    Path pathC = new Path(dirC);
    // 0x1c8 == 0710 octal: pre-create dirC with a non-default permission
    FsPermission permDirC = new FsPermission((short)0x1c8);
    localFs.Mkdir(pathC, null, true);
    localFs.SetPermission(pathC, permDirC);
    string[] dirs = new string[] { dirA, dirB, dirC };
    DirectoryCollection dc = new DirectoryCollection(dirs,
        conf.GetFloat(YarnConfiguration.NmMaxPerDiskUtilizationPercentage,
            YarnConfiguration.DefaultNmMaxPerDiskUtilizationPercentage));
    FsPermission defaultPerm = FsPermission.GetDefault()
        .ApplyUMask(new FsPermission((short)FsPermission.DefaultUmask));
    bool createResult = dc.CreateNonExistentDirs(localFs, defaultPerm);
    NUnit.Framework.Assert.IsTrue(createResult);
    // dirA was created implicitly as dirB's parent; both get defaultPerm
    FileStatus status = localFs.GetFileStatus(new Path(dirA));
    NUnit.Framework.Assert.AreEqual("local dir parent not created with proper permissions",
        defaultPerm, status.GetPermission());
    status = localFs.GetFileStatus(new Path(dirB));
    NUnit.Framework.Assert.AreEqual("local dir not created with proper permissions",
        defaultPerm, status.GetPermission());
    // dirC already existed: its permission must be untouched
    status = localFs.GetFileStatus(pathC);
    NUnit.Framework.Assert.AreEqual("existing local directory permissions modified",
        permDirC, status.GetPermission());
}
/// <summary>
/// With SYNC_BLOCK set, filling exactly one block makes hflush itself sync;
/// an hsync at that boundary adds nothing until more data is written.
/// </summary>
public virtual void TestHSyncBlockBoundary()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    Path path = new Path("/testHSyncBlockBoundary/foo");
    int len = 1 << 16;
    byte[] fileContents = AppendTestUtil.InitBuffer(len);
    FSDataOutputStream stream = fs.Create(path, FsPermission.GetDefault(),
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
        4096, (short)1, len, null);
    // fill exactly one block (tests the SYNC_BLOCK case) and flush
    stream.Write(fileContents, 0, len);
    stream.Hflush();
    // the full block should have caused a sync
    CheckSyncMetric(cluster, 1);
    // hsync at the same boundary: metric stays at 1
    stream.Hsync();
    CheckSyncMetric(cluster, 1);
    // write one more byte and sync again
    stream.Write(1);
    stream.Hsync();
    CheckSyncMetric(cluster, 2);
    // close syncs because the file was opened with SYNC_BLOCK
    stream.Close();
    CheckSyncMetric(cluster, 3);
    cluster.Shutdown();
}
/// <summary>
/// Directories created through one FileContext are visible through the other;
/// repeated mkdir is idempotent and awkward names are handled.
/// </summary>
public virtual void TestCreateDirectory()
{
    Path path = QualifiedPath("test/hadoop", fc2);
    Path falsePath = QualifiedPath("path/doesnot.exist", fc2);
    Path subDirPath = QualifiedPath("dir0", fc2);
    // Ensure the target does not exist yet, under any interpretation.
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc1, path));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsFile(fc1, path));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsDir(fc1, path));
    // Create via fc1, observe via fc2 (both contexts share the file system).
    fc1.Mkdir(path, FsPermission.GetDefault(), true);
    Assert.True(FileContextTestHelper.IsDir(fc2, path));
    Assert.True(FileContextTestHelper.Exists(fc2, path));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsFile(fc2, path));
    // Creating the same dir repeatedly must not throw (mkdir -p semantics).
    fc1.Mkdir(subDirPath, FsPermission.GetDefault(), true);
    fc1.Mkdir(subDirPath, FsPermission.GetDefault(), true);
    fc1.Mkdir(subDirPath, FsPermission.GetDefault(), true);
    // Parent and grandparent directories were created implicitly.
    Path parentDir = path.GetParent();
    Assert.True(FileContextTestHelper.Exists(fc2, parentDir));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsFile(fc2, parentDir));
    Path grandparentDir = parentDir.GetParent();
    Assert.True(FileContextTestHelper.Exists(fc2, grandparentDir));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsFile(fc2, grandparentDir));
    // Negative test cases
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, falsePath));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsDir(fc2, falsePath));
    // Create multiple directories, including names with special characters.
    string[] dirNames = new string[] { "createTest/testDir", "createTest/test Dir",
        "deleteTest/test*Dir", "deleteTest/test#Dir", "deleteTest/test1234",
        "deleteTest/test_DIr", "deleteTest/1234Test", "deleteTest/test)Dir",
        "deleteTest/()&^%$#@!~_+}{><?", "  ", "^ " };
    foreach (string name in dirNames)
    {
        if (!IsTestableFileNameOnPlatform(name))
        {
            continue;
        }
        Path testPath = QualifiedPath(name, fc2);
        // Ensure it does not exist, create via fc1, verify via fc2.
        NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testPath));
        fc1.Mkdir(testPath, FsPermission.GetDefault(), true);
        Assert.True(FileContextTestHelper.Exists(fc2, testPath));
        Assert.True(FileContextTestHelper.IsDir(fc2, testPath));
    }
}
/// <summary>
/// Shared body for the hsync tests: checks exactly which operations bump the
/// datanode fsync metric, optionally after re-opening the file for append.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void TestHSyncOperation(bool testWithAppend)
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    DistributedFileSystem fs = cluster.GetFileSystem();
    Path path = new Path("/testHSync/foo");
    int len = 1 << 16;
    FSDataOutputStream stream = fs.Create(path, FsPermission.GetDefault(),
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
        4096, (short)1, len, null);
    if (testWithAppend)
    {
        // re-open the file with an append call
        stream.Close();
        stream = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.SyncBlock), 4096, null);
    }
    // hflush does not sync
    stream.Hflush();
    CheckSyncMetric(cluster, 0);
    // hsync on an empty file does nothing
    stream.Hsync();
    CheckSyncMetric(cluster, 0);
    // writing alone does not sync either
    stream.Write(1);
    CheckSyncMetric(cluster, 0);
    stream.Hsync();
    CheckSyncMetric(cluster, 1);
    // avoiding repeated hsyncs is a potential future optimization
    stream.Hsync();
    CheckSyncMetric(cluster, 2);
    // hflush still does not sync
    stream.Hflush();
    CheckSyncMetric(cluster, 2);
    // close syncs because the file was opened with SYNC_BLOCK
    stream.Close();
    CheckSyncMetric(cluster, 3);
    // repeat with a file created WITHOUT SYNC_BLOCK
    stream = fs.Create(path, FsPermission.GetDefault(),
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite), 4096, (short)1, len, null);
    stream.Hsync();
    CheckSyncMetric(cluster, 3);
    stream.Write(1);
    CheckSyncMetric(cluster, 3);
    stream.Hsync();
    CheckSyncMetric(cluster, 4);
    // repeated hsyncs
    stream.Hsync();
    CheckSyncMetric(cluster, 5);
    // close does not sync (not opened with SYNC_BLOCK)
    stream.Close();
    CheckSyncMetric(cluster, 5);
    cluster.Shutdown();
}
/// <summary>
/// Mock GetFileStatus answer: only a path named "file.txt" is recognized;
/// anything else raises FileNotFoundException.
/// </summary>
/// <exception cref="System.Exception"/>
public FileStatus Answer(InvocationOnMock args)
{
    Path queried = (Path)args.GetArguments()[0];
    if (!"file.txt".Equals(queried.GetName()))
    {
        throw new FileNotFoundException(queried + " not supported by mocking");
    }
    // fixed fake attributes: length 201, replication 1, blocksize 500, times 101
    return new FileStatus(201, false, 1, 500, 101, 101, FsPermission.GetDefault(),
        "me", "me", filePath);
}
/// <summary>ListFiles over directories: empty, single-file, and nested layouts.</summary>
public virtual void TestDirectory()
{
    fc.Mkdir(Dir1, FsPermission.GetDefault(), true);
    // Empty directory: recursive and non-recursive listings are both empty.
    RemoteIterator<LocatedFileStatus> iterator = fc.Util().ListFiles(Dir1, true);
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    iterator = fc.Util().ListFiles(Dir1, false);
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    // Directory with exactly one file.
    WriteFile(fc, File2, FileLen);
    iterator = fc.Util().ListFiles(Dir1, true);
    LocatedFileStatus status = iterator.Next();
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(FileLen, status.GetLen());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File2), status.GetPath());
    NUnit.Framework.Assert.AreEqual(1, status.GetBlockLocations().Length);
    iterator = fc.Util().ListFiles(Dir1, false);
    status = iterator.Next();
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(FileLen, status.GetLen());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File2), status.GetPath());
    NUnit.Framework.Assert.AreEqual(1, status.GetBlockLocations().Length);
    // More complicated tree: recursive listing yields File2, File3, File1
    // (in that order); non-recursive listing yields only File1.
    WriteFile(fc, File1, FileLen);
    WriteFile(fc, File3, FileLen);
    iterator = fc.Util().ListFiles(TestDir, true);
    status = iterator.Next();
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File2), status.GetPath());
    status = iterator.Next();
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File3), status.GetPath());
    status = iterator.Next();
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File1), status.GetPath());
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    iterator = fc.Util().ListFiles(TestDir, false);
    status = iterator.Next();
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File1), status.GetPath());
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
}
/// <summary>
/// Non-recursive Mkdir: succeeds under root, throws ParentNotDirectoryException
/// when the parent is a file, FileNotFoundException when the parent is missing.
/// </summary>
public virtual void TestMkdir()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
    DistributedFileSystem dfs = cluster.GetFileSystem();
    try
    {
        // Directory directly under the root: must succeed.
        NUnit.Framework.Assert.IsTrue(dfs.Mkdir(new Path("/mkdir-" + Time.Now()),
            FsPermission.GetDefault()));
        // Parent exists but is a file.
        IOException caught = null;
        string filePath = "/mkdir-file-" + Time.Now();
        DFSTestUtil.WriteFile(dfs, new Path(filePath), "hello world");
        try
        {
            dfs.Mkdir(new Path(filePath + "/mkdir"), FsPermission.GetDefault());
        }
        catch (IOException e)
        {
            caught = e;
        }
        NUnit.Framework.Assert.IsTrue("Create a directory when parent dir exists as file using"
            + " mkdir() should throw ParentNotDirectoryException ",
            caught != null && caught is ParentNotDirectoryException);
        // Parent does not exist at all.
        caught = null;
        try
        {
            dfs.Mkdir(new Path("/non-exist/mkdir-" + Time.Now()), FsPermission.GetDefault());
        }
        catch (IOException e)
        {
            caught = e;
        }
        NUnit.Framework.Assert.IsTrue("Create a directory in a non-exist parent dir using"
            + " mkdir() should throw FileNotFoundException ",
            caught != null && caught is FileNotFoundException);
    }
    finally
    {
        dfs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// IsDir is true for a created directory and for the root, and false for a
/// path that does not exist.
/// </summary>
public virtual void TestIsDirectory()
{
    string dirName = "dirTest";
    string invalidDir = "nonExistantDir";
    string rootDir = "/";
    Path existingPath = QualifiedPath(dirName, fc2);
    Path nonExistingPath = QualifiedPath(invalidDir, fc2);
    Path pathToRootDir = QualifiedPath(rootDir, fc2);
    // Create through fc1, verify through fc2.
    fc1.Mkdir(existingPath, FsPermission.GetDefault(), true);
    Assert.True(FileContextTestHelper.IsDir(fc2, existingPath));
    Assert.True(FileContextTestHelper.IsDir(fc2, pathToRootDir));
    // Negative test case
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsDir(fc2, nonExistingPath));
}
/// <summary>Test NN crash and client crash/stuck immediately after block allocation</summary>
/// <exception cref="System.Exception"/>
public virtual void TestOpenFileWhenNNAndClientCrashAfterAddBlock()
{
    // Require 100% of blocks reported before leaving safemode.
    cluster.GetConfiguration(0).Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "1.0f");
    string testData = "testData";
    // to make sure we write the full block before creating dummy block at NN.
    cluster.GetConfiguration(0).SetInt("io.bytes.per.checksum", testData.Length);
    cluster.RestartNameNode(0);
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        cluster.TransitionToStandby(1);
        DistributedFileSystem dfs = cluster.GetFileSystem(0);
        string pathString = "/tmp1.txt";
        Path filePath = new Path(pathString);
        FSDataOutputStream create = dfs.Create(filePath, FsPermission.GetDefault(), true,
            1024, (short)3, testData.Length, null);
        create.Write(Sharpen.Runtime.GetBytesForString(testData));
        create.Hflush();
        long fileId = ((DFSOutputStream)create.GetWrappedStream()).GetFileId();
        FileStatus fileStatus = dfs.GetFileStatus(filePath);
        DFSClient client = DFSClientAdapter.GetClient(dfs);
        // add one dummy block at NN, but not write to DataNode
        ExtendedBlock previousBlock = DFSClientAdapter.GetPreviousBlock(client, fileId);
        DFSClientAdapter.GetNamenode(client).AddBlock(pathString, client.GetClientName(),
            new ExtendedBlock(previousBlock), new DatanodeInfo[0],
            DFSClientAdapter.GetFileId((DFSOutputStream)create.GetWrappedStream()), null);
        // Restart NN and DN while the client stream is still open,
        // simulating a simultaneous NN crash and stuck/crashed client.
        cluster.RestartNameNode(0, true);
        cluster.RestartDataNode(0);
        cluster.TransitionToActive(0);
        // let the block reports be processed.
        Sharpen.Thread.Sleep(2000);
        // The file must still be readable despite the dangling last block.
        FSDataInputStream @is = dfs.Open(filePath);
        @is.Close();
        // initiate recovery; the second call must report success
        dfs.RecoverLease(filePath);
        NUnit.Framework.Assert.IsTrue("Recovery also should be success",
            dfs.RecoverLease(filePath));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// When the requested favored nodes do not exist, the write must still
/// succeed — the namenode simply picks other datanodes.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestWhenFavoredNodesNotPresent()
{
    // Three arbitrary local addresses that do not match any live datanode.
    // If we do getBlockLocations on the file afterwards, we should see one
    // block location with three hosts.
    IPEndPoint[] bogusAddrs = new IPEndPoint[3];
    for (int i = 0; i < 3; i++)
    {
        bogusAddrs[i] = GetArbitraryLocalHostAddr();
    }
    Path p = new Path("/filename-foo-bar");
    FSDataOutputStream stream = dfs.Create(p, FsPermission.GetDefault(), true, 4096,
        (short)3, 4096L, null, bogusAddrs);
    stream.Write(SomeBytes);
    stream.Close();
    GetBlockLocations(p);
}
//We should deprecate this soon?
public FileStatus(long length, bool isdir, int block_replication, long blocksize,
    long modification_time, long access_time, FsPermission permission, string owner,
    string group, Path symlink, Path path)
{
    this.length = length;
    this.isdir = isdir;
    this.block_replication = (short)block_replication;
    this.blocksize = blocksize;
    this.modification_time = modification_time;
    this.access_time = access_time;
    // When no permission is supplied, fall back to a type-appropriate default:
    // dir default for directories, generic default for symlinks, file default otherwise.
    if (permission != null)
    {
        this.permission = permission;
    }
    else if (isdir)
    {
        this.permission = FsPermission.GetDirDefault();
    }
    else if (symlink != null)
    {
        this.permission = FsPermission.GetDefault();
    }
    else
    {
        this.permission = FsPermission.GetFileDefault();
    }
    this.owner = owner ?? string.Empty;
    this.group = group ?? string.Empty;
    this.symlink = symlink;
    this.path = path;
    // The variables isdir and symlink indicate the type:
    // 1. isdir implies directory, in which case symlink must be null.
    // 2. !isdir implies a file or symlink, symlink != null implies a
    //    symlink, otherwise it's a file.
    System.Diagnostics.Debug.Assert((isdir && symlink == null) || !isdir);
}
/// <summary>
/// A favored node that is not a good placement target (decommissioned) must be
/// skipped; the replicas land on the remaining favored nodes.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestWhenSomeNodesAreNotGood()
{
    // 4 favored nodes
    IPEndPoint[] addrs = new IPEndPoint[4];
    string[] hosts = new string[addrs.Length];
    for (int i = 0; i < addrs.Length; i++)
    {
        addrs[i] = datanodes[i].GetXferAddress();
        hosts[i] = addrs[i].Address.GetHostAddress() + ":" + addrs[i].Port;
    }
    //make some datanode not "good" so that even if the client prefers it,
    //the namenode would not give it as a replica to write to
    DatanodeInfo d = cluster.GetNameNode().GetNamesystem().GetBlockManager()
        .GetDatanodeManager().GetDatanodeByXferAddr(addrs[0].Address.GetHostAddress(),
        addrs[0].Port);
    //set the decommission status to true so that
    //BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
    d.SetDecommissioned();
    Path p = new Path("/filename-foo-bar-baz");
    short replication = (short)3;
    FSDataOutputStream @out = dfs.Create(p, FsPermission.GetDefault(), true, 4096,
        replication, 4096L, null, addrs);
    @out.Write(SomeBytes);
    @out.Close();
    //reset the state
    d.StopDecommission();
    BlockLocation[] locations = GetBlockLocations(p);
    NUnit.Framework.Assert.AreEqual(replication, locations[0].GetNames().Length);
    //also make sure that the datanode[0] is not in the list of hosts
    for (int i_1 = 0; i_1 < replication; i_1++)
    {
        string loc = locations[0].GetNames()[i_1];
        // linear scan: on exit j is loc's index in hosts, or hosts.Length if absent
        int j = 0;
        for (; j < hosts.Length && !loc.Equals(hosts[j]); j++)
        {
        }
        // j > 0 => the replica is NOT on the decommissioned datanodes[0]
        NUnit.Framework.Assert.IsTrue("j=" + j, j > 0);
        // j < hosts.Length => the replica IS one of the remaining favored nodes
        NUnit.Framework.Assert.IsTrue("loc=" + loc + " not in host list " + Arrays.AsList
            (hosts) + ", j=" + j, j < hosts.Length);
    }
}
/// <summary>ListFiles on a single file returns exactly that file's status.</summary>
public virtual void TestFile()
{
    fc.Mkdir(TestDir, FsPermission.GetDefault(), true);
    WriteFile(fc, File1, FileLen);
    // Recursive listing of a plain file.
    RemoteIterator<LocatedFileStatus> iterator = fc.Util().ListFiles(File1, true);
    LocatedFileStatus status = iterator.Next();
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(FileLen, status.GetLen());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File1), status.GetPath());
    NUnit.Framework.Assert.AreEqual(1, status.GetBlockLocations().Length);
    // Non-recursive listing must behave identically for a plain file.
    iterator = fc.Util().ListFiles(File1, false);
    status = iterator.Next();
    NUnit.Framework.Assert.IsFalse(iterator.HasNext());
    NUnit.Framework.Assert.IsTrue(status.IsFile());
    NUnit.Framework.Assert.AreEqual(FileLen, status.GetLen());
    NUnit.Framework.Assert.AreEqual(fc.MakeQualified(File1), status.GetPath());
    NUnit.Framework.Assert.AreEqual(1, status.GetBlockLocations().Length);
}
/// <summary>
/// Create-then-delete round trips across the two contexts, including
/// directory names with special characters.
/// </summary>
public virtual void TestDeleteDirectory()
{
    string dirName = "dirTest";
    Path testDirPath = QualifiedPath(dirName, fc2);
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testDirPath));
    // Create via fc1, verify and delete via fc2.
    fc1.Mkdir(testDirPath, FsPermission.GetDefault(), true);
    Assert.True(FileContextTestHelper.Exists(fc2, testDirPath));
    Assert.True(FileContextTestHelper.IsDir(fc2, testDirPath));
    fc2.Delete(testDirPath, true);
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsDir(fc2, testDirPath));
    // Create and delete multiple directories with awkward names.
    string[] dirNames = new string[] { "deleteTest/testDir", "deleteTest/test Dir",
        "deleteTest/test*Dir", "deleteTest/test#Dir", "deleteTest/test1234",
        "deleteTest/1234Test", "deleteTest/test)Dir", "deleteTest/test_DIr",
        "deleteTest/()&^%$#@!~_+}{><?", "  ", "^ " };
    foreach (string name in dirNames)
    {
        if (!IsTestableFileNameOnPlatform(name))
        {
            continue;
        }
        Path testPath = QualifiedPath(name, fc2);
        // Ensure missing, create, verify.
        NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testPath));
        fc1.Mkdir(testPath, FsPermission.GetDefault(), true);
        Assert.True(FileContextTestHelper.Exists(fc2, testPath));
        Assert.True(FileContextTestHelper.IsDir(fc2, testPath));
        // Recursive delete must succeed and leave nothing behind.
        Assert.True(fc2.Delete(testPath, true));
        NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testPath));
        NUnit.Framework.Assert.IsFalse(FileContextTestHelper.IsDir(fc2, testPath));
    }
}
/// <summary>
/// Delete returns false for a missing directory and true for an existing one;
/// a second delete of the same path returns false again.
/// </summary>
public virtual void TestDeleteNonExistingDirectory()
{
    string testDirName = "testFile";
    Path testPath = QualifiedPath(testDirName, fc2);
    // TestCase1: delete a directory that never existed.
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testPath));
    NUnit.Framework.Assert.IsFalse(fc2.Delete(testPath, false));
    // TestCase2: create dir, delete dir, delete dir again.
    fc1.Mkdir(testPath, FsPermission.GetDefault(), true);
    Assert.True(FileContextTestHelper.Exists(fc2, testPath));
    // Deleting an existing directory returns true ...
    Assert.True(fc2.Delete(testPath, false));
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc2, testPath));
    // ... and deleting it once more returns false.
    NUnit.Framework.Assert.IsFalse(fc2.Delete(testPath, false));
}
/// <summary>Add the given symbolic link to the fs.</summary>
/// <remarks>Add the given symbolic link to the fs. Record it in the edits log.</remarks>
/// <exception cref="System.IO.IOException"/>
private static INodeSymlink AddSymlink(FSDirectory fsd, string path, INodesInPath iip,
    string target, PermissionStatus dirPerms, bool createParent, bool logRetryCache)
{
    long mtime = Time.Now();
    byte[] localName = iip.GetLastLocalName();
    if (createParent)
    {
        // Create any missing ancestor directories first (using dirPerms); bail
        // out on failure, then re-append the link's own name to the new path.
        KeyValuePair<INodesInPath, string> e = FSDirMkdirOp.CreateAncestorDirectories(fsd,
            iip, dirPerms);
        if (e == null)
        {
            return(null);
        }
        iip = INodesInPath.Append(e.Key, null, localName);
    }
    string userName = dirPerms.GetUserName();
    long id = fsd.AllocateNewInodeId();
    // NOTE(review): group is passed as null here — presumably resolved/inherited
    // elsewhere; confirm against PermissionStatus semantics.
    PermissionStatus perm = new PermissionStatus(userName, null, FsPermission.GetDefault());
    // Insert the symlink inode under the (possibly newly created) parent.
    INodeSymlink newNode = UnprotectedAddSymlink(fsd, iip.GetExistingINodes(), localName,
        id, target, mtime, mtime, perm);
    if (newNode == null)
    {
        NameNode.stateChangeLog.Info("addSymlink: failed to add " + path);
        return(null);
    }
    // Persist the operation in the edit log, honoring retry-cache semantics.
    fsd.GetEditLog().LogSymlink(path, target, mtime, mtime, newNode, logRetryCache);
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("addSymlink: " + path + " is added");
    }
    return(newNode);
}
/// <summary>
/// Builds a MkdirOp edit-log operation for the given path with timestamp 0
/// and fixed test credentials (testuser/testgroup, default permission).
/// </summary>
public static FSEditLogOp CreateMkdirOp(string path)
{
    FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
    PermissionStatus perms = new PermissionStatus("testuser", "testgroup",
        FsPermission.GetDefault());
    return FSEditLogOp.MkdirOp.GetInstance(cache)
        .SetPath(path)
        .SetTimestamp(0)
        .SetPermissionStatus(perms);
}
/// <summary>
/// Starts a 5-datanode mini cluster with a static rack/host mapping and
/// caches the RPC, namesystem, and default permission-status handles.
/// </summary>
public virtual void Setup()
{
    StaticMapping.ResetMap();
    Configuration conf = new HdfsConfiguration();
    string[] racks = new string[] { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
    string[] hosts = new string[] { "/host0", "/host1", "/host2", "/host3", "/host4" };
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
    conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(5)
        .Racks(racks)
        .Hosts(hosts)
        .Build();
    cluster.WaitActive();
    nameNodeRpc = cluster.GetNameNodeRpc();
    namesystem = cluster.GetNamesystem();
    perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
        FsPermission.GetDefault());
}
/// <summary>
/// ListStatus in both its array and iterator forms over a created directory
/// tree, including names with special characters.
/// </summary>
public virtual void TestListStatus()
{
    string hPrefix = "test/hadoop";
    string[] dirs = new string[] { hPrefix + "/a", hPrefix + "/b", hPrefix + "/c",
        hPrefix + "/1", hPrefix + "/#@#@", hPrefix + "/&*#$#$@234" };
    AList<Path> testDirs = new AList<Path>();
    foreach (string d in dirs)
    {
        if (IsTestableFileNameOnPlatform(d))
        {
            testDirs.AddItem(QualifiedPath(d, fc2));
        }
    }
    NUnit.Framework.Assert.IsFalse(FileContextTestHelper.Exists(fc1, testDirs[0]));
    foreach (Path path in testDirs)
    {
        fc1.Mkdir(path, FsPermission.GetDefault(), true);
    }
    // listStatus returning an array of FileStatus
    FileStatus[] paths = fc1.Util().ListStatus(QualifiedPath("test", fc1));
    Assert.Equal(1, paths.Length);
    Assert.Equal(QualifiedPath(hPrefix, fc1), paths[0].GetPath());
    paths = fc1.Util().ListStatus(QualifiedPath(hPrefix, fc1));
    Assert.Equal(testDirs.Count, paths.Length);
    for (int i = 0; i < testDirs.Count; i++)
    {
        bool found = false;
        for (int j = 0; j < paths.Length; j++)
        {
            if (QualifiedPath(testDirs[i].ToString(), fc1).Equals(paths[j].GetPath()))
            {
                found = true;
            }
        }
        Assert.True(testDirs[i] + " not found", found);
    }
    // an empty directory lists as a zero-length array
    paths = fc1.Util().ListStatus(QualifiedPath(dirs[0], fc1));
    Assert.Equal(0, paths.Length);
    // listStatus returning an iterator of FileStatus
    RemoteIterator<FileStatus> pathsItor = fc1.ListStatus(QualifiedPath("test", fc1));
    Assert.Equal(QualifiedPath(hPrefix, fc1), pathsItor.Next().GetPath());
    NUnit.Framework.Assert.IsFalse(pathsItor.HasNext());
    pathsItor = fc1.ListStatus(QualifiedPath(hPrefix, fc1));
    int dirLen = 0;
    for (; pathsItor.HasNext(); dirLen++)
    {
        FileStatus stat = pathsItor.Next();
        bool found = false;
        for (int j = 0; j < dirs.Length; j++)
        {
            if (QualifiedPath(dirs[j], fc1).Equals(stat.GetPath()))
            {
                found = true;
                break;
            }
        }
        Assert.True(stat.GetPath() + " not found", found);
    }
    Assert.Equal(testDirs.Count, dirLen);
    // empty directory: the iterator is immediately exhausted
    pathsItor = fc1.ListStatus(QualifiedPath(dirs[0], fc1));
    NUnit.Framework.Assert.IsFalse(pathsItor.HasNext());
}
/// <summary>Callback body: resets file1's permission back to the default.</summary>
/// <exception cref="System.IO.IOException"/>
public void Run(FileSystem fs)
{
    fs.SetPermission(file1, FsPermission.GetDefault());
}
/// <summary>
/// Round-trips the fsimage through every compression transition:
/// uncompressed -> default codec -> gzip -> uncompressed -> uncompressed.
/// </summary>
public virtual void TestCompression()
{
    Log.Info("Test compressing image.");
    Configuration conf = new Configuration();
    FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
    conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:0");
    FilePath base_dir = new FilePath(PathUtils.GetTestDir(GetType()), "dfs/");
    conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(base_dir, "name").GetPath());
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    DFSTestUtil.FormatNameNode(conf);
    // Start a namenode, create one directory, and save an uncompressed image.
    Log.Info("Create an uncompressed fsimage");
    NameNode namenode = new NameNode(conf);
    namenode.GetNamesystem().Mkdirs("/test",
        new PermissionStatus("hairong", null, FsPermission.GetDefault()), true);
    NamenodeProtocols nnRpc = namenode.GetRpcServer();
    NUnit.Framework.Assert.IsTrue(nnRpc.GetFileInfo("/test").IsDir());
    nnRpc.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    nnRpc.SaveNamespace();
    namenode.Stop();
    namenode.Join();
    // Uncompressed -> compressed with the default codec.
    Log.Info("Read an uncomressed image and store it compressed using default codec.");
    conf.SetBoolean(DFSConfigKeys.DfsImageCompressKey, true);
    CheckNameSpace(conf);
    // Default codec -> Gzip codec.
    Log.Info("Read a compressed image and store it using a different codec.");
    conf.Set(DFSConfigKeys.DfsImageCompressionCodecKey,
        "org.apache.hadoop.io.compress.GzipCodec");
    CheckNameSpace(conf);
    // Gzip -> uncompressed.
    Log.Info("Read a compressed image and store it as uncompressed.");
    conf.SetBoolean(DFSConfigKeys.DfsImageCompressKey, false);
    CheckNameSpace(conf);
    // Uncompressed -> uncompressed.
    Log.Info("Read an uncompressed image and store it as uncompressed.");
    CheckNameSpace(conf);
}
/// <summary>Constructor</summary>
/// <param name="length">the number of bytes the file has</param>
/// <param name="isdir">if the path is a directory</param>
/// <param name="block_replication">the replication factor</param>
/// <param name="blocksize">the block size</param>
/// <param name="modification_time">modification time</param>
/// <param name="access_time">access time</param>
/// <param name="permission">permission</param>
/// <param name="owner">the owner of the path</param>
/// <param name="group">the group of the path</param>
/// <param name="path">the local name in java UTF8 encoding the same as that in-memory
/// </param>
/// <param name="fileId">the file id</param>
/// <param name="feInfo">the file's encryption info</param>
public HdfsFileStatus(long length, bool isdir, int block_replication, long blocksize,
    long modification_time, long access_time, FsPermission permission, string owner,
    string group, byte[] symlink, byte[] path, long fileId, int childrenNum,
    FileEncryptionInfo feInfo, byte storagePolicy)
{
    this.length = length;
    this.isdir = isdir;
    this.block_replication = (short)block_replication;
    this.blocksize = blocksize;
    this.modification_time = modification_time;
    this.access_time = access_time;
    // Dirs and symlinks fall back to the generic default permission,
    // regular files to the file-specific default.
    if (permission != null)
    {
        this.permission = permission;
    }
    else if (isdir || symlink != null)
    {
        this.permission = FsPermission.GetDefault();
    }
    else
    {
        this.permission = FsPermission.GetFileDefault();
    }
    this.owner = owner ?? string.Empty;
    this.group = group ?? string.Empty;
    // symlink: target encoded in java UTF8, or null for non-links;
    // path: local name of the inode encoded in java UTF8
    this.symlink = symlink;
    this.path = path;
    this.fileId = fileId;
    // used by dirs, not including dot and dotdot; always zero for a regular file
    this.childrenNum = childrenNum;
    this.feInfo = feInfo;
    this.storagePolicy = storagePolicy;
}