/// <summary>
/// Truncates a two-block file down to one block via the FileContext API and
/// verifies the recovery flag, the reported length, the surviving file
/// contents, and the namespace disk-space accounting.
/// </summary>
public virtual void TestTruncate()
{
    short repl = 3;
    int blockSize = 1024;
    int numOfBlocks = 2;
    DistributedFileSystem fs = cluster.GetFileSystem();
    Path dir = GetTestRootPath(fc, "test/hadoop");
    Path file = GetTestRootPath(fc, "test/hadoop/file");

    // Create a file spanning exactly two full blocks.
    byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
    FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);

    // Truncate on a block boundary: no block recovery should be needed,
    // so Truncate must report the file as immediately ready.
    int newLength = blockSize;
    bool isReady = fc.Truncate(file, newLength);
    NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);

    // Length metadata and the remaining bytes must both reflect the truncation.
    // NOTE: expected value goes first in AreEqual; the original had the
    // arguments reversed, which would have garbled failure messages.
    FileStatus fileStatus = fc.GetFileStatus(file);
    NUnit.Framework.Assert.AreEqual(newLength, fileStatus.GetLen());
    AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());

    // Space consumed = remaining bytes * replication factor.
    ContentSummary cs = fs.GetContentSummary(dir);
    NUnit.Framework.Assert.AreEqual("Bad disk space usage", newLength * repl, cs.GetSpaceConsumed(
        ));

    // Clean up the test directory.
    NUnit.Framework.Assert.IsTrue(fs.Delete(dir, true));
}
/// <summary>
/// Verifies that <c>FileSystem.ResolvePath</c> drops a URI fragment
/// (e.g. "#glacier") and yields the plain fully-qualified path.
/// </summary>
public virtual void TestStripFragmentFromPath()
{
    FileSystem localFs = FileSystem.GetLocal(new Configuration());
    Path expectedPath = TestPath.MakeQualified(localFs.GetUri(), localFs.GetWorkingDirectory(
        ));

    // Append a fragment to the qualified path and materialize the file.
    Path fragmentPath = new Path(new URI(expectedPath.ToString() + "#glacier"));
    FileSystemTestHelper.CreateFile(localFs, fragmentPath);

    // Resolution must discard the fragment and return the bare qualified path.
    Path actualPath = localFs.ResolvePath(fragmentPath);
    Assert.Equal("resolvePath did not strip fragment from Path", expectedPath, actualPath
        );
}