/// <summary>
/// Verifies that file blocks are restored correctly after a NameNode
/// restart, covering files whose last block is partial or exactly full.
/// </summary>
public virtual void TestAddBlock()
{
    DistributedFileSystem fs = cluster.GetFileSystem();
    Path file1 = new Path("/file1");
    Path file2 = new Path("/file2");
    Path file3 = new Path("/file3");
    Path file4 = new Path("/file4");
    // One partial block, one full block, full + partial, and two full blocks.
    DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
    DFSTestUtil.CreateFile(fs, file2, Blocksize, Replication, 0L);
    DFSTestUtil.CreateFile(fs, file3, Blocksize * 2 - 1, Replication, 0L);
    DFSTestUtil.CreateFile(fs, file4, Blocksize * 2, Replication, 0L);
    // restart NameNode
    cluster.RestartNameNode(true);
    FSDirectory fsdir = cluster.GetNamesystem().GetFSDirectory();
    // Expected block sizes per file after the restart.
    Path[] paths = { file1, file2, file3, file4 };
    long[][] expectedSizes =
    {
        new long[] { Blocksize - 1 },
        new long[] { Blocksize },
        new long[] { Blocksize, Blocksize - 1 },
        new long[] { Blocksize, Blocksize }
    };
    for (int i = 0; i < paths.Length; i++)
    {
        INodeFile node = fsdir.GetINode4Write(paths[i].ToString()).AsFile();
        BlockInfoContiguous[] blocks = node.GetBlocks();
        NUnit.Framework.Assert.AreEqual(expectedSizes[i].Length, blocks.Length);
        for (int j = 0; j < blocks.Length; j++)
        {
            NUnit.Framework.Assert.AreEqual(expectedSizes[i][j], blocks[j].GetNumBytes());
            // Every persisted block must be COMPLETE after the restart.
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                blocks[j].GetBlockUCState());
        }
    }
}
/// <summary>
/// Tests deleting a file whose trailing block (added after snapshot s1) is
/// still zero-size: the deletion must drop that empty block, while the
/// snapshot copy keeps only the original full block.
/// </summary>
public virtual void TestDeletionWithZeroSizeBlock()
{
    Path foo = new Path("/foo");
    Path bar = new Path(foo, "bar");
    DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
    SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
    hdfs.Append(bar);
    INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
    BlockInfoContiguous[] blocks = barNode.GetBlocks();
    NUnit.Framework.Assert.AreEqual(1, blocks.Length);
    NUnit.Framework.Assert.AreEqual(Blocksize, blocks[0].GetNumBytes());
    // Allocate a new (still zero-size) block directly through the NameNode RPC.
    ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blocks[0]);
    cluster.GetNameNodeRpc().AddBlock(bar.ToString(),
        hdfs.GetClient().GetClientName(), previous, null, barNode.GetId(), null);
    SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
    barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
    blocks = barNode.GetBlocks();
    NUnit.Framework.Assert.AreEqual(2, blocks.Length);
    NUnit.Framework.Assert.AreEqual(Blocksize, blocks[0].GetNumBytes());
    NUnit.Framework.Assert.AreEqual(0, blocks[1].GetNumBytes());
    hdfs.Delete(bar, true);
    // The copy captured in snapshot s1 must retain only the full first block.
    Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", bar.GetName());
    barNode = fsdir.GetINode(sbar.ToString()).AsFile();
    blocks = barNode.GetBlocks();
    NUnit.Framework.Assert.AreEqual(1, blocks.Length);
    NUnit.Framework.Assert.AreEqual(Blocksize, blocks[0].GetNumBytes());
}
/// <summary>
/// Tests the interaction of quota operations with snapshottable directories:
/// setting or clearing a quota must neither clear the snapshottable flag nor
/// record spurious snapshot diffs.
/// </summary>
public virtual void TestClearQuota()
{
    Path dir = new Path("/TestSnapshot");
    hdfs.Mkdirs(dir);
    hdfs.AllowSnapshot(dir);
    // QuotaDontSet leaves the quota untouched; no snapshot diff is expected.
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet, HdfsConstants.QuotaDontSet);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet - 1, HdfsConstants.QuotaDontSet - 1);
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
    // clear quota of dir
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    // dir should still be a snapshottable directory
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(1, dirNode.GetDiffs().AsList().Count);
    SnapshottableDirectoryStatus[] status = hdfs.GetSnapshottableDirListing();
    NUnit.Framework.Assert.AreEqual(1, status.Length);
    NUnit.Framework.Assert.AreEqual(dir, status[0].GetFullPath());
    // Create a subdirectory, snapshot again, then add a file so the quota
    // reset afterwards acts on a directory that is inside a snapshot.
    Path subDir = new Path(dir, "sub");
    hdfs.Mkdirs(subDir);
    hdfs.CreateSnapshot(dir, "s2");
    Path file = new Path(subDir, "file");
    DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    INode subNode = fsdir.GetINode4Write(subDir.ToString());
    NUnit.Framework.Assert.IsTrue(subNode.AsDirectory().IsWithSnapshot());
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffList =
        subNode.AsDirectory().GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(1, diffList.Count);
    // The single diff on "sub" must belong to snapshot s2 and record the
    // file creation in its created-children list.
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s2 =
        dirNode.GetSnapshot(DFSUtil.String2Bytes("s2"));
    NUnit.Framework.Assert.AreEqual(s2.GetId(), diffList[0].GetSnapshotId());
    IList<INode> createdList =
        diffList[0].GetChildrenDiff().GetList(Diff.ListType.Created);
    NUnit.Framework.Assert.AreEqual(1, createdList.Count);
    NUnit.Framework.Assert.AreSame(fsdir.GetINode4Write(file.ToString()), createdList[0]);
}
/// <summary>
/// Deletes <c>path</c> after a short delay and then re-inserts the deleted
/// inode into the NameNode's inode map (via Whitebox reflection) to provoke
/// the delete race this test exercises. Exceptions are logged, not rethrown,
/// since this runs on a background thread.
/// </summary>
public override void Run()
{
    try
    {
        Sharpen.Thread.Sleep(1000);
        // Fix: log messages were missing the space before the path,
        // producing fused output like "Deleting/file".
        TestDeleteRace.Log.Info("Deleting " + this.path);
        FSDirectory fsdir = this._enclosing.cluster.GetNamesystem().dir;
        INode fileINode = fsdir.GetINode4Write(this.path.ToString());
        INodeMap inodeMap = (INodeMap)Whitebox.GetInternalState(fsdir, "inodeMap");
        this.fs.Delete(this.path, false);
        // after deletion, add the inode back to the inodeMap
        inodeMap.Put(fileINode);
        TestDeleteRace.Log.Info("Deleted " + this.path);
    }
    catch (Exception e)
    {
        TestDeleteRace.Log.Info(e);
    }
}
/// <summary>
/// Tests that an append in progress survives a NameNode restart: the file
/// keeps a COMPLETE first block plus an UNDER_CONSTRUCTION last block whose
/// hsync'ed length was persisted.
/// </summary>
public virtual void TestAddBlockUC()
{
    DistributedFileSystem fs = cluster.GetFileSystem();
    Path file1 = new Path("/file1");
    DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
    FSDataOutputStream @out = null;
    try
    {
        // Append without closing the stream, then hsync with UpdateLength so
        // the NameNode persists the new length before the restart.
        @out = fs.Append(file1);
        string appendContent = "appending-content";
        @out.WriteBytes(appendContent);
        ((DFSOutputStream)@out.GetWrappedStream()).Hsync(
            EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
        // restart NN
        cluster.RestartNameNode(true);
        FSDirectory fsdir = cluster.GetNamesystem().GetFSDirectory();
        INodeFile fileNode = fsdir.GetINode4Write(file1.ToString()).AsFile();
        BlockInfoContiguous[] fileBlocks = fileNode.GetBlocks();
        NUnit.Framework.Assert.AreEqual(2, fileBlocks.Length);
        NUnit.Framework.Assert.AreEqual(Blocksize, fileBlocks[0].GetNumBytes());
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
            fileBlocks[0].GetBlockUCState());
        // The file started at Blocksize - 1 and block 0 is now full, so the
        // first appended byte filled block 0, leaving length - 1 bytes here.
        NUnit.Framework.Assert.AreEqual(appendContent.Length - 1,
            fileBlocks[1].GetNumBytes());
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction,
            fileBlocks[1].GetBlockUCState());
    }
    finally
    {
        if (@out != null)
        {
            @out.Close();
        }
    }
}
/// <summary>Test if the quota can be correctly updated for create file</summary>
/// <exception cref="System.Exception"/>
public virtual void TestQuotaUpdateWithFileCreate()
{
    Path foo = new Path(dir, "foo");
    Path createdFile = new Path(foo, "created_file.data");
    dfs.Mkdirs(foo);
    dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
    // Two and a half blocks, written with a small buffer.
    long fileLen = Blocksize * 2 + Blocksize / 2;
    DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize,
        Replication, seed);
    INode fooNode = fsdir.GetINode4Write(foo.ToString());
    NUnit.Framework.Assert.IsTrue(fooNode.IsDirectory());
    NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
    QuotaCounts counts =
        fooNode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    // Namespace usage: the directory itself plus the one created file.
    NUnit.Framework.Assert.AreEqual(2, counts.GetNameSpace());
    NUnit.Framework.Assert.AreEqual(fileLen * Replication, counts.GetStorageSpace());
}
/// <summary>
/// Tests that completing a file with a block carrying a stale generation
/// stamp is rejected with an IOException, while completing with the genuine
/// block succeeds.
/// </summary>
public virtual void TestCommitWithInvalidGenStamp()
{
    Path file = new Path("/file");
    FSDataOutputStream @out = null;
    try
    {
        @out = dfs.Create(file, (short)1);
        INodeFile fileNode = dir.GetINode4Write(file.ToString()).AsFile();
        ExtendedBlock previous = null;
        Block newBlock = DFSTestUtil.AddBlockToFile(cluster.GetDataNodes(), dfs,
            cluster.GetNamesystem(), file.ToString(), fileNode,
            dfs.GetClient().GetClientName(), previous, 100);
        // Submit a clone of the block carrying a bogus generation stamp.
        Block newBlockClone = new Block(newBlock);
        previous = new ExtendedBlock(cluster.GetNamesystem().GetBlockPoolId(),
            newBlockClone);
        previous.SetGenerationStamp(123);
        try
        {
            dfs.GetClient().GetNamenode().Complete(file.ToString(),
                dfs.GetClient().GetClientName(), previous, fileNode.GetId());
            NUnit.Framework.Assert.Fail("should throw exception because invalid genStamp");
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.IsTrue(e.ToString().Contains(
                "Commit block with mismatching GS. NN has " + newBlock +
                ", client submits " + newBlockClone));
        }
        // Completing with the unmodified block must succeed.
        previous = new ExtendedBlock(cluster.GetNamesystem().GetBlockPoolId(), newBlock);
        bool complete = dfs.GetClient().GetNamenode().Complete(file.ToString(),
            dfs.GetClient().GetClientName(), previous, fileNode.GetId());
        NUnit.Framework.Assert.IsTrue("should complete successfully", complete);
    }
    finally
    {
        IOUtils.Cleanup(null, @out);
    }
}
/// <summary>
/// Truncates the shared test file to <c>newLength</c>, checking first that
/// the predicted quota delta matches <c>expectedDiff</c>, then that the
/// actual storage and DISK usage after truncation equal <c>expectedUsage</c>.
/// </summary>
/// <exception cref="System.Exception"/>
private void TestTruncate(long newLength, long expectedDiff, long expectedUsage)
{
    // Before doing the real truncation, make sure the computation is correct.
    INodesInPath iip = fsdir.GetINodesInPath4Write(file.ToString());
    INodeFile fileNode = iip.GetLastINode().AsFile();
    fileNode.RecordModification(iip.GetLatestSnapshotId(), true);
    long diff = fileNode.ComputeQuotaDeltaForTruncate(newLength);
    NUnit.Framework.Assert.AreEqual(expectedDiff, diff);
    // Do the real truncation and wait for block recovery to finish.
    dfs.Truncate(file, newLength);
    TestFileTruncate.CheckBlockRecovery(file, dfs);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    QuotaCounts consumed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    long spaceUsed = consumed.GetStorageSpace();
    long diskUsed = consumed.GetTypeSpaces().Get(StorageType.Disk);
    NUnit.Framework.Assert.AreEqual(expectedUsage, spaceUsed);
    NUnit.Framework.Assert.AreEqual(expectedUsage, diskUsed);
}
/// <summary>
/// Creates a file under a directory carrying the given storage policy and a
/// per-storage-type quota, then verifies the space consumed for that
/// storage type.
/// </summary>
/// <exception cref="System.Exception"/>
internal virtual void TestQuotaByStorageTypeWithFileCreateCase(string storagePolicy
    , StorageType storageType, short replication)
{
    Path foo = new Path(dir, "foo");
    Path createdFile1 = new Path(foo, "created_file1.data");
    dfs.Mkdirs(foo);
    // set storage policy on directory "foo" to storagePolicy
    dfs.SetStoragePolicy(foo, storagePolicy);
    // set quota by storage type on directory "foo"
    dfs.SetQuotaByStorageType(foo, storageType, Blocksize * 10);
    INode fooNode = fsdir.GetINode4Write(foo.ToString());
    NUnit.Framework.Assert.IsTrue(fooNode.IsDirectory());
    NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
    // Create file of size 2.5 * BLOCKSIZE under directory "foo".
    long file1Len = Blocksize * 2 + Blocksize / 2;
    int bufLen = Blocksize / 16;
    // NOTE(review): the file is created with the Replication field, but the
    // assertion below scales by the replication parameter — confirm callers
    // always pass replication == Replication.
    DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize,
        Replication, seed);
    // Verify space consumed and remaining quota
    long storageTypeConsumed = fooNode.AsDirectory().GetDirectoryWithQuotaFeature()
        .GetSpaceConsumed().GetTypeSpaces().Get(storageType);
    NUnit.Framework.Assert.AreEqual(file1Len * replication, storageTypeConsumed);
}