/// <summary>Test snapshot after file appending</summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotAfterAppending()
{
    Path filePath = new Path(dir, "file");
    // Step 1: take snapshot s0, then create the file and append one block.
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
    DFSTestUtil.CreateFile(hdfs, filePath, Blocksize, Replication, seed);
    DFSTestUtil.AppendFile(hdfs, filePath, Blocksize);
    INodeFile inode = (INodeFile)fsdir.GetINode(filePath.ToString());
    // Step 2: take snapshot s1, lower the replication, and append another block.
    hdfs.CreateSnapshot(dir, "s1");
    hdfs.SetReplication(filePath, (short)(Replication - 1));
    DFSTestUtil.AppendFile(hdfs, filePath, Blocksize);
    // The current inode must reflect the new replication and the grown size.
    inode = (INodeFile)fsdir.GetINode(filePath.ToString());
    NUnit.Framework.Assert.AreEqual(Replication - 1, inode.GetFileReplication());
    NUnit.Framework.Assert.AreEqual(Blocksize * 3, inode.ComputeFileSize());
    // Step 3: take snapshot s2, then append once more.
    hdfs.CreateSnapshot(dir, "s2");
    DFSTestUtil.AppendFile(hdfs, filePath, Blocksize);
    // Replication stays at the lowered value; the size grows by one more block.
    inode = (INodeFile)fsdir.GetINode(filePath.ToString());
    NUnit.Framework.Assert.AreEqual(Replication - 1, inode.GetFileReplication());
    NUnit.Framework.Assert.AreEqual(Blocksize * 4, inode.ComputeFileSize());
}
/// <summary>
/// If some blocks at the end of the block list no longer belong to
/// any inode, collect them and update the block list.
/// </summary>
/// <param name="bsps">storage policy suite passed through to block collection</param>
/// <param name="file">the file whose trailing blocks may be collectible</param>
/// <param name="info">receives the blocks that can be deleted</param>
/// <param name="removedINodes">receives inodes that can be removed</param>
public virtual void CollectBlocksAndClear(BlockStoragePolicySuite bsps, INodeFile file, INode.BlocksMapUpdateInfo info, IList<INode> removedINodes)
{
    // If the current file is deleted and no snapshot diffs remain, the whole
    // file (and every block it holds) can be destroyed outright.
    if (IsCurrentFileDeleted() && GetDiffs().AsList().IsEmpty())
    {
        file.DestroyAndCollectBlocks(bsps, info, removedINodes);
        return;
    }
    // Find the max file size that must be retained. Fix: fetch the last diff
    // once; the original called GetDiffs().GetLast() and diffs.GetLast()
    // separately for the same value.
    FileDiff last = GetDiffs().GetLast();
    long max;
    if (IsCurrentFileDeleted())
    {
        // Current file is gone: only the latest snapshot's size matters.
        max = last == null ? 0 : last.GetFileSize();
    }
    else
    {
        max = file.ComputeFileSize();
    }
    // Collect blocks past the retained size when the last diff recorded no
    // block list; otherwise collect blocks beyond the snapshot's block list.
    BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.GetBlocks();
    if (snapshotBlocks == null)
    {
        file.CollectBlocksBeyondMax(max, info);
    }
    else
    {
        file.CollectBlocksBeyondSnapshot(snapshotBlocks, info);
    }
}
/// <summary>
/// Test snapshot during file appending, before the corresponding
/// <see cref="Org.Apache.Hadoop.FS.FSDataOutputStream"/>
/// instance closes.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotWhileAppending()
{
    Path filePath = new Path(dir, "file");
    DFSTestUtil.CreateFile(hdfs, filePath, Blocksize, Replication, seed);
    // Step 1: append without closing the stream, then take snapshot s0.
    HdfsDataOutputStream appendStream = AppendFileWithoutClosing(filePath, Blocksize);
    appendStream.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
    appendStream.Close();
    // An INodeFileUnderConstructionWithSnapshot of size BLOCKSIZE*2 should
    // now be stored in s0's deleted list.
    INodeFile inode = (INodeFile)fsdir.GetINode(filePath.ToString());
    NUnit.Framework.Assert.AreEqual(Blocksize * 2, inode.ComputeFileSize());
    INodeDirectory parentNode = fsdir.GetINode(dir.ToString()).AsDirectory();
    DirectoryWithSnapshotFeature.DirectoryDiff lastDiff = parentNode.GetDiffs().GetLast();
    // Step 2: append again without closing the stream.
    appendStream = AppendFileWithoutClosing(filePath, Blocksize);
    appendStream.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
    // Re-check the node recorded in s0's deleted list.
    parentNode = fsdir.GetINode(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.AreEqual(Blocksize * 2, inode.ComputeFileSize(lastDiff.GetSnapshotId()));
    // Step 3: take snapshot s1, then close the stream.
    hdfs.CreateSnapshot(dir, "s1");
    appendStream.Close();
    // An INodeFileUnderConstructionWithSnapshot of size BLOCKSIZE*3 should
    // have been stored in s1's deleted list.
    inode = (INodeFile)fsdir.GetINode(filePath.ToString());
    parentNode = fsdir.GetINode(dir.ToString()).AsDirectory();
    lastDiff = parentNode.GetDiffs().GetLast();
    NUnit.Framework.Assert.IsTrue(inode.IsWithSnapshot());
    NUnit.Framework.Assert.AreEqual(Blocksize * 3, inode.ComputeFileSize(lastDiff.GetSnapshotId()));
    // Step 4: modify the file, append without closing the stream, take
    // snapshot s2, then close the stream.
    hdfs.SetReplication(filePath, (short)(Replication - 1));
    appendStream = AppendFileWithoutClosing(filePath, Blocksize);
    hdfs.CreateSnapshot(dir, "s2");
    appendStream.Close();
    // Re-check the size recorded for the node in s1's deleted list.
    NUnit.Framework.Assert.AreEqual(Blocksize * 3, inode.ComputeFileSize(lastDiff.GetSnapshotId()));
}
/// <summary>
/// Reports whether the file changed (in length or captured metadata)
/// between the two given snapshots.
/// </summary>
internal virtual bool ChangedBetweenSnapshots(INodeFile file, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot from, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot to)
{
    // Locate the diff interval covered by the two snapshots; null means
    // there is no recorded change between them.
    int[] indexPair = diffs.ChangedBetweenSnapshots(from, to);
    if (indexPair == null)
    {
        return false;
    }
    int earlierIndex = indexPair[0];
    int laterIndex = indexPair[1];
    IList<FileDiff> diffList = diffs.AsList();
    // Compare file lengths first: a length difference alone means "changed".
    long earlierLength = diffList[earlierIndex].GetFileSize();
    long laterLength;
    if (laterIndex == diffList.Count)
    {
        // The later bound is past the last diff: use the current file size.
        laterLength = file.ComputeFileSize(true, false);
    }
    else
    {
        laterLength = diffList[laterIndex].GetFileSize();
    }
    if (earlierLength != laterLength)
    {
        return true;
    }
    // Same length: look for the first diff in the interval that captured an
    // inode metadata snapshot.
    INodeFileAttributes earlierAttr = null;
    for (int i = earlierIndex; i < laterIndex; i++)
    {
        INodeFileAttributes captured = diffList[i].snapshotINode;
        if (captured != null)
        {
            earlierAttr = captured;
            break;
        }
    }
    if (earlierAttr == null)
    {
        // No metadata change recorded at all within the interval.
        return false;
    }
    // Compare against the metadata visible at the later of the two snapshots.
    int laterSnapshotId = Math.Max(
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(from),
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(to));
    INodeFileAttributes laterAttr = diffs.GetSnapshotINode(laterSnapshotId, file);
    return !earlierAttr.MetadataEquals(laterAttr);
}
/// <summary>
/// Updates quota usage for a snapshot file diff that is being removed and
/// collects the blocks and inodes that become unreferenced.
/// </summary>
/// <param name="bsps">suite used to resolve the file's storage policy</param>
/// <param name="file">the file whose diff is being removed</param>
/// <param name="removed">the file diff being removed</param>
/// <param name="collectedBlocks">receives blocks that can be deleted</param>
/// <param name="removedINodes">receives inodes that can be removed</param>
/// <returns>the storage-space and storage-type quota delta released</returns>
public virtual QuotaCounts UpdateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile file, FileDiff removed, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
{
    long oldStoragespace = file.StoragespaceConsumed();
    byte storagePolicyID = file.GetStoragePolicyID();
    BlockStoragePolicy bsp = null;
    EnumCounters<StorageType> typeSpaces = new EnumCounters<StorageType>(typeof(StorageType));
    if (storagePolicyID != BlockStoragePolicySuite.IdUnspecified)
    {
        // Fix: reuse the already-fetched policy id instead of calling
        // file.GetStoragePolicyID() a second time.
        bsp = bsps.GetPolicy(storagePolicyID);
    }
    if (removed.snapshotINode != null)
    {
        short replication = removed.snapshotINode.GetFileReplication();
        short currentRepl = file.GetBlockReplication();
        if (currentRepl == 0)
        {
            // No current replicas: the old usage is the full file size
            // charged at the snapshot's replication factor.
            long oldFileSizeNoRep = file.ComputeFileSize(true, true);
            oldStoragespace = oldFileSizeNoRep * replication;
            if (bsp != null)
            {
                IList<StorageType> oldTypeChosen = bsp.ChooseStorageTypes(replication);
                foreach (StorageType t in oldTypeChosen)
                {
                    if (t.SupportTypeQuota())
                    {
                        typeSpaces.Add(t, -oldFileSizeNoRep);
                    }
                }
            }
        }
        else
        {
            if (replication > currentRepl)
            {
                // Replication was lowered after the snapshot: recompute the
                // old usage at the snapshot's replication, and record the
                // per-type delta (old types charged, new types credited).
                long oldFileSizeNoRep = file.StoragespaceConsumedNoReplication();
                oldStoragespace = oldFileSizeNoRep * replication;
                if (bsp != null)
                {
                    IList<StorageType> oldTypeChosen = bsp.ChooseStorageTypes(replication);
                    foreach (StorageType t in oldTypeChosen)
                    {
                        if (t.SupportTypeQuota())
                        {
                            typeSpaces.Add(t, -oldFileSizeNoRep);
                        }
                    }
                    IList<StorageType> newTypeChosen = bsp.ChooseStorageTypes(currentRepl);
                    foreach (StorageType t_1 in newTypeChosen)
                    {
                        if (t_1.SupportTypeQuota())
                        {
                            typeSpaces.Add(t_1, oldFileSizeNoRep);
                        }
                    }
                }
            }
        }
        // Release the ACL feature captured by the removed snapshot, if any.
        AclFeature aclFeature = removed.GetSnapshotINode().GetAclFeature();
        if (aclFeature != null)
        {
            AclStorage.RemoveAclFeature(aclFeature);
        }
    }
    GetDiffs().CombineAndCollectSnapshotBlocks(bsps, file, removed, collectedBlocks, removedINodes);
    // The released space is the difference between the (possibly recomputed)
    // old usage and the usage after combining the diff.
    long ssDelta = oldStoragespace - file.StoragespaceConsumed();
    return new QuotaCounts.Builder().StorageSpace(ssDelta).TypeSpaces(typeSpaces).Build();
}