/// <summary>
/// Verify that setting and clearing quota on a snapshottable directory
/// keeps it snapshottable and does not create spurious directory diffs,
/// and that a snapshot diff records children created after the snapshot.
/// </summary>
public virtual void TestClearQuota()
{
    Path dir = new Path("/TestSnapshot");
    hdfs.Mkdirs(dir);
    hdfs.AllowSnapshot(dir);
    // Setting the "don't set" sentinel must leave the diff list empty.
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet, HdfsConstants.QuotaDontSet);
    INodeDirectory rootNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(rootNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, rootNode.GetDiffs().AsList().Count);
    // An explicit (non-sentinel) quota value must not create a diff either.
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet - 1, HdfsConstants.QuotaDontSet - 1);
    rootNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(rootNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, rootNode.GetDiffs().AsList().Count);
    // Resetting the quota must not create a diff.
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    rootNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(rootNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, rootNode.GetDiffs().AsList().Count);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
    // clear quota of dir
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    // dir should still be a snapshottable directory, with exactly one diff
    rootNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(rootNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(1, rootNode.GetDiffs().AsList().Count);
    SnapshottableDirectoryStatus[] listing = hdfs.GetSnapshottableDirListing();
    NUnit.Framework.Assert.AreEqual(1, listing.Length);
    NUnit.Framework.Assert.AreEqual(dir, listing[0].GetFullPath());
    // Create a subdirectory, snapshot s2, then add a file afterwards; the
    // new file must show up in s2's created list for the subdirectory.
    Path subDir = new Path(dir, "sub");
    hdfs.Mkdirs(subDir);
    hdfs.CreateSnapshot(dir, "s2");
    Path filePath = new Path(subDir, "file");
    DFSTestUtil.CreateFile(hdfs, filePath, Blocksize, Replication, seed);
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    INode subINode = fsdir.GetINode4Write(subDir.ToString());
    NUnit.Framework.Assert.IsTrue(subINode.AsDirectory().IsWithSnapshot());
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> subDiffs =
        subINode.AsDirectory().GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(1, subDiffs.Count);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snap2 =
        rootNode.GetSnapshot(DFSUtil.String2Bytes("s2"));
    NUnit.Framework.Assert.AreEqual(snap2.GetId(), subDiffs[0].GetSnapshotId());
    IList<INode> created = subDiffs[0].GetChildrenDiff().GetList(Diff.ListType.Created);
    NUnit.Framework.Assert.AreEqual(1, created.Count);
    NUnit.Framework.Assert.AreSame(fsdir.GetINode4Write(filePath.ToString()), created[0]);
}
/// <summary>Load a node stored in the created list from fsimage.</summary>
/// <param name="createdNodeName">The name of the created node.</param>
/// <param name="parent">The directory that the created list belongs to.</param>
/// <returns>The created node.</returns>
/// <exception cref="System.IO.IOException">
/// If no INode matching <paramref name="createdNodeName"/> can be found.
/// </exception>
public static INode LoadCreated(byte[] createdNodeName, INodeDirectory parent)
{
    // An entry in the created list refers either to an INode in the deleted
    // list of a posterior snapshot diff, or to one of the current children.
    foreach (DirectoryWithSnapshotFeature.DirectoryDiff postDiff in parent.GetDiffs())
    {
        INode deleted = postDiff.GetChildrenDiff().Search(Diff.ListType.Deleted, createdNodeName);
        if (deleted != null)
        {
            return deleted;
        }
        // else keep scanning the next SnapshotDiff
    }
    // Not found in any posterior diff: fall back to the current child.
    INode child = parent.GetChild(createdNodeName,
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    if (child == null)
    {
        throw new IOException("Cannot find an INode associated with the INode " + DFSUtil
            .Bytes2String(createdNodeName) + " in created list while loading FSImage.");
    }
    return child;
}
/// <summary>
/// Test snapshot during file appending, before the corresponding
/// <see cref="Org.Apache.Hadoop.FS.FSDataOutputStream"/>
/// instance closes.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotWhileAppending()
{
    Path file = new Path(dir, "file");
    DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream stream = AppendFileWithoutClosing(file, Blocksize);
    stream.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
    stream.Close();
    // check: an INodeFileUnderConstructionWithSnapshot should be stored into
    // s0's deleted list, with size BLOCKSIZE*2
    INodeFile fileINode = (INodeFile)fsdir.GetINode(file.ToString());
    NUnit.Framework.Assert.AreEqual(Blocksize * 2, fileINode.ComputeFileSize());
    INodeDirectory dirINode = fsdir.GetINode(dir.ToString()).AsDirectory();
    DirectoryWithSnapshotFeature.DirectoryDiff lastDiff = dirINode.GetDiffs().GetLast();
    // 2. append without closing stream
    stream = AppendFileWithoutClosing(file, Blocksize);
    stream.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
    // re-check: the size recorded for the last diff must still be BLOCKSIZE*2
    dirINode = fsdir.GetINode(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.AreEqual(Blocksize * 2,
        fileINode.ComputeFileSize(lastDiff.GetSnapshotId()));
    // 3. take snapshot --> close stream
    hdfs.CreateSnapshot(dir, "s1");
    stream.Close();
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3
    // should have been stored in s1's deleted list
    fileINode = (INodeFile)fsdir.GetINode(file.ToString());
    dirINode = fsdir.GetINode(dir.ToString()).AsDirectory();
    lastDiff = dirINode.GetDiffs().GetLast();
    NUnit.Framework.Assert.IsTrue(fileINode.IsWithSnapshot());
    NUnit.Framework.Assert.AreEqual(Blocksize * 3,
        fileINode.ComputeFileSize(lastDiff.GetSnapshotId()));
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.SetReplication(file, (short)(Replication - 1));
    stream = AppendFileWithoutClosing(file, Blocksize);
    hdfs.CreateSnapshot(dir, "s2");
    stream.Close();
    // re-check: the size recorded in s1's diff is unaffected by the new append
    NUnit.Framework.Assert.AreEqual(Blocksize * 3,
        fileINode.ComputeFileSize(lastDiff.GetSnapshotId()));
}
/// <summary>
/// Load the
/// <see cref="Org.Apache.Hadoop.Hdfs.Tools.Snapshot.SnapshotDiff"/>
/// list for the INodeDirectoryWithSnapshot directory.
/// </summary>
/// <param name="dir">The snapshottable directory for loading.</param>
/// <param name="in">
/// The
/// <see cref="System.IO.DataInput"/>
/// instance to read.
/// </param>
/// <param name="loader">The loader</param>
/// <exception cref="System.IO.IOException"/>
public static void LoadDirectoryDiffList(INodeDirectory dir, DataInput @in, FSImageFormat.Loader
     loader)
{
    // The diff count is read unconditionally, even when the directory does
    // not carry the snapshot feature.
    int diffCount = @in.ReadInt();
    if (!dir.IsWithSnapshot())
    {
        return;
    }
    DirectoryWithSnapshotFeature.DirectoryDiffList diffList = dir.GetDiffs();
    // Each loaded diff is prepended to the directory's diff list.
    for (int idx = 0; idx < diffCount; idx++)
    {
        diffList.AddFirst(LoadDirectoryDiff(dir, @in, loader));
    }
}
/// <summary>
/// Find the latest snapshot that 1) covers the given inode (which means the
/// snapshot was either taken on the inode or taken on an ancestor of the
/// inode), and 2) was taken before the given snapshot (if the given snapshot
/// is not null).
/// </summary>
/// <param name="inode">the given inode that the returned snapshot needs to cover</param>
/// <param name="anchor">the returned snapshot should be taken before this given id.</param>
/// <returns>
/// id of the latest snapshot that covers the given inode and was taken
/// before the given snapshot (if it is not null).
/// </returns>
public static int FindLatestSnapshot(INode inode, int anchor)
{
    int latest = NoSnapshotId;
    // Walk up the ancestor chain; every directory that carries the snapshot
    // feature gets a chance to update the running "latest" id.
    INode current = inode;
    while (current != null)
    {
        if (current.IsDirectory())
        {
            INodeDirectory dir = current.AsDirectory();
            if (dir.IsWithSnapshot())
            {
                latest = dir.GetDiffs().UpdatePrior(anchor, latest);
            }
        }
        current = current.GetParent();
    }
    return latest;
}
/// <summary>
/// Load
/// <see cref="DirectoryDiff"/>
/// from fsimage.
/// </summary>
/// <param name="parent">The directory that the SnapshotDiff belongs to.</param>
/// <param name="in">
/// The
/// <see cref="System.IO.DataInput"/>
/// instance to read.
/// </param>
/// <param name="loader">
/// The
/// <see cref="Loader"/>
/// instance that this loading procedure is using.
/// </param>
/// <returns>
/// A
/// <see cref="DirectoryDiff"/>
/// .
/// </returns>
/// <exception cref="System.IO.IOException"/>
private static DirectoryWithSnapshotFeature.DirectoryDiff LoadDirectoryDiff(INodeDirectory
     parent, DataInput @in, FSImageFormat.Loader loader)
{
    // 1. Read the full path of the Snapshot root to identify the Snapshot
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot =
        loader.GetSnapshot(@in);
    // 2. Load DirectoryDiff#childrenSize
    int childrenSize = @in.ReadInt();
    // 3. Load DirectoryDiff#snapshotINode
    INodeDirectoryAttributes snapshotINode =
        LoadSnapshotINodeInDirectoryDiff(snapshot, @in, loader);
    // 4. Load the created list in SnapshotDiff#Diff
    IList<INode> created = LoadCreatedList(parent, @in);
    // 5. Load the deleted list in SnapshotDiff#Diff
    IList<INode> deleted = LoadDeletedList(parent, created, @in, loader);
    // 6. Compose the SnapshotDiff, handing the head of the directory's
    // current diff list (if any) to the constructor.
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> existingDiffs =
        parent.GetDiffs().AsList();
    DirectoryWithSnapshotFeature.DirectoryDiff head =
        existingDiffs.IsEmpty() ? null : existingDiffs[0];
    // The final flag is a reference comparison: it is true only when the
    // loaded attributes object is the snapshot root itself.
    return new DirectoryWithSnapshotFeature.DirectoryDiff(snapshot.GetId(), snapshotINode,
        head, childrenSize, created, deleted, snapshotINode == snapshot.GetRoot());
}
/// <summary>Check the correctness of snapshot list within snapshottable dir</summary>
/// <param name="srcRoot">the snapshottable directory inode to inspect</param>
/// <param name="sortedNames">expected snapshot root names in by-name listing order</param>
/// <param name="names">expected snapshot root names resolved from the diff list</param>
private void CheckSnapshotList(INodeDirectory srcRoot, string[] sortedNames, string[] names)
{
    NUnit.Framework.Assert.IsTrue(srcRoot.IsSnapshottable());
    // Snapshots listed by name must match the expected sorted names.
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> byName =
        srcRoot.GetDirectorySnapshottableFeature().GetSnapshotList();
    NUnit.Framework.Assert.AreEqual(sortedNames.Length, byName.Size());
    for (int i = 0; i < byName.Size(); i++)
    {
        NUnit.Framework.Assert.AreEqual(sortedNames[i], byName.Get(i).GetRoot().GetLocalName());
    }
    // Snapshots resolved via the directory diff list must match `names`.
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> byTime = srcRoot.GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(names.Length, byTime.Count);
    for (int j = 0; j < byTime.Count; j++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s =
            srcRoot.GetDirectorySnapshottableFeature().GetSnapshotById(byTime[j].GetSnapshotId());
        NUnit.Framework.Assert.AreEqual(names[j], s.GetRoot().GetLocalName());
    }
}
/// <summary>Load DirectoryDiff list for a directory with snapshot feature</summary>
/// <param name="in">the image input stream positioned at the diff section</param>
/// <param name="dir">the directory whose diff list is being loaded</param>
/// <param name="size">number of serialized directory diffs to read</param>
/// <param name="refList">shared inode-reference table used by the deleted list</param>
/// <exception cref="System.IO.IOException"/>
private void LoadDirectoryDiffList(InputStream @in, INodeDirectory dir, int size, IList
    <INodeReference> refList)
{
    // Ensure the directory carries the snapshot feature before attaching diffs.
    if (!dir.IsWithSnapshot())
    {
        dir.AddSnapshotFeature(null);
    }
    DirectoryWithSnapshotFeature.DirectoryDiffList diffs = dir.GetDiffs();
    FSImageFormatProtobuf.LoaderContext state = parent.GetLoaderContext();
    for (int i = 0; i < size; i++)
    {
        // load a directory diff
        FsImageProto.SnapshotDiffSection.DirectoryDiff diffProto =
            FsImageProto.SnapshotDiffSection.DirectoryDiff.ParseDelimitedFrom(@in);
        int snapshotId = diffProto.GetSnapshotId();
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot =
            snapshotMap[snapshotId];
        int childrenSize = diffProto.GetChildrenSize();
        bool useRoot = diffProto.GetIsSnapshotRoot();
        INodeDirectoryAttributes copy = null;
        if (useRoot)
        {
            copy = snapshot.GetRoot();
        }
        else if (diffProto.HasSnapshotCopy())
        {
            // Rebuild the saved directory attributes: permission, optional
            // ACLs and xattrs, modification time, and possibly quotas.
            FsImageProto.INodeSection.INodeDirectory dirCopyInPb = diffProto.GetSnapshotCopy();
            byte[] name = diffProto.GetName().ToByteArray();
            PermissionStatus permission = FSImageFormatPBINode.Loader.LoadPermission(
                dirCopyInPb.GetPermission(), state.GetStringTable());
            AclFeature acl = null;
            if (dirCopyInPb.HasAcl())
            {
                int[] entries = AclEntryStatusFormat.ToInt(FSImageFormatPBINode.Loader.LoadAclEntries(
                    dirCopyInPb.GetAcl(), state.GetStringTable()));
                acl = new AclFeature(entries);
            }
            XAttrFeature xAttrs = null;
            if (dirCopyInPb.HasXAttrs())
            {
                xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.LoadXAttrs(
                    dirCopyInPb.GetXAttrs(), state.GetStringTable()));
            }
            long modTime = dirCopyInPb.GetModificationTime();
            // -1 for both namespace and diskspace quota with no per-type
            // quotas means this copy carries no quota information at all.
            bool noQuota = dirCopyInPb.GetNsQuota() == -1 && dirCopyInPb.GetDsQuota() == -1
                && (!dirCopyInPb.HasTypeQuotas());
            if (noQuota)
            {
                copy = new INodeDirectoryAttributes.SnapshotCopy(name, permission, acl, modTime,
                    xAttrs);
            }
            else
            {
                EnumCounters<StorageType> typeQuotas = null;
                if (dirCopyInPb.HasTypeQuotas())
                {
                    ImmutableList<QuotaByStorageTypeEntry> qes =
                        FSImageFormatPBINode.Loader.LoadQuotaByStorageTypeEntries(
                            dirCopyInPb.GetTypeQuotas());
                    typeQuotas = new EnumCounters<StorageType>(typeof(StorageType),
                        HdfsConstants.QuotaReset);
                    foreach (QuotaByStorageTypeEntry qe in qes)
                    {
                        // Keep only non-negative quotas for storage types that
                        // actually support per-type quotas.
                        if (qe.GetQuota() >= 0 && qe.GetStorageType() != null
                            && qe.GetStorageType().SupportTypeQuota())
                        {
                            typeQuotas.Set(qe.GetStorageType(), qe.GetQuota());
                        }
                    }
                }
                copy = new INodeDirectoryAttributes.CopyWithQuota(name, permission, acl, modTime,
                    dirCopyInPb.GetNsQuota(), dirCopyInPb.GetDsQuota(), typeQuotas, xAttrs);
            }
        }
        // load created list
        IList<INode> createdList = LoadCreatedList(@in, dir, diffProto.GetCreatedListSize());
        // load deleted list
        IList<INode> deletedList = LoadDeletedList(refList, @in, dir,
            diffProto.GetDeletedINodeList(), diffProto.GetDeletedINodeRefList());
        // create the directory diff and prepend it to the diff list
        DirectoryWithSnapshotFeature.DirectoryDiff diff =
            new DirectoryWithSnapshotFeature.DirectoryDiff(snapshotId, copy, null, childrenSize,
                createdList, deletedList, useRoot);
        diffs.AddFirst(diff);
    }
}
/// <summary>
/// Save the directory diff list of <paramref name="dir"/> to the given output.
/// </summary>
/// <param name="dir">the directory whose diffs are written</param>
/// <param name="out">the image output to write to</param>
/// <param name="referenceMap">reference map used while serializing the diffs</param>
/// <exception cref="System.IO.IOException"/>
public static void SaveDirectoryDiffList(INodeDirectory dir, DataOutput @out, SnapshotFSImageFormat.ReferenceMap
     referenceMap)
{
    // Delegate to the shared INode diff writer.
    SaveINodeDiffs(dir.GetDiffs(), @out, referenceMap);
}