/// <summary>Add a snapshot.</summary>
/// <param name="snapshotRoot">The snapshottable directory the snapshot is taken of.</param>
/// <param name="id">The id assigned to the new snapshot.</param>
/// <param name="name">The snapshot name; must be unique within this directory.</param>
/// <returns>The newly created snapshot.</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
public virtual Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot AddSnapshot(INodeDirectory snapshotRoot, int id, string name)
{
    // check snapshot quota
    int n = GetNumSnapshots();
    if (n + 1 > snapshotQuota)
    {
        throw new SnapshotException("Failed to add snapshot: there are already " + n + " snapshot(s) and the snapshot quota is " + snapshotQuota);
    }
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(id, name, snapshotRoot);
    byte[] nameBytes = s.GetRoot().GetLocalNameBytes();
    // reject duplicate names; SearchSnapshot returns a non-negative index on a hit
    int i = SearchSnapshot(nameBytes);
    if (i >= 0)
    {
        throw new SnapshotException("Failed to add snapshot: there is already a " + "snapshot with the same name \"" + Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotName(s) + "\".");
    }
    // record a new DirectoryDiff for this snapshot id before inserting by name
    DirectoryWithSnapshotFeature.DirectoryDiff d = GetDiffs().AddDiff(id, snapshotRoot);
    d.SetSnapshotRoot(s.GetRoot());
    // a negative search result encodes the insertion point as (-i - 1)
    snapshotsByNames.Add(-i - 1, s);
    // set modification time
    long now = Time.Now();
    snapshotRoot.UpdateModificationTime(now, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    s.GetRoot().SetModificationTime(now, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    return (s);
}
/// <summary>
/// Determine the range of diff-list indices recording the changes between two
/// snapshots; the arguments may be passed in either order.
/// </summary>
/// <returns>
/// {earlierDiffIndex, laterDiffIndex} — a half-open index range into the diff
/// list (callers iterate i &lt; laterDiffIndex) — or null when no modification
/// is recorded between the two snapshots.
/// </returns>
internal int[] ChangedBetweenSnapshots(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot from, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot to)
{
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot earlier = from;
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot later = to;
    // IdComparator sorts null after any real snapshot (see TestIdCmp), so
    // 'earlier' is non-null whenever at least one argument is non-null
    if (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(from, to) > 0)
    {
        earlier = to;
        later = from;
    }
    int size = diffs.Count;
    // BinarySearch returns (-insertionPoint - 1) when the id is absent
    int earlierDiffIndex = Sharpen.Collections.BinarySearch(diffs, earlier.GetId());
    // a null 'later' means the current state, i.e. past the end of the list
    int laterDiffIndex = later == null ? size : Sharpen.Collections.BinarySearch(diffs, later.GetId());
    if (-earlierDiffIndex - 1 == size)
    {
        // if the earlierSnapshot is after the latest SnapshotDiff stored in
        // diffs, no modification happened after the earlierSnapshot
        return (null);
    }
    if (laterDiffIndex == -1 || laterDiffIndex == 0)
    {
        // if the laterSnapshot is the earliest SnapshotDiff stored in diffs, or
        // before it, no modification happened before the laterSnapshot
        return (null);
    }
    // convert "not found" results into their insertion points
    earlierDiffIndex = earlierDiffIndex < 0 ? (-earlierDiffIndex - 1) : earlierDiffIndex;
    laterDiffIndex = laterDiffIndex < 0 ? (-laterDiffIndex - 1) : laterDiffIndex;
    return (new int[] { earlierDiffIndex, laterDiffIndex });
}
/// <summary>
/// Test
/// <see cref="Snapshot.IdComparator"/>
/// : null ordering and agreement with ordinal name order.
/// </summary>
public virtual void TestIdCmp()
{
    PermissionStatus perm = PermissionStatus.CreateImmutable("user", "group", FsPermission.CreateImmutable((short)0));
    INodeDirectory snapshottable = new INodeDirectory(0, DFSUtil.String2Bytes("foo"), perm, 0L);
    snapshottable.AddSnapshottableFeature();
    // ids and names are aligned (id 1 <-> "s1", id 2 <-> "s2"), so comparing by
    // id and comparing by name must agree in sign below; duplicates exercise
    // the equality case
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot[] snapshots = new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot[] { new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(1, "s1", snapshottable), new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(1, "s1", snapshottable), new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(2, "s2", snapshottable), new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(2, "s2", snapshottable) };
    NUnit.Framework.Assert.AreEqual(0, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(null, null));
    foreach (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s in snapshots)
    {
        // null sorts after every real snapshot
        NUnit.Framework.Assert.IsTrue(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(null, s) > 0);
        NUnit.Framework.Assert.IsTrue(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(s, null) < 0);
        foreach (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot t in snapshots)
        {
            int expected = string.CompareOrdinal(s.GetRoot().GetLocalName(), t.GetRoot().GetLocalName());
            int computed = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(s, t);
            // only signs must agree, not magnitudes
            NUnit.Framework.Assert.AreEqual(expected > 0, computed > 0);
            NUnit.Framework.Assert.AreEqual(expected == 0, computed == 0);
            NUnit.Framework.Assert.AreEqual(expected < 0, computed < 0);
        }
    }
}
/// <summary>Describe a snapshot diff computation anchored at a snapshottable directory.</summary>
/// <param name="snapshotRoot">The snapshottable directory under which the diff is computed.</param>
/// <param name="start">Start snapshot of the comparison (null denotes the current tree — see ComputeDiff).</param>
/// <param name="end">End snapshot of the comparison (null denotes the current tree).</param>
internal SnapshotDiffInfo(INodeDirectory snapshotRoot, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot start, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot end)
{
    // only a snapshottable directory may anchor a diff computation
    Preconditions.CheckArgument(snapshotRoot.IsSnapshottable());
    this.snapshotRoot = snapshotRoot;
    this.to = end;
    this.from = start;
}
/// <summary>Compute the difference between Snapshots.</summary>
/// <param name="fromSnapshot">
/// Start point of the diff computation. Null indicates
/// current tree.
/// </param>
/// <param name="toSnapshot">
/// End point of the diff computation. Null indicates current
/// tree.
/// </param>
/// <param name="diff">
/// Used to capture the changes happening to the children. Note
/// that the diff still represents (later_snapshot - earlier_snapshot)
/// although toSnapshot can be before fromSnapshot.
/// </param>
/// <param name="currentINode">
/// The
/// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeDirectory"/>
/// this feature belongs to.
/// </param>
/// <returns>Whether changes happened between the startSnapshot and endSnaphsot.</returns>
internal virtual bool ComputeDiffBetweenSnapshots(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot fromSnapshot, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot toSnapshot, DirectoryWithSnapshotFeature.ChildrenDiff diff, INodeDirectory currentINode)
{
    int[] diffIndexPair = diffs.ChangedBetweenSnapshots(fromSnapshot, toSnapshot);
    if (diffIndexPair == null)
    {
        // no stored diffs fall between the two snapshots
        return (false);
    }
    int earlierDiffIndex = diffIndexPair[0];
    int laterDiffIndex = diffIndexPair[1];
    bool dirMetadataChanged = false;
    INodeDirectoryAttributes dirCopy = null;
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> difflist = diffs.AsList();
    // accumulate children changes over the index range and watch for any
    // change in the directory's own metadata within it
    for (int i = earlierDiffIndex; i < laterDiffIndex; i++)
    {
        DirectoryWithSnapshotFeature.DirectoryDiff sdiff = difflist[i];
        diff.CombinePosterior(sdiff.diff, null);
        if (!dirMetadataChanged && sdiff.snapshotINode != null)
        {
            if (dirCopy == null)
            {
                // remember the earliest captured metadata copy
                dirCopy = sdiff.snapshotINode;
            }
            else
            {
                if (!dirCopy.MetadataEquals(sdiff.snapshotINode))
                {
                    dirMetadataChanged = true;
                }
            }
        }
    }
    if (!diff.IsEmpty() || dirMetadataChanged)
    {
        return (true);
    }
    else
    {
        if (dirCopy != null)
        {
            // no change found inside the range: the metadata may still differ
            // from a later diff's copy or from the current directory itself
            for (int i_1 = laterDiffIndex; i_1 < difflist.Count; i_1++)
            {
                if (!dirCopy.MetadataEquals(difflist[i_1].snapshotINode))
                {
                    return (true);
                }
            }
            return (!dirCopy.MetadataEquals(currentINode));
        }
        else
        {
            return (false);
        }
    }
}
/// <summary>
/// Setting or clearing quota on a snapshottable directory must not create
/// spurious directory diffs, must not strip the snapshottable feature, and
/// must preserve existing snapshot diffs.
/// </summary>
public virtual void TestClearQuota()
{
    Path dir = new Path("/TestSnapshot");
    hdfs.Mkdirs(dir);
    hdfs.AllowSnapshot(dir);
    // quota operations before any snapshot exists: still snapshottable, no diffs
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet, HdfsConstants.QuotaDontSet);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet - 1, HdfsConstants.QuotaDontSet - 1);
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
    // allow snapshot on dir and create snapshot s1
    SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
    // clear quota of dir
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    // dir should still be a snapshottable directory
    dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
    NUnit.Framework.Assert.AreEqual(1, dirNode.GetDiffs().AsList().Count);
    SnapshottableDirectoryStatus[] status = hdfs.GetSnapshottableDirListing();
    NUnit.Framework.Assert.AreEqual(1, status.Length);
    NUnit.Framework.Assert.AreEqual(dir, status[0].GetFullPath());
    // create a file under a sub-dir after snapshot s2; clearing quota again
    // must leave exactly one diff on the sub-dir recording the created file
    Path subDir = new Path(dir, "sub");
    hdfs.Mkdirs(subDir);
    hdfs.CreateSnapshot(dir, "s2");
    Path file = new Path(subDir, "file");
    DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
    hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
    INode subNode = fsdir.GetINode4Write(subDir.ToString());
    NUnit.Framework.Assert.IsTrue(subNode.AsDirectory().IsWithSnapshot());
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffList = subNode.AsDirectory().GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(1, diffList.Count);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s2 = dirNode.GetSnapshot(DFSUtil.String2Bytes("s2"));
    NUnit.Framework.Assert.AreEqual(s2.GetId(), diffList[0].GetSnapshotId());
    IList<INode> createdList = diffList[0].GetChildrenDiff().GetList(Diff.ListType.Created);
    NUnit.Framework.Assert.AreEqual(1, createdList.Count);
    // the diff's created entry must be the very inode in the current tree
    NUnit.Framework.Assert.AreSame(fsdir.GetINode4Write(file.ToString()), createdList[0]);
}
/// <summary>Deserialize one FileDiff record from an fsimage stream.</summary>
/// <param name="posterior">The diff that chronologically follows the one being read.</param>
/// <param name="in">Image input stream positioned at the start of the record.</param>
/// <param name="loader">Loader used to resolve the snapshot and inode attributes.</param>
/// <returns>The reconstructed FileDiff.</returns>
/// <exception cref="System.IO.IOException"/>
private static FileDiff LoadFileDiff(FileDiff posterior, DataInput @in, FSImageFormat.Loader loader)
{
    // 1. the snapshot root id identifies which snapshot this diff belongs to
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot associatedSnapshot = loader.GetSnapshot(@in);
    // 2. the file length captured at that snapshot
    long sizeAtSnapshot = @in.ReadLong();
    // 3. an optional inode-attribute copy, preceded by a presence flag
    INodeFileAttributes inodeCopy = null;
    if (@in.ReadBoolean())
    {
        inodeCopy = loader.LoadINodeFileAttributes(@in);
    }
    return new FileDiff(associatedSnapshot.GetId(), inodeCopy, posterior, sizeAtSnapshot);
}
/// <summary>
/// Load the snapshotINode field of
/// <see cref="AbstractINodeDiff{N, A, D}"/>
/// .
/// </summary>
/// <param name="snapshot">
/// The Snapshot associated with the
/// <see cref="AbstractINodeDiff{N, A, D}"/>
/// .
/// </param>
/// <param name="in">
/// The
/// <see cref="System.IO.DataInput"/>
/// to read.
/// </param>
/// <param name="loader">
/// The
/// <see cref="Loader"/>
/// instance that this loading procedure is using.
/// </param>
/// <returns>The snapshotINode, or null when none was serialized.</returns>
/// <exception cref="System.IO.IOException"/>
private static INodeDirectoryAttributes LoadSnapshotINodeInDirectoryDiff(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot, DataInput @in, FSImageFormat.Loader loader)
{
    // first flag: the snapshot root itself serves as the snapshotINode
    if (@in.ReadBoolean())
    {
        return snapshot.GetRoot();
    }
    // second flag: whether a serialized attribute copy follows
    if (@in.ReadBoolean())
    {
        return loader.LoadINodeDirectoryAttributes(@in);
    }
    return null;
}
/// <summary>Find the snapshot matching the given name.</summary>
/// <param name="snapshotRoot">The directory where snapshots were taken.</param>
/// <param name="snapshotName">The name of the snapshot.</param>
/// <returns>The corresponding snapshot. Null if snapshotName is null or empty.</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException">
/// If snapshotName is not null or empty, but there
/// is no snapshot matching the name.
/// </exception>
private Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot GetSnapshotByName(INodeDirectory snapshotRoot, string snapshotName)
{
    // a null/empty name denotes "no snapshot" and maps to null
    if (snapshotName == null || snapshotName.IsEmpty())
    {
        return null;
    }
    int index = SearchSnapshot(DFSUtil.String2Bytes(snapshotName));
    if (index < 0)
    {
        throw new SnapshotException("Cannot find the snapshot of directory " + snapshotRoot.GetFullPathName() + " with name " + snapshotName);
    }
    return snapshotsByNames[index];
}
// NOTE(review): this chunk begins mid-signature — the tokens before '>' (the
// declared return type, presumably IDictionary<int, Snapshot>, plus any
// modifiers) live outside this view; confirm against the full file.
// Restores the snapshot-id counter, the snapshot count, and every serialized
// Snapshot, returning them keyed by snapshot id.
> Read(DataInput @in, FSImageFormat.Loader loader)
{
    // restore the counters saved in the fsimage header for this section
    snapshotCounter = @in.ReadInt();
    numSnapshots.Set(@in.ReadInt());
    // read snapshots
    IDictionary<int, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshotMap = new Dictionary<int, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot>();
    for (int i = 0; i < numSnapshots.Get(); i++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.Read(@in, loader);
        snapshotMap[s.GetId()] = s;
    }
    return (snapshotMap);
}
/// <summary>
/// Compute the difference between two snapshots (or a snapshot and the current
/// directory) of the directory.
/// </summary>
/// <param name="snapshotRoot">The snapshottable directory the snapshots belong to.</param>
/// <param name="from">
/// The name of the start point of the comparison. Null indicating
/// the current tree.
/// </param>
/// <param name="to">The name of the end point. Null indicating the current tree.</param>
/// <returns>The difference between the start/end points, or null when they are equal.</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException">
/// If there is no snapshot matching the starting
/// point, or if endSnapshotName is not null but cannot be identified
/// as a previous snapshot.
/// </exception>
internal virtual SnapshotDiffInfo ComputeDiff(INodeDirectory snapshotRoot, string from, string to)
{
    // resolve names first so invalid names fail with SnapshotException before
    // the (possibly trivial) equality short-circuit below
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot fromSnapshot = GetSnapshotByName(snapshotRoot, from);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot toSnapshot = GetSnapshotByName(snapshotRoot, to);
    // if the start point is equal to the end point, return null.
    // string.Equals is used instead of from.Equals(to): both names may
    // legitimately be null (null denotes the current tree, see param docs),
    // and from.Equals(to) would throw a NullReferenceException then.
    if (string.Equals(from, to))
    {
        return (null);
    }
    SnapshotDiffInfo diffs = new SnapshotDiffInfo(snapshotRoot, fromSnapshot, toSnapshot);
    ComputeDiffRecursively(snapshotRoot, snapshotRoot, new AList<byte[]>(), diffs);
    return (diffs);
}
/// <summary>
/// Load the serialized snapshot list, registering each snapshot under its
/// snapshottable parent directory and in the id-keyed snapshotMap.
/// </summary>
/// <param name="in">The image input stream.</param>
/// <param name="size">Number of snapshot records to read.</param>
/// <exception cref="System.IO.IOException"/>
private void LoadSnapshots(InputStream @in, int size)
{
    for (int i = 0; i < size; i++)
    {
        FsImageProto.SnapshotSection.Snapshot pbs = FsImageProto.SnapshotSection.Snapshot.ParseDelimitedFrom(@in);
        INodeDirectory root = FSImageFormatPBINode.Loader.LoadINodeDirectory(pbs.GetRoot(), parent.GetLoaderContext());
        int sid = pbs.GetSnapshotId();
        // renamed from 'parent': a local named 'parent' would conflict with the
        // enclosing 'parent' field used above (C# CS0841 — a simple name cannot
        // be used before the local of the same name is declared)
        INodeDirectory parentDir = fsDir.GetInode(root.GetId()).AsDirectory();
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(sid, root, parentDir);
        // add the snapshot to parent, since we follow the sequence of
        // snapshotsByNames when saving, we do not need to sort when loading
        parentDir.GetDirectorySnapshottableFeature().AddSnapshot(snapshot);
        snapshotMap[sid] = snapshot;
    }
}
/// <summary>Load snapshots and snapshotQuota for a Snapshottable directory.</summary>
/// <param name="snapshottableParent">The snapshottable directory for loading.</param>
/// <param name="numSnapshots">The number of snapshots that the directory has.</param>
/// <param name="in">The image input to read the snapshots and quota from.</param>
/// <param name="loader">The loader</param>
/// <exception cref="System.IO.IOException"/>
public static void LoadSnapshotList(INodeDirectory snapshottableParent, int numSnapshots, DataInput @in, FSImageFormat.Loader loader)
{
    DirectorySnapshottableFeature sf = snapshottableParent.GetDirectorySnapshottableFeature();
    // only directories already marked snapshottable may be loaded here
    Preconditions.CheckArgument(sf != null);
    for (int i = 0; i < numSnapshots; i++)
    {
        // read snapshots
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = loader.GetSnapshot(@in);
        // re-attach the snapshot root under its snapshottable parent
        s.GetRoot().SetParent(snapshottableParent);
        sf.AddSnapshot(s);
    }
    // the quota is serialized immediately after the snapshot list
    int snapshotQuota = @in.ReadInt();
    snapshottableParent.SetSnapshotQuota(snapshotQuota);
}
/// <summary>
/// Whether the given file changed between the two snapshots, either in file
/// length or in inode metadata.
/// </summary>
/// <param name="file">The file the diff list belongs to.</param>
/// <param name="from">One end point of the comparison (order-insensitive).</param>
/// <param name="to">The other end point of the comparison.</param>
/// <returns>True when length or metadata differs between the two snapshots.</returns>
internal virtual bool ChangedBetweenSnapshots(INodeFile file, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot from, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot to)
{
    int[] diffIndexPair = diffs.ChangedBetweenSnapshots(from, to);
    if (diffIndexPair == null)
    {
        // nothing recorded between the two snapshots
        return (false);
    }
    int earlierDiffIndex = diffIndexPair[0];
    int laterDiffIndex = diffIndexPair[1];
    IList<FileDiff> diffList = diffs.AsList();
    // laterDiffIndex == diffList.Count means "the current file state"
    long earlierLength = diffList[earlierDiffIndex].GetFileSize();
    long laterLength = laterDiffIndex == diffList.Count ? file.ComputeFileSize(true, false) : diffList[laterDiffIndex].GetFileSize();
    if (earlierLength != laterLength)
    {
        // file length has been changed
        return (true);
    }
    INodeFileAttributes earlierAttr = null;
    // check the metadata: find the first captured inode copy in the range
    for (int i = earlierDiffIndex; i < laterDiffIndex; i++)
    {
        FileDiff diff = diffList[i];
        if (diff.snapshotINode != null)
        {
            earlierAttr = diff.snapshotINode;
            break;
        }
    }
    if (earlierAttr == null)
    {
        // no meta-change at all, return false
        return (false);
    }
    // compare against the state visible at the later of the two snapshots
    // (Max of the ids; a null snapshot maps to CurrentStateId)
    INodeFileAttributes laterAttr = diffs.GetSnapshotINode(Math.Max(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(from), Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(to)), file);
    return (!earlierAttr.MetadataEquals(laterAttr));
}
/// <summary>Rename a snapshot</summary>
/// <param name="path">
/// The directory path where the snapshot was taken. Used for
/// generating exception message.
/// </param>
/// <param name="oldName">Old name of the snapshot</param>
/// <param name="newName">New name the snapshot will be renamed to</param>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException">
/// Throw SnapshotException when either the snapshot with the old
/// name does not exist or a snapshot with the new name already
/// exists
/// </exception>
public virtual void RenameSnapshot(string path, string oldName, string newName)
{
    if (newName.Equals(oldName))
    {
        // renaming to the same name is a no-op
        return;
    }
    int indexOfOld = SearchSnapshot(DFSUtil.String2Bytes(oldName));
    if (indexOfOld < 0)
    {
        throw new SnapshotException("The snapshot " + oldName + " does not exist for directory " + path);
    }
    else
    {
        byte[] newNameBytes = DFSUtil.String2Bytes(newName);
        int indexOfNew = SearchSnapshot(newNameBytes);
        if (indexOfNew >= 0)
        {
            throw new SnapshotException("The snapshot " + newName + " already exists for directory " + path);
        }
        // remove the one with old name from snapshotsByNames
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = snapshotsByNames.Remove(indexOfOld);
        INodeDirectory ssRoot = snapshot.GetRoot();
        ssRoot.SetLocalName(newNameBytes);
        // a negative search result encodes the insertion point as (-indexOfNew - 1)
        indexOfNew = -indexOfNew - 1;
        if (indexOfNew <= indexOfOld)
        {
            snapshotsByNames.Add(indexOfNew, snapshot);
        }
        else
        {
            // indexOfNew > indexOfOld
            // the removal above shifted later entries left by one position
            snapshotsByNames.Add(indexOfNew - 1, snapshot);
        }
    }
}
/// <summary>
/// Remove the snapshot with the given name from
/// <see cref="snapshotsByNames"/>
/// ,
/// and delete all the corresponding DirectoryDiff.
/// </summary>
/// <param name="bsps">Block storage policy suite used while cleaning the subtree.</param>
/// <param name="snapshotRoot">The directory where we take snapshots</param>
/// <param name="snapshotName">The name of the snapshot to be removed</param>
/// <param name="collectedBlocks">Used to collect information to update blocksMap</param>
/// <param name="removedINodes">Collects the inodes removed while cleaning the subtree.</param>
/// <returns>The removed snapshot.</returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException">
/// When no snapshot with the given name exists under the root.
/// </exception>
public virtual Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot RemoveSnapshot(BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot, string snapshotName, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
{
    int i = SearchSnapshot(DFSUtil.String2Bytes(snapshotName));
    if (i < 0)
    {
        throw new SnapshotException("Cannot delete snapshot " + snapshotName + " from path " + snapshotRoot.GetFullPathName() + ": the snapshot does not exist.");
    }
    else
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = snapshotsByNames[i];
        // NOTE(review): presumably yields the id of the snapshot immediately
        // preceding the one being deleted, for subtree cleaning — confirm
        int prior = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.FindLatestSnapshot(snapshotRoot, snapshot.GetId());
        try
        {
            QuotaCounts counts = snapshotRoot.CleanSubtree(bsps, snapshot.GetId(), prior, collectedBlocks, removedINodes);
            INodeDirectory parent = snapshotRoot.GetParent();
            if (parent != null)
            {
                // there will not be any WithName node corresponding to the deleted
                // snapshot, thus only update the quota usage in the current tree
                parent.AddSpaceConsumed(counts.Negation(), true);
            }
        }
        catch (QuotaExceededException e)
        {
            // releasing space should never exceed quota; log instead of failing
            INode.Log.Error("BUG: removeSnapshot increases namespace usage.", e);
        }
        // remove from snapshotsByNames after successfully cleaning the subtree
        snapshotsByNames.Remove(i);
        return (snapshot);
    }
}
/// <summary>
/// Load
/// <see cref="DirectoryDiff"/>
/// from fsimage.
/// </summary>
/// <param name="parent">The directory that the SnapshotDiff belongs to.</param>
/// <param name="in">
/// The
/// <see cref="System.IO.DataInput"/>
/// instance to read.
/// </param>
/// <param name="loader">
/// The
/// <see cref="Loader"/>
/// instance that this loading procedure is
/// using.
/// </param>
/// <returns>
/// A
/// <see cref="DirectoryDiff"/>
/// .
/// </returns>
/// <exception cref="System.IO.IOException"/>
private static DirectoryWithSnapshotFeature.DirectoryDiff LoadDirectoryDiff(INodeDirectory parent, DataInput @in, FSImageFormat.Loader loader)
{
    // 1. Read the full path of the Snapshot root to identify the Snapshot
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = loader.GetSnapshot(@in);
    // 2. Load DirectoryDiff#childrenSize
    int childrenSize = @in.ReadInt();
    // 3. Load DirectoryDiff#snapshotINode
    INodeDirectoryAttributes snapshotINode = LoadSnapshotINodeInDirectoryDiff(snapshot, @in, loader);
    // 4. Load the created list in SnapshotDiff#Diff
    IList<INode> createdList = LoadCreatedList(parent, @in);
    // 5. Load the deleted list in SnapshotDiff#Diff
    IList<INode> deletedList = LoadDeletedList(parent, createdList, @in, loader);
    // 6. Compose the SnapshotDiff
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffs = parent.GetDiffs().AsList();
    // diffs[0] (when present) is passed as the posterior diff; the final
    // argument flags (by reference equality) whether the snapshot root itself
    // was used as the snapshotINode
    DirectoryWithSnapshotFeature.DirectoryDiff sdiff = new DirectoryWithSnapshotFeature.DirectoryDiff(snapshot.GetId(), snapshotINode, diffs.IsEmpty() ? null : diffs[0], childrenSize, createdList, deletedList, snapshotINode == snapshot.GetRoot());
    return (sdiff);
}
/// <summary>Check the correctness of snapshot list within snapshottable dir</summary>
/// <param name="srcRoot">The snapshottable directory under test.</param>
/// <param name="sortedNames">Expected snapshot names in name order.</param>
/// <param name="names">Expected snapshot names in diff-list (creation) order.</param>
private void CheckSnapshotList(INodeDirectory srcRoot, string[] sortedNames, string[] names)
{
    NUnit.Framework.Assert.IsTrue(srcRoot.IsSnapshottable());
    // the name-ordered snapshot list must match sortedNames exactly
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> listByName = srcRoot.GetDirectorySnapshottableFeature().GetSnapshotList();
    NUnit.Framework.Assert.AreEqual(sortedNames.Length, listByName.Size());
    for (int idx = 0; idx < listByName.Size(); idx++)
    {
        NUnit.Framework.Assert.AreEqual(sortedNames[idx], listByName.Get(idx).GetRoot().GetLocalName());
    }
    // the diff list order must match names, resolved through the snapshot ids
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> listByTime = srcRoot.GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(names.Length, listByTime.Count);
    for (int idx = 0; idx < listByTime.Count; idx++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = srcRoot.GetDirectorySnapshottableFeature().GetSnapshotById(listByTime[idx].GetSnapshotId());
        NUnit.Framework.Assert.AreEqual(names[idx], s.GetRoot().GetLocalName());
    }
}
/// <summary>Map a snapshot to its id.</summary>
/// <param name="s">The snapshot; may be null.</param>
/// <returns>The snapshot's id, or CurrentStateId when <paramref name="s"/> is null.</returns>
public static int GetSnapshotId(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s)
{
    if (s == null)
    {
        // a null snapshot denotes the current (non-snapshot) state
        return CurrentStateId;
    }
    return s.GetId();
}
/// <summary>Get the name of the given snapshot.</summary>
/// <param name="s">The given snapshot; may be null.</param>
/// <returns>
/// The local name of the snapshot's root, or an empty string if
/// <paramref name="s"/>
/// is null
/// </returns>
internal static string GetSnapshotName(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s)
{
    if (s == null)
    {
        return string.Empty;
    }
    return s.GetRoot().GetLocalName();
}
/// <summary>Convert a snapshot into its JMX bean representation.</summary>
/// <param name="s">The snapshot to convert; must be non-null.</param>
/// <returns>A bean carrying the root's local name, full path and modification time.</returns>
public static SnapshotInfo.Bean ToBean(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s)
{
    // hoist the root lookup instead of calling GetRoot() three times
    var root = s.GetRoot();
    return new SnapshotInfo.Bean(root.GetLocalName(), root.GetFullPathName(), root.GetModificationTime());
}
/// <summary>Load DirectoryDiff list for a directory with snapshot feature</summary>
/// <param name="in">The image input stream.</param>
/// <param name="dir">The directory the diffs belong to.</param>
/// <param name="size">Number of serialized DirectoryDiff records to read.</param>
/// <param name="refList">Reference list used to resolve deleted inode references.</param>
/// <exception cref="System.IO.IOException"/>
private void LoadDirectoryDiffList(InputStream @in, INodeDirectory dir, int size, IList<INodeReference> refList)
{
    // make sure the directory carries the snapshot feature before adding diffs
    if (!dir.IsWithSnapshot())
    {
        dir.AddSnapshotFeature(null);
    }
    DirectoryWithSnapshotFeature.DirectoryDiffList diffs = dir.GetDiffs();
    FSImageFormatProtobuf.LoaderContext state = parent.GetLoaderContext();
    for (int i = 0; i < size; i++)
    {
        // load a directory diff
        FsImageProto.SnapshotDiffSection.DirectoryDiff diffInPb = FsImageProto.SnapshotDiffSection.DirectoryDiff.ParseDelimitedFrom(@in);
        int snapshotId = diffInPb.GetSnapshotId();
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = snapshotMap[snapshotId];
        int childrenSize = diffInPb.GetChildrenSize();
        bool useRoot = diffInPb.GetIsSnapshotRoot();
        INodeDirectoryAttributes copy = null;
        if (useRoot)
        {
            // the snapshot root itself serves as the inode-attribute copy
            copy = snapshot.GetRoot();
        }
        else
        {
            if (diffInPb.HasSnapshotCopy())
            {
                // rebuild the serialized directory-attribute copy: name,
                // permission, optional ACLs/xattrs, mtime and optional quotas
                FsImageProto.INodeSection.INodeDirectory dirCopyInPb = diffInPb.GetSnapshotCopy();
                byte[] name = diffInPb.GetName().ToByteArray();
                PermissionStatus permission = FSImageFormatPBINode.Loader.LoadPermission(dirCopyInPb.GetPermission(), state.GetStringTable());
                AclFeature acl = null;
                if (dirCopyInPb.HasAcl())
                {
                    int[] entries = AclEntryStatusFormat.ToInt(FSImageFormatPBINode.Loader.LoadAclEntries(dirCopyInPb.GetAcl(), state.GetStringTable()));
                    acl = new AclFeature(entries);
                }
                XAttrFeature xAttrs = null;
                if (dirCopyInPb.HasXAttrs())
                {
                    xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.LoadXAttrs(dirCopyInPb.GetXAttrs(), state.GetStringTable()));
                }
                long modTime = dirCopyInPb.GetModificationTime();
                // -1 for both quotas and no per-type quotas means "no quota copy"
                bool noQuota = dirCopyInPb.GetNsQuota() == -1 && dirCopyInPb.GetDsQuota() == -1 && (!dirCopyInPb.HasTypeQuotas());
                if (noQuota)
                {
                    copy = new INodeDirectoryAttributes.SnapshotCopy(name, permission, acl, modTime, xAttrs);
                }
                else
                {
                    EnumCounters<StorageType> typeQuotas = null;
                    if (dirCopyInPb.HasTypeQuotas())
                    {
                        ImmutableList<QuotaByStorageTypeEntry> qes = FSImageFormatPBINode.Loader.LoadQuotaByStorageTypeEntries(dirCopyInPb.GetTypeQuotas());
                        typeQuotas = new EnumCounters<StorageType>(typeof(StorageType), HdfsConstants.QuotaReset);
                        foreach (QuotaByStorageTypeEntry qe in qes)
                        {
                            // only keep quotas for storage types that support them
                            if (qe.GetQuota() >= 0 && qe.GetStorageType() != null && qe.GetStorageType().SupportTypeQuota())
                            {
                                typeQuotas.Set(qe.GetStorageType(), qe.GetQuota());
                            }
                        }
                    }
                    copy = new INodeDirectoryAttributes.CopyWithQuota(name, permission, acl, modTime, dirCopyInPb.GetNsQuota(), dirCopyInPb.GetDsQuota(), typeQuotas, xAttrs);
                }
            }
        }
        // load created list
        IList<INode> clist = LoadCreatedList(@in, dir, diffInPb.GetCreatedListSize());
        // load deleted list
        IList<INode> dlist = LoadDeletedList(refList, @in, dir, diffInPb.GetDeletedINodeList(), diffInPb.GetDeletedINodeRefList());
        // create the directory diff
        DirectoryWithSnapshotFeature.DirectoryDiff diff = new DirectoryWithSnapshotFeature.DirectoryDiff(snapshotId, copy, null, childrenSize, clist, dlist, useRoot);
        // insert at the head of the diff list
        diffs.AddFirst(diff);
    }
}
/// <summary>
/// Simply append a snapshot to
/// <see cref="snapshotsByNames"/>
/// without any ordering or quota checks. Used when loading fsimage, which
/// serializes snapshots in snapshotsByNames order already.
/// </summary>
/// <param name="snapshot">The snapshot to append.</param>
internal virtual void AddSnapshot(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot)
{
    snapshotsByNames.AddItem(snapshot);
}
/// <summary>
/// Recursively compute the difference between snapshots under a given
/// directory/file.
/// </summary>
/// <param name="snapshotRoot">The directory where snapshots were taken.</param>
/// <param name="node">The directory/file under which the diff is computed.</param>
/// <param name="parentPath">
/// Relative path (corresponding to the snapshot root) of
/// the node's parent.
/// </param>
/// <param name="diffReport">data structure used to store the diff.</param>
private void ComputeDiffRecursively(INodeDirectory snapshotRoot, INode node, IList<byte[]> parentPath, SnapshotDiffInfo diffReport)
{
    // normalize to chronological order regardless of how from/to were given
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot earlierSnapshot = diffReport.IsFromEarlier() ? diffReport.GetFrom() : diffReport.GetTo();
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot laterSnapshot = diffReport.IsFromEarlier() ? diffReport.GetTo() : diffReport.GetFrom();
    byte[][] relativePath = Sharpen.Collections.ToArray(parentPath, new byte[parentPath.Count][]);
    if (node.IsDirectory())
    {
        DirectoryWithSnapshotFeature.ChildrenDiff diff = new DirectoryWithSnapshotFeature.ChildrenDiff();
        INodeDirectory dir = node.AsDirectory();
        DirectoryWithSnapshotFeature sf = dir.GetDirectoryWithSnapshotFeature();
        if (sf != null)
        {
            bool change = sf.ComputeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff, dir);
            if (change)
            {
                diffReport.AddDirDiff(dir, relativePath, diff);
            }
        }
        // walk the children as they existed at the earlier snapshot
        ReadOnlyList<INode> children = dir.GetChildrenList(earlierSnapshot.GetId());
        foreach (INode child in children)
        {
            byte[] name = child.GetLocalNameBytes();
            // children recorded as deleted are normally skipped...
            bool toProcess = diff.SearchIndex(Diff.ListType.Deleted, name) < 0;
            if (!toProcess && child is INodeReference.WithName)
            {
                // ...unless the "deletion" was really a rename whose target is
                // still reachable at the later snapshot (or the current state
                // when laterSnapshot is null)
                byte[][] renameTargetPath = FindRenameTargetPath(snapshotRoot, (INodeReference.WithName)child, laterSnapshot == null ? Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId : laterSnapshot.GetId());
                if (renameTargetPath != null)
                {
                    toProcess = true;
                    diffReport.SetRenameTarget(child.GetId(), renameTargetPath);
                }
            }
            if (toProcess)
            {
                parentPath.AddItem(name);
                ComputeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                // backtrack: restore parentPath for the next sibling
                parentPath.Remove(parentPath.Count - 1);
            }
        }
    }
    else
    {
        if (node.IsFile() && node.AsFile().IsWithSnapshot())
        {
            INodeFile file = node.AsFile();
            bool change = file.GetFileWithSnapshotFeature().ChangedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
            if (change)
            {
                diffReport.AddFileDiff(file, relativePath);
            }
        }
    }
}