/// <summary>
/// Builds a JMX bean view of a snapshottable directory: its full path, current
/// snapshot count, snapshot quota, modification time, permission, owner and group.
/// </summary>
/// <param name="d">a directory that carries a DirectorySnapshottableFeature</param>
/// <returns>a populated <see cref="SnapshottableDirectoryStatus.Bean"/></returns>
// NOTE(review): Sharpen-generated translation. The permission short is first
// rendered as an octal string and then parsed back via short.ValueOf, mirroring
// the Java original's Short.valueOf(toOctalString(...)) — so the stored value is
// the octal digits re-read as a number, not the raw permission bits. Confirm this
// matches the upstream bean contract before changing it.
public static SnapshottableDirectoryStatus.Bean ToBean(INodeDirectory d) { return(new SnapshottableDirectoryStatus.Bean(d.GetFullPathName(), d.GetDirectorySnapshottableFeature ().GetNumSnapshots(), d.GetDirectorySnapshottableFeature().GetSnapshotQuota(), d .GetModificationTime(), short.ValueOf(Sharpen.Extensions.ToOctalString(d.GetFsPermissionShort ())), d.GetUserName(), d.GetGroupName())); }
/// <summary>Set the given snapshottable directory to non-snapshottable.</summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException">if there are snapshots in the directory.
/// </exception>
/// <exception cref="System.IO.IOException"/>
public virtual void ResetSnapshottable(string path)
{
    INodesInPath iip = fsdir.GetINodesInPath4Write(path);
    INodeDirectory dir = INodeDirectory.ValueOf(iip.GetLastINode(), path);
    DirectorySnapshottableFeature feature = dir.GetDirectorySnapshottableFeature();
    if (feature == null)
    {
        // Already non-snapshottable; nothing to undo.
        return;
    }
    if (feature.GetNumSnapshots() > 0)
    {
        // Refuse while snapshots still exist under this directory.
        throw new SnapshotException("The directory " + path + " has snapshot(s). " + "Please redo the operation after removing all the snapshots."
            );
    }
    if (dir == fsdir.GetRoot())
    {
        // The root directory can never drop the snapshottable feature; it is
        // "reset" by zeroing the quota instead.
        dir.SetSnapshotQuota(0);
    }
    else
    {
        dir.RemoveSnapshottableFeature();
    }
    RemoveSnapshottable(dir);
}
/// <summary>Check the correctness of snapshot list within snapshottable dir</summary>
/// <param name="srcRoot">the snapshottable directory under test</param>
/// <param name="sortedNames">expected snapshot names in name order</param>
/// <param name="names">expected snapshot names in creation-time order</param>
private void CheckSnapshotList(INodeDirectory srcRoot, string[] sortedNames, string[] names)
{
    NUnit.Framework.Assert.IsTrue(srcRoot.IsSnapshottable());
    // The by-name snapshot list must match the expected sorted names.
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> byName = srcRoot.GetDirectorySnapshottableFeature().GetSnapshotList();
    NUnit.Framework.Assert.AreEqual(sortedNames.Length, byName.Size());
    for (int idx = 0; idx < byName.Size(); idx++)
    {
        NUnit.Framework.Assert.AreEqual(sortedNames[idx], byName.Get(idx).GetRoot().GetLocalName());
    }
    // Directory diffs are kept in creation-time order: resolve each diff's
    // snapshot id back to its snapshot and compare against the expected names.
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> byTime = srcRoot.GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(names.Length, byTime.Count);
    for (int pos = 0; pos < byTime.Count; pos++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snap = srcRoot.GetDirectorySnapshottableFeature().GetSnapshotById(byTime[pos].GetSnapshotId());
        NUnit.Framework.Assert.AreEqual(names[pos], snap.GetRoot().GetLocalName());
    }
}
/// <summary>
/// Load <paramref name="size"/> snapshots from the fsimage snapshot section:
/// for each, parse the protobuf record, rebuild the snapshot root inode, attach
/// the snapshot to its snapshottable parent directory, and record it in
/// snapshotMap keyed by snapshot id.
/// </summary>
/// <param name="in">input stream positioned at the serialized snapshot records</param>
/// <param name="size">number of snapshot records to read</param>
/// <exception cref="System.IO.IOException"/>
private void LoadSnapshots(InputStream @in, int size)
{
    for (int i = 0; i < size; i++)
    {
        FsImageProto.SnapshotSection.Snapshot pbs = FsImageProto.SnapshotSection.Snapshot.ParseDelimitedFrom(@in);
        INodeDirectory root = FSImageFormatPBINode.Loader.LoadINodeDirectory(pbs.GetRoot(), parent.GetLoaderContext());
        int sid = pbs.GetSnapshotId();
        // FIX: this local was named "parent" in the Sharpen translation, which
        // collides with the enclosing "parent" field referenced above. Java
        // permits that shadowing; C# rejects it (CS0135/CS0844). Renamed to
        // snapshottableDir with identical behavior.
        INodeDirectory snapshottableDir = fsDir.GetInode(root.GetId()).AsDirectory();
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(sid, root, snapshottableDir);
        // add the snapshot to parent, since we follow the sequence of
        // snapshotsByNames when saving, we do not need to sort when loading
        snapshottableDir.GetDirectorySnapshottableFeature().AddSnapshot(snapshot);
        snapshotMap[sid] = snapshot;
    }
}
/// <summary>Save snapshots and snapshot quota for a snapshottable directory.</summary>
/// <param name="current">The directory that the snapshots belongs to.</param>
/// <param name="out">
/// The
/// <see cref="System.IO.DataOutput"/>
/// to write.
/// </param>
/// <exception cref="System.IO.IOException"/>
public static void SaveSnapshots(INodeDirectory current, DataOutput @out)
{
    DirectorySnapshottableFeature feature = current.GetDirectorySnapshottableFeature();
    Preconditions.CheckArgument(feature != null);
    // Emit the snapshot count followed by each snapshot id, preserving the
    // order maintained by snapshotsByNames.
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> list = feature.GetSnapshotList();
    @out.WriteInt(list.Size());
    foreach (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot in list)
    {
        @out.WriteInt(snapshot.GetId());
    }
    // Trailing int: the directory's snapshot quota.
    @out.WriteInt(feature.GetSnapshotQuota());
}
/// <summary>
/// Compute the difference between two snapshots of a directory, or between a
/// snapshot of the directory and its current tree.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual SnapshotDiffReport Diff(INodesInPath iip, string snapshotRootPath, string from, string to)
{
    // Find the source root directory path where the snapshots were taken.
    // All the check for path has been included in the valueOf method.
    INodeDirectory snapshotRoot = GetSnapshottableRoot(iip);
    bool fromIsCurrent = string.IsNullOrEmpty(from);
    bool toIsCurrent = string.IsNullOrEmpty(to);
    if (fromIsCurrent && toIsCurrent)
    {
        // Both endpoints name the current tree, so the diff is trivially empty.
        return new SnapshotDiffReport(snapshotRootPath, from, to, Sharpen.Collections.EmptyList<SnapshotDiffReport.DiffReportEntry>());
    }
    SnapshotDiffInfo diffs = snapshotRoot.GetDirectorySnapshottableFeature().ComputeDiff(snapshotRoot, from, to);
    if (diffs == null)
    {
        // No recorded differences: report an empty diff.
        return new SnapshotDiffReport(snapshotRootPath, from, to, Sharpen.Collections.EmptyList<SnapshotDiffReport.DiffReportEntry>());
    }
    return diffs.GenerateReport();
}
/// <summary>Load snapshots and snapshotQuota for a Snapshottable directory.</summary>
/// <param name="snapshottableParent">The snapshottable directory for loading.</param>
/// <param name="numSnapshots">The number of snapshots that the directory has.</param>
/// <param name="in">The input stream to read serialized snapshots from.</param>
/// <param name="loader">The loader</param>
/// <exception cref="System.IO.IOException"/>
public static void LoadSnapshotList(INodeDirectory snapshottableParent, int numSnapshots, DataInput @in, FSImageFormat.Loader loader)
{
    DirectorySnapshottableFeature feature = snapshottableParent.GetDirectorySnapshottableFeature();
    Preconditions.CheckArgument(feature != null);
    // Snapshots were saved in snapshotsByNames order, so re-adding them in
    // read order keeps the list sorted without an explicit sort.
    for (int count = 0; count < numSnapshots; count++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = loader.GetSnapshot(@in);
        snapshot.GetRoot().SetParent(snapshottableParent);
        feature.AddSnapshot(snapshot);
    }
    // The serialized form ends with the directory's snapshot quota.
    snapshottableParent.SetSnapshotQuota(@in.ReadInt());
}