public virtual void TestSnapshotOnRoot()
{
    Path root = new Path("/");
    hdfs.AllowSnapshot(root);
    hdfs.CreateSnapshot(root, "s1");
    cluster.Shutdown();
    cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
    cluster.WaitActive();
    fsn = cluster.GetNamesystem();
    hdfs = cluster.GetFileSystem();
    // save namespace and restart cluster
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    hdfs.SaveNamespace();
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    cluster.Shutdown();
    cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
    cluster.WaitActive();
    fsn = cluster.GetNamesystem();
    hdfs = cluster.GetFileSystem();
    INodeDirectory rootNode = fsn.dir.GetINode4Write(root.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue("The children list of root should be empty",
        rootNode.GetChildrenList(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId).IsEmpty());
    // one snapshot on root: s1
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffList = rootNode.GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(1, diffList.Count);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s1 = rootNode.GetSnapshot(DFSUtil.String2Bytes("s1"));
    NUnit.Framework.Assert.AreEqual(s1.GetId(), diffList[0].GetSnapshotId());
    // check SnapshotManager's snapshottable directory list
    NUnit.Framework.Assert.AreEqual(1, fsn.GetSnapshotManager().GetNumSnapshottableDirs());
    SnapshottableDirectoryStatus[] sdirs = fsn.GetSnapshotManager().GetSnapshottableDirListing(null);
    NUnit.Framework.Assert.AreEqual(root, sdirs[0].GetFullPath());
    // save namespace and restart cluster
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    hdfs.SaveNamespace();
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    cluster.Shutdown();
    cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
    cluster.WaitActive();
    fsn = cluster.GetNamesystem();
    hdfs = cluster.GetFileSystem();
}
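// NOTE: a minimal sketch, not part of the original test class. The
// save-namespace-and-restart sequence above repeats verbatim; assuming the same
// conf/cluster/fsn/hdfs fields and the Replication constant used by
// TestSnapshotOnRoot, it could be factored into a helper like this:
private void SaveNamespaceAndRestartCluster()
{
    // enter safe mode so the namespace can be saved, then leave it again
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    hdfs.SaveNamespace();
    hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    // restart the cluster on the saved image (Format(false)) and re-acquire handles
    cluster.Shutdown();
    cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
    cluster.WaitActive();
    fsn = cluster.GetNamesystem();
    hdfs = cluster.GetFileSystem();
}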
/// <summary>Retrieve existing INodes from a path.</summary>
/// <remarks>
/// Retrieve existing INodes from a path. For a non-snapshot path,
/// the number of INodes is equal to the number of path components. For a
/// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
/// (number_of_path_components - 1).
/// An UnresolvedPathException is always thrown when an intermediate path
/// component refers to a symbolic link. If the final path component refers
/// to a symbolic link then an UnresolvedPathException is only thrown if
/// resolveLink is true.
/// <p>
/// Example: <br />
/// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
/// following path components: ["","c1","c2","c3"]
/// <p>
/// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
/// the array with [rootINode,c1,c2], <br />
/// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
/// fill the array with [rootINode,c1,c2,null]
/// </remarks>
/// <param name="startingDir">the starting directory</param>
/// <param name="components">array of path component names</param>
/// <param name="resolveLink">
/// indicates whether an UnresolvedLinkException should
/// be thrown when the path refers to a symbolic link.
/// </param>
/// <returns>the specified number of existing INodes in the path</returns>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(INodeDirectory startingDir, byte[][] components, bool resolveLink)
{
    Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
    INode curNode = startingDir;
    int count = 0;
    int inodeNum = 0;
    INode[] inodes = new INode[components.Length];
    bool isSnapshot = false;
    int snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    while (count < components.Length && curNode != null)
    {
        bool lastComp = (count == components.Length - 1);
        inodes[inodeNum++] = curNode;
        bool isRef = curNode.IsReference();
        bool isDir = curNode.IsDirectory();
        INodeDirectory dir = isDir ? curNode.AsDirectory() : null;
        if (!isRef && isDir && dir.IsWithSnapshot())
        {
            // if the path is a non-snapshot path, update the latest snapshot.
            if (!isSnapshot && ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(), snapshotId))
            {
                snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
            }
        }
        else if (isRef && isDir && !lastComp)
        {
            // If curNode is a reference node, we need to check its dstSnapshot:
            // 1. if the existing snapshot is no later than the dstSnapshot (which
            // is the latest snapshot in dst before the rename), the changes
            // should be recorded in previous snapshots (belonging to src).
            // 2. however, if the ref node is already the last component, we still
            // need to know the latest snapshot among the ref node's ancestors,
            // in case we are processing a deletion operation. Thus we do not
            // overwrite the latest snapshot if lastComp is true. If the operation
            // is a modification operation, a similar check is done in the
            // corresponding recordModification method.
            if (!isSnapshot)
            {
                int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                    // no snapshot in dst tree of rename
                    || (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                        && dstSnapshotId >= snapshotId))
                {
                    // the above scenario
                    int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                    DirectoryWithSnapshotFeature sf;
                    if (curNode.IsDirectory() && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null)
                    {
                        lastSnapshot = sf.GetLastSnapshotId();
                    }
                    snapshotId = lastSnapshot;
                }
            }
        }
        if (curNode.IsSymlink() && (!lastComp || resolveLink))
        {
            string path = ConstructPath(components, 0, components.Length);
            string preceding = ConstructPath(components, 0, count);
            string remainder = ConstructPath(components, count + 1, components.Length);
            string link = DFSUtil.Bytes2String(components[count]);
            string target = curNode.AsSymlink().GetSymlinkString();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                    + " count: " + count + " link: " + link + " target: " + target
                    + " remainder: " + remainder);
            }
            throw new UnresolvedPathException(path, preceding, remainder, target);
        }
        if (lastComp || !isDir)
        {
            break;
        }
        byte[] childName = components[count + 1];
        // check if the next byte[] in components is for ".snapshot"
        if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
        {
            // skip the ".snapshot" in components
            count++;
            isSnapshot = true;
            // check if ".snapshot" is the last element of components
            if (count == components.Length - 1)
            {
                break;
            }
            // Resolve snapshot root
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components[count + 1]);
            if (s == null)
            {
                curNode = null; // snapshot not found
            }
            else
            {
                curNode = s.GetRoot();
                snapshotId = s.GetId();
            }
        }
        else
        {
            // normal case, and also for resolving a file/dir under a snapshot root
            curNode = dir.GetChild(childName,
                isSnapshot ? snapshotId : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
        }
        count++;
    }
    if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
    {
        // for a snapshot path, shrink the inode array. however, for a path ending
        // with .snapshot, still keep the last (null) inode in the array
        INode[] newNodes = new INode[components.Length - 1];
        System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
        inodes = newNodes;
    }
    return new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components, isSnapshot, snapshotId);
}
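// Illustrative sketch only (hypothetical helper, not part of the original class):
// it walks through the <remarks> example above. `rootDir` is assumed to be the
// file system root INodeDirectory, under which only /c1/c2 exists, and
// INode.GetPathComponents is assumed to be the converted path-splitting helper.
internal static void ResolveExample(INodeDirectory rootDir)
{
    // "/c1/c2/c3" splits into the components ["", "c1", "c2", "c3"]
    byte[][] components = INode.GetPathComponents("/c1/c2/c3");
    // No ".snapshot" component and no symlinks, so Resolve fills the INode array
    // as [rootINode, c1, c2, null]: the slot for "c3" stays null because the loop
    // exits once GetChild returns null for the missing component.
    Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath iip = Resolve(rootDir, components, true);
}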