Example #1
        public virtual void TestSnapshotOnRoot()
        {
            Path root = new Path("/");

            hdfs.AllowSnapshot(root);
            hdfs.CreateSnapshot(root, "s1");
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
            // save namespace and restart cluster
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
            INodeDirectory rootNode = fsn.dir.GetINode4Write(root.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue("The children list of root should be empty", rootNode
                                          .GetChildrenList(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                                                           ).IsEmpty());
            // one snapshot on root: s1
            IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffList = rootNode.GetDiffs().AsList();

            NUnit.Framework.Assert.AreEqual(1, diffList.Count);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s1 =
                rootNode.GetSnapshot(DFSUtil.String2Bytes("s1"));
            NUnit.Framework.Assert.AreEqual(s1.GetId(), diffList[0].GetSnapshotId());
            // check SnapshotManager's snapshottable directory list
            NUnit.Framework.Assert.AreEqual(1, fsn.GetSnapshotManager().GetNumSnapshottableDirs());
            SnapshottableDirectoryStatus[] sdirs = fsn.GetSnapshotManager().GetSnapshottableDirListing(null);
            NUnit.Framework.Assert.AreEqual(root, sdirs[0].GetFullPath());
            // save namespace and restart cluster
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
        }
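
The save-namespace-and-restart sequence above is repeated twice; a minimal sketch of a hypothetical helper that factors it out (assuming the same conf, cluster, fsn, hdfs and Replication fixture members used by the test) could look like this:

        // Hypothetical helper, not part of the original test: optionally save the
        // namespace, then restart the MiniDFSCluster without reformatting it.
        private void RestartCluster(bool saveNamespace)
        {
            if (saveNamespace)
            {
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                hdfs.SaveNamespace();
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            }
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
        }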
Example #2
        /// <summary>Check the INodes in a snapshot path for a file that is modified after the snapshot is taken.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodesAfterModification()
        {
            // First check the INode for /TestSnapshot/sub1/file1
            string[]     names       = INode.GetPathNames(file1.ToString());
            byte[][]     components  = INode.GetPathComponents(names);
            INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // The number of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length);
            // The last INode should be associated with file1
            NUnit.Framework.Assert.AreEqual(nodesInPath.GetINode(components.Length - 1).GetFullPathName(),
                                            file1.ToString());
            // record the modification time of the inode
            long modTime = nodesInPath.GetINode(nodesInPath.Length() - 1).GetModificationTime();

            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s3");
            // Modify file1
            DFSTestUtil.AppendFile(hdfs, file1, "the content for appending");
            // Check the INodes for snapshot of file1
            string snapshotPath = sub1.ToString() + "/.snapshot/s3/file1";

            names      = INode.GetPathNames(snapshotPath);
            components = INode.GetPathComponents(names);
            INodesInPath ssNodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // Length of ssInodes should be (components.length - 1), since we will
            // ignore ".snapshot"
            NUnit.Framework.Assert.AreEqual(ssNodesInPath.Length(), components.Length - 1);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s3 = GetSnapshot(ssNodesInPath, "s3", 3);
            AssertSnapshot(ssNodesInPath, true, s3, 3);
            // Check the INode for snapshot of file1
            INode snapshotFileNode = ssNodesInPath.GetLastINode();

            NUnit.Framework.Assert.AreEqual(snapshotFileNode.GetLocalName(), file1.GetName());
            NUnit.Framework.Assert.IsTrue(snapshotFileNode.AsFile().IsWithSnapshot());
            // The modification time of the snapshot INode should be the same with the
            // original INode before modification
            NUnit.Framework.Assert.AreEqual(modTime,
                snapshotFileNode.GetModificationTime(ssNodesInPath.GetPathSnapshotId()));
            // Check the INode for /TestSnapshot/sub1/file1 again
            names      = INode.GetPathNames(file1.ToString());
            components = INode.GetPathComponents(names);
            INodesInPath newNodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            AssertSnapshot(newNodesInPath, false, s3, -1);
            // The number of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(newNodesInPath.Length(), components.Length);
            // The last INode should be associated with file1
            int last = components.Length - 1;

            NUnit.Framework.Assert.AreEqual(newNodesInPath.GetINode(last).GetFullPathName(),
                                            file1.ToString());
            // The modification time of the INode for file1 should have been changed
            NUnit.Framework.Assert.IsFalse(modTime == newNodesInPath.GetINode(last).GetModificationTime());
            hdfs.DeleteSnapshot(sub1, "s3");
            hdfs.DisallowSnapshot(sub1);
        }
Example #3
 internal static void AssertSnapshot(INodesInPath inodesInPath, bool isSnapshot,
                                     Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot, int index)
 {
     NUnit.Framework.Assert.AreEqual(isSnapshot, inodesInPath.IsSnapshot());
     NUnit.Framework.Assert.AreEqual(
         Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(isSnapshot ? snapshot : null),
         inodesInPath.GetPathSnapshotId());
     if (!isSnapshot)
     {
         NUnit.Framework.Assert.AreEqual(
             Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(snapshot),
             inodesInPath.GetLatestSnapshotId());
     }
     if (isSnapshot && index >= 0)
     {
         NUnit.Framework.Assert.AreEqual(typeof(Snapshot.Root), inodesInPath.GetINode(index).GetType());
     }
 }
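
As Examples #2 and #4 show, the helper is called with isSnapshot = true and the index of the snapshot root for ".snapshot" paths, and with isSnapshot = false and index -1 for regular paths:

     // Snapshot path: the INode at index 3 is the root of snapshot s3.
     AssertSnapshot(ssNodesInPath, true, s3, 3);
     // Regular path: no snapshot root appears among the resolved INodes.
     AssertSnapshot(newNodesInPath, false, s3, -1);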
Example #4
        /// <summary>Check the INodes in a path pointing to a snapshot file.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodes()
        {
            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s1");
            // The path when accessing the snapshot file of file1 is
            // /TestSnapshot/sub1/.snapshot/s1/file1
            string snapshotPath = sub1.ToString() + "/.snapshot/s1/file1";

            string[]     names       = INode.GetPathNames(snapshotPath);
            byte[][]     components  = INode.GetPathComponents(names);
            INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // Length of inodes should be (components.length - 1), since we will ignore
            // ".snapshot"
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
            // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s1, file1}
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = GetSnapshot(nodesInPath, "s1", 3);
            AssertSnapshot(nodesInPath, true, snapshot, 3);
            // Check the INode for file1 (snapshot file)
            INode snapshotFileNode = nodesInPath.GetLastINode();

            AssertINodeFile(snapshotFileNode, file1);
            NUnit.Framework.Assert.IsTrue(snapshotFileNode.GetParent().IsWithSnapshot());
            // Resolve the path again and verify the same snapshot information.
            nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
            AssertSnapshot(nodesInPath, true, snapshot, 3);
            // Check the INode for file1 (snapshot file)
            AssertINodeFile(nodesInPath.GetLastINode(), file1);
            // Resolve the path "/TestSnapshot/sub1/.snapshot"
            string dotSnapshotPath = sub1.ToString() + "/.snapshot";

            names       = INode.GetPathNames(dotSnapshotPath);
            components  = INode.GetPathComponents(names);
            nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
            // The number of INodes returned should still be components.length
            // since we put a null in the inode array for ".snapshot"
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length);
            // No SnapshotRoot dir is included in the resolved inodes
            AssertSnapshot(nodesInPath, true, snapshot, -1);
            // The last INode should be null, the last but 1 should be sub1
            NUnit.Framework.Assert.IsNull(nodesInPath.GetLastINode());
            NUnit.Framework.Assert.AreEqual(nodesInPath.GetINode(-2).GetFullPathName(), sub1.ToString());
            NUnit.Framework.Assert.IsTrue(nodesInPath.GetINode(-2).IsDirectory());
            string[] invalidPathComponent = new string[] { "invalidDir", "foo", ".snapshot", "bar" };
            Path invalidPath = new Path(invalidPathComponent[0]);

            for (int i = 1; i < invalidPathComponent.Length; i++)
            {
                invalidPath = new Path(invalidPath, invalidPathComponent[i]);
                try
                {
                    hdfs.GetFileStatus(invalidPath);
                    NUnit.Framework.Assert.Fail();
                }
                catch (FileNotFoundException fnfe)
                {
                    System.Console.Out.WriteLine("The exception is expected: " + fnfe);
                }
            }
            hdfs.DeleteSnapshot(sub1, "s1");
            hdfs.DisallowSnapshot(sub1);
        }
Example #5
        /// <summary>Retrieve existing INodes from a path.</summary>
        /// <remarks>
        /// Retrieve existing INodes from a path. For non-snapshot path,
        /// the number of INodes is equal to the number of path components. For
        /// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
        /// (number_of_path_components - 1).
        /// An UnresolvedPathException is always thrown when an intermediate path
        /// component refers to a symbolic link. If the final path component refers
        /// to a symbolic link then an UnresolvedPathException is only thrown if
        /// resolveLink is true.
        /// <p>
        /// Example: <br />
        /// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
        /// following path components: ["","c1","c2","c3"]
        /// <p>
        /// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
        /// the array with [rootINode,c1,c2], <br />
        /// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
        /// fill the array with [rootINode,c1,c2,null]
        /// </remarks>
        /// <param name="startingDir">the starting directory</param>
        /// <param name="components">array of path component name</param>
        /// <param name="resolveLink">
        /// indicates whether UnresolvedLinkException should
        /// be thrown when the path refers to a symbolic link.
        /// </param>
        /// <returns>the specified number of existing INodes in the path</returns>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(
            INodeDirectory startingDir, byte[][] components, bool resolveLink)
        {
            Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
            INode curNode  = startingDir;
            int   count    = 0;
            int   inodeNum = 0;

            INode[] inodes     = new INode[components.Length];
            bool    isSnapshot = false;
            int     snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;

            while (count < components.Length && curNode != null)
            {
                bool lastComp = (count == components.Length - 1);
                inodes[inodeNum++] = curNode;
                bool           isRef = curNode.IsReference();
                bool           isDir = curNode.IsDirectory();
                INodeDirectory dir   = isDir ? curNode.AsDirectory() : null;
                if (!isRef && isDir && dir.IsWithSnapshot())
                {
                    //if the path is a non-snapshot path, update the latest snapshot.
                    if (!isSnapshot &&
                        ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(), snapshotId))
                    {
                        snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
                    }
                }
                else
                {
                    if (isRef && isDir && !lastComp)
                    {
                        // If the curNode is a reference node, need to check its dstSnapshot:
                        // 1. if the existing snapshot is no later than the dstSnapshot (which
                        // is the latest snapshot in dst before the rename), the changes
                        // should be recorded in previous snapshots (belonging to src).
                        // 2. however, if the ref node is already the last component, we still
                        // need to know the latest snapshot among the ref node's ancestors,
                        // in case of processing a deletion operation. Thus we do not overwrite
                        // the latest snapshot if lastComp is true. In case of the operation is
                        // a modification operation, we do a similar check in corresponding
                        // recordModification method.
                        if (!isSnapshot)
                        {
                            int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                            if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId ||
                                (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId &&
                                 dstSnapshotId >= snapshotId))
                            {
                                // no snapshot in dst tree of rename
                                // the above scenario
                                int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                                DirectoryWithSnapshotFeature sf;
                                if (curNode.IsDirectory() &&
                                    (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null)
                                {
                                    lastSnapshot = sf.GetLastSnapshotId();
                                }
                                snapshotId = lastSnapshot;
                            }
                        }
                    }
                }
                if (curNode.IsSymlink() && (!lastComp || resolveLink))
                {
                    string path      = ConstructPath(components, 0, components.Length);
                    string preceding = ConstructPath(components, 0, count);
                    string remainder = ConstructPath(components, count + 1, components.Length);
                    string link      = DFSUtil.Bytes2String(components[count]);
                    string target    = curNode.AsSymlink().GetSymlinkString();
                    if (Log.IsDebugEnabled())
                    {
                        Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                                  + " count: " + count + " link: " + link + " target: " + target + " remainder: "
                                  + remainder);
                    }
                    throw new UnresolvedPathException(path, preceding, remainder, target);
                }
                if (lastComp || !isDir)
                {
                    break;
                }
                byte[] childName = components[count + 1];
                // check if the next byte[] in components is for ".snapshot"
                if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
                {
                    // skip the ".snapshot" in components
                    count++;
                    isSnapshot = true;
                    // check if ".snapshot" is the last element of components
                    if (count == components.Length - 1)
                    {
                        break;
                    }
                    // Resolve snapshot root
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components[count + 1]);
                    if (s == null)
                    {
                        // snapshot not found
                        curNode = null;
                    }
                    else
                    {
                        curNode    = s.GetRoot();
                        snapshotId = s.GetId();
                    }
                }
                else
                {
                    // normal case, and also for resolving file/dir under snapshot root
                    curNode = dir.GetChild(childName,
                        isSnapshot ? snapshotId : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
                }
                count++;
            }
            if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
            {
                // For a snapshot path, shrink the inode array. However, for a path ending
                // with ".snapshot", still keep the last null inode in the array.
                INode[] newNodes = new INode[components.Length - 1];
                System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
                inodes = newNodes;
            }
            return new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components, isSnapshot, snapshotId);
        }
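
A minimal usage sketch for Resolve, reusing the fixture fields (fsdir, sub1) from the tests above: resolving a path under ".snapshot" marks the result as a snapshot path and drops the ".snapshot" component, so the resolved INodesInPath is one entry shorter than the component array.

            // Sketch: resolve /<sub1>/.snapshot/s1/file1 against the root directory.
            string snapshotPath = sub1.ToString() + "/.snapshot/s1/file1";
            byte[][] components = INode.GetPathComponents(INode.GetPathNames(snapshotPath));
            INodesInPath iip = INodesInPath.Resolve(fsdir.rootDir, components, false);
            // ".snapshot" is skipped, so one fewer INode than path components.
            NUnit.Framework.Assert.AreEqual(components.Length - 1, iip.Length());
            NUnit.Framework.Assert.IsTrue(iip.IsSnapshot());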