/// <summary>Test snapshot after file appending</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotAfterAppending()
        {
            Path file = new Path(dir, "file");

            // 1. create snapshot --> create file --> append
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
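            // at this point the file should be 2 * Blocksize: Blocksize from the create plus one Blocksize append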
            INodeFile fileNode = (INodeFile)fsdir.GetINode(file.ToString());

            // 2. create snapshot --> modify the file --> append
            hdfs.CreateSnapshot(dir, "s1");
            hdfs.SetReplication(file, (short)(Replication - 1));
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
            // check corresponding inodes
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            NUnit.Framework.Assert.AreEqual(Replication - 1, fileNode.GetFileReplication());
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize());
            // 3. create snapshot --> append
            hdfs.CreateSnapshot(dir, "s2");
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
            // check corresponding inodes
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            NUnit.Framework.Assert.AreEqual(Replication - 1, fileNode.GetFileReplication());
            NUnit.Framework.Assert.AreEqual(Blocksize * 4, fileNode.ComputeFileSize());
        }
Example No. 2
        /// <summary>Get a listing of all the snapshots of a snapshottable directory</summary>
        /// <exception cref="System.IO.IOException"/>
        private static DirectoryListing GetSnapshotsListing(FSDirectory fsd, string src,
                                                            byte[] startAfter)
        {
            Preconditions.CheckState(fsd.HasReadLock());
            Preconditions.CheckArgument(src.EndsWith(HdfsConstants.SeparatorDotSnapshotDir),
                                        "%s does not end with %s", src, HdfsConstants.SeparatorDotSnapshotDir);
            string dirPath = FSDirectory.NormalizePath(
                Sharpen.Runtime.Substring(src, 0, src.Length - HdfsConstants.DotSnapshotDir.Length));
            INode          node              = fsd.GetINode(dirPath);
            INodeDirectory dirNode           = INodeDirectory.ValueOf(node, dirPath);
            DirectorySnapshottableFeature sf = dirNode.GetDirectorySnapshottableFeature();

            if (sf == null)
            {
                throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
            }
            ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshots = sf.GetSnapshotList();
            int skipSize = ReadOnlyList.Util.BinarySearch(snapshots, startAfter);

            skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
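            // BinarySearch is assumed to follow the usual binary-search contract: a non-negative result is the
            // index of startAfter, a negative result encodes the insertion point as -(insertionPoint) - 1.
            // Either way, skipSize now points just past startAfter, so the listing resumes after it.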
            int numOfListing = Math.Min(snapshots.Size() - skipSize, fsd.GetLsLimit());

            HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
            for (int i = 0; i < numOfListing; i++)
            {
                Snapshot.Root sRoot = snapshots.Get(i + skipSize).GetRoot();
                listing[i] = CreateFileStatus(fsd, src, sRoot.GetLocalNameBytes(), sRoot,
                                              BlockStoragePolicySuite.IdUnspecified,
                                              Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
                                              false, INodesInPath.FromINode(sRoot));
            }
            return(new DirectoryListing(listing, snapshots.Size() - skipSize - numOfListing));
        }
Example No. 3
        /// <summary>Check the replication of a given file.</summary>
        /// <remarks>
        /// Check the replication of a given file. We test both
        /// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeFile.GetFileReplication()"
        ///     />
        /// and
        /// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeFile.GetBlockReplication()
        ///     "/>
        /// .
        /// </remarks>
        /// <param name="file">The given file</param>
        /// <param name="replication">The expected replication number</param>
        /// <param name="blockReplication">The expected replication number for the block</param>
        /// <exception cref="System.Exception"/>
        private void CheckFileReplication(Path file, short replication, short blockReplication)
        {
            // Get FileStatus of file1, and identify the replication number of file1.
            // Note that the replication number in FileStatus was derived from
            // INodeFile#getFileReplication().
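            // Note: getFileReplication() reflects the replication configured on the current file, while
            // getBlockReplication() is expected to be the maximum replication across the current file and its
            // snapshot copies (assumption based on HDFS snapshot semantics), which is why both are checked.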
            short fileReplication = hdfs.GetFileStatus(file1).GetReplication();

            NUnit.Framework.Assert.AreEqual(replication, fileReplication);
            // Check the correctness of getBlockReplication()
            INode inode = fsdir.GetINode(file1.ToString());

            NUnit.Framework.Assert.IsTrue(inode is INodeFile);
            NUnit.Framework.Assert.AreEqual(blockReplication, ((INodeFile)inode).GetBlockReplication());
        }
Example No. 4
        /// <exception cref="System.IO.IOException"/>
        public static void DumpTree2File(FSDirectory fsdir, FilePath f)
        {
            PrintWriter @out = new PrintWriter(new FileWriter(f, false), true);

            fsdir.GetINode("/").DumpTreeRecursively(@out, new StringBuilder(),
                                                    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
            @out.Close();
        }
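A minimal usage sketch (not part of the example above; the cluster variable and the /tmp output paths are illustrative assumptions):

            FSDirectory fsdir = cluster.GetNamesystem().GetFSDirectory();
            FilePath before = new FilePath("/tmp/tree-before.txt");
            DumpTree2File(fsdir, before);
            // ... create files, take snapshots, rename, etc. ...
            FilePath after = new FilePath("/tmp/tree-after.txt");
            DumpTree2File(fsdir, after);
            // diffing the two dumps shows how the operations changed the namespace tree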
Example No. 5
        /// <summary>Test allow-snapshot operation.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAllowSnapshot()
        {
            string pathStr = sub1.ToString();
            INode  before  = fsdir.GetINode(pathStr);

            // Before a directory is snapshottable
            NUnit.Framework.Assert.IsFalse(before.AsDirectory().IsSnapshottable());
            // After a directory is snapshottable
            Path path = new Path(pathStr);

            hdfs.AllowSnapshot(path);
            {
                INode after = fsdir.GetINode(pathStr);
                NUnit.Framework.Assert.IsTrue(after.AsDirectory().IsSnapshottable());
            }
            hdfs.DisallowSnapshot(path);
            {
                INode after = fsdir.GetINode(pathStr);
                NUnit.Framework.Assert.IsFalse(after.AsDirectory().IsSnapshottable());
            }
        }
Example No. 6
        /// <exception cref="System.Exception"/>
        internal static INodeFile AssertBlockCollection(string path, int numBlocks,
                                                        FSDirectory dir, BlockManager blkManager)
        {
            INodeFile file = INodeFile.ValueOf(dir.GetINode(path), path);

            NUnit.Framework.Assert.AreEqual(numBlocks, file.GetBlocks().Length);
            foreach (BlockInfoContiguous b in file.GetBlocks())
            {
                AssertBlockCollection(blkManager, file, b);
            }
            return(file);
        }
Example No. 7
        /// <summary>Test append over storage quota does not mark file as UC or create lease</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendOverStorageQuota()
        {
            Path dir  = new Path("/TestAppendOverQuota");
            Path file = new Path(dir, "file");

            // create partial block file
            dfs.Mkdirs(dir);
            DFSTestUtil.CreateFile(dfs, file, Blocksize / 2, Replication, seed);
            // lower quota to cause exception when appending to partial block
            dfs.SetQuota(dir, long.MaxValue - 1, 1);
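            // the append must reserve quota for a full block (times the replication factor), so with the
            // storage-space quota lowered to 1 byte the quota check is expected to fail before data is written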
            INodeDirectory dirNode   = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
            long           spaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace();

            try
            {
                DFSTestUtil.AppendFile(dfs, file, Blocksize);
                NUnit.Framework.Assert.Fail("append didn't fail");
            }
            catch (DSQuotaExceededException)
            {
                // ignore
            }
            // check that the file exists, isn't UC, and has no dangling lease
            INodeFile inode = fsdir.GetINode(file.ToString()).AsFile();

            NUnit.Framework.Assert.IsNotNull(inode);
            NUnit.Framework.Assert.IsFalse("should not be UC", inode.IsUnderConstruction());
            NUnit.Framework.Assert.IsNull("should not have a lease",
                                          cluster.GetNamesystem().GetLeaseManager().GetLeaseByPath(file.ToString()));
            // make sure the quota usage is unchanged
            long newSpaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(spaceUsed, newSpaceUsed);
            // make sure edits aren't corrupted
            dfs.RecoverLease(file);
            cluster.RestartNameNodes();
        }
Example No. 8
        public virtual void TestDisallowNestedSnapshottableDir()
        {
            cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
            Path dir = new Path("/dir");
            Path sub = new Path(dir, "sub");

            hdfs.Mkdirs(sub);
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
            Path file = new Path(sub, "file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, Seed);
            FSDirectory fsdir   = cluster.GetNamesystem().GetFSDirectory();
            INode       subNode = fsdir.GetINode(sub.ToString());

            NUnit.Framework.Assert.IsTrue(subNode.AsDirectory().IsWithSnapshot());
            hdfs.AllowSnapshot(sub);
            subNode = fsdir.GetINode(sub.ToString());
            NUnit.Framework.Assert.IsTrue(subNode.IsDirectory() && subNode.AsDirectory().IsSnapshottable());
            hdfs.DisallowSnapshot(sub);
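            // disallowing snapshots removes the snapshottable feature again, but the directory is still
            // captured in the ancestor's snapshot s1, so it keeps its with-snapshot state (checked below)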
            subNode = fsdir.GetINode(sub.ToString());
            NUnit.Framework.Assert.IsTrue(subNode.AsDirectory().IsWithSnapshot());
        }
Example No. 9
        /// <exception cref="System.Exception"/>
        public virtual void TestSetQuota()
        {
            Path dir = new Path("/TestSnapshot");

            hdfs.Mkdirs(dir);
            // allow snapshot on dir and create snapshot s1
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
            Path sub = new Path(dir, "sub");

            hdfs.Mkdirs(sub);
            Path fileInSub = new Path(sub, "file");

            DFSTestUtil.CreateFile(hdfs, fileInSub, Blocksize, Replication, seed);
            INodeDirectory subNode = INodeDirectory.ValueOf(fsdir.GetINode(sub.ToString()), sub);

            // subNode should be a INodeDirectory, but not an INodeDirectoryWithSnapshot
            NUnit.Framework.Assert.IsFalse(subNode.IsWithSnapshot());
            hdfs.SetQuota(sub, long.MaxValue - 1, long.MaxValue - 1);
            subNode = INodeDirectory.ValueOf(fsdir.GetINode(sub.ToString()), sub);
            NUnit.Framework.Assert.IsTrue(subNode.IsQuotaSet());
            NUnit.Framework.Assert.IsFalse(subNode.IsWithSnapshot());
        }
Example No. 10
        /// <summary>
        /// Rename snapshot(s), and check the correctness of the snapshot list within
        /// <see cref="INodeDirectorySnapshottable"/>
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotList()
        {
            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, Replication, seed);
            // Create three snapshots for sub1
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s1");
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s2");
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s3");
            // Rename s3 to s22
            hdfs.RenameSnapshot(sub1, "s3", "s22");
            // Check the snapshots list
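            // CheckSnapshotList is assumed to verify two orderings: the first array is the snapshot list
            // sorted by name, the second the snapshots in creation order (which renaming does not change)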
            INodeDirectory srcRoot = fsdir.GetINode(sub1.ToString()).AsDirectory();

            CheckSnapshotList(srcRoot, new string[] { "s1", "s2", "s22" }, new string[] { "s1", "s2", "s22" });
            // Rename s1 to s4
            hdfs.RenameSnapshot(sub1, "s1", "s4");
            CheckSnapshotList(srcRoot, new string[] { "s2", "s22", "s4" }, new string[] { "s4", "s2", "s22" });
            // Rename s22 to s0
            hdfs.RenameSnapshot(sub1, "s22", "s0");
            CheckSnapshotList(srcRoot, new string[] { "s0", "s2", "s4" }, new string[] { "s4", "s2", "s0" });
        }
Example No. 11
        public virtual void TestDumpTree()
        {
            INode root = fsdir.GetINode("/");

            Log.Info("Original tree");
            StringBuilder b1 = root.DumpTreeRecursively();

            System.Console.Out.WriteLine("b1=" + b1);
            BufferedReader @in  = new BufferedReader(new StringReader(b1.ToString()));
            string         line = @in.ReadLine();

            CheckClassName(line);
            while ((line = @in.ReadLine()) != null)
            {
                line = line.Trim();
                if (line.Length > 0 && !line.Contains("snapshot"))
                {
                    NUnit.Framework.Assert.IsTrue("line=" + line,
                                                  line.StartsWith(INodeDirectory.DumptreeLastItem) ||
                                                  line.StartsWith(INodeDirectory.DumptreeExceptLastItem));
                    CheckClassName(line);
                }
            }
        }
Example No. 12
        /// <exception cref="System.Exception"/>
        internal virtual void AssertINodeNull(string path)
        {
            NUnit.Framework.Assert.IsNull(fsdir.GetINode(path));
        }
Example No. 13
        /// <summary>Scan all CacheDirectives.</summary>
        /// <remarks>
        /// Scan all CacheDirectives.  Use the information to figure out
        /// what cache replication factor each block should have.
        /// </remarks>
        private void RescanCacheDirectives()
        {
            FSDirectory fsDir = namesystem.GetFSDirectory();
            // current time in milliseconds since the Unix epoch
            long        now   = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();

            foreach (CacheDirective directive in cacheManager.GetCacheDirectives())
            {
                scannedDirectives++;
                // Skip processing this entry if it has expired
                if (directive.GetExpiryTime() > 0 && directive.GetExpiryTime() <= now)
                {
                    Log.Debug("Directive {}: the directive expired at {} (now = {})",
                              directive.GetId(), directive.GetExpiryTime(), now);
                    continue;
                }
                string path = directive.GetPath();
                INode  node;
                try
                {
                    node = fsDir.GetINode(path);
                }
                catch (UnresolvedLinkException)
                {
                    // We don't cache through symlinks
                    Log.Debug("Directive {}: got UnresolvedLinkException while resolving path {}",
                              directive.GetId(), path);
                    continue;
                }
                if (node == null)
                {
                    Log.Debug("Directive {}: No inode found at {}", directive.GetId(), path);
                }
                else
                {
                    if (node.IsDirectory())
                    {
                        INodeDirectory       dir      = node.AsDirectory();
                        ReadOnlyList<INode>  children = dir.GetChildrenList(
                            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
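                        // a directive on a directory is non-recursive: only its direct children that are
                        // files are rescanned; sub-directories are not descended into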
                        foreach (INode child in children)
                        {
                            if (child.IsFile())
                            {
                                RescanFile(directive, child.AsFile());
                            }
                        }
                    }
                    else
                    {
                        if (node.IsFile())
                        {
                            RescanFile(directive, node.AsFile());
                        }
                        else
                        {
                            Log.Debug("Directive {}: ignoring non-directive, non-file inode {} ",
                                      directive.GetId(), node);
                        }
                    }
                }
            }
        }
Example No. 14
        /// <summary>
        /// Test that an append with no locations fails with an exception
        /// showing insufficient locations.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendInsufficientLocations()
        {
            Configuration conf = new Configuration();

            // lower heartbeat interval for fast recognition of DN
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 3000);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file with replication 2
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
                // Shut down all DNs that have the last block location for the file
                LocatedBlocks lbs = fileSystem.dfs.GetNamenode().GetBlockLocations("/testAppend",
                                                                                   0, long.MaxValue);
                IList <DataNode> dnsOfCluster     = cluster.GetDataNodes();
                DatanodeInfo[]   dnsWithLocations = lbs.GetLastLocatedBlock().GetLocations();
                foreach (DataNode dn in dnsOfCluster)
                {
                    foreach (DatanodeInfo loc in dnsWithLocations)
                    {
                        if (dn.GetDatanodeId().Equals(loc))
                        {
                            dn.Shutdown();
                            DFSTestUtil.WaitForDatanodeDeath(dn);
                        }
                    }
                }
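                // every datanode that held a replica of the file's last block has now been shut down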
                // Wait till 0 replication is recognized
                DFSTestUtil.WaitReplication(fileSystem, f, (short)0);
                // Append to the file; at this point there are 2 live DNs but none of them
                // has the block.
                try
                {
                    fileSystem.Append(f);
                    NUnit.Framework.Assert.Fail("Append should fail because insufficient locations");
                }
                catch (IOException e)
                {
                    Log.Info("Expected exception: ", e);
                }
                FSDirectory dir   = cluster.GetNamesystem().GetFSDirectory();
                INodeFile   inode = INodeFile.ValueOf(dir.GetINode("/testAppend"), "/testAppend");
                NUnit.Framework.Assert.IsTrue("File should remain closed", !inode.IsUnderConstruction());
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }