Code example #1
        public virtual void TestRollbackCommand()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            Path           foo     = new Path("/foo");
            Path           bar     = new Path("/bar");

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade",
                                                                               "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                // check NNStorage
                NNStorage storage = cluster.GetNamesystem().GetFSImage().GetStorage();
                // (startSegment, mkdir, endSegment)
                CheckNNStorage(storage, 3, -1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            NameNode nn = null;

            try
            {
                nn = NameNode.CreateNameNode(new string[] { "-rollingUpgrade", "rollback" }, conf
                                             );
                // make sure /foo is still there, but /bar is not
                INode fooNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(foo.ToString()
                                                                                   );
                NUnit.Framework.Assert.IsNotNull(fooNode);
                INode barNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(bar.ToString()
                                                                                   );
                NUnit.Framework.Assert.IsNull(barNode);
                // check the details of NNStorage
                NNStorage storage = nn.GetNamesystem().GetFSImage().GetStorage();
                // (startSegment, upgrade marker, mkdir, endSegment)
                CheckNNStorage(storage, 3, 7);
            }
            finally
            {
                if (nn != null)
                {
                    nn.Stop();
                    nn.Join();
                }
            }
        }
Code example #2
 public virtual void SetUp()
 {
     fs.Mkdirs(new Path("/sub1"));
     fs.AllowSnapshot(new Path("/sub1"));
     fs.Mkdirs(new Path("/sub1/sub1sub1"));
     fs.Mkdirs(new Path("/sub1/sub1sub2"));
 }
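
A note on code example #2: this SetUp only makes /sub1 snapshottable; the fs handle is assumed to be initialized elsewhere, typically from a MiniDFSCluster as in code example #6. As a hypothetical illustration (not part of the original test class) of the kind of test such a SetUp supports, using only calls that appear elsewhere on this page:

 // hypothetical follow-up test: snapshot the directory prepared in SetUp
 public virtual void TestSnapshotUnderSub1()
 {
     Path sub1 = new Path("/sub1");
     fs.CreateSnapshot(sub1, "s0");
     // the snapshot root should now be visible (same check as in code example #10)
     Path s0path = SnapshotTestHelper.GetSnapshotRoot(sub1, "s0");
     NUnit.Framework.Assert.IsTrue(fs.Exists(s0path));
 }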
Code example #3
 public virtual void CreateFiles()
 {
     hdfs.Delete(new Path(testdir), true);
     hdfs.Mkdirs(new Path(testdir));
     hdfs.Mkdirs(new Path(testdir + "/foo"));
     DFSTestUtil.CreateFile(hdfs, new Path(testdir + "/bar"), 0, (short)1, 0);
 }
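
The CreateFiles helper in code example #3 relies on an hdfs handle and a testdir constant that are defined elsewhere in its test class and are not part of this listing. A minimal sketch of the assumed declarations (the names come from the snippet; the values are illustrative only):

 // hypothetical declarations assumed by CreateFiles(); the real ones are not shown here
 private DistributedFileSystem hdfs;         // typically cluster.GetFileSystem(), as in code example #6
 private const string testdir = "/testdir";  // illustrative value for the test root directory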
Code example #4
 public virtual void TestSetXAttr()
 {
     InitCluster(true, false);
     fs.Mkdirs(Path);
     ExpectException();
     fs.SetXAttr(Path, "user.foo", null);
 }
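
In code example #4, InitCluster and ExpectException are private helpers of the test class that are not shown; the intent is that, with XAttrs disabled by configuration, the SetXAttr call must fail. A hedged, NUnit-only way to express the same expectation (hypothetical; it assumes the failure surfaces as an IOException, as it does in the Java original):

 // hypothetical equivalent of the ExpectException()/SetXAttr pair without the helper
 NUnit.Framework.Assert.Catch<System.IO.IOException>(
     () => fs.SetXAttr(Path, "user.foo", null));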
Code example #5
        /// <summary>
        /// Test the listing with different user names to make sure only directories
        /// that are owned by the user are listed.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListWithDifferentUser()
        {
            cluster.GetNamesystem().GetSnapshotManager().SetAllowNestedSnapshots(true);
            // first make dir1 and dir2 snapshottable
            hdfs.AllowSnapshot(dir1);
            hdfs.AllowSnapshot(dir2);
            hdfs.SetPermission(root, FsPermission.ValueOf("-rwxrwxrwx"));
            // create two dirs and make them snapshottable under the name of user1
            UserGroupInformation ugi1 = UserGroupInformation.CreateUserForTesting("user1", new
                                                                                  string[] { "group1" });
            DistributedFileSystem fs1 = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(ugi1
                                                                                           , conf);
            Path dir1_user1 = new Path("/dir1_user1");
            Path dir2_user1 = new Path("/dir2_user1");

            fs1.Mkdirs(dir1_user1);
            fs1.Mkdirs(dir2_user1);
            hdfs.AllowSnapshot(dir1_user1);
            hdfs.AllowSnapshot(dir2_user1);
            // user2
            UserGroupInformation ugi2 = UserGroupInformation.CreateUserForTesting("user2", new
                                                                                  string[] { "group2" });
            DistributedFileSystem fs2 = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(ugi2
                                                                                           , conf);
            Path dir_user2    = new Path("/dir_user2");
            Path subdir_user2 = new Path(dir_user2, "subdir");

            fs2.Mkdirs(dir_user2);
            fs2.Mkdirs(subdir_user2);
            hdfs.AllowSnapshot(dir_user2);
            hdfs.AllowSnapshot(subdir_user2);
            // super user
            string supergroup = conf.Get(DFSConfigKeys.DfsPermissionsSuperusergroupKey, DFSConfigKeys
                                         .DfsPermissionsSuperusergroupDefault);
            UserGroupInformation superUgi = UserGroupInformation.CreateUserForTesting("superuser"
                                                                                      , new string[] { supergroup });
            DistributedFileSystem fs3 = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(superUgi
                                                                                           , conf);

            // list the snapshottable dirs for superuser
            SnapshottableDirectoryStatus[] dirs = fs3.GetSnapshottableDirListing();
            // 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
            // subdir_user2
            NUnit.Framework.Assert.AreEqual(6, dirs.Length);
            // list the snapshottable dirs for user1
            dirs = fs1.GetSnapshottableDirListing();
            // 2 dirs owned by user1: dir1_user1 and dir2_user1
            NUnit.Framework.Assert.AreEqual(2, dirs.Length);
            NUnit.Framework.Assert.AreEqual(dir1_user1, dirs[0].GetFullPath());
            NUnit.Framework.Assert.AreEqual(dir2_user1, dirs[1].GetFullPath());
            // list the snapshottable dirs for user2
            dirs = fs2.GetSnapshottableDirListing();
            // 2 dirs owned by user2: dir_user2 and subdir_user2
            NUnit.Framework.Assert.AreEqual(2, dirs.Length);
            NUnit.Framework.Assert.AreEqual(dir_user2, dirs[0].GetFullPath());
            NUnit.Framework.Assert.AreEqual(subdir_user2, dirs[1].GetFullPath());
        }
Code example #6
 public virtual void SetUp()
 {
     conf    = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
     cluster.WaitActive();
     fsn  = cluster.GetNamesystem();
     hdfs = cluster.GetFileSystem();
     hdfs.Mkdirs(dir1);
     hdfs.Mkdirs(dir2);
 }
Code example #7
        public virtual void TestRollbackWithQJM()
        {
            Configuration      conf    = new HdfsConfiguration();
            MiniJournalCluster mjc     = null;
            MiniDFSCluster     cluster = null;
            Path foo = new Path("/foo");
            Path bar = new Path("/bar");

            try
            {
                mjc = new MiniJournalCluster.Builder(conf).NumJournalNodes(NumJournalNodes).Build
                          ();
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI(JournalId)
                         .ToString());
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade",
                                                                               "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                dfs.Close();
                // rollback
                cluster.RestartNameNode("-rollingUpgrade", "rollback");
                // make sure /foo is still there, but /bar is not
                dfs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
                // check storage in JNs
                for (int i = 0; i < NumJournalNodes; i++)
                {
                    FilePath dir = mjc.GetCurrentDir(0, JournalId);
                    // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
                    // marker, mkdir, endSegment)
                    CheckJNStorage(dir, 4, 7);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (mjc != null)
                {
                    mjc.Shutdown();
                }
            }
        }
Code example #8
 public virtual void TestModifyAclEntries()
 {
     InitCluster(true, false);
     fs.Mkdirs(Path);
     ExpectException();
     fs.ModifyAclEntries(Path, Lists.NewArrayList(AclTestHelpers.AclEntry(AclEntryScope
                                                                          .Default, AclEntryType.User, "foo", FsAction.ReadWrite)));
 }
Code example #9
        public virtual void TestSaveNamespaceWithRenamedLease()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).NumDataNodes
                                         (1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs   = (DistributedFileSystem)cluster.GetFileSystem();
            OutputStream          @out = null;

            try
            {
                fs.Mkdirs(new Path("/test-target"));
                @out = fs.Create(new Path("/test-source/foo"));
                // don't close
                fs.Rename(new Path("/test-source/"), new Path("/test-target/"));
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                cluster.GetNameNodeRpc().SaveNamespace();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            }
            finally
            {
                IOUtils.Cleanup(Log, @out, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code example #10
File: TestWebHDFS.cs  Project: orf53975/hadoop.net
        public virtual void TestWebHdfsRenameSnapshot()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs     = cluster.GetFileSystem();
                FileSystem            webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem
                                                                                     .Scheme);
                Path foo = new Path("/foo");
                dfs.Mkdirs(foo);
                dfs.AllowSnapshot(foo);
                webHdfs.CreateSnapshot(foo, "s1");
                Path s1path = SnapshotTestHelper.GetSnapshotRoot(foo, "s1");
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(s1path));
                // rename s1 to s2
                webHdfs.RenameSnapshot(foo, "s1", "s2");
                NUnit.Framework.Assert.IsFalse(webHdfs.Exists(s1path));
                Path s2path = SnapshotTestHelper.GetSnapshotRoot(foo, "s2");
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(s2path));
                webHdfs.DeleteSnapshot(foo, "s2");
                NUnit.Framework.Assert.IsFalse(webHdfs.Exists(s2path));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code example #11
        public static void SetUp()
        {
            conf    = new Configuration();
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            fsn = cluster.GetNamesystem();
            fs  = cluster.GetFileSystem();
            Path path1 = new Path(sub1, "dir1");

            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path1));
            Path path2 = new Path(sub2, "dir2");

            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path2));
            SnapshotTestHelper.CreateSnapshot(fs, sub1, "testSnapshot");
            objInSnapshot = SnapshotTestHelper.GetSnapshotPath(sub1, "testSnapshot", "dir1");
        }
Code example #12
        public virtual void TestSaveLoadImage()
        {
            int s = 0;

            // make changes to the namesystem
            hdfs.Mkdirs(dir);
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s" + ++s);
            Path sub1 = new Path(dir, "sub1");

            hdfs.Mkdirs(sub1);
            hdfs.SetPermission(sub1, new FsPermission((short)0x1ff));
            Path sub11 = new Path(sub1, "sub11");

            hdfs.Mkdirs(sub11);
            CheckImage(s);
            hdfs.CreateSnapshot(dir, "s" + ++s);
            Path sub1file1 = new Path(sub1, "sub1file1");
            Path sub1file2 = new Path(sub1, "sub1file2");

            DFSTestUtil.CreateFile(hdfs, sub1file1, Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, sub1file2, Blocksize, Replication, seed);
            CheckImage(s);
            hdfs.CreateSnapshot(dir, "s" + ++s);
            Path sub2      = new Path(dir, "sub2");
            Path sub2file1 = new Path(sub2, "sub2file1");
            Path sub2file2 = new Path(sub2, "sub2file2");

            DFSTestUtil.CreateFile(hdfs, sub2file1, Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, sub2file2, Blocksize, Replication, seed);
            CheckImage(s);
            hdfs.CreateSnapshot(dir, "s" + ++s);
            hdfs.SetReplication(sub1file1, (short)(Replication - 1));
            hdfs.Delete(sub1file2, true);
            hdfs.SetOwner(sub2, "dr.who", "unknown");
            hdfs.Delete(sub2file1, true);
            CheckImage(s);
            hdfs.CreateSnapshot(dir, "s" + ++s);
            Path sub1_sub2file2 = new Path(sub1, "sub2file2");

            hdfs.Rename(sub2file2, sub1_sub2file2);
            hdfs.Rename(sub1file1, sub2file1);
            CheckImage(s);
            hdfs.Rename(sub2file1, sub2file2);
            CheckImage(s);
        }
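
CheckImage(int) in code example #12 is a helper of the test class that is not included in this listing. As a rough, hypothetical sketch of the idea behind it, persist the namespace and reload it so the snapshot state built so far must survive an fsimage round trip, assuming the cluster, hdfs and dir fields set up as in code example #17 and using only calls that appear elsewhere on this page:

        // hypothetical sketch only; the real CheckImage performs a much more thorough fsimage comparison
        private void CheckImage(int s)
        {
            // save the namespace (same sequence as in code example #23)
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            // restart the NameNode so the namespace is re-read from the saved image
            cluster.RestartNameNode(true);
            hdfs = cluster.GetFileSystem();
            // the most recent snapshot created by the test must still be visible
            Path snapshotRoot = SnapshotTestHelper.GetSnapshotRoot(dir, "s" + s);
            NUnit.Framework.Assert.IsTrue(hdfs.Exists(snapshotRoot));
        }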
Code example #13
        /// <summary>Basic read/write tests of raw files.</summary>
        /// <remarks>
        /// Basic read/write tests of raw files.
        /// Create a non-encrypted file
        /// Create an encryption zone
        /// Verify that non-encrypted file contents and decrypted file in EZ are equal
        /// Compare the raw encrypted bytes of the file with the decrypted version to
        /// ensure they're different
        /// Compare the raw and non-raw versions of the non-encrypted file to ensure
        /// they're the same.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestReadWriteRaw()
        {
            // Create a base file for comparison
            Path baseFile = new Path("/base");
            int  len      = 8192;

            DFSTestUtil.CreateFile(fs, baseFile, len, (short)1, unchecked ((int)(0xFEED)));
            // Create the first enc file
            Path zone = new Path("/zone");

            fs.Mkdirs(zone);
            dfsAdmin.CreateEncryptionZone(zone, TestKey);
            Path encFile1 = new Path(zone, "myfile");

            DFSTestUtil.CreateFile(fs, encFile1, len, (short)1, unchecked ((int)(0xFEED)));
            // Read them back in and compare byte-by-byte
            DFSTestUtil.VerifyFilesEqual(fs, baseFile, encFile1, len);
            // Raw file should be different from encrypted file
            Path encFile1Raw = new Path(zone, "/.reserved/raw/zone/myfile");

            DFSTestUtil.VerifyFilesNotEqual(fs, encFile1Raw, encFile1, len);
            // Raw file should be same as /base which is not in an EZ
            Path baseFileRaw = new Path(zone, "/.reserved/raw/base");

            DFSTestUtil.VerifyFilesEqual(fs, baseFile, baseFileRaw, len);
        }
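
In code example #13, dfsAdmin and TestKey are members of the test class that do not appear in this listing; dfsAdmin is presumably an HdfsAdmin handle and TestKey the name of an encryption key provisioned during setup. Assumed declarations, for orientation only:

        // hypothetical declarations assumed by TestReadWriteRaw(); not part of the original listing
        private HdfsAdmin dfsAdmin;                 // admin client used for CreateEncryptionZone
        private const string TestKey = "test_key";  // illustrative name of a key created during setup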
Code example #14
        /// <exception cref="System.Exception"/>
        public virtual void TestDowngrade()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare
                                                             );
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                TestRollingUpgrade.QueryForPreparation(dfs);
                dfs.Close();
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "downgrade");
                // Once downgraded, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(dfsCluster.GetNamesystem(0).GetFSImage().HasRollbackFSImage
                                                   ());
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
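
TestRollingUpgrade.QueryForPreparation, referenced in code examples #14, #15 and #30, polls until the rollback fsimage for the rolling upgrade has been created; its body is not part of this listing. A hypothetical sketch, assuming the port mirrors Java's RollingUpgradeAction.Query and RollingUpgradeInfo.CreatedRollbackImages(), neither of which appears on this page:

        // hypothetical sketch of QueryForPreparation; the Query action and the
        // CreatedRollbackImages() member are assumptions, not taken from this listing
        internal static void QueryForPreparation(DistributedFileSystem dfs)
        {
            for (int retries = 0; retries < 10; retries++)
            {
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Query);
                if (info.CreatedRollbackImages())
                {
                    return; // the rollback fsimage exists, preparation is complete
                }
                Sharpen.Thread.Sleep(1000); // same sleep helper as in code example #27
            }
            NUnit.Framework.Assert.Fail("rolling upgrade preparation did not finish in time");
        }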
Code example #15
        /// <exception cref="System.Exception"/>
        public virtual void TestFinalize()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                FSImage fsimage = dfsCluster.GetNamesystem(0).GetFSImage();
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare
                                                             );
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                QueryForPreparation(dfs);
                // The NN should have a copy of the fsimage in case of rollbacks.
                NUnit.Framework.Assert.IsTrue(fsimage.HasRollbackFSImage());
                info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
                NUnit.Framework.Assert.IsTrue(info.IsFinalized());
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                // Once finalized, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(fsimage.HasRollbackFSImage());
                // Should have no problem in restart and replaying edits that include
                // the FINALIZE op.
                dfsCluster.RestartNameNode(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code example #16
        public virtual void TestSocketFactory()
        {
            // Create a standard mini-cluster
            Configuration  sconf   = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).NumDataNodes(1).Build(
                );
            int nameNodePort = cluster.GetNameNodePort();
            // Get a reference to its DFS directly
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue(fs is DistributedFileSystem);
            DistributedFileSystem directDfs = (DistributedFileSystem)fs;
            Configuration         cconf     = GetCustomSocketConfigs(nameNodePort);

            fs = FileSystem.Get(cconf);
            NUnit.Framework.Assert.IsTrue(fs is DistributedFileSystem);
            DistributedFileSystem dfs               = (DistributedFileSystem)fs;
            JobClient             client            = null;
            MiniMRYarnCluster     miniMRYarnCluster = null;

            try
            {
                // This will test RPC to the NameNode only.
                // could we test Client-DataNode connections?
                Path filePath = new Path("/dir");
                NUnit.Framework.Assert.IsFalse(directDfs.Exists(filePath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(filePath));
                directDfs.Mkdirs(filePath);
                NUnit.Framework.Assert.IsTrue(directDfs.Exists(filePath));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(filePath));
                // This will test RPC to a Resource Manager
                fs = FileSystem.Get(sconf);
                JobConf jobConf = new JobConf();
                FileSystem.SetDefaultUri(jobConf, fs.GetUri().ToString());
                miniMRYarnCluster = InitAndStartMiniMRYarnCluster(jobConf);
                JobConf jconf = new JobConf(miniMRYarnCluster.GetConfig());
                jconf.Set("hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.ipc.DummySocketFactory"
                          );
                jconf.Set(MRConfig.FrameworkName, MRConfig.YarnFrameworkName);
                string   rmAddress = jconf.Get("yarn.resourcemanager.address");
                string[] split     = rmAddress.Split(":");
                jconf.Set("yarn.resourcemanager.address", split[0] + ':' + (System.Convert.ToInt32
                                                                                (split[1]) + 10));
                client = new JobClient(jconf);
                JobStatus[] jobs = client.JobsToComplete();
                NUnit.Framework.Assert.IsTrue(jobs.Length == 0);
            }
            finally
            {
                CloseClient(client);
                CloseDfs(dfs);
                CloseDfs(directDfs);
                StopMiniMRYarnCluster(miniMRYarnCluster);
                ShutdownDFSCluster(cluster);
            }
        }
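
GetCustomSocketConfigs in code example #16 builds the client-side Configuration whose RPC connections go through the dummy socket factory; the helper itself is not shown. A minimal, hypothetical sketch, reusing only the factory class name and the FileSystem.SetDefaultUri call that appear later in the same test:

        // hypothetical sketch; the real helper may set additional per-protocol factory keys
        private static Configuration GetCustomSocketConfigs(int nameNodePort)
        {
            Configuration cconf = new Configuration();
            // point the client at the mini-cluster's NameNode
            FileSystem.SetDefaultUri(cconf, "hdfs://localhost:" + nameNodePort + "/");
            // route all RPC sockets through the dummy factory under test
            cconf.Set("hadoop.rpc.socket.factory.class.default",
                      "org.apache.hadoop.ipc.DummySocketFactory");
            return cconf;
        }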
Code example #17
 public virtual void SetUp()
 {
     conf = new Configuration();
     conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, Blocksize);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
     cluster.WaitActive();
     fsn   = cluster.GetNamesystem();
     fsdir = fsn.GetFSDirectory();
     hdfs  = cluster.GetFileSystem();
     hdfs.Mkdirs(dir);
 }
Code example #18
        /// <summary>Test if the quota can be correctly updated for create file</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaUpdateWithFileCreate()
        {
            Path foo         = new Path(dir, "foo");
            Path createdFile = new Path(foo, "created_file.data");

            dfs.Mkdirs(foo);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            long fileLen = Blocksize * 2 + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, Replication
                                   , seed);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed
                                  ();

            NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
        }
Code example #19
        public virtual void SetUp()
        {
            Configuration conf = new Configuration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, Blocksize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsdir = cluster.GetNamesystem().GetFSDirectory();
            dfs   = cluster.GetFileSystem();
            dfs.Mkdirs(dir);
            dfs.SetQuota(dir, long.MaxValue - 1, Diskquota);
            dfs.SetQuotaByStorageType(dir, StorageType.Disk, Diskquota);
            dfs.SetStoragePolicy(dir, HdfsConstants.HotStoragePolicyName);
        }
Code example #20
        public virtual void TestTransactionAndCheckpointMetrics()
        {
            long lastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics
                                                                (NsMetrics));

            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            fs.Mkdirs(new Path(TestRootDirPath, "/tmp"));
            UpdateMetrics();
            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 2L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 2L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 2L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            cluster.GetNameNodeRpc().RollEditLog();
            UpdateMetrics();
            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 4L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 4L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            cluster.GetNameNodeRpc().SaveNamespace();
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
            UpdateMetrics();
            long newLastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics
                                                                   (NsMetrics));

            NUnit.Framework.Assert.IsTrue(lastCkptTime < newLastCkptTime);
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 6L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
        }
Code example #21
        /// <exception cref="System.Exception"/>
        public virtual void TestSetQuota()
        {
            Path dir = new Path("/TestSnapshot");

            hdfs.Mkdirs(dir);
            // allow snapshot on dir and create snapshot s1
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
            Path sub = new Path(dir, "sub");

            hdfs.Mkdirs(sub);
            Path fileInSub = new Path(sub, "file");

            DFSTestUtil.CreateFile(hdfs, fileInSub, Blocksize, Replication, seed);
            INodeDirectory subNode = INodeDirectory.ValueOf(fsdir.GetINode(sub.ToString()), sub
                                                            );

            // subNode should be a INodeDirectory, but not an INodeDirectoryWithSnapshot
            NUnit.Framework.Assert.IsFalse(subNode.IsWithSnapshot());
            hdfs.SetQuota(sub, long.MaxValue - 1, long.MaxValue - 1);
            subNode = INodeDirectory.ValueOf(fsdir.GetINode(sub.ToString()), sub);
            NUnit.Framework.Assert.IsTrue(subNode.IsQuotaSet());
            NUnit.Framework.Assert.IsFalse(subNode.IsWithSnapshot());
        }
Code example #22
        public virtual void TestCheckpointWithSNN()
        {
            MiniDFSCluster        cluster = null;
            DistributedFileSystem dfs     = null;
            SecondaryNameNode     snn     = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
                snn = new SecondaryNameNode(conf);
                dfs = cluster.GetFileSystem();
                dfs.Mkdirs(new Path("/test/foo"));
                snn.DoCheckpoint();
                //start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                dfs.Mkdirs(new Path("/test/bar"));
                // do checkpoint in SNN again
                snn.DoCheckpoint();
            }
            finally
            {
                IOUtils.Cleanup(null, dfs);
                if (snn != null)
                {
                    snn.Shutdown();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code example #23
        /// <summary>Ensure mtime and atime can be loaded from fsimage.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestLoadMtimeAtime()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs = cluster.GetFileSystem();
                string userDir             = hdfs.GetHomeDirectory().ToUri().GetPath().ToString();
                Path   file = new Path(userDir, "file");
                Path   dir  = new Path(userDir, "/dir");
                Path   link = new Path(userDir, "/link");
                hdfs.CreateNewFile(file);
                hdfs.Mkdirs(dir);
                hdfs.CreateSymlink(file, link, false);
                long mtimeFile = hdfs.GetFileStatus(file).GetModificationTime();
                long atimeFile = hdfs.GetFileStatus(file).GetAccessTime();
                long mtimeDir  = hdfs.GetFileStatus(dir).GetModificationTime();
                long mtimeLink = hdfs.GetFileLinkStatus(link).GetModificationTime();
                long atimeLink = hdfs.GetFileLinkStatus(link).GetAccessTime();
                // save namespace and restart cluster
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                hdfs.SaveNamespace();
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.Shutdown();
                cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(1).Build();
                cluster.WaitActive();
                hdfs = cluster.GetFileSystem();
                NUnit.Framework.Assert.AreEqual(mtimeFile, hdfs.GetFileStatus(file).GetModificationTime
                                                    ());
                NUnit.Framework.Assert.AreEqual(atimeFile, hdfs.GetFileStatus(file).GetAccessTime
                                                    ());
                NUnit.Framework.Assert.AreEqual(mtimeDir, hdfs.GetFileStatus(dir).GetModificationTime
                                                    ());
                NUnit.Framework.Assert.AreEqual(mtimeLink, hdfs.GetFileLinkStatus(link).GetModificationTime
                                                    ());
                NUnit.Framework.Assert.AreEqual(atimeLink, hdfs.GetFileLinkStatus(link).GetAccessTime
                                                    ());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code example #24
        /*
         * Try to read files that exist inside a snapshot but have been deleted from
         * their original location, after restarting the NameNode post-checkpoint.
         * See HDFS-5427.
         */
        /// <exception cref="System.Exception"/>
        public virtual void TestReadSnapshotFileWithCheckpoint()
        {
            Path foo = new Path("/foo");

            hdfs.Mkdirs(foo);
            hdfs.AllowSnapshot(foo);
            Path bar = new Path("/foo/bar");

            DFSTestUtil.CreateFile(hdfs, bar, 100, (short)2, 100024L);
            hdfs.CreateSnapshot(foo, "s1");
            NUnit.Framework.Assert.IsTrue(hdfs.Delete(bar, true));
            // checkpoint
            NameNode nameNode = cluster.GetNameNode();

            NameNodeAdapter.EnterSafeMode(nameNode, false);
            NameNodeAdapter.SaveNamespace(nameNode);
            NameNodeAdapter.LeaveSafeMode(nameNode);
            // restart namenode to load snapshot files from fsimage
            cluster.RestartNameNode(true);
            string snapshotPath = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotPath
                                      (foo.ToString(), "s1/bar");

            DFSTestUtil.ReadFile(hdfs, new Path(snapshotPath));
        }
Code example #25
 public virtual void SetUp()
 {
     conf = new Configuration();
     conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, 2);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
     cluster.WaitActive();
     fsn   = cluster.GetNamesystem();
     fsdir = fsn.GetFSDirectory();
     hdfs  = cluster.GetFileSystem();
     DFSTestUtil.CreateFile(hdfs, file1, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file2, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file3, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file5, 1024, Replication, seed);
     hdfs.Mkdirs(sub2);
 }
Code example #26
        public virtual void TestBootstrapStandbyWithActiveNN()
        {
            // make nn0 active
            cluster.TransitionToActive(0);
            // do ops and generate in-progress edit log data
            Configuration         confNN1 = cluster.GetConfiguration(1);
            DistributedFileSystem dfs     = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs
                                                (cluster, confNN1);

            for (int i = 1; i <= 10; i++)
            {
                dfs.Mkdirs(new Path("/test" + i));
            }
            dfs.Close();
            // shutdown nn1 and delete its edit log files
            cluster.ShutdownNameNode(1);
            DeleteEditLogIfExists(confNN1);
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                  true);
            cluster.GetNameNodeRpc(0).SaveNamespace();
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                  true);
            // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
            // immediately after saveNamespace
            int rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive" }, confNN1
                                          );

            NUnit.Framework.Assert.AreEqual("Mismatches return code", 6, rc);
            // check with -skipSharedEditsCheck
            rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive", "-skipSharedEditsCheck" }, confNN1);
            NUnit.Framework.Assert.AreEqual("Mismatches return code", 0, rc);
            // Checkpoint as fast as we can, in a tight loop.
            confNN1.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            cluster.RestartNameNode(1);
            cluster.TransitionToStandby(1);
            NameNode nn0 = cluster.GetNameNode(0);

            HATestUtil.WaitForStandbyToCatchUp(nn0, cluster.GetNameNode(1));
            long expectedCheckpointTxId = NameNodeAdapter.GetNamesystem(nn0).GetFSImage().GetMostRecentCheckpointTxId
                                              ();

            HATestUtil.WaitForCheckpoint(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId
                                                                      ));
            // Should have copied over the namespace
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of((int)expectedCheckpointTxId
                                                                                ));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
Code example #27
File: TestMover.cs  Project: orf53975/hadoop.net
        public virtual void TestScheduleBlockWithinSameNode()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes
                                         (new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testScheduleWithinSameNode/file";
                Path   dir  = new Path("/testScheduleWithinSameNode");
                dfs.Mkdirs(dir);
                // write to DISK
                dfs.SetStoragePolicy(dir, "HOT");
                {
                    FSDataOutputStream @out = dfs.Create(new Path(file));
                    @out.WriteChars("testScheduleWithinSameNode");
                    @out.Close();
                }
                //verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(dir, "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", dir.ToString(
                                                                                  ) });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successfull", 0, rc
                                                );
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType_1 in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Archive == storageType_1);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code example #28
            /// <exception cref="System.Exception"/>
            public object Run()
            {
                DistributedFileSystem fs = this._enclosing.cluster.GetFileSystem();
                Path d1 = new Path(reservedRaw, "dir1");

                try
                {
                    fs.Mkdirs(d1);
                    NUnit.Framework.Assert.Fail("access to /.reserved/raw is superuser-only operation"
                                                );
                }
                catch (AccessControlException e)
                {
                    GenericTestUtils.AssertExceptionContains("Superuser privilege is required", e);
                }
                return(null);
            }
Code example #29
 /// <summary>Create files/directories/snapshots.</summary>
 /// <exception cref="System.Exception"/>
 internal virtual void Prepare(DistributedFileSystem dfs, short repl)
 {
     foreach (Path d in dirs)
     {
         dfs.Mkdirs(d);
     }
     foreach (Path file in files)
     {
         DFSTestUtil.CreateFile(dfs, file, fileSize, repl, 0L);
     }
     foreach (KeyValuePair <Path, IList <string> > entry in snapshotMap)
     {
         foreach (string snapshot in entry.Value)
         {
             SnapshotTestHelper.CreateSnapshot(dfs, entry.Key, snapshot);
         }
     }
 }
Code example #30
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestCheckpoint()
        {
            Configuration conf = new Configuration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare
                                                             );
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                QueryForPreparation(dfs);
                dfs.Mkdirs(foo);
                long txid = dfs.RollEdits();
                NUnit.Framework.Assert.IsTrue(txid > 0);
                int retries = 0;
                while (++retries < 5)
                {
                    NNStorage storage = dfsCluster.GetNamesystem(1).GetFSImage().GetStorage();
                    if (storage.GetFsImageName(txid - 1) != null)
                    {
                        return;
                    }
                    Sharpen.Thread.Sleep(1000);
                }
                NUnit.Framework.Assert.Fail("new checkpoint does not exist");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }