Example #1
        /// <summary>Test FileStatus of snapshot file before/after rename</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotRename()
        {
            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, Replication, seed);
            // Create snapshot for sub1
            Path snapshotRoot = SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s1");
            Path ssPath       = new Path(snapshotRoot, file1.GetName());

            NUnit.Framework.Assert.IsTrue(hdfs.Exists(ssPath));
            FileStatus statusBeforeRename = hdfs.GetFileStatus(ssPath);

            // Rename the snapshot
            hdfs.RenameSnapshot(sub1, "s1", "s2");
            // <sub1>/.snapshot/s1/file1 should no longer exist
            NUnit.Framework.Assert.IsFalse(hdfs.Exists(ssPath));
            snapshotRoot = SnapshotTestHelper.GetSnapshotRoot(sub1, "s2");
            ssPath       = new Path(snapshotRoot, file1.GetName());
            // Instead, <sub1>/.snapshot/s2/file1 should exist
            NUnit.Framework.Assert.IsTrue(hdfs.Exists(ssPath));
            FileStatus statusAfterRename = hdfs.GetFileStatus(ssPath);

            // FileStatus of the snapshot should not change except the path
            NUnit.Framework.Assert.IsFalse(statusBeforeRename.Equals(statusAfterRename));
            statusBeforeRename.SetPath(statusAfterRename.GetPath());
            NUnit.Framework.Assert.AreEqual(statusBeforeRename.ToString(), statusAfterRename.ToString());
        }
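The paths checked above rely on HDFS's fixed snapshot layout: a snapshot of <dir> named s1 exposes its contents under <dir>/.snapshot/s1/. A minimal sketch of building such a path with the same Sharpen-style Path API used in these examples (the helper name is illustrative, not part of the test fixture):

        // Builds <dir>/.snapshot/<snapshotName>/<fileName>, i.e. the path that
        // hdfs.Exists() is given before and after the snapshot rename above.
        private static Path SnapshotFilePath(Path dir, string snapshotName, string fileName)
        {
            return new Path(new Path(new Path(dir, ".snapshot"), snapshotName), fileName);
        }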
Example #2
        public virtual void TestConcatInEditLog()
        {
            Path TestDir = new Path("/testConcatInEditLog");
            long FileLen = blockSize;

            // 1. Concat some files
            Path[] srcFiles = new Path[3];
            for (int i = 0; i < srcFiles.Length; i++)
            {
                Path path = new Path(TestDir, "src-" + i);
                DFSTestUtil.CreateFile(dfs, path, FileLen, ReplFactor, 1);
                srcFiles[i] = path;
            }
            Path targetFile = new Path(TestDir, "target");

            DFSTestUtil.CreateFile(dfs, targetFile, FileLen, ReplFactor, 1);
            dfs.Concat(targetFile, srcFiles);
            // 2. Verify the concat operation basically worked, and record
            // file status.
            NUnit.Framework.Assert.IsTrue(dfs.Exists(targetFile));
            FileStatus origStatus = dfs.GetFileStatus(targetFile);

            // 3. Restart NN to force replay from edit log
            cluster.RestartNameNode(true);
            // 4. Verify concat operation was replayed correctly and file status
            // did not change.
            NUnit.Framework.Assert.IsTrue(dfs.Exists(targetFile));
            NUnit.Framework.Assert.IsFalse(dfs.Exists(srcFiles[0]));
            FileStatus statusAfterRestart = dfs.GetFileStatus(targetFile);

            NUnit.Framework.Assert.AreEqual(origStatus.GetModificationTime(), statusAfterRestart.GetModificationTime());
        }
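Steps 3 and 4 above use a pattern that recurs in several of these examples: mutate the namespace, restart the NameNode so the operation is replayed from the edit log, and verify again. A minimal sketch of that pattern, assuming the same cluster and dfs handles the test already holds (the helper name is illustrative):

        // Assert that a path survives an edit-log replay with its modification time intact.
        private static void AssertSurvivesRestart(MiniDFSCluster cluster, DistributedFileSystem dfs, Path path)
        {
            FileStatus before = dfs.GetFileStatus(path);
            cluster.RestartNameNode(true);               // force replay from the edit log
            FileStatus after = dfs.GetFileStatus(path);  // must still resolve after replay
            NUnit.Framework.Assert.AreEqual(before.GetModificationTime(), after.GetModificationTime());
        }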
Example #3
        public virtual void TestSocketFactory()
        {
            // Create a standard mini-cluster
            Configuration  sconf   = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).NumDataNodes(1).Build();
            int nameNodePort = cluster.GetNameNodePort();
            // Get a reference to its DFS directly
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue(fs is DistributedFileSystem);
            DistributedFileSystem directDfs = (DistributedFileSystem)fs;
            Configuration         cconf     = GetCustomSocketConfigs(nameNodePort);

            fs = FileSystem.Get(cconf);
            NUnit.Framework.Assert.IsTrue(fs is DistributedFileSystem);
            DistributedFileSystem dfs               = (DistributedFileSystem)fs;
            JobClient             client            = null;
            MiniMRYarnCluster     miniMRYarnCluster = null;

            try
            {
                // This will test RPC to the NameNode only.
                // could we test Client-DataNode connections?
                Path filePath = new Path("/dir");
                NUnit.Framework.Assert.IsFalse(directDfs.Exists(filePath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(filePath));
                directDfs.Mkdirs(filePath);
                NUnit.Framework.Assert.IsTrue(directDfs.Exists(filePath));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(filePath));
                // This will test RPC to a Resource Manager
                fs = FileSystem.Get(sconf);
                JobConf jobConf = new JobConf();
                FileSystem.SetDefaultUri(jobConf, fs.GetUri().ToString());
                miniMRYarnCluster = InitAndStartMiniMRYarnCluster(jobConf);
                JobConf jconf = new JobConf(miniMRYarnCluster.GetConfig());
                jconf.Set("hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.ipc.DummySocketFactory"
                          );
                jconf.Set(MRConfig.FrameworkName, MRConfig.YarnFrameworkName);
                string   rmAddress = jconf.Get("yarn.resourcemanager.address");
                string[] split     = rmAddress.Split(":");
                jconf.Set("yarn.resourcemanager.address", split[0] + ':' + (System.Convert.ToInt32
                                                                                (split[1]) + 10));
                client = new JobClient(jconf);
                JobStatus[] jobs = client.JobsToComplete();
                NUnit.Framework.Assert.IsTrue(jobs.Length == 0);
            }
            finally
            {
                CloseClient(client);
                CloseDfs(dfs);
                CloseDfs(directDfs);
                StopMiniMRYarnCluster(miniMRYarnCluster);
                ShutdownDFSCluster(cluster);
            }
        }
Example #4
        public virtual void TestRollbackWithQJM()
        {
            Configuration      conf    = new HdfsConfiguration();
            MiniJournalCluster mjc     = null;
            MiniDFSCluster     cluster = null;
            Path foo = new Path("/foo");
            Path bar = new Path("/bar");

            try
            {
                mjc = new MiniJournalCluster.Builder(conf).NumJournalNodes(NumJournalNodes).Build();
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI(JournalId).ToString());
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade",
                                                                               "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                dfs.Close();
                // rollback
                cluster.RestartNameNode("-rollingUpgrade", "rollback");
                // make sure /foo is still there, but /bar is not
                dfs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
                // check storage in JNs
                for (int i = 0; i < NumJournalNodes; i++)
                {
                    FilePath dir = mjc.GetCurrentDir(0, JournalId);
                    // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
                    // marker, mkdir, endSegment)
                    CheckJNStorage(dir, 4, 7);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (mjc != null)
                {
                    mjc.Shutdown();
                }
            }
        }
Example #5
 /// <exception cref="System.IO.IOException"/>
 private static void AssertFile(Path s1, Path s2, Path file, params bool[] expected)
 {
     Path[] paths = new Path[] { file, new Path(s1, "bar/" + file.GetName()), new Path(s2, file.GetName()) };
     NUnit.Framework.Assert.AreEqual(expected.Length, paths.Length);
     for (int i = 0; i < paths.Length; i++)
     {
         bool computed = hdfs.Exists(paths[i]);
         NUnit.Framework.Assert.AreEqual("Failed on " + paths[i], expected[i], computed);
     }
 }
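An illustrative call of the helper above: assert that file exists in the live tree and under snapshot s1, but not under snapshot s2. The flags line up positionally with the paths array; the variables are assumed to come from the surrounding test fixture.

     // expected[] maps to { file, <s1>/bar/<name>, <s2>/<name> } in order
     AssertFile(s1, s2, file, true, true, false);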
Example #6
        /// <exception cref="System.IO.IOException"/>
        private static void RollbackRollingUpgrade(Path foo, Path bar, Path file, byte[] data, MiniDFSCluster cluster)
        {
            MiniDFSCluster.DataNodeProperties dnprop = cluster.StopDataNode(0);
            cluster.RestartNameNode("-rollingUpgrade", "rollback");
            cluster.RestartDataNode(dnprop, true);
            DistributedFileSystem dfs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
            AppendTestUtil.CheckFullFile(dfs, file, data.Length, data);
        }
Example #7
 /// <summary>Ensures that the blocks belonging to the deleted file are in trash</summary>
 /// <exception cref="System.Exception"/>
 private void DeleteAndEnsureInTrash(Path pathToDelete, FilePath blockFile, FilePath trashFile)
 {
     NUnit.Framework.Assert.IsTrue(blockFile.Exists());
     NUnit.Framework.Assert.IsFalse(trashFile.Exists());
     // Now delete the file and ensure the corresponding block is in trash
     Log.Info("Deleting file " + pathToDelete + " during rolling upgrade");
     fs.Delete(pathToDelete, false);
     System.Diagnostics.Debug.Assert((!fs.Exists(pathToDelete)));
     TriggerHeartBeats();
     NUnit.Framework.Assert.IsTrue(trashFile.Exists());
     NUnit.Framework.Assert.IsFalse(blockFile.Exists());
 }
Example #8
        /// <exception cref="System.IO.IOException"/>
        private void TestPersistHelper(Configuration conf)
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem          fsn = cluster.GetNamesystem();
                DistributedFileSystem fs  = cluster.GetFileSystem();
                Path dir   = new Path("/abc/def");
                Path file1 = new Path(dir, "f1");
                Path file2 = new Path(dir, "f2");
                // create an empty file f1
                fs.Create(file1).Close();
                // create an under-construction file f2
                FSDataOutputStream @out = fs.Create(file2);
                @out.WriteBytes("hello");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                // checkpoint
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                fs.SaveNamespace();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.RestartNameNode();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fs.IsDirectory(dir));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
                // check internals of file2
                INodeFile file2Node = fsn.dir.GetINode4Write(file2.ToString()).AsFile();
                NUnit.Framework.Assert.AreEqual("hello".Length, file2Node.ComputeFileSize());
                NUnit.Framework.Assert.IsTrue(file2Node.IsUnderConstruction());
                BlockInfoContiguous[] blks = file2Node.GetBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Length);
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction, blks[0].GetBlockUCState());
                // check lease manager
                LeaseManager.Lease lease = fsn.leaseManager.GetLeaseByPath(file2.ToString());
                NUnit.Framework.Assert.IsNotNull(lease);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
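The three calls in the middle of the test (enter safe mode, save the namespace, leave safe mode) are the standard way to force a fresh fsimage before a restart. Extracted as a sketch using only calls that appear above (the helper name is illustrative):

        private static void SaveNamespaceCheckpoint(DistributedFileSystem fs)
        {
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);  // SaveNamespace requires safe mode
            fs.SaveNamespace();                                          // persist the namespace to a new fsimage
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
        }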
Example #9
 public virtual void TearDown()
 {
     if (fs.Exists(new Path("/sub1")))
     {
         if (fs.Exists(new Path("/sub1/.snapshot")))
         {
             foreach (FileStatus st in fs.ListStatus(new Path("/sub1/.snapshot")))
             {
                 fs.DeleteSnapshot(new Path("/sub1"), st.GetPath().GetName());
             }
             fs.DisallowSnapshot(new Path("/sub1"));
         }
         fs.Delete(new Path("/sub1"), true);
     }
 }
Example #10
 /// <summary>
 /// The idea for making sure that there is no more than one instance
 /// running in an HDFS is to create a file in HDFS, write the hostname
 /// of the machine on which the instance is running to the file, and
 /// keep the file open until the instance exits.
 /// </summary>
 /// <remarks>
 /// The idea for making sure that there is no more than one instance
 /// running in an HDFS is to create a file in HDFS, write the hostname
 /// of the machine on which the instance is running to the file, and
 /// keep the file open until the instance exits.
 /// This prevents a second instance from running because it cannot
 /// create the file while the first one is running.
 /// This method checks whether there is a running instance. If there is
 /// none, it marks this instance as running.
 /// Note that this is an atomic operation.
 /// </remarks>
 /// <returns>
 /// null if there is a running instance;
 /// otherwise, the output stream to the newly created file.
 /// </returns>
 /// <exception cref="System.IO.IOException"/>
 private OutputStream CheckAndMarkRunning()
 {
     try
     {
         if (fs.Exists(idPath))
         {
             // try appending to it so that it will fail fast if another balancer is
             // running.
             IOUtils.CloseStream(fs.Append(idPath));
             fs.Delete(idPath, true);
         }
         FSDataOutputStream fsout = fs.Create(idPath, false);
         // mark balancer idPath to be deleted during filesystem closure
         fs.DeleteOnExit(idPath);
         if (write2IdFile)
         {
             fsout.WriteBytes(Sharpen.Runtime.GetLocalHost().GetHostName());
             fsout.Hflush();
         }
         return(fsout);
     }
     catch (RemoteException e)
     {
         if (typeof(AlreadyBeingCreatedException).FullName.Equals(e.GetClassName()))
         {
             return(null);
         }
         else
         {
             throw;
         }
     }
 }
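A sketch of how a caller might use CheckAndMarkRunning, given the contract described in the comment above: a null return means another instance already holds the id file, otherwise the returned stream must be kept open for the lifetime of this instance. The surrounding control flow is an assumption, not taken from the original class.

     OutputStream mark = CheckAndMarkRunning();
     if (mark == null)
     {
         // another instance holds idPath open; refuse to start a second one
         throw new System.IO.IOException("Another instance is already running");
     }
     // keep 'mark' open; idPath is deleted on filesystem close (see DeleteOnExit above)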
Example #11
        /// <summary>
        /// Check that the blocks of the dst file are cleaned up after a rename with
        /// overwrite, then restart the NN to verify that the rename was persisted.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRenameWithOverwrite()
        {
            short          replFactor = 2;
            long           blockSize  = 512;
            Configuration  conf       = new Configuration();
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(replFactor).Build();
            DistributedFileSystem dfs = cluster.GetFileSystem();

            try
            {
                long   fileLen = blockSize * 3;
                string src     = "/foo/src";
                string dst     = "/foo/dst";
                Path   srcPath = new Path(src);
                Path   dstPath = new Path(dst);
                DFSTestUtil.CreateFile(dfs, srcPath, fileLen, replFactor, 1);
                DFSTestUtil.CreateFile(dfs, dstPath, fileLen, replFactor, 1);
                LocatedBlocks lbs = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), dst, 0, fileLen);
                BlockManager bm = NameNodeAdapter.GetNamesystem(cluster.GetNameNode()).GetBlockManager();
                NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) != null);
                dfs.Rename(srcPath, dstPath, Options.Rename.Overwrite);
                NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) == null);
                // Restart the NN and check that the rename was persisted
                cluster.RestartNameNodes();
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
            }
            finally
            {
                if (dfs != null)
                {
                    dfs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #12
        /// <exception cref="System.Exception"/>
        public virtual void TestDowngrade()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                TestRollingUpgrade.QueryForPreparation(dfs);
                dfs.Close();
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "downgrade");
                // Once downgraded, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(dfsCluster.GetNamesystem(0).GetFSImage().HasRollbackFSImage());
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #13
        /// <exception cref="System.IO.IOException"/>
        private static void StartRollingUpgrade(Path foo, Path bar, Path file, byte[] data, MiniDFSCluster cluster)
        {
            DistributedFileSystem dfs = cluster.GetFileSystem();

            //start rolling upgrade
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            dfs.Mkdirs(bar);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            //truncate a file
            int newLength = DFSUtil.GetRandom().Next(data.Length - 1) + 1;

            dfs.Truncate(file, newLength);
            TestFileTruncate.CheckBlockRecovery(file, dfs);
            AppendTestUtil.CheckFullFile(dfs, file, newLength, data);
        }
Example #14
 /// <summary>Check the functionality of a snapshot.</summary>
 /// <param name="hdfs">DistributedFileSystem instance</param>
 /// <param name="snapshotRoot">The root of the snapshot</param>
 /// <param name="snapshottedDir">The snapshotted directory</param>
 /// <exception cref="System.Exception"/>
 public static void CheckSnapshotCreation(DistributedFileSystem hdfs, Path snapshotRoot, Path snapshottedDir)
 {
     // Currently we only check if the snapshot was created successfully
     NUnit.Framework.Assert.IsTrue(hdfs.Exists(snapshotRoot));
     // Compare the snapshot with the current dir
     FileStatus[] currentFiles  = hdfs.ListStatus(snapshottedDir);
     FileStatus[] snapshotFiles = hdfs.ListStatus(snapshotRoot);
     NUnit.Framework.Assert.AreEqual("snapshottedDir=" + snapshottedDir + ", snapshotRoot="
                                     + snapshotRoot, currentFiles.Length, snapshotFiles.Length);
 }
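An illustrative invocation of the helper above, using the same GetSnapshotRoot convention as Example #1 (the directory and snapshot names are assumptions for the sketch):

     Path snapRoot = SnapshotTestHelper.GetSnapshotRoot(new Path("/sub1"), "s1");
     CheckSnapshotCreation(hdfs, snapRoot, new Path("/sub1"));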
Example #15
 /// <summary>Create snapshot for a dir using a given snapshot name</summary>
 /// <param name="hdfs">DistributedFileSystem instance</param>
 /// <param name="snapshotRoot">The dir to be snapshotted</param>
 /// <param name="snapshotName">The name of the snapshot</param>
 /// <returns>The path of the snapshot root</returns>
 /// <exception cref="System.Exception"/>
 public static Path CreateSnapshot(DistributedFileSystem hdfs, Path snapshotRoot,
                                   string snapshotName)
 {
     Log.Info("createSnapshot " + snapshotName + " for " + snapshotRoot);
     NUnit.Framework.Assert.IsTrue(hdfs.Exists(snapshotRoot));
     hdfs.AllowSnapshot(snapshotRoot);
     hdfs.CreateSnapshot(snapshotRoot, snapshotName);
     // set quota to a large value for testing counts
     hdfs.SetQuota(snapshotRoot, long.MaxValue - 1, long.MaxValue - 1);
     return(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.SnapshotTestHelper.GetSnapshotRoot(snapshotRoot, snapshotName));
 }
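An illustrative use of CreateSnapshot, mirroring how Example #1 obtains its snapshot root (the directory and snapshot names are assumptions for the sketch):

     Path snapshotRoot = CreateSnapshot(hdfs, new Path("/sub1"), "s1");
     NUnit.Framework.Assert.IsTrue(hdfs.Exists(snapshotRoot));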
Example #16
        public virtual void TestEditsLogRename()
        {
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path src1 = GetTestRootPath(fc, "testEditsLogRename/srcdir/src1");
            Path dst1 = GetTestRootPath(fc, "testEditsLogRename/dstdir/dst1");

            CreateFile(src1);
            fs.Mkdirs(dst1.GetParent());
            CreateFile(dst1);
            // Set quota so that dst1's parent cannot accept new files/directories under it
            fs.SetQuota(dst1.GetParent(), 2, HdfsConstants.QuotaDontSet);
            // Free up quota for a subsequent rename
            fs.Delete(dst1, true);
            Rename(src1, dst1, true, true, false, Options.Rename.Overwrite);
            // Restart the cluster and ensure the above operations can be
            // loaded from the edits log
            RestartCluster();
            fs   = cluster.GetFileSystem();
            src1 = GetTestRootPath(fc, "testEditsLogRename/srcdir/src1");
            dst1 = GetTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
            NUnit.Framework.Assert.IsFalse(fs.Exists(src1));
            // ensure src1 is already renamed
            NUnit.Framework.Assert.IsTrue(fs.Exists(dst1));
        }
Example #17
        /// <exception cref="System.IO.IOException"/>
        protected internal LocatedBlocks EnsureFileReplicasOnStorageType(Path path, StorageType storageType)
        {
            // Ensure that the returned block locations are correct!
            Log.Info("Ensure path: " + path + " is on StorageType: " + storageType);
            Assert.AssertThat(fs.Exists(path), IS.Is(true));
            long          fileLength    = client.GetFileInfo(path.ToString()).GetLen();
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(path.ToString(), 0, fileLength);

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Assert.AssertThat(locatedBlock.GetStorageTypes()[0], IS.Is(storageType));
            }
            return(locatedBlocks);
        }
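An illustrative call of the helper above, asserting that every replica of a file landed on one storage type. The path and the StorageType member name (Archive) are assumptions for the sketch:

            LocatedBlocks lbs = EnsureFileReplicasOnStorageType(new Path("/cold/data"), StorageType.Archive);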
Example #18
        /// <exception cref="System.Exception"/>
        public virtual void TestFinalize()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                FSImage fsimage = dfsCluster.GetNamesystem(0).GetFSImage();
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                QueryForPreparation(dfs);
                // The NN should have a copy of the fsimage in case of rollbacks.
                NUnit.Framework.Assert.IsTrue(fsimage.HasRollbackFSImage());
                info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
                NUnit.Framework.Assert.IsTrue(info.IsFinalized());
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                // Once finalized, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(fsimage.HasRollbackFSImage());
                // Should have no problem in restart and replaying edits that include
                // the FINALIZE op.
                dfsCluster.RestartNameNode(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #19
        public virtual void TestNamespaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem dfs     = cluster.GetFileSystem();

            try
            {
                // 1: create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // 2: set the quota of /nqdir0/qdir1 to be 6
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, 6, HdfsConstants.QuotaDontSet);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
                Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir2, 7, HdfsConstants.QuotaDontSet);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
                Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir3));
                dfs.SetQuota(quotaDir3, 2, HdfsConstants.QuotaDontSet);
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir3, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 6: Create directory /nqdir0/qdir1/qdir21/nqdir33
                tempPath = new Path(quotaDir3, "nqdir33");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 7: Create directory /nqdir0/qdir1/qdir20/nqdir31
                tempPath = new Path(quotaDir2, "nqdir31");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 8: Create directory /nqdir0/qdir1/qdir20/nqdir33
                tempPath     = new Path(quotaDir2, "nqdir33");
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 9: Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
                tempPath = new Path(quotaDir2, "nqdir30");
                dfs.Rename(new Path(quotaDir3, "nqdir32"), tempPath);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 10: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(tempPath, quotaDir3));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(tempPath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(new Path(quotaDir3, "nqdir30")));
                // 10.a: Rename /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/nqdir32
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(tempPath, new Path(quotaDir3, "nqdir32")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(tempPath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(new Path(quotaDir3, "nqdir32")));
                // 11: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0
                NUnit.Framework.Assert.IsTrue(dfs.Rename(tempPath, new Path("/nqdir0")));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 12: Create directory /nqdir0/nqdir30/nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
                // 13: Move /nqdir0/nqdir30 /nqdir0/qdir1/qdir20/qdir30
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(new Path("/nqdir0/nqdir30"), tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 14: Move /nqdir0/qdir1/qdir21 /nqdir0/qdir1/qdir20
                NUnit.Framework.Assert.IsTrue(dfs.Rename(quotaDir3, quotaDir2));
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                tempPath = new Path(quotaDir2, "qdir21");
                c        = dfs.GetContentSummary(tempPath);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 15: Delete /nqdir0/qdir1/qdir20/qdir21
                dfs.Delete(tempPath, true);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 16: Move /nqdir0/nqdir30 to /nqdir0/qdir1/qdir20
                NUnit.Framework.Assert.IsTrue(dfs.Rename(new Path("/nqdir0/nqdir30"), quotaDir2));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 5);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                NUnit.Framework.Assert.AreEqual(14, cluster.GetNamesystem().GetFSDirectory().GetYieldCount());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
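The test above repeats the same content-summary check many times; a sketch that factors it out, using only calls already present in the example (the helper name is illustrative):

        private static void AssertDirCountAndQuota(DistributedFileSystem dfs, Path p, long expectedDirs, long expectedQuota)
        {
            ContentSummary c = dfs.GetContentSummary(p);
            NUnit.Framework.Assert.AreEqual(expectedDirs, c.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(expectedQuota, c.GetQuota());
        }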
Example #20
        public virtual void TestBlockSynchronization()
        {
            int           OrgFileSize = 3000;
            Configuration conf        = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
            cluster.WaitActive();
            //create a file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            string filestr            = "/foo";
            Path   filepath           = new Path(filestr);

            DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
            DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
            //get block info for the last block
            LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr);

            DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
            NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
            //connect to data nodes
            DataNode[] datanodes = new DataNode[ReplicationNum];
            for (int i = 0; i < ReplicationNum; i++)
            {
                datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
            }
            //verify Block Info
            ExtendedBlock lastblock = locatedblock.GetBlock();

            DataNode.Log.Info("newblocks=" + lastblock);
            for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
            {
                CheckMetaInfo(lastblock, datanodes[i_1]);
            }
            DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
            cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName, new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
            // expire lease to trigger block recovery.
            WaitLeaseRecovery(cluster);
            Block[] updatedmetainfo = new Block[ReplicationNum];
            long    oldSize         = lastblock.GetNumBytes();

            lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(),
                                                                      filestr).GetBlock();
            long currentGS = lastblock.GetGenerationStamp();

            for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
            {
                updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2]).GetStoredBlock(lastblock.GetBlockPoolId(), lastblock.GetBlockId());
                NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
                NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
            }
            // verify that lease recovery does not occur when namenode is in safemode
            System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode.");
            filestr  = "/foo.safemode";
            filepath = new Path(filestr);
            dfs.Create(filepath, (short)1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
            WaitLeaseRecovery(cluster);
            // verify that we still cannot recover the lease
            LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());

            NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1",
                                          lm.CountLease() == 1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
        }
Example #21
        public virtual void TestDFSAdminRollingUpgradeCommands()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                Path foo = new Path("/foo");
                Path bar = new Path("/bar");
                Path baz = new Path("/baz");
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    DFSAdmin dfsadmin         = new DFSAdmin(conf);
                    dfs.Mkdirs(foo);
                    //illegal argument "abc" to rollingUpgrade option
                    RunCmd(dfsadmin, false, "-rollingUpgrade", "abc");
                    CheckMxBeanIsNull();
                    //query rolling upgrade
                    RunCmd(dfsadmin, true, "-rollingUpgrade");
                    //start rolling upgrade
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    RunCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                    //query rolling upgrade
                    RunCmd(dfsadmin, true, "-rollingUpgrade", "query");
                    CheckMxBean();
                    dfs.Mkdirs(bar);
                    //finalize rolling upgrade
                    RunCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
                    // RollingUpgradeInfo should be null after finalization, both via
                    // Java API and in JMX
                    NUnit.Framework.Assert.IsNull(dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Query));
                    CheckMxBeanIsNull();
                    dfs.Mkdirs(baz);
                    RunCmd(dfsadmin, true, "-rollingUpgrade");
                    // All directories created before the upgrade, while the upgrade was in
                    // progress, and after the upgrade was finalized should still exist
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(baz));
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    dfs.SaveNamespace();
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                }
                // Ensure directories exist after restart
                cluster.RestartNameNode();
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
                    NUnit.Framework.Assert.IsTrue(dfs.Exists(baz));
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
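The RunCmd calls above exercise the same code path as the hdfs dfsadmin command-line tool; programmatically that amounts to the following sketch (RunCmd in the test additionally asserts on the exit code):

                DFSAdmin dfsadmin = new DFSAdmin(conf);
                // 0 indicates success; a non-zero exit code means the command was rejected
                int exitCode = dfsadmin.Run(new string[] { "-rollingUpgrade", "query" });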
Example #22
        public virtual void TestQuotaCommands()
        {
            Configuration conf = new HdfsConfiguration();
            // set a smaller block size so that we can test with smaller
            // Space quotas
            int DefaultBlockSize = 512;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DFSAdmin admin            = new DFSAdmin(conf);

            try
            {
                int   fileLen     = 1024;
                short replication = 5;
                long  spaceQuota  = fileLen * replication * 15 / 8;
                // 1: create a directory /test and set its quota to be 3
                Path parent = new Path("/test");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(parent));
                string[] args = new string[] { "-setQuota", "3", parent.ToString() };
                RunCommand(admin, args, false);
                //try setting space quota with a 'binary prefix'
                RunCommand(admin, false, "-setSpaceQuota", "2t", parent.ToString());
                NUnit.Framework.Assert.AreEqual(2L << 40, dfs.GetContentSummary(parent).GetSpaceQuota());
                // set diskspace quota to 10000
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota), parent
                           .ToString());
                // 2: create directory /test/data0
                Path childDir0 = new Path(parent, "data0");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir0));
                // 3: create a file /test/datafile0
                Path childFile0 = new Path(parent, "datafile0");
                DFSTestUtil.CreateFile(fs, childFile0, fileLen, replication, 0);
                // 4: count -q /test
                ContentSummary c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 5: count -q /test/data0
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // check disk space consumed
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                // 6: create a directory /test/data1
                Path childDir1    = new Path(parent, "data1");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(childDir1));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                OutputStream fout;
                // 7: create a file /test/datafile1
                Path childFile1 = new Path(parent, "datafile1");
                hasException = false;
                try
                {
                    fout = dfs.Create(childFile1);
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 8: clear quota /test
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 9: clear quota /test/data0
                RunCommand(admin, new string[] { "-clrQuota", childDir0.ToString() }, false);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // 10: create a file /test/datafile1
                fout = dfs.Create(childFile1, replication);
                // 10.s: but writing fileLen bytes should result in a quota exception
                try
                {
                    fout.Write(new byte[fileLen]);
                    fout.Close();
                    NUnit.Framework.Assert.Fail();
                }
                catch (QuotaExceededException)
                {
                    IOUtils.CloseStream(fout);
                }
                //delete the file
                dfs.Delete(childFile1, false);
                // 9.s: clear diskspace quota
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), -1);
                // now creating childFile1 should succeed
                DFSTestUtil.CreateFile(dfs, childFile1, fileLen, replication, 0);
                // 11: set the quota of /test to be 1
                // HADOOP-5872 - we can set quota even if it is immediately violated
                args = new string[] { "-setQuota", "1", parent.ToString() };
                RunCommand(admin, args, false);
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(fileLen),
                           args[2]);
                // for space quota
                // 12: set the quota of /test/data0 to be 1
                args = new string[] { "-setQuota", "1", childDir0.ToString() };
                RunCommand(admin, args, false);
                // 13: should not be able to create a directory under data0
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(new Path(childDir0, "in")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount() + c.GetFileCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 1);
                // 14a: set quota on a non-existent directory
                Path nonExistentPath = new Path("/test1");
                NUnit.Framework.Assert.IsFalse(dfs.Exists(nonExistentPath));
                args = new string[] { "-setQuota", "1", nonExistentPath.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.ToString());
                // for space quota
                // 14b: set quota on a file
                NUnit.Framework.Assert.IsTrue(dfs.IsFile(childFile0));
                args[1] = childFile0.ToString();
                RunCommand(admin, args, true);
                // same for space quota
                RunCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
                // 15a: clear quota on a file
                args[0] = "-clrQuota";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 15b: clear quota on a non-existent directory
                args[1] = nonExistentPath.ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 16a: set the quota of /test to be 0
                args = new string[] { "-setQuota", "0", parent.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "0", args[2]);
                // 16b: set the quota of /test to be -1
                args[1] = "-1";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16c: set the quota of /test to be Long.MAX_VALUE+1
                args[1] = (long.MaxValue + 1L).ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16d: set the quota of /test to be a non integer
                args[1] = "33aa1.5";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16e: set space quota with a value larger than Long.MAX_VALUE
                RunCommand(admin, true, "-setSpaceQuota", (long.MaxValue / 1024 / 1024 + 1024) +
                           "m", args[2]);
                // 17:  setQuota by a non-administrator
                string username          = "******";
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new string[] { "groupyy" });
                string[] args2 = (string[])args.Clone();
                // need final ref for doAs block
                ugi.DoAs(new _PrivilegedExceptionAction_275(this, username, conf, args2, parent));
                // 18: clrQuota by a non-administrator
                // 19: clrQuota on the root directory ("/") should fail
                RunCommand(admin, true, "-clrQuota", "/");
                // 20: setQuota on the root directory ("/") should succeed
                RunCommand(admin, false, "-setQuota", "1000000", "/");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                // 2: create directory /test/data2
                Path childDir2 = new Path(parent, "data2");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir2));
                Path childFile2  = new Path(childDir2, "datafile2");
                Path childFile3  = new Path(childDir2, "datafile3");
                long spaceQuota2 = DefaultBlockSize * replication;
                long fileLen2    = DefaultBlockSize;
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                // clear space quota
                RunCommand(admin, false, "-clrSpaceQuota", childDir2.ToString());
                // create a file that is greater than the size of space quota
                DFSTestUtil.CreateFile(fs, childFile2, fileLen2, replication, 0);
                // now set space quota again. This should succeed
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile3, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // now test the same for root
                Path childFile4 = new Path("/", "datafile2");
                Path childFile5 = new Path("/", "datafile3");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                DFSTestUtil.CreateFile(fs, childFile4, fileLen2, replication, 0);
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile5, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
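A minimal sketch of the expect-an-exception pattern the quota commands above rely on: write past a directory's space quota and assert that DSQuotaExceededException is raised. It reuses only the Sharpen-style bindings seen in these examples (MiniDFSCluster, DFSTestUtil, DSQuotaExceededException); the method name and the quota values are illustrative, not part of the original tests.

        // illustrative sketch, not part of the original test suite
        public virtual void SketchSpaceQuotaViolation()
        {
            Configuration conf = new HdfsConfiguration();
            // small block size so a tiny quota is enough to trigger the violation
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            try
            {
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path dir = new Path("/quota-sketch");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(dir));
                // allow roughly one 512-byte block at replication 2
                dfs.SetQuota(dir, HdfsConstants.QuotaDontSet, 2 * 512);
                bool hasException = false;
                try
                {
                    // four blocks at replication 2 clearly exceeds the quota
                    DFSTestUtil.CreateFile(dfs, new Path(dir, "too-big"), 4 * 512, (short)2, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
            }
            finally
            {
                cluster.Shutdown();
            }
        }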
Example #23
0
        public virtual void TestSpaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // set a smaller block size so that we can test with smaller
            // diskspace quotas
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            try
            {
                int   fileLen     = 1024;
                short replication = 3;
                int   fileSpace   = fileLen * replication;
                // create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // set the quota of /nqdir0/qdir1 to 4 * fileSpace
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 4 * fileSpace);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
                Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 6 * fileSpace);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 6 * fileSpace);
                // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
                Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir21));
                dfs.SetQuota(quotaDir21, HdfsConstants.QuotaDontSet, 2 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                // create a file under nqdir32/fileDir
                DFSTestUtil.CreateFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication
                                       , 0);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // Create a larger file under /nqdir0/qdir1/qdir21/nqdir33
                bool hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication
                                           , 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // delete nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Delete(new Path(quotaDir21, "nqdir33"), true));
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // Verify space before the move:
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
                Path dstPath = new Path(quotaDir20, "nqdir30");
                Path srcPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Rename(srcPath, dstPath));
                // verify space after the move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for its parent
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for source for the move
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                Path file2    = new Path(dstPath, "fileDir/file2");
                int  file2Len = 2 * fileLen;
                // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
                DFSTestUtil.CreateFile(dfs, file2, file2Len, replication, 0);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(dstPath, srcPath));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // make sure no intermediate directories left by failed rename
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                // directory should exist
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
                // verify space after the failed move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Test Append :
                // verify space quota
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // verify space before append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                OutputStream @out = dfs.Append(file2);
                // appending 1 fileLen should succeed
                @out.Write(new byte[fileLen]);
                @out.Close();
                file2Len += fileLen;   // after append
                // verify space after append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 4 * fileSpace);
                // now increase the quota for quotaDir1
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 5 * fileSpace);
                // Now, appending more than 1 fileLen should result in an error
                @out         = dfs.Append(file2);
                hasException = false;
                try
                {
                    @out.Write(new byte[fileLen + 1024]);
                    @out.Flush();
                    @out.Close();
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                    IOUtils.CloseStream(@out);
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                file2Len += fileLen;   // after partial append
                // verify space after partial append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace);
                // Test set replication :
                // first reduce the replication
                dfs.SetReplication(file2, (short)(replication - 1));
                // verify that space is reduced by file2Len
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now try to increase the replication and expect an error.
                hasException = false;
                try
                {
                    dfs.SetReplication(file2, (short)(replication + 1));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // verify space consumed remains unchanged.
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now increase the quota for quotaDir1 and quotaDir20
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                // then increasing replication should be ok.
                dfs.SetReplication(file2, (short)(replication + 1));
                // verify increase in space
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace + file2Len);
                // Test HDFS-2053 :
                // Create directory /hdfs-2053
                Path quotaDir2053 = new Path("/hdfs-2053");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053));
                // Create subdirectories /hdfs-2053/{A,B,C}
                Path quotaDir2053_A = new Path(quotaDir2053, "A");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_A));
                Path quotaDir2053_B = new Path(quotaDir2053, "B");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_B));
                Path quotaDir2053_C = new Path(quotaDir2053, "C");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_C));
                // Factors to vary the sizes of test files created in each subdir.
                // The actual factors are not really important but they allow us to create
                // identifiable file sizes per subdir, which helps during debugging.
                int sizeFactorA = 1;
                int sizeFactorB = 2;
                int sizeFactorC = 4;
                // Set space quota for subdirectory C
                dfs.SetQuota(quotaDir2053_C, HdfsConstants.QuotaDontSet, (sizeFactorC + 1) * fileSpace
                             );
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), (sizeFactorC + 1) * fileSpace);
                // Create a file under subdirectory A
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_A);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorA * fileSpace);
                // Create a file under subdirectory B
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_B);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorB * fileSpace);
                // Create a file under subdirectory C (which has a space quota)
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorC * fileSpace);
                // Check space consumed for /hdfs-2053
                c = dfs.GetContentSummary(quotaDir2053);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), (sizeFactorA + sizeFactorB
                                                                       + sizeFactorC) * fileSpace);
                NUnit.Framework.Assert.AreEqual(20, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
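TestSpaceCommands leans throughout on the identity that, for a file whose length is a whole number of blocks, GetContentSummary() reports spaceConsumed = fileLen * replication. The fragment below is a minimal sketch isolating just that arithmetic, under the same small block size used above; the method name and paths are illustrative, not from the original suite.

        // illustrative sketch: spaceConsumed == fileLen * replication for full blocks
        public virtual void SketchSpaceConsumedAccounting()
        {
            Configuration conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            try
            {
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path dir = new Path("/space-sketch");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(dir));
                int   fileLen     = 1024;   // two 512-byte blocks
                short replication = 3;
                DFSTestUtil.CreateFile(dfs, new Path(dir, "file1"), fileLen, replication, 0);
                // raw disk space charged against space quotas = fileLen * replication
                ContentSummary c = dfs.GetContentSummary(dir);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
            }
            finally
            {
                cluster.Shutdown();
            }
        }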
Example #24
0
        /// <summary>The following test first creates a file.</summary>
        /// <remarks>
        /// The following test first creates a file.
        /// It verifies the block information from a datanode.
        /// Then, it updates the block with new information and verifies again.
        /// </remarks>
        /// <param name="useDnHostname">whether DNs should connect to other DNs by hostname</param>
        /// <exception cref="System.Exception"/>
        private void CheckBlockMetaDataInfo(bool useDnHostname)
        {
            MiniDFSCluster cluster = null;

            conf.SetBoolean(DFSConfigKeys.DfsDatanodeUseDnHostname, useDnHostname);
            if (useDnHostname)
            {
                // Since the mini cluster only listens on the loopback we have to
                // ensure the hostname used to access DNs maps to the loopback. We
                // do this by telling the DN to advertise localhost as its hostname
                // instead of the default hostname.
                conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "localhost");
            }
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).CheckDataNodeHostConfig
                              (true).Build();
                cluster.WaitActive();
                //create a file
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string filestr            = "/foo";
                Path   filepath           = new Path(filestr);
                DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
                //get block info
                LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs
                                                                                              ).GetNamenode(), filestr);
                DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
                NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
                //connect to a data node
                DataNode datanode         = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
                InterDatanodeProtocol idp = DataNodeTestUtils.CreateInterDatanodeProtocolProxy(datanode
                                                                                               , datanodeinfo[0], conf, useDnHostname);
                // Stop the block scanners.
                datanode.GetBlockScanner().RemoveAllVolumeScanners();
                //verify BlockMetaDataInfo
                ExtendedBlock b = locatedblock.GetBlock();
                InterDatanodeProtocol.Log.Info("b=" + b + ", " + b.GetType());
                CheckMetaInfo(b, datanode);
                long recoveryId = b.GetGenerationStamp() + 1;
                idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(b, locatedblock.
                                                                                 GetLocations(), recoveryId));
                //verify updateBlock
                ExtendedBlock newblock = new ExtendedBlock(b.GetBlockPoolId(), b.GetBlockId(), b.
                                                           GetNumBytes() / 2, b.GetGenerationStamp() + 1);
                idp.UpdateReplicaUnderRecovery(b, recoveryId, b.GetBlockId(), newblock.GetNumBytes
                                                   ());
                CheckMetaInfo(newblock, datanode);
                // Verify correct null response trying to init recovery for a missing block
                ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.GetBlockId(), 0, 0);
                NUnit.Framework.Assert.IsNull(idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock
                                                                          (badBlock, locatedblock.GetLocations(), recoveryId)));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
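CheckBlockMetaDataInfo above is a parameterized helper rather than a test in its own right; in the upstream suite it is typically driven once per DN addressing mode. The two wrappers below are a hypothetical sketch of such entry points (the method names are illustrative, not taken from the listing above).

        // hypothetical entry points: run the helper with DN-by-IP and DN-by-hostname connections
        public virtual void TestBlockMetaDataInfo()
        {
            CheckBlockMetaDataInfo(false);
        }

        public virtual void TestBlockMetaDataInfoWithHostname()
        {
            CheckBlockMetaDataInfo(true);
        }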
Example #25
0
        public virtual void TestLeaseAfterRename()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                Path p  = new Path("/test-file");
                Path d  = new Path("/test-d");
                Path d2 = new Path("/test-d-other");
                // open a file to get a lease
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(p);
                @out.WriteBytes("something");
                //out.hsync();
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, p));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // just to ensure first fs doesn't have any logic to twiddle leases
                DistributedFileSystem fs2 = (DistributedFileSystem)FileSystem.NewInstance(fs.GetUri
                                                                                              (), fs.GetConf());
                // rename the file into an existing dir
                Log.Info("DMS: rename file into dir");
                Path pRenamed = new Path(d, p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(p, pRenamed);
                NUnit.Framework.Assert.IsFalse(p + " exists", fs2.Exists(p));
                NUnit.Framework.Assert.IsTrue(pRenamed + " not found", fs2.Exists(pRenamed));
                NUnit.Framework.Assert.IsFalse("has lease for " + p, HasLease(cluster, p));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                   ));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to a new non-existent dir
                Log.Info("DMS: rename parent dir");
                Path pRenamedAgain = new Path(d2, pRenamed.GetName());
                fs2.Rename(d, d2);
                // src gone
                NUnit.Framework.Assert.IsFalse(d + " exists", fs2.Exists(d));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to existing dir
                // NOTE: rename w/o options moves paths into existing dir
                Log.Info("DMS: rename parent again");
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(new Path(d, d2.GetName()), p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(d2, d);
                // src gone
                NUnit.Framework.Assert.IsFalse(d2 + " exists", fs2.Exists(d2));
                NUnit.Framework.Assert.IsFalse("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                    ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to non-existent dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d2, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d2, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " not found", fs2.Exists(pRenamed
                                                                                               .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to existing dir
                // NOTE: rename with options will not move paths into the existing dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " not found", fs2.Exists(pRenamed
                                                                                               .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
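The NOTE comments in the lease test above point at a subtlety: plain Rename moves the source into an existing destination directory, while Rename with Options.Rename.Overwrite targets the destination path itself (and, for an existing destination directory, only succeeds when it is empty). Below is a minimal sketch of that difference, assuming the same MiniDFSCluster bindings; the paths and method name are illustrative only.

        // illustrative sketch of plain Rename vs. Rename with Options.Rename.Overwrite
        public virtual void SketchRenameSemantics()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).NumDataNodes(1).Build();
            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path src = new Path("/rename-src");
                Path dst = new Path("/rename-dst");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(src));
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dst));
                // plain rename: src is nested under the existing dst directory
                NUnit.Framework.Assert.IsTrue(fs.Rename(src, dst));
                Path nested = new Path(dst, src.GetName());
                NUnit.Framework.Assert.IsTrue(fs.Exists(nested));
                // rename with options: the renamed directory replaces the (empty) dst2 path itself
                Path dst2 = new Path("/rename-dst2");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dst2));
                fs.Rename(nested, dst2, Options.Rename.Overwrite);
                NUnit.Framework.Assert.IsFalse(fs.Exists(nested));
                NUnit.Framework.Assert.IsTrue(fs.Exists(dst2));
                NUnit.Framework.Assert.IsFalse(fs.Exists(new Path(dst2, src.GetName())));
            }
            finally
            {
                cluster.Shutdown();
            }
        }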
Example #26
0
        public virtual void TestRollbackWithHAQJM()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare
                                                             );
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                // create new directory
                dfs.Mkdirs(bar);
                dfs.Close();
                TestRollingUpgrade.QueryForPreparation(dfs);
                // If the query returns true, both active and the standby NN should have
                // rollback fsimage ready.
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetNameNode(0).GetFSImage().HasRollbackFSImage
                                                  ());
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetNameNode(1).GetFSImage().HasRollbackFSImage
                                                  ());
                // rollback NN0
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "rollback");
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                // make sure /foo is still there, but /bar is not
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
                // check the details of NNStorage
                NNStorage storage = dfsCluster.GetNamesystem(0).GetFSImage().GetStorage();
                // segments: (startSegment, mkdir, start upgrade, endSegment),
                // (startSegment, mkdir, endSegment)
                CheckNNStorage(storage, 4, 7);
                // check storage in JNs
                for (int i = 0; i < NumJournalNodes; i++)
                {
                    FilePath dir = cluster.GetJournalCluster().GetCurrentDir(0, MiniQJMHACluster.Nameservice
                                                                             );
                    CheckJNStorage(dir, 5, 7);
                }
                // restart NN0 again to make sure we can start using the new fsimage and
                // the corresponding md5 checksum
                dfsCluster.RestartNameNode(0);
                // start the rolling upgrade again to make sure we do not load upgrade
                // status after the rollback
                dfsCluster.TransitionToActive(0);
                dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
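TestRollingUpgrade.QueryForPreparation is referenced above but its body is not part of this listing. The sketch below shows one plausible shape for such a polling helper; it assumes the RollingUpgradeInfo returned by the Query action exposes a CreatedRollbackImages() accessor in these bindings, which is an assumption rather than something established by the examples.

        // hypothetical polling helper: query until the rollback fsimage is reported
        internal static void QueryForPreparationSketch(DistributedFileSystem dfs)
        {
            for (int retries = 0; retries < 10; retries++)
            {
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Query);
                // CreatedRollbackImages() is an assumed accessor on RollingUpgradeInfo
                if (info.CreatedRollbackImages())
                {
                    return;
                }
                System.Threading.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.Fail("rollback fsimage was not reported as created in time");
        }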
Example #27
0
        /// <exception cref="System.Exception"/>
        public virtual void TestRollingUpgradeWithQJM()
        {
            string   nnDirPrefix = MiniDFSCluster.GetBaseDirectory() + "/nn/";
            FilePath nn1Dir      = new FilePath(nnDirPrefix + "image1");
            FilePath nn2Dir      = new FilePath(nnDirPrefix + "image2");

            Log.Info("nn1Dir=" + nn1Dir);
            Log.Info("nn2Dir=" + nn2Dir);
            Configuration      conf = new HdfsConfiguration();
            MiniJournalCluster mjc  = new MiniJournalCluster.Builder(conf).Build();

            SetConf(conf, nn1Dir, mjc);
            {
                // Start the cluster once to generate the dfs dirs
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs
                                             (false).CheckExitOnShutdown(false).Build();
                // Shutdown the cluster before making a copy of the namenode dir to release
                // all file locks, otherwise, the copy will fail on some platforms.
                cluster.Shutdown();
            }
            MiniDFSCluster cluster2 = null;

            try
            {
                // Start a second NN pointed to the same quorum.
                // We need to copy the image dir from the first NN -- or else
                // the new NN will just be rejected because of Namespace mismatch.
                FileUtil.FullyDelete(nn2Dir);
                FileUtil.Copy(nn1Dir, FileSystem.GetLocal(conf).GetRaw(), new Path(nn2Dir.GetAbsolutePath
                                                                                       ()), false, conf);
                // Start the cluster again
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                    false).ManageNameDfsDirs(false).CheckExitOnShutdown(false).Build();
                Path foo = new Path("/foo");
                Path bar = new Path("/bar");
                Path baz = new Path("/baz");
                RollingUpgradeInfo info1;
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.Mkdirs(foo);
                    //start rolling upgrade
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    info1 = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                    Log.Info("START\n" + info1);
                    //query rolling upgrade
                    NUnit.Framework.Assert.AreEqual(info1, dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                              .Query));
                    dfs.Mkdirs(bar);
                    cluster.Shutdown();
                }
                // cluster2 takes over QJM
                Configuration conf2 = SetConf(new Configuration(), nn2Dir, mjc);
                cluster2 = new MiniDFSCluster.Builder(conf2).NumDataNodes(0).Format(false).ManageNameDfsDirs
                               (false).Build();
                DistributedFileSystem dfs2 = cluster2.GetFileSystem();
                // Check that cluster2 sees the edits made on cluster1
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsFalse(dfs2.Exists(baz));
                //query rolling upgrade in cluster2
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                dfs2.Mkdirs(baz);
                Log.Info("RESTART cluster 2");
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
                //restart cluster with -upgrade should fail.
                try
                {
                    cluster2.RestartNameNode("-upgrade");
                }
                catch (IOException e)
                {
                    Log.Info("The exception is expected.", e);
                }
                Log.Info("RESTART cluster 2 again");
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.AreEqual(info1, dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                           .Query));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
                //finalize rolling upgrade
                RollingUpgradeInfo finalize = dfs2.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                  .Finalize);
                NUnit.Framework.Assert.IsTrue(finalize.IsFinalized());
                Log.Info("RESTART cluster 2 with regular startup option");
                cluster2.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Regular
                                                           );
                cluster2.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(bar));
                NUnit.Framework.Assert.IsTrue(dfs2.Exists(baz));
            }
            finally
            {
                if (cluster2 != null)
                {
                    cluster2.Shutdown();
                }
            }
        }
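Stripped of the cluster plumbing, the rolling-upgrade client lifecycle exercised above reduces to three RollingUpgradeAction calls. The sketch below assumes an already-running DistributedFileSystem handle and mirrors the assertions made in the example; it is illustrative, not an additional test from the original listing.

        // illustrative sketch of the prepare / query / finalize lifecycle used above
        internal static void SketchRollingUpgradeLifecycle(DistributedFileSystem dfs)
        {
            // Prepare: start the rolling upgrade and build the rollback fsimage
            // (the example above brackets this call with safe mode enter/leave).
            RollingUpgradeInfo started = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            NUnit.Framework.Assert.IsTrue(started.IsStarted());
            // Query: while the upgrade is in progress the same info is reported
            // back, which is what the restart assertions above depend on.
            NUnit.Framework.Assert.AreEqual(started, dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction
                                                                        .Query));
            // Finalize: commit the upgrade; a subsequent regular restart succeeds.
            RollingUpgradeInfo finalized = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
            NUnit.Framework.Assert.IsTrue(finalized.IsFinalized());
        }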