Example #1
        public virtual void TestLinkTargetNonSymlink()
        {
            FileContext fc         = null;
            Path        notSymlink = new Path("/notasymlink");

            try
            {
                fc = FileContext.GetFileContext(cluster.GetFileSystem().GetUri());
                fc.Create(notSymlink, EnumSet.Of(CreateFlag.Create));
                DFSClient client = new DFSClient(cluster.GetFileSystem().GetUri(), cluster.GetConfiguration(0));
                try
                {
                    client.GetLinkTarget(notSymlink.ToString());
                    NUnit.Framework.Assert.Fail("Expected exception for resolving non-symlink");
                }
                catch (IOException e)
                {
                    GenericTestUtils.AssertExceptionContains("is not a symbolic link", e);
                }
            }
            finally
            {
                if (fc != null)
                {
                    fc.Delete(notSymlink, false);
                }
            }
        }
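
The try/fail/catch shape in Example #1 recurs throughout these examples. As a minimal sketch, the idiom can be factored into a helper; the helper name here is hypothetical, not part of any test base class shown on this page:

        // Hypothetical helper: run an action expected to throw an IOException
        // and verify its message, mirroring the idiom in Example #1.
        private static void ExpectIOException(System.Action action, string fragment)
        {
            try
            {
                action();
                NUnit.Framework.Assert.Fail("Expected exception containing: " + fragment);
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains(fragment, e);
            }
        }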
Example #2
        /// <summary>
        /// Test that, if there are no blocks in the filesystem,
        /// the NameNode doesn't enter the "safemode extension" period.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestNoExtensionIfNoBlocks()
        {
            cluster.GetConfiguration(0).SetInt(DFSConfigKeys.DfsNamenodeSafemodeExtensionKey,
                                               60000);
            cluster.RestartNameNode();
            // Even though we have safemode extension set high, we should immediately
            // exit safemode on startup because there are no blocks in the namespace.
            string status = cluster.GetNameNode().GetNamesystem().GetSafemode();

            NUnit.Framework.Assert.AreEqual(string.Empty, status);
        }
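
Example #2 also shows the general recipe for per-NameNode settings: fetch that NameNode's Configuration from the cluster, override a key, and restart the node so the change takes effect. A minimal sketch, assuming a running MiniDFSCluster in a cluster field as in the fixtures above:

        // Sketch: apply an int override to NameNode 0's configuration and
        // restart it; the restart is what makes the new value take effect.
        private void OverrideAndRestartNameNode(string key, int value)
        {
            cluster.GetConfiguration(0).SetInt(key, value);
            cluster.RestartNameNode();
        }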
Example #3
        public virtual void TestDelegationToProvider()
        {
            NUnit.Framework.Assert.IsTrue(Called.Contains("start"));
            FileSystem fs = FileSystem.Get(miniDFS.GetConfiguration(0));

            fs.Mkdirs(new Path("/tmp"));
            fs.SetPermission(new Path("/tmp"), new FsPermission((short)0x1ff));
            UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting("u1", new string[] { "g1" });

            ugi.DoAs(new _PrivilegedExceptionAction_201(this));
        }
Example #4
        public virtual void TestDataDirectories()
        {
            FilePath      dataDir = new FilePath(BaseDir, "data").GetCanonicalFile();
            Configuration conf    = cluster.GetConfiguration(0);
            // 1. Test an unsupported scheme. Only "file:" is supported.
            string dnDir = MakeURI("shv", null, Util.FileAsURI(dataDir).GetPath());

            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dnDir);
            DataNode dn = null;

            try
            {
                dn = DataNode.CreateDataNode(new string[] {  }, conf);
                NUnit.Framework.Assert.Fail();
            }
            catch (Exception)
            {
                // expecting exception here
            }
            finally
            {
                if (dn != null)
                {
                    dn.Shutdown();
                }
            }
            NUnit.Framework.Assert.IsNull("Data-node startup should have failed.", dn);
            // 2. Test "file:" schema and no schema (path-only). Both should work.
            string dnDir1 = Util.FileAsURI(dataDir).ToString() + "1";
            string dnDir2 = MakeURI("file", "localhost", Util.FileAsURI(dataDir).GetPath() + "2");
            string dnDir3 = dataDir.GetAbsolutePath() + "3";

            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dnDir1 + "," + dnDir2 + "," + dnDir3);
            try
            {
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                       null);
                NUnit.Framework.Assert.IsTrue("Data-node should startup.", cluster.IsDataNodeUp()
                                              );
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.ShutdownDataNodes();
                }
            }
        }
Example #5
 /// <exception cref="System.IO.IOException"/>
 private void RestartStandby()
 {
     cluster.ShutdownNameNode(1);
     // Set the safemode extension to be lengthy, so that the tests
     // can check the safemode message after the safemode conditions
     // have been achieved, without being racy.
     cluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsNamenodeSafemodeExtensionKey,
                                        30000);
     cluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
     cluster.RestartNameNode(1);
     nn1 = cluster.GetNameNode(1);
     NUnit.Framework.Assert.AreEqual(nn1.GetNamesystem().GetTransactionsSinceLastLogRoll(), 0L);
 }
Example #6
        public virtual void TestBootstrapStandbyWithStandbyNN()
        {
            // make the first NN in standby state
            cluster.TransitionToStandby(0);
            Configuration confNN1 = cluster.GetConfiguration(1);

            // shut down nn1
            cluster.ShutdownNameNode(1);
            int rc = BootstrapStandby.Run(new string[] { "-force" }, confNN1);

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the standby
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
Example #7
        /// <summary>Fire up our own hand-rolled MiniDFSCluster.</summary>
        /// <remarks>
        /// Fire up our own hand-rolled MiniDFSCluster.  We do this here instead
        /// of relying on TestHdfsHelper because we don't want to turn on XAttr
        /// support.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        private void StartMiniDFS()
        {
            FilePath testDirRoot = TestDirHelper.GetTestDir();

            if (Runtime.GetProperty("hadoop.log.dir") == null)
            {
                Runtime.SetProperty("hadoop.log.dir", new FilePath(testDirRoot, "hadoop-log").GetAbsolutePath
                                        ());
            }
            if (Runtime.GetProperty("test.build.data") == null)
            {
                Runtime.SetProperty("test.build.data", new FilePath(testDirRoot, "hadoop-data").GetAbsolutePath
                                        ());
            }
            Configuration conf = HadoopUsersConfTestHelper.GetBaseConf();

            HadoopUsersConfTestHelper.AddUserConf(conf);
            conf.Set("fs.hdfs.impl.disable.cache", "true");
            conf.Set("dfs.block.access.token.enable", "false");
            conf.Set("dfs.permissions", "true");
            conf.Set("hadoop.security.authentication", "simple");
            // Explicitly turn off XAttr support
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeXattrsEnabledKey, false);
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
            builder.NumDataNodes(2);
            miniDfs = builder.Build();
            nnConf  = miniDfs.GetConfiguration(0);
        }
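
StartMiniDFS illustrates the builder flow these fixtures rely on. Below is a minimal sketch of the same steps with the XAttr switch left as a parameter; the method name and parameters are illustrative, not part of the original helper:

        // Sketch: build a small cluster, optionally disabling XAttr support,
        // and return the resulting NameNode configuration.
        private Configuration BuildMiniDfs(Configuration conf, bool xattrsEnabled, int dataNodes)
        {
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeXattrsEnabledKey, xattrsEnabled);
            MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
            builder.NumDataNodes(dataNodes);
            miniDfs = builder.Build();
            return miniDfs.GetConfiguration(0);
        }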
Example #8
 public virtual void TestInitializeSharedEdits()
 {
     AssertCannotStartNameNodes();
     // Initialize the shared edits dir.
     NUnit.Framework.Assert.IsFalse(NameNode.InitializeSharedEdits(cluster.GetConfiguration(0)));
     AssertCanStartHaNameNodes("1");
     // Now that we've done a metadata operation, make sure that deleting and
     // re-initializing the shared edits dir will let the standby still start.
     ShutdownClusterAndRemoveSharedEditsDir();
     AssertCannotStartNameNodes();
     // Re-initialize the shared edits dir.
     NUnit.Framework.Assert.IsFalse(NameNode.InitializeSharedEdits(cluster.GetConfiguration(0)));
     // Should *still* be able to start both NNs
     AssertCanStartHaNameNodes("2");
 }
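
Note the IsFalse assertions above: they indicate that InitializeSharedEdits returns false on success (a true return means the command aborted). A minimal sketch of the non-interactive overload used by the MiniQJMHACluster constructor further down, where the second argument forces the operation without prompting:

        // Sketch: format the shared edits dir from NN0's metadata without
        // interactive confirmation; a false return is the success case.
        Configuration nn0Conf = cluster.GetConfiguration(0);
        bool aborted = NameNode.InitializeSharedEdits(nn0Conf, true);
        NUnit.Framework.Assert.IsFalse(aborted);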
Example #9
        public virtual void TestFinalizeWithJournalNodes()
        {
            MiniQJMHACluster qjCluster = null;
            FileSystem       fs        = null;

            try
            {
                MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
                builder.GetDfsBuilder().NumDataNodes(0);
                qjCluster = builder.Build();
                MiniDFSCluster cluster = qjCluster.GetDfsCluster();
                // No upgrade is in progress at the moment.
                CheckJnPreviousDirExistence(qjCluster, false);
                CheckClusterPreviousDirExistence(cluster, false);
                AssertCTimesEqual(cluster);
                // Transition NN0 to active and do some FS ops.
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo1")));
                long cidBeforeUpgrade = GetCommittedTxnIdValue(qjCluster);
                // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
                // flag.
                cluster.ShutdownNameNode(1);
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Upgrade);
                cluster.RestartNameNode(0, false);
                NUnit.Framework.Assert.IsTrue(cidBeforeUpgrade <= GetCommittedTxnIdValue(qjCluster));
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo2")));
                CheckNnPreviousDirExistence(cluster, 0, true);
                CheckNnPreviousDirExistence(cluster, 1, false);
                CheckJnPreviousDirExistence(qjCluster, true);
                // Now bootstrap the standby with the upgraded info.
                int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
                NUnit.Framework.Assert.AreEqual(0, rc);
                cluster.RestartNameNode(1);
                long cidDuringUpgrade = GetCommittedTxnIdValue(qjCluster);
                NUnit.Framework.Assert.IsTrue(cidDuringUpgrade > cidBeforeUpgrade);
                RunFinalizeCommand(cluster);
                NUnit.Framework.Assert.AreEqual(cidDuringUpgrade, GetCommittedTxnIdValue(qjCluster));
                CheckClusterPreviousDirExistence(cluster, false);
                CheckJnPreviousDirExistence(qjCluster, false);
                AssertCTimesEqual(cluster);
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (qjCluster != null)
                {
                    qjCluster.Shutdown();
                }
            }
        }
Example #10
        /// <summary>
        /// Test that getContentSummary on the standby should throw a standby
        /// exception.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestgetContentSummaryOnStandby()
        {
            Configuration nn1conf = cluster.GetConfiguration(1);

            // reset standby reads to the default, i.e. false on the standby.
            HAUtil.SetAllowStandbyReads(nn1conf, false);
            cluster.RestartNameNode(1);
            cluster.GetNameNodeRpc(1).GetContentSummary("/");
        }
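
For contrast, a minimal sketch of the inverse configuration is below. With standby reads enabled, the same read-only call would be expected to succeed rather than raise a standby exception; this inverse case is an assumption for illustration, not one of the scraped examples:

            // Sketch: allow reads on the standby NN1, then issue a read-only
            // call, which is assumed to succeed under this setting.
            Configuration nn1conf = cluster.GetConfiguration(1);
            HAUtil.SetAllowStandbyReads(nn1conf, true);
            cluster.RestartNameNode(1);
            cluster.GetNameNodeRpc(1).GetContentSummary("/");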
Example #11
        /// <summary>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// </summary>
        /// <remarks>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// The following op codes are deprecated and therefore not tested:
        /// OP_DATANODE_ADD    ( 5)
        /// OP_DATANODE_REMOVE ( 6)
        /// OP_SET_NS_QUOTA    (11)
        /// OP_CLEAR_NS_QUOTA  (12)
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        private CheckpointSignature RunOperations()
        {
            Log.Info("Creating edits by performing fs operations");
            // no explicit type check: if this is not a DistributedFileSystem,
            // an exception is thrown here, which is what we want
            DistributedFileSystem dfs = cluster.GetFileSystem();

            DFSTestUtil.RunOperations(cluster, dfs, cluster.GetConfiguration(0), dfs.GetDefaultBlockSize(), 0);
            // OP_ROLLING_UPGRADE_START
            cluster.GetNamesystem().GetEditLog().LogStartRollingUpgrade(Time.Now());
            // OP_ROLLING_UPGRADE_FINALIZE
            cluster.GetNamesystem().GetEditLog().LogFinalizeRollingUpgrade(Time.Now());
            // Force a roll so we get an OP_END_LOG_SEGMENT txn
            return(cluster.GetNameNodeRpc().RollEditLog());
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestFinalize()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                FSImage fsimage = dfsCluster.GetNamesystem(0).GetFSImage();
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                QueryForPreparation(dfs);
                // The NN should have a copy of the fsimage in case of rollbacks.
                NUnit.Framework.Assert.IsTrue(fsimage.HasRollbackFSImage());
                info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
                NUnit.Framework.Assert.IsTrue(info.IsFinalized());
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                // Once finalized, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(fsimage.HasRollbackFSImage());
                // Should have no problem in restart and replaying edits that include
                // the FINALIZE op.
                dfsCluster.RestartNameNode(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestDowngrade()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                TestRollingUpgrade.QueryForPreparation(dfs);
                dfs.Close();
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "downgrade");
                // Once downgraded, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(dfsCluster.GetNamesystem(0).GetFSImage().HasRollbackFSImage());
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #14
        public virtual void TestSingleRequiredFailedEditsDirOnSetReadyToFlush()
        {
            // Set one of the edits dirs to be required.
            string[] editsDirs = cluster.GetConfiguration(0).GetTrimmedStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
            ShutDownMiniCluster();
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirRequiredKey, editsDirs[0]);
            conf.SetInt(DFSConfigKeys.DfsNamenodeEditsDirMinimumKey, 0);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckedVolumesMinimumKey, 0);
            SetUpMiniCluster(conf, true);
            NUnit.Framework.Assert.IsTrue(DoAnEdit());
            // Invalidate the one required edits journal.
            InvalidateEditsDirAtIndex(0, false, false);
            JournalSet.JournalAndStream nonRequiredJas = GetJournalAndStream(1);
            EditLogFileOutputStream     nonRequiredSpy = SpyOnStream(nonRequiredJas);

            // Verify that the NN has not terminated (no ExitException thrown)
            // and that the other stream is still active.
            NUnit.Framework.Assert.IsTrue(nonRequiredJas.IsActive());
            try
            {
                DoAnEdit();
                NUnit.Framework.Assert.Fail("A single failure of a required journal should have halted the NN"
                                            );
            }
            catch (RemoteException re)
            {
                NUnit.Framework.Assert.IsTrue(re.GetClassName().Contains("ExitException"));
                GenericTestUtils.AssertExceptionContains("setReadyToFlush failed for required journal"
                                                         , re);
            }
            // Since the required directory failed setReadyToFlush, and that
            // directory was listed prior to the non-required directory,
            // we should not call setReadyToFlush on the non-required
            // directory. Regression test for HDFS-2874.
            Org.Mockito.Mockito.Verify(nonRequiredSpy, Org.Mockito.Mockito.Never()).SetReadyToFlush();
            NUnit.Framework.Assert.IsFalse(nonRequiredJas.IsActive());
        }
Example #15
            /// <exception cref="System.Exception"/>
            public override void Evaluate()
            {
                MiniDFSCluster miniHdfs = null;
                Configuration  conf     = HadoopUsersConfTestHelper.GetBaseConf();

                if (System.Boolean.Parse(Runtime.GetProperty(HadoopMiniHdfs, "true")))
                {
                    miniHdfs = StartMiniHdfs(conf);
                    conf     = miniHdfs.GetConfiguration(0);
                }
                try
                {
                    HdfsConfTl.Set(conf);
                    HdfsTestDirTl.Set(ResetHdfsTestDir(conf));
                    statement.Evaluate();
                }
                finally
                {
                    HdfsConfTl.Remove();
                    HdfsTestDirTl.Remove();
                }
            }
Example #16
        /// <exception cref="System.IO.IOException"/>
        private MiniQJMHACluster(MiniQJMHACluster.Builder builder)
        {
            this.conf = builder.conf;
            int retryCount = 0;

            while (true)
            {
                try
                {
                    basePort = 10000 + Random.Next(1000) * 4;
                    // start 3 journal nodes
                    journalCluster = new MiniJournalCluster.Builder(conf).Format(true).Build();
                    URI journalURI = journalCluster.GetQuorumJournalURI(Nameservice);
                    // start cluster with 2 NameNodes
                    MiniDFSNNTopology topology = CreateDefaultTopology(basePort);
                    InitHAConf(journalURI, builder.conf);
                    // First start up the NNs just to format the namespace. The MiniDFSCluster
                    // has no way to format the NameNodes without also starting them.
                    cluster = builder.dfsBuilder.NnTopology(topology).ManageNameDfsSharedDirs(false).Build();
                    cluster.WaitActive();
                    cluster.ShutdownNameNodes();
                    // initialize the journal nodes
                    Configuration confNN0 = cluster.GetConfiguration(0);
                    NameNode.InitializeSharedEdits(confNN0, true);
                    cluster.GetNameNodeInfos()[0].SetStartOpt(builder.startOpt);
                    cluster.GetNameNodeInfos()[1].SetStartOpt(builder.startOpt);
                    // restart the cluster
                    cluster.RestartNameNodes();
                    break;
                }
                catch (BindException)
                {
                    // count the conflict so the logged retry total stays accurate
                    ++retryCount;
                    Log.Info("MiniQJMHACluster port conflicts, retried " + retryCount + " times");
                }
            }
        }
Example #17
        public virtual void TestSuccessfulBaseCase()
        {
            RemoveStandbyNameDirs();
            try
            {
                cluster.RestartNameNode(1);
                NUnit.Framework.Assert.Fail("Did not throw");
            }
            catch (IOException ioe)
            {
                GenericTestUtils.AssertExceptionContains("storage directory does not exist or is not accessible"
                                                         , ioe);
            }
            int rc = BootstrapStandby.Run(new string[] { "-nonInteractive" }, cluster.GetConfiguration
                                              (1));

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the active
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
            // We should now be able to start the standby successfully.
            cluster.RestartNameNode(1);
        }
Example #18
        public virtual void TestLeaseRecoveryAndAppend()
        {
            Configuration conf = new Configuration();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                Path file = new Path("/testLeaseRecovery");
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a file with 0 bytes
                FSDataOutputStream @out = dfs.Create(file);
                @out.Hflush();
                @out.Hsync();
                // abort the original stream
                ((DFSOutputStream)@out.GetWrappedStream()).Abort();
                DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));
                // Appending to a file whose lease is held by another client should fail
                try
                {
                    newdfs.Append(file);
                    NUnit.Framework.Assert.Fail("Append to a file(lease is held by another client) should fail"
                                                );
                }
                catch (RemoteException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("file lease is currently owned"));
                }
                // Lease recovery on first try should be successful
                bool recoverLease = newdfs.RecoverLease(file);
                NUnit.Framework.Assert.IsTrue(recoverLease);
                FSDataOutputStream append = newdfs.Append(file);
                append.Write(Sharpen.Runtime.GetBytesForString("test"));
                append.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
Example #19
        public virtual void TestBlockRecoveryWithLessMetafile()
        {
            Configuration conf = new Configuration();

            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey, UserGroupInformation.GetCurrentUser().GetShortUserName());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Path file = new Path("/testRecoveryFile");
            DistributedFileSystem dfs  = cluster.GetFileSystem();
            FSDataOutputStream    @out = dfs.Create(file);
            int count = 0;

            while (count < 2 * 1024 * 1024)
            {
                @out.WriteBytes("Data");
                count += 4;
            }
            @out.Hsync();
            // abort the original stream
            ((DFSOutputStream)@out.GetWrappedStream()).Abort();
            LocatedBlocks locations = cluster.GetNameNodeRpc().GetBlockLocations(file.ToString(), 0, count);
            ExtendedBlock      block         = locations.Get(0).GetBlock();
            DataNode           dn            = cluster.GetDataNodes()[0];
            BlockLocalPathInfo localPathInfo = dn.GetBlockLocalPathInfo(block, null);
            FilePath           metafile      = new FilePath(localPathInfo.GetMetaPath());

            NUnit.Framework.Assert.IsTrue(metafile.Exists());
            // reduce the block meta file size
            RandomAccessFile raf = new RandomAccessFile(metafile, "rw");

            raf.SetLength(metafile.Length() - 20);
            raf.Close();
            // restart the DN so the replica goes to the RWR state
            MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
            cluster.RestartDataNode(dnProp, true);
            // try to recover the lease
            DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));

            count = 0;
            while (++count < 10 && !newdfs.RecoverLease(file))
            {
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.IsTrue("File should be closed", newdfs.RecoverLease(file));
        }
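
The bounded polling at the end of Example #19 is a reusable pattern. Below is a minimal sketch with the loop factored into a helper; the helper name is hypothetical:

        // Hypothetical helper: poll RecoverLease until the NameNode closes
        // the file or the attempt budget runs out, sleeping between tries.
        private static bool WaitForLeaseRecovery(DistributedFileSystem fs, Path file, int attempts)
        {
            for (int i = 0; i < attempts; i++)
            {
                if (fs.RecoverLease(file))
                {
                    return true;
                }
                Sharpen.Thread.Sleep(1000);
            }
            return false;
        }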
Example #20
        public virtual void TestUpgradeWithJournalNodes()
        {
            MiniQJMHACluster qjCluster = null;
            FileSystem       fs        = null;

            try
            {
                MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
                builder.GetDfsBuilder().NumDataNodes(0);
                qjCluster = builder.Build();
                MiniDFSCluster cluster = qjCluster.GetDfsCluster();
                // No upgrade is in progress at the moment.
                CheckJnPreviousDirExistence(qjCluster, false);
                CheckClusterPreviousDirExistence(cluster, false);
                AssertCTimesEqual(cluster);
                // Transition NN0 to active and do some FS ops.
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo1")));
                // get the value of the committedTxnId in journal nodes
                long cidBeforeUpgrade = GetCommittedTxnIdValue(qjCluster);
                // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
                // flag.
                cluster.ShutdownNameNode(1);
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Upgrade);
                cluster.RestartNameNode(0, false);
                CheckNnPreviousDirExistence(cluster, 0, true);
                CheckNnPreviousDirExistence(cluster, 1, false);
                CheckJnPreviousDirExistence(qjCluster, true);
                NUnit.Framework.Assert.IsTrue(cidBeforeUpgrade <= GetCommittedTxnIdValue(qjCluster));
                // NN0 should come up in the active state when given the -upgrade option,
                // so no need to transition it to active.
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo2")));
                // Restart NN0 without the -upgrade flag, to make sure that works.
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Regular);
                cluster.RestartNameNode(0, false);
                // Make sure we can still do FS ops after upgrading.
                cluster.TransitionToActive(0);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo3")));
                NUnit.Framework.Assert.IsTrue(GetCommittedTxnIdValue(qjCluster) > cidBeforeUpgrade);
                // Now bootstrap the standby with the upgraded info.
                int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
                NUnit.Framework.Assert.AreEqual(0, rc);
                // Now restart NN1 and make sure that we can do ops against that as well.
                cluster.RestartNameNode(1);
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo4")));
                AssertCTimesEqual(cluster);
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (qjCluster != null)
                {
                    qjCluster.Shutdown();
                }
            }
        }
Example #21
        public virtual void TestRollbackWithJournalNodes()
        {
            MiniQJMHACluster qjCluster = null;
            FileSystem       fs        = null;

            try
            {
                MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
                builder.GetDfsBuilder().NumDataNodes(0);
                qjCluster = builder.Build();
                MiniDFSCluster cluster = qjCluster.GetDfsCluster();
                // No upgrade is in progress at the moment.
                CheckClusterPreviousDirExistence(cluster, false);
                AssertCTimesEqual(cluster);
                CheckJnPreviousDirExistence(qjCluster, false);
                // Transition NN0 to active and do some FS ops.
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo1")));
                long cidBeforeUpgrade = GetCommittedTxnIdValue(qjCluster);
                // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
                // flag.
                cluster.ShutdownNameNode(1);
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Upgrade);
                cluster.RestartNameNode(0, false);
                CheckNnPreviousDirExistence(cluster, 0, true);
                CheckNnPreviousDirExistence(cluster, 1, false);
                CheckJnPreviousDirExistence(qjCluster, true);
                // NN0 should come up in the active state when given the -upgrade option,
                // so no need to transition it to active.
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo2")));
                long cidDuringUpgrade = GetCommittedTxnIdValue(qjCluster);
                NUnit.Framework.Assert.IsTrue(cidDuringUpgrade > cidBeforeUpgrade);
                // Now bootstrap the standby with the upgraded info.
                int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
                NUnit.Framework.Assert.AreEqual(0, rc);
                cluster.RestartNameNode(1);
                CheckNnPreviousDirExistence(cluster, 0, true);
                CheckNnPreviousDirExistence(cluster, 1, true);
                CheckJnPreviousDirExistence(qjCluster, true);
                AssertCTimesEqual(cluster);
                // Shut down the NNs, but deliberately leave the JNs up and running.
                ICollection<URI> nn1NameDirs = cluster.GetNameDirs(0);
                cluster.Shutdown();
                conf.SetStrings(DFSConfigKeys.DfsNamenodeNameDirKey, Joiner.On(",").Join(nn1NameDirs));
                NameNode.DoRollback(conf, false);
                long cidAfterRollback = GetCommittedTxnIdValue(qjCluster);
                NUnit.Framework.Assert.IsTrue(cidBeforeUpgrade < cidAfterRollback);
                // make sure the committedTxnId has been reset correctly after rollback
                NUnit.Framework.Assert.IsTrue(cidDuringUpgrade > cidAfterRollback);
                // The rollback operation should have rolled back the first NN's local
                // dirs, and the shared dir, but not the other NN's dirs. Those have to be
                // done by bootstrapping the standby.
                CheckNnPreviousDirExistence(cluster, 0, false);
                CheckJnPreviousDirExistence(qjCluster, false);
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (qjCluster != null)
                {
                    qjCluster.Shutdown();
                }
            }
        }
Example #22
        public virtual void TestRollbackWithHAQJM()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                // create new directory
                dfs.Mkdirs(bar);
                dfs.Close();
                TestRollingUpgrade.QueryForPreparation(dfs);
                // If the query returns true, both active and the standby NN should have
                // rollback fsimage ready.
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetNameNode(0).GetFSImage().HasRollbackFSImage());
                NUnit.Framework.Assert.IsTrue(dfsCluster.GetNameNode(1).GetFSImage().HasRollbackFSImage());
                // rollback NN0
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "rollback");
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                // make sure /foo is still there, but /bar is not
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
                // check the details of NNStorage
                NNStorage storage = dfsCluster.GetNamesystem(0).GetFSImage().GetStorage();
                // segments: (startSegment, mkdir, start upgrade, endSegment),
                // (startSegment, mkdir, endSegment)
                CheckNNStorage(storage, 4, 7);
                // check storage in JNs
                for (int i = 0; i < NumJournalNodes; i++)
                {
                    FilePath dir = cluster.GetJournalCluster().GetCurrentDir(0, MiniQJMHACluster.Nameservice);
                    CheckJNStorage(dir, 5, 7);
                }
                // restart NN0 again to make sure we can start using the new fsimage and
                // the corresponding md5 checksum
                dfsCluster.RestartNameNode(0);
                // start the rolling upgrade again to make sure we do not load upgrade
                // status after the rollback
                dfsCluster.TransitionToActive(0);
                dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #23
 public static void TestSetUp()
 {
     cluster = new MiniDFSCluster.Builder(conf).Build();
     fc      = FileContext.GetFileContext(cluster.GetConfiguration(0));
     fc.Delete(TestDir, true);
 }
Example #24
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    // mark the designated node's storage as read-only shared
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, DatanodeStorage.State.ReadOnlyShared);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(
                    BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                    i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
Example #25
        public virtual void TestDecommissionStatus()
        {
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", 2, info.Length);
            DistributedFileSystem fileSys = cluster.GetFileSystem();
            DFSAdmin admin    = new DFSAdmin(cluster.GetConfiguration(0));
            short    replicas = numDatanodes;
            //
            // Decommission one node. Verify the decommission status
            //
            Path file1 = new Path("decommission.dat");

            WriteFile(fileSys, file1, replicas);
            Path file2             = new Path("decommission1.dat");
            FSDataOutputStream st1 = WriteIncompleteFile(fileSys, file2, replicas);

            foreach (DataNode d in cluster.GetDataNodes())
            {
                DataNodeTestUtils.TriggerBlockReport(d);
            }
            FSNamesystem    fsn = cluster.GetNamesystem();
            DatanodeManager dm  = fsn.GetBlockManager().GetDatanodeManager();

            for (int iteration = 0; iteration < numDatanodes; iteration++)
            {
                string downnode = DecommissionNode(fsn, client, localFileSys, iteration);
                dm.RefreshNodes(conf);
                decommissionedNodes.AddItem(downnode);
                BlockManagerTestUtil.RecheckDecommissionState(dm);
                IList <DatanodeDescriptor> decommissioningNodes = dm.GetDecommissioningNodes();
                if (iteration == 0)
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 1);
                    DatanodeDescriptor decommNode = decommissioningNodes[0];
                    CheckDecommissionStatus(decommNode, 3, 0, 1);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 1), fileSys, admin);
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 2);
                    DatanodeDescriptor decommNode1 = decommissioningNodes[0];
                    DatanodeDescriptor decommNode2 = decommissioningNodes[1];
                    // This one is still 3,3,1 since it passed over the UC block
                    // earlier, before node 2 was decommed
                    CheckDecommissionStatus(decommNode1, 3, 3, 1);
                    // This one is 4,4,2 since it has the full state
                    CheckDecommissionStatus(decommNode2, 4, 4, 2);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 2), fileSys, admin);
                }
            }
            // Call refreshNodes on FSNamesystem with empty exclude file.
            // This will remove the datanodes from decommissioning list and
            // make them available again.
            WriteConfigFile(localFileSys, excludeFile, null);
            dm.RefreshNodes(conf);
            st1.Close();
            CleanupFile(fileSys, file1);
            CleanupFile(fileSys, file2);
        }
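
The tail of Example #25 shows how nodes are taken back out of decommissioning: rewrite the exclude file as empty, then refresh the DatanodeManager. A minimal sketch follows; the final assertion is an assumed check for illustration, not part of the original test:

            // Sketch: clear the exclude file and refresh so the previously
            // decommissioning datanodes rejoin the cluster.
            WriteConfigFile(localFileSys, excludeFile, null);
            dm.RefreshNodes(conf);
            // assumed: the decommissioning list should now be empty
            NUnit.Framework.Assert.AreEqual(0, dm.GetDecommissioningNodes().Count);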
Example #26
 public virtual void StartUpCluster()
 {
     conf    = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).Build();
     fs      = cluster.GetFileSystem();
     client  = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
     dn0    = cluster.GetDataNodes()[0];
     poolId = cluster.GetNamesystem().GetBlockPoolId();
     dn0Reg = dn0.GetDNRegistrationForBP(poolId);
 }