Example #1
 /// <summary>Attempts to start a NameNode with the given operation.</summary>
 /// <remarks>
 /// Attempts to start a NameNode with the given operation.  Starting
 /// the NameNode should throw an exception.
 /// </remarks>
 /// <param name="operation">- NameNode startup operation</param>
 /// <param name="exceptionClass">
 /// - if non-null, will check that the caught exception
 /// is assignment-compatible with exceptionClass
 /// </param>
 /// <param name="messagePattern">
 /// - if non-null, will check that a substring of the
 /// message from the caught exception matches this pattern, via the
 /// <see cref="Matcher.Find()"/>
 /// method.
 /// </param>
 internal virtual void StartNameNodeShouldFail(HdfsServerConstants.StartupOption operation,
                                               Type exceptionClass, Sharpen.Pattern messagePattern)
 {
     try
     {
         cluster = new MiniDFSCluster.Builder(conf)
                       .NumDataNodes(0)
                       .StartupOption(operation)
                       .Format(false)
                       .ManageDataDfsDirs(false)
                       .ManageNameDfsDirs(false)
                       .Build();
         // should fail
         NUnit.Framework.Assert.Fail("NameNode should have failed to start");
     }
     catch (Exception e)
     {
         // expect exception
         if (exceptionClass != null)
         {
             NUnit.Framework.Assert.IsTrue("Caught exception is not of expected class " + exceptionClass
                                           .Name + ": " + StringUtils.StringifyException(e), exceptionClass.IsInstanceOfType
                                               (e));
         }
         if (messagePattern != null)
         {
             NUnit.Framework.Assert.IsTrue("Caught exception message string does not match expected pattern \""
                                           + messagePattern.Pattern() + "\" : " + StringUtils.StringifyException(e), messagePattern
                                           .Matcher(e.Message).Find());
         }
         Log.Info("Successfully detected expected NameNode startup failure.");
     }
 }
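A minimal usage sketch (illustrative only: the exception type and the message pattern below are assumptions, not taken from the original test suite):

 // Expect NameNode startup with -rollback to fail when there is no previous
 // state to roll back to. typeof(IOException) and the pattern text are assumed.
 StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Rollback,
                         typeof(IOException),
                         Sharpen.Pattern.Compile("previous fs state"));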
 /// <exception cref="System.IO.IOException"/>
 public virtual void StartDataNodes(Configuration conf, int numDataNodes, bool manageDfsDirs,
                                    HdfsServerConstants.StartupOption operation, string[] racks,
                                    long[] simulatedCapacities, string[] nodeGroups)
 {
     StartDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups,
                    null, simulatedCapacities, false);
 }
Example #3
 /// <summary>Attempts to start a DataNode with the given operation.</summary>
 /// <remarks>
 /// Attempts to start a DataNode with the given operation. Starting
 /// the given block pool should fail.
 /// </remarks>
 /// <param name="operation">startup option</param>
 /// <param name="bpid">block pool Id that should fail to start</param>
 /// <exception cref="System.IO.IOException"></exception>
 internal virtual void StartBlockPoolShouldFail(HdfsServerConstants.StartupOption operation,
                                                string bpid)
 {
     cluster.StartDataNodes(conf, 1, false, operation, null);
     // should fail
     NUnit.Framework.Assert.IsFalse("Block pool " + bpid + " should have failed to start",
                                    cluster.GetDataNodes()[0].IsBPServiceAlive(bpid));
 }
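A minimal usage sketch (illustrative; it assumes a bpid value captured from the running cluster before the restart attempt):

 // Attempt a ROLLBACK restart of a DataNode; the named block pool should
 // fail to come up. The bpid variable is an assumed test-fixture value.
 StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback, bpid);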
Example #4
 /// <param name="conf">Configuration object</param>
 /// <param name="logDir">the path to the directory in which data will be stored</param>
 /// <param name="errorReporter">a callback to report errors</param>
 /// <exception cref="System.IO.IOException"></exception>
 protected internal JNStorage(Configuration conf, FilePath logDir, HdfsServerConstants.StartupOption
                              startOpt, StorageErrorReporter errorReporter)
     : base(HdfsServerConstants.NodeType.JournalNode)
 {
     sd = new Storage.StorageDirectory(logDir);
     this.AddStorageDir(sd);
     this.fjm = new FileJournalManager(conf, sd, errorReporter);
     AnalyzeAndRecoverStorage(startOpt);
 }
 // This is to initialize from the parent class.
 /// <exception cref="System.IO.IOException"/>
 public override void StartDataNodes(Configuration conf, int numDataNodes,
                                     StorageType[][] storageTypes, bool manageDfsDirs,
                                     HdfsServerConstants.StartupOption operation, string[] racks,
                                     string[] hosts, long[][] storageCapacities,
                                     long[] simulatedCapacities, bool setupHostsFile,
                                     bool checkDataNodeAddrConfig, bool checkDataNodeHostConfig,
                                     Configuration[] dnConfOverlays)
 {
     lock (this)
     {
         StartDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
                        NodeGroups, hosts, storageCapacities, simulatedCapacities,
                        setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
     }
 }
Example #6
 /// <summary>Verify that parsing a StartupOption string gives the expected results.</summary>
 /// <remarks>
 /// Verify that parsing a StartupOption string gives the expected results.
 /// If a RollingUpgradeStartupOption is specified then it is also checked.
 /// </remarks>
 /// <param name="value"/>
 /// <param name="expectedOption"/>
 /// <param name="expectedRollupOption">optional, may be null.</param>
 private static void VerifyStartupOptionResult(string value,
                                               HdfsServerConstants.StartupOption expectedOption,
                                               HdfsServerConstants.RollingUpgradeStartupOption expectedRollupOption)
 {
     HdfsServerConstants.StartupOption option =
         HdfsServerConstants.StartupOption.GetEnum(value);
     NUnit.Framework.Assert.AreEqual(expectedOption, option);
     if (expectedRollupOption != null)
     {
         NUnit.Framework.Assert.AreEqual(expectedRollupOption,
                                         option.GetRollingUpgradeStartupOption());
     }
 }
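Illustrative calls (hedged: the exact option strings follow HDFS startup-option syntax and are assumptions here):

 VerifyStartupOptionResult("FORMAT", HdfsServerConstants.StartupOption.Format, null);
 VerifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
                           HdfsServerConstants.StartupOption.Rollingupgrade,
                           HdfsServerConstants.RollingUpgradeStartupOption.Rollback);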
Example #7
        /// <summary>Parses arguments and fills out the member variables.</summary>
        /// <param name="args">Command-line arguments.</param>
        /// <returns>
        /// true on successful parse; false to indicate that the
        /// program should exit.
        /// </returns>
        private bool ParseArguments(string[] args)
        {
            Options     options = MakeOptions();
            CommandLine cli;

            try
            {
                CommandLineParser parser = new GnuParser();
                cli = parser.Parse(options, args);
            }
            catch (ParseException e)
            {
                Log.Warn("options parsing failed:  " + e.Message);
                new HelpFormatter().PrintHelp("...", options);
                return(false);
            }
            if (cli.HasOption("help"))
            {
                new HelpFormatter().PrintHelp("...", options);
                return(false);
            }
            if (cli.GetArgs().Length > 0)
            {
                foreach (string arg in cli.GetArgs())
                {
                    Log.Error("Unrecognized option: " + arg);
                    new HelpFormatter().PrintHelp("...", options);
                    return(false);
                }
            }
            // HDFS
            numDataNodes = IntArgument(cli, "datanodes", 1);
            nameNodePort = IntArgument(cli, "nnport", 0);
            if (cli.HasOption("format"))
            {
                dfsOpts = HdfsServerConstants.StartupOption.Format;
                format  = true;
            }
            else
            {
                dfsOpts = HdfsServerConstants.StartupOption.Regular;
                format  = false;
            }
            // Runner
            writeDetails = cli.GetOptionValue("writeDetails");
            writeConfig  = cli.GetOptionValue("writeConfig");
            // General
            conf = new HdfsConfiguration();
            UpdateConfiguration(conf, cli.GetOptionValues("D"));
            return(true);
        }
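A sketch of how such a parser is typically driven (assumed context: an entry point on the same runner class; the option names mirror those parsed above):

 // Hypothetical driver for ParseArguments.
 string[] argv = { "-datanodes", "2", "-nnport", "8020", "-format" };
 if (!ParseArguments(argv))
 {
     return; // help text was already printed by ParseArguments
 }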
Example #8
        /// <summary>Parses arguments and fills out the member variables.</summary>
        /// <param name="args">Command-line arguments.</param>
        /// <returns>
        /// true on successful parse; false to indicate that the program should
        /// exit.
        /// </returns>
        private bool ParseArguments(string[] args)
        {
            Options     options = MakeOptions();
            CommandLine cli;

            try
            {
                CommandLineParser parser = new GnuParser();
                cli = parser.Parse(options, args);
            }
            catch (ParseException e)
            {
                Log.Warn("options parsing failed:  " + e.Message);
                new HelpFormatter().PrintHelp("...", options);
                return(false);
            }
            if (cli.HasOption("help"))
            {
                new HelpFormatter().PrintHelp("...", options);
                return(false);
            }
            if (cli.GetArgs().Length > 0)
            {
                foreach (string arg in cli.GetArgs())
                {
                    System.Console.Error.WriteLine("Unrecognized option: " + arg);
                    new HelpFormatter().PrintHelp("...", options);
                    return(false);
                }
            }
            // MR
            noMR            = cli.HasOption("nomr");
            numNodeManagers = IntArgument(cli, "nodemanagers", 1);
            rmPort          = IntArgument(cli, "rmport", 0);
            jhsPort         = IntArgument(cli, "jhsport", 0);
            fs = cli.GetOptionValue("namenode");
            // HDFS
            noDFS        = cli.HasOption("nodfs");
            numDataNodes = IntArgument(cli, "datanodes", 1);
            nnPort       = IntArgument(cli, "nnport", 0);
            dfsOpts      = cli.HasOption("format") ? HdfsServerConstants.StartupOption.Format : HdfsServerConstants.StartupOption
                           .Regular;
            // Runner
            writeDetails = cli.GetOptionValue("writeDetails");
            writeConfig  = cli.GetOptionValue("writeConfig");
            // General
            conf = new JobConf();
            UpdateConfiguration(conf, cli.GetOptionValues("D"));
            return(true);
        }
 public virtual void TestRollingUpgrade()
 {
     {
         string[] args = new string[] { "-rollingUpgrade" };
         HdfsServerConstants.StartupOption opt = NameNode.ParseArguments(args);
         NUnit.Framework.Assert.IsNull(opt);
     }
     {
         string[] args = new string[] { "-rollingUpgrade", "started" };
         HdfsServerConstants.StartupOption opt = NameNode.ParseArguments(args);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Rollingupgrade, opt);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.RollingUpgradeStartupOption.Started,
                                         opt.GetRollingUpgradeStartupOption());
         NUnit.Framework.Assert.IsTrue(
             HdfsServerConstants.RollingUpgradeStartupOption.Started.Matches(opt));
     }
     {
         string[] args = new string[] { "-rollingUpgrade", "downgrade" };
         HdfsServerConstants.StartupOption opt = NameNode.ParseArguments(args);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Rollingupgrade, opt);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.RollingUpgradeStartupOption.Downgrade,
                                         opt.GetRollingUpgradeStartupOption());
         NUnit.Framework.Assert.IsTrue(
             HdfsServerConstants.RollingUpgradeStartupOption.Downgrade.Matches(opt));
     }
     {
         string[] args = new string[] { "-rollingUpgrade", "rollback" };
         HdfsServerConstants.StartupOption opt = NameNode.ParseArguments(args);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Rollingupgrade, opt);
         NUnit.Framework.Assert.AreEqual(HdfsServerConstants.RollingUpgradeStartupOption.Rollback,
                                         opt.GetRollingUpgradeStartupOption());
         NUnit.Framework.Assert.IsTrue(
             HdfsServerConstants.RollingUpgradeStartupOption.Rollback.Matches(opt));
     }
     {
         string[] args = new string[] { "-rollingUpgrade", "foo" };
         try
         {
             NameNode.ParseArguments(args);
             NUnit.Framework.Assert.Fail();
         }
         catch (ArgumentException)
         {
         }
     }
 }
Example #10
 /// <exception cref="System.IO.IOException"/>
 internal virtual Journal GetOrCreateJournal(string jid,
                                             HdfsServerConstants.StartupOption startOpt)
 {
     lock (this)
     {
         QuorumJournalManager.CheckJournalId(jid);
         Journal journal = journalsById[jid];
         if (journal == null)
         {
             FilePath logDir = GetLogDir(jid);
             Log.Info("Initializing journal in directory " + logDir);
             journal = new Journal(conf, logDir, jid, startOpt,
                                   new JournalNode.ErrorReporter(this));
             journalsById[jid] = journal;
         }
         return(journal);
     }
 }
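Illustrative call (the journal id "myjournal" is a placeholder):

 // Fetch, or lazily create and cache, the Journal for a journal id.
 Journal journal = GetOrCreateJournal("myjournal",
                                      HdfsServerConstants.StartupOption.Regular);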
        /// <summary>
        /// Process the given arg list as command line arguments to the DataNode
        /// to make sure we get the expected result.
        /// </summary>
        /// <remarks>
        /// Process the given arg list as command line arguments to the DataNode
        /// to make sure we get the expected result. If the expected result is
        /// success then further validate that the parsed startup option is the
        /// same as what was expected.
        /// </remarks>
        /// <param name="expectSuccess"/>
        /// <param name="expectedOption"/>
        /// <param name="conf"/>
        /// <param name="arg"/>
         private static void CheckExpected(bool expectSuccess,
                                           HdfsServerConstants.StartupOption expectedOption,
                                           Configuration conf, params string[] arg)
        {
            string[] args = new string[arg.Length];
            int      i    = 0;

            foreach (string currentArg in arg)
            {
                args[i++] = currentArg;
            }
            bool returnValue = DataNode.ParseArguments(args, conf);

            HdfsServerConstants.StartupOption option = DataNode.GetStartupOption(conf);
            Assert.AssertThat(returnValue, IS.Is(expectSuccess));
            if (expectSuccess)
            {
                Assert.AssertThat(option, IS.Is(expectedOption));
            }
        }
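Illustrative checks in the style this helper supports (the flag spellings mirror StartupOption names used elsewhere in these examples and are assumptions; expectedOption is ignored when expectSuccess is false, so null is passed there):

 Configuration conf = new HdfsConfiguration();
 CheckExpected(true, HdfsServerConstants.StartupOption.Regular, conf, "-regular");
 CheckExpected(true, HdfsServerConstants.StartupOption.Rollback, conf, "-rollback");
 CheckExpected(false, null, conf, "-notARealOption");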
Example #12
        /// <exception cref="System.IO.IOException"/>
         internal virtual void AnalyzeAndRecoverStorage(HdfsServerConstants.StartupOption startOpt)
         {
             this.state = sd.AnalyzeStorage(startOpt, this);
             bool needRecover = state != Storage.StorageState.Normal &&
                                state != Storage.StorageState.NonExistent &&
                                state != Storage.StorageState.NotFormatted;

             if (state == Storage.StorageState.Normal &&
                 startOpt != HdfsServerConstants.StartupOption.Rollback)
            {
                ReadProperties(sd);
            }
             else if (needRecover)
             {
                 sd.DoRecover(state);
             }
        }
Example #13
        /// <exception cref="System.IO.IOException"/>
         internal virtual BackupNode StartBackupNode(Configuration conf,
                                                     HdfsServerConstants.StartupOption startupOpt,
                                                     int idx)
        {
            Configuration c    = new HdfsConfiguration(conf);
            string        dirs = GetBackupNodeDir(startupOpt, idx);

            c.Set(DFSConfigKeys.DfsNamenodeNameDirKey, dirs);
             c.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
                   "${" + DFSConfigKeys.DfsNamenodeNameDirKey + "}");
            c.Set(DFSConfigKeys.DfsNamenodeBackupAddressKey, "127.0.0.1:0");
            c.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "127.0.0.1:0");
             BackupNode bn = (BackupNode)NameNode.CreateNameNode(
                 new string[] { startupOpt.GetName() }, c);

             NUnit.Framework.Assert.IsTrue(bn.GetRole() + " must be in SafeMode.",
                                           bn.IsInSafeMode());
             NUnit.Framework.Assert.IsTrue(bn.GetRole() + " must be in StandbyState",
                                           Sharpen.Runtime.EqualsIgnoreCase(
                                               bn.GetNamesystem().GetHAState(),
                                               HAServiceProtocol.HAServiceState.Standby.ToString()));
             return(bn);
         }
Example #14
        /// <exception cref="System.Exception"/>
        internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
        {
            Path          file1 = new Path("/checkpoint.dat");
            Path          file2 = new Path("/checkpoint2.dat");
            Path          file3 = new Path("/backup.dat");
            Configuration conf  = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            short replication  = (short)conf.GetInt("dfs.replication", 3);
            int   numDatanodes = Math.Max(3, replication);

            conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            // disable block scanner
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                //
                // verify that 'format' really blew away all pre-existing files
                //
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                //
                // Create file1
                //
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
            FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");

             FSImageTestUtil.AssertParallelFilesAreIdentical(
                 ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file1 still exist.
                //
                 cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes)
                               .Format(false).Build();
                fileSys = cluster.GetFileSystem();
                // check that file1 still exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                fileSys.Delete(file1, true);
                // create new file file2
                fileSys.Mkdirs(file2);
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
                for (int i = 0; i < 10; i++)
                {
                    fileSys.Mkdirs(new Path("file_" + i));
                }
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                // Try BackupNode operations
                IPEndPoint add = backup.GetNameNodeAddress();
                // Write to BN
                FileSystem bnFS = FileSystem.Get(new Path("hdfs://" + NetUtils.GetHostPortString(
                                                              add)).ToUri(), conf);
                bool canWrite = true;
                try
                {
                     Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(
                         bnFS, file3, replication);
                }
                catch (IOException eio)
                {
                    Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
                    canWrite = false;
                }
                NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite
                                               );
                // Reads are allowed for BackupNode, but not for CheckpointNode
                bool canRead = true;
                try
                {
                    bnFS.Exists(file2);
                }
                catch (IOException eio)
                {
                    Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
                    canRead = false;
                }
                NUnit.Framework.Assert.AreEqual("Reads to BackupNode are allowed, but not CheckpointNode."
                                                , canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication
                                                                                );
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication
                                                                                );
                // should also be on BN right away
                NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode", op != HdfsServerConstants.StartupOption
                                              .Backup || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) !=
                                              null);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                 throw new Exception(e.Message, e);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
             FSImageTestUtil.AssertParallelFilesAreIdentical(
                 ImmutableList.Of(bnCurDir, nnCurDir), ImmutableSet.Of<string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file2 exists and
                // file1 does not exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                // verify that file2 exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode: ", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
Example #15
 public virtual void StartupOption(HdfsServerConstants.StartupOption startOpt)
 {
     this.startOpt = startOpt;
 }
Example #16
        /// <summary>Analyze and load storage directories.</summary>
        /// <remarks>
        /// Analyze and load storage directories. Recover from previous transitions if
        /// required.
         /// The block pool storages are either all analyzed or none of them is loaded.
         /// Therefore, a failure on loading any block pool storage results in a faulty
         /// data volume.
        /// </remarks>
        /// <param name="datanode">Datanode to which this storage belongs to</param>
        /// <param name="nsInfo">namespace information</param>
        /// <param name="dataDirs">storage directories of block pool</param>
        /// <param name="startOpt">startup option</param>
        /// <returns>an array of loaded block pool directories.</returns>
        /// <exception cref="System.IO.IOException">on error</exception>
         internal virtual IList<Storage.StorageDirectory> LoadBpStorageDirectories(
             DataNode datanode, NamespaceInfo nsInfo, ICollection<FilePath> dataDirs,
             HdfsServerConstants.StartupOption startOpt)
        {
            IList <Storage.StorageDirectory> succeedDirs = Lists.NewArrayList();

            try
            {
                foreach (FilePath dataDir in dataDirs)
                {
                    if (ContainsStorageDir(dataDir))
                    {
                        throw new IOException("BlockPoolSliceStorage.recoverTransitionRead: " + "attempt to load an used block storage: "
                                              + dataDir);
                    }
                     Storage.StorageDirectory sd = LoadStorageDirectory(datanode, nsInfo,
                                                                        dataDir, startOpt);
                    succeedDirs.AddItem(sd);
                }
            }
            catch (IOException e)
            {
                Log.Warn("Failed to analyze storage directories for block pool " + nsInfo.GetBlockPoolID
                             (), e);
                throw;
            }
            return(succeedDirs);
        }
Example #17
 public TestStartupOptionUpgrade(HdfsServerConstants.StartupOption startOption)
     : base()
 {
     this.startOpt = startOption;
 }
Example #18
 /// <summary>Attempts to start a NameNode with the given operation.</summary>
 /// <remarks>
 /// Attempts to start a NameNode with the given operation.  Starting
 /// the NameNode should throw an exception.
 /// </remarks>
 internal virtual void StartNameNodeShouldFail(HdfsServerConstants.StartupOption operation)
 {
     StartNameNodeShouldFail(operation, null, null);
 }
Example #19
 public virtual void TearDown()
 {
     conf     = null;
     startOpt = null;
 }
Example #20
 internal static string GetBackupNodeDir(HdfsServerConstants.StartupOption t, int idx)
 {
     return(BaseDir + "name" + t.GetName() + idx + "/");
 }
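For example (hypothetical values): assuming BaseDir is "/tmp/dfs/" and the BACKUP option's GetName() is "-backup", the call below would return "/tmp/dfs/name-backup1/".

 string dir = GetBackupNodeDir(HdfsServerConstants.StartupOption.Backup, 1);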
Example #21
        /// <summary>Load one storage directory.</summary>
         /// <remarks>
         /// Load one storage directory. Recover from previous transitions if required.
         /// </remarks>
        /// <param name="datanode">datanode instance</param>
        /// <param name="nsInfo">namespace information</param>
        /// <param name="dataDir">the root path of the storage directory</param>
        /// <param name="startOpt">startup option</param>
        /// <returns>the StorageDirectory successfully loaded.</returns>
        /// <exception cref="System.IO.IOException"/>
         private Storage.StorageDirectory LoadStorageDirectory(DataNode datanode,
                                                               NamespaceInfo nsInfo, FilePath dataDir,
                                                               HdfsServerConstants.StartupOption startOpt)
        {
            Storage.StorageDirectory sd = new Storage.StorageDirectory(dataDir, null, true);
            try
            {
                Storage.StorageState curState = sd.AnalyzeStorage(startOpt, this);
                switch (curState)
                {
                case Storage.StorageState.Normal:
                {
                    // sd is locked but not opened
                    break;
                }

                case Storage.StorageState.NonExistent:
                {
                    Log.Info("Block pool storage directory " + dataDir + " does not exist");
                    throw new IOException("Storage directory " + dataDir + " does not exist");
                }

                case Storage.StorageState.NotFormatted:
                {
                    // format
                    Log.Info("Block pool storage directory " + dataDir + " is not formatted for " + nsInfo
                             .GetBlockPoolID());
                    Log.Info("Formatting ...");
                    Format(sd, nsInfo);
                    break;
                }

                default:
                {
                    // recovery part is common
                    sd.DoRecover(curState);
                    break;
                }
                }
                // 2. Do transitions
                // Each storage directory is treated individually.
                // During startup some of them can upgrade or roll back
                // while others could be up-to-date for the regular startup.
                DoTransition(datanode, sd, nsInfo, startOpt);
                if (GetCTime() != nsInfo.GetCTime())
                {
                    throw new IOException("Data-node and name-node CTimes must be the same.");
                }
                // 3. Update successfully loaded storage.
                SetServiceLayoutVersion(GetServiceLayoutVersion());
                WriteProperties(sd);
                return(sd);
            }
             catch (IOException)
            {
                sd.Unlock();
                throw;
            }
        }
 /// <exception cref="System.IO.IOException"/>
 public virtual void StartDataNodes(Configuration conf, int numDataNodes,
                                    StorageType[][] storageTypes, bool manageDfsDirs,
                                    HdfsServerConstants.StartupOption operation, string[] racks,
                                    string[] nodeGroups, string[] hosts, long[][] storageCapacities,
                                    long[] simulatedCapacities, bool setupHostsFile,
                                    bool checkDataNodeAddrConfig, bool checkDataNodeHostConfig)
 {
     lock (this)
     {
         System.Diagnostics.Debug.Assert(storageCapacities == null || simulatedCapacities == null);
         System.Diagnostics.Debug.Assert(storageTypes == null ||
                                         storageTypes.Length == numDataNodes);
         System.Diagnostics.Debug.Assert(storageCapacities == null ||
                                         storageCapacities.Length == numDataNodes);
         if (operation == HdfsServerConstants.StartupOption.Recover)
         {
             return;
         }
         if (checkDataNodeHostConfig)
         {
             conf.SetIfUnset(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         else
         {
             conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         int curDatanodesNum = dataNodes.Count;
         // for mincluster's the default initialDelay for BRs is 0
         if (conf.Get(DFSConfigKeys.DfsBlockreportInitialDelayKey) == null)
         {
             conf.SetLong(DFSConfigKeys.DfsBlockreportInitialDelayKey, 0);
         }
         // If minicluster's name node is null assume that the conf has been
         // set with the right address:port of the name node.
         //
         if (racks != null && numDataNodes > racks.Length)
         {
             throw new ArgumentException("The length of racks [" + racks.Length
                                         + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
         if (nodeGroups != null && numDataNodes > nodeGroups.Length)
         {
             throw new ArgumentException("The length of nodeGroups [" + nodeGroups.Length
                                         + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
         if (hosts != null && numDataNodes > hosts.Length)
         {
             throw new ArgumentException("The length of hosts [" + hosts.Length
                                         + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
         //Generate some hostnames if required
         if (racks != null && hosts == null)
         {
             hosts = new string[numDataNodes];
             for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
             {
                 hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
             }
         }
         if (simulatedCapacities != null && numDataNodes > simulatedCapacities.Length)
         {
             throw new ArgumentException("The length of simulatedCapacities ["
                                         + simulatedCapacities.Length
                                         + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
         string[] dnArgs = (operation == null ||
                            operation != HdfsServerConstants.StartupOption.Rollback)
                           ? null : new string[] { operation.GetName() };
         DataNode[] dns = new DataNode[numDataNodes];
         for (int i_1 = curDatanodesNum; i_1 < curDatanodesNum + numDataNodes; i_1++)
         {
             Configuration dnConf = new HdfsConfiguration(conf);
             // Set up datanode address
             SetupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
             if (manageDfsDirs)
             {
                 string dirs = MakeDataNodeDirs(i_1,
                                                storageTypes == null ? null : storageTypes[i_1]);
                 dnConf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
                 conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
             }
             if (simulatedCapacities != null)
             {
                 SimulatedFSDataset.SetFactory(dnConf);
                 dnConf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity,
                                simulatedCapacities[i_1 - curDatanodesNum]);
             }
             Log.Info("Starting DataNode " + i_1 + " with " + DFSConfigKeys.DfsDatanodeDataDirKey
                      + ": " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey));
             if (hosts != null)
             {
                 dnConf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, hosts[i_1 - curDatanodesNum]);
                 Log.Info("Starting DataNode " + i_1 + " with hostname set to: " + dnConf.Get(DFSConfigKeys
                                                                                              .DfsDatanodeHostNameKey));
             }
             if (racks != null)
             {
                 string name = hosts[i_1 - curDatanodesNum];
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with hostname : " + name + " to rack " + racks[i_1 - curDatanodesNum
                              ]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[
                                  i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum] + nodeGroups[i_1 -
                                                                                                 curDatanodesNum]);
                 }
             }
             Configuration newconf = new HdfsConfiguration(dnConf);
             // save config
             if (hosts != null)
             {
                 NetUtils.AddStaticResolution(hosts[i_1 - curDatanodesNum], "localhost");
             }
             SecureDataNodeStarter.SecureResources secureResources = null;
             if (UserGroupInformation.IsSecurityEnabled())
             {
                 try
                 {
                     secureResources = SecureDataNodeStarter.GetSecureResources(dnConf);
                 }
                 catch (Exception ex)
                 {
                     Sharpen.Runtime.PrintStackTrace(ex);
                 }
             }
             DataNode dn = DataNode.InstantiateDataNode(dnArgs, dnConf, secureResources);
             if (dn == null)
             {
                 throw new IOException("Cannot start DataNode in " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey
                                                                                ));
             }
             //since the HDFS does things based on IP:port, we need to add the mapping
             //for IP:port to rackId
             string ipAddr = dn.GetXferAddress().Address.GetHostAddress();
             if (racks != null)
             {
                 int port = dn.GetXferAddress().Port;
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks
                              [i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
                              nodeGroups[i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]
                              );
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum] + nodeGroups
                                                 [i_1 - curDatanodesNum]);
                 }
             }
             dn.RunDatanodeDaemon();
             dataNodes.AddItem(new MiniDFSCluster.DataNodeProperties(this, dn, newconf, dnArgs,
                                                                     secureResources, dn.GetIpcPort()));
             dns[i_1 - curDatanodesNum] = dn;
         }
         curDatanodesNum   += numDataNodes;
         this.numDataNodes += numDataNodes;
         WaitActive();
         if (storageCapacities != null)
         {
             // dns and storageCapacities are both indexed by the position of each
             // newly started datanode within this call, not by its cluster-wide index.
             for (int i = 0; i < numDataNodes; ++i)
             {
                 IList<FsVolumeSpi> volumes = dns[i].GetFSDataset().GetVolumes();
                 System.Diagnostics.Debug.Assert(volumes.Count == storagesPerDatanode);
                 for (int j = 0; j < volumes.Count; ++j)
                 {
                     FsVolumeImpl volume = (FsVolumeImpl)volumes[j];
                     volume.SetCapacityForTesting(storageCapacities[i][j]);
                 }
             }
         }
     }
 }
Example #23
 protected internal override HAState CreateHAState(HdfsServerConstants.StartupOption startOpt)
 {
     return(new BackupState());
 }
 public virtual void TestUpgrade()
 {
     HdfsServerConstants.StartupOption opt = null;
     // UPGRADE is set, but nothing else
     opt = NameNode.ParseArguments(new string[] { "-upgrade" });
     NUnit.Framework.Assert.AreEqual(opt, HdfsServerConstants.StartupOption.Upgrade);
     NUnit.Framework.Assert.IsNull(opt.GetClusterId());
     NUnit.Framework.Assert.IsTrue(FSImageFormat.renameReservedMap.IsEmpty());
     // cluster ID is set
     opt = NameNode.ParseArguments(new string[] { "-upgrade", "-clusterid", "mycid" });
     NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Upgrade, opt);
     NUnit.Framework.Assert.AreEqual("mycid", opt.GetClusterId());
     NUnit.Framework.Assert.IsTrue(FSImageFormat.renameReservedMap.IsEmpty());
     // Everything is set
     opt = NameNode.ParseArguments(new string[] { "-upgrade", "-clusterid", "mycid", "-renameReserved"
                                                  , ".snapshot=.my-snapshot,.reserved=.my-reserved" });
     NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Upgrade, opt);
     NUnit.Framework.Assert.AreEqual("mycid", opt.GetClusterId());
     NUnit.Framework.Assert.AreEqual(".my-snapshot", FSImageFormat.renameReservedMap[".snapshot"
                                     ]);
     NUnit.Framework.Assert.AreEqual(".my-reserved", FSImageFormat.renameReservedMap[".reserved"
                                     ]);
     // Reset the map
     FSImageFormat.renameReservedMap.Clear();
     // Everything is set, but in a different order
     opt = NameNode.ParseArguments(new string[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.snapshot=.my-snapshot"
                                                  , "-clusterid", "mycid" });
     NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Upgrade, opt);
     NUnit.Framework.Assert.AreEqual("mycid", opt.GetClusterId());
     NUnit.Framework.Assert.AreEqual(".my-snapshot", FSImageFormat.renameReservedMap[".snapshot"
                                     ]);
     NUnit.Framework.Assert.AreEqual(".my-reserved", FSImageFormat.renameReservedMap[".reserved"
                                     ]);
     // Try the default renameReserved
     opt = NameNode.ParseArguments(new string[] { "-upgrade", "-renameReserved" });
     NUnit.Framework.Assert.AreEqual(HdfsServerConstants.StartupOption.Upgrade, opt);
     NUnit.Framework.Assert.AreEqual(".snapshot." + HdfsConstants.NamenodeLayoutVersion
                                     + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap[".snapshot"]);
     NUnit.Framework.Assert.AreEqual(".reserved." + HdfsConstants.NamenodeLayoutVersion
                                     + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap[".reserved"]);
     // Try some error conditions
     try
     {
         opt = NameNode.ParseArguments(new string[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
     }
     catch (ArgumentException e)
     {
         GenericTestUtils.AssertExceptionContains("Unknown reserved path", e);
     }
     try
     {
         opt = NameNode.ParseArguments(new string[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.snapshot=.snapshot" });
     }
     catch (ArgumentException e)
     {
         GenericTestUtils.AssertExceptionContains("Invalid rename path", e);
     }
     try
     {
         opt = NameNode.ParseArguments(new string[] { "-upgrade", "-renameReserved", ".snapshot=.reserved" });
     }
     catch (ArgumentException e)
     {
         GenericTestUtils.AssertExceptionContains("Invalid rename path", e);
     }
     opt = NameNode.ParseArguments(new string[] { "-upgrade", "-cid" });
     NUnit.Framework.Assert.IsNull(opt);
 }
Example #25
 /// <summary>
 /// Analyze whether a transition of the BP state is required and
 /// perform it if necessary.
 /// </summary>
 /// <remarks>
 /// Analyze whether a transition of the BP state is required and
 /// perform it if necessary.
 /// <br />
 /// Rollback if previousLV &gt;= LAYOUT_VERSION && prevCTime &lt;= namenode.cTime.
 /// Upgrade if this.LV &gt; LAYOUT_VERSION || this.cTime &lt; namenode.cTime Regular
 /// startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
 /// </remarks>
 /// <param name="sd">storage directory <SD>/current/<bpid></param>
 /// <param name="nsInfo">namespace info</param>
 /// <param name="startOpt">startup option</param>
 /// <exception cref="System.IO.IOException"/>
  private void DoTransition(DataNode datanode, Storage.StorageDirectory sd,
                            NamespaceInfo nsInfo, HdfsServerConstants.StartupOption startOpt)
 {
      if (startOpt == HdfsServerConstants.StartupOption.Rollback &&
          sd.GetPreviousDir().Exists())
      {
          Preconditions.CheckState(!GetTrashRootDir(sd).Exists(),
                                   sd.GetPreviousDir() + " and " + GetTrashRootDir(sd)
                                   + " should not both be present.");
          DoRollback(sd, nsInfo);
      }
      else if (startOpt == HdfsServerConstants.StartupOption.Rollback &&
               !sd.GetPreviousDir().Exists())
      {
          // rollback if applicable: restore all the files in the trash. The
          // restored files are retained during rolling upgrade rollback; they are
          // deleted during rolling upgrade downgrade.
          int restored = RestoreBlockFilesFromTrash(GetTrashRootDir(sd));
          Log.Info("Restored " + restored + " block files from trash.");
      }
     ReadProperties(sd);
     CheckVersionUpgradable(this.layoutVersion);
      System.Diagnostics.Debug.Assert(this.layoutVersion >= HdfsConstants.DatanodeLayoutVersion,
                                      "Future version is not allowed");
     if (GetNamespaceID() != nsInfo.GetNamespaceID())
     {
         throw new IOException("Incompatible namespaceIDs in " + sd.GetRoot().GetCanonicalPath
                                   () + ": namenode namespaceID = " + nsInfo.GetNamespaceID() + "; datanode namespaceID = "
                               + GetNamespaceID());
     }
     if (!blockpoolID.Equals(nsInfo.GetBlockPoolID()))
     {
         throw new IOException("Incompatible blockpoolIDs in " + sd.GetRoot().GetCanonicalPath
                                   () + ": namenode blockpoolID = " + nsInfo.GetBlockPoolID() + "; datanode blockpoolID = "
                               + blockpoolID);
     }
      if (this.layoutVersion == HdfsConstants.DatanodeLayoutVersion &&
          this.cTime == nsInfo.GetCTime())
      {
          return; // regular startup
      }
     if (this.layoutVersion > HdfsConstants.DatanodeLayoutVersion)
     {
         int restored = RestoreBlockFilesFromTrash(GetTrashRootDir(sd));
         Log.Info("Restored " + restored + " block files from trash " + "before the layout upgrade. These blocks will be moved to "
                  + "the previous directory during the upgrade");
     }
      if (this.layoutVersion > HdfsConstants.DatanodeLayoutVersion ||
          this.cTime < nsInfo.GetCTime())
      {
          DoUpgrade(datanode, sd, nsInfo); // upgrade
          return;
      }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
     // must shutdown
     throw new IOException("Datanode state: LV = " + this.GetLayoutVersion() + " CTime = "
                           + this.GetCTime() + " is newer than the namespace state: LV = " + nsInfo.GetLayoutVersion
                               () + " CTime = " + nsInfo.GetCTime());
 }
Example #26
 /// <summary>Analyze storage directories.</summary>
 /// <remarks>
  /// Analyze storage directories. Recover from previous transitions if required.
  /// The block pool storages are either all analyzed or none of them is loaded.
  /// Therefore, a failure on loading any block pool storage results in a faulty
  /// data volume.
 /// </remarks>
 /// <param name="datanode">Datanode to which this storage belongs to</param>
 /// <param name="nsInfo">namespace information</param>
 /// <param name="dataDirs">storage directories of block pool</param>
 /// <param name="startOpt">startup option</param>
 /// <exception cref="System.IO.IOException">on error</exception>
  internal virtual void RecoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
                                              ICollection<FilePath> dataDirs,
                                              HdfsServerConstants.StartupOption startOpt)
  {
      Log.Info("Analyzing storage directories for bpid " + nsInfo.GetBlockPoolID());
      foreach (Storage.StorageDirectory sd in LoadBpStorageDirectories(datanode, nsInfo,
                                                                       dataDirs, startOpt))
     {
         AddStorageDir(sd);
     }
 }
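An illustrative call site (hedged: bpStorage, dataNode, nsInfo, and dataDirs are placeholders for values a DataNode obtains during its handshake with the NameNode):

 // Analyze, recover, and register every storage directory for this block pool.
 bpStorage.RecoverTransitionRead(dataNode, nsInfo, dataDirs,
                                 HdfsServerConstants.StartupOption.Regular);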