Example #1
        public virtual void TestBlockPoolStorageStates()
        {
            string[] baseDirs;
        // First set up the datanode storage directory
            string bpid = UpgradeUtilities.GetCurrentBlockPoolID(null);

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf.SetInt("dfs.datanode.scan.period.hours", -1);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                for (int i = 0; i < NumDnTestCases; i++)
                {
                    bool[] testCase         = testCases[i];
                    bool   shouldRecover    = testCase[ShouldRecover];
                    bool   curAfterRecover  = testCase[CurrentShouldExistAfterRecover];
                    bool   prevAfterRecover = testCase[PreviousShouldExistAfterRecover];
                    Log("BLOCK_POOL recovery", numDirs, i, testCase);
                    CreateNameNodeStorageState(new bool[] { true, true, false, false, false });
                    cluster  = CreateCluster(conf);
                    baseDirs = CreateBlockPoolStorageState(bpid, testCase);
                    if (!testCase[CurrentExists] && !testCase[PreviousExists] &&
                        !testCase[PreviousTmpExists] && !testCase[RemovedTmpExists])
                    {
                        // DataNode will create and format current if no directories exist
                        cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                               null);
                    }
                    else
                    {
                        if (shouldRecover)
                        {
                            cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                                   null);
                            CheckResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
                        }
                        else
                        {
                            cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                                   null);
                            NUnit.Framework.Assert.IsFalse(cluster.GetDataNodes()[0].IsBPServiceAlive(bpid));
                        }
                    }
                    cluster.Shutdown();
                }
            }
        }
Example #2
 public virtual void TestNNStorageStates()
 {
     string[] baseDirs;
     for (int numDirs = 1; numDirs <= 2; numDirs++)
     {
         conf = new HdfsConfiguration();
         conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
         conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
         for (int i = 0; i < NumNnTestCases; i++)
         {
             bool[] testCase         = testCases[i];
             bool   shouldRecover    = testCase[ShouldRecover];
             bool   curAfterRecover  = testCase[CurrentShouldExistAfterRecover];
             bool   prevAfterRecover = testCase[PreviousShouldExistAfterRecover];
             Log("NAME_NODE recovery", numDirs, i, testCase);
             baseDirs = CreateNameNodeStorageState(testCase);
             if (shouldRecover)
             {
                 cluster = CreateCluster(conf);
                 CheckResultNameNode(baseDirs, curAfterRecover, prevAfterRecover);
                 cluster.Shutdown();
             }
             else
             {
                 try
                 {
                     cluster = CreateCluster(conf);
                     throw new Exception("NameNode should have failed to start");
                 }
                 catch (IOException expected)
                 {
                     // the exception is expected
                     // check that the message says "not formatted"
                     // when storage directory is empty (case #5)
                      if (!testCases[i][CurrentExists] && !testCases[i][PreviousTmpExists] &&
                          !testCases[i][PreviousExists] && !testCases[i][RemovedTmpExists])
                      {
                          NUnit.Framework.Assert.IsTrue(
                              expected.GetLocalizedMessage().Contains("NameNode is not formatted"));
                     }
                 }
             }
             cluster.Shutdown();
         }
     }
 }
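
The two storage-state examples above reference several members that are not shown here: the conf and cluster fields, the testCases table, and the index constants used to read each bool[] test case. The sketch below is a hypothetical reconstruction of that surrounding fixture, assuming NUnit and the same HdfsConfiguration/MiniDFSCluster types; the index values and sample rows are illustrative assumptions, not the original table.

    // Hypothetical fixture sketch (assumed, not shown in the examples above).
    public class StorageStateRecoveryFixtureSketch
    {
        protected HdfsConfiguration conf;
        protected MiniDFSCluster cluster;

        // Column indices into each bool[] test case, matching the names used above.
        // The numeric values are illustrative assumptions.
        protected const int CurrentExists = 0;
        protected const int PreviousExists = 1;
        protected const int PreviousTmpExists = 2;
        protected const int RemovedTmpExists = 3;
        protected const int ShouldRecover = 4;
        protected const int CurrentShouldExistAfterRecover = 5;
        protected const int PreviousShouldExistAfterRecover = 6;

        // Each row: which directories exist before startup, whether recovery should
        // succeed, and which directories should remain afterwards (illustrative rows).
        protected static readonly bool[][] testCases = new bool[][]
        {
            new bool[] { true,  false, false, false, true,  true,  false },
            new bool[] { false, false, false, false, false, false, false }
        };
        protected static readonly int NumNnTestCases = testCases.Length;
        protected static readonly int NumDnTestCases = testCases.Length;
    }
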
Example #3
        /// <summary>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// </summary>
        /// <remarks>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// <pre>
        /// For each 3-tuple in the cross product
        /// ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
        /// {currentNamespaceId,incorrectNamespaceId},
        /// {pastFsscTime,currentFsscTime,futureFsscTime})
        /// 1. Start up a Namenode with a version file containing
        /// (currentLayoutVersion,currentNamespaceId,currentFsscTime)
        /// 2. Attempt to start up a Datanode with a version file containing
        /// this iteration's version 3-tuple
        /// </pre>
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestVersions()
        {
            UpgradeUtilities.Initialize();
            Configuration conf = UpgradeUtilities.InitializeStorageStateConf(1, new HdfsConfiguration());

            TestDFSStartupVersions.StorageData[] versions = InitializeVersions();
            UpgradeUtilities.CreateNameNodeStorageDirs(
                conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey), "current");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                          .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                          .StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
            TestDFSStartupVersions.StorageData nameNodeVersion = new TestDFSStartupVersions.StorageData(
                HdfsConstants.NamenodeLayoutVersion, UpgradeUtilities.GetCurrentNamespaceID(cluster),
                UpgradeUtilities.GetCurrentClusterID(cluster), UpgradeUtilities.GetCurrentFsscTime(cluster),
                UpgradeUtilities.GetCurrentBlockPoolID(cluster));
            Log("NameNode version info", HdfsServerConstants.NodeType.NameNode, null, nameNodeVersion);
            string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);

            for (int i = 0; i < versions.Length; i++)
            {
                FilePath[] storage = UpgradeUtilities.CreateDataNodeStorageDirs(
                    conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey), "current");
                Log("DataNode version info", HdfsServerConstants.NodeType.DataNode, i, versions[i]);
                UpgradeUtilities.CreateDataNodeVersionFile(storage, versions[i].storageInfo, bpid,
                                                           versions[i].blockPoolId);
                try
                {
                    cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                           null);
                }
                catch (Exception)
                {
                    // Ignore. The asserts below will check for problems.
                }
                NUnit.Framework.Assert.IsTrue(cluster.GetNameNode() != null);
                NUnit.Framework.Assert.AreEqual(IsVersionCompatible(nameNodeVersion, versions[i]),
                                                cluster.IsDataNodeUp());
                cluster.ShutdownDataNodes();
            }
        }
Example #4
        public virtual void TestUpgrade4()
        {
            int numDirs = 4;

            conf = new HdfsConfiguration();
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
            conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
            string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
            Log("NameNode upgrade with one bad storage dir", numDirs);
            UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
            try
            {
                // assert("storage dir has been prepared for failure before reaching this point");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade, typeof(IOException),
                                        Sharpen.Pattern.Compile("failed in 1 storage"));
            }
            finally
            {
                // assert("storage dir shall be returned to normal state before exiting");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
Example #5
        public virtual void TestRollback()
        {
            FilePath[] baseDirs;
            UpgradeUtilities.Initialize();
            StorageInfo storageInfo = null;

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
                Log("Normal NameNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                CheckResult(HdfsServerConstants.NodeType.NameNode, nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("Normal DataNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                              .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                              .DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                CheckResult(HdfsServerConstants.NodeType.DataNode, dataNodeDirs);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("Normal BlockPool rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                              .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                              .DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "current",
                                                            UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                // Create a previous snapshot for the blockpool
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "previous",
                                                            UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                // Put newer layout version in current.
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion - 1,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster),
                                              HdfsServerConstants.NodeType.DataNode);
                // Overwrite VERSION file in the current directory of
                // volume directories and block pool slice directories
                // with a layout version from future.
                FilePath[] dataCurrentDirs = new FilePath[dataNodeDirs.Length];
                for (int i = 0; i < dataNodeDirs.Length; i++)
                {
                    dataCurrentDirs[i] = new FilePath((new Path(dataNodeDirs[i] + "/current")).ToString());
                }
                UpgradeUtilities.CreateDataNodeVersionFile(dataCurrentDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                NUnit.Framework.Assert.IsTrue(cluster.IsDataNodeUp());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                StartNameNodeShouldFail("None of the storage directories contain previous fs state"
                                        );
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("DataNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                              .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                              .StartupOption(HdfsServerConstants.StartupOption.Upgrade).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with future stored layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                              .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                              .DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(int.MinValue,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster),
                                              HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback,
                                         cluster.GetNamesystem().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with newer fsscTime in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                              .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                              .DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              long.MaxValue, HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback,
                                         cluster.GetNamesystem().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback with no edits file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "edits.*");
                StartNameNodeShouldFail("Gap in transactions");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with no image file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "fsimage_.*");
                StartNameNodeShouldFail("No valid image files found");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with corrupt version file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                foreach (FilePath f in baseDirs)
                {
                    UpgradeUtilities.CorruptFile(new FilePath(f, "VERSION"),
                                                 Sharpen.Runtime.GetBytesForString("layoutVersion", Charsets.Utf8),
                                                 Sharpen.Runtime.GetBytesForString("xxxxxxxxxxxxx", Charsets.Utf8));
                }
                StartNameNodeShouldFail("file VERSION has layoutVersion missing");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with old layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                storageInfo = new StorageInfo(1, UpgradeUtilities.GetCurrentNamespaceID(null),
                                              UpgradeUtilities.GetCurrentClusterID(null),
                                              UpgradeUtilities.GetCurrentFsscTime(null),
                                              HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
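
The rollback example calls StartNameNodeShouldFail(searchString) several times without showing it. A minimal sketch, assuming the helper simply attempts the same NameNode.DoRollback entry point used above and asserts that the failure message contains the expected text, might look like this; the real helper in the test class may well do more (for example, attempt a full cluster start).

    // Hypothetical helper sketch; the original helper is not shown in these examples.
    private void StartNameNodeShouldFail(string searchString)
    {
        try
        {
            NameNode.DoRollback(conf, false);
            throw new Exception("NameNode rollback should have failed: " + searchString);
        }
        catch (IOException expected)
        {
            // The rollback is expected to fail with a message naming the problem.
            NUnit.Framework.Assert.IsTrue(expected.Message.Contains(searchString));
        }
    }
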
Example #6
 public virtual void TestFinalize()
 {
     UpgradeUtilities.Initialize();
     for (int numDirs = 1; numDirs <= 2; numDirs++)
     {
         /* This test requires that "current" directory not change after
          * the upgrade. Actually it is ok for those contents to change.
          * For now disabling block verification so that the contents are
          * not changed.
          * Disable duplicate replica deletion as the test intentionally
          * mirrors the contents of storage directories.
          */
         conf = new HdfsConfiguration();
         conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
         conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
         conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
         string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
         string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
         Log("Finalize NN & DN with existing previous dir", numDirs);
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
          cluster = new MiniDFSCluster.Builder(conf).Format(false).ManageDataDfsDirs(false)
                        .ManageNameDfsDirs(false)
                        .StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous DN finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, null);
         Log("Finalize NN & DN without existing previous dir", numDirs);
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous DN finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, null);
         cluster.Shutdown();
         UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
         UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
         Log("Finalize NN & BP with existing previous dir", numDirs);
         string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
         UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "current", bpid);
         UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "previous", bpid);
          cluster = new MiniDFSCluster.Builder(conf).Format(false).ManageDataDfsDirs(false)
                        .ManageNameDfsDirs(false)
                        .StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous BP finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, bpid);
         Log("Finalize NN & BP without existing previous dir", numDirs);
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous BP finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, bpid);
         cluster.Shutdown();
         UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
         UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
     }
 }
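
The finalize example verifies the outcome with CheckResult(nameNodeDirs, dataNodeDirs, bpid), which is not shown here. Since finalization removes the "previous" state, a minimal check, sketched under the assumption that the Sharpen FilePath type mirrors java.io.File and that block pool slices live under <dataDir>/current/<bpid>, could assert that no "previous" directories remain:

    // Hypothetical verification sketch; the original CheckResult likely checks more
    // (e.g. that the "current" contents are intact), which is not reproduced here.
    private static void CheckResultSketch(string[] nameNodeDirs, string[] dataNodeDirs, string bpid)
    {
        foreach (string dir in nameNodeDirs)
        {
            NUnit.Framework.Assert.IsFalse(new FilePath(dir, "previous").Exists());
        }
        foreach (string dir in dataNodeDirs)
        {
            NUnit.Framework.Assert.IsFalse(new FilePath(dir, "previous").Exists());
            if (bpid != null)
            {
                // Block pool slice directory: <dataDir>/current/<bpid>/previous
                NUnit.Framework.Assert.IsFalse(new FilePath(dir, "current/" + bpid + "/previous").Exists());
            }
        }
    }
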
Example #7
        /// <summary>
        /// This test attempts to upgrade the NameNode and DataNode under
        /// a number of valid and invalid conditions.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpgrade()
        {
            FilePath[]  baseDirs;
            StorageInfo storageInfo = null;

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
                conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
                Log("Normal NameNode upgrade", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                // make sure that rolling upgrade cannot be started
                try
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    NUnit.Framework.Assert.Fail();
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.AreEqual(typeof(InconsistentFSStateException).FullName,
                                                    re.GetClassName());
                    Log.Info("The exception is expected.", re);
                }
                CheckNameNode(nameNodeDirs, ExpectedTxid);
                if (numDirs > 1)
                {
                    TestParallelImageWrite.CheckImages(cluster.GetNamesystem(), numDirs);
                }
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("Normal DataNode upgrade", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                       null);
                CheckDataNode(dataNodeDirs, UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode upgrade with existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("DataNode upgrade with existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                       null);
                CheckDataNode(dataNodeDirs, UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode upgrade with future stored layout version in current", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster     = CreateCluster();
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                storageInfo = new StorageInfo(int.MinValue,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster),
                                              HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Regular,
                                         UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode upgrade with newer fsscTime in current", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster     = CreateCluster();
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              long.MaxValue, HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                // Ensure the corresponding block pool fails to initialize
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Regular,
                                         UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode upgrade with no edits file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                DeleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with no image file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                DeleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with corrupt version file", numDirs);
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                foreach (FilePath f in baseDirs)
                {
                    UpgradeUtilities.CorruptFile(new FilePath(f, "VERSION"),
                                                 Sharpen.Runtime.GetBytesForString("layoutVersion", Charsets.Utf8),
                                                 Sharpen.Runtime.GetBytesForString("xxxxxxxxxxxxx", Charsets.Utf8));
                }
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with old layout version in current", numDirs);
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                storageInfo = new StorageInfo(Storage.LastUpgradableLayoutVersion + 1,
                                              UpgradeUtilities.GetCurrentNamespaceID(null),
                                              UpgradeUtilities.GetCurrentClusterID(null),
                                              UpgradeUtilities.GetCurrentFsscTime(null),
                                              HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with future layout version in current", numDirs);
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                storageInfo = new StorageInfo(int.MinValue,
                                              UpgradeUtilities.GetCurrentNamespaceID(null),
                                              UpgradeUtilities.GetCurrentClusterID(null),
                                              UpgradeUtilities.GetCurrentFsscTime(null),
                                              HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
            // end numDir loop
            // One more check: normal NN upgrade with 4 directories, concurrent write
            int numDirs_1 = 4;
            {
                conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
                conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs_1, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                Log("Normal NameNode upgrade", numDirs_1);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                // make sure that rolling upgrade cannot be started
                try
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    NUnit.Framework.Assert.Fail();
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.AreEqual(typeof(InconsistentFSStateException).FullName,
                                                    re.GetClassName());
                    Log.Info("The exception is expected.", re);
                }
                CheckNameNode(nameNodeDirs, ExpectedTxid);
                TestParallelImageWrite.CheckImages(cluster.GetNamesystem(), numDirs_1);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
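
Examples 4 and 7 call a parameterless CreateCluster() helper that is not shown. Based on how the surrounding code pre-creates storage directories with UpgradeUtilities and then expects an upgraded NameNode, a plausible sketch (an assumption, not the original helper) is:

    // Hypothetical helper sketch: boot a NameNode-only MiniDFSCluster over existing
    // storage directories, using the upgrade startup path.
    private MiniDFSCluster CreateCluster()
    {
        return new MiniDFSCluster.Builder(conf)
                   .NumDataNodes(0)      // DataNodes are started explicitly by each scenario
                   .Format(false)        // reuse the dirs created by UpgradeUtilities
                   .ManageNameDfsDirs(false)
                   .ManageDataDfsDirs(false)
                   .StartupOption(HdfsServerConstants.StartupOption.Upgrade)  // assumed upgrade option
                   .Build();
    }
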
Example #8
        public virtual void TestPreserveEditLogs()
        {
            UnpackStorage(Hadoop252Image, HadoopDfsDirTxt);
            Configuration conf = new HdfsConfiguration();

            conf = UpgradeUtilities.InitializeStorageStateConf(1, conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false)
                                         .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                                         .StartupOption(HdfsServerConstants.StartupOption.Upgrade).Build();
            DFSInotifyEventInputStream ieis = cluster.GetFileSystem().GetInotifyEventStream(0);
            EventBatch batch;

            Event.CreateEvent ce;
            Event.RenameEvent re;
            // mkdir /input
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Create);
            ce = (Event.CreateEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input");
            // mkdir /input/dir1~5
            for (int i = 1; i <= 5; i++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Create);
                ce = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input/dir" + i);
            }
            // copyFromLocal randome_file_1~2 /input/dir1~2
            for (int i_1 = 1; i_1 <= 2; i_1++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                if (batch.GetEvents()[0].GetEventType() != Event.EventType.Create)
                {
                    FSImage.Log.Debug(string.Empty);
                }
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Create);
                // copyFromLocal randome_file_1 /input/dir1, CLOSE
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Close);
                // copyFromLocal randome_file_1 /input/dir1, CLOSE
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Rename);
                re = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir" + i_1 + "/randome_file_" + i_1);
            }
            // mv /input/dir1/randome_file_1 /input/dir3/randome_file_3
            long txIDBeforeRename = batch.GetTxid();

            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // rmdir /input/dir1
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Unlink);
            NUnit.Framework.Assert.AreEqual(((Event.UnlinkEvent)batch.GetEvents()[0]).GetPath(), "/input/dir1");
            long lastTxID = batch.GetTxid();

            // Start inotify from the tx before rename /input/dir1/randome_file_1
            ieis  = cluster.GetFileSystem().GetInotifyEventStream(txIDBeforeRename);
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType.Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // Try to read beyond available edits
            ieis = cluster.GetFileSystem().GetInotifyEventStream(lastTxID + 1);
            NUnit.Framework.Assert.IsNull(ieis.Poll());
            cluster.Shutdown();
        }
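
Example 8 relies on TestDFSInotifyEventInputStream.WaitForNextEvents(ieis), which is not included above. Since the example itself reads events with ieis.Poll(), a minimal sketch of such a helper (an assumed reconstruction, not the original) just polls until a non-null batch arrives:

    // Hypothetical helper sketch: block until the inotify stream yields an event batch.
    internal static EventBatch WaitForNextEvents(DFSInotifyEventInputStream ieis)
    {
        EventBatch batch;
        while ((batch = ieis.Poll()) == null)
        {
            // Back off briefly before polling again so the loop does not spin.
            Sharpen.Thread.Sleep(100);
        }
        return batch;
    }
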