Example #1
 /// <summary>
 /// For NameNode, verify that the current and/or previous exist as indicated by
 /// the method parameters.
 /// </summary>
 /// <remarks>
 /// For NameNode, verify that the current and/or previous exist as indicated by
 /// the method parameters.  If previous exists, verify that
 /// it hasn't been modified by comparing the checksums of the files it
 /// contains with their original checksums.  It is assumed that
 /// the server has recovered.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckResultNameNode(string[] baseDirs, bool currentShouldExist
                                           , bool previousShouldExist)
 {
     if (currentShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
             NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current").IsDirectory());
              NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current/VERSION").IsFile());
              NUnit.Framework.Assert.IsNotNull(FSImageTestUtil.FindNewestImageFile(baseDirs[i] + "/current"));
              NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current/seen_txid").IsFile());
         }
     }
     if (previousShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
              NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "previous").IsDirectory());
              NUnit.Framework.Assert.AreEqual(
                  UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.NameNode,
                                                    new FilePath(baseDirs[i], "previous"), false),
                  UpgradeUtilities.ChecksumMasterNameNodeContents());
         }
     }
 }
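For context, this helper is driven by the NameNode recovery loop shown in Example #7. A condensed, hedged sketch of one iteration (not verbatim from the source; it assumes the testCases table and column constants used there):

 // One NAME_NODE recovery iteration, modeled on Example #7 (sketch, not verbatim).
 bool[] testCase = testCases[0];                              // a row of the testCases table
 string[] baseDirs = CreateNameNodeStorageState(testCase);    // seed current/previous/*.tmp dirs
 cluster = CreateCluster(conf);                               // NameNode recovers on startup
 CheckResultNameNode(baseDirs,
                     testCase[CurrentShouldExistAfterRecover],
                     testCase[PreviousShouldExistAfterRecover]);
 cluster.Shutdown();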
Example #2
 /// <summary>
 /// Sets up the storage directories for a block pool under
 /// <see cref="DFSConfigKeys.DfsDatanodeDataDirKey"/>
 /// . For each element
 /// in
 /// <see cref="DFSConfigKeys.DfsDatanodeDataDirKey"/>
 /// , the subdirectories
 /// represented by the first four elements of the <code>state</code> array
 /// will be created and populated.
 /// See
 /// <see cref="UpgradeUtilities.CreateBlockPoolStorageDirs(string[], string, string)"
 ///     />
 /// </summary>
 /// <param name="bpid">block pool Id</param>
 /// <param name="state">
 /// a row from the testCases table which indicates which directories
 /// to set up for the node
 /// </param>
 /// <returns>file paths representing block pool storage directories</returns>
 /// <exception cref="System.Exception"/>
 internal virtual string[] CreateBlockPoolStorageState(string bpid, bool[] state)
 {
     string[] baseDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
     UpgradeUtilities.CreateEmptyDirs(baseDirs);
     UpgradeUtilities.CreateDataNodeStorageDirs(baseDirs, "current");
     // After copying the storage directories from master datanode, empty
     // the block pool storage directories
     string[] bpDirs = UpgradeUtilities.CreateEmptyBPDirs(baseDirs, bpid);
     if (state[CurrentExists])
     {
         // current
         UpgradeUtilities.CreateBlockPoolStorageDirs(baseDirs, "current", bpid);
     }
     if (state[PreviousExists])
     {
         // previous
         UpgradeUtilities.CreateBlockPoolStorageDirs(baseDirs, "previous", bpid);
     }
     if (state[PreviousTmpExists])
     {
         // previous.tmp
         UpgradeUtilities.CreateBlockPoolStorageDirs(baseDirs, "previous.tmp", bpid);
     }
     if (state[RemovedTmpExists])
     {
         // removed.tmp
         UpgradeUtilities.CreateBlockPoolStorageDirs(baseDirs, "removed.tmp", bpid);
     }
     return(bpDirs);
 }
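A hedged usage sketch, assembled from the call pattern in Example #6 (not copied verbatim): the NameNode storage state is seeded first, the cluster is started, and only then are the block pool directories laid out for the test case.

 // Block pool storage setup for one test case, modeled on Example #6 (sketch).
 string bpid = UpgradeUtilities.GetCurrentBlockPoolID(null);
 CreateNameNodeStorageState(new bool[] { true, true, false, false, false });
 cluster = CreateCluster(conf);
 string[] bpDirs = CreateBlockPoolStorageState(bpid, testCases[0]);
 // bpDirs holds the block pool storage roots under each configured data dir
 // (see the <returns> documentation above).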
Example #3
 /// <summary>
 /// Sets up the storage directories for a datanode under
 /// <see cref="DFSConfigKeys.DfsDatanodeDataDirKey"/>
 /// . For each element in
 /// <see cref="DFSConfigKeys.DfsDatanodeDataDirKey"/>
 /// , the subdirectories
 /// represented by the first four elements of the <code>state</code> array
 /// will be created and populated.
 /// See
 /// <see cref="UpgradeUtilities.CreateDataNodeStorageDirs(string[], string)"/>
 /// </summary>
 /// <param name="state">
 /// a row from the testCases table which indicates which directories
 /// to set up for the node
 /// </param>
 /// <returns>file paths representing datanode storage directories</returns>
 /// <exception cref="System.Exception"/>
 internal virtual string[] CreateDataNodeStorageState(bool[] state)
 {
     string[] baseDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
     UpgradeUtilities.CreateEmptyDirs(baseDirs);
     if (state[CurrentExists])
     {
         // current
         UpgradeUtilities.CreateDataNodeStorageDirs(baseDirs, "current");
     }
     if (state[PreviousExists])
     {
         // previous
         UpgradeUtilities.CreateDataNodeStorageDirs(baseDirs, "previous");
     }
     if (state[PreviousTmpExists])
     {
         // previous.tmp
         UpgradeUtilities.CreateDataNodeStorageDirs(baseDirs, "previous.tmp");
     }
     if (state[RemovedTmpExists])
     {
         // removed.tmp
         UpgradeUtilities.CreateDataNodeStorageDirs(baseDirs, "removed.tmp");
     }
     return(baseDirs);
 }
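Only the first four columns of the state array are consulted here; each one toggles a subdirectory under every data dir. A minimal sketch of how those columns might be declared and used follows; the numeric index values are illustrative assumptions, not taken from the source.

 // Illustrative column indices for a row of the testCases table (assumed values).
 internal const int CurrentExists     = 0;   // create <data dir>/current
 internal const int PreviousExists    = 1;   // create <data dir>/previous
 internal const int PreviousTmpExists = 2;   // create <data dir>/previous.tmp
 internal const int RemovedTmpExists  = 3;   // create <data dir>/removed.tmp

 // Hypothetical call: only "current" and "previous" are populated.
 string[] dirs = CreateDataNodeStorageState(new bool[] { true, true, false, false });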
Example #4
        /// <summary>Verify that the new current directory is the old previous.</summary>
        /// <remarks>
        /// Verify that the new current directory is the old previous.
        /// It is assumed that the server has recovered and rolled back.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        internal virtual void CheckResult(HdfsServerConstants.NodeType nodeType, string[]
                                          baseDirs)
        {
            IList <FilePath> curDirs = Lists.NewArrayList();

            foreach (string baseDir in baseDirs)
            {
                FilePath curDir = new FilePath(baseDir, "current");
                curDirs.AddItem(curDir);
                switch (nodeType)
                {
                case HdfsServerConstants.NodeType.NameNode:
                {
                    FSImageTestUtil.AssertReasonableNameCurrentDir(curDir);
                    break;
                }

                case HdfsServerConstants.NodeType.DataNode:
                {
                     NUnit.Framework.Assert.AreEqual(
                         UpgradeUtilities.ChecksumContents(nodeType, curDir, false),
                         UpgradeUtilities.ChecksumMasterDataNodeContents());
                    break;
                }
                }
            }
             FSImageTestUtil.AssertParallelFilesAreIdentical(curDirs, Sharpen.Collections.EmptySet<string>());
            for (int i = 0; i < baseDirs.Length; i++)
            {
                 NUnit.Framework.Assert.IsFalse(new FilePath(baseDirs[i], "previous").IsDirectory());
            }
        }
Example #5
        /// <summary>
        /// Verify that the current directory exists and that the previous directory
        /// does not exist.
        /// </summary>
        /// <remarks>
        /// Verify that the current directory exists and that the previous directory
        /// does not exist.  Verify that current hasn't been modified by comparing
        /// the checksums of the files it contains with their original checksums.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        internal static void CheckResult(string[] nameNodeDirs, string[] dataNodeDirs, string
                                         bpid)
        {
            IList <FilePath> dirs = Lists.NewArrayList();

            for (int i = 0; i < nameNodeDirs.Length; i++)
            {
                FilePath curDir = new FilePath(nameNodeDirs[i], "current");
                dirs.AddItem(curDir);
                FSImageTestUtil.AssertReasonableNameCurrentDir(curDir);
            }
             FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, Sharpen.Collections.EmptySet<string>());
            FilePath[] dnCurDirs = new FilePath[dataNodeDirs.Length];
            for (int i_1 = 0; i_1 < dataNodeDirs.Length; i_1++)
            {
                dnCurDirs[i_1] = new FilePath(dataNodeDirs[i_1], "current");
                 NUnit.Framework.Assert.AreEqual(
                     UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, dnCurDirs[i_1], false),
                     UpgradeUtilities.ChecksumMasterDataNodeContents());
            }
            for (int i_2 = 0; i_2 < nameNodeDirs.Length; i_2++)
            {
                 NUnit.Framework.Assert.IsFalse(new FilePath(nameNodeDirs[i_2], "previous").IsDirectory());
            }
            if (bpid == null)
            {
                for (int i_3 = 0; i_3 < dataNodeDirs.Length; i_3++)
                {
                     NUnit.Framework.Assert.IsFalse(new FilePath(dataNodeDirs[i_3], "previous").IsDirectory());
                }
            }
            else
            {
                for (int i_3 = 0; i_3 < dataNodeDirs.Length; i_3++)
                {
                    FilePath bpRoot = BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDirs[i_3]);
                    NUnit.Framework.Assert.IsFalse(new FilePath(bpRoot, "previous").IsDirectory());
                     FilePath bpCurFinalizeDir = new FilePath(bpRoot, "current/" + DataStorage.StorageDirFinalized);
                     NUnit.Framework.Assert.AreEqual(
                         UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, bpCurFinalizeDir, true),
                         UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
                }
            }
        }
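Example #17 below drives this check after finalizing the cluster; a condensed, hedged sketch of that call sequence (the two CheckResult forms belong to the two scenarios exercised there):

 // Condensed from Example #17 (sketch, not verbatim).
 cluster.FinalizeCluster(conf);                    // ask NN and DNs to discard "previous"
 cluster.TriggerBlockReports();
 Sharpen.Thread.Sleep(1000);                       // DN/BP finalize is asynchronous
 CheckResult(nameNodeDirs, dataNodeDirs, null);    // NN & DN scenario: bpid == null
 CheckResult(nameNodeDirs, dataNodeDirs, bpid);    // NN & BP scenario: bpid != null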
Example #6
        public virtual void TestBlockPoolStorageStates()
        {
            string[] baseDirs;
            // First set up the datanode storage directory
            string bpid = UpgradeUtilities.GetCurrentBlockPoolID(null);

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf.SetInt("dfs.datanode.scan.period.hours", -1);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                for (int i = 0; i < NumDnTestCases; i++)
                {
                    bool[] testCase         = testCases[i];
                    bool   shouldRecover    = testCase[ShouldRecover];
                    bool   curAfterRecover  = testCase[CurrentShouldExistAfterRecover];
                    bool   prevAfterRecover = testCase[PreviousShouldExistAfterRecover];
                    Log("BLOCK_POOL recovery", numDirs, i, testCase);
                    CreateNameNodeStorageState(new bool[] { true, true, false, false, false });
                    cluster  = CreateCluster(conf);
                    baseDirs = CreateBlockPoolStorageState(bpid, testCase);
                    if (!testCase[CurrentExists] && !testCase[PreviousExists] && !testCase[PreviousTmpExists
                        ] && !testCase[RemovedTmpExists])
                    {
                        // DataNode will create and format current if no directories exist
                        cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                               null);
                    }
                    else
                    {
                        if (shouldRecover)
                        {
                            cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                                   null);
                            CheckResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
                        }
                        else
                        {
                            cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                                   null);
                            NUnit.Framework.Assert.IsFalse(cluster.GetDataNodes()[0].IsBPServiceAlive(bpid));
                        }
                    }
                    cluster.Shutdown();
                }
            }
        }
Example #7
 public virtual void TestNNStorageStates()
 {
     string[] baseDirs;
     for (int numDirs = 1; numDirs <= 2; numDirs++)
     {
         conf = new HdfsConfiguration();
         conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
         conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
         for (int i = 0; i < NumNnTestCases; i++)
         {
             bool[] testCase         = testCases[i];
             bool   shouldRecover    = testCase[ShouldRecover];
             bool   curAfterRecover  = testCase[CurrentShouldExistAfterRecover];
             bool   prevAfterRecover = testCase[PreviousShouldExistAfterRecover];
             Log("NAME_NODE recovery", numDirs, i, testCase);
             baseDirs = CreateNameNodeStorageState(testCase);
             if (shouldRecover)
             {
                 cluster = CreateCluster(conf);
                 CheckResultNameNode(baseDirs, curAfterRecover, prevAfterRecover);
                 cluster.Shutdown();
             }
             else
             {
                 try
                 {
                     cluster = CreateCluster(conf);
                     throw new Exception("NameNode should have failed to start");
                 }
                 catch (IOException expected)
                 {
                     // the exception is expected
                     // check that the message says "not formatted"
                     // when storage directory is empty (case #5)
                     if (!testCases[i][CurrentExists] && !testCases[i][PreviousTmpExists] && !testCases
                         [i][PreviousExists] && !testCases[i][RemovedTmpExists])
                     {
                         NUnit.Framework.Assert.IsTrue(expected.GetLocalizedMessage().Contains("NameNode is not formatted"
                                                                                               ));
                     }
                 }
             }
             cluster.Shutdown();
         }
     }
 }
Example #8
        /// <summary>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// </summary>
        /// <remarks>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// <pre>
        /// For each 3-tuple in the cross product
        /// ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
        /// {currentNamespaceId,incorrectNamespaceId},
        /// {pastFsscTime,currentFsscTime,futureFsscTime})
        /// 1. Startup Namenode with version file containing
        /// (currentLayoutVersion,currentNamespaceId,currentFsscTime)
        /// 2. Attempt to startup Datanode with version file containing
        /// this iterations version 3-tuple
        /// </pre>
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestVersions()
        {
            UpgradeUtilities.Initialize();
            Configuration conf = UpgradeUtilities.InitializeStorageStateConf(1, new HdfsConfiguration
                                                                                 ());

            TestDFSStartupVersions.StorageData[] versions = InitializeVersions();
            UpgradeUtilities.CreateNameNodeStorageDirs(conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey
                                                                       ), "current");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                          (false).ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption
                                                                         .Regular).Build();
            TestDFSStartupVersions.StorageData nameNodeVersion = new TestDFSStartupVersions.StorageData
                                                                     (HdfsConstants.NamenodeLayoutVersion, UpgradeUtilities.GetCurrentNamespaceID(cluster
                                                                                                                                                  ), UpgradeUtilities.GetCurrentClusterID(cluster), UpgradeUtilities.GetCurrentFsscTime
                                                                         (cluster), UpgradeUtilities.GetCurrentBlockPoolID(cluster));
            Log("NameNode version info", HdfsServerConstants.NodeType.NameNode, null, nameNodeVersion
                );
            string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);

            for (int i = 0; i < versions.Length; i++)
            {
                FilePath[] storage = UpgradeUtilities.CreateDataNodeStorageDirs(conf.GetStrings(DFSConfigKeys
                                                                                                .DfsDatanodeDataDirKey), "current");
                Log("DataNode version info", HdfsServerConstants.NodeType.DataNode, i, versions[i
                    ]);
                UpgradeUtilities.CreateDataNodeVersionFile(storage, versions[i].storageInfo, bpid
                                                           , versions[i].blockPoolId);
                try
                {
                    cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                           null);
                }
                 catch (Exception)
                 {
                     // Ignore.  The asserts below will check for problems.
                     // ignore.printStackTrace();
                 }
                NUnit.Framework.Assert.IsTrue(cluster.GetNameNode() != null);
                NUnit.Framework.Assert.AreEqual(IsVersionCompatible(nameNodeVersion, versions[i])
                                                , cluster.IsDataNodeUp());
                cluster.ShutdownDataNodes();
            }
        }
Example #9
 /// <summary>For namenode, verify that the current and previous directories exist.</summary>
 /// <remarks>
 /// For namenode, verify that the current and previous directories exist.
 /// Verify that previous hasn't been modified by comparing the checksum of all
 /// its files with their original checksum. It is assumed that the
 /// server has recovered and upgraded.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckNameNode(string[] baseDirs, long imageTxId)
 {
     foreach (string baseDir in baseDirs)
     {
         Log.Info("Checking namenode directory " + baseDir);
          Log.Info("==== Contents ====:\n  " + Joiner.On("  \n").Join(new FilePath(baseDir, "current").List()));
         Log.Info("==================");
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current"));
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current/VERSION"));
          GenericTestUtils.AssertExists(new FilePath(baseDir, "current/" + NNStorage.GetInProgressEditsFileName(imageTxId + 1)));
          GenericTestUtils.AssertExists(new FilePath(baseDir, "current/" + NNStorage.GetImageFileName(imageTxId)));
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current/seen_txid"));
         FilePath previous = new FilePath(baseDir, "previous");
         GenericTestUtils.AssertExists(previous);
          NUnit.Framework.Assert.AreEqual(
              UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.NameNode, previous, false),
              UpgradeUtilities.ChecksumMasterNameNodeContents());
     }
 }
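Example #19 below calls this helper immediately after a NameNode upgrade; a short, hedged sketch of that flow (CreateCluster is assumed to start the NameNode with the upgrade option, as in that test):

 // Condensed from the "Normal NameNode upgrade" case in Example #19 (sketch).
 UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
 cluster = CreateCluster();                     // starts the NameNode, performing the upgrade
 CheckNameNode(nameNodeDirs, ExpectedTxid);     // verifies current + previous in every dir
 cluster.Shutdown();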
Example #10
        public virtual void TestUpgrade4()
        {
            int numDirs = 4;

            conf = new HdfsConfiguration();
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
            conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
            string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
            Log("NameNode upgrade with one bad storage dir", numDirs);
            UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
            try
            {
                // assert("storage dir has been prepared for failure before reaching this point");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade, typeof(IOException),
                                        Sharpen.Pattern.Compile("failed in 1 storage"));
            }
            finally
            {
                // assert("storage dir shall be returned to normal state before exiting");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
Example #11
        /// <summary>Initialize the versions array.</summary>
        /// <remarks>
        /// Initialize the versions array.  This array stores all combinations
        /// of cross product:
        /// {oldLayoutVersion,currentLayoutVersion,futureLayoutVersion} X
        /// {currentNamespaceId,incorrectNamespaceId} X
        /// {pastFsscTime,currentFsscTime,futureFsscTime}
        /// </remarks>
        /// <exception cref="System.Exception"/>
        private TestDFSStartupVersions.StorageData[] InitializeVersions()
        {
            int    layoutVersionOld = Storage.LastUpgradableLayoutVersion;
            int    layoutVersionCur = HdfsConstants.DatanodeLayoutVersion;
            int    layoutVersionNew = int.MinValue;
            int    namespaceIdCur   = UpgradeUtilities.GetCurrentNamespaceID(null);
            int    namespaceIdOld   = int.MinValue;
            long   fsscTimeOld      = long.MinValue;
            long   fsscTimeCur      = UpgradeUtilities.GetCurrentFsscTime(null);
            long   fsscTimeNew      = long.MaxValue;
            string clusterID        = "testClusterID";
            string invalidClusterID = "testClusterID";
            string bpid             = UpgradeUtilities.GetCurrentBlockPoolID(null);
            string invalidBpid      = "invalidBpid";

             return(new TestDFSStartupVersions.StorageData[] {
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdCur, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionOld, namespaceIdOld, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdOld, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdCur, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeOld, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionNew, namespaceIdOld, clusterID, fsscTimeNew, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, invalidClusterID, fsscTimeCur, bpid),
                 new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeCur, invalidBpid) });
        }
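The 20-entry table is the 3 x 2 x 3 cross product of layout versions, namespace ids and fsscTimes (18 rows) plus two rows exercising an invalid cluster ID and an invalid block pool ID. A hedged sketch of building the same rows with loops instead of a flat literal (assumes System.Collections.Generic; this is not how the source writes it):

 // Equivalent construction of the version table (sketch, not from the source).
 var rows = new System.Collections.Generic.List<TestDFSStartupVersions.StorageData>();
 foreach (int lv in new[] { layoutVersionOld, layoutVersionCur, layoutVersionNew })
 {
     foreach (int nsId in new[] { namespaceIdCur, namespaceIdOld })
     {
         foreach (long time in new[] { fsscTimeOld, fsscTimeCur, fsscTimeNew })
         {
             rows.Add(new TestDFSStartupVersions.StorageData(lv, nsId, clusterID, time, bpid));
         }
     }
 }
 // The two deliberately invalid rows:
 rows.Add(new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, invalidClusterID, fsscTimeCur, bpid));
 rows.Add(new TestDFSStartupVersions.StorageData(layoutVersionCur, namespaceIdCur, clusterID, fsscTimeCur, invalidBpid));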
Example #12
 /// <summary>
 /// For datanode, for a block pool, verify that the current and previous
 /// directories exist.
 /// </summary>
 /// <remarks>
 /// For datanode, for a block pool, verify that the current and previous
 /// directories exist. Verify that previous hasn't been modified by comparing
 /// the checksum of all its files with their original checksum. It
 /// is assumed that the server has recovered and upgraded.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckDataNode(string[] baseDirs, string bpid)
 {
     for (int i = 0; i < baseDirs.Length; i++)
     {
         FilePath current = new FilePath(baseDirs[i], "current/" + bpid + "/current");
          NUnit.Framework.Assert.AreEqual(
              UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, current, false),
              UpgradeUtilities.ChecksumMasterDataNodeContents());
          // block files are placed under <sd>/current/<bpid>/current/finalized
          FilePath currentFinalized = MiniDFSCluster.GetFinalizedDir(new FilePath(baseDirs[i]), bpid);
          NUnit.Framework.Assert.AreEqual(
              UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, currentFinalized, true),
              UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
          FilePath previous = new FilePath(baseDirs[i], "current/" + bpid + "/previous");
          NUnit.Framework.Assert.IsTrue(previous.IsDirectory());
          NUnit.Framework.Assert.AreEqual(
              UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, previous, false),
              UpgradeUtilities.ChecksumMasterDataNodeContents());
          FilePath previousFinalized = new FilePath(baseDirs[i], "current/" + bpid + "/previous" + "/finalized");
          NUnit.Framework.Assert.AreEqual(
              UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, previousFinalized, true),
              UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
     }
 }
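For reference, the directory tree being verified above, per storage directory (paths taken from the strings in the code):

 // <data dir>/current/<bpid>/
 //     current/            checksum compared to the master DataNode contents
 //         finalized/      checksum compared to the master block pool finalized contents
 //     previous/           must be a directory; checksum compared to the master DataNode contents
 //         finalized/      checksum compared to the master block pool finalized contents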
Example #13
 /// <summary>
 /// For datanode, verify that the current and/or previous exist as indicated by
 /// the method parameters.
 /// </summary>
 /// <remarks>
 /// For datanode, verify that the current and/or previous exist as indicated by
 /// the method parameters.  If previous exists, verify that
 /// it hasn't been modified by comparing the checksums of the files it
 /// contains with their original checksums.  It is assumed that
 /// the server has recovered.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckResultDataNode(string[] baseDirs, bool currentShouldExist
                                           , bool previousShouldExist)
 {
     if (currentShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
              NUnit.Framework.Assert.AreEqual(
                  UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                    new FilePath(baseDirs[i], "current"), false),
                  UpgradeUtilities.ChecksumMasterDataNodeContents());
         }
     }
     if (previousShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
              NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "previous").IsDirectory());
              NUnit.Framework.Assert.AreEqual(
                  UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                    new FilePath(baseDirs[i], "previous"), false),
                  UpgradeUtilities.ChecksumMasterDataNodeContents());
         }
     }
 }
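No DataNode recovery loop appears in this collection, but by analogy with the block pool loop in Example #6 a caller would look roughly like this (hypothetical sketch, not from the source):

 // Hypothetical DATA_NODE recovery iteration, modeled on Example #6.
 bool[] testCase = testCases[0];
 string[] baseDirs = CreateDataNodeStorageState(testCase);        // see Example #3
 cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular, null);
 CheckResultDataNode(baseDirs,
                     testCase[CurrentShouldExistAfterRecover],
                     testCase[PreviousShouldExistAfterRecover]);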
Example #14
 /// <summary>
 /// For block pool, verify that the current and/or previous exist as indicated
 /// by the method parameters.
 /// </summary>
 /// <remarks>
 /// For block pool, verify that the current and/or previous exist as indicated
 /// by the method parameters.  If previous exists, verify that
 /// it hasn't been modified by comparing the checksums of the files it
 /// contains with their original checksums.  It is assumed that
 /// the server has recovered.
 /// </remarks>
 /// <param name="baseDirs">directories pointing to block pool storage</param>
 /// <param name="bpid">block pool Id</param>
 /// <param name="currentShouldExist">current directory exists under storage</param>
 /// <param name="previousShouldExist">previous directory exists under storage</param>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckResultBlockPool(string[] baseDirs, bool currentShouldExist
                                            , bool previousShouldExist)
 {
     if (currentShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
             FilePath bpCurDir = new FilePath(baseDirs[i], Storage.StorageDirCurrent);
              NUnit.Framework.Assert.AreEqual(
                  UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, bpCurDir, false),
                  UpgradeUtilities.ChecksumMasterBlockPoolContents());
         }
     }
     if (previousShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
             FilePath bpPrevDir = new FilePath(baseDirs[i], Storage.StorageDirPrevious);
             NUnit.Framework.Assert.IsTrue(bpPrevDir.IsDirectory());
              NUnit.Framework.Assert.AreEqual(
                  UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, bpPrevDir, false),
                  UpgradeUtilities.ChecksumMasterBlockPoolContents());
         }
     }
 }
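Example #6 above exercises this helper after restarting a DataNode in the recovery branch; the relevant lines, condensed:

 // From the recovery branch of Example #6 (condensed).
 cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular, null);
 CheckResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);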
Example #15
 public virtual void SetUp()
 {
     Log.Info("Setting up the directory structures.");
     UpgradeUtilities.Initialize();
 }
Example #16
        public virtual void TestRollback()
        {
            FilePath[] baseDirs;
            UpgradeUtilities.Initialize();
            StorageInfo storageInfo = null;

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
                Log("Normal NameNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                CheckResult(HdfsServerConstants.NodeType.NameNode, nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("Normal DataNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                              (false).ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption
                                                                               .Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback
                                       , null);
                CheckResult(HdfsServerConstants.NodeType.DataNode, dataNodeDirs);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("Normal BlockPool rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                              (false).ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption
                                                                               .Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "current", UpgradeUtilities
                                                            .GetCurrentBlockPoolID(cluster));
                // Create a previous snapshot for the blockpool
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "previous", UpgradeUtilities
                                                            .GetCurrentBlockPoolID(cluster));
                // Put newer layout version in current.
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion - 1, UpgradeUtilities
                                              .GetCurrentNamespaceID(cluster), UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster), HdfsServerConstants.NodeType.DataNode
                                              );
                // Overwrite VERSION file in the current directory of
                // volume directories and block pool slice directories
                // with a layout version from future.
                FilePath[] dataCurrentDirs = new FilePath[dataNodeDirs.Length];
                for (int i = 0; i < dataNodeDirs.Length; i++)
                {
                    dataCurrentDirs[i] = new FilePath((new Path(dataNodeDirs[i] + "/current")).ToString
                                                          ());
                }
                UpgradeUtilities.CreateDataNodeVersionFile(dataCurrentDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback
                                       , null);
                NUnit.Framework.Assert.IsTrue(cluster.IsDataNodeUp());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                StartNameNodeShouldFail("None of the storage directories contain previous fs state"
                                        );
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("DataNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                              (false).ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption
                                                                             .Upgrade).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback
                                       , null);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with future stored layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                              (false).ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption
                                                                               .Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(int.MinValue, UpgradeUtilities.GetCurrentNamespaceID
                                                  (cluster), UpgradeUtilities.GetCurrentClusterID(cluster), UpgradeUtilities.GetCurrentFsscTime
                                                  (cluster), HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback, cluster.GetNamesystem
                                             ().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with newer fsscTime in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs
                              (false).ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption
                                                                               .Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion, UpgradeUtilities
                                              .GetCurrentNamespaceID(cluster), UpgradeUtilities.GetCurrentClusterID(cluster),
                                              long.MaxValue, HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback, cluster.GetNamesystem
                                             ().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback with no edits file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "edits.*");
                StartNameNodeShouldFail("Gap in transactions");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with no image file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "fsimage_.*");
                StartNameNodeShouldFail("No valid image files found");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with corrupt version file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                foreach (FilePath f in baseDirs)
                {
                    UpgradeUtilities.CorruptFile(new FilePath(f, "VERSION"), Sharpen.Runtime.GetBytesForString
                                                     ("layoutVersion", Charsets.Utf8), Sharpen.Runtime.GetBytesForString("xxxxxxxxxxxxx"
                                                                                                                         , Charsets.Utf8));
                }
                StartNameNodeShouldFail("file VERSION has layoutVersion missing");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with old layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                storageInfo = new StorageInfo(1, UpgradeUtilities.GetCurrentNamespaceID(null), UpgradeUtilities
                                              .GetCurrentClusterID(null), UpgradeUtilities.GetCurrentFsscTime(null), HdfsServerConstants.NodeType
                                              .NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail("Cannot rollback to storage version 1 using this version"
                                        );
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
Example #17
 public virtual void TestFinalize()
 {
     UpgradeUtilities.Initialize();
     for (int numDirs = 1; numDirs <= 2; numDirs++)
     {
         /* This test requires that "current" directory not change after
          * the upgrade. Actually it is ok for those contents to change.
          * For now disabling block verification so that the contents are
          * not changed.
          * Disable duplicate replica deletion as the test intentionally
          * mirrors the contents of storage directories.
          */
         conf = new HdfsConfiguration();
         conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
         conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
         conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
         string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
         string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
         Log("Finalize NN & DN with existing previous dir", numDirs);
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
         cluster = new MiniDFSCluster.Builder(conf).Format(false).ManageDataDfsDirs(false)
                   .ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption.Regular
                                                           ).Build();
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous DN finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, null);
         Log("Finalize NN & DN without existing previous dir", numDirs);
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous DN finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, null);
         cluster.Shutdown();
         UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
         UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
         Log("Finalize NN & BP with existing previous dir", numDirs);
         string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
         UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
         UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
         UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "current", bpid);
         UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "previous", bpid);
         cluster = new MiniDFSCluster.Builder(conf).Format(false).ManageDataDfsDirs(false)
                   .ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption.Regular
                                                           ).Build();
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous BP finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, bpid);
         Log("Finalize NN & BP without existing previous dir", numDirs);
         cluster.FinalizeCluster(conf);
         cluster.TriggerBlockReports();
         // 1 second should be enough for asynchronous BP finalize
         Sharpen.Thread.Sleep(1000);
         CheckResult(nameNodeDirs, dataNodeDirs, bpid);
         cluster.Shutdown();
         UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
         UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
     }
 }
Example #18
        public virtual void TestPreserveEditLogs()
        {
            UnpackStorage(Hadoop252Image, HadoopDfsDirTxt);
            Configuration conf = new HdfsConfiguration();

            conf = UpgradeUtilities.InitializeStorageStateConf(1, conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption
                                                                                       .Upgrade).Build();
            DFSInotifyEventInputStream ieis = cluster.GetFileSystem().GetInotifyEventStream(0
                                                                                            );
            EventBatch batch;

            Event.CreateEvent ce;
            Event.RenameEvent re;
            // mkdir /input
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Create);
            ce = (Event.CreateEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input");
            // mkdir /input/dir1~5
            for (int i = 1; i <= 5; i++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                ce = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input/dir" + i);
            }
            // copyFromLocal randome_file_1~2 /input/dir1~2
            for (int i_1 = 1; i_1 <= 2; i_1++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                if (batch.GetEvents()[0].GetEventType() != Event.EventType.Create)
                {
                    FSImage.Log.Debug(string.Empty);
                }
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                // copyFromLocal randome_file_1 /input/dir1, CLOSE
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                 // copyFromLocal randome_file_1 /input/dir1, RENAME into place
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                re = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir" + i_1 + "/randome_file_"
                                                + i_1);
            }
            // mv /input/dir1/randome_file_1 /input/dir3/randome_file_3
            long txIDBeforeRename = batch.GetTxid();

            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // rmdir /input/dir1
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Unlink);
            NUnit.Framework.Assert.AreEqual(((Event.UnlinkEvent)batch.GetEvents()[0]).GetPath
                                                (), "/input/dir1");
            long lastTxID = batch.GetTxid();

            // Start inotify from the tx before rename /input/dir1/randome_file_1
            ieis  = cluster.GetFileSystem().GetInotifyEventStream(txIDBeforeRename);
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // Try to read beyond available edits
            ieis = cluster.GetFileSystem().GetInotifyEventStream(lastTxID + 1);
            NUnit.Framework.Assert.IsNull(ieis.Poll());
            cluster.Shutdown();
        }
Example #19
        /// <summary>
        /// This test attempts to upgrade the NameNode and DataNode under
        /// a number of valid and invalid conditions.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpgrade()
        {
            FilePath[]  baseDirs;
            StorageInfo storageInfo = null;

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
                conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
                Log("Normal NameNode upgrade", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                // make sure that rolling upgrade cannot be started
                try
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    NUnit.Framework.Assert.Fail();
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.AreEqual(typeof(InconsistentFSStateException).FullName, re
                                                    .GetClassName());
                    Log.Info("The exception is expected.", re);
                }
                CheckNameNode(nameNodeDirs, ExpectedTxid);
                if (numDirs > 1)
                {
                    TestParallelImageWrite.CheckImages(cluster.GetNamesystem(), numDirs);
                }
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("Normal DataNode upgrade", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                       null);
                CheckDataNode(dataNodeDirs, UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode upgrade with existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("DataNode upgrade with existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular,
                                       null);
                CheckDataNode(dataNodeDirs, UpgradeUtilities.GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode upgrade with future stored layout version in current", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster     = CreateCluster();
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
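                // HDFS layout versions are negative and decrease with newer releases, so
                // int.MinValue stands in for a layout version from a far-future release;
                // the block pool must refuse to start against this VERSION file.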
                storageInfo = new StorageInfo(int.MinValue, UpgradeUtilities.GetCurrentNamespaceID
                                                  (cluster), UpgradeUtilities.GetCurrentClusterID(cluster), UpgradeUtilities.GetCurrentFsscTime
                                                  (cluster), HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Regular, UpgradeUtilities
                                         .GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode upgrade with newer fsscTime in current", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster     = CreateCluster();
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
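                // long.MaxValue records an fsscTime (namespace creation time) newer than the
                // one the NameNode reports, which the datanode treats as an inconsistent
                // state, so block pool startup must fail.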
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion, UpgradeUtilities
                                              .GetCurrentNamespaceID(cluster), UpgradeUtilities.GetCurrentClusterID(cluster),
                                              long.MaxValue, HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                // Ensure the corresponding block pool fails to initialize
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Regular, UpgradeUtilities
                                         .GetCurrentBlockPoolID(null));
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode upgrade with no edits file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                DeleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with no image file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                DeleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with corrupt version file", numDirs);
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                foreach (FilePath f in baseDirs)
                {
                    UpgradeUtilities.CorruptFile(new FilePath(f, "VERSION"), Sharpen.Runtime.GetBytesForString
                                                     ("layoutVersion", Charsets.Utf8), Sharpen.Runtime.GetBytesForString("xxxxxxxxxxxxx"
                                                                                                                         , Charsets.Utf8));
                }
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with old layout version in current", numDirs);
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
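                // LastUpgradableLayoutVersion + 1 is one step older than the oldest layout
                // the current software can still upgrade from, so the upgrade must be rejected.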
                storageInfo = new StorageInfo(Storage.LastUpgradableLayoutVersion + 1, UpgradeUtilities
                                              .GetCurrentNamespaceID(null), UpgradeUtilities.GetCurrentClusterID(null), UpgradeUtilities
                                              .GetCurrentFsscTime(null), HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode upgrade with future layout version in current", numDirs);
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
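                // As in the datanode case above, int.MinValue poses as a layout version from
                // a future release; the NameNode cannot upgrade from something newer than itself.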
                storageInfo = new StorageInfo(int.MinValue, UpgradeUtilities.GetCurrentNamespaceID
                                                  (null), UpgradeUtilities.GetCurrentClusterID(null), UpgradeUtilities.GetCurrentFsscTime
                                                  (null), HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities
                                                           .GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail(HdfsServerConstants.StartupOption.Upgrade);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
            // end numDir loop
            // One more check: normal NN upgrade with 4 directories, concurrent write
            int numDirs_1 = 4;
            {
                conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
                conf.SetBoolean(DFSConfigKeys.DfsDatanodeDuplicateReplicaDeletion, false);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs_1, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                Log("Normal NameNode upgrade", numDirs_1);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = CreateCluster();
                // make sure that a rolling upgrade cannot be started while a regular upgrade is in progress
                try
                {
                    DistributedFileSystem dfs = cluster.GetFileSystem();
                    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                    dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                    NUnit.Framework.Assert.Fail();
                }
                catch (RemoteException re)
                {
                    NUnit.Framework.Assert.AreEqual(typeof(InconsistentFSStateException).FullName, re
                                                    .GetClassName());
                    Log.Info("The exception is expected.", re);
                }
                CheckNameNode(nameNodeDirs, ExpectedTxid);
                TestParallelImageWrite.CheckImages(cluster.GetNamesystem(), numDirs_1);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
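
CreateCluster() is called throughout the test but is not shown in this listing. In the upstream test it is a small helper that brings up a cluster with no datanodes, without reformatting the existing storage, and with the Upgrade startup option; the reconstruction below is a hedged sketch under that assumption, and the Sharpen-style builder method names (NumDataNodes, Format, StartupOption, Build) are likewise assumed.

        // A hedged sketch of the helper used above; builder method names follow the
        // Sharpen conversion conventions seen elsewhere in this listing.
        private MiniDFSCluster CreateCluster()
        {
            return new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).StartupOption
                       (HdfsServerConstants.StartupOption.Upgrade).Build();
        }
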
Ejemplo n.º 20
0
 public static void Initialize()
 {
     UpgradeUtilities.Initialize();
 }
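
In the upstream fixture this hook typically runs once before any of the tests (for example via an NUnit one-time set-up attribute, which is an assumption here since the listing omits attributes), letting UpgradeUtilities build the master storage images that the upgrade scenarios above copy from and compare against.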