public virtual void TestDataDirectories()
        {
            FilePath      dataDir = new FilePath(BaseDir, "data").GetCanonicalFile();
            Configuration conf    = cluster.GetConfiguration(0);
            // 1. Test unsupported scheme. Only "file:" is supported.
            string dnDir = MakeURI("shv", null, Util.FileAsURI(dataDir).GetPath());

            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dnDir);
            DataNode dn = null;

            try
            {
                dn = DataNode.CreateDataNode(new string[] { }, conf);
                NUnit.Framework.Assert.Fail();
            }
            catch (Exception)
            {
                // expecting exception here
            }
            finally
            {
                if (dn != null)
                {
                    dn.Shutdown();
                }
            }
            NUnit.Framework.Assert.IsNull("Data-node startup should have failed.", dn);
            // 2. Test "file:" scheme and no scheme (path-only). Both should work.
            string dnDir1 = Util.FileAsURI(dataDir).ToString() + "1";
            string dnDir2 = MakeURI("file", "localhost", Util.FileAsURI(dataDir).GetPath() + "2");
            string dnDir3 = dataDir.GetAbsolutePath() + "3";

            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dnDir1 + "," + dnDir2 + "," + dnDir3);
            try
            {
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular, null);
                NUnit.Framework.Assert.IsTrue("Data-node should startup.", cluster.IsDataNodeUp());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.ShutdownDataNodes();
                }
            }
        }
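        // The MakeURI helper used above is defined elsewhere in this test class. A minimal
        // sketch, assuming it simply joins the scheme, an optional host, and an absolute path
        // (mirroring java.net.URI(scheme, host, path, null)); not the actual implementation.
        private static string MakeURI(string scheme, string host, string path)
        {
            // "scheme://host/path"; a null host collapses to "scheme:///path".
            // The path is expected to already start with "/".
            return scheme + "://" + (host ?? string.Empty) + path;
        }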
        /// <summary>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// </summary>
        /// <remarks>
        /// This test ensures the appropriate response (successful or failure) from
        /// a Datanode when the system is started with differing version combinations.
        /// <pre>
        /// For each 3-tuple in the cross product
        /// ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
        /// {currentNamespaceId,incorrectNamespaceId},
        /// {pastFsscTime,currentFsscTime,futureFsscTime})
        /// 1. Startup Namenode with version file containing
        /// (currentLayoutVersion,currentNamespaceId,currentFsscTime)
        /// 2. Attempt to startup Datanode with version file containing
        /// this iteration's version 3-tuple
        /// </pre>
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestVersions()
        {
            UpgradeUtilities.Initialize();
            Configuration conf = UpgradeUtilities.InitializeStorageStateConf(1, new HdfsConfiguration());

            TestDFSStartupVersions.StorageData[] versions = InitializeVersions();
            UpgradeUtilities.CreateNameNodeStorageDirs(conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey), "current");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                          .ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
            TestDFSStartupVersions.StorageData nameNodeVersion = new TestDFSStartupVersions.StorageData(
                HdfsConstants.NamenodeLayoutVersion,
                UpgradeUtilities.GetCurrentNamespaceID(cluster),
                UpgradeUtilities.GetCurrentClusterID(cluster),
                UpgradeUtilities.GetCurrentFsscTime(cluster),
                UpgradeUtilities.GetCurrentBlockPoolID(cluster));
            Log("NameNode version info", HdfsServerConstants.NodeType.NameNode, null, nameNodeVersion);
            string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);

            for (int i = 0; i < versions.Length; i++)
            {
                FilePath[] storage = UpgradeUtilities.CreateDataNodeStorageDirs(
                    conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey), "current");
                Log("DataNode version info", HdfsServerConstants.NodeType.DataNode, i, versions[i]);
                UpgradeUtilities.CreateDataNodeVersionFile(storage, versions[i].storageInfo, bpid, versions[i].blockPoolId);
                try
                {
                    cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Regular, null);
                }
                catch (Exception)
                {
                    // Ignore.  The asserts below will check for problems.
                }
                NUnit.Framework.Assert.IsTrue(cluster.GetNameNode() != null);
                NUnit.Framework.Assert.AreEqual(IsVersionCompatible(nameNodeVersion, versions[i]), cluster.IsDataNodeUp());
                cluster.ShutdownDataNodes();
            }
        }
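        // InitializeVersions() is defined elsewhere in this test class. The sketch below is a
        // hypothetical illustration (the name, parameters and call pattern are assumptions, not
        // the upstream implementation) of how the 3 x 2 x 3 cross product described in the
        // remarks above could be enumerated into StorageData tuples for the loop in TestVersions().
        private static TestDFSStartupVersions.StorageData[] BuildVersionCrossProduct(
            int[] layoutVersions, int[] namespaceIds, long[] fsscTimes, string clusterId, string blockPoolId)
        {
            System.Collections.Generic.List<TestDFSStartupVersions.StorageData> combinations =
                new System.Collections.Generic.List<TestDFSStartupVersions.StorageData>();
            foreach (int layoutVersion in layoutVersions)
            {
                foreach (int namespaceId in namespaceIds)
                {
                    foreach (long fsscTime in fsscTimes)
                    {
                        // One StorageData per (layoutVersion, namespaceId, fsscTime) 3-tuple.
                        combinations.Add(new TestDFSStartupVersions.StorageData(
                            layoutVersion, namespaceId, clusterId, fsscTime, blockPoolId));
                    }
                }
            }
            return combinations.ToArray();
        }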
        public virtual void TestValidVolumesAtStartup()
        {
            Assume.AssumeTrue(!Runtime.GetProperty("os.name").StartsWith("Windows"));
            // Make sure no DNs are running.
            cluster.ShutdownDataNodes();
            // Bring up a datanode with two default data dirs, but with one bad one.
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            // We use subdirectories ("1" and "2") under separate parents so that only a
            // single data dir's parent injects a failure.
            FilePath tld            = new FilePath(MiniDFSCluster.GetBaseDirectory(), "badData");
            FilePath dataDir1       = new FilePath(tld, "data1");
            FilePath dataDir1Actual = new FilePath(dataDir1, "1");

            dataDir1Actual.Mkdirs();
            // Force an IOE to occur on one of the dfs.data.dir.
            FilePath dataDir2 = new FilePath(tld, "data2");

            PrepareDirToFail(dataDir2);
            FilePath dataDir2Actual = new FilePath(dataDir2, "2");

            // Start one DN, with manually managed DN dir
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey,
                     dataDir1Actual.GetPath() + "," + dataDir2Actual.GetPath());
            cluster.StartDataNodes(conf, 1, false, null, null);
            cluster.WaitActive();
            try
            {
                NUnit.Framework.Assert.IsTrue("The DN should have started up fine.", cluster.IsDataNodeUp
                                                  ());
                DataNode dn = cluster.GetDataNodes()[0];
                string   si = DataNodeTestUtils.GetFSDataset(dn).GetStorageInfo();
                NUnit.Framework.Assert.IsTrue("The DN should have started with this directory", si
                                              .Contains(dataDir1Actual.GetPath()));
                NUnit.Framework.Assert.IsFalse("The DN shouldn't have a bad directory.", si.Contains
                                                   (dataDir2Actual.GetPath()));
            }
            finally
            {
                cluster.ShutdownDataNodes();
                FileUtil.Chmod(dataDir2.ToString(), "755");
            }
        }
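        // PrepareDirToFail is defined elsewhere in this test class. A minimal sketch, assuming
        // it strips all permissions from the directory so the datanode hits an IOException on
        // that volume (the finally block above restores "755"); the real implementation may differ.
        private void PrepareDirToFail(FilePath dir)
        {
            dir.Mkdirs();
            // chmod 000 makes the directory unusable as a data dir, forcing that volume to fail.
            NUnit.Framework.Assert.AreEqual("Couldn't chmod local vol", 0, FileUtil.Chmod(dir.ToString(), "000"));
        }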
        public virtual void TestRollback()
        {
            FilePath[] baseDirs;
            UpgradeUtilities.Initialize();
            StorageInfo storageInfo = null;

            for (int numDirs = 1; numDirs <= 2; numDirs++)
            {
                conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
                conf = UpgradeUtilities.InitializeStorageStateConf(numDirs, conf);
                string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
                string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);
                Log("Normal NameNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                CheckResult(HdfsServerConstants.NodeType.NameNode, nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("Normal DataNode rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                CheckResult(HdfsServerConstants.NodeType.DataNode, dataNodeDirs);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("Normal BlockPool rollback", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "current",
                                                            UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                // Create a previous snapshot for the blockpool
                UpgradeUtilities.CreateBlockPoolStorageDirs(dataNodeDirs, "previous",
                                                            UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                // Put a newer (future) layout version in current.
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion - 1,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster),
                                              HdfsServerConstants.NodeType.DataNode);
                // Overwrite the VERSION file in the current directory of the volume
                // directories and block pool slice directories with a layout version
                // from the future.
                FilePath[] dataCurrentDirs = new FilePath[dataNodeDirs.Length];
                for (int i = 0; i < dataNodeDirs.Length; i++)
                {
                    dataCurrentDirs[i] = new FilePath((new Path(dataNodeDirs[i] + "/current")).ToString());
                }
                UpgradeUtilities.CreateDataNodeVersionFile(dataCurrentDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                NUnit.Framework.Assert.IsTrue(cluster.IsDataNodeUp());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                StartNameNodeShouldFail("None of the storage directories contain previous fs state"
                                        );
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("DataNode rollback without existing previous dir", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption.Upgrade).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                cluster.StartDataNodes(conf, 1, false, HdfsServerConstants.StartupOption.Rollback, null);
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with future stored layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(int.MinValue,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              UpgradeUtilities.GetCurrentFsscTime(cluster),
                                              HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback, cluster.GetNamesystem().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("DataNode rollback with newer fsscTime in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                NameNode.DoRollback(conf, false);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).DnStartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
                UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateDataNodeStorageDirs(dataNodeDirs, "previous");
                storageInfo = new StorageInfo(HdfsConstants.DatanodeLayoutVersion,
                                              UpgradeUtilities.GetCurrentNamespaceID(cluster),
                                              UpgradeUtilities.GetCurrentClusterID(cluster),
                                              long.MaxValue, HdfsServerConstants.NodeType.DataNode);
                UpgradeUtilities.CreateDataNodeVersionFile(baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartBlockPoolShouldFail(HdfsServerConstants.StartupOption.Rollback, cluster.GetNamesystem().GetBlockPoolId());
                cluster.Shutdown();
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                UpgradeUtilities.CreateEmptyDirs(dataNodeDirs);
                Log("NameNode rollback with no edits file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "edits.*");
                StartNameNodeShouldFail("Gap in transactions");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with no image file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                DeleteMatchingFiles(baseDirs, "fsimage_.*");
                StartNameNodeShouldFail("No valid image files found");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with corrupt version file", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                foreach (FilePath f in baseDirs)
                {
                    UpgradeUtilities.CorruptFile(new FilePath(f, "VERSION"), Sharpen.Runtime.GetBytesForString
                                                     ("layoutVersion", Charsets.Utf8), Sharpen.Runtime.GetBytesForString("xxxxxxxxxxxxx"
                                                                                                                         , Charsets.Utf8));
                }
                StartNameNodeShouldFail("file VERSION has layoutVersion missing");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
                Log("NameNode rollback with old layout version in previous", numDirs);
                UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "current");
                baseDirs    = UpgradeUtilities.CreateNameNodeStorageDirs(nameNodeDirs, "previous");
                storageInfo = new StorageInfo(1,
                                              UpgradeUtilities.GetCurrentNamespaceID(null),
                                              UpgradeUtilities.GetCurrentClusterID(null),
                                              UpgradeUtilities.GetCurrentFsscTime(null),
                                              HdfsServerConstants.NodeType.NameNode);
                UpgradeUtilities.CreateNameNodeVersionFile(conf, baseDirs, storageInfo,
                                                           UpgradeUtilities.GetCurrentBlockPoolID(cluster));
                StartNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
                UpgradeUtilities.CreateEmptyDirs(nameNodeDirs);
            }
        }
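        // StartNameNodeShouldFail and StartBlockPoolShouldFail are defined elsewhere in this
        // test class. Below is a hedged sketch of the pattern StartNameNodeShouldFail implements
        // (attempt a rollback start and assert on the failure message), not the exact upstream code:
        private void StartNameNodeShouldFail(string searchString)
        {
            Exception caught = null;
            try
            {
                // Should fail against the storage state prepared by the caller.
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).ManageDataDfsDirs(false)
                              .ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption.Rollback).Build();
            }
            catch (Exception expected)
            {
                caught = expected;
            }
            NUnit.Framework.Assert.IsNotNull("NameNode should have failed to start", caught);
            // The exception text is expected to mention the scenario being exercised.
            NUnit.Framework.Assert.IsTrue("Expected substring '" + searchString + "' in: " + caught,
                                          caught.ToString().Contains(searchString));
        }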