public virtual void TestPurgeLogs()
        {
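            // Write five finalized segments, one transaction each (txids 1 through 5).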
            for (int txid = 1; txid <= 5; txid++)
            {
                QJMTestUtil.WriteSegment(cluster, qjm, txid, 1, true);
            }
            FilePath curDir = cluster.GetCurrentDir(0, QJMTestUtil.Jid);

            GenericTestUtils.AssertGlobEquals(curDir, "edits_.*",
                                              NNStorage.GetFinalizedEditsFileName(1, 1),
                                              NNStorage.GetFinalizedEditsFileName(2, 2),
                                              NNStorage.GetFinalizedEditsFileName(3, 3),
                                              NNStorage.GetFinalizedEditsFileName(4, 4),
                                              NNStorage.GetFinalizedEditsFileName(5, 5));
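            // (NNStorage.GetFinalizedEditsFileName yields names of the form
            // edits_<firstTxId>-<lastTxId> with each txid zero-padded to 19 digits,
            // which is what the "edits_.*" glob above matches.)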
            FilePath paxosDir = new FilePath(curDir, "paxos");

            GenericTestUtils.AssertExists(paxosDir);
            // Create new files in the paxos directory, which should get purged too.
            NUnit.Framework.Assert.IsTrue(new FilePath(paxosDir, "1").CreateNewFile());
            NUnit.Framework.Assert.IsTrue(new FilePath(paxosDir, "3").CreateNewFile());
            GenericTestUtils.AssertGlobEquals(paxosDir, "\\d+", "1", "3");
            // Create some temporary files of the sort that are used during recovery.
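            // (An assumption from QJM recovery behavior: a ".epoch=N" suffix marks a
            // stale in-progress segment set aside during recovery under epoch N, and
            // ".empty" marks one that never received transactions; both should be
            // removed by the purge below.)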
            NUnit.Framework.Assert.IsTrue(
                new FilePath(curDir, "edits_inprogress_0000000000000000001.epoch=140").CreateNewFile());
            NUnit.Framework.Assert.IsTrue(
                new FilePath(curDir, "edits_inprogress_0000000000000000002.empty").CreateNewFile());
            qjm.PurgeLogsOlderThan(3);
            // Log purging is asynchronous, so we have to wait for the calls
            // to be sent and respond before verifying.
            WaitForAllPendingCalls(qjm.GetLoggerSetForTests());
            // Older edits should be purged
            GenericTestUtils.AssertGlobEquals(curDir, "edits_.*",
                                              NNStorage.GetFinalizedEditsFileName(3, 3),
                                              NNStorage.GetFinalizedEditsFileName(4, 4),
                                              NNStorage.GetFinalizedEditsFileName(5, 5));
            // Older paxos files should be purged
            GenericTestUtils.AssertGlobEquals(paxosDir, "\\d+", "3");
        }
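
The padded file names asserted above can be reproduced with a one-line formatter. A minimal sketch, assuming only the naming scheme visible in this test (the helper itself is hypothetical, not part of NNStorage):

private static string FinalizedEditsName(long firstTxId, long lastTxId)
{
    // Mirrors what NNStorage.GetFinalizedEditsFileName produces:
    // edits_<first>-<last>, each txid zero-padded to 19 digits.
    return string.Format("edits_{0:D19}-{1:D19}", firstTxId, lastTxId);
}

For example, FinalizedEditsName(1, 1) yields "edits_0000000000000000001-0000000000000000001".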
Example #2
        public virtual void TestLoadLogsFromBuggyEarlierVersions()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop23BrokenAppendTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-with-buggy-append");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(0)
                .Format(false)
                .ManageDataDfsDirs(false)
                .ManageNameDfsDirs(false)
                .WaitSafeMode(false)
                .StartupOption(HdfsServerConstants.StartupOption.Upgrade)
                .Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/tmp/io_data/test_io_0");
                NUnit.Framework.Assert.AreEqual(2 * 1024 * 1024,
                                                fs.GetFileStatus(testPath).GetLen());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
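
The untar-and-verify preamble above recurs almost verbatim in TestEarlierVersionEditLog and TestZeroBlockSize below. A hypothetical helper could collapse it (the name and signature are illustrative, not from the original suite):

private static FilePath ExtractTestImage(string tarFile, string testDir, string imageDirName)
{
    // Remove any stale copy of the image, then untar a fresh one.
    FilePath dfsDir = new FilePath(testDir, imageDirName);
    if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
    {
        throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
    }
    FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
    return dfsDir;
}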
Example #3
		/// <exception cref="System.Exception"/>
		public virtual void TestJournalLocking()
		{
			Assume.AssumeTrue(journal.GetStorage().GetStorageDir(0).IsLockSupported());
			Storage.StorageDirectory sd = journal.GetStorage().GetStorageDir(0);
			FilePath lockFile = new FilePath(sd.GetRoot(), Storage.StorageFileLock);
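			// (Storage.StorageFileLock is the "in_use.lock" marker at the storage root.)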
			// Journal should be locked, since the format() call locks it.
			GenericTestUtils.AssertExists(lockFile);
			journal.NewEpoch(FakeNsinfo, 1);
			try
			{
				new Journal(conf, TestLogDir, Jid, HdfsServerConstants.StartupOption.Regular,
					mockErrorReporter);
				NUnit.Framework.Assert.Fail("Did not fail to create another journal in same dir");
			}
			catch (IOException ioe)
			{
				GenericTestUtils.AssertExceptionContains("Cannot lock storage", ioe);
			}
			journal.Close();
			// Journal should no longer be locked after the close() call.
			// Hence, should be able to create a new Journal in the same dir.
			Journal journal2 = new Journal(conf, TestLogDir, Jid,
				HdfsServerConstants.StartupOption.Regular, mockErrorReporter);
			journal2.NewEpoch(FakeNsinfo, 2);
			journal2.Close();
		}
Example #4
        public virtual void TestSharedEditsMissingLogs()
        {
            RemoveStandbyNameDirs();
            CheckpointSignature sig = nn0.GetRpcServer().RollEditLog();
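            // RollEditLog finalizes the segment being written (txids 1-2 here) and
            // opens a new one; the signature reports the new segment's first txid.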

            NUnit.Framework.Assert.AreEqual(3, sig.GetCurSegmentTxId());
            // Should have created edits_1-2 in shared edits dir
            URI      editsUri     = cluster.GetSharedEditsDir(0, 1);
            FilePath editsDir     = new FilePath(editsUri);
            FilePath editsSegment = new FilePath(new FilePath(editsDir, "current"),
                                                 NNStorage.GetFinalizedEditsFileName(1, 2));

            GenericTestUtils.AssertExists(editsSegment);
            // Delete the segment.
            NUnit.Framework.Assert.IsTrue(editsSegment.Delete());
            // Trying to bootstrap standby should now fail since the edit
            // logs aren't available in the shared dir.
            GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.CaptureLogs(
                LogFactory.GetLog(typeof(BootstrapStandby)));
            try
            {
                int rc = BootstrapStandby.Run(new string[] { "-force" },
                                              cluster.GetConfiguration(1));
                NUnit.Framework.Assert.AreEqual(BootstrapStandby.ErrCodeLogsUnavailable, rc);
            }
            finally
            {
                logs.StopCapturing();
            }
            GenericTestUtils.AssertMatches(logs.GetOutput(),
                                           "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
        }
        /// <summary>
        /// HDFS-3013: NameNode format command doesn't pick up
        /// dfs.namenode.name.dir.NameServiceId configuration.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestGenericKeysForNameNodeFormat()
        {
            Configuration conf = new HdfsConfiguration();

            // Set ephemeral ports
            conf.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "127.0.0.1:0");
            conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:0");
            conf.Set(DFSConfigKeys.DfsNameservices, "ns1");
            // Set a nameservice-specific configuration for name dir
            FilePath dir = new FilePath(MiniDFSCluster.GetBaseDirectory(),
                                        "testGenericKeysForNameNodeFormat");

            if (dir.Exists())
            {
                FileUtil.FullyDelete(dir);
            }
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey + ".ns1", dir.GetAbsolutePath());
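            // Per HDFS-3013, the suffixed key (dfs.namenode.name.dir.ns1) must win over
            // the generic dfs.namenode.name.dir when formatting nameservice ns1.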
            // Format and verify the right dir is formatted.
            DFSTestUtil.FormatNameNode(conf);
            GenericTestUtils.AssertExists(dir);
            // Ensure that the same dir is picked up by the running NN
            NameNode nameNode = new NameNode(conf);

            nameNode.Stop();
        }
Example #6
        public virtual void TestEarlierVersionEditLog()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop10MultiblockTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-1.0");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            FilePath dataDir = new FilePath(dfsDir, "data");

            GenericTestUtils.AssertExists(dataDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .Format(false)
                .ManageDataDfsDirs(false)
                .ManageNameDfsDirs(false)
                .NumDataNodes(1)
                .StartupOption(HdfsServerConstants.StartupOption.Upgrade)
                .Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/user/todd/4blocks");
                // Read it without caring about the actual data within - we just need
                // to make sure that the block states and locations are OK.
                DFSTestUtil.ReadFile(fs, testPath);
                // Ensure that we can append to it - if the blocks were in some funny
                // state we'd get some kind of issue here.
                FSDataOutputStream stm = fs.Append(testPath);
                try
                {
                    stm.Write(1);
                }
                finally
                {
                    IOUtils.CloseStream(stm);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #7
		/// <summary>
		/// Assume that a client is writing to a journal, but loses its connection
		/// in the middle of a segment.
		/// </summary>
		/// <remarks>
		/// Assume that a client is writing to a journal, but loses its connection
		/// in the middle of a segment. Thus, any future journal() calls in that
		/// segment may fail, because some txns were missed while the connection was
		/// down.
		/// Eventually, the connection comes back, and the NN tries to start a new
		/// segment at a higher txid. This should abort the old one and succeed.
		/// </remarks>
		/// <exception cref="System.Exception"/>
		public virtual void TestAbortOldSegmentIfFinalizeIsMissed()
		{
			journal.NewEpoch(FakeNsinfo, 1);
			// Start a segment at txid 1, and write a batch of 3 txns.
			journal.StartLogSegment(MakeRI(1), 1, NameNodeLayoutVersion.CurrentLayoutVersion);
			journal.Journal(MakeRI(2), 1, 1, 3, QJMTestUtil.CreateTxnData(1, 3));
			GenericTestUtils.AssertExists(journal.GetStorage().GetInProgressEditLog(1));
			// Try to start a new segment at txid 6; this should abort the old segment
			// and then succeed, allowing us to write txids 6-8.
			journal.StartLogSegment(MakeRI(3), 6, NameNodeLayoutVersion.CurrentLayoutVersion);
			journal.Journal(MakeRI(4), 6, 6, 3, QJMTestUtil.CreateTxnData(6, 3));
			// The old segment should *not* be finalized.
			GenericTestUtils.AssertExists(journal.GetStorage().GetInProgressEditLog(1));
			GenericTestUtils.AssertExists(journal.GetStorage().GetInProgressEditLog(6));
		}
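
Each journal call above carries a RequestInfo built by MakeRI. A plausible sketch, assuming RequestInfo bundles the journal id, the epoch granted by NewEpoch, an increasing IPC serial number, and a committed txid (the exact constructor arguments are an assumption, not taken from the original suite):

		private static RequestInfo MakeRI(int serial)
		{
			// Epoch 1 matches the NewEpoch(FakeNsinfo, 1) call above; the serial
			// number must strictly increase across calls, hence a fresh value each time.
			return new RequestInfo(Jid, 1, serial, 0);
		}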
 /// <summary>
 /// Check that the given list of edits files are present in the given storage
 /// dirs.
 /// </summary>
 /// <exception cref="System.IO.IOException"/>
 private void AssertEditFiles(IEnumerable<URI> dirs, params string[] files)
 {
     foreach (URI u in dirs)
     {
         FilePath editDirRoot = new FilePath(u.GetPath());
         FilePath editDir     = new FilePath(editDirRoot, "current");
         GenericTestUtils.AssertExists(editDir);
         if (files.Length == 0)
         {
             Log.Info("Checking no edit files exist in " + editDir);
         }
         else
         {
             Log.Info("Checking for following edit files in " + editDir + ": " +
                      Joiner.On(",").Join(files));
         }
         GenericTestUtils.AssertGlobEquals(editDir, "edits_.*", files);
     }
 }
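
A hypothetical invocation of the helper above, checking that each configured name dir holds exactly one finalized segment covering txids 1-2 plus an in-progress segment starting at txid 3 (argument values are illustrative; GetNameDirs and GetInProgressEditsFileName are the standard MiniDFSCluster/NNStorage accessors):

 AssertEditFiles(cluster.GetNameDirs(0),
                 NNStorage.GetFinalizedEditsFileName(1, 2),
                 NNStorage.GetInProgressEditsFileName(3));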
Example #9
 /// <summary>For namenode, Verify that the current and previous directories exist.</summary>
 /// <remarks>
 /// For namenode, Verify that the current and previous directories exist.
 /// Verify that previous hasn't been modified by comparing the checksum of all
 /// its files with their original checksum. It is assumed that the
 /// server has recovered and upgraded.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckNameNode(string[] baseDirs, long imageTxId)
 {
     foreach (string baseDir in baseDirs)
     {
         Log.Info("Checking namenode directory " + baseDir);
         Log.Info("==== Contents ====:\n  " +
                  Joiner.On("  \n").Join(new FilePath(baseDir, "current").List()));
         Log.Info("==================");
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current"));
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current/VERSION"));
         GenericTestUtils.AssertExists(new FilePath(baseDir,
             "current/" + NNStorage.GetInProgressEditsFileName(imageTxId + 1)));
         GenericTestUtils.AssertExists(new FilePath(baseDir,
             "current/" + NNStorage.GetImageFileName(imageTxId)));
         GenericTestUtils.AssertExists(new FilePath(baseDir, "current/seen_txid"));
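         // "previous" holds the pre-upgrade state; its checksum must still match the
         // snapshot taken before the upgrade so that a rollback remains possible.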
         FilePath previous = new FilePath(baseDir, "previous");
         GenericTestUtils.AssertExists(previous);
         NUnit.Framework.Assert.AreEqual(
             UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.NameNode, previous, false),
             UpgradeUtilities.ChecksumMasterNameNodeContents());
     }
 }
Example #10
        public virtual void TestZeroBlockSize()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop27Zer0BlockSizeTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-with-zero-block-size");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
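            // Format(false) + ManageNameDfsDirs(false) make the cluster reuse the
            // untarred name dir above; WaitSafeMode(false) lets the test proceed
            // without blocking until the namenode leaves safe mode.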
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(1)
                .Format(false)
                .ManageDataDfsDirs(false)
                .ManageNameDfsDirs(false)
                .WaitSafeMode(false)
                .StartupOption(HdfsServerConstants.StartupOption.Upgrade)
                .Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/tmp/zeroBlockFile");
                NUnit.Framework.Assert.IsTrue("File /tmp/zeroBlockFile doesn't exist",
                                              fs.Exists(testPath));
                NUnit.Framework.Assert.IsTrue("Name node didn't come up",
                                              cluster.IsNameNodeUp(0));
            }
            finally
            {
                cluster.Shutdown();
                //Clean up
                FileUtil.FullyDelete(dfsDir);
            }
        }