/// <summary>
/// Verify that the current directory exists and that the previous directory
/// does not exist.
/// </summary>
/// <remarks>
/// Verify that the current directory exists and that the previous directory
/// does not exist. Verify that current hasn't been modified by comparing
/// the checksum of all the files it contains with their original checksums.
/// </remarks>
/// <exception cref="System.Exception"/>
internal static void CheckResult(string[] nameNodeDirs, string[] dataNodeDirs, string bpid)
{
    IList<FilePath> dirs = Lists.NewArrayList();
    for (int i = 0; i < nameNodeDirs.Length; i++)
    {
        FilePath curDir = new FilePath(nameNodeDirs[i], "current");
        dirs.AddItem(curDir);
        FSImageTestUtil.AssertReasonableNameCurrentDir(curDir);
    }
    FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, Sharpen.Collections.EmptySet<string>());
    // Each DataNode "current" directory must still match the master checksum,
    // i.e. its contents were not modified by the upgrade.
    FilePath[] dnCurDirs = new FilePath[dataNodeDirs.Length];
    for (int i = 0; i < dataNodeDirs.Length; i++)
    {
        dnCurDirs[i] = new FilePath(dataNodeDirs[i], "current");
        NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, dnCurDirs[i], false), UpgradeUtilities.ChecksumMasterDataNodeContents());
    }
    // No "previous" directory may exist under any NameNode storage root.
    for (int i = 0; i < nameNodeDirs.Length; i++)
    {
        NUnit.Framework.Assert.IsFalse(new FilePath(nameNodeDirs[i], "previous").IsDirectory());
    }
    if (bpid == null)
    {
        for (int i = 0; i < dataNodeDirs.Length; i++)
        {
            NUnit.Framework.Assert.IsFalse(new FilePath(dataNodeDirs[i], "previous").IsDirectory());
        }
    }
    else
    {
        // With a block pool id, check the per-block-pool layout under each DataNode root.
        for (int i = 0; i < dataNodeDirs.Length; i++)
        {
            FilePath bpRoot = BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDirs[i]);
            NUnit.Framework.Assert.IsFalse(new FilePath(bpRoot, "previous").IsDirectory());
            FilePath bpCurFinalizeDir = new FilePath(bpRoot, "current/" + DataStorage.StorageDirFinalized);
            NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, bpCurFinalizeDir, true), UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
        }
    }
}
/// <summary>
/// For a datanode, for a block pool, verify that the current and previous
/// directories exist.
/// </summary>
/// <remarks>
/// For a datanode, for a block pool, verify that the current and previous
/// directories exist. Verify that previous hasn't been modified by comparing
/// the checksum of all its files with their original checksums. It
/// is assumed that the server has recovered and upgraded.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal virtual void CheckDataNode(string[] baseDirs, string bpid)
{
    for (int i = 0; i < baseDirs.Length; i++)
    {
        FilePath current = new FilePath(baseDirs[i], "current/" + bpid + "/current");
        NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, current, false), UpgradeUtilities.ChecksumMasterDataNodeContents());
        // Block files are placed under <sd>/current/<bpid>/current/finalized.
        FilePath currentFinalized = MiniDFSCluster.GetFinalizedDir(new FilePath(baseDirs[i]), bpid);
        NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, currentFinalized, true), UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
        // The "previous" directory must exist and still match the original checksums.
        FilePath previous = new FilePath(baseDirs[i], "current/" + bpid + "/previous");
        NUnit.Framework.Assert.IsTrue(previous.IsDirectory());
        NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, previous, false), UpgradeUtilities.ChecksumMasterDataNodeContents());
        FilePath previousFinalized = new FilePath(baseDirs[i], "current/" + bpid + "/previous/finalized");
        NUnit.Framework.Assert.AreEqual(UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode, previousFinalized, true), UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
    }
}
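// A minimal usage sketch (an assumption, not taken from this file): after an upgrade run
// against a MiniDFSCluster, the NameNode and DataNode storage roots would be handed to the
// checks above. The configuration lookups and the block-pool-id helper below are assumed,
// illustrative calls, not the test class's own code.
//
//   string[] nameNodeDirs = conf.GetStrings(DFSConfigKeys.DfsNamenodeNameDirKey);   // assumed config lookup
//   string[] dataNodeDirs = conf.GetStrings(DFSConfigKeys.DfsDatanodeDataDirKey);   // assumed config lookup
//   string bpid = UpgradeUtilities.GetCurrentBlockPoolID(cluster);                  // assumed helper
//   CheckResult(nameNodeDirs, dataNodeDirs, bpid);   // "current" exists, "previous" does not
//   CheckDataNode(dataNodeDirs, bpid);               // per-block-pool "current" and "previous" checksums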