Example 1
 /// <summary>
 /// For the NameNode, verify that the current and/or previous directories exist,
 /// as indicated by the method parameters.
 /// </summary>
 /// <remarks>
 /// For the NameNode, verify that the current and/or previous directories exist,
 /// as indicated by the method parameters.  If previous exists, verify that
 /// it has not been modified by comparing the checksums of the files it
 /// contains with their original checksums.  It is assumed that the server
 /// has recovered.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal virtual void CheckResultNameNode(string[] baseDirs, bool currentShouldExist,
                                           bool previousShouldExist)
 {
     if (currentShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
             NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current").IsDirectory());
             NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current/VERSION").IsFile());
             NUnit.Framework.Assert.IsNotNull(
                 FSImageTestUtil.FindNewestImageFile(baseDirs[i] + "/current"));
             NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "current/seen_txid").IsFile());
         }
     }
     if (previousShouldExist)
     {
         for (int i = 0; i < baseDirs.Length; i++)
         {
             NUnit.Framework.Assert.IsTrue(new FilePath(baseDirs[i], "previous").IsDirectory());
             NUnit.Framework.Assert.AreEqual(
                 UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.NameNode,
                                                   new FilePath(baseDirs[i], "previous"), false),
                 UpgradeUtilities.ChecksumMasterNameNodeContents());
         }
     }
 }
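The checksum comparison above goes through UpgradeUtilities.ChecksumContents, which is part of the Hadoop test harness. As a rough illustration of the underlying idea only (not the actual Hadoop implementation), a directory tree can be reduced to a single checksum by hashing every file in a stable order; the helper below is a minimal, hypothetical sketch built on System.IO and System.Security.Cryptography.

 // Hypothetical sketch: fold the MD5 digests of every file under a directory
 // into one checksum string, so two directory trees can be compared for equality.
 internal static string ChecksumDirectoryContents(string dir)
 {
     string[] files = System.IO.Directory.GetFiles(
         dir, "*", System.IO.SearchOption.AllDirectories);
     // Sort for a deterministic order; otherwise the combined hash is unstable.
     Array.Sort(files, StringComparer.Ordinal);
     using (var md5 = System.Security.Cryptography.MD5.Create())
     using (var combined = new System.IO.MemoryStream())
     {
         foreach (string file in files)
         {
             byte[] digest = md5.ComputeHash(System.IO.File.ReadAllBytes(file));
             combined.Write(digest, 0, digest.Length);
         }
         return BitConverter.ToString(md5.ComputeHash(combined.ToArray()));
     }
 }

Two directories whose files are byte-for-byte identical then produce the same string, which is the property the AreEqual assertion above relies on for the previous directory.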
Example 2
        /// <exception cref="System.Exception"/>
        public static void WaitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
                                             IList<int> txids)
        {
            long start = Time.Now();

            while (true)
            {
                try
                {
                    FSImageTestUtil.AssertNNHasCheckpoints(cluster, nnIdx, txids);
                    return;
                }
                catch (Exception err)
                {
                    if (Time.Now() - start > 10000)
                    {
                        throw;
                    }
                    else
                    {
                        Sharpen.Thread.Sleep(300);
                    }
                }
            }
        }
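The loop above is a common poll-until-timeout pattern: keep re-running an assertion, swallow the failure until a deadline passes, then rethrow the last one. A generic version of the same idea (a hypothetical helper, not part of the Hadoop or Sharpen test utilities) could look like this:

        /// <summary>
        /// Hypothetical helper: repeatedly run <paramref name="check"/> until it stops
        /// throwing or <paramref name="timeoutMs"/> elapses, sleeping
        /// <paramref name="intervalMs"/> between attempts.
        /// </summary>
        public static void RetryUntil(Action check, long timeoutMs = 10000, int intervalMs = 300)
        {
            DateTime deadline = DateTime.UtcNow.AddMilliseconds(timeoutMs);
            while (true)
            {
                try
                {
                    check();
                    return;
                }
                catch (Exception)
                {
                    if (DateTime.UtcNow > deadline)
                    {
                        throw;   // give up: surface the last assertion failure
                    }
                    System.Threading.Thread.Sleep(intervalMs);
                }
            }
        }

        // Usage sketch, assuming the same cluster/nnIdx/txids variables as above:
        // RetryUntil(() => FSImageTestUtil.AssertNNHasCheckpoints(cluster, nnIdx, txids));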
Example 3
        /// <summary>Verify that the new current directory is the old previous.</summary>
        /// <remarks>
        /// Verify that the new current directory is the old previous.
        /// It is assumed that the server has recovered and rolled back.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        internal virtual void CheckResult(HdfsServerConstants.NodeType nodeType,
                                          string[] baseDirs)
        {
            IList <FilePath> curDirs = Lists.NewArrayList();

            foreach (string baseDir in baseDirs)
            {
                FilePath curDir = new FilePath(baseDir, "current");
                curDirs.AddItem(curDir);
                switch (nodeType)
                {
                case HdfsServerConstants.NodeType.NameNode:
                {
                    FSImageTestUtil.AssertReasonableNameCurrentDir(curDir);
                    break;
                }

                case HdfsServerConstants.NodeType.DataNode:
                {
                    NUnit.Framework.Assert.AreEqual(
                        UpgradeUtilities.ChecksumContents(nodeType, curDir, false),
                        UpgradeUtilities.ChecksumMasterDataNodeContents());
                    break;
                }
                }
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(
                curDirs, Sharpen.Collections.EmptySet<string>());
            for (int i = 0; i < baseDirs.Length; i++)
            {
                NUnit.Framework.Assert.IsFalse(new FilePath(baseDirs[i], "previous").IsDirectory());
            }
        }
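Example 4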
        public virtual void TestUpgradeFromCorruptRel22Image()
        {
            UnpackStorage(Hadoop22Image, HadoopDfsDirTxt);
            // Overwrite the md5 stored in the VERSION files
            FilePath baseDir = new FilePath(MiniDFSCluster.GetBaseDirectory());

            FSImageTestUtil.CorruptVersionFile(new FilePath(baseDir, "name1/current/VERSION"),
                                               "imageMD5Digest", "22222222222222222222222222222222");
            FSImageTestUtil.CorruptVersionFile(new FilePath(baseDir, "name2/current/VERSION"),
                                               "imageMD5Digest", "22222222222222222222222222222222");
            // Attach our own log appender so we can verify output
            LogVerificationAppender appender = new LogVerificationAppender();
            Logger logger = Logger.GetRootLogger();

            logger.AddAppender(appender);
            // Upgrade should now fail
            try
            {
                UpgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).NumDataNodes(4), null);
                NUnit.Framework.Assert.Fail("Upgrade did not fail with bad MD5");
            }
            catch (IOException ioe)
            {
                string msg = StringUtils.StringifyException(ioe);
                if (!msg.Contains("Failed to load an FSImage file"))
                {
                    throw;
                }
                int md5failures = appender.CountExceptionsWithMessage(
                    " is corrupt with MD5 checksum of ");
                NUnit.Framework.Assert.AreEqual("Upgrade did not fail with bad MD5", 1, md5failures);
            }
        }
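The try/catch above expects the upgrade to fail and then inspects the exception text. With NUnit the same intent can also be written with Assert.Throws, which both asserts that the exception occurs and returns it for message checks; the following is only a sketch, reusing the upgradeConf, UpgradeAndVerify, and StringUtils members from the test above.

            // Sketch: equivalent failure check using NUnit's Assert.Throws.
            IOException ioe = NUnit.Framework.Assert.Throws<IOException>(() =>
                UpgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).NumDataNodes(4), null));
            NUnit.Framework.StringAssert.Contains("Failed to load an FSImage file",
                                                  StringUtils.StringifyException(ioe));

Example 5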
        /// <exception cref="System.Exception"/>
        private void TestFailoverFinalizesAndReadsInProgress(bool partialTxAtEnd)
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();

            try
            {
                // Create a fake in-progress edit-log in the shared directory
                URI          sharedUri = cluster.GetSharedEditsDir(0, 1);
                FilePath     sharedDir = new FilePath(sharedUri.GetPath(), "current");
                FSNamesystem fsn       = cluster.GetNamesystem(0);
                FSImageTestUtil.CreateAbortedLogWithMkdirs(sharedDir, NumDirsInLog, 1,
                                                           fsn.GetFSDirectory().GetLastInodeId() + 1);
                AssertEditFiles(Sharpen.Collections.SingletonList(sharedUri),
                                NNStorage.GetInProgressEditsFileName(1));
                if (partialTxAtEnd)
                {
                    FileOutputStream outs = null;
                    try
                    {
                        FilePath editLogFile = new FilePath(sharedDir,
                                                            NNStorage.GetInProgressEditsFileName(1));
                        outs = new FileOutputStream(editLogFile, true);
                        outs.Write(new byte[] { 0x18, 0x00, 0x00, 0x00 });
                        Log.Error("editLogFile = " + editLogFile);
                    }
                    finally
                    {
                        IOUtils.Cleanup(Log, outs);
                    }
                }
                // Transition one of the NNs to active
                cluster.TransitionToActive(0);
                // In the transition to active, it should have read the log -- and
                // hence see one of the dirs we made in the fake log.
                string testPath = "/dir" + NumDirsInLog;
                NUnit.Framework.Assert.IsNotNull(
                    cluster.GetNameNode(0).GetRpcServer().GetFileInfo(testPath));
                // It also should have finalized that log in the shared directory and started
                // writing to a new one at the next txid.
                AssertEditFiles(Sharpen.Collections.SingletonList(sharedUri),
                                NNStorage.GetFinalizedEditsFileName(1, NumDirsInLog + 1),
                                NNStorage.GetInProgressEditsFileName(NumDirsInLog + 2));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
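The FileOutputStream used above comes from the Sharpen compatibility layer. In plain .NET, the same trick of appending a few garbage bytes to the tail of an edit-log file (to simulate a partial trailing transaction) could be written with a FileStream opened in append mode; this is a hypothetical sketch, and the path parameter is illustrative.

        // Sketch: append a partial/garbage record to the end of an existing file
        // using only System.IO.
        private static void AppendGarbageBytes(string path)
        {
            using (var fs = new System.IO.FileStream(
                       path, System.IO.FileMode.Append, System.IO.FileAccess.Write))
            {
                fs.Write(new byte[] { 0x18, 0x00, 0x00, 0x00 }, 0, 4);
            }
        }

Example 6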
        public virtual void TestFedSingleNN()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NameNodePort(9927).Build();

            try
            {
                NameNode nn1 = cluster.GetNameNode();
                NUnit.Framework.Assert.IsNotNull("cannot create nn1", nn1);
                string bpid1 = FSImageTestUtil.GetFSImage(nn1).GetBlockPoolID();
                string cid1  = FSImageTestUtil.GetFSImage(nn1).GetClusterID();
                int    lv1   = FSImageTestUtil.GetFSImage(nn1).GetLayoutVersion();
                Log.Info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 +
                         ";uri=" + nn1.GetNameNodeAddress());
                // check number of volumes in fsdataset
                DataNode dn = cluster.GetDataNodes()[0];
                IDictionary <string, object> volInfos = dn.data.GetVolumeInfoMap();
                NUnit.Framework.Assert.IsTrue("No volumes in the fsdataset", volInfos.Count > 0);
                int i = 0;
                foreach (KeyValuePair <string, object> e in volInfos)
                {
                    Log.Info("vol " + i++ + ") " + e.Key + ": " + e.Value);
                }
                // number of volumes should be 2 - [data1, data2]
                NUnit.Framework.Assert.AreEqual("number of volumes is wrong", 2, volInfos.Count);
                foreach (BPOfferService bpos in dn.GetAllBpOs())
                {
                    Log.Info("reg: bpid=" + bpos.GetBlockPoolId() + "; name=" + bpos.bpRegistration +
                             "; sid=" + bpos.bpRegistration.GetDatanodeUuid() +
                             "; nna=" + GetNNSocketAddress(bpos));
                }
                // try block report
                BPOfferService bpos1 = dn.GetAllBpOs()[0];
                bpos1.TriggerBlockReportForTests();
                NUnit.Framework.Assert.AreEqual("wrong nn address", GetNNSocketAddress(bpos1),
                                                nn1.GetNameNodeAddress());
                NUnit.Framework.Assert.AreEqual("wrong bpid", bpos1.GetBlockPoolId(), bpid1);
                NUnit.Framework.Assert.AreEqual("wrong cid", dn.GetClusterId(), cid1);
                cluster.Shutdown();
                // Ensure all the BPOfferService threads are shutdown
                NUnit.Framework.Assert.AreEqual(0, dn.GetAllBpOs().Length);
                cluster = null;
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 7
        /// <exception cref="System.Exception"/>
        private void TestUpgrade(TestBootstrapStandbyWithQJM.UpgradeState state)
        {
            cluster.TransitionToActive(0);
            Configuration confNN1 = cluster.GetConfiguration(1);
            FilePath current = cluster.GetNameNode(1).GetFSImage().GetStorage()
                               .GetStorageDir(0).GetCurrentDir();
            FilePath tmp = cluster.GetNameNode(1).GetFSImage().GetStorage()
                           .GetStorageDir(0).GetPreviousTmp();

            // shut down nn1
            cluster.ShutdownNameNode(1);
            // make NN0 in upgrade state
            FSImage fsImage0 = cluster.GetNameNode(0).GetNamesystem().GetFSImage();

            Whitebox.SetInternalState(fsImage0, "isUpgradeFinalized", false);
            switch (state)
            {
            case TestBootstrapStandbyWithQJM.UpgradeState.Recover:
            {
                // rename the current directory to previous.tmp in nn1
                NNStorage.Rename(current, tmp);
                break;
            }

            case TestBootstrapStandbyWithQJM.UpgradeState.Format:
            {
                // rename the current directory to a random name so it's not formatted
                FilePath wrongPath = new FilePath(current.GetParentFile(), "wrong");
                NNStorage.Rename(current, wrongPath);
                break;
            }

            default:
            {
                break;
            }
            }
            int rc = BootstrapStandby.Run(new string[] { "-force" }, confNN1);

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the standby
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
            // make sure the NN1 is in upgrade state, i.e., the previous directory has
            // been successfully created
            cluster.RestartNameNode(1);
            NUnit.Framework.Assert.IsFalse(
                cluster.GetNameNode(1).GetNamesystem().IsUpgradeFinalized());
        }
Example 8
        public virtual void TestBootstrapStandbyWithActiveNN()
        {
            // make the first NN in active state
            cluster.TransitionToActive(0);
            Configuration confNN1 = cluster.GetConfiguration(1);

            // shut down nn1
            cluster.ShutdownNameNode(1);
            int rc = BootstrapStandby.Run(new string[] { "-force" }, confNN1);

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the standby
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
Example 9
        /// <summary>
        /// Verify that the current directory exists and that the previous directory
        /// does not exist.
        /// </summary>
        /// <remarks>
        /// Verify that the current directory exists and that the previous directory
        /// does not exist.  Verify that current has not been modified by comparing
        /// the checksums of the files it contains with their original checksums.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        internal static void CheckResult(string[] nameNodeDirs, string[] dataNodeDirs,
                                         string bpid)
        {
            IList <FilePath> dirs = Lists.NewArrayList();

            for (int i = 0; i < nameNodeDirs.Length; i++)
            {
                FilePath curDir = new FilePath(nameNodeDirs[i], "current");
                dirs.AddItem(curDir);
                FSImageTestUtil.AssertReasonableNameCurrentDir(curDir);
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(
                dirs, Sharpen.Collections.EmptySet<string>());
            FilePath[] dnCurDirs = new FilePath[dataNodeDirs.Length];
            for (int i_1 = 0; i_1 < dataNodeDirs.Length; i_1++)
            {
                dnCurDirs[i_1] = new FilePath(dataNodeDirs[i_1], "current");
                NUnit.Framework.Assert.AreEqual(
                    UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                      dnCurDirs[i_1], false),
                    UpgradeUtilities.ChecksumMasterDataNodeContents());
            }
            for (int i_2 = 0; i_2 < nameNodeDirs.Length; i_2++)
            {
                NUnit.Framework.Assert.IsFalse(new FilePath(nameNodeDirs[i_2], "previous").IsDirectory());
            }
            if (bpid == null)
            {
                for (int i_3 = 0; i_3 < dataNodeDirs.Length; i_3++)
                {
                    NUnit.Framework.Assert.IsFalse(new FilePath(dataNodeDirs[i_3], "previous").IsDirectory());
                }
            }
            else
            {
                for (int i_3 = 0; i_3 < dataNodeDirs.Length; i_3++)
                {
                    FilePath bpRoot = BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDirs[i_3]);
                    NUnit.Framework.Assert.IsFalse(new FilePath(bpRoot, "previous").IsDirectory());
                    FilePath bpCurFinalizeDir = new FilePath(bpRoot,
                        "current/" + DataStorage.StorageDirFinalized);
                    NUnit.Framework.Assert.AreEqual(
                        UpgradeUtilities.ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                          bpCurFinalizeDir, true),
                        UpgradeUtilities.ChecksumMasterBlockPoolFinalizedContents());
                }
            }
        }
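Example 10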
        public virtual void TestBootstrapStandbyWithActiveNN()
        {
            // make nn0 active
            cluster.TransitionToActive(0);
            // do ops and generate in-progress edit log data
            Configuration confNN1 = cluster.GetConfiguration(1);
            DistributedFileSystem dfs =
                (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, confNN1);

            for (int i = 1; i <= 10; i++)
            {
                dfs.Mkdirs(new Path("/test" + i));
            }
            dfs.Close();
            // shutdown nn1 and delete its edit log files
            cluster.ShutdownNameNode(1);
            DeleteEditLogIfExists(confNN1);
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, true);
            cluster.GetNameNodeRpc(0).SaveNamespace();
            cluster.GetNameNodeRpc(0).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, true);
            // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
            // immediately after saveNamespace
            int rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive" }, confNN1);

            NUnit.Framework.Assert.AreEqual("Mismatches return code", 6, rc);
            // check with -skipSharedEditsCheck
            rc = BootstrapStandby.Run(new string[] { "-force", "-nonInteractive", "-skipSharedEditsCheck" }, confNN1);
            NUnit.Framework.Assert.AreEqual("Mismatches return code", 0, rc);
            // Checkpoint as fast as we can, in a tight loop.
            confNN1.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            cluster.RestartNameNode(1);
            cluster.TransitionToStandby(1);
            NameNode nn0 = cluster.GetNameNode(0);

            HATestUtil.WaitForStandbyToCatchUp(nn0, cluster.GetNameNode(1));
            long expectedCheckpointTxId = NameNodeAdapter.GetNamesystem(nn0)
                                          .GetFSImage().GetMostRecentCheckpointTxId();

            HATestUtil.WaitForCheckpoint(cluster, 1,
                                         ImmutableList.Of((int)expectedCheckpointTxId));
            // Should have copied over the namespace
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1,
                                                   ImmutableList.Of((int)expectedCheckpointTxId));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
        }
Example 11
        /// <summary>
        /// Test for the case when both of the NNs in the cluster are
        /// in the standby state, and thus are both creating checkpoints
        /// and uploading them to each other.
        /// </summary>
        /// <remarks>
        /// Test for the case when both of the NNs in the cluster are
        /// in the standby state, and thus are both creating checkpoints
        /// and uploading them to each other.
        /// In this circumstance, they should receive the error from the
        /// other node indicating that the other node already has a
        /// checkpoint for the given txid, but this should not cause
        /// an abort, etc.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestBothNodesInStandbyState()
        {
            DoEdits(0, 10);
            cluster.TransitionToStandby(0);
            // Transitioning to standby closed the edit log on the active,
            // so the standby will catch up. Then, both will be in standby mode
            // with enough uncheckpointed txns to cause a checkpoint, and they
            // will each try to take a checkpoint and upload to each other.
            HATestUtil.WaitForCheckpoint(cluster, 1, ImmutableList.Of(12));
            HATestUtil.WaitForCheckpoint(cluster, 0, ImmutableList.Of(12));
            NUnit.Framework.Assert.AreEqual(12,
                nn0.GetNamesystem().GetFSImage().GetMostRecentCheckpointTxId());
            NUnit.Framework.Assert.AreEqual(12,
                nn1.GetNamesystem().GetFSImage().GetMostRecentCheckpointTxId());
            IList<FilePath> dirs = Lists.NewArrayList();

            Sharpen.Collections.AddAll(dirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0));
            Sharpen.Collections.AddAll(dirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 1));
            FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, ImmutableSet.Of <string>());
        }
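FSImageTestUtil.AssertParallelFilesAreIdentical verifies that both NameNodes ended up with byte-identical files in their storage directories. A stripped-down version of that check (a hypothetical helper that ignores the set of file names to skip) can be expressed with plain .NET I/O and NUnit:

        // Sketch: assert that files with the same name in two directories have
        // identical contents. Not the Hadoop implementation.
        private static void AssertDirsHaveIdenticalFiles(string dirA, string dirB)
        {
            foreach (string pathA in System.IO.Directory.GetFiles(dirA))
            {
                string name = System.IO.Path.GetFileName(pathA);
                string pathB = System.IO.Path.Combine(dirB, name);
                NUnit.Framework.Assert.IsTrue(System.IO.File.Exists(pathB), "missing file: " + pathB);
                NUnit.Framework.CollectionAssert.AreEqual(System.IO.File.ReadAllBytes(pathA),
                                                          System.IO.File.ReadAllBytes(pathB),
                                                          "contents differ for " + name);
            }
        }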
Example 12
        public virtual void TestSuccessfulBaseCase()
        {
            RemoveStandbyNameDirs();
            try
            {
                cluster.RestartNameNode(1);
                NUnit.Framework.Assert.Fail("Did not throw");
            }
            catch (IOException ioe)
            {
                GenericTestUtils.AssertExceptionContains(
                    "storage directory does not exist or is not accessible", ioe);
            }
            int rc = BootstrapStandby.Run(new string[] { "-nonInteractive" },
                                          cluster.GetConfiguration(1));

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the active
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
            // We should now be able to start the standby successfully.
            cluster.RestartNameNode(1);
        }
Example 13
        /// <summary>
        /// Test cancellation of ongoing checkpoints when failover happens
        /// mid-checkpoint.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestCheckpointCancellation()
        {
            cluster.TransitionToStandby(0);
            // Create an edit log in the shared edits dir with a lot
            // of mkdirs operations. This is solely so that the image is
            // large enough to take a non-trivial amount of time to load.
            // (only ~15MB)
            URI      sharedUri = cluster.GetSharedEditsDir(0, 1);
            FilePath sharedDir = new FilePath(sharedUri.GetPath(), "current");
            FilePath tmpDir = new FilePath(MiniDFSCluster.GetBaseDirectory(),
                                           "testCheckpointCancellation-tmp");
            FSNamesystem fsn = cluster.GetNamesystem(0);

            FSImageTestUtil.CreateAbortedLogWithMkdirs(tmpDir, NumDirsInLog, 3,
                                                       fsn.GetFSDirectory().GetLastInodeId() + 1);
            string fname = NNStorage.GetInProgressEditsFileName(3);

            new FilePath(tmpDir, fname).RenameTo(new FilePath(sharedDir, fname));
            // Checkpoint as fast as we can, in a tight loop.
            cluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 0);
            cluster.RestartNameNode(1);
            nn1 = cluster.GetNameNode(1);
            cluster.TransitionToActive(0);
            bool canceledOne = false;

            for (int i = 0; i < 10 && !canceledOne; i++)
            {
                DoEdits(i * 10, i * 10 + 10);
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
                cluster.TransitionToStandby(1);
                cluster.TransitionToActive(0);
                canceledOne = StandbyCheckpointer.GetCanceledCount() > 0;
            }
            NUnit.Framework.Assert.IsTrue(canceledOne);
        }
Example 14
        public virtual void TestDownloadingLaterCheckpoint()
        {
            // Roll edit logs a few times to inflate txid
            nn0.GetRpcServer().RollEditLog();
            nn0.GetRpcServer().RollEditLog();
            // Make checkpoint
            NameNodeAdapter.EnterSafeMode(nn0, false);
            NameNodeAdapter.SaveNamespace(nn0);
            NameNodeAdapter.LeaveSafeMode(nn0);
            long expectedCheckpointTxId = NameNodeAdapter.GetNamesystem(nn0)
                                          .GetFSImage().GetMostRecentCheckpointTxId();

            NUnit.Framework.Assert.AreEqual(6, expectedCheckpointTxId);
            int rc = BootstrapStandby.Run(new string[] { "-force" },
                                          cluster.GetConfiguration(1));

            NUnit.Framework.Assert.AreEqual(0, rc);
            // Should have copied over the namespace from the active
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1,
                                                   ImmutableList.Of((int)expectedCheckpointTxId));
            FSImageTestUtil.AssertNNFilesMatch(cluster);
            // We should now be able to start the standby successfully.
            cluster.RestartNameNode(1);
        }
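Example 15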
        public static void CreateOriginalFSImage()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetLong(DFSConfigKeys.DfsNamenodeDelegationTokenMaxLifetimeKey, 10000);
                conf.SetLong(DFSConfigKeys.DfsNamenodeDelegationTokenRenewIntervalKey, 5000);
                conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
                conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthToLocal, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//"
                         + "DEFAULT");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs = cluster.GetFileSystem();
                // Create a reasonable namespace
                for (int i = 0; i < NumDirs; i++)
                {
                    Path dir = new Path("/dir" + i);
                    hdfs.Mkdirs(dir);
                    writtenFiles[dir.ToString()] = PathToFileEntry(hdfs, dir.ToString());
                    for (int j = 0; j < FilesPerDir; j++)
                    {
                        Path file            = new Path(dir, "file" + j);
                        FSDataOutputStream o = hdfs.Create(file);
                        o.Write(23);
                        o.Close();
                        writtenFiles[file.ToString()] = PathToFileEntry(hdfs, file.ToString());
                    }
                }
                // Create an empty directory
                Path emptydir = new Path("/emptydir");
                hdfs.Mkdirs(emptydir);
                writtenFiles[emptydir.ToString()] = hdfs.GetFileStatus(emptydir);
                //Create a directory whose name should be escaped in XML
                Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
                hdfs.Mkdirs(invalidXMLDir);
                // Get delegation tokens so we log the delegation token op
                Org.Apache.Hadoop.Security.Token.Token<object>[] delegationTokens =
                    hdfs.AddDelegationTokens(TestRenewer, null);
                foreach (Org.Apache.Hadoop.Security.Token.Token<object> t in delegationTokens)
                {
                    Log.Debug("got token " + t);
                }
                Path snapshot = new Path("/snapshot");
                hdfs.Mkdirs(snapshot);
                hdfs.AllowSnapshot(snapshot);
                hdfs.Mkdirs(new Path("/snapshot/1"));
                hdfs.Delete(snapshot, true);
                // Set XAttrs so the fsimage contains XAttr ops
                Path xattr = new Path("/xattr");
                hdfs.Mkdirs(xattr);
                hdfs.SetXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
                hdfs.SetXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
                // OIV should be able to handle empty value XAttrs
                hdfs.SetXAttr(xattr, "user.a3", null);
                writtenFiles[xattr.ToString()] = hdfs.GetFileStatus(xattr);
                // Write results to the fsimage file
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
                hdfs.SaveNamespace();
                // Determine location of fsimage file
                originalFsimage = FSImageTestUtil.FindLatestImageFile(
                    FSImageTestUtil.GetFSImage(cluster.GetNameNode()).GetStorage().GetStorageDir(0));
                if (originalFsimage == null)
                {
                    throw new RuntimeException("Didn't generate or can't find fsimage");
                }
                Log.Debug("original FS image file is " + originalFsimage);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
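Example 16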
        public static void CreateOriginalFSImage()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs = cluster.GetFileSystem();
                // Create a reasonable namespace with ACLs
                Path dir = new Path("/dirWithNoAcl");
                hdfs.Mkdirs(dir);
                writtenAcls[dir.ToString()] = hdfs.GetAclStatus(dir);
                dir = new Path("/dirWithDefaultAcl");
                hdfs.Mkdirs(dir);
                hdfs.SetAcl(dir, Lists.NewArrayList(
                    AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.User, FsAction.All),
                    AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.User, "foo", FsAction.All),
                    AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.Group, FsAction.ReadExecute),
                    AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.Other, FsAction.None)));
                writtenAcls[dir.ToString()] = hdfs.GetAclStatus(dir);
                Path file            = new Path("/noAcl");
                FSDataOutputStream o = hdfs.Create(file);
                o.Write(23);
                o.Close();
                writtenAcls[file.ToString()] = hdfs.GetAclStatus(file);
                file = new Path("/withAcl");
                o    = hdfs.Create(file);
                o.Write(23);
                o.Close();
                hdfs.SetAcl(file, Lists.NewArrayList(
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, FsAction.ReadWrite),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, "foo", FsAction.Read),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Group, FsAction.Read),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Other, FsAction.None)));
                writtenAcls[file.ToString()] = hdfs.GetAclStatus(file);
                file = new Path("/withSeveralAcls");
                o    = hdfs.Create(file);
                o.Write(23);
                o.Close();
                hdfs.SetAcl(file, Lists.NewArrayList(
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, FsAction.ReadWrite),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, "foo", FsAction.ReadWrite),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, "bar", FsAction.Read),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Group, FsAction.Read),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Group, "group", FsAction.Read),
                    AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Other, FsAction.None)));
                writtenAcls[file.ToString()] = hdfs.GetAclStatus(file);
                // Write results to the fsimage file
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
                hdfs.SaveNamespace();
                // Determine the location of the fsimage file
                originalFsimage = FSImageTestUtil.FindLatestImageFile(
                    FSImageTestUtil.GetFSImage(cluster.GetNameNode()).GetStorage().GetStorageDir(0));
                if (originalFsimage == null)
                {
                    throw new RuntimeException("Didn't generate or can't find fsimage");
                }
                Log.Debug("original FS image file is " + originalFsimage);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 17
        public virtual void TestAppendRestart()
        {
            Configuration conf = new HdfsConfiguration();

            // Turn off persistent IPC, so that the DFSClient can survive NN restart
            conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
            MiniDFSCluster     cluster = null;
            FSDataOutputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                FileSystem fs = cluster.GetFileSystem();
                FilePath editLog = new FilePath(FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0)[0],
                                                NNStorage.GetInProgressEditsFileName(1));
                EnumMap<FSEditLogOpCodes, Holder<int>> counts;
                Path p1 = new Path("/block-boundaries");
                WriteAndAppend(fs, p1, BlockSize, BlockSize);
                counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
                // OP_ADD to create file
                // OP_ADD_BLOCK for first block
                // OP_CLOSE to close file
                // OP_APPEND to reopen file
                // OP_ADD_BLOCK for second block
                // OP_CLOSE to close file
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAdd].held);
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAppend].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAddBlock].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpClose].held);
                Path p2 = new Path("/not-block-boundaries");
                WriteAndAppend(fs, p2, BlockSize / 2, BlockSize);
                counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
                // OP_ADD to create file
                // OP_ADD_BLOCK for first block
                // OP_CLOSE to close file
                // OP_APPEND to re-establish the lease
                // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
                // OP_ADD_BLOCK at the start of the second block
                // OP_CLOSE to close file
                // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
                //       in addition to the ones above
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAdd].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAppend].held);
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpUpdateBlocks].held);
                NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpAddBlock].held);
                NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpClose].held);
                cluster.RestartNameNode();
                AppendTestUtil.Check(fs, p1, 2 * BlockSize);
                AppendTestUtil.Check(fs, p2, 3 * BlockSize / 2);
            }
            finally
            {
                IOUtils.CloseStream(stream);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
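Example 18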
        public virtual void Test2NNRegistration()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2)).Build();

            try
            {
                cluster.WaitActive();
                NameNode nn1 = cluster.GetNameNode(0);
                NameNode nn2 = cluster.GetNameNode(1);
                NUnit.Framework.Assert.IsNotNull("cannot create nn1", nn1);
                NUnit.Framework.Assert.IsNotNull("cannot create nn2", nn2);
                string bpid1 = FSImageTestUtil.GetFSImage(nn1).GetBlockPoolID();
                string bpid2 = FSImageTestUtil.GetFSImage(nn2).GetBlockPoolID();
                string cid1  = FSImageTestUtil.GetFSImage(nn1).GetClusterID();
                string cid2  = FSImageTestUtil.GetFSImage(nn2).GetClusterID();
                int    lv1   = FSImageTestUtil.GetFSImage(nn1).GetLayoutVersion();
                int    lv2   = FSImageTestUtil.GetFSImage(nn2).GetLayoutVersion();
                int    ns1   = FSImageTestUtil.GetFSImage(nn1).GetNamespaceID();
                int    ns2   = FSImageTestUtil.GetFSImage(nn2).GetNamespaceID();
                NUnit.Framework.Assert.AreNotSame("namespace ids should be different", ns1, ns2);
                Log.Info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 +
                         ";uri=" + nn1.GetNameNodeAddress());
                Log.Info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 +
                         ";uri=" + nn2.GetNameNodeAddress());
                // check number of volumes in fsdataset
                DataNode dn = cluster.GetDataNodes()[0];
                IDictionary <string, object> volInfos = dn.data.GetVolumeInfoMap();
                NUnit.Framework.Assert.IsTrue("No volumes in the fsdataset", volInfos.Count > 0);
                int i = 0;
                foreach (KeyValuePair <string, object> e in volInfos)
                {
                    Log.Info("vol " + i++ + ") " + e.Key + ": " + e.Value);
                }
                // number of volumes should be 2 - [data1, data2]
                NUnit.Framework.Assert.AreEqual("number of volumes is wrong", 2, volInfos.Count);
                foreach (BPOfferService bpos in dn.GetAllBpOs())
                {
                    Log.Info("BP: " + bpos);
                }
                BPOfferService bpos1 = dn.GetAllBpOs()[0];
                BPOfferService bpos2 = dn.GetAllBpOs()[1];
                // The order of bpos is not guaranteed, so fix the order
                if (GetNNSocketAddress(bpos1).Equals(nn2.GetNameNodeAddress()))
                {
                    BPOfferService tmp = bpos1;
                    bpos1 = bpos2;
                    bpos2 = tmp;
                }
                NUnit.Framework.Assert.AreEqual("wrong nn address", GetNNSocketAddress(bpos1),
                                                nn1.GetNameNodeAddress());
                NUnit.Framework.Assert.AreEqual("wrong nn address", GetNNSocketAddress(bpos2),
                                                nn2.GetNameNodeAddress());
                NUnit.Framework.Assert.AreEqual("wrong bpid", bpos1.GetBlockPoolId(), bpid1);
                NUnit.Framework.Assert.AreEqual("wrong bpid", bpos2.GetBlockPoolId(), bpid2);
                NUnit.Framework.Assert.AreEqual("wrong cid", dn.GetClusterId(), cid1);
                NUnit.Framework.Assert.AreEqual("cid should be same", cid2, cid1);
                NUnit.Framework.Assert.AreEqual("namespace should be same",
                                                bpos1.bpNSInfo.namespaceID, ns1);
                NUnit.Framework.Assert.AreEqual("namespace should be same",
                                                bpos2.bpNSInfo.namespaceID, ns2);
            }
            finally
            {
                cluster.Shutdown();
            }
        }