Code Example #1
        /// <summary>
        /// Confirm that the FSImage files in all StorageDirectories are the same
        /// and non-empty, and that there are the expected number of them.
        /// </summary>
        /// <param name="fsn">the FSNamesystem being checked.</param>
        /// <param name="numImageDirs">the configured number of StorageDirectories of type IMAGE.</param>
        /// <returns>the md5 hash of the most recent FSImage files, which must all be the same.</returns>
        /// <exception cref="System.Exception">
        /// if image files are empty or different, if fewer than two
        /// StorageDirectories are provided, or if the actual number of
        /// StorageDirectories is less than configured.
        /// </exception>
        public static string CheckImages(FSNamesystem fsn, int numImageDirs)
        {
            NNStorage stg = fsn.GetFSImage().GetStorage();

            //any failed StorageDirectory is removed from the storageDirs list
            NUnit.Framework.Assert.AreEqual("Some StorageDirectories failed Upgrade", numImageDirs
                                            , stg.GetNumStorageDirs(NNStorage.NameNodeDirType.Image));
            NUnit.Framework.Assert.IsTrue("Not enough fsimage copies in MiniDFSCluster " + "to test parallel write"
                                          , numImageDirs > 1);
            // List of "current/" directory from each SD
            IList <FilePath> dirs = FSImageTestUtil.GetCurrentDirs(stg, NNStorage.NameNodeDirType
                                                                   .Image);

            // across directories, all files with same names should be identical hashes
            FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, Sharpen.Collections.EmptySet
                                                            <string>());
            FSImageTestUtil.AssertSameNewestImage(dirs);
            // Return the hash of the newest image file
            Storage.StorageDirectory firstSd = stg.DirIterator(NNStorage.NameNodeDirType.Image
                                                               ).Next();
            FilePath latestImage = FSImageTestUtil.FindLatestImageFile(firstSd);
            string   md5         = FSImageTestUtil.GetImageFileMD5IgnoringTxId(latestImage);

            System.Console.Error.WriteLine("md5 of " + latestImage + ": " + md5);
            return(md5);
        }
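
A hedged usage sketch (not from the project source): drive CheckImages from a test against a freshly started MiniDFSCluster. It assumes the sketch lives in the same class as CheckImages above and that the default cluster layout provides two IMAGE-type storage directories; a real caller, such as a parallel-write or upgrade test, would configure the name directories explicitly.

        // Illustrative only; assumes the default MiniDFSCluster layout yields
        // two IMAGE-type storage directories, so numImageDirs is passed as 2.
        public virtual void SketchCheckImagesUsage()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                FSNamesystem fsn = cluster.GetNamesystem();
                string       md5 = CheckImages(fsn, 2);
                System.Console.Error.WriteLine("parallel image md5: " + md5);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }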
Code Example #2
        public virtual void TestReadURL()
        {
            HttpURLConnection conn = Org.Mockito.Mockito.Mock <HttpURLConnection>();

            Org.Mockito.Mockito.DoReturn(new ByteArrayInputStream(FakeLogData)).When(conn).GetInputStream
                ();
            Org.Mockito.Mockito.DoReturn(HttpURLConnection.HttpOk).When(conn).GetResponseCode
                ();
            Org.Mockito.Mockito.DoReturn(Sharpen.Extensions.ToString(FakeLogData.Length)).When
                (conn).GetHeaderField("Content-Length");
            URLConnectionFactory factory = Org.Mockito.Mockito.Mock <URLConnectionFactory>();

            Org.Mockito.Mockito.DoReturn(conn).When(factory).OpenConnection(Org.Mockito.Mockito
                                                                            .Any <Uri>(), Matchers.AnyBoolean());
            Uri url = new Uri("http://localhost/fakeLog");
            EditLogInputStream elis = EditLogFileInputStream.FromUrl(factory, url, HdfsConstants
                                                                     .InvalidTxid, HdfsConstants.InvalidTxid, false);
            // Read the edit log and verify that we got all of the data.
            EnumMap <FSEditLogOpCodes, Holder <int> > counts = FSImageTestUtil.CountEditLogOpTypes
                                                                   (elis);

            Assert.AssertThat(counts[FSEditLogOpCodes.OpAdd].held, CoreMatchers.Is(1));
            Assert.AssertThat(counts[FSEditLogOpCodes.OpSetGenstampV1].held, CoreMatchers.Is(
                                  1));
            Assert.AssertThat(counts[FSEditLogOpCodes.OpClose].held, CoreMatchers.Is(1));
            // Check that length header was picked up.
            NUnit.Framework.Assert.AreEqual(FakeLogData.Length, elis.Length());
            elis.Close();
        }
Code Example #3
        public virtual void TestDigest()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                DistributedFileSystem fs = cluster.GetFileSystem();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                fs.SaveNamespace();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                FilePath currentDir = FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0)[0];
                FilePath fsimage    = FSImageTestUtil.FindNewestImageFile(currentDir.GetAbsolutePath
                                                                              ());
                NUnit.Framework.Assert.AreEqual(MD5FileUtils.ReadStoredMd5ForFile(fsimage), MD5FileUtils
                                                .ComputeMd5ForFile(fsimage));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #4
File: TestBackupNode.cs Project: orf53975/hadoop.net
        /// <exception cref="System.Exception"/>
        private void AssertStorageDirsMatch(NameNode nn, BackupNode backup)
        {
            // Check that the stored files in the name dirs are identical
            IList <FilePath> dirs = Lists.NewArrayList(FSImageTestUtil.GetCurrentDirs(nn.GetFSImage
                                                                                          ().GetStorage(), null));

            Sharpen.Collections.AddAll(dirs, FSImageTestUtil.GetCurrentDirs(backup.GetFSImage
                                                                                ().GetStorage(), null));
            FSImageTestUtil.AssertParallelFilesAreIdentical(dirs, ImmutableSet.Of("VERSION"));
        }
Code Example #5
        /// <returns>
        /// the fsimage file with the most recent transaction ID in the
        /// given 'current/' directory.
        /// </returns>
        /// <exception cref="System.IO.IOException"/>
        public static FilePath FindNewestImageFile(string currentDirPath)
        {
            Storage.StorageDirectory sd = FSImageTestUtil.MockStorageDirectory(new FilePath(currentDirPath
                                                                                            ), NNStorage.NameNodeDirType.Image);
            FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector
                                                                 ();

            inspector.InspectDirectory(sd);
            IList <FSImageStorageInspector.FSImageFile> latestImages = inspector.GetLatestImages
                                                                           ();

            return((latestImages.IsEmpty()) ? null : latestImages[0].GetFile());
        }
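
A short illustrative caller (not in the original file): handle the null return when a directory holds no image yet, and otherwise cross-check the stored MD5 the way Code Example #3 does. The currentDir parameter is a placeholder.

        // Illustrative only; "currentDir" is a placeholder for a NameNode
        // current/ directory, e.g. one returned by GetNameNodeCurrentDirs.
        private static void SketchFindNewestImage(FilePath currentDir)
        {
            FilePath newest = FSImageTestUtil.FindNewestImageFile(currentDir.GetAbsolutePath());

            if (newest == null)
            {
                System.Console.Error.WriteLine("no fsimage found under " + currentDir);
                return;
            }
            // Same consistency check as in Code Example #3.
            NUnit.Framework.Assert.AreEqual(MD5FileUtils.ReadStoredMd5ForFile(newest),
                                            MD5FileUtils.ComputeMd5ForFile(newest));
        }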
Code Example #6
        public virtual void TestDisplayRecentEditLogOpCodes()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).EnableManagedDfsDirsRedundancy
                          (false).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            FSNamesystem namesystem = cluster.GetNamesystem();
            FSImage      fsimage    = namesystem.GetFSImage();

            for (int i = 0; i < 20; i++)
            {
                fileSys.Mkdirs(new Path("/tmp/tmp" + i));
            }
            Storage.StorageDirectory sd = fsimage.GetStorage().DirIterator(NNStorage.NameNodeDirType
                                                                           .Edits).Next();
            cluster.Shutdown();
            FilePath editFile = FSImageTestUtil.FindLatestEditsLog(sd).GetFile();

            NUnit.Framework.Assert.IsTrue("Should exist: " + editFile, editFile.Exists());
            // Corrupt the edits file.
            long             fileLen = editFile.Length();
            RandomAccessFile rwf     = new RandomAccessFile(editFile, "rw");

            rwf.Seek(fileLen - 40);
            for (int i_1 = 0; i_1 < 20; i_1++)
            {
                rwf.Write(FSEditLogOpCodes.OpDelete.GetOpCode());
            }
            rwf.Close();
            StringBuilder bld = new StringBuilder();

            bld.Append("^Error replaying edit log at offset \\d+.  ");
            bld.Append("Expected transaction ID was \\d+\n");
            bld.Append("Recent opcode offsets: (\\d+\\s*){4}$");
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).EnableManagedDfsDirsRedundancy
                              (false).Format(false).Build();
                NUnit.Framework.Assert.Fail("should not be able to start");
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.IsTrue("error message contains opcodes message", e.Message
                                              .Matches(bld.ToString()));
            }
        }
Code Example #7
        /// <exception cref="System.IO.IOException"/>
        private void DoIt(IDictionary <string, string> paramsToCorrupt)
        {
            MiniDFSCluster    cluster = null;
            FileSystem        fs      = null;
            SecondaryNameNode snn     = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                conf.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, "0.0.0.0:0");
                snn = new SecondaryNameNode(conf);
                fs  = cluster.GetFileSystem();
                fs.Mkdirs(new Path("/test/foo"));
                snn.DoCheckpoint();
                IList <FilePath> versionFiles = snn.GetFSImage().GetStorage().GetFiles(null, "VERSION"
                                                                                       );
                snn.Shutdown();
                foreach (FilePath versionFile in versionFiles)
                {
                    foreach (KeyValuePair <string, string> paramToCorrupt in paramsToCorrupt)
                    {
                        string param = paramToCorrupt.Key;
                        string val   = paramToCorrupt.Value;
                        System.Console.Out.WriteLine("Changing '" + param + "' to '" + val + "' in " + versionFile
                                                     );
                        FSImageTestUtil.CorruptVersionFile(versionFile, param, val);
                    }
                }
                snn = new SecondaryNameNode(conf);
                fs.Mkdirs(new Path("/test/bar"));
                snn.DoCheckpoint();
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (snn != null)
                {
                    snn.Shutdown();
                }
            }
        }
Code Example #8
        /// <exception cref="System.Exception"/>
        public static void AssertNNFilesMatch(MiniDFSCluster cluster)
        {
            IList <FilePath> curDirs = Lists.NewArrayList();

            Sharpen.Collections.AddAll(curDirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster
                                                                                       , 0));
            Sharpen.Collections.AddAll(curDirs, FSImageTestUtil.GetNameNodeCurrentDirs(cluster
                                                                                       , 1));
            // Ignore seen_txid file, since the newly bootstrapped standby
            // will have a higher seen_txid than the one it bootstrapped from.
            ICollection <string> ignoredFiles = ImmutableSet.Of("seen_txid");

            FSImageTestUtil.AssertParallelFilesAreIdentical(curDirs, ignoredFiles);
        }
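
A hedged sketch of the usual call site: an HA MiniDFSCluster whose second NameNode has just been bootstrapped or checkpointed. MiniDFSNNTopology.SimpleHATopology() and the NnTopology builder method are assumptions carried over from the upstream Java test API and may be named differently in this port.

        // Hypothetical sketch; the HA topology helpers below are assumed,
        // not taken from this file.
        public virtual void SketchAssertNNFilesMatchUsage()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                         .NnTopology(MiniDFSNNTopology.SimpleHATopology())
                                         .NumDataNodes(0).Build();

            try
            {
                cluster.WaitActive();
                // ... bootstrap the standby or force a checkpoint here ...
                AssertNNFilesMatch(cluster);
            }
            finally
            {
                cluster.Shutdown();
            }
        }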
Code Example #9
        /// <summary>Create an unfinalized edit log for testing purposes</summary>
        /// <param name="testDir">Directory to create the edit log in</param>
        /// <param name="numTx">Number of transactions to add to the new edit log</param>
        /// <param name="offsetToTxId">
        /// A map from transaction IDs to offsets in the
        /// edit log file.
        /// </param>
        /// <returns>The new edit log file name.</returns>
        /// <exception cref="System.IO.IOException"/>
        private static FilePath PrepareUnfinalizedTestEditLog(FilePath testDir, int numTx
                                                              , SortedDictionary <long, long> offsetToTxId)
        {
            FilePath inProgressFile = new FilePath(testDir, NNStorage.GetInProgressEditsFileName
                                                       (1));
            FSEditLog fsel   = null;
            FSEditLog spyLog = null;

            try
            {
                fsel   = FSImageTestUtil.CreateStandaloneEditLog(testDir);
                spyLog = Org.Mockito.Mockito.Spy(fsel);
                // Normally, the in-progress edit log would be finalized by
                // FSEditLog#endCurrentLogSegment.  For testing purposes, we
                // disable that here.
                Org.Mockito.Mockito.DoNothing().When(spyLog).EndCurrentLogSegment(true);
                spyLog.OpenForWrite();
                NUnit.Framework.Assert.IsTrue("should exist: " + inProgressFile, inProgressFile.Exists
                                                  ());
                for (int i = 0; i < numTx; i++)
                {
                    long trueOffset = GetNonTrailerLength(inProgressFile);
                    long thisTxId   = spyLog.GetLastWrittenTxId() + 1;
                    offsetToTxId[trueOffset] = thisTxId;
                    System.Console.Error.WriteLine("txid " + thisTxId + " at offset " + trueOffset);
                    spyLog.LogDelete("path" + i, i, false);
                    spyLog.LogSync();
                }
            }
            finally
            {
                if (spyLog != null)
                {
                    spyLog.Close();
                }
                else
                {
                    if (fsel != null)
                    {
                        fsel.Close();
                    }
                }
            }
            return(inProgressFile);
        }
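
A small illustrative caller (not in the original file): build a short unfinalized log in a scratch directory and print the recorded offset-to-txid pairs. The scratch directory is a placeholder supplied by the caller.

        // Illustrative only; "testDir" is a placeholder scratch directory.
        private static void SketchPrepareUnfinalizedLog(FilePath testDir)
        {
            SortedDictionary <long, long> offsetToTxId = new SortedDictionary <long, long>();
            FilePath inProgress = PrepareUnfinalizedTestEditLog(testDir, 5, offsetToTxId);

            foreach (KeyValuePair <long, long> entry in offsetToTxId)
            {
                System.Console.Error.WriteLine("offset " + entry.Key + " -> txid " + entry.Value
                                               + " in " + inProgress);
            }
        }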
Code Example #10
        public virtual void TestCurrentStorageInspector()
        {
            FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector
                                                                 ();

            Storage.StorageDirectory mockDir = FSImageTestUtil.MockStorageDirectory(NNStorage.NameNodeDirType
                                                                                    .ImageAndEdits, false, "/foo/current/" + NNStorage.GetImageFileName(123), "/foo/current/"
                                                                                    + NNStorage.GetFinalizedEditsFileName(123, 456), "/foo/current/" + NNStorage.GetImageFileName
                                                                                        (456), "/foo/current/" + NNStorage.GetInProgressEditsFileName(457));
            inspector.InspectDirectory(mockDir);
            NUnit.Framework.Assert.AreEqual(2, inspector.foundImages.Count);
            FSImageStorageInspector.FSImageFile latestImage = inspector.GetLatestImages()[0];
            NUnit.Framework.Assert.AreEqual(456, latestImage.txId);
            NUnit.Framework.Assert.AreSame(mockDir, latestImage.sd);
            NUnit.Framework.Assert.IsTrue(inspector.IsUpgradeFinalized());
            NUnit.Framework.Assert.AreEqual(new FilePath("/foo/current/" + NNStorage.GetImageFileName
                                                             (456)), latestImage.GetFile());
        }
Code Example #11
        public virtual void TestGetRemoteEditLog()
        {
            Storage.StorageDirectory sd = FSImageTestUtil.MockStorageDirectory(NNStorage.NameNodeDirType
                                                                               .Edits, false, NNStorage.GetFinalizedEditsFileName(1, 100), NNStorage.GetFinalizedEditsFileName
                                                                                   (101, 200), NNStorage.GetInProgressEditsFileName(201), NNStorage.GetFinalizedEditsFileName
                                                                                   (1001, 1100));
            // passing null for NNStorage because this unit test will not use it
            FileJournalManager fjm = new FileJournalManager(conf, sd, null);

            NUnit.Framework.Assert.AreEqual("[1,100],[101,200],[1001,1100]", GetLogsAsString(
                                                fjm, 1));
            NUnit.Framework.Assert.AreEqual("[101,200],[1001,1100]", GetLogsAsString(fjm, 101
                                                                                     ));
            NUnit.Framework.Assert.AreEqual("[101,200],[1001,1100]", GetLogsAsString(fjm, 150
                                                                                     ));
            NUnit.Framework.Assert.AreEqual("[1001,1100]", GetLogsAsString(fjm, 201));
            NUnit.Framework.Assert.AreEqual("Asking for a newer log than exists should return empty list"
                                            , string.Empty, GetLogsAsString(fjm, 9999));
        }
Code Example #12
        /// <summary>
        /// Create an aborted in-progress log in the given directory, containing
        /// only a specified number of "mkdirs" operations.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public static void CreateAbortedLogWithMkdirs(FilePath editsLogDir, int numDirs,
                                                      long firstTxId, long newInodeId)
        {
            FSEditLog editLog = FSImageTestUtil.CreateStandaloneEditLog(editsLogDir);

            editLog.SetNextTxId(firstTxId);
            editLog.OpenForWrite();
            PermissionStatus perms = PermissionStatus.CreateImmutable("fakeuser", "fakegroup"
                                                                      , FsPermission.CreateImmutable((short)0x1ed));

            for (int i = 1; i <= numDirs; i++)
            {
                string         dirName = "dir" + i;
                INodeDirectory dir     = new INodeDirectory(newInodeId + i - 1, DFSUtil.String2Bytes(
                                                                dirName), perms, 0L);
                editLog.LogMkDir("/" + dirName, dir);
            }
            editLog.LogSync();
            editLog.AbortCurrentLogSegment();
        }
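
A hedged usage sketch: after writing an aborted segment, the in-progress file for the first transaction ID should be present in the log directory, mirroring the existence checks used elsewhere in these examples. The directory and numeric arguments are illustrative.

        // Illustrative only; the directory, txid, and inode-id values are made up.
        private static void SketchCreateAbortedLog(FilePath editsLogDir)
        {
            long firstTxId = 1;

            CreateAbortedLogWithMkdirs(editsLogDir, 10, firstTxId, 1000L);
            FilePath inProgress = new FilePath(editsLogDir,
                                               NNStorage.GetInProgressEditsFileName(firstTxId));
            NUnit.Framework.Assert.IsTrue("should exist: " + inProgress, inProgress.Exists());
        }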
Code Example #13
        /// <summary>
        /// Return a standalone instance of FSEditLog that will log into the given
        /// log directory.
        /// </summary>
        /// <remarks>
        /// Return a standalone instance of FSEditLog that will log into the given
        /// log directory. The returned instance is not yet opened.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        public static FSEditLog CreateStandaloneEditLog(FilePath logDir)
        {
            NUnit.Framework.Assert.IsTrue(logDir.Mkdirs() || logDir.Exists());
            if (!FileUtil.FullyDeleteContents(logDir))
            {
                throw new IOException("Unable to delete contents of " + logDir);
            }
            NNStorage storage = Org.Mockito.Mockito.Mock <NNStorage>();

            Storage.StorageDirectory sd = FSImageTestUtil.MockStorageDirectory(logDir, NNStorage.NameNodeDirType
                                                                               .Edits);
            IList <Storage.StorageDirectory> sds = Lists.NewArrayList(sd);

            Org.Mockito.Mockito.DoReturn(sds).When(storage).DirIterable(NNStorage.NameNodeDirType
                                                                        .Edits);
            Org.Mockito.Mockito.DoReturn(sd).When(storage).GetStorageDirectory(Matchers.AnyObject
                                                                               <URI>());
            FSEditLog editLog = new FSEditLog(new Configuration(), storage, ImmutableList.Of(
                                                  logDir.ToURI()));

            editLog.InitJournalsForWrite();
            return(editLog);
        }
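
The helper above works by mocking NNStorage so that FSEditLog writes real segment files into the given directory without a full NameNode. A minimal lifecycle sketch, assuming a test-supplied scratch directory (the path and the logged operation are illustrative):

        // Sketch only: open a segment, log one op, sync, then abort the
        // segment as Code Example #12 does (Code Example #9 calls Close()
        // instead).
        private static void SketchStandaloneEditLog(FilePath logDir)
        {
            FSEditLog editLog = FSImageTestUtil.CreateStandaloneEditLog(logDir);

            editLog.OpenForWrite();
            editLog.LogDelete("/sketch/path", 1, false);
            editLog.LogSync();
            editLog.AbortCurrentLogSegment();
        }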
Code Example #14
File: TestBackupNode.cs Project: orf53975/hadoop.net
        internal virtual void WaitCheckpointDone(MiniDFSCluster cluster, long txid)
        {
            long thisCheckpointTxId;

            do
            {
                try
                {
                    Log.Info("Waiting checkpoint to complete... " + "checkpoint txid should increase above "
                             + txid);
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                }
                // The checkpoint is not done until the nn has received it from the bn
                thisCheckpointTxId = cluster.GetNameNode().GetFSImage().GetStorage().GetMostRecentCheckpointTxId
                                         ();
            } while (thisCheckpointTxId < txid);
            // Check that the checkpoint got uploaded to NN successfully
            FSImageTestUtil.AssertNNHasCheckpoints(cluster, Collections.SingletonList((int)thisCheckpointTxId
                                                                                      ));
        }
Code Example #15
File: TestBackupNode.cs Project: orf53975/hadoop.net
        /// <exception cref="System.Exception"/>
        internal virtual void TestCheckpoint(HdfsServerConstants.StartupOption op)
        {
            Path          file1 = new Path("/checkpoint.dat");
            Path          file2 = new Path("/checkpoint2.dat");
            Path          file3 = new Path("/backup.dat");
            Configuration conf  = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            short replication  = (short)conf.GetInt("dfs.replication", 3);
            int   numDatanodes = Math.Max(3, replication);

            conf.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsBlockreportInitialDelayKey, "0");
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            // disable block scanner
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointTxnsKey, 1);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                //
                // verify that 'format' really blew away all pre-existing files
                //
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                //
                // Create file1
                //
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(file1));
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath nnCurDir = new FilePath(BaseDir, "name1/current/");
            FilePath bnCurDir = new FilePath(GetBackupNodeDir(op, 1), "/current/");

            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir
                                                                             ), ImmutableSet.Of <string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file1 still exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                fileSys = cluster.GetFileSystem();
                // check that file1 still exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                fileSys.Delete(file1, true);
                // create new file file2
                fileSys.Mkdirs(file2);
                //
                // Take a checkpoint
                //
                long txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup = StartBackupNode(conf, op, 1);
                WaitCheckpointDone(cluster, txid);
                for (int i = 0; i < 10; i++)
                {
                    fileSys.Mkdirs(new Path("file_" + i));
                }
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                txid = cluster.GetNameNodeRpc().GetTransactionID();
                backup.DoCheckpoint();
                WaitCheckpointDone(cluster, txid);
                // Try BackupNode operations
                IPEndPoint add = backup.GetNameNodeAddress();
                // Write to BN
                FileSystem bnFS = FileSystem.Get(new Path("hdfs://" + NetUtils.GetHostPortString(
                                                              add)).ToUri(), conf);
                bool canWrite = true;
                try
                {
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(bnFS, file3, replication
                                                                                    );
                }
                catch (IOException eio)
                {
                    Log.Info("Write to " + backup.GetRole() + " failed as expected: ", eio);
                    canWrite = false;
                }
                NUnit.Framework.Assert.IsFalse("Write to BackupNode must be prohibited.", canWrite
                                               );
                // Reads are allowed for BackupNode, but not for CheckpointNode
                bool canRead = true;
                try
                {
                    bnFS.Exists(file2);
                }
                catch (IOException eio)
                {
                    Log.Info("Read from " + backup.GetRole() + " failed: ", eio);
                    canRead = false;
                }
                NUnit.Framework.Assert.AreEqual("Reads to BackupNode are allowed, but not CheckpointNode."
                                                , canRead, backup.IsRole(HdfsServerConstants.NamenodeRole.Backup));
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.WriteFile(fileSys, file3, replication
                                                                                );
                Org.Apache.Hadoop.Hdfs.Server.Namenode.TestCheckpoint.CheckFile(fileSys, file3, replication
                                                                                );
                // should also be on BN right away
                NUnit.Framework.Assert.IsTrue("file3 does not exist on BackupNode", op != HdfsServerConstants.StartupOption
                                              .Backup || backup.GetNamesystem().GetFileInfo(file3.ToUri().GetPath(), false) !=
                                              null);
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode:", e);
                throw new Exception(e);
            }
            finally
            {
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(ImmutableList.Of(bnCurDir, nnCurDir
                                                                             ), ImmutableSet.Of <string>("VERSION"));
            try
            {
                //
                // Restart cluster and verify that file2 exists and
                // file1 does not exist.
                //
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
                fileSys = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                // verify that file2 exists
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
            }
            catch (IOException e)
            {
                Log.Error("Error in TestBackupNode: ", e);
                NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage(), false);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
Code Example #16
File: CreateEditsLog.cs Project: orf53975/hadoop.net
        /// <param name="args">arguments</param>
        /// <exception cref="System.IO.IOException"></exception>
        public static void Main(string[] args)
        {
            long  startingBlockId  = 1;
            int   numFiles         = 0;
            short replication      = 1;
            int   numBlocksPerFile = 0;
            long  blockSize        = 10;

            if (args.Length == 0)
            {
                PrintUsageExit();
            }
            for (int i = 0; i < args.Length; i++)
            {
                // parse command line
                if (args[i].Equals("-h"))
                {
                    PrintUsageExit();
                }
                if (args[i].Equals("-f"))
                {
                    if (i + 3 >= args.Length || args[i + 1].StartsWith("-") || args[i + 2].StartsWith
                            ("-") || args[i + 3].StartsWith("-"))
                    {
                        PrintUsageExit("Missing num files, starting block and/or number of blocks");
                    }
                    numFiles         = System.Convert.ToInt32(args[++i]);
                    startingBlockId  = System.Convert.ToInt32(args[++i]);
                    numBlocksPerFile = System.Convert.ToInt32(args[++i]);
                    if (numFiles <= 0 || numBlocksPerFile <= 0)
                    {
                        PrintUsageExit("numFiles and numBlocksPerFile most be greater than 0");
                    }
                }
                else
                {
                    if (args[i].Equals("-l"))
                    {
                        if (i + 1 >= args.Length)
                        {
                            PrintUsageExit("Missing block length");
                        }
                        blockSize = long.Parse(args[++i]);
                    }
                    else
                    {
                        if (args[i].Equals("-r") || args[i + 1].StartsWith("-"))
                        {
                            if (i + 1 >= args.Length)
                            {
                                PrintUsageExit("Missing replication factor");
                            }
                            replication = short.ParseShort(args[++i]);
                        }
                        else
                        {
                            if (args[i].Equals("-d"))
                            {
                                if (i + 1 >= args.Length || args[i + 1].StartsWith("-"))
                                {
                                    PrintUsageExit("Missing edits logs directory");
                                }
                                edits_dir = args[++i];
                            }
                            else
                            {
                                PrintUsageExit();
                            }
                        }
                    }
                }
            }
            FilePath editsLogDir     = new FilePath(edits_dir);
            FilePath subStructureDir = new FilePath(edits_dir + "/" + Storage.StorageDirCurrent
                                                    );

            if (!editsLogDir.Exists())
            {
                if (!editsLogDir.Mkdir())
                {
                    System.Console.Out.WriteLine("cannot create " + edits_dir);
                    System.Environment.Exit(-1);
                }
            }
            if (!subStructureDir.Exists())
            {
                if (!subStructureDir.Mkdir())
                {
                    System.Console.Out.WriteLine("cannot create subdirs of " + edits_dir);
                    System.Environment.Exit(-1);
                }
            }
            FileNameGenerator nameGenerator = new FileNameGenerator(BasePath, 100);
            FSEditLog         editLog       = FSImageTestUtil.CreateStandaloneEditLog(editsLogDir);

            editLog.OpenForWrite();
            AddFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId, blockSize
                     , nameGenerator);
            editLog.LogSync();
            editLog.Close();
        }
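
For reference, a hedged example of driving this generator programmatically with the flags parsed above. The class name CreateEditsLog (taken from the file name), the argument values, and the output directory are all assumptions.

        // Hypothetical invocation: 100 files of 5 blocks each, block ids
        // starting at 1, 64 KB block length, written under a placeholder dir.
        public static void SketchCreateEditsLogInvocation()
        {
            CreateEditsLog.Main(new string[] { "-f", "100", "1", "5",
                                               "-l", "65536",
                                               "-d", "/tmp/edits-gen" });
        }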
Code Example #17
        public virtual void TestSaveNamespace()
        {
            DistributedFileSystem fs = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                string       renewer    = UserGroupInformation.GetLoginUser().GetUserName();
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token1 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token2 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                // Saving image without safe mode should fail
                DFSAdmin admin = new DFSAdmin(conf);
                string[] args  = new string[] { "-saveNamespace" };
                // verify that the edits file is NOT empty
                NameNode nn = cluster.GetNameNode();
                foreach (Storage.StorageDirectory sd in nn.GetFSImage().GetStorage().DirIterable(
                             null))
                {
                    FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd);
                    NUnit.Framework.Assert.IsTrue(log.IsInProgress());
                    log.ValidateLog();
                    long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
                    NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should have 5 transactions"
                                                    , 5, numTransactions);
                }
                // Saving image in safe mode should succeed
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                try
                {
                    admin.Run(args);
                }
                catch (Exception e)
                {
                    throw new IOException(e.Message);
                }
                // verify that the edits file is empty except for the START txn
                foreach (Storage.StorageDirectory sd_1 in nn.GetFSImage().GetStorage().DirIterable
                             (null))
                {
                    FileJournalManager.EditLogFile log = FSImageTestUtil.FindLatestEditsLog(sd_1);
                    NUnit.Framework.Assert.IsTrue(log.IsInProgress());
                    log.ValidateLog();
                    long numTransactions = (log.GetLastTxId() - log.GetFirstTxId()) + 1;
                    NUnit.Framework.Assert.AreEqual("In-progress log " + log + " should only have START txn"
                                                    , 1, numTransactions);
                }
                // restart cluster
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                //Should be able to renew & cancel the delegation token after cluster restart
                try
                {
                    RenewToken(token1);
                    RenewToken(token2);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
                namesystem = cluster.GetNamesystem();
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token3 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token4 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                // restart cluster again
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                namesystem = cluster.GetNamesystem();
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token5 = namesystem
                                                                                            .GetDelegationToken(new Text(renewer));
                try
                {
                    RenewToken(token1);
                    RenewToken(token2);
                    RenewToken(token3);
                    RenewToken(token4);
                    RenewToken(token5);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
                // restart cluster again
                cluster.Shutdown();
                cluster = null;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Format(false
                                                                                             ).Build();
                cluster.WaitActive();
                namesystem = cluster.GetNamesystem();
                try
                {
                    RenewToken(token1);
                    CancelToken(token1);
                    RenewToken(token2);
                    CancelToken(token2);
                    RenewToken(token3);
                    CancelToken(token3);
                    RenewToken(token4);
                    CancelToken(token4);
                    RenewToken(token5);
                    CancelToken(token5);
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Could not renew or cancel the token");
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #18
 internal virtual Storage.StorageDirectory MockStorageDir()
 {
     return(FSImageTestUtil.MockStorageDirectory(this.type, false, Sharpen.Collections.ToArray
                                                     (TestNNStorageRetentionManager.FilesToPaths(this.files), new string[0])));
 }
Code Example #19
        public virtual void TestStorageRestore()
        {
            int numDatanodes = 0;

            cluster = new MiniDFSCluster.Builder(config).NumDataNodes(numDatanodes).ManageNameDfsDirs
                          (false).Build();
            cluster.WaitActive();
            SecondaryNameNode secondary = new SecondaryNameNode(config);

            System.Console.Out.WriteLine("****testStorageRestore: Cluster and SNN started");
            PrintStorages(cluster.GetNameNode().GetFSImage());
            FileSystem fs   = cluster.GetFileSystem();
            Path       path = new Path("/", "test");

            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
            System.Console.Out.WriteLine("****testStorageRestore: dir 'test' created, invalidating storage..."
                                         );
            InvalidateStorage(cluster.GetNameNode().GetFSImage(), ImmutableSet.Of(path2, path3
                                                                                  ));
            PrintStorages(cluster.GetNameNode().GetFSImage());
            System.Console.Out.WriteLine("****testStorageRestore: storage invalidated");
            path = new Path("/", "test1");
            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
            System.Console.Out.WriteLine("****testStorageRestore: dir 'test1' created");
            // We did another edit, so the still-active directory at 'path1'
            // should now differ from the others
            FSImageTestUtil.AssertFileContentsDifferent(2, new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName
                                                                            (1)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(1)),
                                                        new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(1)));
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName
                                                                    (1)), new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(1)));
            System.Console.Out.WriteLine("****testStorageRestore: checkfiles(false) run");
            secondary.DoCheckpoint();
            // should enable storage...
            // We should have a checkpoint through txid 4 in the two image dirs
            // (txid=4 for BEGIN, mkdir, mkdir, END)
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetImageFileName
                                                                    (4)), new FilePath(path2, "current/" + NNStorage.GetImageFileName(4)));
            NUnit.Framework.Assert.IsFalse("Should not have any image in an edits-only directory"
                                           , new FilePath(path3, "current/" + NNStorage.GetImageFileName(4)).Exists());
            // Should have finalized logs in the directory that didn't fail
            NUnit.Framework.Assert.IsTrue("Should have finalized logs in the directory that didn't fail"
                                          , new FilePath(path1, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists
                                              ());
            // Should not have finalized logs in the failed directories
            NUnit.Framework.Assert.IsFalse("Should not have finalized logs in the failed directories"
                                           , new FilePath(path2, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists
                                               ());
            NUnit.Framework.Assert.IsFalse("Should not have finalized logs in the failed directories"
                                           , new FilePath(path3, "current/" + NNStorage.GetFinalizedEditsFileName(1, 4)).Exists
                                               ());
            // The new log segment should be in all of the directories.
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName
                                                                    (5)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(5)),
                                                   new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(5)));
            string md5BeforeEdit = FSImageTestUtil.GetFileMD5(new FilePath(path1, "current/"
                                                                           + NNStorage.GetInProgressEditsFileName(5)));

            // The original image should still be the previously failed image
            // directory after it got restored, since it's still useful for
            // a recovery!
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetImageFileName
                                                                    (0)), new FilePath(path2, "current/" + NNStorage.GetImageFileName(0)));
            // Do another edit to verify that all the logs are active.
            path = new Path("/", "test2");
            NUnit.Framework.Assert.IsTrue(fs.Mkdirs(path));
            // Logs should be changed by the edit.
            string md5AfterEdit = FSImageTestUtil.GetFileMD5(new FilePath(path1, "current/" +
                                                                          NNStorage.GetInProgressEditsFileName(5)));

            NUnit.Framework.Assert.IsFalse(md5BeforeEdit.Equals(md5AfterEdit));
            // And all logs should be changed.
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetInProgressEditsFileName
                                                                    (5)), new FilePath(path2, "current/" + NNStorage.GetInProgressEditsFileName(5)),
                                                   new FilePath(path3, "current/" + NNStorage.GetInProgressEditsFileName(5)));
            secondary.Shutdown();
            cluster.Shutdown();
            // All logs should be finalized by clean shutdown
            FSImageTestUtil.AssertFileContentsSame(new FilePath(path1, "current/" + NNStorage.GetFinalizedEditsFileName
                                                                    (5, 7)), new FilePath(path2, "current/" + NNStorage.GetFinalizedEditsFileName(5,
                                                                                                                                                  7)), new FilePath(path3, "current/" + NNStorage.GetFinalizedEditsFileName(5, 7))
                                                   );
        }
Code Example #20
 /// <summary>test</summary>
 private void PrintStorages(FSImage image)
 {
     FSImageTestUtil.LogStorageContents(Log, image.GetStorage());
 }
Code Example #21
 /// <exception cref="System.IO.IOException"/>
 private FSImageTransactionalStorageInspector Inspect(FilePath storageDir)
 {
     return(FSImageTestUtil.InspectStorageDirectory(new FilePath(storageDir, "current"
                                                                 ), NNStorage.NameNodeDirType.ImageAndEdits));
 }
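
A brief sketch (not from the source) of what a caller does with the returned inspector, patterned on Code Example #10; the storage directory argument is a placeholder for a root that contains a "current" subdirectory.

 // Illustrative only; "storageDir" is a placeholder.
 private void SketchInspect(FilePath storageDir)
 {
     FSImageTransactionalStorageInspector inspector = Inspect(storageDir);
     IList <FSImageStorageInspector.FSImageFile> latest = inspector.GetLatestImages();

     if (!latest.IsEmpty())
     {
         System.Console.Error.WriteLine("newest image txid: " + latest[0].txId);
     }
 }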
Code Example #22
        public virtual void TestNameEditsConfigs()
        {
            Path                     file1                  = new Path("TestNameEditsConfigs1");
            Path                     file2                  = new Path("TestNameEditsConfigs2");
            Path                     file3                  = new Path("TestNameEditsConfigs3");
            MiniDFSCluster           cluster                = null;
            SecondaryNameNode        secondary              = null;
            Configuration            conf                   = null;
            FileSystem               fileSys                = null;
            FilePath                 newNameDir             = new FilePath(base_dir, "name");
            FilePath                 newEditsDir            = new FilePath(base_dir, "edits");
            FilePath                 nameAndEdits           = new FilePath(base_dir, "name_and_edits");
            FilePath                 checkpointNameDir      = new FilePath(base_dir, "secondname");
            FilePath                 checkpointEditsDir     = new FilePath(base_dir, "secondedits");
            FilePath                 checkpointNameAndEdits = new FilePath(base_dir, "second_name_and_edits");
            ImmutableList <FilePath> allCurrentDirs         = ImmutableList.Of(new FilePath(nameAndEdits
                                                                                            , "current"), new FilePath(newNameDir, "current"), new FilePath(newEditsDir, "current"
                                                                                                                                                            ), new FilePath(checkpointNameAndEdits, "current"), new FilePath(checkpointNameDir
                                                                                                                                                                                                                             , "current"), new FilePath(checkpointEditsDir, "current"));
            ImmutableList <FilePath> imageCurrentDirs = ImmutableList.Of(new FilePath(nameAndEdits
                                                                                      , "current"), new FilePath(newNameDir, "current"), new FilePath(checkpointNameAndEdits
                                                                                                                                                      , "current"), new FilePath(checkpointNameDir, "current"));

            // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameAndEdits.GetPath
                         ());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointNameAndEdits.GetPath
                         ());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).ManageNameDfsDirs
                          (false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                WriteFile(fileSys, file1, replication);
                CheckFile(fileSys, file1, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
            conf = new HdfsConfiguration();
            NUnit.Framework.Assert.IsTrue(newNameDir.Mkdir());
            NUnit.Framework.Assert.IsTrue(newEditsDir.Mkdir());
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameAndEdits.GetPath() + "," + newNameDir
                     .GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, nameAndEdits.GetPath() + "," + newEditsDir
                     .GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath() +
                     "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath
                         () + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            // Manage our own dfs directories. Do not format.
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false
                                                                                         ).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file1));
                CheckFile(fileSys, file1, replication);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file2, replication);
                CheckFile(fileSys, file2, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            FSImageTestUtil.AssertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.Of("VERSION"
                                                                                            ));
            FSImageTestUtil.AssertSameNewestImage(imageCurrentDirs);
            // Now remove common directory both have and start namenode with
            // separate name and edits dirs
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, checkpointNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, checkpointEditsDir.GetPath
                         ());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Format(false
                                                                                             ).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file2));
                CheckFile(fileSys, file2, replication);
                CleanupFile(fileSys, file2);
                WriteFile(fileSys, file3, replication);
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            // No edit logs in new name dir
            CheckImageAndEditsFilesExistence(newNameDir, true, false);
            CheckImageAndEditsFilesExistence(newEditsDir, false, true);
            CheckImageAndEditsFilesExistence(checkpointNameDir, true, false);
            CheckImageAndEditsFilesExistence(checkpointEditsDir, false, true);
            // Add old name_and_edits dir. File system should not read image or edits
            // from old dir
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(nameAndEdits, "current")));
            NUnit.Framework.Assert.IsTrue(FileUtil.FullyDelete(new FilePath(checkpointNameAndEdits, "current")));
            conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey,
                     nameAndEdits.GetPath() + "," + newNameDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
                     nameAndEdits + "," + newEditsDir.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey,
                     checkpointNameDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey,
                     checkpointEditsDir.GetPath() + "," + checkpointNameAndEdits.GetPath());
            replication = (short)conf.GetInt(DFSConfigKeys.DfsReplicationKey, 3);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes)
                          .Format(false).ManageNameDfsDirs(false).Build();
            cluster.WaitActive();
            secondary = StartSecondaryNameNode(conf);
            fileSys   = cluster.GetFileSystem();
            try
            {
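                // Only file3 should remain: file1 and file2 were deleted in earlier stages, and
                // the re-added name_and_edits dirs had their "current" contents wiped above.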
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file1));
                NUnit.Framework.Assert.IsTrue(!fileSys.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fileSys.Exists(file3));
                CheckFile(fileSys, file3, replication);
                secondary.DoCheckpoint();
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
                secondary.Shutdown();
            }
            CheckImageAndEditsFilesExistence(nameAndEdits, true, true);
            CheckImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
        }
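As a follow-up to the example above, here is a rough standalone sketch of the same directory-layout configuration; the paths, the single DataNode, and the variable names are illustrative assumptions rather than values taken from the test:

            // Minimal sketch (hypothetical paths): separate fsimage and edit-log locations for
            // the NameNode, plus separate checkpoint directories for the secondary.
            Configuration conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, "/data/dfs/name");
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, "/data/dfs/edits");
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointDirKey, "/data/dfs/namesecondary");
            conf.Set(DFSConfigKeys.DfsNamenodeCheckpointEditsDirKey, "/data/dfs/editssecondary");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                         .NumDataNodes(1)
                                         .ManageNameDfsDirs(false)   // the caller owns the directories above
                                         .Build();
            cluster.WaitActive();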
Code example #23
File: TestBackupNode.cs Project: orf53975/hadoop.net
        public virtual void TestBackupNodeTailsEdits()
        {
            Configuration conf = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
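            // Standby reads are enabled so that the BackupNode's namesystem can be queried
            // directly later in the test (e.g. GetFileInfo on "/edit-while-bn-down").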
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                backup  = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                BackupImage bnImage = (BackupImage)backup.GetFSImage();
                TestBNInSync(cluster, backup, 1);
                // Force a roll -- BN should roll with NN.
                NameNode          nn    = cluster.GetNameNode();
                NamenodeProtocols nnRpc = nn.GetRpcServer();
                nnRpc.RollEditLog();
                NUnit.Framework.Assert.AreEqual(bnImage.GetEditLog().GetCurSegmentTxId(),
                                                nn.GetFSImage().GetEditLog().GetCurSegmentTxId());
                // BN should stay in sync after roll
                TestBNInSync(cluster, backup, 2);
                long nnImageBefore = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                // BN checkpoint
                backup.DoCheckpoint();
                // NN should have received a new image
                long nnImageAfter = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                NUnit.Framework.Assert.IsTrue("nn should have received new checkpoint. before: "
                                              + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
                // BN should stay in sync after checkpoint
                TestBNInSync(cluster, backup, 3);
                // Stop BN
                Storage.StorageDirectory sd = bnImage.GetStorage().GetStorageDir(0);
                backup.Stop();
                backup = null;
                // When shutting down the BN, it shouldn't finalize logs that are
                // still open on the NN
                FileJournalManager.EditLogFile editsLog = FSImageTestUtil.FindLatestEditsLog(sd);
                NUnit.Framework.Assert.AreEqual(editsLog.GetFirstTxId(),
                                                nn.GetFSImage().GetEditLog().GetCurSegmentTxId());
                NUnit.Framework.Assert.IsTrue("Should not have finalized " + editsLog,
                                              editsLog.IsInProgress());
                // do some edits
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(new Path("/edit-while-bn-down")));
                // start a new backup node
                backup = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                TestBNInSync(cluster, backup, 4);
                NUnit.Framework.Assert.IsNotNull(backup.GetNamesystem().GetFileInfo("/edit-while-bn-down", false));
            }
            finally
            {
                Log.Info("Shutting down...");
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            AssertStorageDirsMatch(cluster.GetNameNode(), backup);
        }
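The checkpoint check in the middle of this test can be read as a small reusable pattern. The helper below is a hypothetical distillation (the name AssertCheckpointAdvances does not appear in the original code); it only uses calls shown in the example above:

        // Hypothetical helper: trigger a BackupNode checkpoint and verify that the
        // NameNode's most recent checkpoint transaction id has advanced.
        internal static void AssertCheckpointAdvances(NameNode nn, BackupNode backup)
        {
            long before = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
            backup.DoCheckpoint();
            long after = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
            NUnit.Framework.Assert.IsTrue("nn should have received new checkpoint. before: "
                                          + before + " after: " + after, after > before);
        }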
Code example #24
        /// <exception cref="System.IO.IOException"/>
        internal static void TestNameNodeRecoveryImpl(TestNameNodeRecovery.Corruptor corruptor
                                                      , bool finalize)
        {
            string TestPath     = "/test/path/dir";
            string TestPath2    = "/second/dir";
            bool   needRecovery = corruptor.NeedRecovery(finalize);
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            SetupRecoveryTestConf(conf);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;

            Storage.StorageDirectory sd = null;
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                              .ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                if (!finalize)
                {
                    // Normally, the in-progress edit log would be finalized by
                    // FSEditLog#endCurrentLogSegment.  For testing purposes, we
                    // disable that here.
                    FSEditLog spyLog = Org.Mockito.Mockito.Spy(cluster.GetNameNode().GetFSImage().GetEditLog());
                    Org.Mockito.Mockito.DoNothing().When(spyLog).EndCurrentLogSegment(true);
                    DFSTestUtil.SetEditLogForTesting(cluster.GetNamesystem(), spyLog);
                }
                fileSys = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                FSImage      fsimage    = namesystem.GetFSImage();
                fileSys.Mkdirs(new Path(TestPath));
                fileSys.Mkdirs(new Path(TestPath2));
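                // Remember the edits storage directory so the newest edit log file can be
                // located (and corrupted) after the cluster is shut down.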
                sd = fsimage.GetStorage().DirIterator(NNStorage.NameNodeDirType.Edits).Next();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            FilePath editFile = FSImageTestUtil.FindLatestEditsLog(sd).GetFile();

            NUnit.Framework.Assert.IsTrue("Should exist: " + editFile, editFile.Exists());
            // Corrupt the edit log
            Log.Info("corrupting edit log file '" + editFile + "'");
            corruptor.Corrupt(editFile);
            // If needRecovery == true, make sure that we can't start the
            // cluster normally before recovery
            cluster = null;
            try
            {
                Log.Debug("trying to start normally (this should fail)...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                              .EnableManagedDfsDirsRedundancy(false).Format(false).Build();
                cluster.WaitActive();
                cluster.Shutdown();
                if (needRecovery)
                {
                    NUnit.Framework.Assert.Fail("expected the corrupted edit log to prevent normal startup"
                                                );
                }
            }
            catch (IOException e)
            {
                if (!needRecovery)
                {
                    Log.Error("Got unexpected failure with " + corruptor.GetName() + corruptor, e);
                    NUnit.Framework.Assert.Fail("got unexpected exception " + e.Message);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            // Perform NameNode recovery.
            // Even if there was nothing wrong previously (needRecovery == false),
            // this should still work fine.
            cluster = null;
            try
            {
                Log.Debug("running recovery...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                              .EnableManagedDfsDirsRedundancy(false).Format(false)
                              .StartupOption(recoverStartOpt).Build();
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.Fail("caught IOException while trying to recover. " + "message was "
                                            + e.Message + "\nstack trace\n" + StringUtils.StringifyException(e));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            // Make sure that we can start the cluster normally after recovery
            cluster = null;
            try
            {
                Log.Debug("starting cluster normally after recovery...");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                              .EnableManagedDfsDirsRedundancy(false).Format(false).Build();
                Log.Debug("successfully recovered the " + corruptor.GetName() + " corrupted edit log");
                cluster.WaitActive();
                NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Exists(new Path(TestPath)));
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.Fail("failed to recover.  Error message: " + e.Message);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
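recoverStartOpt is used above but not defined in this listing. Presumably, as in the upstream Java test, it is the RECOVER startup option with forcing enabled so that recovery runs without interactive prompts. A sketch of that assumption (the member names Recover, SetForce, and MetaRecoveryContext.ForceAll are assumed here, following the naming conventions of the rest of this code):

        // Assumption: build the recovery startup option non-interactively, as a static
        // field of the test class (this snippet is not part of the listing above).
        internal static readonly HdfsServerConstants.StartupOption recoverStartOpt =
            HdfsServerConstants.StartupOption.Recover;

        static TestNameNodeRecovery()
        {
            recoverStartOpt.SetForce(MetaRecoveryContext.ForceAll);
        }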
Code example #25
        /// <exception cref="System.Exception"/>
        public virtual void TestCancelSaveNamespace()
        {
            Configuration conf = GetConf();

            NameNode.InitMetrics(conf, HdfsServerConstants.NamenodeRole.Namenode);
            DFSTestUtil.FormatNameNode(conf);
            FSNamesystem fsn = FSNamesystem.LoadFromDisk(conf);
            // Replace the FSImage with a spy
            FSImage   image   = fsn.GetFSImage();
            NNStorage storage = image.GetStorage();

            storage.Close();
            // unlock any directories that FSNamesystem's initialization may have locked
            storage.SetStorageDirectories(FSNamesystem.GetNamespaceDirs(conf),
                                          FSNamesystem.GetNamespaceEditsDirs(conf));
            FSNamesystem spyFsn   = Org.Mockito.Mockito.Spy(fsn);
            FSNamesystem finalFsn = spyFsn;

            GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
            BlockIdManager bid = Org.Mockito.Mockito.Spy(spyFsn.GetBlockIdManager());

            Whitebox.SetInternalState(finalFsn, "blockIdManager", bid);
            Org.Mockito.Mockito.DoAnswer(delayer).When(bid).GetGenerationStampV2();
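            // With this stub in place, saveNamespace blocks inside GetGenerationStampV2 until
            // the DelayAnswer is released, giving the test a window in which to cancel it.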
            ExecutorService pool = Executors.NewFixedThreadPool(2);

            try
            {
                DoAnEdit(fsn, 1);
                Canceler canceler = new Canceler();
                // Save namespace
                fsn.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                try
                {
                    Future <Void> saverFuture = pool.Submit(new _Callable_561(image, finalFsn, canceler
                                                                              ));
                    // Wait until saveNamespace calls getGenerationStamp
                    delayer.WaitForCall();
                    // then cancel the saveNamespace
                    Future <Void> cancelFuture = pool.Submit(new _Callable_572(canceler));
                    // give the cancel call time to run
                    Sharpen.Thread.Sleep(500);
                    // allow saveNamespace to proceed - it should check the cancel flag after
                    // this point and throw an exception
                    delayer.Proceed();
                    cancelFuture.Get();
                    saverFuture.Get();
                    NUnit.Framework.Assert.Fail("saveNamespace did not fail even though cancelled!");
                }
                catch (Exception t)
                {
                    GenericTestUtils.AssertExceptionContains("SaveNamespaceCancelledException", t);
                }
                Log.Info("Successfully cancelled a saveNamespace");
                // Check that we have only the original image and not any
                // cruft left over from half-finished images
                FSImageTestUtil.LogStorageContents(Log, storage);
                foreach (Storage.StorageDirectory sd in storage.DirIterable(null))
                {
                    FilePath curDir = sd.GetCurrentDir();
                    GenericTestUtils.AssertGlobEquals(curDir, "fsimage_.*",
                                                      NNStorage.GetImageFileName(0),
                                                      NNStorage.GetImageFileName(0) + MD5FileUtils.Md5Suffix);
                }
            }
            finally
            {
                fsn.Close();
            }
        }
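The _Callable_561 and _Callable_572 names are artifacts of converting anonymous Java inner classes to C#. Presumably the first wraps the saveNamespace call on the spied FSImage, passing the shared Canceler, and the second simply cancels it; the cancelling side would reduce to something like the comment below (the reason text is illustrative, not taken from the source):

            // canceler.Cancel("cancelled by TestCancelSaveNamespace");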