        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestQueryAfterRestart()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                // start rolling upgrade
                dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
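                // QueryForPreparation polls the upgrade status until the prepare step has finished creating the rollback image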
                QueryForPreparation(dfs);
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                dfs.SaveNamespace();
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.RestartNameNodes();
                dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Query);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #2
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestLogAndRestart()
        {
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, MiniDFSCluster.GetBaseDirectory() +
                     "/TestNNWithQJM/image");
            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
                     mjc.GetQuorumJournalURI("myjournal").ToString());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                                         .ManageNameDfsDirs(false).Build();

            try
            {
                cluster.GetFileSystem().Mkdirs(TestPath);
                // Restart the NN and make sure the edit was persisted
                // and loaded again
                cluster.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Exists(TestPath));
                cluster.GetFileSystem().Mkdirs(TestPath2);
                // Restart the NN again and make sure both edits are persisted.
                cluster.RestartNameNode();
                NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Exists(TestPath));
                NUnit.Framework.Assert.IsTrue(cluster.GetFileSystem().Exists(TestPath2));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
        public virtual void TestDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(1).Build();

            try
            {
                Path testFile = CreateFile();
                SetUpForDoGetTest(cluster, testFile);
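                // Make the mocked servlet response throw from GetOutputStream so DoGet must close the DFSInputStream/client it opened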
                Org.Mockito.Mockito.DoThrow(new IOException()).When(mockHttpServletResponse).GetOutputStream();
                DFSInputStream fsMock = Org.Mockito.Mockito.Mock<DFSInputStream>();
                Org.Mockito.Mockito.DoReturn(fsMock).When(clientMock).Open(testFile.ToString());
                Org.Mockito.Mockito.DoReturn(Sharpen.Extensions.ValueOf(4)).When(fsMock).GetFileLength();
                try
                {
                    sfile.DoGet(mockHttpServletRequest, mockHttpServletResponse);
                    NUnit.Framework.Assert.Fail("Not throwing the IOException");
                }
                catch (IOException)
                {
                    Org.Mockito.Mockito.Verify(clientMock, Org.Mockito.Mockito.AtLeastOnce()).Close();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
        public virtual void TestRaceWhileNNStartup()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                NameNode          namenode  = cluster.GetNameNode();
                NamenodeProtocols rpcServer = namenode.GetRpcServer();
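                // Null out the rpcServer field to mimic the window during NameNode startup before the RPC server exists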
                Whitebox.SetInternalState(namenode, "rpcServer", null);
                Path       foo     = new Path("/foo");
                FileSystem webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                try
                {
                    webHdfs.Mkdirs(foo);
                    NUnit.Framework.Assert.Fail("Expected RetriableException");
                }
                catch (RetriableException e)
                {
                    GenericTestUtils.AssertExceptionContains("Namenode is in startup mode", e);
                }
                Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestRollbackCommand()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            Path           foo     = new Path("/foo");
            Path           bar     = new Path("/bar");

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade", "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                // check NNStorage
                NNStorage storage = cluster.GetNamesystem().GetFSImage().GetStorage();
                // (startSegment, mkdir, endSegment)
                CheckNNStorage(storage, 3, -1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            NameNode nn = null;

            try
            {
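                // Start the NameNode with the rollback option; edits made after the rolling upgrade was prepared (the /bar mkdir) should be discarded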
                nn = NameNode.CreateNameNode(new string[] { "-rollingUpgrade", "rollback" }, conf);
                // make sure /foo is still there, but /bar is not
                INode fooNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(foo.ToString());
                NUnit.Framework.Assert.IsNotNull(fooNode);
                INode barNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(bar.ToString());
                NUnit.Framework.Assert.IsNull(barNode);
                // check the details of NNStorage
                NNStorage storage = nn.GetNamesystem().GetFSImage().GetStorage();
                // (startSegment, upgrade marker, mkdir, endSegment)
                CheckNNStorage(storage, 3, 7);
            }
            finally
            {
                if (nn != null)
                {
                    nn.Stop();
                    nn.Join();
                }
            }
        }
Example #6
        public virtual void TestLease()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                FileSystem fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                Path             a     = new Path(dir, "a");
                Path             b     = new Path(dir, "b");
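                // Creating a file for write should acquire a lease; closing the stream should release it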
                DataOutputStream a_out = fs.Create(a);
                a_out.WriteBytes("something");
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, a));
                NUnit.Framework.Assert.IsTrue(!HasLease(cluster, b));
                DataOutputStream b_out = fs.Create(b);
                b_out.WriteBytes("something");
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, a));
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, b));
                a_out.Close();
                b_out.Close();
                NUnit.Framework.Assert.IsTrue(!HasLease(cluster, a));
                NUnit.Framework.Assert.IsTrue(!HasLease(cluster, b));
                fs.Delete(dir, true);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #7
        /// <summary>
        /// Test for catching the "no datanode" IOException when creating a file
        /// while no datanode is running.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestCreateWithNoDN()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
                cluster.WaitActive();
                FileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                fs.Create(new Path("/testnodatanode"));
                NUnit.Framework.Assert.Fail("No exception was thrown");
            }
            catch (IOException ex)
            {
                GenericTestUtils.AssertExceptionContains("Failed to find datanode", ex);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestNfsUpgrade()
        {
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).Build();
                FilePath sharedDir = new FilePath(cluster.GetSharedEditsDir(0, 1));
                // No upgrade is in progress at the moment.
                CheckClusterPreviousDirExistence(cluster, false);
                AssertCTimesEqual(cluster);
                CheckPreviousDirExistence(sharedDir, false);
                // Transition NN0 to active and do some FS ops.
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo1")));
                // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
                // flag.
                cluster.ShutdownNameNode(1);
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Upgrade);
                cluster.RestartNameNode(0, false);
                CheckNnPreviousDirExistence(cluster, 0, true);
                CheckNnPreviousDirExistence(cluster, 1, false);
                CheckPreviousDirExistence(sharedDir, true);
                // NN0 should come up in the active state when given the -upgrade option,
                // so no need to transition it to active.
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo2")));
                // Restart NN0 without the -upgrade flag, to make sure that works.
                cluster.GetNameNodeInfos()[0].SetStartOpt(HdfsServerConstants.StartupOption.Regular);
                cluster.RestartNameNode(0, false);
                // Make sure we can still do FS ops after upgrading.
                cluster.TransitionToActive(0);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo3")));
                // Now bootstrap the standby with the upgraded info.
                int rc = BootstrapStandby.Run(new string[] { "-force" }, cluster.GetConfiguration(1));
                NUnit.Framework.Assert.AreEqual(0, rc);
                // Now restart NN1 and make sure that we can do ops against that as well.
                cluster.RestartNameNode(1);
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(new Path("/foo4")));
                AssertCTimesEqual(cluster);
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestStartingWithUpgradeInProgressSucceeds()
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).Build();
                // Simulate an upgrade having started.
                for (int i = 0; i < 2; i++)
                {
                    foreach (URI uri in cluster.GetNameDirs(i))
                    {
                        FilePath prevTmp = new FilePath(new FilePath(uri), Storage.StorageTmpPrevious);
                        Log.Info("creating previous tmp dir: " + prevTmp);
                        NUnit.Framework.Assert.IsTrue(prevTmp.Mkdirs());
                    }
                }
                cluster.RestartNameNodes();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        /// <summary>Bring up two clusters and assert that they are in different directories.</summary>
        /// <exception cref="System.Exception">on a failure</exception>
        public virtual void TestDualClusters()
        {
            FilePath      testDataCluster2 = new FilePath(testDataPath, Cluster2);
            FilePath      testDataCluster3 = new FilePath(testDataPath, Cluster3);
            Configuration conf             = new HdfsConfiguration();
            string        c2Path           = testDataCluster2.GetAbsolutePath();

            conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, c2Path);
            MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).Build();
            MiniDFSCluster cluster3 = null;

            try
            {
                string dataDir2 = cluster2.GetDataDirectory();
                NUnit.Framework.Assert.AreEqual(new FilePath(c2Path + "/data"), new FilePath(dataDir2));
                //change the data dir
                conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, testDataCluster3.GetAbsolutePath());
                MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
                cluster3 = builder.Build();
                string dataDir3 = cluster3.GetDataDirectory();
                NUnit.Framework.Assert.IsTrue("Clusters are bound to the same directory: " + dataDir2
                                              , !dataDir2.Equals(dataDir3));
            }
            finally
            {
                MiniDFSCluster.ShutdownCluster(cluster3);
                MiniDFSCluster.ShutdownCluster(cluster2);
            }
        }
Example #11
        public virtual void TestMkdirRpcNonCanonicalPath()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();

            try
            {
                NamenodeProtocols nnrpc = cluster.GetNameNodeRpc();
                foreach (string pathStr in NonCanonicalPaths)
                {
                    try
                    {
                        nnrpc.Mkdirs(pathStr, new FsPermission((short)0x1ed), true);
                        NUnit.Framework.Assert.Fail("Did not fail when called with a non-canonicalized path: "
                                                    + pathStr);
                    }
                    catch (InvalidPathException)
                    {
                        // expected
                    }
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #12
        /// <summary>Test the update of NeededReplications for the appended block</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateNeededReplicationsForAppendedFile()
        {
            Configuration         conf       = new Configuration();
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file.
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Append to the file.
                FSDataOutputStream append = fileSystem.Append(f);
                append.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                append.Close();
                // Start a new datanode
                cluster.StartDataNodes(conf, 1, true, null, null);
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
Example #13
        public virtual void Test()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem fsn        = cluster.GetNameNode().namesystem;
                MBeanServer  mbs        = ManagementFactory.GetPlatformMBeanServer();
                ObjectName   mxbeanName = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
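                // The FSNamesystemState MXBean exposes snapshot statistics as a JSON string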
                string snapshotStats = (string)(mbs.GetAttribute(mxbeanName, "SnapshotStats"));
                IDictionary<string, object> stat = (IDictionary<string, object>)JSON.Parse(snapshotStats);
                NUnit.Framework.Assert.IsTrue(stat.Contains("SnapshottableDirectories") &&
                                              (long)stat["SnapshottableDirectories"] == fsn.GetNumSnapshottableDirs());
                NUnit.Framework.Assert.IsTrue(stat.Contains("Snapshots") &&
                                              (long)stat["Snapshots"] == fsn.GetNumSnapshots());
                object pendingDeletionBlocks = mbs.GetAttribute(mxbeanName, "PendingDeletionBlocks");
                NUnit.Framework.Assert.IsNotNull(pendingDeletionBlocks);
                NUnit.Framework.Assert.IsTrue(pendingDeletionBlocks is long);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #14
        public virtual void TestWithFSNamesystemWriteLock()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;
            FSNamesystem   fsn     = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                fsn = cluster.GetNameNode().namesystem;
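                // Hold the namesystem write lock in this thread; JMX calls from the client thread should still complete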
                fsn.WriteLock();
                TestFSNamesystemMBean.MBeanClient client = new TestFSNamesystemMBean.MBeanClient();
                client.Start();
                client.Join(20000);
                NUnit.Framework.Assert.IsTrue("JMX calls are blocked when FSNamesystem's writerlock"
                                              + "is owned by another thread", client.succeeded);
                client.Interrupt();
            }
            finally
            {
                if (fsn != null && fsn.HasWriteLock())
                {
                    fsn.WriteUnlock();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #15
        public virtual void TestHSyncBlockBoundary()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            Path           p       = new Path("/testHSyncBlockBoundary/foo");
            int            len     = 1 << 16;

            byte[]             fileContents = AppendTestUtil.InitBuffer(len);
            FSDataOutputStream @out         = fs.Create(p, FsPermission.GetDefault(),
                                                        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
                                                        4096, (short)1, len, null);

            // fill exactly one block (tests the SYNC_BLOCK case) and flush
            @out.Write(fileContents, 0, len);
            @out.Hflush();
            // the full block should have caused a sync
            CheckSyncMetric(cluster, 1);
            @out.Hsync();
            // hsync on the same, already-synced block should not bump the metric
            CheckSyncMetric(cluster, 1);
            // write one more byte and sync again
            @out.Write(1);
            @out.Hsync();
            CheckSyncMetric(cluster, 2);
            @out.Close();
            CheckSyncMetric(cluster, 3);
            cluster.Shutdown();
        }
Example #16
        public virtual void TestMultipleNamespacesConfigured()
        {
            Configuration     conf    = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster    cluster = null;
            WebHdfsFileSystem fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(1).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
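                // Configure a second, fake HA namespace; resolving the real logical name should still yield both of its NN addresses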
                DFSTestUtil.AddHAConfiguration(conf, LogicalName + "remote");
                DFSTestUtil.SetFakeHttpAddresses(conf, LogicalName + "remote");
                fs = (WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf);
                NUnit.Framework.Assert.AreEqual(2, fs.GetResolvedNNAddr().Length);
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestFinalizedReplicas()
        {
            // bring up a cluster of 3
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();

            try
            {
                // test finalized replicas
                string      TopDir = "/test";
                DFSTestUtil util   = new DFSTestUtil.Builder().SetName("TestDatanodeRestart")
                                         .SetNumFiles(2).Build();
                util.CreateFiles(fs, TopDir, (short)3);
                util.WaitReplication(fs, TopDir, (short)3);
                util.CheckFiles(fs, TopDir);
                cluster.RestartDataNodes();
                cluster.WaitActive();
                util.CheckFiles(fs, TopDir);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #18
        public virtual void TestHA()
        {
            Configuration  conf    = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                fs = FileSystem.Get(WebhdfsUri, conf);
                cluster.TransitionToActive(0);
                Path dir = new Path("/test");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
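                // Shut down the active NN and fail over; WebHDFS should keep working against the new active NN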
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                Path dir2 = new Path("/test2");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir2));
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #19
        /// <exception cref="System.Exception"/>
        public virtual void TestLargeDirectory()
        {
            Configuration conf      = WebHdfsTestUtil.CreateConf();
            int           listLimit = 2;

            // force small chunking of directory listing
            conf.SetInt(DFSConfigKeys.DfsListLimit, listLimit);
            // force paths to be only owner-accessible to ensure ugi isn't changing
            // during listStatus
            FsPermission.SetUMask(conf, new FsPermission((short)0x3f));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                cluster.WaitActive();
                WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme)
                    .SetPermission(new Path("/"), new FsPermission(FsAction.All, FsAction.All, FsAction.All));
                // make the NN treat the login user as a non-superuser so we can
                // tell whether listStatus runs as the correct user
                UserGroupInformation.SetLoginUser(UserGroupInformation.CreateUserForTesting("not-superuser",
                                                                                            new string[] { "not-supergroup" }));
                UserGroupInformation.CreateUserForTesting("me", new string[] { "my-group" })
                    .DoAs(new _PrivilegedExceptionAction_263(conf, listLimit));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #20
        public virtual void TestSecureHAToken()
        {
            Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
            MiniDFSCluster    cluster = null;
            WebHdfsFileSystem fs      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                fs = Org.Mockito.Mockito.Spy((WebHdfsFileSystem)FileSystem.Get(WebhdfsUri, conf));
                FileSystemTestHelper.AddFileSystemForTesting(WebhdfsUri, conf, fs);
                cluster.TransitionToActive(0);
                Org.Apache.Hadoop.Security.Token.Token<object> token =
                    ((Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)fs.GetDelegationToken(null));
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
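                // After failover, token renew/cancel should be routed to the new active NN via the spied WebHdfsFileSystem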
                token.Renew(conf);
                token.Cancel(conf);
                Org.Mockito.Mockito.Verify(fs).RenewDelegationToken(token);
                Org.Mockito.Mockito.Verify(fs).CancelDelegationToken(token);
            }
            finally
            {
                IOUtils.Cleanup(null, fs);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #21
        public virtual void TestWebHdfsRenameSnapshot()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs     = cluster.GetFileSystem();
                FileSystem            webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                Path foo = new Path("/foo");
                dfs.Mkdirs(foo);
                dfs.AllowSnapshot(foo);
                webHdfs.CreateSnapshot(foo, "s1");
                Path s1path = SnapshotTestHelper.GetSnapshotRoot(foo, "s1");
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(s1path));
                // rename s1 to s2
                webHdfs.RenameSnapshot(foo, "s1", "s2");
                NUnit.Framework.Assert.IsFalse(webHdfs.Exists(s1path));
                Path s2path = SnapshotTestHelper.GetSnapshotRoot(foo, "s2");
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(s2path));
                webHdfs.DeleteSnapshot(foo, "s2");
                NUnit.Framework.Assert.IsFalse(webHdfs.Exists(s2path));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestBlockIdGeneration()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // Create a file that is 10 blocks long.
                Path path = new Path("testBlockIdGeneration.dat");
                DFSTestUtil.CreateFile(fs, path, IoSize, BlockSize * 10, BlockSize, Replication,
                                       Seed);
                IList<LocatedBlock> blocks = DFSTestUtil.GetAllBlocks(fs, path);
                Log.Info("Block0 id is " + blocks[0].GetBlock().GetBlockId());
                long nextBlockExpectedId = blocks[0].GetBlock().GetBlockId() + 1;
                // Ensure that the block IDs are sequentially increasing.
                for (int i = 1; i < blocks.Count; ++i)
                {
                    long nextBlockId = blocks[i].GetBlock().GetBlockId();
                    Log.Info("Block" + i + " id is " + nextBlockId);
                    Assert.AssertThat(nextBlockId, CoreMatchers.Is(nextBlockExpectedId));
                    ++nextBlockExpectedId;
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #23
        public virtual void TestDTInInsecureCluster()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                FileSystem webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                webHdfs.GetDelegationToken(null);
                NUnit.Framework.Assert.Fail("No exception is thrown.");
            }
            catch (AccessControlException ace)
            {
                NUnit.Framework.Assert.IsTrue(ace.Message.StartsWith(WebHdfsFileSystem.CantFallbackToInsecureMsg));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        /// <exception cref="System.IO.IOException"/>
        internal static void Setrep(int fromREP, int toREP, bool simulatedStorage)
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsReplicationKey, string.Empty + fromREP);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build();
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            try
            {
                Path root = TestDFSShell.Mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP));
                Path f = TestDFSShell.WriteFile(fs, new Path(root, "foo"));
                {
                    // Verify setrep for changing replication
                    string[] args = new string[] { "-setrep", "-w", string.Empty + toREP, string.Empty + f };
                    FsShell shell = new FsShell();
                    shell.SetConf(conf);
                    try
                    {
                        NUnit.Framework.Assert.AreEqual(0, shell.Run(args));
                    }
                    catch (Exception e)
                    {
                        NUnit.Framework.Assert.IsTrue("-setrep " + e, false);
                    }
                }
                //get fs again since the old one may be closed
                fs = cluster.GetFileSystem();
                FileStatus file = fs.GetFileStatus(f);
                long       len  = file.GetLen();
                foreach (BlockLocation locations in fs.GetFileBlockLocations(file, 0, len))
                {
                    NUnit.Framework.Assert.IsTrue(locations.GetHosts().Length == toREP);
                }
                TestDFSShell.Show("done setrep waiting: " + root);
            }
            finally
            {
                try
                {
                    fs.Close();
                }
                catch (Exception)
                {
                }
                cluster.Shutdown();
            }
        }
Example #25
        /// <exception cref="System.Exception"/>
        public virtual void TestMismatchedNNIsRejected()
        {
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, MiniDFSCluster.GetBaseDirectory() +
                     "/TestNNWithQJM/image");
            string defaultEditsDir = conf.Get(DFSConfigKeys.DfsNamenodeEditsDirKey);

            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey,
                     mjc.GetQuorumJournalURI("myjournal").ToString());
            // Start a NN, so the storage is formatted -- both on-disk
            // and QJM.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
                                         .ManageNameDfsDirs(false).Build();

            cluster.Shutdown();
            // Reformat just the on-disk portion
            Configuration onDiskOnly = new Configuration(conf);

            onDiskOnly.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, defaultEditsDir);
            NameNode.Format(onDiskOnly);
            // Start the NN - should fail because the JNs are still formatted
            // with the old namespace ID.
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).ManageNameDfsDirs(false)
                              .Format(false).Build();
                NUnit.Framework.Assert.Fail("New NN with different namespace should have been rejected");
            }
            catch (IOException ioe)
            {
                GenericTestUtils.AssertExceptionContains("Unable to start log segment 1: too few journals"
                                                         , ioe);
            }
        }
Example #26
        public virtual void TestSequenceFileSync()
        {
            Configuration      conf    = new HdfsConfiguration();
            MiniDFSCluster     cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem         fs      = cluster.GetFileSystem();
            Path               p       = new Path("/testSequenceFileSync/foo");
            int                len     = 1 << 16;
            FSDataOutputStream @out    = fs.Create(p, FsPermission.GetDefault(),
                                                   EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
                                                   4096, (short)1, len, null);

            SequenceFile.Writer w = SequenceFile.CreateWriter(new Configuration(),
                                                              SequenceFile.Writer.Stream(@out),
                                                              SequenceFile.Writer.KeyClass(typeof(RandomDatum)),
                                                              SequenceFile.Writer.ValueClass(typeof(RandomDatum)),
                                                              SequenceFile.Writer.Compression(SequenceFile.CompressionType.None, new DefaultCodec()));
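            // hflush alone should not trigger a datanode sync; hsync should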
            w.Hflush();
            CheckSyncMetric(cluster, 0);
            w.Hsync();
            CheckSyncMetric(cluster, 1);
            int seed = new Random().Next();

            RandomDatum.Generator generator = new RandomDatum.Generator(seed);
            generator.Next();
            w.Append(generator.GetKey(), generator.GetValue());
            w.Hsync();
            CheckSyncMetric(cluster, 2);
            w.Close();
            CheckSyncMetric(cluster, 2);
            @out.Close();
            CheckSyncMetric(cluster, 3);
            cluster.Shutdown();
        }
Example #27
        /// <exception cref="System.Exception"/>
        public virtual void DoEncryptionTest(int numMappers, int numReducers, int numNodes,
                                             int numLines, bool isUber)
        {
            MiniDFSCluster      dfsCluster = null;
            MiniMRClientCluster mrCluster  = null;
            FileSystem          fileSystem = null;

            try
            {
                Configuration conf = new Configuration();
                // Start the mini-MR and mini-DFS clusters
                dfsCluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numNodes).Build();
                fileSystem = dfsCluster.GetFileSystem();
                mrCluster  = MiniMRClientClusterFactory.Create(this.GetType(), numNodes, conf);
                // Generate input.
                CreateInput(fileSystem, numMappers, numLines);
                // Run the test.
                RunMergeTest(new JobConf(mrCluster.GetConfig()), fileSystem, numMappers, numReducers
                             , numLines, isUber);
            }
            finally
            {
                if (dfsCluster != null)
                {
                    dfsCluster.Shutdown();
                }
                if (mrCluster != null)
                {
                    mrCluster.Stop();
                }
            }
        }
Example #28
        public virtual void TestHSyncWithReplication()
        {
            Configuration      conf    = new HdfsConfiguration();
            MiniDFSCluster     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem         fs      = cluster.GetFileSystem();
            Path               p       = new Path("/testHSyncWithReplication/foo");
            int                len     = 1 << 16;
            FSDataOutputStream @out    = fs.Create(p, FsPermission.GetDefault(),
                                                   EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
                                                   4096, (short)3, len, null);

            @out.Write(1);
            @out.Hflush();
            CheckSyncMetric(cluster, 0, 0);
            CheckSyncMetric(cluster, 1, 0);
            CheckSyncMetric(cluster, 2, 0);
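            // each hsync should bump the sync metric on all three datanodes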
            @out.Hsync();
            CheckSyncMetric(cluster, 0, 1);
            CheckSyncMetric(cluster, 1, 1);
            CheckSyncMetric(cluster, 2, 1);
            @out.Hsync();
            CheckSyncMetric(cluster, 0, 2);
            CheckSyncMetric(cluster, 1, 2);
            CheckSyncMetric(cluster, 2, 2);
            cluster.Shutdown();
        }
Example #29
        public virtual void TestFailoverWithBK()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                conf.Set(DFSConfigKeys.DfsNamenodeSharedEditsDirKey,
                         BKJMUtil.CreateJournalURI("/hotfailover").ToString());
                BKJMUtil.AddJournalManagerDefinition(conf);
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).ManageNameDfsSharedDirs(false).Build();
                NameNode nn1 = cluster.GetNameNode(0);
                NameNode nn2 = cluster.GetNameNode(1);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                Path       p  = new Path("/testBKJMfailover");
                FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                fs.Mkdirs(p);
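                // Fail over from nn0 to nn1; the directory created before failover should be visible through the shared BookKeeper journal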
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                NUnit.Framework.Assert.IsTrue(fs.Exists(p));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void TestDFSAdminDatanodeUpgradeControlCommands()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                // check the datanode
                string   dnAddr = dn.GetDatanodeId().GetIpcAddr(false);
                string[] args1  = new string[] { "-getDatanodeInfo", dnAddr };
                RunCmd(dfsadmin, true, args1);
                // issue shutdown to the datanode.
                string[] args2 = new string[] { "-shutdownDatanode", dnAddr, "upgrade" };
                RunCmd(dfsadmin, true, args2);
                // the datanode should be down.
                Sharpen.Thread.Sleep(2000);
                NUnit.Framework.Assert.IsFalse("DataNode should exit", dn.IsDatanodeUp());
                // ping should fail.
                NUnit.Framework.Assert.AreEqual(-1, dfsadmin.Run(args1));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }