/// <summary>
/// Verifies safe-mode querying in an HA pair: a standby NameNode contacted
/// directly must reject isInSafeMode with a StandbyException, while a
/// failover-aware client must observe safe-mode enter/leave on the active NN.
/// </summary>
public virtual void TestIsInSafemode()
{
    // Part 1: contact the standby NN directly, without client failover.
    NameNode nn2 = cluster.GetNameNode(1);
    NUnit.Framework.Assert.IsTrue("nn2 should be in standby state", nn2.IsStandbyState());
    IPEndPoint nameNodeAddress = nn2.GetNameNodeAddress();
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = new DistributedFileSystem();
    try
    {
        dfs.Initialize(
            URI.Create("hdfs://" + nameNodeAddress.GetHostName() + ":" + nameNodeAddress.Port),
            conf);
        dfs.IsInSafeMode();
        NUnit.Framework.Assert.Fail("StandBy should throw exception for isInSafeMode");
    }
    catch (IOException e)
    {
        // Only a wrapped StandbyException is the expected outcome; anything
        // else is a genuine failure and is rethrown.
        if (e is RemoteException remote)
        {
            IOException unwrapped = remote.UnwrapRemoteException();
            NUnit.Framework.Assert.IsTrue("StandBy nn should not support isInSafeMode",
                unwrapped is StandbyException);
        }
        else
        {
            throw;
        }
    }
    finally
    {
        if (dfs != null)
        {
            dfs.Close();
        }
    }
    // Part 2: with client failover — make NN1 active, toggle safe mode via its
    // RPC interface, and check the failover client sees each transition.
    cluster.TransitionToStandby(0);
    cluster.TransitionToActive(1);
    cluster.GetNameNodeRpc(1).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    DistributedFileSystem dfsWithFailOver = (DistributedFileSystem)fs;
    NUnit.Framework.Assert.IsTrue("ANN should be in SafeMode", dfsWithFailOver.IsInSafeMode());
    cluster.GetNameNodeRpc(1).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
    NUnit.Framework.Assert.IsFalse("ANN should be out of SafeMode", dfsWithFailOver.IsInSafeMode());
}
/// <summary>
/// Tests appending (with the NewBlock create flag) from a second client after
/// the first writer's soft lease limit has expired, and verifies the file
/// length and that exactly one block (of the appended length) remains.
/// </summary>
public virtual void TestAppend2AfterSoftLimit()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    // Set small soft-limit for lease so the first writer's lease can be
    // reclaimed almost immediately.
    long softLimit = 1L;
    long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    DistributedFileSystem fs = null;
    DistributedFileSystem fs2 = null;
    // FIX: all setup after cluster construction now runs inside the try, so a
    // failure in WaitActive/Initialize/Create/Write no longer leaks the
    // cluster and filesystems (previously the try began only at the append).
    try
    {
        cluster.SetLeasePeriod(softLimit, hardLimit);
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        fs2 = new DistributedFileSystem();
        fs2.Initialize(fs.GetUri(), conf);
        Path testPath = new Path("/testAppendAfterSoftLimit");
        byte[] fileContents = AppendTestUtil.InitBuffer(32);
        // create a new file without closing — the first writer deliberately
        // holds the lease open so the append must revoke it.
        FSDataOutputStream @out = fs.Create(testPath);
        @out.Write(fileContents);
        // Wait for > soft-limit so the second client may take over the lease.
        Sharpen.Thread.Sleep(250);
        FSDataOutputStream appendStream2 = fs2.Append(testPath,
            EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
        appendStream2.Write(fileContents);
        appendStream2.Close();
        NUnit.Framework.Assert.AreEqual(fileContents.Length,
            fs.GetFileStatus(testPath).GetLen());
        // make sure we now have 1 block since the first writer was revoked
        LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
        NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
        foreach (LocatedBlock blk in blks.GetLocatedBlocks())
        {
            NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
        }
    }
    finally
    {
        // Null guards: fs/fs2 may be unassigned if setup failed early.
        if (fs != null)
        {
            fs.Close();
        }
        if (fs2 != null)
        {
            fs2.Close();
        }
        cluster.Shutdown();
    }
}
/// <summary>
/// Tests a plain append from a second client after the first writer's soft
/// lease limit has expired, and verifies the resulting file length.
/// </summary>
public virtual void TestAppendAfterSoftLimit()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    conf.SetBoolean(DFSConfigKeys.DfsSupportAppendKey, true);
    // Set small soft-limit for lease so the first writer's lease can be
    // reclaimed almost immediately.
    long softLimit = 1L;
    long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    FileSystem fs = null;
    FileSystem fs2 = null;
    // FIX: all setup after cluster construction now runs inside the try, so a
    // failure in WaitActive/Initialize/Create/Write no longer leaks the
    // cluster and filesystems (previously the try began only at the append).
    try
    {
        cluster.SetLeasePeriod(softLimit, hardLimit);
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        fs2 = new DistributedFileSystem();
        fs2.Initialize(fs.GetUri(), conf);
        Path testPath = new Path("/testAppendAfterSoftLimit");
        byte[] fileContents = AppendTestUtil.InitBuffer(32);
        // create a new file without closing — the first writer deliberately
        // holds the lease open so the append must revoke it.
        FSDataOutputStream @out = fs.Create(testPath);
        @out.Write(fileContents);
        // Wait for > soft-limit so the second client may take over the lease.
        Sharpen.Thread.Sleep(250);
        FSDataOutputStream appendStream2 = fs2.Append(testPath);
        appendStream2.Write(fileContents);
        appendStream2.Close();
        NUnit.Framework.Assert.AreEqual(fileContents.Length,
            fs.GetFileStatus(testPath).GetLen());
    }
    finally
    {
        // Null guards: fs/fs2 may be unassigned if setup failed early.
        if (fs != null)
        {
            fs.Close();
        }
        if (fs2 != null)
        {
            fs2.Close();
        }
        cluster.Shutdown();
    }
}