public virtual void TestCloseTwice()
{
    DistributedFileSystem fs = cluster.GetFileSystem();
    FSDataOutputStream os = fs.Create(new Path("/test"));
    DFSOutputStream dos = (DFSOutputStream)Whitebox.GetInternalState(os, "wrappedStream");
    AtomicReference<IOException> ex =
        (AtomicReference<IOException>)Whitebox.GetInternalState(dos, "lastException");
    NUnit.Framework.Assert.AreEqual(null, ex.Get());
    dos.Close();
    IOException dummy = new IOException("dummy");
    ex.Set(dummy);
    try
    {
        dos.Close();
    }
    catch (IOException e)
    {
        NUnit.Framework.Assert.AreEqual(dummy, e);
    }
    // Closing again after the pending exception has been thrown should have
    // cleared it, so a further Close() must succeed.
    NUnit.Framework.Assert.AreEqual(null, ex.Get());
    dos.Close();
}
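// All of these tests reach into private state through Whitebox. For context,
// here is a minimal sketch of the idea, assuming a reflection-based helper in
// the spirit of Mockito's Whitebox. The real helper also walks base types and
// handles static fields; this simplified version is illustrative only.
internal static class WhiteboxSketch
{
    private static System.Reflection.FieldInfo Find(object target, string fieldName)
    {
        // Only looks at the concrete type; the real Whitebox searches the
        // whole class hierarchy.
        return target.GetType().GetField(fieldName,
            System.Reflection.BindingFlags.Instance |
            System.Reflection.BindingFlags.NonPublic |
            System.Reflection.BindingFlags.Public);
    }

    public static object GetInternalState(object target, string fieldName)
    {
        System.Reflection.FieldInfo field = Find(target, fieldName);
        return field == null ? null : field.GetValue(target);
    }

    public static void SetInternalState(object target, string fieldName, object value)
    {
        System.Reflection.FieldInfo field = Find(target, fieldName);
        if (field != null)
        {
            field.SetValue(target, value);
        }
    }
}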
/// <exception cref="System.Exception"/>
public virtual void TestWebHdfsDoAs()
{
    WebHdfsTestUtil.Log.Info("START: testWebHdfsDoAs()");
    WebHdfsTestUtil.Log.Info("ugi.getShortUserName()=" + ugi.GetShortUserName());
    WebHdfsFileSystem webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystemAs(ugi, config,
        WebHdfsFileSystem.Scheme);
    Path root = new Path("/");
    cluster.GetFileSystem().SetPermission(root, new FsPermission((short)0x1ff));
    Whitebox.SetInternalState(webhdfs, "ugi", proxyUgi);
    {
        Path responsePath = webhdfs.GetHomeDirectory();
        WebHdfsTestUtil.Log.Info("responsePath=" + responsePath);
        NUnit.Framework.Assert.AreEqual(webhdfs.GetUri() + "/user/" + ProxyUser,
            responsePath.ToString());
    }
    Path f = new Path("/testWebHdfsDoAs/a.txt");
    {
        FSDataOutputStream @out = webhdfs.Create(f);
        @out.Write(Sharpen.Runtime.GetBytesForString("Hello, webhdfs user!"));
        @out.Close();
        FileStatus status = webhdfs.GetFileStatus(f);
        WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
        NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
    }
    {
        FSDataOutputStream @out = webhdfs.Append(f);
        @out.Write(Sharpen.Runtime.GetBytesForString("\nHello again!"));
        @out.Close();
        FileStatus status = webhdfs.GetFileStatus(f);
        WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
        WebHdfsTestUtil.Log.Info("status.getLen() =" + status.GetLen());
        NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
    }
}
public virtual void TestReplQueuesActiveAfterStartupSafemode()
{
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Org.Mockito.Mockito.Mock<FSEditLog>();
    FSImage fsImage = Org.Mockito.Mockito.Mock<FSImage>();
    Org.Mockito.Mockito.When(fsImage.GetEditLog()).ThenReturn(fsEditLog);
    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
    FSNamesystem fsn = Org.Mockito.Mockito.Spy(fsNamesystem);
    // Make shouldPopulateReplQueues return true
    HAContext haContext = Org.Mockito.Mockito.Mock<HAContext>();
    HAState haState = Org.Mockito.Mockito.Mock<HAState>();
    Org.Mockito.Mockito.When(haContext.GetState()).ThenReturn(haState);
    Org.Mockito.Mockito.When(haState.ShouldPopulateReplQueues()).ThenReturn(true);
    Whitebox.SetInternalState(fsn, "haContext", haContext);
    // Make NameNode.getNameNodeMetrics() not return null
    NameNode.InitMetrics(conf, HdfsServerConstants.NamenodeRole.Namenode);
    fsn.EnterSafeMode(false);
    NUnit.Framework.Assert.IsTrue("FSNamesystem didn't enter safemode", fsn.IsInSafeMode());
    NUnit.Framework.Assert.IsTrue("Replication queues were being populated during very first safemode",
        !fsn.IsPopulatingReplQueues());
    fsn.LeaveSafeMode();
    NUnit.Framework.Assert.IsTrue("FSNamesystem didn't leave safemode", !fsn.IsInSafeMode());
    NUnit.Framework.Assert.IsTrue("Replication queues weren't being populated even after leaving safemode",
        fsn.IsPopulatingReplQueues());
    fsn.EnterSafeMode(false);
    NUnit.Framework.Assert.IsTrue("FSNamesystem didn't enter safemode", fsn.IsInSafeMode());
    NUnit.Framework.Assert.IsTrue("Replication queues weren't being populated after entering safemode 2nd time",
        fsn.IsPopulatingReplQueues());
}
public virtual void TestPutMetrics2()
{
    GraphiteSink sink = new GraphiteSink();
    IList<MetricsTag> tags = new AList<MetricsTag>();
    tags.AddItem(new MetricsTag(MsInfo.Context, "all"));
    tags.AddItem(new MetricsTag(MsInfo.Hostname, null));
    ICollection<AbstractMetric> metrics = new HashSet<AbstractMetric>();
    metrics.AddItem(MakeMetric("foo1", 1));
    metrics.AddItem(MakeMetric("foo2", 2));
    MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long)10000, tags, metrics);
    ArgumentCaptor<string> argument = ArgumentCaptor.ForClass<string>();
    GraphiteSink.Graphite mockGraphite = MakeGraphite();
    Whitebox.SetInternalState(sink, "graphite", mockGraphite);
    sink.PutMetrics(record);
    try
    {
        Org.Mockito.Mockito.Verify(mockGraphite).Write(argument.Capture());
    }
    catch (IOException e)
    {
        Runtime.PrintStackTrace(e);
    }
    // The record timestamp (10000 ms) is emitted in seconds (10); metric order
    // within a record is not guaranteed, so accept either ordering.
    string result = argument.GetValue();
    Assert.Equal(true,
        result.Equals("null.all.Context.Context=all.foo1 1 10\n" +
            "null.all.Context.Context=all.foo2 2 10\n") ||
        result.Equals("null.all.Context.Context=all.foo2 2 10\n" +
            "null.all.Context.Context=all.foo1 1 10\n"));
}
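// The MakeGraphite() helper used by the GraphiteSink tests is not shown in
// this section; a plausible sketch, assuming it simply returns a Mockito mock
// that reports itself as connected so PutMetrics does not try to reconnect:
private GraphiteSink.Graphite MakeGraphite()
{
    GraphiteSink.Graphite mock = Org.Mockito.Mockito.Mock<GraphiteSink.Graphite>();
    Org.Mockito.Mockito.When(mock.IsConnected()).ThenReturn(true);
    return mock;
}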
public virtual void TestRaceWhileNNStartup()
{
    MiniDFSCluster cluster = null;
    Configuration conf = WebHdfsTestUtil.CreateConf();
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
        cluster.WaitActive();
        NameNode namenode = cluster.GetNameNode();
        NamenodeProtocols rpcServer = namenode.GetRpcServer();
        // Null out the RPC server to simulate a NameNode that is still starting up
        Whitebox.SetInternalState(namenode, "rpcServer", null);
        Path foo = new Path("/foo");
        FileSystem webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
        try
        {
            webHdfs.Mkdirs(foo);
            NUnit.Framework.Assert.Fail("Expected RetriableException");
        }
        catch (RetriableException e)
        {
            GenericTestUtils.AssertExceptionContains("Namenode is in startup mode", e);
        }
        Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestInitWithUGIToken()
{
    Configuration conf = new Configuration();
    TestTokenAspect.DummyFs fs = Org.Mockito.Mockito.Spy(new TestTokenAspect.DummyFs());
    Org.Mockito.Mockito.DoReturn(null).When(fs).GetDelegationToken(Matchers.AnyString());
    Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier> token =
        new Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier>(new byte[0], new byte[0],
            TestTokenAspect.DummyFs.TokenKind, new Text("127.0.0.1:1234"));
    fs.ugi.AddToken(token);
    fs.ugi.AddToken(new Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier>(
        new byte[0], new byte[0], new Text("Other token"), new Text("127.0.0.1:8021")));
    NUnit.Framework.Assert.AreEqual("wrong tokens in user", 2, fs.ugi.GetTokens().Count);
    fs.emulateSecurityEnabled = true;
    fs.Initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
    fs.tokenAspect.EnsureTokenInitialized();
    // Select a token from ugi (not from the remote host), store it but don't
    // renew it
    Org.Mockito.Mockito.Verify(fs).SetDelegationToken(token);
    Org.Mockito.Mockito.Verify(fs, Org.Mockito.Mockito.Never()).GetDelegationToken(Matchers.AnyString());
    NUnit.Framework.Assert.IsNull(Whitebox.GetInternalState(fs.tokenAspect, "dtRenewer"));
    NUnit.Framework.Assert.IsNull(Whitebox.GetInternalState(fs.tokenAspect, "action"));
}
/// <summary>Make sure the client retries when the active NN is in safemode</summary>
/// <exception cref="System.Exception"/>
public virtual void TestClientRetrySafeMode()
{
    IDictionary<Path, bool> results = Collections.SynchronizedMap(new Dictionary<Path, bool>());
    Path test = new Path("/test");
    // let nn0 enter safemode
    NameNodeAdapter.EnterSafeMode(nn0, false);
    FSNamesystem.SafeModeInfo safeMode =
        (FSNamesystem.SafeModeInfo)Whitebox.GetInternalState(nn0.GetNamesystem(), "safeMode");
    Whitebox.SetInternalState(safeMode, "extension", Sharpen.Extensions.ValueOf(30000));
    Log.Info("enter safemode");
    new _Thread_133(this, test, results).Start();
    // make sure the client's call has actually been handled by the active NN
    NUnit.Framework.Assert.IsFalse("The directory should not be created while NN in safemode",
        fs.Exists(test));
    Sharpen.Thread.Sleep(1000);
    // let nn0 leave safemode
    NameNodeAdapter.LeaveSafeMode(nn0);
    Log.Info("leave safemode");
    lock (this)
    {
        while (!results.Contains(test))
        {
            Sharpen.Runtime.Wait(this);
        }
        NUnit.Framework.Assert.IsTrue(results[test]);
    }
}
public virtual void TestPendingDeleteUnknownBlocks()
{
    int fileNum = 5;  // 5 files
    Path[] files = new Path[fileNum];
    MiniDFSCluster.DataNodeProperties[] dnprops =
        new MiniDFSCluster.DataNodeProperties[Replication];
    // create a group of files, each file contains 1 block
    for (int i = 0; i < fileNum; i++)
    {
        files[i] = new Path("/file" + i);
        DFSTestUtil.CreateFile(dfs, files[i], Blocksize, Replication, i);
    }
    // wait until all DataNodes have replicas
    WaitForReplication();
    for (int i = Replication - 1; i >= 0; i--)
    {
        dnprops[i] = cluster.StopDataNode(i);
    }
    Sharpen.Thread.Sleep(2000);
    // delete 2 files, we still have 3 files remaining so that we can cover
    // every DN storage
    for (int i = 0; i < 2; i++)
    {
        dfs.Delete(files[i], true);
    }
    // restart NameNode
    cluster.RestartNameNode(false);
    InvalidateBlocks invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(
        cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
    InvalidateBlocks mockIb = Org.Mockito.Mockito.Spy(invalidateBlocks);
    Org.Mockito.Mockito.DoReturn(1L).When(mockIb).GetInvalidationDelay();
    Whitebox.SetInternalState(cluster.GetNamesystem().GetBlockManager(),
        "invalidateBlocks", mockIb);
    NUnit.Framework.Assert.AreEqual(0L, cluster.GetNamesystem().GetPendingDeletionBlocks());
    // restart DataNodes
    for (int i = 0; i < Replication; i++)
    {
        cluster.RestartDataNode(dnprops[i], true);
    }
    cluster.WaitActive();
    for (int i = 0; i < Replication; i++)
    {
        DataNodeTestUtils.TriggerBlockReport(cluster.GetDataNodes()[i]);
    }
    Sharpen.Thread.Sleep(2000);
    // make sure we have received block reports by checking the total block #
    NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
    NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetPendingDeletionBlocks());
    cluster.RestartNameNode(true);
    Sharpen.Thread.Sleep(6000);
    NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
    NUnit.Framework.Assert.AreEqual(0, cluster.GetNamesystem().GetPendingDeletionBlocks());
}
public static FSImage SpyOnFsImage(NameNode nn1)
{
    FSNamesystem fsn = nn1.GetNamesystem();
    FSImage spy = Org.Mockito.Mockito.Spy(fsn.GetFSImage());
    Whitebox.SetInternalState(fsn, "fsImage", spy);
    return spy;
}
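// A hypothetical usage of the helper above (illustrative only, not taken from
// the source): once the spy replaces the real FSImage, later namesystem
// activity can be stubbed or verified through Mockito, e.g.
//
//   FSImage spy = SpyOnFsImage(cluster.GetNameNode());
//   // ... run the operation under test ...
//   Org.Mockito.Mockito.Verify(spy).GetStorage();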
/// <exception cref="System.IO.IOException"/> private long GetCommittedTxnIdValue(MiniQJMHACluster qjCluster) { Journal journal1 = qjCluster.GetJournalCluster().GetJournalNode(0).GetOrCreateJournal (MiniQJMHACluster.Nameservice); BestEffortLongFile committedTxnId = (BestEffortLongFile)Whitebox.GetInternalState (journal1, "committedTxnId"); return(committedTxnId != null?committedTxnId.Get() : HdfsConstants.InvalidTxid); }
public virtual void TestIncludeExcludeLists()
{
    BlockManager bm = Org.Mockito.Mockito.Mock<BlockManager>();
    FSNamesystem fsn = Org.Mockito.Mockito.Mock<FSNamesystem>();
    Configuration conf = new Configuration();
    HostFileManager hm = new HostFileManager();
    HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
    HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
    includedNodes.Add(Entry("127.0.0.1:12345"));
    includedNodes.Add(Entry("localhost:12345"));
    includedNodes.Add(Entry("127.0.0.1:12345"));
    includedNodes.Add(Entry("127.0.0.2"));
    excludedNodes.Add(Entry("127.0.0.1:12346"));
    excludedNodes.Add(Entry("127.0.30.1:12346"));
    NUnit.Framework.Assert.AreEqual(2, includedNodes.Size());
    NUnit.Framework.Assert.AreEqual(2, excludedNodes.Size());
    hm.Refresh(includedNodes, excludedNodes);
    DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
    Whitebox.SetInternalState(dm, "hostFileManager", hm);
    IDictionary<string, DatanodeDescriptor> dnMap = (IDictionary<string, DatanodeDescriptor>)
        Whitebox.GetInternalState(dm, "datanodeMap");
    // After the de-duplication, there should be only one DN from the included
    // nodes declared as dead.
    NUnit.Framework.Assert.AreEqual(2, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.All).Count);
    NUnit.Framework.Assert.AreEqual(2, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
    dnMap["uuid-foo"] = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost",
        "uuid-foo", 12345, 1020, 1021, 1022));
    NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
    dnMap["uuid-bar"] = new DatanodeDescriptor(new DatanodeID("127.0.0.2", "127.0.0.2",
        "uuid-bar", 12345, 1020, 1021, 1022));
    NUnit.Framework.Assert.AreEqual(0, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
    DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0.3",
        "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
    DFSTestUtil.SetDatanodeDead(spam);
    includedNodes.Add(Entry("127.0.0.3:12345"));
    dnMap["uuid-spam"] = spam;
    NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
    Sharpen.Collections.Remove(dnMap, "uuid-spam");
    NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
    excludedNodes.Add(Entry("127.0.0.3"));
    NUnit.Framework.Assert.AreEqual(0, dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.Dead).Count);
}
/// <exception cref="System.Exception"/> public virtual void TestDatanodeReRegistration() { // Create a test file DistributedFileSystem dfs = cluster.GetFileSystem(); Path path = new Path("/testRR"); // Create a file and shutdown the DNs, which populates InvalidateBlocks DFSTestUtil.CreateFile(dfs, path, dfs.GetDefaultBlockSize(), (short)NumOfDatanodes , unchecked ((int)(0xED0ED0))); foreach (DataNode dn in cluster.GetDataNodes()) { dn.Shutdown(); } dfs.Delete(path, false); namesystem.WriteLock(); InvalidateBlocks invalidateBlocks; int expected = NumOfDatanodes; try { invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(cluster.GetNamesystem ().GetBlockManager(), "invalidateBlocks"); NUnit.Framework.Assert.AreEqual("Expected invalidate blocks to be the number of DNs" , (long)expected, invalidateBlocks.NumBlocks()); } finally { namesystem.WriteUnlock(); } // Re-register each DN and see that it wipes the invalidation work foreach (DataNode dn_1 in cluster.GetDataNodes()) { DatanodeID did = dn_1.GetDatanodeId(); DatanodeRegistration reg = new DatanodeRegistration(new DatanodeID(UUID.RandomUUID ().ToString(), did), new StorageInfo(HdfsServerConstants.NodeType.DataNode), new ExportedBlockKeys(), VersionInfo.GetVersion()); namesystem.WriteLock(); try { bm.GetDatanodeManager().RegisterDatanode(reg); expected--; NUnit.Framework.Assert.AreEqual("Expected number of invalidate blocks to decrease" , (long)expected, invalidateBlocks.NumBlocks()); } finally { namesystem.WriteUnlock(); } } }
/// <exception cref="System.Exception"/> private void TestUpgrade(TestBootstrapStandbyWithQJM.UpgradeState state) { cluster.TransitionToActive(0); Configuration confNN1 = cluster.GetConfiguration(1); FilePath current = cluster.GetNameNode(1).GetFSImage().GetStorage().GetStorageDir (0).GetCurrentDir(); FilePath tmp = cluster.GetNameNode(1).GetFSImage().GetStorage().GetStorageDir(0). GetPreviousTmp(); // shut down nn1 cluster.ShutdownNameNode(1); // make NN0 in upgrade state FSImage fsImage0 = cluster.GetNameNode(0).GetNamesystem().GetFSImage(); Whitebox.SetInternalState(fsImage0, "isUpgradeFinalized", false); switch (state) { case TestBootstrapStandbyWithQJM.UpgradeState.Recover: { // rename the current directory to previous.tmp in nn1 NNStorage.Rename(current, tmp); break; } case TestBootstrapStandbyWithQJM.UpgradeState.Format: { // rename the current directory to a random name so it's not formatted FilePath wrongPath = new FilePath(current.GetParentFile(), "wrong"); NNStorage.Rename(current, wrongPath); break; } default: { break; } } int rc = BootstrapStandby.Run(new string[] { "-force" }, confNN1); NUnit.Framework.Assert.AreEqual(0, rc); // Should have copied over the namespace from the standby FSImageTestUtil.AssertNNHasCheckpoints(cluster, 1, ImmutableList.Of(0)); FSImageTestUtil.AssertNNFilesMatch(cluster); // make sure the NN1 is in upgrade state, i.e., the previous directory has // been successfully created cluster.RestartNameNode(1); NUnit.Framework.Assert.IsFalse(cluster.GetNameNode(1).GetNamesystem().IsUpgradeFinalized ()); }
/// <exception cref="System.IO.IOException"/> /// <exception cref="Sharpen.URISyntaxException"/> /// <exception cref="System.Exception"/> public virtual void TestOnDiskMerger() { JobConf jobConf = new JobConf(); int SortFactor = 5; jobConf.SetInt(MRJobConfig.IoSortFactor, SortFactor); MapOutputFile mapOutputFile = new MROutputFiles(); FileSystem fs = FileSystem.GetLocal(jobConf); MergeManagerImpl <IntWritable, IntWritable> manager = new MergeManagerImpl <IntWritable , IntWritable>(null, jobConf, fs, null, null, null, null, null, null, null, null , null, null, mapOutputFile); MergeThread <MapOutput <IntWritable, IntWritable>, IntWritable, IntWritable> onDiskMerger = (MergeThread <MapOutput <IntWritable, IntWritable>, IntWritable, IntWritable>)Whitebox .GetInternalState(manager, "onDiskMerger"); int mergeFactor = (int)Whitebox.GetInternalState(onDiskMerger, "mergeFactor"); // make sure the io.sort.factor is set properly NUnit.Framework.Assert.AreEqual(mergeFactor, SortFactor); // Stop the onDiskMerger thread so that we can intercept the list of files // waiting to be merged. onDiskMerger.Suspend(); //Send the list of fake files waiting to be merged Random rand = new Random(); for (int i = 0; i < 2 * SortFactor; ++i) { Path path = new Path("somePath"); MergeManagerImpl.CompressAwarePath cap = new MergeManagerImpl.CompressAwarePath(path , 1l, rand.Next()); manager.CloseOnDiskFile(cap); } //Check that the files pending to be merged are in sorted order. List <IList <MergeManagerImpl.CompressAwarePath> > pendingToBeMerged = (List <IList <MergeManagerImpl.CompressAwarePath > >)Whitebox.GetInternalState(onDiskMerger, "pendingToBeMerged"); NUnit.Framework.Assert.IsTrue("No inputs were added to list pending to merge", pendingToBeMerged .Count > 0); for (int i_1 = 0; i_1 < pendingToBeMerged.Count; ++i_1) { IList <MergeManagerImpl.CompressAwarePath> inputs = pendingToBeMerged[i_1]; for (int j = 1; j < inputs.Count; ++j) { NUnit.Framework.Assert.IsTrue("Not enough / too many inputs were going to be merged" , inputs.Count > 0 && inputs.Count <= SortFactor); NUnit.Framework.Assert.IsTrue("Inputs to be merged were not sorted according to size: " , inputs[j].GetCompressedSize() >= inputs[j - 1].GetCompressedSize()); } } }
public override void Run()
{
    try
    {
        Sharpen.Thread.Sleep(1000);
        TestDeleteRace.Log.Info("Deleting " + this.path);
        FSDirectory fsdir = this._enclosing.cluster.GetNamesystem().dir;
        INode fileINode = fsdir.GetINode4Write(this.path.ToString());
        INodeMap inodeMap = (INodeMap)Whitebox.GetInternalState(fsdir, "inodeMap");
        this.fs.Delete(this.path, false);
        // after deletion, add the inode back to the inodeMap
        inodeMap.Put(fileINode);
        TestDeleteRace.Log.Info("Deleted " + this.path);
    }
    catch (Exception e)
    {
        TestDeleteRace.Log.Info(e);
    }
}
public virtual void TestTokenCompatibilityFor203()
{
    Configuration conf = new Configuration();
    HftpFileSystem fs = new HftpFileSystem();
    Org.Apache.Hadoop.Security.Token.Token<object> token =
        new Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier>(new byte[0], new byte[0],
            DelegationTokenIdentifier.HdfsDelegationKind, new Text("127.0.0.1:8020"));
    Credentials cred = new Credentials();
    cred.AddToken(HftpFileSystem.TokenKind, token);
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    cred.Write(new DataOutputStream(os));
    HttpURLConnection conn = Org.Mockito.Mockito.Mock<HttpURLConnection>();
    Org.Mockito.Mockito.DoReturn(new ByteArrayInputStream(os.ToByteArray()))
        .When(conn).GetInputStream();
    Org.Mockito.Mockito.DoReturn(HttpURLConnection.HttpOk).When(conn).GetResponseCode();
    URLConnectionFactory factory = Org.Mockito.Mockito.Mock<URLConnectionFactory>();
    Org.Mockito.Mockito.DoReturn(conn).When(factory).OpenConnection(
        Org.Mockito.Mockito.Any<Uri>(), Matchers.AnyBoolean());
    URI uri = new URI("hftp://127.0.0.1:8020");
    fs.Initialize(uri, conf);
    fs.connectionFactory = factory;
    UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting("foo",
        new string[] { "bar" });
    TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(fs,
        SecurityUtil.BuildTokenService(uri), HftpFileSystem.TokenKind);
    tokenAspect.InitDelegationToken(ugi);
    tokenAspect.EnsureTokenInitialized();
    NUnit.Framework.Assert.AreSame(HftpFileSystem.TokenKind, fs.GetRenewToken().GetKind());
    Org.Apache.Hadoop.Security.Token.Token<object> tok =
        (Org.Apache.Hadoop.Security.Token.Token<object>)Whitebox.GetInternalState(fs, "delegationToken");
    NUnit.Framework.Assert.AreNotSame("Not making a copy of the remote token", token, tok);
    NUnit.Framework.Assert.AreEqual(token.GetKind(), tok.GetKind());
}
public virtual void TestComputePacketChunkSize()
{
    DistributedFileSystem fs = cluster.GetFileSystem();
    FSDataOutputStream os = fs.Create(new Path("/test"));
    DFSOutputStream dos = (DFSOutputStream)Whitebox.GetInternalState(os, "wrappedStream");
    int packetSize = 64 * 1024;
    int bytesPerChecksum = 512;
    // Invoke the private computePacketChunkSize method via reflection.
    MethodInfo method = Sharpen.Runtime.GetDeclaredMethod(dos.GetType(),
        "computePacketChunkSize", typeof(int), typeof(int));
    method.Invoke(dos, packetSize, bytesPerChecksum);
    FieldInfo field = Sharpen.Runtime.GetDeclaredField(dos.GetType(), "packetSize");
    NUnit.Framework.Assert.IsTrue((int)field.GetValue(dos) + 33 < packetSize);
    // If PKT_MAX_HEADER_LEN is 257, the actual packet size comes to over 64KB
    // without a fix on HDFS-7308.
    NUnit.Framework.Assert.IsTrue((int)field.GetValue(dos) + 257 < packetSize);
}
public virtual void TestPutMetrics3()
{
    // setup GraphiteSink
    GraphiteSink sink = new GraphiteSink();
    GraphiteSink.Graphite mockGraphite = MakeGraphite();
    Whitebox.SetInternalState(sink, "graphite", mockGraphite);
    // given two metrics records with timestamps 1000 milliseconds apart.
    IList<MetricsTag> tags = Collections.EmptyList();
    ICollection<AbstractMetric> metrics = new HashSet<AbstractMetric>();
    metrics.AddItem(MakeMetric("foo1", 1));
    MetricsRecord record1 = new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics);
    MetricsRecord record2 = new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics);
    sink.PutMetrics(record1);
    sink.PutMetrics(record2);
    sink.Flush();
    try
    {
        sink.Close();
    }
    catch (IOException e)
    {
        Runtime.PrintStackTrace(e);
    }
    // then the timestamps in the graphite stream should differ by one second.
    try
    {
        Org.Mockito.Mockito.Verify(mockGraphite).Write(
            Matchers.Eq("null.default.Context.foo1 1 1000000000\n"));
        Org.Mockito.Mockito.Verify(mockGraphite).Write(
            Matchers.Eq("null.default.Context.foo1 1 1000000001\n"));
    }
    catch (IOException e)
    {
        Runtime.PrintStackTrace(e);
    }
}
public virtual void TestGetRemoteToken()
{
    Configuration conf = new Configuration();
    TestTokenAspect.DummyFs fs = Org.Mockito.Mockito.Spy(new TestTokenAspect.DummyFs());
    Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier> token =
        new Org.Apache.Hadoop.Security.Token.Token<TokenIdentifier>(new byte[0], new byte[0],
            TestTokenAspect.DummyFs.TokenKind, new Text("127.0.0.1:1234"));
    Org.Mockito.Mockito.DoReturn(token).When(fs).GetDelegationToken(Matchers.AnyString());
    Org.Mockito.Mockito.DoReturn(token).When(fs).GetRenewToken();
    fs.Initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
    fs.tokenAspect.EnsureTokenInitialized();
    // Select a token, store and renew it
    Org.Mockito.Mockito.Verify(fs).SetDelegationToken(token);
    NUnit.Framework.Assert.IsNotNull(Whitebox.GetInternalState(fs.tokenAspect, "dtRenewer"));
    NUnit.Framework.Assert.IsNotNull(Whitebox.GetInternalState(fs.tokenAspect, "action"));
}
public virtual void TestSetTokenServiceAndKind()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration clusterConf = new HdfsConfiguration(conf);
        SecurityUtil.SetAuthenticationMethod(
            UserGroupInformation.AuthenticationMethod.Simple, clusterConf);
        clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        // trick the NN into thinking security is enabled w/o it trying
        // to login from a keytab
        UserGroupInformation.SetConfiguration(clusterConf);
        cluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(0).Build();
        cluster.WaitActive();
        SecurityUtil.SetAuthenticationMethod(
            UserGroupInformation.AuthenticationMethod.Kerberos, clusterConf);
        WebHdfsFileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(clusterConf, "webhdfs");
        Whitebox.SetInternalState(fs, "canRefreshDelegationToken", true);
        URLConnectionFactory factory =
            new _URLConnectionFactory_268(new _ConnectionConfigurator_262());
        Whitebox.SetInternalState(fs, "connectionFactory", factory);
        Org.Apache.Hadoop.Security.Token.Token<object> token1 = fs.GetDelegationToken();
        NUnit.Framework.Assert.AreEqual(new Text("bar"), token1.GetKind());
        HttpOpParam.OP op = GetOpParam.OP.Getdelegationtoken;
        Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token2 =
            new _FsPathResponseRunner_281(op, null, new RenewerParam(null)).Run();
        NUnit.Framework.Assert.AreEqual(new Text("bar"), token2.GetKind());
        NUnit.Framework.Assert.AreEqual(new Text("foo"), token2.GetService());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.Exception"/> private HttpServer2 CheckBindAddress(string host, int port, bool findPort) { HttpServer2 server = CreateServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) IList <object> listeners = (IList <object>)Whitebox.GetInternalState(server, "listeners" ); Connector listener = (Connector)listeners[0]; Assert.Equal(port, listener.GetPort()); // verify hostname is what was given server.OpenListeners(); Assert.Equal(host, server.GetConnectorAddress(0).GetHostName() ); int boundPort = server.GetConnectorAddress(0).Port; if (port == 0) { Assert.True(boundPort != 0); } else { // ephemeral should now return bound port if (findPort) { Assert.True(boundPort > port); // allow a little wiggle room to prevent random test failures if // some consecutive ports are already in use Assert.True(boundPort - port < 8); } } } catch (Exception e) { server.Stop(); throw; } return(server); }
public virtual void TestFileStatusPipeFile()
{
    RawLocalFileSystem origFs = new RawLocalFileSystem();
    RawLocalFileSystem fs = Org.Mockito.Mockito.Spy(origFs);
    Configuration conf = Org.Mockito.Mockito.Mock<Configuration>();
    fs.SetConf(conf);
    Whitebox.SetInternalState(fs, "useDeprecatedFileStatus", false);
    Path path = new Path("/foo");
    // a named pipe is neither a regular file nor a directory, but it exists
    FilePath pipe = Org.Mockito.Mockito.Mock<FilePath>();
    Org.Mockito.Mockito.When(pipe.IsFile()).ThenReturn(false);
    Org.Mockito.Mockito.When(pipe.IsDirectory()).ThenReturn(false);
    Org.Mockito.Mockito.When(pipe.Exists()).ThenReturn(true);
    FileStatus stat = Org.Mockito.Mockito.Mock<FileStatus>();
    Org.Mockito.Mockito.DoReturn(pipe).When(fs).PathToFile(path);
    Org.Mockito.Mockito.DoReturn(stat).When(fs).GetFileStatus(path);
    FileStatus[] stats = fs.ListStatus(path);
    Assert.True(stats != null && stats.Length == 1 && stats[0] == stat);
}
public virtual void TestClose()
{
    GraphiteSink sink = new GraphiteSink();
    GraphiteSink.Graphite mockGraphite = MakeGraphite();
    Whitebox.SetInternalState(sink, "graphite", mockGraphite);
    try
    {
        sink.Close();
    }
    catch (IOException ioe)
    {
        Runtime.PrintStackTrace(ioe);
    }
    try
    {
        Org.Mockito.Mockito.Verify(mockGraphite).Close();
    }
    catch (IOException ioe)
    {
        Runtime.PrintStackTrace(ioe);
    }
}
/// <summary>
/// Make sure the WebHdfsFileSystem will retry based on RetriableException when
/// rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRetryWhileNNStartup()
{
    Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);
    MiniDFSCluster cluster = null;
    IDictionary<string, bool> resultMap = new Dictionary<string, bool>();
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build();
        HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        NameNode namenode = cluster.GetNameNode(0);
        NamenodeProtocols rpcServer = namenode.GetRpcServer();
        Whitebox.SetInternalState(namenode, "rpcServer", null);
        new _Thread_212(this, conf, resultMap).Start();
        Sharpen.Thread.Sleep(1000);
        Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
        lock (this)
        {
            while (!resultMap.Contains("mkdirs"))
            {
                Sharpen.Runtime.Wait(this);
            }
            NUnit.Framework.Assert.IsTrue(resultMap["mkdirs"]);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestFailureAndPutMetrics()
{
    GraphiteSink sink = new GraphiteSink();
    IList<MetricsTag> tags = new AList<MetricsTag>();
    tags.AddItem(new MetricsTag(MsInfo.Context, "all"));
    tags.AddItem(new MetricsTag(MsInfo.Hostname, "host"));
    ICollection<AbstractMetric> metrics = new HashSet<AbstractMetric>();
    metrics.AddItem(MakeMetric("foo1", 1.25));
    metrics.AddItem(MakeMetric("foo2", 2.25));
    MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long)10000, tags, metrics);
    GraphiteSink.Graphite mockGraphite = MakeGraphite();
    Whitebox.SetInternalState(sink, "graphite", mockGraphite);
    // throw an exception on the first write; the sink should close the connection
    Org.Mockito.Mockito.DoThrow(new IOException("IO exception")).When(mockGraphite)
        .Write(Matchers.AnyString());
    sink.PutMetrics(record);
    Org.Mockito.Mockito.Verify(mockGraphite).Write(Matchers.AnyString());
    Org.Mockito.Mockito.Verify(mockGraphite).Close();
    // reset mock and try again
    Org.Mockito.Mockito.Reset(mockGraphite);
    Org.Mockito.Mockito.When(mockGraphite.IsConnected()).ThenReturn(false);
    ArgumentCaptor<string> argument = ArgumentCaptor.ForClass<string>();
    sink.PutMetrics(record);
    Org.Mockito.Mockito.Verify(mockGraphite).Write(argument.Capture());
    string result = argument.GetValue();
    Assert.Equal(true,
        result.Equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" +
            "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") ||
        result.Equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" +
            "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
public virtual void TestRemoveIncludedNode()
{
    FSNamesystem fsn = Org.Mockito.Mockito.Mock<FSNamesystem>();
    // Set the write lock so that the DatanodeManager can start
    Org.Mockito.Mockito.When(fsn.HasWriteLock()).ThenReturn(true);
    DatanodeManager dm = MockDatanodeManager(fsn, new Configuration());
    HostFileManager hm = new HostFileManager();
    HostFileManager.HostSet noNodes = new HostFileManager.HostSet();
    HostFileManager.HostSet oneNode = new HostFileManager.HostSet();
    HostFileManager.HostSet twoNodes = new HostFileManager.HostSet();
    DatanodeRegistration dr1 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123", 12345, 12345, 12345, 12345),
        new StorageInfo(HdfsServerConstants.NodeType.DataNode),
        new ExportedBlockKeys(), "test");
    DatanodeRegistration dr2 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234", 23456, 23456, 23456, 23456),
        new StorageInfo(HdfsServerConstants.NodeType.DataNode),
        new ExportedBlockKeys(), "test");
    twoNodes.Add(Entry("127.0.0.1:12345"));
    twoNodes.Add(Entry("127.0.0.1:23456"));
    oneNode.Add(Entry("127.0.0.1:23456"));
    hm.Refresh(twoNodes, noNodes);
    Whitebox.SetInternalState(dm, "hostFileManager", hm);
    // Register two data nodes to simulate them coming up.
    // We need to add two nodes, because if we have only one node, removing it
    // will cause the includes list to be empty, which means all hosts will be
    // allowed.
    dm.RegisterDatanode(dr1);
    dm.RegisterDatanode(dr2);
    // Make sure that both nodes are reported
    IList<DatanodeDescriptor> both = dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.All);
    // Sort the list so that we know which one is which
    both.Sort();
    NUnit.Framework.Assert.AreEqual("Incorrect number of hosts reported", 2, both.Count);
    NUnit.Framework.Assert.AreEqual("Unexpected host or host in unexpected position",
        "127.0.0.1:12345", both[0].GetInfoAddr());
    NUnit.Framework.Assert.AreEqual("Unexpected host or host in unexpected position",
        "127.0.0.1:23456", both[1].GetInfoAddr());
    // Remove one node from includes, but do not add it to excludes.
    hm.Refresh(oneNode, noNodes);
    // Make sure that only one node is still reported
    IList<DatanodeDescriptor> onlyOne = dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.All);
    NUnit.Framework.Assert.AreEqual("Incorrect number of hosts reported", 1, onlyOne.Count);
    NUnit.Framework.Assert.AreEqual("Unexpected host reported", "127.0.0.1:23456",
        onlyOne[0].GetInfoAddr());
    // Remove all nodes from includes
    hm.Refresh(noNodes, noNodes);
    // Check that both nodes are reported again
    IList<DatanodeDescriptor> bothAgain = dm.GetDatanodeListForReport(
        HdfsConstants.DatanodeReportType.All);
    // Sort the list so that we know which one is which
    bothAgain.Sort();
    NUnit.Framework.Assert.AreEqual("Incorrect number of hosts reported", 2, bothAgain.Count);
    NUnit.Framework.Assert.AreEqual("Unexpected host or host in unexpected position",
        "127.0.0.1:12345", bothAgain[0].GetInfoAddr());
    NUnit.Framework.Assert.AreEqual("Unexpected host or host in unexpected position",
        "127.0.0.1:23456", bothAgain[1].GetInfoAddr());
}
public virtual void TestPropagatedClose()
{
    ByteRangeInputStream bris =
        Org.Mockito.Mockito.Mock<ByteRangeInputStream>(Org.Mockito.Mockito.CallsRealMethods);
    InputStream mockStream = Org.Mockito.Mockito.Mock<InputStream>();
    Org.Mockito.Mockito.DoReturn(mockStream).When(bris).OpenInputStream();
    Whitebox.SetInternalState(bris, "status", ByteRangeInputStream.StreamStatus.Seek);
    int brisOpens = 0;
    int brisCloses = 0;
    int isCloses = 0;
    // first open, shouldn't close underlying stream
    bris.GetInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(++brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
    // stream is open, shouldn't close underlying stream
    bris.GetInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
    // seek forces a reopen, should close underlying stream
    bris.Seek(1);
    bris.GetInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(++brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(++isCloses)).Close();
    // verify that the underlying stream isn't closed after a seek
    // ie. the state was correctly updated
    bris.GetInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
    // seeking to the same location should be a no-op
    bris.Seek(1);
    bris.GetInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
    // close should of course close
    bris.Close();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(++brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(++isCloses)).Close();
    // it's already closed, underlying stream should not close
    bris.Close();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(++brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
    // it's closed, don't reopen it
    bool errored = false;
    try
    {
        bris.GetInputStream();
    }
    catch (IOException e)
    {
        errored = true;
        NUnit.Framework.Assert.AreEqual("Stream closed", e.Message);
    }
    finally
    {
        NUnit.Framework.Assert.IsTrue("Read a closed stream", errored);
    }
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisOpens)).OpenInputStream();
    Org.Mockito.Mockito.Verify(bris, Org.Mockito.Mockito.Times(brisCloses)).Close();
    Org.Mockito.Mockito.Verify(mockStream, Org.Mockito.Mockito.Times(isCloses)).Close();
}
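// For orientation, the state machine the test above exercises presumably
// behaves like the following (a hedged sketch inferred from the assertions,
// not the actual ByteRangeInputStream source; "inner" is an illustrative name):
//
//   InputStream GetInputStream()
//   {
//       if (status == StreamStatus.Closed) throw new IOException("Stream closed");
//       if (status == StreamStatus.Seek)
//       {
//           // reopening after a seek closes the old underlying stream
//           if (inner != null) inner.Close();
//           inner = OpenInputStream();
//       }
//       status = StreamStatus.Normal;
//       return inner;
//   }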
/// <summary>
/// Test that the correct exception (StandbyException or RetriableException) is
/// thrown during the NN failover.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestDelegationTokenDuringNNFailover()
{
    EditLogTailer editLogTailer = nn1.GetNamesystem().GetEditLogTailer();
    // stop the editLogTailer of nn1
    editLogTailer.Stop();
    Configuration conf = (Configuration)Whitebox.GetInternalState(editLogTailer, "conf");
    nn1.GetNamesystem().SetEditLogTailerForTests(
        new TestDelegationTokensWithHA.EditLogTailerForTest(this, nn1.GetNamesystem(), conf));
    // create token
    Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
        GetDelegationToken(fs, "JobTracker");
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.GetIdentifier();
    identifier.ReadFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    // Ensure that it's present in the nn0 secret manager and can
    // be renewed directly from there.
    Log.Info("A valid token should have non-null password, " +
        "and should be renewed successfully");
    NUnit.Framework.Assert.IsTrue(null != dtSecretManager.RetrievePassword(identifier));
    dtSecretManager.RenewToken(token, "JobTracker");
    // transition nn0 to standby
    cluster.TransitionToStandby(0);
    try
    {
        cluster.GetNameNodeRpc(0).RenewDelegationToken(token);
        NUnit.Framework.Assert.Fail("StandbyException is expected since nn0 is in standby state");
    }
    catch (StandbyException e)
    {
        GenericTestUtils.AssertExceptionContains(
            HAServiceProtocol.HAServiceState.Standby.ToString(), e);
    }
    new _Thread_220().Start();
    Sharpen.Thread.Sleep(1000);
    try
    {
        nn1.GetNamesystem().VerifyToken(token.DecodeIdentifier(), token.GetPassword());
        NUnit.Framework.Assert.Fail("RetriableException/StandbyException is expected since nn1 is in transition");
    }
    catch (IOException e)
    {
        NUnit.Framework.Assert.IsTrue(e is StandbyException || e is RetriableException);
        Log.Info("Got expected exception", e);
    }
    catchup = true;
    lock (this)
    {
        Sharpen.Runtime.NotifyAll(this);
    }
    Configuration clientConf = dfs.GetConf();
    DoRenewOrCancel(token, clientConf, TestDelegationTokensWithHA.TokenTestAction.Renew);
    DoRenewOrCancel(token, clientConf, TestDelegationTokensWithHA.TokenTestAction.Cancel);
}
public virtual void TestInternalNameService()
{
    Configuration conf = new Configuration();
    conf.Set(DFSConfigKeys.DfsNameservices, "ns1,ns2,ns3");
    AddNN(conf, "ns1", "mock1:8020");
    AddNN(conf, "ns2", "mock1:8020");
    AddNN(conf, "ns3", "mock1:8020");
    conf.Set(DFSConfigKeys.DfsInternalNameservicesKey, "ns1");
    bpm.RefreshNamenodes(conf);
    NUnit.Framework.Assert.AreEqual("create #1\n", log.ToString());
    // only the internal nameservice ns1 should have a BPOfferService
    IDictionary<string, BPOfferService> map = (IDictionary<string, BPOfferService>)
        Whitebox.GetInternalState(bpm, "bpByNameserviceId");
    NUnit.Framework.Assert.IsFalse(map.Contains("ns2"));
    NUnit.Framework.Assert.IsFalse(map.Contains("ns3"));
    NUnit.Framework.Assert.IsTrue(map.Contains("ns1"));
    log.Length = 0;
}
/// <exception cref="System.IO.IOException"/> /// <exception cref="System.Exception"/> public virtual void TestRegistration() { XDR req = new XDR(); RpcCall.GetInstance(++xid, RpcProgramPortmap.Program, RpcProgramPortmap.Version, RpcProgramPortmap.PmapprocSet, new CredentialsNone(), new VerifierNone()).Write( req); PortmapMapping sent = new PortmapMapping(90000, 1, PortmapMapping.TransportTcp, 1234 ); sent.Serialize(req); byte[] reqBuf = req.GetBytes(); DatagramSocket s = new DatagramSocket(); DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.Length, pm.GetUdpServerLoAddress ()); try { s.Send(p); } finally { s.Close(); } // Give the server a chance to process the request Thread.Sleep(100); bool found = false; IDictionary <string, PortmapMapping> map = (IDictionary <string, PortmapMapping>)Whitebox .GetInternalState(pm.GetHandler(), "map"); foreach (PortmapMapping m in map.Values) { if (m.GetPort() == sent.GetPort() && PortmapMapping.Key(m).Equals(PortmapMapping. Key(sent))) { found = true; break; } } Assert.True("Registration failed", found); }