/// <exception cref="System.IO.IOException"/>
private void OnGetFileChecksum(ChannelHandlerContext ctx)
{
    MD5MD5CRC32FileChecksum checksum = null;
    string nnId = @params.NamenodeId();
    DFSClient dfsclient = NewDfsClient(nnId, conf);
    try
    {
        checksum = dfsclient.GetFileChecksum(path, long.MaxValue);
        dfsclient.Close();
        dfsclient = null;
    }
    finally
    {
        IOUtils.Cleanup(Log, dfsclient);
    }
    byte[] js = Sharpen.Runtime.GetBytesForString(JsonUtil.ToJsonString(checksum),
        Charsets.Utf8);
    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HttpVersion.Http11,
        HttpResponseStatus.Ok, Unpooled.WrappedBuffer(js));
    resp.Headers().Set(HttpHeaders.Names.ContentType, ApplicationJsonUtf8);
    resp.Headers().Set(HttpHeaders.Names.ContentLength, js.Length);
    resp.Headers().Set(HttpHeaders.Names.Connection, HttpHeaders.Values.Close);
    ctx.WriteAndFlush(resp).AddListener(ChannelFutureListener.Close);
}
public virtual void ShutDownCluster()
{
    client.Close();
    fs.Close();
    cluster.ShutdownDataNodes();
    cluster.Shutdown();
}
/// <summary>Close all DFSClient instances in the Cache.</summary>
/// <param name="onlyAutomatic">only close those that are marked for automatic closing</param>
/// <exception cref="System.IO.IOException"/>
internal virtual void CloseAll(bool onlyAutomatic)
{
    lock (this)
    {
        // Note: onlyAutomatic is currently not consulted; every cached client is
        // closed. This mirrors the upstream DFSClientCache#closeAll behavior.
        IList<IOException> exceptions = new AList<IOException>();
        ConcurrentMap<string, DFSClient> map = clientCache.AsMap();
        foreach (KeyValuePair<string, DFSClient> item in map)
        {
            DFSClient client = item.Value;
            if (client != null)
            {
                try
                {
                    client.Close();
                }
                catch (IOException ioe)
                {
                    exceptions.AddItem(ioe);
                }
            }
        }
        if (!exceptions.IsEmpty())
        {
            throw MultipleIOException.CreateIOException(exceptions);
        }
    }
}
/// <exception cref="System.Exception"/>
public virtual void TestDatanodePeersXceiver()
{
    // Test DataXceiver has correct peer-dataxceiver pairs for sending OOB message
    try
    {
        StartCluster();
        // Create files in DFS.
        string testFile1 = "/" + GenericTestUtils.GetMethodName() + ".01.dat";
        string testFile2 = "/" + GenericTestUtils.GetMethodName() + ".02.dat";
        string testFile3 = "/" + GenericTestUtils.GetMethodName() + ".03.dat";
        DFSClient client1 = new DFSClient(NameNode.GetAddress(conf), conf);
        DFSClient client2 = new DFSClient(NameNode.GetAddress(conf), conf);
        DFSClient client3 = new DFSClient(NameNode.GetAddress(conf), conf);
        DFSOutputStream s1 = (DFSOutputStream)client1.Create(testFile1, true);
        DFSOutputStream s2 = (DFSOutputStream)client2.Create(testFile2, true);
        DFSOutputStream s3 = (DFSOutputStream)client3.Create(testFile3, true);
        byte[] toWrite = new byte[1024 * 1024 * 8];
        Random rb = new Random(1111);
        rb.NextBytes(toWrite);
        s1.Write(toWrite, 0, 1024 * 1024 * 8);
        s1.Flush();
        s2.Write(toWrite, 0, 1024 * 1024 * 8);
        s2.Flush();
        s3.Write(toWrite, 0, 1024 * 1024 * 8);
        s3.Flush();
        // The original assertion compared GetNumPeersXceiver() against itself, which
        // is always true. Given the test's stated intent (the peer count must match
        // the peer-xceiver pair count), compare the two counts instead. GetNumPeers()
        // is assumed to mirror DataXceiverServer#getNumPeers() from upstream Hadoop.
        NUnit.Framework.Assert.IsTrue(dn0.GetXferServer().GetNumPeers() ==
            dn0.GetXferServer().GetNumPeersXceiver());
        s1.Close();
        s2.Close();
        s3.Close();
        NUnit.Framework.Assert.IsTrue(dn0.GetXferServer().GetNumPeers() ==
            dn0.GetXferServer().GetNumPeersXceiver());
        client1.Close();
        client2.Close();
        client3.Close();
    }
    finally
    {
        ShutdownCluster();
    }
}
public virtual void TestReadFromOneDN()
{
    HdfsConfiguration configuration = new HdfsConfiguration();
    // One of the goals of this test is to verify that we don't open more
    // than one socket. So use a different client context, so that we
    // get our own socket cache, rather than sharing with the other test
    // instances. Also use a really long socket timeout so that nothing
    // gets closed before we get around to checking the cache size at the end.
    string contextName = "testReadFromOneDNContext";
    configuration.Set(DFSConfigKeys.DfsClientContext, contextName);
    configuration.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
    BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
    Path testFile = new Path("/testConnCache.dat");
    byte[] authenticData = util.WriteFile(testFile, FileSize / 1024);
    DFSClient client = new DFSClient(
        new IPEndPoint("localhost", util.GetCluster().GetNameNodePort()), util.GetConf());
    ClientContext cacheContext = ClientContext.Get(contextName, client.GetConf());
    DFSInputStream @in = client.Open(testFile.ToString());
    Log.Info("opened " + testFile.ToString());
    byte[] dataBuf = new byte[BlockSize];
    // Initial read
    Pread(@in, 0, dataBuf, 0, dataBuf.Length, authenticData);
    // Read again and verify that the socket is the same
    Pread(@in, FileSize - dataBuf.Length, dataBuf, 0, dataBuf.Length, authenticData);
    Pread(@in, 1024, dataBuf, 0, dataBuf.Length, authenticData);
    // No seek; just read
    Pread(@in, -1, dataBuf, 0, dataBuf.Length, authenticData);
    Pread(@in, 64, dataBuf, 0, dataBuf.Length / 2, authenticData);
    @in.Close();
    client.Close();
    NUnit.Framework.Assert.AreEqual(1,
        ClientContext.GetFromConf(configuration).GetPeerCache().Size());
}
public virtual void TestDeletedBlockWhenAddBlockIsInEdit()
{
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(1).Build();
    DFSClient client = null;
    try
    {
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual("Number of namenodes is not 2", 2,
            cluster.GetNumNameNodes());
        // Transition namenode 0 to active.
        cluster.TransitionToActive(0);
        NUnit.Framework.Assert.IsTrue("Namenode 0 should be in active state",
            cluster.GetNameNode(0).IsActiveState());
        NUnit.Framework.Assert.IsTrue("Namenode 1 should be in standby state",
            cluster.GetNameNode(1).IsStandbyState());
        // Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
        // to true.
        DataNodeTestUtils.TriggerHeartbeat(cluster.GetDataNodes()[0]);
        FileSystem fs = cluster.GetFileSystem(0);
        // Trigger blockReport to mark DatanodeStorageInfo#blockContentsStale
        // to false.
        cluster.GetDataNodes()[0].TriggerBlockReport(
            new BlockReportOptions.Factory().SetIncremental(false).Build());
        Path fileName = new Path("/tmp.txt");
        // Create a file with one block.
        DFSTestUtil.CreateFile(fs, fileName, 10L, (short)1, 1234L);
        DFSTestUtil.WaitReplication(fs, fileName, (short)1);
        client = new DFSClient(cluster.GetFileSystem(0).GetUri(), conf);
        IList<LocatedBlock> locatedBlocks = client.GetNamenode()
            .GetBlockLocations("/tmp.txt", 0, 10L).GetLocatedBlocks();
        NUnit.Framework.Assert.IsTrue(locatedBlocks.Count == 1);
        NUnit.Framework.Assert.IsTrue(locatedBlocks[0].GetLocations().Length == 1);
        // Add a second datanode to the cluster.
        cluster.StartDataNodes(conf, 1, true, null, null, null, null);
        NUnit.Framework.Assert.AreEqual("Number of datanodes should be 2", 2,
            cluster.GetDataNodes().Count);
        DataNode dn0 = cluster.GetDataNodes()[0];
        DataNode dn1 = cluster.GetDataNodes()[1];
        string activeNNBPId = cluster.GetNamesystem(0).GetBlockPoolId();
        DatanodeDescriptor sourceDnDesc = NameNodeAdapter.GetDatanode(
            cluster.GetNamesystem(0), dn0.GetDNRegistrationForBP(activeNNBPId));
        DatanodeDescriptor destDnDesc = NameNodeAdapter.GetDatanode(
            cluster.GetNamesystem(0), dn1.GetDNRegistrationForBP(activeNNBPId));
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
        Log.Info("replaceBlock: " + ReplaceBlock(block, (DatanodeInfo)sourceDnDesc,
            (DatanodeInfo)sourceDnDesc, (DatanodeInfo)destDnDesc));
        // Wait for the FsDatasetAsyncDiskService to delete the block.
        Sharpen.Thread.Sleep(3000);
        // Trigger the incremental block report to report the deleted block to
        // the namenode.
        cluster.GetDataNodes()[0].TriggerBlockReport(
            new BlockReportOptions.Factory().SetIncremental(true).Build());
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue("Namenode 1 should be in active state",
            cluster.GetNameNode(1).IsActiveState());
        NUnit.Framework.Assert.IsTrue("Namenode 0 should be in standby state",
            cluster.GetNameNode(0).IsStandbyState());
        client.Close();
        // Open a new client for the new active namenode.
        client = new DFSClient(cluster.GetFileSystem(1).GetUri(), conf);
        IList<LocatedBlock> locatedBlocks1 = client.GetNamenode()
            .GetBlockLocations("/tmp.txt", 0, 10L).GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(1, locatedBlocks1.Count);
        NUnit.Framework.Assert.AreEqual("The block should be only on 1 datanode", 1,
            locatedBlocks1[0].GetLocations().Length);
    }
    finally
    {
        IOUtils.Cleanup(null, client);
        cluster.Shutdown();
    }
}
public virtual void TestReadSelectNonStaleDatanode()
{
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
    long staleInterval = 30 * 1000 * 60;
    conf.SetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, staleInterval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(numDatanodes).Racks(racks).Build();
    cluster.WaitActive();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    IList<DatanodeDescriptor> nodeInfoList = cluster.GetNameNode().GetNamesystem()
        .GetBlockManager().GetDatanodeManager()
        .GetDatanodeListForReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Unexpected number of datanodes", numDatanodes,
        nodeInfoList.Count);
    FileSystem fileSys = cluster.GetFileSystem();
    FSDataOutputStream stm = null;
    try
    {
        // Do the writing but do not close the FSDataOutputStream,
        // in order to mimic an ongoing write.
        Path fileName = new Path("/file1");
        stm = fileSys.Create(fileName, true,
            fileSys.GetConf().GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
            (short)3, blockSize);
        stm.Write(new byte[(blockSize * 3) / 2]);
        // We do not close the stream so that the write appears to be still ongoing.
        stm.Hflush();
        LocatedBlocks blocks = client.GetNamenode().GetBlockLocations(
            fileName.ToString(), 0, blockSize);
        DatanodeInfo[] nodes = blocks.Get(0).GetLocations();
        NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
        DataNode staleNode = null;
        DatanodeDescriptor staleNodeInfo = null;
        // Stop the heartbeat of the first node.
        staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
        NUnit.Framework.Assert.IsNotNull(staleNode);
        // Set the first node as stale.
        staleNodeInfo = cluster.GetNameNode().GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(staleNode.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, -(staleInterval + 1));
        LocatedBlocks blocksAfterStale = client.GetNamenode().GetBlockLocations(
            fileName.ToString(), 0, blockSize);
        DatanodeInfo[] nodesAfterStale = blocksAfterStale.Get(0).GetLocations();
        NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
        NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(),
            nodes[0].GetHostName());
        // Restart the staleNode's heartbeat.
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(staleNode, false);
        // Reset the first node as non-stale, so as to avoid two stale nodes.
        DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, 0);
        LocatedBlock lastBlock = client.GetLocatedBlocks(fileName.ToString(), 0,
            long.MaxValue).GetLastLocatedBlock();
        nodes = lastBlock.GetLocations();
        NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
        // Stop the heartbeat of the first node for the last block.
        staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
        NUnit.Framework.Assert.IsNotNull(staleNode);
        // Set the node as stale.
        DatanodeDescriptor dnDesc = cluster.GetNameNode().GetNamesystem()
            .GetBlockManager().GetDatanodeManager()
            .GetDatanode(staleNode.GetDatanodeId());
        DFSTestUtil.ResetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
        LocatedBlock lastBlockAfterStale = client.GetLocatedBlocks(fileName.ToString(),
            0, long.MaxValue).GetLastLocatedBlock();
        nodesAfterStale = lastBlockAfterStale.GetLocations();
        NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
        NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(),
            nodes[0].GetHostName());
    }
    finally
    {
        if (stm != null)
        {
            stm.Close();
        }
        client.Close();
        cluster.Shutdown();
    }
}
public virtual void TestCopyOnWrite()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    try
    {
        // Create a new file, write to it and close it.
        Path file1 = new Path("/filestatus.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        WriteFile(stm);
        stm.Close();
        // Get a handle to the datanode.
        DataNode[] dn = cluster.ListDataNodes();
        NUnit.Framework.Assert.IsTrue("There should be only one datanode but found " +
            dn.Length, dn.Length == 1);
        LocatedBlocks locations = client.GetNamenode().GetBlockLocations(
            file1.ToString(), 0, long.MaxValue);
        IList<LocatedBlock> blocks = locations.GetLocatedBlocks();
        // Create hard links for a few of the blocks.
        for (int i = 0; i < blocks.Count; i = i + 2)
        {
            ExtendedBlock b = blocks[i].GetBlock();
            FilePath f = DataNodeTestUtils.GetFile(dn[0], b.GetBlockPoolId(),
                b.GetLocalBlock().GetBlockId());
            FilePath link = new FilePath(f.ToString() + ".link");
            System.Console.Out.WriteLine("Creating hardlink for File " + f + " to " + link);
            HardLink.CreateHardLink(f, link);
        }
        // Detach all blocks. This should remove hardlinks (if any).
        for (int i_1 = 0; i_1 < blocks.Count; i_1++)
        {
            ExtendedBlock b = blocks[i_1].GetBlock();
            System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
            NUnit.Framework.Assert.IsTrue(
                "Detaching block " + b + " should have returned true",
                DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
        }
        // Since the blocks were already detached earlier, these calls should
        // return false.
        for (int i_2 = 0; i_2 < blocks.Count; i_2++)
        {
            ExtendedBlock b = blocks[i_2].GetBlock();
            System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
            NUnit.Framework.Assert.IsTrue(
                "Detaching block " + b + " should have returned false",
                !DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
        }
    }
    finally
    {
        client.Close();
        fs.Close();
        cluster.Shutdown();
    }
}
public virtual void TestRead()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = GetConf(numDataNodes);
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        NameNode nn = cluster.GetNameNode();
        NamenodeProtocols nnProto = nn.GetRpcServer();
        BlockManager bm = nn.GetNamesystem().GetBlockManager();
        BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
        // Set a short token lifetime (1 second) initially.
        SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
        Path fileToRead = new Path(FileToRead);
        FileSystem fs = cluster.GetFileSystem();
        CreateFile(fs, fileToRead);
        /*
         * setup for testing expiration handling of cached tokens
         */
        // read using blockSeekTo(). Acquired tokens are cached in in1
        FSDataInputStream in1 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // read using blockSeekTo(). Acquired tokens are cached in in2
        FSDataInputStream in2 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // read using fetchBlockByteRange(). Acquired tokens are cached in in3
        FSDataInputStream in3 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing READ interface on DN using a BlockReader
         */
        DFSClient client = null;
        try
        {
            client = new DFSClient(
                new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
        }
        finally
        {
            if (client != null)
            {
                client.Close();
            }
        }
        IList<LocatedBlock> locatedBlocks = nnProto.GetBlockLocations(
            FileToRead, 0, FileSize).GetLocatedBlocks();
        LocatedBlock lblock = locatedBlocks[0];  // first block
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> myToken =
            lblock.GetBlockToken();
        // Verify token is not expired.
        NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // Read with valid token, should succeed.
        TryRead(conf, lblock, true);
        /*
         * wait till myToken and all cached tokens in in1, in2 and in3 expire
         */
        while (!SecurityTestUtil.IsBlockTokenExpired(myToken))
        {
            try
            {
                Sharpen.Thread.Sleep(10);
            }
            catch (Exception)
            {
            }
        }
        /*
         * continue testing READ interface on DN using a BlockReader
         */
        // Verify token is expired.
        NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // Read should fail.
        TryRead(conf, lblock, false);
        // Use a valid new token.
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(),
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // Read should succeed.
        TryRead(conf, lblock, true);
        // Use a token with wrong blockID.
        ExtendedBlock wrongBlock = new ExtendedBlock(lblock.GetBlock().GetBlockPoolId(),
            lblock.GetBlock().GetBlockId() + 1);
        lblock.SetBlockToken(sm.GenerateToken(wrongBlock,
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // Read should fail.
        TryRead(conf, lblock, false);
        // Use a token with wrong access modes.
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(),
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Write,
                BlockTokenSecretManager.AccessMode.Copy,
                BlockTokenSecretManager.AccessMode.Replace)));
        // Read should fail.
        TryRead(conf, lblock, false);
        // Set a long token lifetime for future tokens.
        SecurityTestUtil.SetBlockTokenLifetime(sm, 600 * 1000L);
        /*
         * testing that when cached tokens are expired, DFSClient will re-fetch
         * tokens transparently for READ.
         */
        // Confirm all tokens cached in in1 are expired by now.
        IList<LocatedBlock> lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk in lblocks)
        {
            NUnit.Framework.Assert.IsTrue(
                SecurityTestUtil.IsBlockTokenExpired(blk.GetBlockToken()));
        }
        // Verify blockSeekTo() is able to re-fetch token transparently.
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // Confirm all tokens cached in in2 are expired by now.
        IList<LocatedBlock> lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_1 in lblocks2)
        {
            NUnit.Framework.Assert.IsTrue(
                SecurityTestUtil.IsBlockTokenExpired(blk_1.GetBlockToken()));
        }
        // Verify blockSeekTo() is able to re-fetch token transparently (testing
        // via another interface method).
        NUnit.Framework.Assert.IsTrue(in2.SeekToNewSource(0));
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // Confirm all tokens cached in in3 are expired by now.
        IList<LocatedBlock> lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_2 in lblocks3)
        {
            NUnit.Framework.Assert.IsTrue(
                SecurityTestUtil.IsBlockTokenExpired(blk_2.GetBlockToken()));
        }
        // Verify fetchBlockByteRange() is able to re-fetch token transparently.
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after datanodes are restarted on the same ports, cached
         * tokens should still work and there is no need to fetch new tokens from
         * namenode. This test should run while namenode is down (to make sure no
         * new tokens can be fetched from namenode).
         */
        // Restart datanodes on the same ports that they currently use.
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        cluster.ShutdownNameNode(0);
        // Confirm tokens cached in in1 are still valid.
        lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk_3 in lblocks)
        {
            NUnit.Framework.Assert.IsFalse(
                SecurityTestUtil.IsBlockTokenExpired(blk_3.GetBlockToken()));
        }
        // Verify blockSeekTo() still works (forced to use cached tokens).
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // Confirm tokens cached in in2 are still valid.
        lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_4 in lblocks2)
        {
            NUnit.Framework.Assert.IsFalse(
                SecurityTestUtil.IsBlockTokenExpired(blk_4.GetBlockToken()));
        }
        // Verify blockSeekTo() still works (forced to use cached tokens).
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // Confirm tokens cached in in3 are still valid.
        lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_5 in lblocks3)
        {
            NUnit.Framework.Assert.IsFalse(
                SecurityTestUtil.IsBlockTokenExpired(blk_5.GetBlockToken()));
        }
        // Verify fetchBlockByteRange() still works (forced to use cached tokens).
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when namenode is restarted, cached tokens should still
         * work and there is no need to fetch new tokens from namenode. Like the
         * previous test, this test should also run while namenode is down. The
         * setup for this test depends on the previous test.
         */
        // Restart the namenode and then shut it down for test.
        cluster.RestartNameNode(0);
        cluster.ShutdownNameNode(0);
        // Verify blockSeekTo() still works (forced to use cached tokens).
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // Verify again blockSeekTo() still works (forced to use cached tokens).
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // Verify fetchBlockByteRange() still works (forced to use cached tokens).
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after both namenode and datanodes got restarted (namenode
         * first, followed by datanodes), DFSClient can't access DN without
         * re-fetching tokens and is able to re-fetch tokens transparently. The
         * setup of this test depends on the previous test.
         */
        // Restore the cluster and restart the datanodes for test.
        cluster.RestartNameNode(0);
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // Shutdown namenode so that DFSClient can't get new tokens from namenode.
        cluster.ShutdownNameNode(0);
        // Verify blockSeekTo() fails (cached tokens become invalid).
        in1.Seek(0);
        NUnit.Framework.Assert.IsFalse(CheckFile1(in1));
        // Verify fetchBlockByteRange() fails (cached tokens become invalid).
        NUnit.Framework.Assert.IsFalse(CheckFile2(in3));
        // Restart the namenode to allow DFSClient to re-fetch tokens.
        cluster.RestartNameNode(0);
        // Verify blockSeekTo() works again (by transparently re-fetching
        // tokens from namenode).
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // Verify fetchBlockByteRange() works again (by transparently
        // re-fetching tokens from namenode).
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when datanodes are restarted on different ports, DFSClient
         * is able to re-fetch tokens transparently to connect to them
         */
        // Restart datanodes on newly assigned ports.
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(false));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // Verify blockSeekTo() is able to re-fetch token transparently.
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // Verify blockSeekTo() is able to re-fetch token transparently.
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // Verify fetchBlockByteRange() is able to re-fetch token transparently.
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}