/// <summary>
/// Counts the datanodes that hold replicas of each block of a file and
/// records the per-block replica counts in the map.
/// </summary>
/// <param name="map"/>
/// <param name="path"/>
/// <param name="size"/>
/// <returns>total number of block replicas reported by the namenode</returns>
/// <exception cref="System.IO.IOException"/>
private int CountNNBlocks(IDictionary<string, TestDataNodeVolumeFailure.BlockLocs> map,
    string path, long size)
{
    int total = 0;
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    IList<LocatedBlock> locatedBlocks = nn.GetBlockLocations(path, 0, size).GetLocatedBlocks();
    foreach (LocatedBlock lb in locatedBlocks)
    {
        string blockId = string.Empty + lb.GetBlock().GetBlockId();
        DatanodeInfo[] dn_locs = lb.GetLocations();
        // use TryGetValue: indexing a missing key on IDictionary would throw,
        // whereas the intent is to create a new entry for unseen blocks
        TestDataNodeVolumeFailure.BlockLocs bl;
        if (!map.TryGetValue(blockId, out bl))
        {
            bl = new TestDataNodeVolumeFailure.BlockLocs(this);
        }
        total += dn_locs.Length;
        bl.num_locs += dn_locs.Length;
        map[blockId] = bl;
    }
    return total;
}
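// Hedged usage sketch (not part of the original test): shows how CountNNBlocks
// could be used to compare the namenode's view of replica counts before and
// after an induced volume failure. The method name and local variables below
// are assumptions for illustration only; only CountNNBlocks, TriggerFailure and
// TestDataNodeVolumeFailure.BlockLocs come from the surrounding code.
private void SketchCompareReplicaCounts(string path, long size)
{
    IDictionary<string, TestDataNodeVolumeFailure.BlockLocs> before =
        new Dictionary<string, TestDataNodeVolumeFailure.BlockLocs>();
    int replicasBefore = CountNNBlocks(before, path, size);
    // ... a volume failure would be induced here, e.g. via TriggerFailure(path, size) ...
    IDictionary<string, TestDataNodeVolumeFailure.BlockLocs> after =
        new Dictionary<string, TestDataNodeVolumeFailure.BlockLocs>();
    int replicasAfter = CountNNBlocks(after, path, size);
    // after a failure the namenode should report no more replicas than before
    NUnit.Framework.Assert.IsTrue(replicasAfter <= replicasBefore);
}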
public virtual void TestGetBlockLocations()
{
    NamenodeProtocols namenode = cluster.GetNameNodeRpc();
    Path p = new Path(BaseDir, "file2.dat");
    string src = p.ToString();
    FSDataOutputStream @out = TestFileCreation.CreateFile(hdfs, p, 3);
    // write a half block
    int len = (int)(((uint)BlockSize) >> 1);
    WriteFile(p, @out, len);
    for (int i = 1; i < NumBlocks; )
    {
        // verify consistency of the block list reported by the namenode
        LocatedBlocks lb = namenode.GetBlockLocations(src, 0, len);
        IList<LocatedBlock> blocks = lb.GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(i, blocks.Count);
        Block b = blocks[blocks.Count - 1].GetBlock().GetLocalBlock();
        NUnit.Framework.Assert.IsTrue(b is BlockInfoContiguousUnderConstruction);
        if (++i < NumBlocks)
        {
            // write one more block
            WriteFile(p, @out, BlockSize);
            len += BlockSize;
        }
    }
    // close file
    @out.Close();
}
public virtual void TestRetryAddBlockWhileInChooseTarget()
{
    string src = "/testRetryAddBlockWhileInChooseTarget";
    FSNamesystem ns = cluster.GetNamesystem();
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    // create file
    nn.Create(src, FsPermission.GetFileDefault(), "clientName",
        new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
    // start first addBlock()
    Log.Info("Starting first addBlock for " + src);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    DatanodeStorageInfo[] targets = ns.GetNewBlockTargets(src, INodeId.GrandfatherInodeId,
        "clientName", null, null, null, onRetryBlock);
    NUnit.Framework.Assert.IsNotNull("Targets must be generated", targets);
    // run second addBlock()
    Log.Info("Starting second addBlock for " + src);
    nn.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.IsTrue("Penultimate block must be complete",
        CheckFileProgress(src, false));
    LocatedBlocks lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb2 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb2.GetLocations().Length);
    // continue first addBlock()
    LocatedBlock newBlock = ns.StoreAllocatedBlock(src, INodeId.GrandfatherInodeId,
        "clientName", null, targets);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb2.GetBlock(), newBlock.GetBlock());
    // check locations
    lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb1 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb1.GetLocations().Length);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock());
}
/// <summary>Access each block on the 2nd DataNode until one of the accesses fails.</summary>
/// <param name="path"/>
/// <param name="size"/>
/// <exception cref="System.IO.IOException"/>
private void TriggerFailure(string path, long size)
{
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    IList<LocatedBlock> locatedBlocks = nn.GetBlockLocations(path, 0, size).GetLocatedBlocks();
    foreach (LocatedBlock lb in locatedBlocks)
    {
        DatanodeInfo dinfo = lb.GetLocations()[1];
        ExtendedBlock b = lb.GetBlock();
        try
        {
            AccessBlock(dinfo, lb);
        }
        catch (IOException)
        {
            System.Console.Out.WriteLine("Failure triggered, on block: " + b.GetBlockId() +
                "; corresponding volume should be removed by now");
            break;
        }
    }
}
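// Hedged sketch (assumption, not part of the original test): after TriggerFailure
// has taken down a volume on the 2nd datanode, the replicas on the other datanode
// should still be readable through the same AccessBlock helper used above. Only
// AccessBlock, cluster and the HDFS types come from the surrounding code; the
// method name is illustrative.
private void SketchVerifySurvivingReplicas(string path, long size)
{
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    foreach (LocatedBlock lb in nn.GetBlockLocations(path, 0, size).GetLocatedBlocks())
    {
        // location 0 is the replica not affected by the induced failure
        AccessBlock(lb.GetLocations()[0], lb);
    }
}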
public virtual void TestRead()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = GetConf(numDataNodes);
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        NameNode nn = cluster.GetNameNode();
        NamenodeProtocols nnProto = nn.GetRpcServer();
        BlockManager bm = nn.GetNamesystem().GetBlockManager();
        BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
        // set a short token lifetime (1 second) initially
        SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
        Path fileToRead = new Path(FileToRead);
        FileSystem fs = cluster.GetFileSystem();
        CreateFile(fs, fileToRead);
        /*
         * setup for testing expiration handling of cached tokens
         */
        // read using blockSeekTo(). Acquired tokens are cached in in1
        FSDataInputStream in1 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // read using blockSeekTo(). Acquired tokens are cached in in2
        FSDataInputStream in2 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // read using fetchBlockByteRange(). Acquired tokens are cached in in3
        FSDataInputStream in3 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing READ interface on DN using a BlockReader
         */
        DFSClient client = null;
        try
        {
            client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
        }
        finally
        {
            if (client != null)
            {
                client.Close();
            }
        }
        IList<LocatedBlock> locatedBlocks =
            nnProto.GetBlockLocations(FileToRead, 0, FileSize).GetLocatedBlocks();
        // first block
        LocatedBlock lblock = locatedBlocks[0];
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> myToken = lblock.GetBlockToken();
        // verify token is not expired
        NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // read with valid token, should succeed
        TryRead(conf, lblock, true);
        /*
         * wait till myToken and all cached tokens in in1, in2 and in3 expire
         */
        while (!SecurityTestUtil.IsBlockTokenExpired(myToken))
        {
            try
            {
                Sharpen.Thread.Sleep(10);
            }
            catch (Exception)
            {
            }
        }
        /*
         * continue testing READ interface on DN using a BlockReader
         */
        // verify token is expired
        NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // read should fail
        TryRead(conf, lblock, false);
        // use a valid new token
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(),
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // read should succeed
        TryRead(conf, lblock, true);
        // use a token with wrong blockID
        ExtendedBlock wrongBlock = new ExtendedBlock(lblock.GetBlock().GetBlockPoolId(),
            lblock.GetBlock().GetBlockId() + 1);
        lblock.SetBlockToken(sm.GenerateToken(wrongBlock,
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // read should fail
        TryRead(conf, lblock, false);
        // use a token with wrong access modes
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(),
            EnumSet.Of(BlockTokenSecretManager.AccessMode.Write,
                BlockTokenSecretManager.AccessMode.Copy,
                BlockTokenSecretManager.AccessMode.Replace)));
        // read should fail
        TryRead(conf, lblock, false);
        // set a long token lifetime for future tokens
        SecurityTestUtil.SetBlockTokenLifetime(sm, 600 * 1000L);
        /*
         * testing that when cached tokens are expired, DFSClient will re-fetch
         * tokens transparently for READ.
         */
        // confirm all tokens cached in in1 are expired by now
        IList<LocatedBlock> lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk in lblocks)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk.GetBlockToken()));
        }
        // verify blockSeekTo() is able to re-fetch token transparently
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // confirm all tokens cached in in2 are expired by now
        IList<LocatedBlock> lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_1 in lblocks2)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_1.GetBlockToken()));
        }
        // verify blockSeekTo() is able to re-fetch token transparently (testing
        // via another interface method)
        NUnit.Framework.Assert.IsTrue(in2.SeekToNewSource(0));
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // confirm all tokens cached in in3 are expired by now
        IList<LocatedBlock> lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_2 in lblocks3)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_2.GetBlockToken()));
        }
        // verify fetchBlockByteRange() is able to re-fetch token transparently
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after datanodes are restarted on the same ports, cached
         * tokens should still work and there is no need to fetch new tokens from
         * namenode. This test should run while namenode is down (to make sure no
         * new tokens can be fetched from namenode).
         */
        // restart datanodes on the same ports that they currently use
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        cluster.ShutdownNameNode(0);
        // confirm tokens cached in in1 are still valid
        lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk_3 in lblocks)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_3.GetBlockToken()));
        }
        // verify blockSeekTo() still works (forced to use cached tokens)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // confirm tokens cached in in2 are still valid
        lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_4 in lblocks2)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_4.GetBlockToken()));
        }
        // verify blockSeekTo() still works (forced to use cached tokens)
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // confirm tokens cached in in3 are still valid
        lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_5 in lblocks3)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_5.GetBlockToken()));
        }
        // verify fetchBlockByteRange() still works (forced to use cached tokens)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when namenode is restarted, cached tokens should still
         * work and there is no need to fetch new tokens from namenode. Like the
         * previous test, this test should also run while namenode is down. The
         * setup for this test depends on the previous test.
         */
        // restart the namenode and then shut it down for test
        cluster.RestartNameNode(0);
        cluster.ShutdownNameNode(0);
        // verify blockSeekTo() still works (forced to use cached tokens)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // verify again blockSeekTo() still works (forced to use cached tokens)
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() still works (forced to use cached tokens)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after both namenode and datanodes got restarted (namenode
         * first, followed by datanodes), DFSClient can't access DN without
         * re-fetching tokens and is able to re-fetch tokens transparently. The
         * setup of this test depends on the previous test.
         */
        // restore the cluster and restart the datanodes for test
        cluster.RestartNameNode(0);
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // shutdown namenode so that DFSClient can't get new tokens from namenode
        cluster.ShutdownNameNode(0);
        // verify blockSeekTo() fails (cached tokens become invalid)
        in1.Seek(0);
        NUnit.Framework.Assert.IsFalse(CheckFile1(in1));
        // verify fetchBlockByteRange() fails (cached tokens become invalid)
        NUnit.Framework.Assert.IsFalse(CheckFile2(in3));
        // restart the namenode to allow DFSClient to re-fetch tokens
        cluster.RestartNameNode(0);
        // verify blockSeekTo() works again (by transparently re-fetching
        // tokens from namenode)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() works again (by transparently
        // re-fetching tokens from namenode)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when datanodes are restarted on different ports, DFSClient
         * is able to re-fetch tokens transparently to connect to them
         */
        // restart datanodes on newly assigned ports
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(false));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // verify blockSeekTo() is able to re-fetch token transparently
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // verify blockSeekTo() is able to re-fetch token transparently
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() is able to re-fetch token transparently
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
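// Hedged helper sketch (assumption, not in the original test): a bounded variant
// of the token-expiry wait used in TestRead above. The loop in the test spins
// until the token expires; a timeout such as this one would fail fast if the
// token lifetime were ever misconfigured. Only SecurityTestUtil, Sharpen.Thread
// and the token type come from the surrounding code; the method name and the
// use of System.Diagnostics.Stopwatch are illustrative.
private static void WaitForTokenExpiry(
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token, long timeoutMs)
{
    System.Diagnostics.Stopwatch timer = System.Diagnostics.Stopwatch.StartNew();
    while (!SecurityTestUtil.IsBlockTokenExpired(token))
    {
        NUnit.Framework.Assert.IsTrue("Block token did not expire within " + timeoutMs + " ms",
            timer.ElapsedMilliseconds < timeoutMs);
        try
        {
            Sharpen.Thread.Sleep(10);
        }
        catch (Exception)
        {
            // ignore interrupts while polling
        }
    }
}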
public virtual void TestConcat()
{
    int numFiles = 10;
    long fileLen = blockSize * 3;
    HdfsFileStatus fStatus;
    FSDataInputStream stm;
    string trg = "/trg";
    Path trgPath = new Path(trg);
    DFSTestUtil.CreateFile(dfs, trgPath, fileLen, ReplFactor, 1);
    fStatus = nn.GetFileInfo(trg);
    long trgLen = fStatus.GetLen();
    long trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
    Path[] files = new Path[numFiles];
    byte[][] bytes = new byte[][] { new byte[(int)fileLen], new byte[(int)fileLen],
        new byte[(int)fileLen], new byte[(int)fileLen], new byte[(int)fileLen],
        new byte[(int)fileLen], new byte[(int)fileLen], new byte[(int)fileLen],
        new byte[(int)fileLen], new byte[(int)fileLen], new byte[(int)fileLen] };
    LocatedBlocks[] lblocks = new LocatedBlocks[numFiles];
    long[] lens = new long[numFiles];
    stm = dfs.Open(trgPath);
    stm.ReadFully(0, bytes[0]);
    stm.Close();
    int i;
    for (i = 0; i < files.Length; i++)
    {
        files[i] = new Path("/file" + i);
        Path path = files[i];
        System.Console.Out.WriteLine("Creating file " + path);
        // make files with different content
        DFSTestUtil.CreateFile(dfs, path, fileLen, ReplFactor, i);
        fStatus = nn.GetFileInfo(path.ToUri().GetPath());
        lens[i] = fStatus.GetLen();
        // file of the same length
        NUnit.Framework.Assert.AreEqual(trgLen, lens[i]);
        lblocks[i] = nn.GetBlockLocations(path.ToUri().GetPath(), 0, lens[i]);
        // read the file
        stm = dfs.Open(path);
        stm.ReadFully(0, bytes[i + 1]);
        stm.Close();
    }
    // check permissions - try the operation with the "wrong" user
    UserGroupInformation user1 = UserGroupInformation.CreateUserForTesting("theDoctor",
        new string[] { "tardis" });
    DistributedFileSystem hdfs = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(user1, conf);
    try
    {
        hdfs.Concat(trgPath, files);
        NUnit.Framework.Assert.Fail("Permission exception expected");
    }
    catch (IOException ie)
    {
        // expected
        System.Console.Out.WriteLine("Got expected exception for permissions:" +
            ie.GetLocalizedMessage());
    }
    // check count update
    ContentSummary cBefore = dfs.GetContentSummary(trgPath.GetParent());
    // re-sort the file array so that the INode ids are no longer in order
    for (int j = 0; j < files.Length / 2; j++)
    {
        Path tempPath = files[j];
        files[j] = files[files.Length - 1 - j];
        files[files.Length - 1 - j] = tempPath;
        byte[] tempBytes = bytes[1 + j];
        bytes[1 + j] = bytes[files.Length - 1 - j + 1];
        bytes[files.Length - 1 - j + 1] = tempBytes;
    }
    // now concatenate
    dfs.Concat(trgPath, files);
    // verify count
    ContentSummary cAfter = dfs.GetContentSummary(trgPath.GetParent());
    NUnit.Framework.Assert.AreEqual(cBefore.GetFileCount(), cAfter.GetFileCount() + files.Length);
    // verify other stuff
    long totalLen = trgLen;
    long totalBlocks = trgBlocks;
    for (i = 0; i < files.Length; i++)
    {
        totalLen += lens[i];
        totalBlocks += lblocks[i].LocatedBlockCount();
    }
    System.Console.Out.WriteLine("total len=" + totalLen + "; totalBlocks=" + totalBlocks);
    fStatus = nn.GetFileInfo(trg);
    // new length
    trgLen = fStatus.GetLen();
    // read the resulting file
    stm = dfs.Open(trgPath);
    byte[] byteFileConcat = new byte[(int)trgLen];
    stm.ReadFully(0, byteFileConcat);
    stm.Close();
    trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
    // verifications
    // 1. number of blocks
    NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks);
    // 2. file lengths
    NUnit.Framework.Assert.AreEqual(trgLen, totalLen);
    // 3. removal of the src files
    foreach (Path p in files)
    {
        fStatus = nn.GetFileInfo(p.ToUri().GetPath());
        // file shouldn't exist anymore
        NUnit.Framework.Assert.IsNull("File " + p + " still exists", fStatus);
        // try to create a file with the same name
        DFSTestUtil.CreateFile(dfs, p, fileLen, ReplFactor, 1);
    }
    // 4. content
    CheckFileContent(byteFileConcat, bytes);
    // add a small file (less than a block)
    Path smallFile = new Path("/sfile");
    int sFileLen = 10;
    DFSTestUtil.CreateFile(dfs, smallFile, sFileLen, ReplFactor, 1);
    dfs.Concat(trgPath, new Path[] { smallFile });
    fStatus = nn.GetFileInfo(trg);
    // new length
    trgLen = fStatus.GetLen();
    // check number of blocks
    trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
    NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks + 1);
    // and length
    NUnit.Framework.Assert.AreEqual(trgLen, totalLen + sFileLen);
}
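// Hedged sketch of the kind of check CheckFileContent(byteFileConcat, bytes)
// performs above (an assumption for illustration; the real helper in the test
// class may be implemented differently): after concat, the target file must
// contain the original target bytes followed by each source file's bytes in
// the order they were passed to concat.
private static void SketchCheckFileContent(byte[] concat, byte[][] bytes)
{
    int idx = 0;
    foreach (byte[] part in bytes)
    {
        foreach (byte b in part)
        {
            NUnit.Framework.Assert.AreEqual("Mismatch at offset " + idx, b, concat[idx]);
            idx++;
        }
    }
    // every byte of the concatenated file must be accounted for
    NUnit.Framework.Assert.AreEqual("Concatenated length mismatch", concat.Length, idx);
}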