/// <summary>Create a file with one block and corrupt some/all of the block replicas.</summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Sharpen.TimeoutException"/>
private void CreateAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount)
{
    DFSTestUtil.CreateFile(dfs, filePath, BlockSize, repl, 0);
    DFSTestUtil.WaitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking name node
    LocatedBlocks locatedblocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToString(), 0L, BlockSize);
    NUnit.Framework.Assert.AreEqual(repl, locatedblocks.Get(0).GetLocations().Length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.Get(0);
    DatanodeInfo[] datanodeinfos = lblock.GetLocations();
    ExtendedBlock block = lblock.GetBlock();
    // corrupt some/all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++)
    {
        DatanodeInfo dninfo = datanodeinfos[i];
        DataNode dn = cluster.GetDataNode(dninfo.GetIpcPort());
        CorruptBlock(block, dn);
        Log.Debug("Corrupted block " + block.GetBlockName() + " on data node " + dninfo);
    }
}
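// CorruptBlock(block, dn) above is a helper defined elsewhere in this test
// class. For reference, here is a minimal sketch of one way such a helper
// could be implemented, assuming file-based replicas and reusing the
// DataNodeTestUtils.GetBlockFile / RandomAccessFile pattern that TestTC7
// below uses (truncating the replica file makes it fail length/checksum
// verification). The name and body are illustrative, not the actual helper.
private static void CorruptBlockSketch(ExtendedBlock block, DataNode dn)
{
    // Locate the on-disk replica file for this block on the given datanode.
    FilePath f = DataNodeTestUtils.GetBlockFile(dn, block.GetBlockPoolId(), block.GetLocalBlock());
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    try
    {
        // Truncate the replica so its length no longer matches the block metadata.
        raf.SetLength(raf.Length() - 1);
    }
    finally
    {
        raf.Close();
    }
}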
internal Pipeline(LocatedBlock lb)
{
    foreach (DatanodeInfo d in lb.GetLocations())
    {
        datanodes.AddItem(d.GetName());
    }
}
public virtual void TestAddBlockRetryShouldReturnBlockWithLocations()
{
    string src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();
    // create file
    nameNodeRpc.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
    // start first addBlock()
    Log.Info("Starting first addBlock for " + src);
    LocatedBlock lb1 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.IsTrue("Block locations should be present", lb1.GetLocations().Length > 0);
    cluster.RestartNameNode();
    nameNodeRpc = cluster.GetNameNodeRpc();
    LocatedBlock lb2 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock());
    NUnit.Framework.Assert.IsTrue("Wrong locations with retry", lb2.GetLocations().Length > 0);
}
public virtual void TestMissingBlock()
{
    // Create a file with a single block with a single replica
    Path file = GetTestPath("testMissingBlocks");
    CreateFile(file, 100, (short)1);
    // Corrupt the only replica of the block to result in a missing block
    LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), file.ToString(), 0, 1).Get(0);
    cluster.GetNamesystem().WriteLock();
    try
    {
        bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
    }
    finally
    {
        cluster.GetNamesystem().WriteUnlock();
    }
    UpdateMetrics();
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("UnderReplicatedBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingReplOneBlocks", 1L, rb);
    fs.Delete(file, true);
    WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
}
/// <summary>Get a DataNode that serves our testBlock.</summary>
public virtual DataNode GetDataNode(LocatedBlock testBlock)
{
    DatanodeInfo[] nodes = testBlock.GetLocations();
    int ipcport = nodes[0].GetIpcPort();
    return cluster.GetDataNode(ipcport);
}
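// A hedged usage sketch (the dfs/path variables are placeholders for whatever
// file the test created): resolve the datanode serving the first block of a
// file, then interact with that datanode directly.
//
//   LocatedBlock testBlock = dfs.GetClient().GetLocatedBlocks(path.ToString(), 0).Get(0);
//   DataNode dn = GetDataNode(testBlock);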
public virtual void Setup()
{
    conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    Configuration[] overlays = new Configuration[NumDatanodes];
    for (int i = 0; i < overlays.Length; i++)
    {
        overlays[i] = new Configuration();
        if (i == RoNodeIndex)
        {
            overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, DatanodeStorage.State.ReadOnlyShared);
        }
    }
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
    fs = cluster.GetFileSystem();
    blockManager = cluster.GetNameNode().GetNamesystem().GetBlockManager();
    datanodeManager = blockManager.GetDatanodeManager();
    client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
    for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
    {
        DataNode dataNode = cluster.GetDataNodes()[i_1];
        ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())), i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
    }
    // Create a 1 block file
    DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
    LocatedBlock locatedBlock = GetLocatedBlock();
    extendedBlock = locatedBlock.GetBlock();
    block = extendedBlock.GetLocalBlock();
    Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
    normalDataNode = locatedBlock.GetLocations()[0];
    readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
    Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
    ValidateNumberReplicas(1);
    // Inject the block into the datanode with READ_ONLY_SHARED storage
    cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
    // There should now be 2 *locations* for the block.
    // Must wait until the NameNode has processed the block report for the injected blocks.
    WaitForLocations(2);
}
public virtual void TestBlockMoveAcrossStorageInSameNode()
{
    Configuration conf = new HdfsConfiguration();
    // Create only one datanode in the cluster to verify movement within a datanode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();
    try
    {
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
        DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
        LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
        // get the current located block
        LocatedBlock locatedBlock = locatedBlocks.Get(0);
        ExtendedBlock block = locatedBlock.GetBlock();
        DatanodeInfo[] locations = locatedBlock.GetLocations();
        NUnit.Framework.Assert.AreEqual(1, locations.Length);
        StorageType[] storageTypes = locatedBlock.GetStorageTypes();
        // the current block should have been written to DISK
        NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
        DatanodeInfo source = locations[0];
        // Move the block to ARCHIVE by using the same DatanodeInfo for source,
        // proxy and destination, so that the movement happens within the datanode.
        NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
        // wait till the namenode is notified
        Sharpen.Thread.Sleep(3000);
        locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
        // get the current located block
        locatedBlock = locatedBlocks.Get(0);
        NUnit.Framework.Assert.AreEqual("Storage should be only one", 1, locatedBlock.GetLocations().Length);
        NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE", locatedBlock.GetStorageTypes()[0] == StorageType.Archive);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Verify the number of corrupted block replicas by fetching the block
/// location from the name node.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="System.IO.IOException"/>
private void VerifyCorruptedBlockCount(Path filePath, int expectedReplicas)
{
    LocatedBlocks lBlocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToUri().GetPath(), 0, long.MaxValue);
    // We expect only the first block of the file to be used for this test.
    LocatedBlock firstLocatedBlock = lBlocks.Get(0);
    NUnit.Framework.Assert.AreEqual(expectedReplicas, firstLocatedBlock.GetLocations().Length);
}
/// <exception cref="System.Exception"/> public virtual void TestBlockReaderLocalLegacyWithAppend() { short ReplFactor = 1; HdfsConfiguration conf = GetConfiguration(null); conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build(); cluster.WaitActive(); DistributedFileSystem dfs = cluster.GetFileSystem(); Path path = new Path("/testBlockReaderLocalLegacy"); DFSTestUtil.CreateFile(dfs, path, 10, ReplFactor, 0); DFSTestUtil.WaitReplication(dfs, path, ReplFactor); ClientDatanodeProtocol proxy; Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token; ExtendedBlock originalBlock; long originalGS; { LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); proxy = DFSUtil.CreateClientDatanodeProtocolProxy(lb.GetLocations()[0], conf, 60000 , false); token = lb.GetBlockToken(); // get block and generation stamp ExtendedBlock blk = new ExtendedBlock(lb.GetBlock()); originalBlock = new ExtendedBlock(blk); originalGS = originalBlock.GetGenerationStamp(); // test getBlockLocalPathInfo BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(blk, token); NUnit.Framework.Assert.AreEqual(originalGS, info.GetBlock().GetGenerationStamp()); } { // append one byte FSDataOutputStream @out = dfs.Append(path); @out.Write(1); @out.Close(); } { // get new generation stamp LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); long newGS = lb.GetBlock().GetGenerationStamp(); NUnit.Framework.Assert.IsTrue(newGS > originalGS); // getBlockLocalPathInfo using the original block. NUnit.Framework.Assert.AreEqual(originalGS, originalBlock.GetGenerationStamp()); BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(originalBlock, token); NUnit.Framework.Assert.AreEqual(newGS, info.GetBlock().GetGenerationStamp()); } cluster.Shutdown(); }
internal static IList<Mover.MLocation> ToLocations(LocatedBlock lb)
{
    DatanodeInfo[] datanodeInfos = lb.GetLocations();
    StorageType[] storageTypes = lb.GetStorageTypes();
    long size = lb.GetBlockSize();
    IList<Mover.MLocation> locations = new List<Mover.MLocation>();
    for (int i = 0; i < datanodeInfos.Length; i++)
    {
        locations.AddItem(new Mover.MLocation(datanodeInfos[i], storageTypes[i], size));
    }
    return locations;
}
// try reading a block using a BlockReader directly
private static void TryRead(Configuration conf, LocatedBlock lblock, bool shouldSucceed)
{
    IPEndPoint targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.GetBlock();
    try
    {
        DatanodeInfo[] nodes = lblock.GetLocations();
        targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
        blockReader = new BlockReaderFactory(new DFSClient.Conf(conf))
            .SetFileName(BlockReaderFactory.GetFileName(targetAddr, "test-blockpoolid", block.GetBlockId()))
            .SetBlock(block)
            .SetBlockToken(lblock.GetBlockToken())
            .SetInetSocketAddress(targetAddr)
            .SetStartOffset(0)
            .SetLength(-1)
            .SetVerifyChecksum(true)
            .SetClientName("TestBlockTokenWithDFS")
            .SetDatanodeInfo(nodes[0])
            .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
            .SetClientCacheContext(ClientContext.GetFromConf(conf))
            .SetConfiguration(conf)
            .SetRemotePeerFactory(new _RemotePeerFactory_162(conf))
            .Build();
    }
    catch (IOException ex)
    {
        ioe = ex;
    }
    finally
    {
        if (blockReader != null)
        {
            try
            {
                blockReader.Close();
            }
            catch (IOException e)
            {
                throw new RuntimeException(e);
            }
        }
    }
    if (shouldSucceed)
    {
        NUnit.Framework.Assert.IsNotNull("OP_READ_BLOCK: access token is invalid, " + "when it is expected to be valid", blockReader);
    }
    else
    {
        NUnit.Framework.Assert.IsNotNull("OP_READ_BLOCK: access token is valid, " + "when it is expected to be invalid", ioe);
        NUnit.Framework.Assert.IsTrue("OP_READ_BLOCK failed due to reasons other than access token: ", ioe is InvalidBlockTokenException);
    }
}
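// A hedged usage sketch for TryRead: a test typically reads once while the
// block token is valid and once after arranging for it to become invalid,
// expecting InvalidBlockTokenException only in the second case.
//
//   TryRead(conf, lblock, true);   // valid token: the BlockReader must be built
//   ...                            // invalidate the block token here
//   TryRead(conf, lblock, false);  // invalid token: expect InvalidBlockTokenException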
/// <exception cref="System.IO.IOException"/> private void TestPlacement(string clientMachine, string clientRack) { // write 5 files and check whether all times block placed for (int i = 0; i < 5; i++) { string src = "/test-" + i; // Create the file with client machine HdfsFileStatus fileStatus = namesystem.StartFile(src, perm, clientMachine, clientMachine , EnumSet.Of(CreateFlag.Create), true, ReplicationFactor, DefaultBlockSize, null , false); LocatedBlock locatedBlock = nameNodeRpc.AddBlock(src, clientMachine, null, null, fileStatus.GetFileId(), null); NUnit.Framework.Assert.AreEqual("Block should be allocated sufficient locations", ReplicationFactor, locatedBlock.GetLocations().Length); if (clientRack != null) { NUnit.Framework.Assert.AreEqual("First datanode should be rack local", clientRack , locatedBlock.GetLocations()[0].GetNetworkLocation()); } nameNodeRpc.AbandonBlock(locatedBlock.GetBlock(), fileStatus.GetFileId(), src, clientMachine ); } }
/// <summary>TC7: Corrupted replicas are present.</summary>
/// <exception cref="System.IO.IOException">an exception might be thrown</exception>
/// <exception cref="System.Exception"/>
private void TestTC7(bool appendToNewBlock)
{
    short repl = 2;
    Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    //a. Create file with replication factor of 2. Write half block of data. Close file.
    int len1 = (int)(BlockSize / 2);
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    DFSTestUtil.WaitReplication(fs, p, repl);
    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString(), 0L, len1);
    NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
    LocatedBlock lb = locatedblocks.Get(0);
    ExtendedBlock blk = lb.GetBlock();
    NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
    DatanodeInfo[] datanodeinfos = lb.GetLocations();
    NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
    DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
    FilePath f = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
    NUnit.Framework.Assert.AreEqual(len1, raf.Length());
    raf.SetLength(0);
    raf.Close();
    //c. Open file in "append mode". Append a new block worth of data. Close file.
    int len2 = (int)BlockSize;
    {
        FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null) : fs.Append(p);
        AppendTestUtil.Write(@out, len1, len2);
        @out.Close();
    }
    //d. Reopen file and read two blocks worth of data.
    AppendTestUtil.Check(fs, p, len1 + len2);
}
/// <summary>TC11: Racing rename</summary>
/// <exception cref="System.Exception"/>
private void TestTC11(bool appendToNewBlock)
{
    Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    //a. Create file and write one block of data. Close file.
    int len1 = (int)BlockSize;
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    //b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out_1 = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null) : fs.Append(p);
    int len2 = (int)BlockSize / 2;
    AppendTestUtil.Write(out_1, len1, len2);
    out_1.Hflush();
    //c. Rename file to file.new.
    Path pnew = new Path(p + ".new");
    NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
    //d. Close file handle that was opened in (b).
    out_1.Close();
    //check block sizes
    long len = fs.GetFileStatus(pnew).GetLen();
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(pnew.ToString(), 0L, len);
    int numblock = locatedblocks.LocatedBlockCount();
    for (int i = 0; i < numblock; i++)
    {
        LocatedBlock lb = locatedblocks.Get(i);
        ExtendedBlock blk = lb.GetBlock();
        long size = lb.GetBlockSize();
        if (i < numblock - 1)
        {
            NUnit.Framework.Assert.AreEqual(BlockSize, size);
        }
        foreach (DatanodeInfo datanodeinfo in lb.GetLocations())
        {
            DataNode dn = cluster.GetDataNode(datanodeinfo.GetIpcPort());
            Block metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(blk.GetBlockPoolId(), blk.GetBlockId());
            NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
        }
    }
}
private void Compare(LocatedBlock expected, LocatedBlock actual)
{
    NUnit.Framework.Assert.AreEqual(expected.GetBlock(), actual.GetBlock());
    Compare(expected.GetBlockToken(), actual.GetBlockToken());
    NUnit.Framework.Assert.AreEqual(expected.GetStartOffset(), actual.GetStartOffset());
    NUnit.Framework.Assert.AreEqual(expected.IsCorrupt(), actual.IsCorrupt());
    DatanodeInfo[] ei = expected.GetLocations();
    DatanodeInfo[] ai = actual.GetLocations();
    NUnit.Framework.Assert.AreEqual(ei.Length, ai.Length);
    for (int i = 0; i < ei.Length; i++)
    {
        Compare(ei[i], ai[i]);
    }
}
/// <summary>Convert a LocatedBlock to a Json map.</summary>
/// <exception cref="System.IO.IOException"/>
private static IDictionary<string, object> ToJsonMap(LocatedBlock locatedblock)
{
    if (locatedblock == null)
    {
        return null;
    }
    IDictionary<string, object> m = new SortedDictionary<string, object>();
    m["blockToken"] = ToJsonMap(locatedblock.GetBlockToken());
    m["isCorrupt"] = locatedblock.IsCorrupt();
    m["startOffset"] = locatedblock.GetStartOffset();
    m["block"] = ToJsonMap(locatedblock.GetBlock());
    m["locations"] = ToJsonArray(locatedblock.GetLocations());
    m["cachedLocations"] = ToJsonArray(locatedblock.GetCachedLocations());
    return m;
}
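// For reference, the SortedDictionary above serializes to JSON of roughly this
// shape (keys appear in sorted order; the values shown are illustrative only):
//
//   {
//     "block": { ... },           // from ToJsonMap(ExtendedBlock)
//     "blockToken": { ... },      // from ToJsonMap(Token)
//     "cachedLocations": [ ... ], // one entry per cached DatanodeInfo
//     "isCorrupt": false,
//     "locations": [ ... ],       // one entry per DatanodeInfo
//     "startOffset": 0
//   }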
public virtual void TestRetryAddBlockWhileInChooseTarget()
{
    string src = "/testRetryAddBlockWhileInChooseTarget";
    FSNamesystem ns = cluster.GetNamesystem();
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    // create file
    nn.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
    // start first addBlock()
    Log.Info("Starting first addBlock for " + src);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    DatanodeStorageInfo[] targets = ns.GetNewBlockTargets(src, INodeId.GrandfatherInodeId, "clientName", null, null, null, onRetryBlock);
    NUnit.Framework.Assert.IsNotNull("Targets must be generated", targets);
    // run second addBlock()
    Log.Info("Starting second addBlock for " + src);
    nn.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.IsTrue("Penultimate block must be complete", CheckFileProgress(src, false));
    LocatedBlocks lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb2 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb2.GetLocations().Length);
    // continue first addBlock()
    LocatedBlock newBlock = ns.StoreAllocatedBlock(src, INodeId.GrandfatherInodeId, "clientName", null, targets);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb2.GetBlock(), newBlock.GetBlock());
    // check locations
    lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb1 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb1.GetLocations().Length);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock());
}
/// <summary>Get a BlockReader for the given block.</summary>
/// <exception cref="System.IO.IOException"/>
public static BlockReader GetBlockReader(MiniDFSCluster cluster, LocatedBlock testBlock, int offset, int lenToRead)
{
    IPEndPoint targetAddr = null;
    ExtendedBlock block = testBlock.GetBlock();
    DatanodeInfo[] nodes = testBlock.GetLocations();
    targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
    DistributedFileSystem fs = cluster.GetFileSystem();
    return new BlockReaderFactory(fs.GetClient().GetConf())
        .SetInetSocketAddress(targetAddr)
        .SetBlock(block)
        .SetFileName(targetAddr.ToString() + ":" + block.GetBlockId())
        .SetBlockToken(testBlock.GetBlockToken())
        .SetStartOffset(offset)
        .SetLength(lenToRead)
        .SetVerifyChecksum(true)
        .SetClientName("BlockReaderTestUtil")
        .SetDatanodeInfo(nodes[0])
        .SetClientCacheContext(ClientContext.GetFromConf(fs.GetConf()))
        .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
        .SetConfiguration(fs.GetConf())
        .SetAllowShortCircuitLocalReads(true)
        .SetRemotePeerFactory(new _RemotePeerFactory_196(fs))
        .Build();
}
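// A hedged usage sketch: drain lenToRead bytes through the returned reader.
// The helper name is illustrative, and BlockReader.Read(byte[], int, int) is
// assumed to follow the usual stream contract (returns the number of bytes
// read, or -1 at end of stream).
public static int ReadAllFromBlock(MiniDFSCluster cluster, LocatedBlock testBlock, int lenToRead)
{
    BlockReader reader = GetBlockReader(cluster, testBlock, 0, lenToRead);
    try
    {
        byte[] buf = new byte[lenToRead];
        int total = 0;
        int n;
        // Keep reading until the requested length is reached or the stream ends.
        while (total < buf.Length && (n = reader.Read(buf, total, buf.Length - total)) > 0)
        {
            total += n;
        }
        return total;
    }
    finally
    {
        reader.Close();
    }
}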
/// <exception cref="System.Exception"/> private void WaitForAllReplicas(int expectedReplicaNum, Path file, DistributedFileSystem dfs) { for (int i = 0; i < 5; i++) { LocatedBlocks lbs = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0, BlockSize ); LocatedBlock lb = lbs.Get(0); if (lb.GetLocations().Length >= expectedReplicaNum) { return; } else { Sharpen.Thread.Sleep(1000); } } }
// Waits for all of the blocks to have the expected replication
/// <exception cref="System.IO.IOException"/>
private void WaitForBlockReplication(string filename, ClientProtocol namenode, int expected, long maxWaitSec)
{
    long start = Time.MonotonicNow();
    // wait for all the blocks to be replicated
    Log.Info("Checking for block replication for " + filename);
    LocatedBlocks blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual(numBlocks, blocks.LocatedBlockCount());
    for (int i = 0; i < numBlocks; ++i)
    {
        Log.Info("Checking for block:" + (i + 1));
        while (true)
        {
            // Loop to check for block i (usually when 0 is done all will be done)
            blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
            NUnit.Framework.Assert.AreEqual(numBlocks, blocks.LocatedBlockCount());
            LocatedBlock block = blocks.Get(i);
            int actual = block.GetLocations().Length;
            if (actual == expected)
            {
                Log.Info("Got enough replicas for " + (i + 1) + "th block " + block.GetBlock() + ", got " + actual + ".");
                break;
            }
            Log.Info("Not enough replicas for " + (i + 1) + "th block " + block.GetBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
            if (maxWaitSec > 0 && (Time.MonotonicNow() - start) > (maxWaitSec * 1000))
            {
                throw new IOException("Timed out while waiting for all blocks to be replicated for " + filename);
            }
            try
            {
                Sharpen.Thread.Sleep(500);
            }
            catch (Exception)
            {
            }
        }
    }
}
// Waits for all of the blocks to have the expected replication
/// <exception cref="System.IO.IOException"/>
private void WaitForBlockReplication(string filename, ClientProtocol namenode, int expected, long maxWaitSec)
{
    long start = Time.MonotonicNow();
    // wait for all the blocks to be replicated
    Log.Info("Checking for block replication for " + filename);
    while (true)
    {
        bool replOk = true;
        LocatedBlocks blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
        for (IEnumerator<LocatedBlock> iter = blocks.GetLocatedBlocks().GetEnumerator(); iter.HasNext();)
        {
            LocatedBlock block = iter.Next();
            int actual = block.GetLocations().Length;
            if (actual < expected)
            {
                Log.Info("Not enough replicas for " + block.GetBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
                replOk = false;
                break;
            }
        }
        if (replOk)
        {
            return;
        }
        if (maxWaitSec > 0 && (Time.MonotonicNow() - start) > (maxWaitSec * 1000))
        {
            throw new IOException("Timed out while waiting for all blocks to be replicated for " + filename);
        }
        try
        {
            Sharpen.Thread.Sleep(500);
        }
        catch (Exception)
        {
        }
    }
}
/// <exception cref="System.IO.IOException"/> /// <exception cref="System.Exception"/> private void WaitForLocations(int locations) { for (int tries = 0; tries < Retries;) { try { LocatedBlock locatedBlock = GetLocatedBlock(); Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(locations)); break; } catch (Exception e) { if (++tries < Retries) { Sharpen.Thread.Sleep(1000); } else { throw; } } } }
/// <summary>
/// Group the per-replica
/// <see cref="Org.Apache.Hadoop.FS.VolumeId"/>
/// info returned from
/// <see cref="DFSClient#queryDatanodesForHdfsBlocksMetadata(Map)"/>
/// to be associated with the corresponding
/// <see cref="Org.Apache.Hadoop.Hdfs.Protocol.LocatedBlock"/>
/// .
/// </summary>
/// <param name="blocks">Original LocatedBlock array</param>
/// <param name="metadatas">VolumeId information for the replicas on each datanode</param>
/// <returns>
/// blockVolumeIds per-replica VolumeId information associated with the
/// parent LocatedBlock
/// </returns>
internal static IDictionary<LocatedBlock, IList<VolumeId>> AssociateVolumeIdsWithBlocks(IList<LocatedBlock> blocks, IDictionary<DatanodeInfo, HdfsBlocksMetadata> metadatas)
{
    // Initialize mapping of block ID to LocatedBlock.
    // Used to associate results from DN RPCs to the parent LocatedBlock.
    IDictionary<long, LocatedBlock> blockIdToLocBlock = new Dictionary<long, LocatedBlock>();
    foreach (LocatedBlock b in blocks)
    {
        blockIdToLocBlock[b.GetBlock().GetBlockId()] = b;
    }
    // Initialize the mapping of blocks -> list of VolumeIds, one per replica.
    // This is filled out with real values from the DN RPCs.
    IDictionary<LocatedBlock, IList<VolumeId>> blockVolumeIds = new Dictionary<LocatedBlock, IList<VolumeId>>();
    foreach (LocatedBlock b_1 in blocks)
    {
        AList<VolumeId> l = new AList<VolumeId>(b_1.GetLocations().Length);
        for (int i = 0; i < b_1.GetLocations().Length; i++)
        {
            l.AddItem(null);
        }
        blockVolumeIds[b_1] = l;
    }
    // Iterate through the list of metadatas (one per datanode).
    // For each metadata, if it's valid, insert its volume location information
    // into the Map returned to the caller.
    foreach (KeyValuePair<DatanodeInfo, HdfsBlocksMetadata> entry in metadatas)
    {
        DatanodeInfo datanode = entry.Key;
        HdfsBlocksMetadata metadata = entry.Value;
        // Check if metadata is valid
        if (metadata == null)
        {
            continue;
        }
        long[] metaBlockIds = metadata.GetBlockIds();
        IList<byte[]> metaVolumeIds = metadata.GetVolumeIds();
        IList<int> metaVolumeIndexes = metadata.GetVolumeIndexes();
        // Add a VolumeId for each replica in the HdfsBlocksMetadata
        for (int j = 0; j < metaBlockIds.Length; j++)
        {
            int volumeIndex = metaVolumeIndexes[j];
            long blockId = metaBlockIds[j];
            // Skip if the block wasn't found, or volumeIndex is not a valid index
            // into metaVolumeIds. Also skip if the DN responded with a block we
            // didn't ask for.
            if (volumeIndex == int.MaxValue || volumeIndex >= metaVolumeIds.Count || !blockIdToLocBlock.Contains(blockId))
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("No data for block " + blockId);
                }
                continue;
            }
            // Get the VolumeId by indexing into the list of VolumeIds
            // provided by the datanode
            byte[] volumeId = metaVolumeIds[volumeIndex];
            HdfsVolumeId id = new HdfsVolumeId(volumeId);
            // Find out which index the datanode has in the LocatedBlock's replicas
            LocatedBlock locBlock = blockIdToLocBlock[blockId];
            DatanodeInfo[] dnInfos = locBlock.GetLocations();
            int index = -1;
            for (int k = 0; k < dnInfos.Length; k++)
            {
                if (dnInfos[k].Equals(datanode))
                {
                    index = k;
                    break;
                }
            }
            if (index < 0)
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Datanode responded with a block volume id we did" + " not request, omitting.");
                }
                continue;
            }
            // Place the VolumeId at the same index as the DN's index in the
            // list of replicas
            IList<VolumeId> volumeIds = blockVolumeIds[locBlock];
            volumeIds.Set(index, id);
        }
    }
    return blockVolumeIds;
}
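// A hedged sketch of the result's shape (variable names are illustrative):
// every LocatedBlock maps to a list with one VolumeId slot per replica, in
// replica order, left as null wherever the corresponding datanode did not
// report.
//
//   IDictionary<LocatedBlock, IList<VolumeId>> ids =
//       AssociateVolumeIdsWithBlocks(blocks, metadatas);
//   foreach (KeyValuePair<LocatedBlock, IList<VolumeId>> e in ids)
//   {
//       // e.Value.Count == e.Key.GetLocations().Length
//   }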
public virtual void TestUpdatePipelineAfterDelete()
{
    Configuration conf = new HdfsConfiguration();
    Path file = new Path("/test-file");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        NamenodeProtocols namenode = cluster.GetNameNodeRpc();
        DFSOutputStream @out = null;
        try
        {
            // Create a file and make sure a block is allocated for it.
            @out = (DFSOutputStream)(fs.Create(file).GetWrappedStream());
            @out.Write(1);
            @out.Hflush();
            // Create a snapshot that includes the file.
            SnapshotTestHelper.CreateSnapshot((DistributedFileSystem)fs, new Path("/"), "s1");
            // Grab the block info of this file for later use.
            FSDataInputStream @in = null;
            ExtendedBlock oldBlock = null;
            try
            {
                @in = fs.Open(file);
                oldBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
            }
            finally
            {
                IOUtils.CloseStream(@in);
            }
            // Allocate a new block ID/gen stamp so we can simulate pipeline recovery.
            string clientName = ((DistributedFileSystem)fs).GetClient().GetClientName();
            LocatedBlock newLocatedBlock = namenode.UpdateBlockForPipeline(oldBlock, clientName);
            ExtendedBlock newBlock = new ExtendedBlock(oldBlock.GetBlockPoolId(), oldBlock.GetBlockId(), oldBlock.GetNumBytes(), newLocatedBlock.GetBlock().GetGenerationStamp());
            // Delete the file from the present FS. It will still exist in the
            // previously-created snapshot. This will log an OP_DELETE for the
            // file in question.
            fs.Delete(file, true);
            // Simulate a pipeline recovery, wherein a new block is allocated
            // for the existing block, resulting in an OP_UPDATE_BLOCKS being
            // logged for the file in question.
            try
            {
                namenode.UpdatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.GetLocations(), newLocatedBlock.GetStorageIDs());
            }
            catch (IOException ioe)
            {
                // normal
                GenericTestUtils.AssertExceptionContains("does not exist or it is not under construction", ioe);
            }
            // Make sure the NN can restart with the edit logs as we have them now.
            cluster.RestartNameNode(true);
        }
        finally
        {
            IOUtils.CloseStream(@out);
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestBlockSynchronization()
{
    int OrgFileSize = 3000;
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
    cluster.WaitActive();
    //create a file
    DistributedFileSystem dfs = cluster.GetFileSystem();
    string filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
    NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
    DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
    //get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
    NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
    //connect to data nodes
    DataNode[] datanodes = new DataNode[ReplicationNum];
    for (int i = 0; i < ReplicationNum; i++)
    {
        datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
    }
    //verify Block Info
    ExtendedBlock lastblock = locatedblock.GetBlock();
    DataNode.Log.Info("newblocks=" + lastblock);
    for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
    {
        CheckMetaInfo(lastblock, datanodes[i_1]);
    }
    DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName, new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
    // expire lease to trigger block recovery.
    WaitLeaseRecovery(cluster);
    Block[] updatedmetainfo = new Block[ReplicationNum];
    long oldSize = lastblock.GetNumBytes();
    lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr).GetBlock();
    long currentGS = lastblock.GetGenerationStamp();
    for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
    {
        updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2]).GetStoredBlock(lastblock.GetBlockPoolId(), lastblock.GetBlockId());
        NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
        NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
        NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
    }
    // verify that lease recovery does not occur when the namenode is in safemode
    System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.Create(filepath, (short)1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
    WaitLeaseRecovery(cluster);
    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());
    NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1", lm.CountLease() == 1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
}
public virtual void TestPendingAndInvalidate()
{
    Configuration Conf = new HdfsConfiguration();
    Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
    Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
    Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(DatanodeCount).Build();
    cluster.WaitActive();
    FSNamesystem namesystem = cluster.GetNamesystem();
    BlockManager bm = namesystem.GetBlockManager();
    DistributedFileSystem fs = cluster.GetFileSystem();
    try
    {
        // 1. create a file
        Path filePath = new Path("/tmp.txt");
        DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
        // 2. disable the heartbeats
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        }
        // 3. mark a couple of blocks as corrupt
        LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), filePath.ToString(), 0, 1).Get(0);
        cluster.GetNamesystem().WriteLock();
        try
        {
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1], "STORAGE_ID", "TEST");
        }
        finally
        {
            cluster.GetNamesystem().WriteUnlock();
        }
        BlockManagerTestUtil.ComputeAllPendingWork(bm);
        BlockManagerTestUtil.UpdateState(bm);
        NUnit.Framework.Assert.AreEqual(bm.GetPendingReplicationBlocksCount(), 1L);
        NUnit.Framework.Assert.AreEqual(bm.pendingReplications.GetNumReplicas(block.GetBlock().GetLocalBlock()), 2);
        // 4. delete the file
        fs.Delete(filePath, true);
        // Retry at most 10 times, each time sleeping for 1s. Note that 10s is much
        // less than the default pending record timeout (5~10min).
        int retries = 10;
        long pendingNum = bm.GetPendingReplicationBlocksCount();
        while (pendingNum != 0 && retries-- > 0)
        {
            // let the NN do the deletion
            Sharpen.Thread.Sleep(1000);
            BlockManagerTestUtil.UpdateState(bm);
            pendingNum = bm.GetPendingReplicationBlocksCount();
        }
        NUnit.Framework.Assert.AreEqual(pendingNum, 0L);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestBlockReceived()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
        cluster.WaitActive();
        DistributedFileSystem hdfs = cluster.GetFileSystem();
        FSNamesystem fsn = cluster.GetNamesystem();
        BlockManager blkManager = fsn.GetBlockManager();
        string file = "/tmp.txt";
        Path filePath = new Path(file);
        short replFactor = 1;
        DFSTestUtil.CreateFile(hdfs, filePath, 1024L, replFactor, 0);
        // temporarily stop the heartbeats
        AList<DataNode> datanodes = cluster.GetDataNodes();
        for (int i = 0; i < DatanodeCount; i++)
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i], true);
        }
        hdfs.SetReplication(filePath, (short)DatanodeCount);
        BlockManagerTestUtil.ComputeAllPendingWork(blkManager);
        NUnit.Framework.Assert.AreEqual(1, blkManager.pendingReplications.Size());
        INodeFile fileNode = fsn.GetFSDirectory().GetINode4Write(file).AsFile();
        Block[] blocks = fileNode.GetBlocks();
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 1, blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        LocatedBlock locatedBlock = hdfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
        DatanodeInfo existingDn = (locatedBlock.GetLocations())[0];
        int reportDnNum = 0;
        string poolId = cluster.GetNamesystem().GetBlockPoolId();
        // let two datanodes (other than the one that already has the data)
        // report to the NN
        for (int i_1 = 0; i_1 < DatanodeCount && reportDnNum < 2; i_1++)
        {
            if (!datanodes[i_1].GetDatanodeId().Equals(existingDn))
            {
                DatanodeRegistration dnR = datanodes[i_1].GetDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored", new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty) }) };
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        // let the same datanodes report again
        for (int i_2 = 0; i_2 < DatanodeCount && reportDnNum < 2; i_2++)
        {
            if (!datanodes[i_2].GetDatanodeId().Equals(existingDn))
            {
                DatanodeRegistration dnR = datanodes[i_2].GetDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[] { new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored", new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty) }) };
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications.GetNumReplicas(blocks[0]));
        // re-enable heartbeat for the datanode that has the data
        for (int i_3 = 0; i_3 < DatanodeCount; i_3++)
        {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i_3], false);
            DataNodeTestUtils.TriggerHeartbeat(datanodes[i_3]);
        }
        Sharpen.Thread.Sleep(5000);
        NUnit.Framework.Assert.AreEqual(0, blkManager.pendingReplications.Size());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestUpdateReplicaUnderRecovery()
{
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        //create a file
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
        //get block info
        LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs).GetNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
        NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
        //get DataNode and FSDataset objects
        DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanode != null);
        //initReplicaRecovery
        ExtendedBlock b = locatedblock.GetBlock();
        long recoveryid = b.GetGenerationStamp() + 1;
        long newlength = b.GetNumBytes() - 1;
        FsDatasetSpi<object> fsdataset = DataNodeTestUtils.GetFSDataset(datanode);
        ReplicaRecoveryInfo rri = fsdataset.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(b, null, recoveryid));
        //check replica
        ReplicaInfo replica = FsDatasetTestUtil.FetchReplicaInfo(fsdataset, bpid, b.GetBlockId());
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rur, replica.GetState());
        //check meta data before update
        FsDatasetImpl.CheckReplicaFiles(replica);
        {
            //case "THIS IS NOT SUPPOSED TO HAPPEN":
            //(block length) != (stored replica's on-disk length).
            //Create a block with the same id and gs but a different length.
            ExtendedBlock tmp = new ExtendedBlock(b.GetBlockPoolId(), rri.GetBlockId(), rri.GetNumBytes() - 1, rri.GetGenerationStamp());
            try
            {
                //the update should fail
                fsdataset.UpdateReplicaUnderRecovery(tmp, recoveryid, tmp.GetBlockId(), newlength);
                NUnit.Framework.Assert.Fail();
            }
            catch (IOException ioe)
            {
                System.Console.Out.WriteLine("GOOD: getting " + ioe);
            }
        }
        //update
        string storageID = fsdataset.UpdateReplicaUnderRecovery(new ExtendedBlock(b.GetBlockPoolId(), rri), recoveryid, rri.GetBlockId(), newlength);
        NUnit.Framework.Assert.IsTrue(storageID != null);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
public virtual void TestSortLocatedBlocks()
{
    // create the DatanodeManager which will be tested
    FSNamesystem fsn = Org.Mockito.Mockito.Mock<FSNamesystem>();
    Org.Mockito.Mockito.When(fsn.HasWriteLock()).ThenReturn(true);
    DatanodeManager dm = new DatanodeManager(Org.Mockito.Mockito.Mock<BlockManager>(), fsn, new Configuration());
    // register 5 datanodes, each with a different storage ID and type
    DatanodeInfo[] locs = new DatanodeInfo[5];
    string[] storageIDs = new string[5];
    StorageType[] storageTypes = new StorageType[] { StorageType.Archive, StorageType.Default, StorageType.Disk, StorageType.RamDisk, StorageType.Ssd };
    for (int i = 0; i < 5; i++)
    {
        // register a new datanode
        string uuid = "UUID-" + i;
        string ip = "IP-" + i;
        DatanodeRegistration dr = Org.Mockito.Mockito.Mock<DatanodeRegistration>();
        Org.Mockito.Mockito.When(dr.GetDatanodeUuid()).ThenReturn(uuid);
        Org.Mockito.Mockito.When(dr.GetIpAddr()).ThenReturn(ip);
        Org.Mockito.Mockito.When(dr.GetXferAddr()).ThenReturn(ip + ":9000");
        Org.Mockito.Mockito.When(dr.GetXferPort()).ThenReturn(9000);
        Org.Mockito.Mockito.When(dr.GetSoftwareVersion()).ThenReturn("version1");
        dm.RegisterDatanode(dr);
        // get location and storage information
        locs[i] = dm.GetDatanode(uuid);
        storageIDs[i] = "storageID-" + i;
    }
    // set the first 2 locations as decommissioned
    locs[0].SetDecommissioned();
    locs[1].SetDecommissioned();
    // create a LocatedBlock with the above locations
    ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
    LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
    IList<LocatedBlock> blocks = new AList<LocatedBlock>();
    blocks.AddItem(block);
    string targetIp = locs[4].GetIpAddr();
    // sort block locations
    dm.SortLocatedBlocks(targetIp, blocks);
    // check that storage IDs/types are aligned with the datanode locs
    DatanodeInfo[] sortedLocs = block.GetLocations();
    storageIDs = block.GetStorageIDs();
    storageTypes = block.GetStorageTypes();
    Assert.AssertThat(sortedLocs.Length, IS.Is(5));
    Assert.AssertThat(storageIDs.Length, IS.Is(5));
    Assert.AssertThat(storageTypes.Length, IS.Is(5));
    for (int i_1 = 0; i_1 < sortedLocs.Length; i_1++)
    {
        Assert.AssertThat(((DatanodeInfoWithStorage)sortedLocs[i_1]).GetStorageID(), IS.Is(storageIDs[i_1]));
        Assert.AssertThat(((DatanodeInfoWithStorage)sortedLocs[i_1]).GetStorageType(), IS.Is(storageTypes[i_1]));
    }
    // Ensure the local node is first.
    Assert.AssertThat(sortedLocs[0].GetIpAddr(), IS.Is(targetIp));
    // Ensure the two decommissioned DNs were moved to the end.
    Assert.AssertThat(sortedLocs[sortedLocs.Length - 1].GetAdminState(), IS.Is(DatanodeInfo.AdminStates.Decommissioned));
    Assert.AssertThat(sortedLocs[sortedLocs.Length - 2].GetAdminState(), IS.Is(DatanodeInfo.AdminStates.Decommissioned));
}
/* Check that at least two nodes are on the same rack and at least two are not. */
/// <exception cref="System.IO.IOException"/>
private void CheckFile(FileSystem fileSys, Path name, int repl)
{
    Configuration conf = fileSys.GetConf();
    ClientProtocol namenode = NameNodeProxies.CreateProxy<ClientProtocol>(conf, fileSys.GetUri()).GetProxy();
    WaitForBlockReplication(name.ToString(), namenode, Math.Min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.GetBlockLocations(name.ToString(), 0, long.MaxValue);
    FileStatus stat = fileSys.GetFileStatus(name);
    BlockLocation[] blockLocations = fileSys.GetFileBlockLocations(stat, 0L, long.MaxValue);
    // verify that the rack locations match
    NUnit.Framework.Assert.IsTrue(blockLocations.Length == locations.LocatedBlockCount());
    for (int i = 0; i < blockLocations.Length; i++)
    {
        LocatedBlock blk = locations.Get(i);
        DatanodeInfo[] datanodes = blk.GetLocations();
        string[] topologyPaths = blockLocations[i].GetTopologyPaths();
        NUnit.Framework.Assert.IsTrue(topologyPaths.Length == datanodes.Length);
        for (int j = 0; j < topologyPaths.Length; j++)
        {
            bool found = false;
            for (int k = 0; k < racks.Length; k++)
            {
                if (topologyPaths[j].StartsWith(racks[k]))
                {
                    found = true;
                    break;
                }
            }
            NUnit.Framework.Assert.IsTrue(found);
        }
    }
    bool isOnSameRack = true;
    bool isNotOnSameRack = true;
    foreach (LocatedBlock blk_1 in locations.GetLocatedBlocks())
    {
        DatanodeInfo[] datanodes = blk_1.GetLocations();
        if (datanodes.Length <= 1)
        {
            break;
        }
        if (datanodes.Length == 2)
        {
            isNotOnSameRack = !(datanodes[0].GetNetworkLocation().Equals(datanodes[1].GetNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i_1 = 0; i_1 < datanodes.Length - 1; i_1++)
        {
            Log.Info("datanode " + i_1 + ": " + datanodes[i_1]);
            bool onRack = false;
            for (int j = i_1 + 1; j < datanodes.Length; j++)
            {
                if (datanodes[i_1].GetNetworkLocation().Equals(datanodes[j].GetNetworkLocation()))
                {
                    onRack = true;
                }
            }
            if (onRack)
            {
                isOnSameRack = true;
            }
            if (!onRack)
            {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
            {
                break;
            }
        }
        if (!isOnSameRack || !isNotOnSameRack)
        {
            break;
        }
    }
    NUnit.Framework.Assert.IsTrue(isOnSameRack);
    NUnit.Framework.Assert.IsTrue(isNotOnSameRack);
}