/// <summary>Spins up a MiniDFSCluster configured for volume-failure testing.</summary>
/// <param name="numDataNodes">number of datanodes</param>
/// <param name="storagesPerDatanode">number of storage locations on each datanode</param>
/// <param name="failedVolumesTolerated">number of acceptable volume failures</param>
/// <exception cref="System.Exception">if there is any failure</exception>
private void InitCluster(int numDataNodes, int storagesPerDatanode, int failedVolumesTolerated)
{
    conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 512L);
    // Use one-second heartbeat, DF, and recheck intervals so that state
    // about volume failures and datanode death reaches the namenode quickly.
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    conf.SetInt(DFSConfigKeys.DfsDfIntervalKey, 1000);
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
    conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, failedVolumesTolerated);
    cluster = new MiniDFSCluster.Builder(conf)
        .NumDataNodes(numDataNodes)
        .StoragesPerDatanode(storagesPerDatanode)
        .Build();
    cluster.WaitActive();
    fs = cluster.GetFileSystem();
    dataDir = cluster.GetDataDirectory();
    // Per-volume capacity: the first datanode's reported capacity divided
    // evenly across its storage directories.
    long capacityOfFirstDn = DFSTestUtil.GetDatanodeCapacity(
        cluster.GetNamesystem().GetBlockManager().GetDatanodeManager(), 0);
    volumeCapacity = capacityOfFirstDn / cluster.GetStoragesPerDatanode();
}
/// <summary>
/// Regression test: a forged block report in which one datanode claims a
/// copy of every block on each of its storages must not trip a namenode
/// assertion, and afterwards the namenode must still report exactly one
/// replica per distinct datanode for every block.
/// </summary>
public virtual void TestBlockHasMultipleReplicasOnSameDN()
{
    string fileName = MakeFileName(GenericTestUtils.GetMethodName());
    Path path = new Path(fileName);
    // Write out a file with a few blocks.
    DFSTestUtil.CreateFile(fs, path, BlockSize, BlockSize * NumBlocks, BlockSize, NumDatanodes, seed);
    // Fetch the file's block list together with the block locations.
    LocatedBlocks blockList = client.GetLocatedBlocks(path.ToString(), 0, BlockSize * NumBlocks);
    // Forge a block report from the first datanode that claims one copy of
    // every block on each of its storages.
    DataNode firstDn = cluster.GetDataNodes()[0];
    DatanodeRegistration registration = firstDn.GetDNRegistrationForBP(bpid);
    StorageBlockReport[] storageReports = new StorageBlockReport[cluster.GetStoragesPerDatanode()];
    AList<Replica> replicas = new AList<Replica>();
    foreach (LocatedBlock located in blockList.GetLocatedBlocks())
    {
        Block localBlock = located.GetBlock().GetLocalBlock();
        replicas.AddItem(new FinalizedReplica(localBlock, null, null));
    }
    BlockListAsLongs encodedReplicas = BlockListAsLongs.Encode(replicas);
    for (int i = 0; i < cluster.GetStoragesPerDatanode(); ++i)
    {
        FsVolumeSpi volume = firstDn.GetFSDataset().GetVolumes()[i];
        DatanodeStorage storage = new DatanodeStorage(volume.GetStorageID());
        storageReports[i] = new StorageBlockReport(storage, encodedReplicas);
    }
    // Should not assert!
    cluster.GetNameNodeRpc().BlockReport(registration, bpid, storageReports,
        new BlockReportContext(1, 0, Runtime.NanoTime()));
    // Re-fetch the locations and verify each block still resolves to one
    // replica on each of the distinct datanodes.
    blockList = client.GetLocatedBlocks(fileName, 0, BlockSize * NumBlocks);
    foreach (LocatedBlock located in blockList.GetLocatedBlocks())
    {
        DatanodeInfo[] locations = located.GetLocations();
        Assert.AssertThat(locations.Length, IS.Is((int)NumDatanodes));
        Assert.AssertThat(locations[0].GetDatanodeUuid(),
            CoreMatchers.Not(locations[1].GetDatanodeUuid()));
    }
}
/// <summary>
/// Test that if splitThreshold is zero, then we always get a separate
/// call per storage.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void TestAlwaysSplit()
{
    StartUpCluster(0);
    NameNode nameNode = cluster.GetNameNode();
    DataNode dataNode = cluster.GetDataNodes()[0];
    // Create a file with a few blocks.
    CreateFile(GenericTestUtils.GetMethodName(), BlocksInFile);
    // Spy on the datanode's NN RPC translator so block reports can be observed.
    DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dataNode, nameNode);
    // Force a block report so there is an interaction with the spy object.
    DataNodeTestUtils.TriggerBlockReport(dataNode);
    // With a zero split threshold, expect exactly one BlockReport RPC per
    // storage, each covering a single storage.
    ArgumentCaptor<StorageBlockReport[]> reportCaptor =
        ArgumentCaptor.ForClass<StorageBlockReport[]>();
    Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Times(cluster.GetStoragesPerDatanode()))
        .BlockReport(Matchers.Any<DatanodeRegistration>(), Matchers.AnyString(),
            reportCaptor.Capture(), Org.Mockito.Mockito.AnyObject<BlockReportContext>());
    VerifyCapturedArguments(reportCaptor, 1, BlocksInFile);
}
/// <summary>
/// Verifies the namenode sync and block-report metrics: op counters are
/// checked right after startup, then the quantile gauges are checked after
/// waiting out one percentile rollover interval.
/// </summary>
public virtual void TestSyncAndBlockReportMetric()
{
    MetricsRecordBuilder metrics = MetricsAsserts.GetMetrics(NnMetrics);
    // Exactly one sync happens when the cluster starts, on journal open.
    MetricsAsserts.AssertCounter("SyncsNumOps", 1L, metrics);
    // At startup each datanode submits one block report per storage.
    MetricsAsserts.AssertCounter("BlockReportNumOps",
        (long)DatanodeCount * cluster.GetStoragesPerDatanode(), metrics);
    // Wait one interval plus slop so the percentile windows roll over.
    Sharpen.Thread.Sleep((PercentilesInterval + 1) * 1000);
    // The quantile gauges should now have been populated.
    MetricsAsserts.AssertQuantileGauges("Syncs1s", metrics);
    MetricsAsserts.AssertQuantileGauges("BlockReport1s", metrics);
}