/// <summary>
/// Adjust rackMap, moreThanOne, and exactlyOne after removing the replica on cur.
/// </summary>
/// <param name="rackMap">a map from rack to the replicas on that rack</param>
/// <param name="moreThanOne">the list of replicas on racks that have more than one replica</param>
/// <param name="exactlyOne">the list of replicas on racks that have exactly one replica</param>
/// <param name="cur">the replica to remove</param>
public virtual void AdjustSetsWithChosenReplica(IDictionary<string, IList<DatanodeStorageInfo>> rackMap,
    IList<DatanodeStorageInfo> moreThanOne, IList<DatanodeStorageInfo> exactlyOne,
    DatanodeStorageInfo cur)
{
    string rack = GetRack(cur.GetDatanodeDescriptor());
    IList<DatanodeStorageInfo> storages = rackMap[rack];
    storages.Remove(cur);
    if (storages.IsEmpty())
    {
        Sharpen.Collections.Remove(rackMap, rack);
    }
    if (moreThanOne.Remove(cur))
    {
        // If only one replica is left on this rack, it now belongs in
        // exactlyOne rather than moreThanOne.
        if (storages.Count == 1)
        {
            DatanodeStorageInfo remaining = storages[0];
            moreThanOne.Remove(remaining);
            exactlyOne.AddItem(remaining);
        }
    }
    else
    {
        exactlyOne.Remove(cur);
    }
}
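// Illustrative sketch (not from the original source) of the bookkeeping above.
// "policy" stands in for the owning BlockPlacementPolicy instance, the rack
// name "/rackA" is hypothetical and assumes the topology resolves both
// storages to that rack, and the storages are fabricated with the DFSTestUtil
// helper used in the tests below (usual usings assumed).
DatanodeStorageInfo s1 = DFSTestUtil.CreateDatanodeStorageInfo("s1", "10.10.1.1");
DatanodeStorageInfo s2 = DFSTestUtil.CreateDatanodeStorageInfo("s2", "10.10.1.2");
IDictionary<string, IList<DatanodeStorageInfo>> rackMap =
    new Dictionary<string, IList<DatanodeStorageInfo>>();
rackMap["/rackA"] = new List<DatanodeStorageInfo> { s1, s2 };
IList<DatanodeStorageInfo> moreThanOne = new List<DatanodeStorageInfo> { s1, s2 };
IList<DatanodeStorageInfo> exactlyOne = new List<DatanodeStorageInfo>();
// Removing s1 leaves a single replica on "/rackA": s2 migrates from
// moreThanOne to exactlyOne, and rackMap keeps the one-element list.
policy.AdjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, s1);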
internal virtual void AddReplicaIfNotPresent(DatanodeStorageInfo storage, Block block,
    HdfsServerConstants.ReplicaState rState)
{
    IEnumerator<BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction> it =
        replicas.GetEnumerator();
    while (it.HasNext())
    {
        BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction r = it.Next();
        DatanodeStorageInfo expectedLocation = r.GetExpectedStorageLocation();
        if (expectedLocation == storage)
        {
            // Record the gen stamp from the report
            r.SetGenerationStamp(block.GetGenerationStamp());
            return;
        }
        else if (expectedLocation != null
            && expectedLocation.GetDatanodeDescriptor() == storage.GetDatanodeDescriptor())
        {
            // The Datanode reported that the block is on a different storage
            // than the one chosen by BlockPlacementPolicy. This can occur as
            // we allow Datanodes to choose the target storage. Update our
            // state by removing the stale entry and adding a new one.
            it.Remove();
            break;
        }
    }
    replicas.AddItem(new BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction(
        block, storage, rState));
}
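// Illustrative sketch (not from the original source) of the stale-entry path
// above: the replica was expected on storage s1a, but the datanode later
// reports it on a second storage s1b of the same node, so the old expected
// location is dropped and s1b is appended. Construction mirrors the tests
// below; the storage IDs and the ReplicaState value are assumptions.
DatanodeStorageInfo s1a = DFSTestUtil.CreateDatanodeStorageInfo("s1a", "10.10.1.1");
DatanodeStorageInfo s1b = new DatanodeStorageInfo(s1a.GetDatanodeDescriptor(),
    new DatanodeStorage("s1b"));
Block blk = new Block(0, 0, GenerationStamp.LastReservedStamp);
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
    blk, (short)1, HdfsServerConstants.BlockUCState.UnderConstruction,
    new DatanodeStorageInfo[] { s1a });
// Same datanode, different storage: the s1a entry is replaced by one for s1b.
// Reporting s1b a second time would only refresh its generation stamp.
blockInfo.AddReplicaIfNotPresent(s1b, blk, HdfsServerConstants.ReplicaState.Finalized);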
public virtual void TestInitializeBlockRecovery()
{
    DatanodeStorageInfo s1 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.1", "s1");
    DatanodeDescriptor dd1 = s1.GetDatanodeDescriptor();
    DatanodeStorageInfo s2 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.2", "s2");
    DatanodeDescriptor dd2 = s2.GetDatanodeDescriptor();
    DatanodeStorageInfo s3 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.3", "s3");
    DatanodeDescriptor dd3 = s3.GetDatanodeDescriptor();
    dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
        new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
        HdfsServerConstants.BlockUCState.UnderConstruction,
        new DatanodeStorageInfo[] { s1, s2, s3 });
    // Recovery attempt #1: dd2 has the most recent heartbeat, so it is chosen as primary.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -3 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -2 * 1000);
    blockInfo.InitializeBlockRecovery(1);
    BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #2: dd2 was already tried as primary, so dd1 is the
    // freshest remaining candidate.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -3 * 1000);
    blockInfo.InitializeBlockRecovery(2);
    blockInfoRecovery = dd1.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #3: only dd3 has not yet been tried.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -3 * 1000);
    blockInfo.InitializeBlockRecovery(3);
    blockInfoRecovery = dd3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #4: every node has been tried, so the candidate set is
    // reset and the DN with the most recent heartbeat is picked again.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
    blockInfo.InitializeBlockRecovery(3);
    blockInfoRecovery = dd3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
}
/// <summary>Find the specified DatanodeStorageInfo.</summary>
/// <returns>the DatanodeStorageInfo, or null if not found.</returns>
internal virtual DatanodeStorageInfo FindStorageInfo(DatanodeDescriptor dn)
{
    int len = GetCapacity();
    for (int idx = 0; idx < len; idx++)
    {
        DatanodeStorageInfo cur = GetStorageInfo(idx);
        if (cur == null)
        {
            // Entries are packed at the front, so the first null slot
            // marks the end of the used storages.
            break;
        }
        if (cur.GetDatanodeDescriptor() == dn)
        {
            return cur;
        }
    }
    return null;
}
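// Illustrative sketch (not from the original source): after adding a block to
// a storage, FindStorageInfo maps the block back from a DatanodeDescriptor to
// the storage holding it. The storage ID and address are fabricated with the
// DFSTestUtil helper used in the tests below.
DatanodeStorageInfo storage = DFSTestUtil.CreateDatanodeStorageInfo("storageID1", "127.0.0.1");
BlockInfoContiguous blockInfo = new BlockInfoContiguous((short)3);
storage.AddBlock(blockInfo);
// Returns "storage"; a descriptor holding no replica of this block yields null.
DatanodeStorageInfo found = blockInfo.FindStorageInfo(storage.GetDatanodeDescriptor());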
public virtual void TestReplaceStorage()
{
    // Create two dummy storages on the same datanode.
    DatanodeStorageInfo storage1 = DFSTestUtil.CreateDatanodeStorageInfo("storageID1", "127.0.0.1");
    DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.GetDatanodeDescriptor(),
        new DatanodeStorage("storageID2"));
    int NumBlocks = 10;
    BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NumBlocks];
    // Create a few dummy blocks and add them to the first storage.
    for (int i = 0; i < NumBlocks; ++i)
    {
        blockInfos[i] = new BlockInfoContiguous((short)3);
        storage1.AddBlock(blockInfos[i]);
    }
    // Try to move one of the blocks to a different storage. The result is not
    // Added: the block's existing storage reference is replaced instead.
    bool added = storage2.AddBlock(blockInfos[NumBlocks / 2]) ==
        DatanodeStorageInfo.AddBlockResult.Added;
    Assert.AssertThat(added, IS.Is(false));
    Assert.AssertThat(blockInfos[NumBlocks / 2].GetStorageInfo(0), IS.Is(storage2));
}
public virtual DatanodeDescriptor GetDatanode(int index)
{
    DatanodeStorageInfo storage = GetStorageInfo(index);
    return storage == null ? null : storage.GetDatanodeDescriptor();
}
private bool IsOnSameNodeGroup(DatanodeDescriptor left, DatanodeStorageInfo right)
{
    return cluster.IsOnSameNodeGroup(left, right.GetDatanodeDescriptor());
}

private bool IsOnSameNodeGroup(DatanodeStorageInfo left, DatanodeStorageInfo right)
{
    return IsOnSameNodeGroup(left.GetDatanodeDescriptor(), right);
}
/// <summary>
/// Regression test for HDFS-7960.<p/>
/// Shutting down a datanode, removing a storage directory, and restarting
/// the DataNode should not produce zombie storages.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRemovingStorageDoesNotProduceZombies()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
    int NumStoragesPerDn = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
        .StoragesPerDatanode(NumStoragesPerDn).Build();
    try
    {
        cluster.WaitActive();
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, cluster.GetNamesystem()
                .GetBlockManager().GetDatanodeManager().GetDatanode(dn.GetDatanodeId())
                .GetStorageInfos().Length);
        }
        // Create a file which will end up on all 3 datanodes.
        Path TestPath = new Path("/foo1");
        DistributedFileSystem fs = cluster.GetFileSystem();
        DFSTestUtil.CreateFile(fs, TestPath, 1024, (short)3, unchecked((int)(0xcafecafe)));
        foreach (DataNode dn_1 in cluster.GetDataNodes())
        {
            DataNodeTestUtils.TriggerBlockReport(dn_1);
        }
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path("/foo1"));
        cluster.GetNamesystem().WriteLock();
        string storageIdToRemove;
        string datanodeUuid;
        // Find the first storage which this block is in.
        try
        {
            IEnumerator<DatanodeStorageInfo> storageInfoIter = cluster.GetNamesystem()
                .GetBlockManager().GetStorages(block.GetLocalBlock()).GetEnumerator();
            NUnit.Framework.Assert.IsTrue(storageInfoIter.HasNext());
            DatanodeStorageInfo info = storageInfoIter.Next();
            storageIdToRemove = info.GetStorageID();
            datanodeUuid = info.GetDatanodeDescriptor().GetDatanodeUuid();
        }
        finally
        {
            cluster.GetNamesystem().WriteUnlock();
        }
        // Find the DataNode which holds that first storage.
        DataNode datanodeToRemoveStorageFrom;
        int datanodeToRemoveStorageFromIdx = 0;
        while (true)
        {
            if (datanodeToRemoveStorageFromIdx >= cluster.GetDataNodes().Count)
            {
                NUnit.Framework.Assert.Fail("failed to find datanode with uuid " + datanodeUuid);
                datanodeToRemoveStorageFrom = null;
                break;
            }
            DataNode dn_2 = cluster.GetDataNodes()[datanodeToRemoveStorageFromIdx];
            if (dn_2.GetDatanodeUuid().Equals(datanodeUuid))
            {
                datanodeToRemoveStorageFrom = dn_2;
                break;
            }
            datanodeToRemoveStorageFromIdx++;
        }
        // Find the volume within the datanode which holds that first storage.
        IList<FsVolumeSpi> volumes = datanodeToRemoveStorageFrom.GetFSDataset().GetVolumes();
        NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, volumes.Count);
        string volumeDirectoryToRemove = null;
        foreach (FsVolumeSpi volume in volumes)
        {
            if (volume.GetStorageID().Equals(storageIdToRemove))
            {
                volumeDirectoryToRemove = volume.GetBasePath();
            }
        }
        // Shut down the datanode and remove the volume.
        // Replace the volume directory with a regular file, which will
        // cause a volume failure. (If we merely removed the directory,
        // it would be re-initialized with a new storage ID.)
        NUnit.Framework.Assert.IsNotNull(volumeDirectoryToRemove);
        datanodeToRemoveStorageFrom.Shutdown();
        FileUtil.FullyDelete(new FilePath(volumeDirectoryToRemove));
        FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
        try
        {
            fos.Write(1);
        }
        finally
        {
            fos.Close();
        }
        cluster.RestartDataNode(datanodeToRemoveStorageFromIdx);
        // Wait for the NameNode to remove the storage.
        Log.Info("waiting for the datanode to remove " + storageIdToRemove);
        GenericTestUtils.WaitFor(new _Supplier_227(cluster, datanodeToRemoveStorageFrom,
            storageIdToRemove, NumStoragesPerDn), 10, 30000);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
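// Illustrative sketch (not from the original source): the generated
// _Supplier_227 above is not shown here, but a condition of that shape
// presumably polls the DatanodeManager until the removed storage ID no longer
// appears among the datanode's storages. The class name, captured fields, and
// the Supplier<bool> mapping below are assumptions.
private sealed class _Supplier_Sketch : Supplier<bool>
{
    private readonly MiniDFSCluster cluster;
    private readonly DataNode dn;
    private readonly string storageIdToRemove;

    internal _Supplier_Sketch(MiniDFSCluster cluster, DataNode dn, string storageIdToRemove)
    {
        this.cluster = cluster;
        this.dn = dn;
        this.storageIdToRemove = storageIdToRemove;
    }

    public bool Get()
    {
        DatanodeDescriptor descriptor = cluster.GetNamesystem().GetBlockManager()
            .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
        foreach (DatanodeStorageInfo info in descriptor.GetStorageInfos())
        {
            if (info.GetStorageID().Equals(storageIdToRemove))
            {
                return false; // The zombie storage is still registered.
            }
        }
        return true; // The storage has been removed; no zombie remains.
    }
}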
public override string ToString()
{
    return "ReportedBlockInfo [block=" + block + ", dn=" + storageInfo.GetDatanodeDescriptor()
        + ", reportedState=" + reportedState + "]";
}
/// <summary>Whether the data-node this replica belongs to is alive.</summary>
internal virtual bool IsAlive()
{
    return expectedLocation.GetDatanodeDescriptor().isAlive;
}