public virtual void TestQueues()
{
    DatanodeDescriptor fakeDN = DFSTestUtil.GetLocalDatanodeDescriptor();
    DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
    DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
    msgs.EnqueueReportedBlock(storageInfo, block1Gs1, HdfsServerConstants.ReplicaState.Finalized);
    msgs.EnqueueReportedBlock(storageInfo, block1Gs2, HdfsServerConstants.ReplicaState.Finalized);
    NUnit.Framework.Assert.AreEqual(2, msgs.Count());
    // Nothing queued yet for block 2
    NUnit.Framework.Assert.IsNull(msgs.TakeBlockQueue(block2Gs1));
    NUnit.Framework.Assert.AreEqual(2, msgs.Count());
    Queue<PendingDataNodeMessages.ReportedBlockInfo> q =
        msgs.TakeBlockQueue(block1Gs2DifferentInstance);
    NUnit.Framework.Assert.AreEqual(
        "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
        "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
        Joiner.On(",").Join(q));
    NUnit.Framework.Assert.AreEqual(0, msgs.Count());
    // Should be null if we pull again
    NUnit.Framework.Assert.IsNull(msgs.TakeBlockQueue(block1Gs1));
    NUnit.Framework.Assert.AreEqual(0, msgs.Count());
}
internal virtual DatanodeStorageInfo UpdateStorage(DatanodeStorage s)
{
    lock (storageMap)
    {
        DatanodeStorageInfo storage = storageMap[s.GetStorageID()];
        if (storage == null)
        {
            Log.Info("Adding new storage ID " + s.GetStorageID() + " for DN " + GetXferAddr());
            storage = new DatanodeStorageInfo(this, s);
            storageMap[s.GetStorageID()] = storage;
        }
        else if (storage.GetState() != s.GetState() ||
            storage.GetStorageType() != s.GetStorageType())
        {
            // For backwards compatibility, make sure that the type and
            // state are updated. Some reports from older datanodes do
            // not include these fields so we may have assumed defaults.
            storage.UpdateFromStorage(s);
            storageMap[storage.GetStorageID()] = storage;
        }
        return storage;
    }
}
private DatanodeStorageInfo[] ScheduleSingleReplication(Block block)
{
    // list for priority 1
    IList<Block> list_p1 = new AList<Block>();
    list_p1.AddItem(block);
    // list of lists for each priority
    IList<IList<Block>> list_all = new AList<IList<Block>>();
    list_all.AddItem(new AList<Block>()); // for priority 0
    list_all.AddItem(list_p1);            // for priority 1
    NUnit.Framework.Assert.AreEqual("Block not initially pending replication", 0,
        bm.pendingReplications.GetNumReplicas(block));
    NUnit.Framework.Assert.AreEqual("computeReplicationWork should indicate replication is needed",
        1, bm.ComputeReplicationWorkForBlocks(list_all));
    NUnit.Framework.Assert.IsTrue("replication is pending after work is computed",
        bm.pendingReplications.GetNumReplicas(block) > 0);
    LinkedListMultimap<DatanodeStorageInfo, DatanodeDescriptor.BlockTargetPair> repls =
        GetAllPendingReplications();
    NUnit.Framework.Assert.AreEqual(1, repls.Size());
    KeyValuePair<DatanodeStorageInfo, DatanodeDescriptor.BlockTargetPair> repl =
        repls.Entries().GetEnumerator().Next();
    DatanodeStorageInfo[] targets = repl.Value.targets;
    DatanodeStorageInfo[] pipeline = new DatanodeStorageInfo[1 + targets.Length];
    pipeline[0] = repl.Key;
    System.Array.Copy(targets, 0, pipeline, 1, targets.Length);
    return pipeline;
}
internal ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block,
    HdfsServerConstants.ReplicaState reportedState)
{
    this.storageInfo = storageInfo;
    this.block = block;
    this.reportedState = reportedState;
}
public virtual DatanodeStorageInfo.AddBlockResult AddBlock(BlockInfoContiguous b)
{
    // First check whether the block belongs to a different storage
    // on the same DN.
    DatanodeStorageInfo.AddBlockResult result = DatanodeStorageInfo.AddBlockResult.Added;
    DatanodeStorageInfo otherStorage = b.FindStorageInfo(GetDatanodeDescriptor());
    if (otherStorage != null)
    {
        if (otherStorage != this)
        {
            // The block belongs to a different storage. Remove it first.
            otherStorage.RemoveBlock(b);
            result = DatanodeStorageInfo.AddBlockResult.Replaced;
        }
        else
        {
            // The block is already associated with this storage.
            return DatanodeStorageInfo.AddBlockResult.AlreadyExist;
        }
    }
    // add to the head of the data-node list
    b.AddStorage(this);
    blockList = b.ListInsert(blockList, this);
    numBlocks++;
    return result;
}
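// AddBlock reports what actually happened through AddBlockResult, letting a caller
// distinguish a brand-new replica from one that migrated between storages on the
// same datanode. A minimal caller sketch; the switch arms are hypothetical usage,
// only the enum values come from the method above.
DatanodeStorageInfo.AddBlockResult result = storageInfo.AddBlock(blockInfo);
switch (result)
{
    case DatanodeStorageInfo.AddBlockResult.Added:
        // First replica of this block on the datanode.
        break;
    case DatanodeStorageInfo.AddBlockResult.Replaced:
        // The replica moved from another storage on the same datanode
        // (e.g. a disk swap); per-node counts must not be incremented twice.
        break;
    case DatanodeStorageInfo.AddBlockResult.AlreadyExist:
        // Duplicate report for the same storage; nothing changed.
        break;
}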
internal virtual IList<DatanodeStorageInfo> RemoveZombieStorages()
{
    IList<DatanodeStorageInfo> zombies = null;
    lock (storageMap)
    {
        IEnumerator<KeyValuePair<string, DatanodeStorageInfo>> iter = storageMap.GetEnumerator();
        while (iter.HasNext())
        {
            KeyValuePair<string, DatanodeStorageInfo> entry = iter.Next();
            DatanodeStorageInfo storageInfo = entry.Value;
            if (storageInfo.GetLastBlockReportId() != curBlockReportId)
            {
                Log.Info(storageInfo.GetStorageID() + " had lastBlockReportId 0x" +
                    long.ToHexString(storageInfo.GetLastBlockReportId()) +
                    ", but curBlockReportId = 0x" + long.ToHexString(curBlockReportId));
                iter.Remove();
                if (zombies == null)
                {
                    zombies = new List<DatanodeStorageInfo>();
                }
                zombies.AddItem(storageInfo);
            }
            storageInfo.SetLastBlockReportId(0);
        }
    }
    return zombies == null ? EmptyStorageInfoList : zombies;
}
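// The sweep above only makes sense if every storage touched by the current block
// report was stamped with curBlockReportId first. A hypothetical driver sketch of
// that contract; the field and loop here are illustrative, not taken from
// DatanodeDescriptor's actual report-processing code.
curBlockReportId = newReportId; // assumed: a fresh, nonzero id per full report
foreach (DatanodeStorage reported in reportedStorages)
{
    // UpdateStorage (shown earlier) creates or refreshes the entry.
    UpdateStorage(reported).SetLastBlockReportId(curBlockReportId);
}
// Anything left unstamped was not in the report and is treated as a zombie.
IList<DatanodeStorageInfo> zombies = RemoveZombieStorages();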
public virtual void TestHighestPriReplSrcChosenDespiteMaxReplLimit()
{
    bm.maxReplicationStreams = 0;
    bm.replicationStreamsHardLimit = 1;
    long blockId = 42; // arbitrary
    Block aBlock = new Block(blockId, 0, 0);
    IList<DatanodeDescriptor> origNodes = GetNodes(0, 1);
    // Add the block to the first node.
    AddBlockOnNodes(blockId, origNodes.SubList(0, 1));
    IList<DatanodeDescriptor> cntNodes = new List<DatanodeDescriptor>();
    IList<DatanodeStorageInfo> liveNodes = new List<DatanodeStorageInfo>();
    NUnit.Framework.Assert.IsNotNull("Chooses source node for a highest-priority replication" +
        " even if all available source nodes have reached their replication" +
        " limits below the hard limit.",
        bm.ChooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
            UnderReplicatedBlocks.QueueHighestPriority));
    NUnit.Framework.Assert.IsNull("Does not choose a source node for a less-than-highest-priority" +
        " replication since all available source nodes have reached" +
        " their replication limits.",
        bm.ChooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
            UnderReplicatedBlocks.QueueVeryUnderReplicated));
    // Increase the replication count to test replication count > hard limit
    DatanodeStorageInfo[] targets = new DatanodeStorageInfo[] { origNodes[1].GetStorageInfos()[0] };
    origNodes[0].AddBlockToBeReplicated(aBlock, targets);
    NUnit.Framework.Assert.IsNull("Does not choose a source node for a highest-priority" +
        " replication when all available nodes exceed the hard limit.",
        bm.ChooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
            UnderReplicatedBlocks.QueueHighestPriority));
}
public virtual void TestFavorDecomUntilHardLimit()
{
    bm.maxReplicationStreams = 0;
    bm.replicationStreamsHardLimit = 1;
    long blockId = 42; // arbitrary
    Block aBlock = new Block(blockId, 0, 0);
    IList<DatanodeDescriptor> origNodes = GetNodes(0, 1);
    // Add the block to the first node.
    AddBlockOnNodes(blockId, origNodes.SubList(0, 1));
    origNodes[0].StartDecommission();
    IList<DatanodeDescriptor> cntNodes = new List<DatanodeDescriptor>();
    IList<DatanodeStorageInfo> liveNodes = new List<DatanodeStorageInfo>();
    NUnit.Framework.Assert.IsNotNull("Chooses decommissioning source node for a normal replication" +
        " if all available source nodes have reached their replication" +
        " limits below the hard limit.",
        bm.ChooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
            UnderReplicatedBlocks.QueueUnderReplicated));
    // Increase the replication count to test replication count > hard limit
    DatanodeStorageInfo[] targets = new DatanodeStorageInfo[] { origNodes[1].GetStorageInfos()[0] };
    origNodes[0].AddBlockToBeReplicated(aBlock, targets);
    NUnit.Framework.Assert.IsNull("Does not choose a source decommissioning node for a normal" +
        " replication when all available nodes exceed the hard limit.",
        bm.ChooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
            UnderReplicatedBlocks.QueueUnderReplicated));
}
/// <summary>Adjust rackMap, moreThanOne, and exactlyOne after removing the replica on cur.</summary>
/// <param name="rackMap">a map from rack to its replicas</param>
/// <param name="moreThanOne">the list of replica nodes on racks that hold more than one replica</param>
/// <param name="exactlyOne">the list of replica nodes on racks with only one replica</param>
/// <param name="cur">current replica to remove</param>
public virtual void AdjustSetsWithChosenReplica(
    IDictionary<string, IList<DatanodeStorageInfo>> rackMap,
    IList<DatanodeStorageInfo> moreThanOne,
    IList<DatanodeStorageInfo> exactlyOne,
    DatanodeStorageInfo cur)
{
    string rack = GetRack(cur.GetDatanodeDescriptor());
    IList<DatanodeStorageInfo> storages = rackMap[rack];
    storages.Remove(cur);
    if (storages.IsEmpty())
    {
        Sharpen.Collections.Remove(rackMap, rack);
    }
    if (moreThanOne.Remove(cur))
    {
        // If the rack went from two replicas down to one, the survivor
        // moves from the moreThanOne set to the exactlyOne set.
        if (storages.Count == 1)
        {
            DatanodeStorageInfo remaining = storages[0];
            moreThanOne.Remove(remaining);
            exactlyOne.AddItem(remaining);
        }
    }
    else
    {
        exactlyOne.Remove(cur);
    }
}
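// Usage sketch: delete excess replicas one at a time while keeping the rack
// bookkeeping consistent between picks, as TestChooseReplicaToDelete (further
// below) exercises step by step. Assumes rackMap, moreThanOne, and exactlyOne
// were populated by SplitNodesWithRack and excessTypes lists deletable storage
// types; the loop shape and the replication value are illustrative only.
short replication = 2; // hypothetical target replication factor
while (moreThanOne.Count + exactlyOne.Count > replication)
{
    DatanodeStorageInfo cur = replicator.ChooseReplicaToDelete(
        null, null, replication, moreThanOne, exactlyOne, excessTypes);
    // Re-balance the sets so the next pick still prefers racks holding
    // more than one replica.
    replicator.AdjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);
}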
internal virtual Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous ListRemove(
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous head,
    DatanodeStorageInfo storage)
{
    if (head == null)
    {
        return null;
    }
    int dnIndex = this.FindStorageInfo(storage);
    if (dnIndex < 0)
    {
        // this block is not on the data-node list
        return head;
    }
    // Unlink this block from the per-storage doubly-linked list.
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous next = this.GetNext(dnIndex);
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous prev = this.GetPrevious(dnIndex);
    this.SetNext(dnIndex, null);
    this.SetPrevious(dnIndex, null);
    if (prev != null)
    {
        prev.SetNext(prev.FindStorageInfo(storage), next);
    }
    if (next != null)
    {
        next.SetPrevious(next.FindStorageInfo(storage), prev);
    }
    if (this == head)
    {
        // removing the head
        head = next;
    }
    return head;
}
internal virtual void AddReplicaIfNotPresent(DatanodeStorageInfo storage, Block block,
    HdfsServerConstants.ReplicaState rState)
{
    IEnumerator<BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction> it =
        replicas.GetEnumerator();
    while (it.HasNext())
    {
        BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction r = it.Next();
        DatanodeStorageInfo expectedLocation = r.GetExpectedStorageLocation();
        if (expectedLocation == storage)
        {
            // Record the gen stamp from the report
            r.SetGenerationStamp(block.GetGenerationStamp());
            return;
        }
        else if (expectedLocation != null &&
            expectedLocation.GetDatanodeDescriptor() == storage.GetDatanodeDescriptor())
        {
            // The Datanode reported that the block is on a different storage
            // than the one chosen by BlockPlacementPolicy. This can occur as
            // we allow Datanodes to choose the target storage. Update our
            // state by removing the stale entry and adding a new one.
            it.Remove();
            break;
        }
    }
    replicas.AddItem(new BlockInfoContiguousUnderConstruction.ReplicaUnderConstruction(
        block, storage, rState));
}
private void SetStorageInfo(int index, DatanodeStorageInfo storage)
{
    System.Diagnostics.Debug.Assert(this.triplets != null, "BlockInfo is not initialized");
    System.Diagnostics.Debug.Assert(index >= 0 && index * 3 < triplets.Length,
        "Index is out of bound");
    triplets[index * 3] = storage;
}
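// The "index * 3" stride reflects the triplets layout that ListInsert, ListRemove,
// and MoveBlockToHead all rely on: each replica slot stores the storage reference
// plus the previous/next links of that storage's block list. A sketch of the
// matching accessors under that layout; the bodies are reconstructions, not copied
// from the class.
//
// Layout per replica i:
//   triplets[3*i]     -> DatanodeStorageInfo holding the replica
//   triplets[3*i + 1] -> previous BlockInfoContiguous in that storage's list
//   triplets[3*i + 2] -> next BlockInfoContiguous in that storage's list
internal DatanodeStorageInfo GetStorageInfo(int index)
{
    System.Diagnostics.Debug.Assert(this.triplets != null, "BlockInfo is not initialized");
    return (DatanodeStorageInfo)triplets[index * 3];
}

internal BlockInfoContiguous GetPrevious(int index)
{
    return (BlockInfoContiguous)triplets[index * 3 + 1];
}

internal BlockInfoContiguous GetNext(int index)
{
    return (BlockInfoContiguous)triplets[index * 3 + 2];
}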
internal virtual void EnqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
    HdfsServerConstants.ReplicaState reportedState)
{
    // Defensive copy: later mutation of the caller's Block instance must not
    // change the queued message.
    block = new Block(block);
    GetBlockQueue(block).AddItem(new PendingDataNodeMessages.ReportedBlockInfo(
        storageInfo, block, reportedState));
    count++;
}
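// TestQueues above retrieves one queue holding reports for both generation stamps
// of block 1, which implies GetBlockQueue keys messages by block identity (in HDFS,
// Block equality is based on the block ID alone, not the generation stamp). A
// lazy-lookup sketch under that assumption; the field name and .NET collection
// choices here are guesses, not the class's actual declarations.
private readonly IDictionary<Block, Queue<PendingDataNodeMessages.ReportedBlockInfo>> queueByBlockId =
    new Dictionary<Block, Queue<PendingDataNodeMessages.ReportedBlockInfo>>();

private Queue<PendingDataNodeMessages.ReportedBlockInfo> GetBlockQueue(Block block)
{
    Queue<PendingDataNodeMessages.ReportedBlockInfo> queue;
    if (!queueByBlockId.TryGetValue(block, out queue))
    {
        // First message for this block ID: create its queue on demand.
        queue = new Queue<PendingDataNodeMessages.ReportedBlockInfo>();
        queueByBlockId[block] = queue;
    }
    return queue;
}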
internal ReplicaUnderConstruction(Block block, DatanodeStorageInfo target,
    HdfsServerConstants.ReplicaState state)
    : base(block)
{
    this.expectedLocation = target;
    this.state = state;
    this.chosenAsPrimary = false;
}
public virtual void TestAddStorage()
{
    BlockInfoContiguous blockInfo = new BlockInfoContiguous((short)3);
    DatanodeStorageInfo storage = DFSTestUtil.CreateDatanodeStorageInfo("storageID", "127.0.0.1");
    bool added = blockInfo.AddStorage(storage);
    NUnit.Framework.Assert.IsTrue(added);
    NUnit.Framework.Assert.AreEqual(storage, blockInfo.GetStorageInfo(0));
}
/// <summary>Remove block from the list of blocks belonging to the data-node.</summary>
/// <remarks>
/// Remove block from the list of blocks belonging to the data-node. Remove
/// data-node from the block.
/// </remarks>
internal virtual bool RemoveBlock(string storageID, BlockInfoContiguous b)
{
    DatanodeStorageInfo s = GetStorageInfo(storageID);
    if (s != null)
    {
        return s.RemoveBlock(b);
    }
    return false;
}
/// <summary>
/// Add a <see cref="DatanodeStorageInfo"/> location for a block.
/// </summary>
internal virtual bool AddStorage(DatanodeStorageInfo storage)
{
    // find the last null node
    int lastNode = EnsureCapacity(1);
    SetStorageInfo(lastNode, storage);
    SetNext(lastNode, null);
    SetPrevious(lastNode, null);
    return true;
}
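// EnsureCapacity is not shown in this section; from its use above it must return
// the index of the first free replica slot, growing the triplets array when every
// slot is taken. A reconstruction under that assumption. NumNodes is an assumed
// helper that counts non-null storage entries from the front, matching
// FindStorageInfo's "first null terminates the list" convention.
private int EnsureCapacity(int num)
{
    System.Diagnostics.Debug.Assert(this.triplets != null, "BlockInfo is not initialized");
    int last = NumNodes();
    if (triplets.Length >= (last + num) * 3)
    {
        // A free slot already exists at index 'last'.
        return last;
    }
    // Grow to exactly the required size and carry over the existing triplets.
    object[] old = triplets;
    triplets = new object[(last + num) * 3];
    System.Array.Copy(old, 0, triplets, 0, last * 3);
    return last;
}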
/// <summary>
/// Tell the block manager that replication is completed for the given
/// pipeline.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void FulfillPipeline(BlockInfoContiguous blockInfo, DatanodeStorageInfo[] pipeline)
{
    // pipeline[0] is the replication source; only the targets gain a new replica.
    for (int i = 1; i < pipeline.Length; i++)
    {
        DatanodeStorageInfo storage = pipeline[i];
        bm.AddBlock(storage, blockInfo, null);
        blockInfo.AddStorage(storage);
    }
}
/// <summary>
/// Create array of expected replica locations
/// (as has been assigned by chooseTargets()).
/// </summary>
public virtual DatanodeStorageInfo[] GetExpectedStorageLocations()
{
    int numLocations = replicas == null ? 0 : replicas.Count;
    DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
    for (int i = 0; i < numLocations; i++)
    {
        storages[i] = replicas[i].GetExpectedStorageLocation();
    }
    return storages;
}
/// <summary>Remove block from the list of blocks belonging to the data-node.</summary>
/// <remarks>
/// Remove block from the list of blocks belonging to the data-node. Remove
/// data-node from the block.
/// </remarks>
internal virtual bool RemoveBlock(BlockInfoContiguous b)
{
    DatanodeStorageInfo s = b.FindStorageInfo(this);
    // if block exists on this datanode
    if (s != null)
    {
        return s.RemoveBlock(b);
    }
    return false;
}
public virtual void TestUseDelHint()
{
    DatanodeStorageInfo delHint = new DatanodeStorageInfo(
        DFSTestUtil.GetLocalDatanodeDescriptor(), new DatanodeStorage("id"));
    IList<DatanodeStorageInfo> moreThan1Racks = Arrays.AsList(delHint);
    IList<StorageType> excessTypes = new AList<StorageType>();
    // The hint is usable when its storage type is among the excess types ...
    excessTypes.AddItem(StorageType.Default);
    NUnit.Framework.Assert.IsTrue(BlockManager.UseDelHint(true, delHint, null,
        moreThan1Racks, excessTypes));
    // ... and rejected when only a different storage type is in excess.
    excessTypes.Remove(0);
    excessTypes.AddItem(StorageType.Ssd);
    NUnit.Framework.Assert.IsFalse(BlockManager.UseDelHint(true, delHint, null,
        moreThan1Racks, excessTypes));
}
public virtual void TestChooseReplicaToDelete()
{
    IList<DatanodeStorageInfo> replicaList = new AList<DatanodeStorageInfo>();
    IDictionary<string, IList<DatanodeStorageInfo>> rackMap =
        new Dictionary<string, IList<DatanodeStorageInfo>>();
    dataNodes[0].SetRemaining(4 * 1024 * 1024);
    replicaList.AddItem(storages[0]);
    dataNodes[1].SetRemaining(3 * 1024 * 1024);
    replicaList.AddItem(storages[1]);
    dataNodes[2].SetRemaining(2 * 1024 * 1024);
    replicaList.AddItem(storages[2]);
    dataNodes[5].SetRemaining(1 * 1024 * 1024);
    replicaList.AddItem(storages[5]);
    IList<DatanodeStorageInfo> first = new AList<DatanodeStorageInfo>();
    IList<DatanodeStorageInfo> second = new AList<DatanodeStorageInfo>();
    replicator.SplitNodesWithRack(replicaList, rackMap, first, second);
    NUnit.Framework.Assert.AreEqual(3, first.Count);
    NUnit.Framework.Assert.AreEqual(1, second.Count);
    IList<StorageType> excessTypes = new AList<StorageType>();
    excessTypes.AddItem(StorageType.Default);
    DatanodeStorageInfo chosen = replicator.ChooseReplicaToDelete(null, null, (short)3,
        first, second, excessTypes);
    // Within first set {dataNodes[0], dataNodes[1], dataNodes[2]},
    // dataNodes[0] and dataNodes[1] are in the same nodegroup,
    // but dataNodes[1] is chosen because it has less free space.
    NUnit.Framework.Assert.AreEqual(chosen, storages[1]);
    replicator.AdjustSetsWithChosenReplica(rackMap, first, second, chosen);
    NUnit.Framework.Assert.AreEqual(2, first.Count);
    NUnit.Framework.Assert.AreEqual(1, second.Count);
    // Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
    // because it has less free space.
    excessTypes.AddItem(StorageType.Default);
    chosen = replicator.ChooseReplicaToDelete(null, null, (short)2, first, second, excessTypes);
    NUnit.Framework.Assert.AreEqual(chosen, storages[2]);
    replicator.AdjustSetsWithChosenReplica(rackMap, first, second, chosen);
    NUnit.Framework.Assert.AreEqual(0, first.Count);
    NUnit.Framework.Assert.AreEqual(2, second.Count);
    // Within second set, dataNodes[5] has the least free space.
    excessTypes.AddItem(StorageType.Default);
    chosen = replicator.ChooseReplicaToDelete(null, null, (short)1, first, second, excessTypes);
    NUnit.Framework.Assert.AreEqual(chosen, storages[5]);
}
internal virtual Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous ListInsert(
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous head,
    DatanodeStorageInfo storage)
{
    int dnIndex = this.FindStorageInfo(storage);
    System.Diagnostics.Debug.Assert(dnIndex >= 0, "Data node is not found: current");
    System.Diagnostics.Debug.Assert(GetPrevious(dnIndex) == null && GetNext(dnIndex) == null,
        "Block is already in the list and cannot be inserted.");
    // Link this block in front of the current head; this block becomes the new head.
    this.SetPrevious(dnIndex, null);
    this.SetNext(dnIndex, head);
    if (head != null)
    {
        head.SetPrevious(head.FindStorageInfo(storage), this);
    }
    return this;
}
public override bool Equals(object obj)
{
    if (this == obj)
    {
        return true;
    }
    if (obj == null || !(obj is DatanodeStorageInfo))
    {
        return false;
    }
    DatanodeStorageInfo that = (DatanodeStorageInfo)obj;
    // Two storage infos are equal iff they carry the same storage ID.
    return this.storageID.Equals(that.storageID);
}
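// Overriding Equals without GetHashCode breaks hash-based collections in C#, so
// the class must pair the override above with a hash derived from the same field.
// A minimal counterpart consistent with that equality, assuming storageID is
// never null.
public override int GetHashCode()
{
    // Must agree with Equals: equal storage IDs imply equal hash codes.
    return storageID.GetHashCode();
}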
public virtual void TestInitializeBlockRecovery()
{
    DatanodeStorageInfo s1 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.1", "s1");
    DatanodeDescriptor dd1 = s1.GetDatanodeDescriptor();
    DatanodeStorageInfo s2 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.2", "s2");
    DatanodeDescriptor dd2 = s2.GetDatanodeDescriptor();
    DatanodeStorageInfo s3 = DFSTestUtil.CreateDatanodeStorageInfo("10.10.1.3", "s3");
    DatanodeDescriptor dd3 = s3.GetDatanodeDescriptor();
    dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
        new Block(0, 0, GenerationStamp.LastReservedStamp), (short)3,
        HdfsServerConstants.BlockUCState.UnderConstruction,
        new DatanodeStorageInfo[] { s1, s2, s3 });
    // Recovery attempt #1.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -3 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -2 * 1000);
    blockInfo.InitializeBlockRecovery(1);
    BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #2.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -3 * 1000);
    blockInfo.InitializeBlockRecovery(2);
    blockInfoRecovery = dd1.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #3.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, -3 * 1000);
    blockInfo.InitializeBlockRecovery(3);
    blockInfoRecovery = dd3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
    // Recovery attempt #4.
    // Reset everything and again pick the DN with the most recent heartbeat.
    DFSTestUtil.ResetLastUpdatesWithOffset(dd1, -2 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd2, -1 * 1000);
    DFSTestUtil.ResetLastUpdatesWithOffset(dd3, 0);
    blockInfo.InitializeBlockRecovery(3);
    blockInfoRecovery = dd3.GetLeaseRecoveryCommand(1);
    NUnit.Framework.Assert.AreEqual(blockInfoRecovery[0], blockInfo);
}
/// <summary>Find specified DatanodeStorageInfo.</summary>
/// <returns>index or -1 if not found.</returns>
internal virtual int FindStorageInfo(DatanodeStorageInfo storageInfo)
{
    int len = GetCapacity();
    for (int idx = 0; idx < len; idx++)
    {
        DatanodeStorageInfo cur = GetStorageInfo(idx);
        if (cur == storageInfo)
        {
            return idx;
        }
        if (cur == null)
        {
            // Storages are packed from the front, so the first null ends the list.
            break;
        }
    }
    return -1;
}
/// <summary>Find specified DatanodeStorageInfo.</summary>
/// <returns>DatanodeStorageInfo or null if not found.</returns>
internal virtual DatanodeStorageInfo FindStorageInfo(DatanodeDescriptor dn)
{
    int len = GetCapacity();
    for (int idx = 0; idx < len; idx++)
    {
        DatanodeStorageInfo cur = GetStorageInfo(idx);
        if (cur == null)
        {
            break;
        }
        if (cur.GetDatanodeDescriptor() == dn)
        {
            return cur;
        }
    }
    return null;
}
internal virtual Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous MoveBlockToHead(
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous head,
    DatanodeStorageInfo storage, int curIndex, int headIndex)
{
    if (head == this)
    {
        return this;
    }
    // Splice this block out of its current position and relink it as the new head.
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous next =
        this.SetNext(curIndex, head);
    Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.BlockInfoContiguous prev =
        this.SetPrevious(curIndex, null);
    head.SetPrevious(headIndex, this);
    prev.SetNext(prev.FindStorageInfo(storage), next);
    if (next != null)
    {
        next.SetPrevious(next.FindStorageInfo(storage), prev);
    }
    return this;
}
public virtual void TestSafeModeIBR()
{
    DatanodeDescriptor node = Org.Mockito.Mockito.Spy(nodes[0]);
    DatanodeStorageInfo ds = node.GetStorageInfos()[0];
    node.isAlive = true;
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, string.Empty);
    // pretend to be in safemode
    Org.Mockito.Mockito.DoReturn(true).When(fsn).IsInStartupSafeMode();
    // register new node
    bm.GetDatanodeManager().RegisterDatanode(nodeReg);
    bm.GetDatanodeManager().AddDatanode(node); // swap in spy
    NUnit.Framework.Assert.AreEqual(node, bm.GetDatanodeManager().GetDatanode(node));
    NUnit.Framework.Assert.AreEqual(0, ds.GetBlockReportCount());
    // send block report, should be processed
    Org.Mockito.Mockito.Reset(node);
    bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()),
        BlockListAsLongs.Empty, null, false);
    NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
    // send block report again, should NOT be processed
    Org.Mockito.Mockito.Reset(node);
    bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()),
        BlockListAsLongs.Empty, null, false);
    NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
    // re-register as if node restarted, should update existing node
    bm.GetDatanodeManager().RemoveDatanode(node);
    Org.Mockito.Mockito.Reset(node);
    bm.GetDatanodeManager().RegisterDatanode(nodeReg);
    Org.Mockito.Mockito.Verify(node).UpdateRegInfo(nodeReg);
    // send block report, should be processed after restart
    Org.Mockito.Mockito.Reset(node);
    bm.ProcessReport(node, new DatanodeStorage(ds.GetStorageID()),
        BlockListAsLongs.Empty, null, false);
    // Re-fetch the storage info: re-registration with an empty storage list
    // pruned node.storageMap.
    ds = node.GetStorageInfos()[0];
    NUnit.Framework.Assert.AreEqual(1, ds.GetBlockReportCount());
}
public virtual void TestReplaceStorage()
{
    // Create two dummy storages.
    DatanodeStorageInfo storage1 = DFSTestUtil.CreateDatanodeStorageInfo("storageID1", "127.0.0.1");
    DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
        storage1.GetDatanodeDescriptor(), new DatanodeStorage("storageID2"));
    int NumBlocks = 10;
    BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NumBlocks];
    // Create a few dummy blocks and add them to the first storage.
    for (int i = 0; i < NumBlocks; ++i)
    {
        blockInfos[i] = new BlockInfoContiguous((short)3);
        storage1.AddBlock(blockInfos[i]);
    }
    // Try to move one of the blocks to a different storage.
    bool added = storage2.AddBlock(blockInfos[NumBlocks / 2]) ==
        DatanodeStorageInfo.AddBlockResult.Added;
    Assert.AssertThat(added, IS.Is(false));
    Assert.AssertThat(blockInfos[NumBlocks / 2].GetStorageInfo(0), IS.Is(storage2));
}