/// <summary>
/// Convenience constructor: expands the given storages into the parallel
/// datanode-info / storage-id / storage-type arrays required by the primary
/// constructor, passing an empty cached-locations array.
/// </summary>
/// <param name="b">the block being located</param>
/// <param name="storages">storages holding replicas of the block</param>
/// <param name="startOffset">offset of this block within the file</param>
/// <param name="corrupt">true if all replicas of the block are corrupt</param>
public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages, long startOffset, bool corrupt)
    : this(b,
           DatanodeStorageInfo.ToDatanodeInfos(storages),
           DatanodeStorageInfo.ToStorageIDs(storages),
           DatanodeStorageInfo.ToStorageTypes(storages),
           startOffset,
           corrupt,
           EmptyLocs)
{
}
/// <summary>
/// Verify that the NameNode can learn about new storages from incremental
/// block reports.
/// </summary>
/// <remarks>
/// Sends an incremental block report for a fake block on a storage UUID the
/// NameNode has never seen, then checks that the DatanodeManager now exposes
/// a DatanodeStorageInfo for that UUID. This tests the fix for the error
/// condition seen in HDFS-6904.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void TestNnLearnsNewStorages()
{
    // Generate a report for a fake block on a storage the NN has not seen yet.
    string storageUuid = UUID.RandomUUID().ToString();
    DatanodeStorage unknownStorage = new DatanodeStorage(storageUuid);
    StorageReceivedDeletedBlocks[] report = MakeReportForReceivedBlock(GetDummyBlock(), unknownStorage);
    // Deliver the incremental report to the NN over RPC.
    cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, report);
    // The NN should now track the new storage under dn0.
    DatanodeStorageInfo learned = cluster.GetNameNode().GetNamesystem()
        .GetBlockManager().GetDatanodeManager()
        .GetDatanode(dn0.GetDatanodeId())
        .GetStorageInfo(storageUuid);
    NUnit.Framework.Assert.IsNotNull(learned);
}
/// <summary>Create BlockCommand for transferring blocks to another datanode</summary>
/// <param name="action">the block-command action code passed to the base class</param>
/// <param name="poolId">block pool the blocks belong to</param>
/// <param name="blocktargetlist">blocks to be transferred</param>
public BlockCommand(int action, string poolId, IList<DatanodeDescriptor.BlockTargetPair> blocktargetlist)
    : base(action)
{
    this.poolId = poolId;
    int count = blocktargetlist.Count;
    blocks = new Block[count];
    targets = new DatanodeInfo[count][];
    targetStorageTypes = new StorageType[count][];
    targetStorageIDs = new string[count][];
    // Flatten each (block, targets) pair into the parallel arrays above.
    for (int i = 0; i < count; i++)
    {
        DatanodeDescriptor.BlockTargetPair pair = blocktargetlist[i];
        blocks[i] = pair.block;
        targets[i] = DatanodeStorageInfo.ToDatanodeInfos(pair.targets);
        targetStorageTypes[i] = DatanodeStorageInfo.ToStorageTypes(pair.targets);
        targetStorageIDs[i] = DatanodeStorageInfo.ToStorageIDs(pair.targets);
    }
}
/// <summary>
/// Build a Mockito spy of an FSNamesystem wired to <paramref name="file"/> and an
/// under-construction copy of <paramref name="block"/>, with collaborators stubbed
/// so the namesystem can be exercised without a running cluster.
/// </summary>
/// <param name="block">block to wrap as the file's under-construction last block</param>
/// <param name="file">mock INodeFile the namesystem should consider live</param>
/// <returns>the stubbed FSNamesystem spy</returns>
/// <exception cref="System.IO.IOException"/>
private FSNamesystem MakeNameSystemSpy(Block block, INodeFile file)
{
    Configuration conf = new Configuration();
    FSImage fsImage = new FSImage(conf);
    DatanodeStorageInfo[] noTargets = new DatanodeStorageInfo[] { };
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsn.SetImageLoaded(true);
    // set file's parent as root and put the file to inodeMap, so
    // FSNamesystem's isFileDeleted() method will return false on this file
    if (file.GetParent() == null)
    {
        INodeDirectory mockParent = Org.Mockito.Mockito.Mock<INodeDirectory>();
        INodeDirectory root = new INodeDirectory(mockParent.GetId(), new byte[0],
            mockParent.GetPermissionStatus(), mockParent.GetAccessTime());
        root.SetLocalName(new byte[0]);
        root.AddChild(file);
        file.SetParent(root);
    }
    fsn.dir.GetINodeMap().Put(file);
    FSNamesystem spy = Org.Mockito.Mockito.Spy(fsn);
    // Single-replica under-construction block with recovery initialized at genStamp.
    BlockInfoContiguousUnderConstruction ucBlock = new BlockInfoContiguousUnderConstruction(
        block, (short)1, HdfsServerConstants.BlockUCState.UnderConstruction, noTargets);
    ucBlock.SetBlockCollection(file);
    ucBlock.SetGenerationStamp(genStamp);
    ucBlock.InitializeBlockRecovery(genStamp);
    // Stub the mock file and the spy so lookups resolve to the block above
    // and edit-log access returns a mock instead of touching storage.
    Org.Mockito.Mockito.DoReturn(true).When(file).RemoveLastBlock(Matchers.Any<Block>());
    Org.Mockito.Mockito.DoReturn(true).When(file).IsUnderConstruction();
    Org.Mockito.Mockito.DoReturn(new BlockInfoContiguous[1]).When(file).GetBlocks();
    Org.Mockito.Mockito.DoReturn(ucBlock).When(spy).GetStoredBlock(Matchers.Any<Block>());
    Org.Mockito.Mockito.DoReturn(ucBlock).When(file).GetLastBlock();
    Org.Mockito.Mockito.DoReturn(string.Empty).When(spy).CloseFileCommitBlocks(
        Matchers.Any<INodeFile>(), Matchers.Any<BlockInfoContiguous>());
    Org.Mockito.Mockito.DoReturn(Org.Mockito.Mockito.Mock<FSEditLog>()).When(spy).GetEditLog();
    return spy;
}