/// <summary>Create a file with one block and corrupt some/all of the block replicas.</summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Sharpen.TimeoutException"/>
private void CreateAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount)
{
    DFSTestUtil.CreateFile(dfs, filePath, BlockSize, repl, 0);
    DFSTestUtil.WaitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the name node.
    LocatedBlocks locatedblocks = dfs.dfs.GetNamenode().GetBlockLocations(
        filePath.ToString(), 0L, BlockSize);
    NUnit.Framework.Assert.AreEqual(repl, locatedblocks.Get(0).GetLocations().Length);
    // The file only has one block.
    LocatedBlock lblock = locatedblocks.Get(0);
    DatanodeInfo[] datanodeinfos = lblock.GetLocations();
    ExtendedBlock block = lblock.GetBlock();
    // Corrupt some/all of the block replicas.
    for (int i = 0; i < corruptBlockCount; i++)
    {
        DatanodeInfo dninfo = datanodeinfos[i];
        DataNode dn = cluster.GetDataNode(dninfo.GetIpcPort());
        CorruptBlock(block, dn);
        Log.Debug("Corrupted block " + block.GetBlockName() + " on data node " + dninfo);
    }
}
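// A minimal usage sketch (hedged): the test name, path, replica count, and
// the DFSTestUtil.ReadFile call below are illustrative assumptions, not
// part of this suite. It shows the intent of the helper above: corrupt two
// of three replicas, then verify a read still succeeds via the healthy one.
[NUnit.Framework.Test]
public virtual void TestReadSurvivesPartialCorruption()
{
    Path filePath = new Path("/tmp/corrupt-two-of-three.dat");
    CreateAFileWithCorruptedBlockReplicas(filePath, (short)3, 2);
    // The client should fail over past the corrupt replicas and read the
    // remaining good copy without throwing.
    DFSTestUtil.ReadFile(dfs, filePath);
}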
/// <summary>Convert a DatanodeInfo to a Json map.</summary>
internal static IDictionary<string, object> ToJsonMap(DatanodeInfo datanodeinfo)
{
    if (datanodeinfo == null)
    {
        return null;
    }
    // TODO: Fix storageID
    IDictionary<string, object> m = new SortedDictionary<string, object>();
    m["ipAddr"] = datanodeinfo.GetIpAddr();
    // 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
    // expect this instead of the two separate fields.
    m["name"] = datanodeinfo.GetXferAddr();
    m["hostName"] = datanodeinfo.GetHostName();
    m["storageID"] = datanodeinfo.GetDatanodeUuid();
    m["xferPort"] = datanodeinfo.GetXferPort();
    m["infoPort"] = datanodeinfo.GetInfoPort();
    m["infoSecurePort"] = datanodeinfo.GetInfoSecurePort();
    m["ipcPort"] = datanodeinfo.GetIpcPort();
    m["capacity"] = datanodeinfo.GetCapacity();
    m["dfsUsed"] = datanodeinfo.GetDfsUsed();
    m["remaining"] = datanodeinfo.GetRemaining();
    m["blockPoolUsed"] = datanodeinfo.GetBlockPoolUsed();
    m["cacheCapacity"] = datanodeinfo.GetCacheCapacity();
    m["cacheUsed"] = datanodeinfo.GetCacheUsed();
    m["lastUpdate"] = datanodeinfo.GetLastUpdate();
    m["lastUpdateMonotonic"] = datanodeinfo.GetLastUpdateMonotonic();
    m["xceiverCount"] = datanodeinfo.GetXceiverCount();
    m["networkLocation"] = datanodeinfo.GetNetworkLocation();
    m["adminState"] = datanodeinfo.GetAdminState().ToString();
    return m;
}
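// Hedged consumption sketch: the keys read below are exactly those written
// by ToJsonMap above; the helper itself and its name are illustrative only
// and do not exist in the source.
internal static string DescribeDatanode(DatanodeInfo datanodeinfo)
{
    IDictionary<string, object> m = ToJsonMap(datanodeinfo);
    if (m == null)
    {
        return "(no datanode)";
    }
    // Older 1.x / 0.23.x clients rely on the combined "name" field
    // (ipAddr:xferPort) rather than the separate ipAddr/xferPort entries.
    return m["name"] + " at " + m["networkLocation"] + ", adminState=" + m["adminState"];
}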
private void Compare(DatanodeInfo dn1, DatanodeInfo dn2)
{
    NUnit.Framework.Assert.AreEqual(dn1.GetAdminState(), dn2.GetAdminState());
    NUnit.Framework.Assert.AreEqual(dn1.GetBlockPoolUsed(), dn2.GetBlockPoolUsed());
    NUnit.Framework.Assert.AreEqual(dn1.GetBlockPoolUsedPercent(), dn2.GetBlockPoolUsedPercent(), Delta);
    NUnit.Framework.Assert.AreEqual(dn1.GetCapacity(), dn2.GetCapacity());
    NUnit.Framework.Assert.AreEqual(dn1.GetDatanodeReport(), dn2.GetDatanodeReport());
    NUnit.Framework.Assert.AreEqual(dn1.GetDfsUsed(), dn2.GetDfsUsed());
    NUnit.Framework.Assert.AreEqual(dn1.GetDfsUsedPercent(), dn2.GetDfsUsedPercent(), Delta);
    NUnit.Framework.Assert.AreEqual(dn1.GetIpAddr(), dn2.GetIpAddr());
    NUnit.Framework.Assert.AreEqual(dn1.GetHostName(), dn2.GetHostName());
    NUnit.Framework.Assert.AreEqual(dn1.GetInfoPort(), dn2.GetInfoPort());
    NUnit.Framework.Assert.AreEqual(dn1.GetIpcPort(), dn2.GetIpcPort());
    NUnit.Framework.Assert.AreEqual(dn1.GetLastUpdate(), dn2.GetLastUpdate());
    NUnit.Framework.Assert.AreEqual(dn1.GetLevel(), dn2.GetLevel());
    NUnit.Framework.Assert.AreEqual(dn1.GetNetworkLocation(), dn2.GetNetworkLocation());
}
/// <exception cref="System.IO.IOException"/>
private static BlockLocalPathInfo GetBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token,
    bool connectToDnViaHostname, StorageType storageType)
{
    BlockReaderLocalLegacy.LocalDatanodeInfo localDatanodeInfo = GetLocalDatanodeInfo(node.GetIpcPort());
    BlockLocalPathInfo pathinfo = null;
    ClientDatanodeProtocol proxy = localDatanodeInfo.GetDatanodeProxy(ugi, node, conf,
        timeout, connectToDnViaHostname);
    try
    {
        // Make an RPC to the local datanode to find the local pathnames of blocks.
        pathinfo = proxy.GetBlockLocalPathInfo(blk, token);
        // We cannot cache the path information for a replica on transient storage.
        // If the replica gets evicted, it moves to a different path, and our next
        // attempt to read from the cached path would fail to find the file.
        // Additionally, that failure would cause us to disable legacy short-circuit
        // reads for all subsequent use in the ClientContext. Unlike the newer
        // short-circuit read implementation, there is no communication channel for
        // the DataNode to notify the client that the path has been invalidated, so
        // our only option is to skip caching.
        if (pathinfo != null && !storageType.IsTransient())
        {
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Cached location of block " + blk + " as " + pathinfo);
            }
            localDatanodeInfo.SetBlockLocalPathInfo(blk, pathinfo);
        }
    }
    catch (IOException)
    {
        // Reset the proxy on error.
        localDatanodeInfo.ResetDatanodeProxy();
        throw;
    }
    return pathinfo;
}
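// Hedged illustration of the caching rule enforced above, pulled out as a
// standalone predicate (this helper does not exist in the source; it is a
// sketch for exposition only): a local path is cacheable only when it was
// resolved successfully AND the replica is on non-transient storage, since
// a transient (e.g. RAM-backed) replica can be evicted to a new path with
// no channel to invalidate this client-side cache.
private static bool IsCacheablePath(BlockLocalPathInfo pathinfo, StorageType storageType)
{
    return pathinfo != null && !storageType.IsTransient();
}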
// Multiple datanodes could be running on the local machine. Store proxies in
// a map keyed by the ipc port of the datanode.
// reader for the data file
// reader for the checksum file
/// <summary>The only way this object can be instantiated.</summary>
/// <exception cref="System.IO.IOException"/>
internal static BlockReaderLocalLegacy NewBlockReader(DFSClient.Conf conf,
    UserGroupInformation userGroupInformation, Configuration configuration,
    string file, ExtendedBlock blk,
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token,
    DatanodeInfo node, long startOffset, long length, StorageType storageType)
{
    BlockReaderLocalLegacy.LocalDatanodeInfo localDatanodeInfo = GetLocalDatanodeInfo(node.GetIpcPort());
    // Check the cache first.
    BlockLocalPathInfo pathinfo = localDatanodeInfo.GetBlockLocalPathInfo(blk);
    if (pathinfo == null)
    {
        if (userGroupInformation == null)
        {
            userGroupInformation = UserGroupInformation.GetCurrentUser();
        }
        pathinfo = GetBlockPathInfo(userGroupInformation, blk, node, configuration,
            conf.socketTimeout, token, conf.connectToDnViaHostname, storageType);
    }
    // Check whether the file exists. It may happen that the HDFS file has been
    // deleted and this block lookup is occurring on behalf of a new HDFS file;
    // in that case the block file could reside in a different portion of the
    // fs.data.dir directory. If so, we remove this entry from the cache, and
    // the next call to this method will re-populate it.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocalLegacy localBlockReader = null;
    bool skipChecksumCheck = conf.skipShortCircuitChecksums || storageType.IsTransient();
    try
    {
        // Open the block file directly on the local file system.
        FilePath blkfile = new FilePath(pathinfo.GetBlockPath());
        dataIn = new FileInputStream(blkfile);
        if (Log.IsDebugEnabled())
        {
            Log.Debug("New BlockReaderLocalLegacy for file " + blkfile + " of size "
                + blkfile.Length() + " startOffset " + startOffset + " length " + length
                + " short circuit checksum " + !skipChecksumCheck);
        }
        if (!skipChecksumCheck)
        {
            // Open the metadata file and read its checksum header.
            FilePath metafile = new FilePath(pathinfo.GetMetaPath());
            checksumIn = new FileInputStream(metafile);
            DataChecksum checksum = BlockMetadataHeader.ReadDataChecksum(
                new DataInputStream(checksumIn), blk);
            // Align the first read to a checksum-chunk boundary.
            long firstChunkOffset = startOffset - (startOffset % checksum.GetBytesPerChecksum());
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
                startOffset, length, pathinfo, checksum, true, dataIn, firstChunkOffset,
                checksumIn);
        }
        else
        {
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
                startOffset, length, pathinfo, dataIn);
        }
    }
    catch (IOException)
    {
        // Remove the stale path from the cache.
        localDatanodeInfo.RemoveBlockLocalPathInfo(blk);
        DFSClient.Log.Warn("BlockReaderLocalLegacy: Removing " + blk
            + " from cache because local file " + pathinfo.GetBlockPath()
            + " could not be opened.");
        throw;
    }
    finally
    {
        if (localBlockReader == null)
        {
            if (dataIn != null)
            {
                dataIn.Close();
            }
            if (checksumIn != null)
            {
                checksumIn.Close();
            }
        }
    }
    return localBlockReader;
}
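// Worked example of the chunk-offset alignment used in NewBlockReader above
// (the helper and its concrete values are illustrative, not from the source):
// with bytesPerChecksum = 512 and startOffset = 1300,
//   firstChunkOffset = 1300 - (1300 % 512) = 1300 - 276 = 1024,
// so reading begins at the 512-byte checksum-chunk boundary containing
// startOffset, letting the reader verify the full first chunk.
internal static long AlignToChunkStart(long startOffset, int bytesPerChecksum)
{
    // Mirrors the inline expression in NewBlockReader.
    return startOffset - (startOffset % bytesPerChecksum);
}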