/// <summary>Test assumes that the file has a single block</summary>
/// <exception cref="System.IO.IOException"/>
private FilePath GetBlockForFile(Path path, bool exists)
{
    LocatedBlocks blocks = nn.GetRpcServer().GetBlockLocations(path.ToString(), 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("The test helper functions assume that each file has a single block",
        1, blocks.GetLocatedBlocks().Count);
    ExtendedBlock block = blocks.GetLocatedBlocks()[0].GetBlock();
    BlockLocalPathInfo bInfo = dn0.GetFSDataset().GetBlockLocalPathInfo(block);
    FilePath blockFile = new FilePath(bInfo.GetBlockPath());
    NUnit.Framework.Assert.AreEqual(exists, blockFile.Exists());
    return blockFile;
}
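// Usage sketch (hypothetical, not part of the original test class): it assumes the
// fixture exposes a FileSystem `fs` next to the `nn` and `dn0` fields used by
// GetBlockForFile, and that DFSTestUtil is available as elsewhere in this port.
// It illustrates the single-block precondition the helper asserts: write less than
// one block of data, then resolve the on-disk block file through the helper.
[NUnit.Framework.Test]
public virtual void TestBlockFileExistsForSingleBlockFile()
{
    Path path = new Path("/test/single-block-file");
    // 1024 bytes is far below the default block size, so the file stays at one block.
    DFSTestUtil.CreateFile(fs, path, 1024, (short)1, 0);
    // The helper asserts that the block file exists and returns its local path.
    FilePath blockFile = GetBlockForFile(path, true);
    NUnit.Framework.Assert.IsTrue(blockFile.Length() > 0);
}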
// Multiple datanodes could be running on the local machine. Store proxies in
// a map keyed by the ipc port of the datanode.
/// <summary>The only way this object can be instantiated.</summary>
/// <exception cref="System.IO.IOException"/>
internal static BlockReaderLocalLegacy NewBlockReader(DFSClient.Conf conf, UserGroupInformation
    userGroupInformation, Configuration configuration, string file, ExtendedBlock blk,
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token, DatanodeInfo node,
    long startOffset, long length, StorageType storageType)
{
    BlockReaderLocalLegacy.LocalDatanodeInfo localDatanodeInfo = GetLocalDatanodeInfo(node.GetIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.GetBlockLocalPathInfo(blk);
    if (pathinfo == null)
    {
        if (userGroupInformation == null)
        {
            userGroupInformation = UserGroupInformation.GetCurrentUser();
        }
        pathinfo = GetBlockPathInfo(userGroupInformation, blk, node, configuration,
            conf.socketTimeout, token, conf.connectToDnViaHostname, storageType);
    }
    // Check that the block file exists. It may happen that the HDFS file has
    // been deleted and this block-lookup is occurring on behalf of a new HDFS
    // file; in that case the block file could be residing in a different
    // portion of the fs.data.dir directory. We then remove this entry from
    // the cache, and the next call to this method will re-populate it.
    FileInputStream dataIn = null;     // reader for the data file
    FileInputStream checksumIn = null; // reader for the checksum file
    BlockReaderLocalLegacy localBlockReader = null;
    bool skipChecksumCheck = conf.skipShortCircuitChecksums || storageType.IsTransient();
    try
    {
        // get a local file system
        FilePath blkfile = new FilePath(pathinfo.GetBlockPath());
        dataIn = new FileInputStream(blkfile);
        if (Log.IsDebugEnabled())
        {
            Log.Debug("New BlockReaderLocalLegacy for file " + blkfile + " of size " + blkfile.Length()
                + " startOffset " + startOffset + " length " + length
                + " short circuit checksum " + !skipChecksumCheck);
        }
        if (!skipChecksumCheck)
        {
            // get the metadata file
            FilePath metafile = new FilePath(pathinfo.GetMetaPath());
            checksumIn = new FileInputStream(metafile);
            DataChecksum checksum = BlockMetadataHeader.ReadDataChecksum(new DataInputStream(checksumIn), blk);
            long firstChunkOffset = startOffset - (startOffset % checksum.GetBytesPerChecksum());
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset,
                length, pathinfo, checksum, true, dataIn, firstChunkOffset, checksumIn);
        }
        else
        {
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset,
                length, pathinfo, dataIn);
        }
    }
    catch (IOException)
    {
        // remove from cache
        localDatanodeInfo.RemoveBlockLocalPathInfo(blk);
        DFSClient.Log.Warn("BlockReaderLocalLegacy: Removing " + blk + " from cache because local file "
            + pathinfo.GetBlockPath() + " could not be opened.");
        throw;
    }
    finally
    {
        // on failure, close any streams opened above before rethrowing
        if (localBlockReader == null)
        {
            if (dataIn != null)
            {
                dataIn.Close();
            }
            if (checksumIn != null)
            {
                checksumIn.Close();
            }
        }
    }
    return localBlockReader;
}
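// Caller-side sketch (hypothetical; the real call site is the client's block
// reader factory): `dfsClient`, `src`, and the accessors used below are
// assumptions for illustration only, as is the Read/Close surface of the
// returned reader matching the Java original. It shows the intended flow: a
// null UGI makes NewBlockReader fall back to UserGroupInformation.GetCurrentUser(),
// and the caller closes the reader once the block has been consumed.
private static int ReadWholeBlockLocally(DFSClient dfsClient, string src, LocatedBlock located, byte[] buf)
{
    ExtendedBlock blk = located.GetBlock();
    BlockReaderLocalLegacy reader = NewBlockReader(
        dfsClient.GetConf(),            // DFSClient.Conf carrying the short-circuit settings
        null,                           // UGI resolved inside NewBlockReader
        dfsClient.GetConfiguration(),   // assumed accessor for the raw Configuration
        src, blk,
        located.GetBlockToken(),
        located.GetLocations()[0],      // a datanode co-located with this client
        0,                              // startOffset: begin at the start of the block
        blk.GetNumBytes(),              // length: request the whole block
        located.GetStorageTypes()[0]);
    try
    {
        return reader.Read(buf, 0, buf.Length);
    }
    finally
    {
        reader.Close();
    }
}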