/// <summary>
/// Convenience constructor for reads without checksum verification: delegates
/// to the full constructor with a NULL-type checksum (4 bytes per chunk),
/// verification disabled, no checksum stream, and the first chunk taken to
/// start exactly at <paramref name="startOffset"/>.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private BlockReaderLocalLegacy(DFSClient.Conf conf, string hdfsfile, ExtendedBlock
    block, Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token, long
    startOffset, long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
    : this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
        DataChecksum.NewDataChecksum(DataChecksum.Type.Null, 4), false, dataIn,
        startOffset, null)
{
}
/// <summary>Creates the namenode proxy with the passed protocol.</summary>
/// <remarks>
/// Creates the namenode proxy with the passed protocol. Handles both the
/// HA and non-HA cases: if a failover proxy provider can be built for the
/// URI it is a configured logical nameservice (HA), otherwise the URI is
/// treated as the address of a single NameNode.
/// </remarks>
/// <param name="conf">
/// the configuration containing the required IPC properties, client
/// failover configurations, etc.
/// </param>
/// <param name="nameNodeUri">
/// the URI pointing either to a specific NameNode or to a logical
/// nameservice.
/// </param>
/// <param name="fallbackToSimpleAuth">
/// set to true or false during calls to indicate if a secure client
/// falls back to simple auth
/// </param>
/// <returns>
/// an object containing both the proxy and the associated delegation
/// token service it corresponds to
/// </returns>
/// <exception cref="System.IO.IOException">if there is an error creating the proxy</exception>
public static NameNodeProxies.ProxyAndInfo<T> CreateProxy<T>(Configuration conf,
    URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
{
    System.Type xface = typeof(T);
    AbstractNNFailoverProxyProvider<T> failoverProvider = CreateFailoverProxyProvider(
        conf, nameNodeUri, xface, true, fallbackToSimpleAuth);
    if (failoverProvider == null)
    {
        // Non-HA case: talk directly to the single NameNode address.
        return CreateNonHAProxy(conf, NameNode.GetAddress(nameNodeUri), xface,
            UserGroupInformation.GetCurrentUser(), true, fallbackToSimpleAuth);
    }
    // HA case: wrap the provider in a retrying failover proxy.
    DFSClient.Conf clientConf = new DFSClient.Conf(conf);
    T proxy = (T)RetryProxy.Create(xface, failoverProvider,
        RetryPolicies.FailoverOnNetworkException(RetryPolicies.TryOnceThenFail,
            clientConf.maxFailoverAttempts, clientConf.maxRetryAttempts,
            clientConf.failoverSleepBaseMillis, clientConf.failoverSleepMaxMillis));
    // The delegation token service depends on whether the provider addresses
    // the cluster through a logical URI or a physical NameNode address.
    Text dtService = failoverProvider.UseLogicalURI()
        ? HAUtil.BuildTokenServiceForLogicalUri(nameNodeUri, HdfsConstants.HdfsUriScheme)
        : SecurityUtil.BuildTokenService(NameNode.GetAddress(nameNodeUri));
    return new NameNodeProxies.ProxyAndInfo<T>(proxy, dtService,
        NameNode.GetAddress(nameNodeUri));
}
/// <summary>
/// Builds a client context with the given name, instantiating every per-context
/// cache (short-circuit, peer, key-provider) from the supplied configuration.
/// The configuration is also flattened to a string so later callers asking for
/// the same context name can be warned when their settings differ
/// (see PrintConfWarningIfNeeded).
/// </summary>
private ClientContext(string name, DFSClient.Conf conf)
{
    this.name = name;
    // Snapshot of the relevant conf values, used only for mismatch warnings.
    this.confString = ConfAsString(conf);
    this.shortCircuitCache = new ShortCircuitCache(conf.shortCircuitStreamsCacheSize,
        conf.shortCircuitStreamsCacheExpiryMs, conf.shortCircuitMmapCacheSize,
        conf.shortCircuitMmapCacheExpiryMs, conf.shortCircuitMmapCacheRetryTimeout,
        conf.shortCircuitCacheStaleThresholdMs,
        conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
    this.peerCache = new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
    this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
    this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
    this.domainSocketFactory = new DomainSocketFactory(conf);
    this.byteArrayManager = ByteArrayManager.NewInstance(conf.writeByteArrayManagerConf);
}
/// <summary>
/// Logs a one-time warning when a caller requests this named context with a
/// configuration that does not match the one the context was created with.
/// Only the first mismatch is reported; subsequent mismatches are silent.
/// </summary>
private void PrintConfWarningIfNeeded(DFSClient.Conf conf)
{
    string existing = this.GetConfString();
    string requested = ConfAsString(conf);
    if (existing.Equals(requested))
    {
        // Configurations agree; nothing to report.
        return;
    }
    if (printedConfWarning)
    {
        // Warn only once per context.
        return;
    }
    printedConfWarning = true;
    Log.Warn("Existing client context '" + name + "' does not match requested configuration. Existing: "
        + existing + ", Requested: " + requested);
}
/// <summary>
/// Full constructor: records block/checksum metadata, acquires read buffers
/// from the shared pool, and positions both streams at the start of the chunk
/// containing <paramref name="startOffset"/>.
/// </summary>
/// <param name="conf">client configuration; supplies the short-circuit buffer size</param>
/// <param name="hdfsfile">HDFS path name, recorded for diagnostics</param>
/// <param name="block">block being read; only its id is recorded here</param>
/// <param name="token">block access token (not referenced in this body; part of the call contract)</param>
/// <param name="startOffset">requested read position within the block; negative values are clamped to 0</param>
/// <param name="length">requested read length (not referenced in this body)</param>
/// <param name="pathinfo">local block path info (not referenced in this body)</param>
/// <param name="checksum">checksum algorithm describing chunk and checksum sizes</param>
/// <param name="verifyChecksum">whether reads should verify checksums</param>
/// <param name="dataIn">open stream over the local block data file</param>
/// <param name="firstChunkOffset">offset of the chunk boundary at or before startOffset</param>
/// <param name="checksumIn">open stream over the block metadata file, or null when not verifying</param>
/// <exception cref="System.IO.IOException"/>
private BlockReaderLocalLegacy(DFSClient.Conf conf, string hdfsfile, ExtendedBlock
    block, Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token, long
    startOffset, long length, BlockLocalPathInfo pathinfo, DataChecksum checksum, bool
    verifyChecksum, FileInputStream dataIn, long firstChunkOffset, FileInputStream
    checksumIn)
{
    this.filename = hdfsfile;
    this.checksum = checksum;
    this.verifyChecksum = verifyChecksum;
    // Negative offsets are clamped to the start of the block.
    this.startOffset = Math.Max(startOffset, 0);
    this.blockId = block.GetBlockId();
    bytesPerChecksum = this.checksum.GetBytesPerChecksum();
    checksumSize = this.checksum.GetChecksumSize();
    this.dataIn = dataIn;
    this.checksumIn = checksumIn;
    // Distance from the chunk boundary back to the requested start position.
    this.offsetFromChunkBoundary = (int)(startOffset - firstChunkOffset);
    int chunksPerChecksumRead = GetSlowReadBufferNumChunks(conf.shortCircuitBufferSize,
        bytesPerChecksum);
    // Buffers come from a shared pool and must be returned on failure below.
    slowReadBuff = bufferPool.GetBuffer(bytesPerChecksum * chunksPerChecksumRead);
    checksumBuff = bufferPool.GetBuffer(checksumSize * chunksPerChecksumRead);
    // Initially the buffers have nothing to read.
    slowReadBuff.Flip();
    checksumBuff.Flip();
    bool success = false;
    try
    {
        // Skip both input streams to beginning of the chunk containing startOffset
        IOUtils.SkipFully(dataIn, firstChunkOffset);
        if (checksumIn != null)
        {
            // One checksum record of checksumSize bytes covers bytesPerChecksum data bytes.
            long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
            IOUtils.SkipFully(checksumIn, checkSumOffset);
        }
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Positioning failed: the caller never sees this instance, so return
            // the pooled buffers here or they would leak.
            bufferPool.ReturnBuffer(slowReadBuff);
            bufferPool.ReturnBuffer(checksumBuff);
        }
    }
}
/// <summary>
/// Validates the domain-socket-related configuration at construction time.
/// If a feature needing domain sockets is enabled, the socket path must be
/// configured (otherwise an exception is thrown), and a warning is logged
/// when the native DomainSocket support failed to load.
/// </summary>
public DomainSocketFactory(DFSClient.Conf conf)
{
    // Determine which feature (if any) wants a UNIX domain socket.
    string feature = null;
    if (conf.IsShortCircuitLocalReads() && (!conf.IsUseLegacyBlockReaderLocal()))
    {
        feature = "The short-circuit local reads feature";
    }
    else if (conf.IsDomainSocketDataTraffic())
    {
        feature = "UNIX domain socket data traffic";
    }
    if (feature == null)
    {
        PerformanceAdvisory.Log.Debug("Both short-circuit local reads and UNIX domain socket are disabled.");
        return;
    }
    if (conf.GetDomainSocketPath().IsEmpty())
    {
        // A feature requires domain sockets but no path was configured.
        throw new HadoopIllegalArgumentException(feature + " is enabled but "
            + DFSConfigKeys.DfsDomainSocketPathKey + " is not set.");
    }
    if (DomainSocket.GetLoadingFailureReason() != null)
    {
        // Native support is unavailable; the feature cannot actually be used.
        Log.Warn(feature + " cannot be used because " + DomainSocket.GetLoadingFailureReason());
    }
    else
    {
        Log.Debug(feature + " is enabled.");
    }
}
/// <summary>
/// Flattens the context-relevant configuration values into a single
/// human-readable "key = value, ..." string. Used to detect and report
/// configuration mismatches between callers sharing a named context.
/// </summary>
public static string ConfAsString(DFSClient.Conf conf)
{
    StringBuilder sb = new StringBuilder();
    sb.Append("shortCircuitStreamsCacheSize = ");
    sb.Append(conf.shortCircuitStreamsCacheSize);
    sb.Append(", shortCircuitStreamsCacheExpiryMs = ");
    sb.Append(conf.shortCircuitStreamsCacheExpiryMs);
    sb.Append(", shortCircuitMmapCacheSize = ");
    sb.Append(conf.shortCircuitMmapCacheSize);
    sb.Append(", shortCircuitMmapCacheExpiryMs = ");
    sb.Append(conf.shortCircuitMmapCacheExpiryMs);
    sb.Append(", shortCircuitMmapCacheRetryTimeout = ");
    sb.Append(conf.shortCircuitMmapCacheRetryTimeout);
    sb.Append(", shortCircuitCacheStaleThresholdMs = ");
    sb.Append(conf.shortCircuitCacheStaleThresholdMs);
    sb.Append(", socketCacheCapacity = ");
    sb.Append(conf.socketCacheCapacity);
    sb.Append(", socketCacheExpiry = ");
    sb.Append(conf.socketCacheExpiry);
    sb.Append(", shortCircuitLocalReads = ");
    sb.Append(conf.shortCircuitLocalReads);
    sb.Append(", useLegacyBlockReaderLocal = ");
    sb.Append(conf.useLegacyBlockReaderLocal);
    sb.Append(", domainSocketDataTraffic = ");
    sb.Append(conf.domainSocketDataTraffic);
    sb.Append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ");
    sb.Append(conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
    sb.Append(", keyProviderCacheExpiryMs = ");
    sb.Append(conf.keyProviderCacheExpiryMs);
    return sb.ToString();
}
/// <summary>
/// Returns the client context registered under <paramref name="name"/>,
/// creating and caching it on first use. When an existing context is reused,
/// a warning may be logged if the requested configuration differs from the
/// one the context was built with.
/// </summary>
public static Org.Apache.Hadoop.Hdfs.ClientContext Get(string name, DFSClient.Conf
    conf)
{
    // NOTE(review): locks the type object, matching the original Java
    // class-level synchronization; kept as-is to preserve cross-code locking.
    lock (typeof(Org.Apache.Hadoop.Hdfs.ClientContext))
    {
        Org.Apache.Hadoop.Hdfs.ClientContext cached = Caches[name];
        if (cached != null)
        {
            cached.PrintConfWarningIfNeeded(conf);
            return cached;
        }
        Org.Apache.Hadoop.Hdfs.ClientContext created =
            new Org.Apache.Hadoop.Hdfs.ClientContext(name, conf);
        Caches[name] = created;
        return created;
    }
}
/// <summary>Get information about a domain socket path.</summary>
/// <param name="addr">The inet address to use.</param>
/// <param name="conf">The client configuration.</param>
/// <returns>Information about the socket path.</returns>
public virtual DomainSocketFactory.PathInfo GetPathInfo(IPEndPoint addr, DFSClient.Conf
    conf)
{
    // No configured domain socket path means domain sockets cannot be used.
    if (conf.GetDomainSocketPath().IsEmpty())
    {
        return DomainSocketFactory.PathInfo.NotConfigured;
    }
    // No enabled feature could make use of the socket.
    if (!conf.IsDomainSocketDataTraffic() && (!conf.IsShortCircuitLocalReads() || conf.IsUseLegacyBlockReaderLocal()))
    {
        return DomainSocketFactory.PathInfo.NotConfigured;
    }
    // Native DomainSocket support failed to load; cannot create sockets.
    if (DomainSocket.GetLoadingFailureReason() != null)
    {
        return DomainSocketFactory.PathInfo.NotConfigured;
    }
    // UNIX domain sockets can only be used to talk to local peers.
    if (!DFSClient.IsLocalAddress(addr))
    {
        return DomainSocketFactory.PathInfo.NotConfigured;
    }
    string effectivePath = DomainSocket.GetEffectivePath(conf.GetDomainSocketPath(), addr.Port);
    DomainSocketFactory.PathState cachedState = pathMap.GetIfPresent(effectivePath);
    if (cachedState != null)
    {
        // A prior attempt recorded this path's state; report it.
        return new DomainSocketFactory.PathInfo(effectivePath, cachedState);
    }
    // Unknown path: optimistically treat it as valid.
    return new DomainSocketFactory.PathInfo(effectivePath, DomainSocketFactory.PathState.Valid);
}
/// <summary>
/// Initializes builder defaults from the client configuration: readahead is
/// uncapped, checksum verification follows the (negated) skip flag, and the
/// buffer size comes from the short-circuit setting.
/// </summary>
public Builder(DFSClient.Conf conf)
{
    // int.MaxValue effectively means "no readahead limit".
    this.maxReadahead = int.MaxValue;
    this.verifyChecksum = !conf.skipShortCircuitChecksums;
    this.bufferSize = conf.shortCircuitBufferSize;
}
/// <summary>
/// Creates a reader factory bound to the given configuration, taking the
/// failure injector and the cached-connection retry budget from it.
/// </summary>
public BlockReaderFactory(DFSClient.Conf conf)
{
    this.conf = conf;
    // Test hook: lets tests inject failures into the reader-creation path.
    this.failureInjector = conf.brfFailureInjector;
    this.remainingCacheTries = conf.nCachedConnRetry;
}
// Multiple datanodes could be running on the local machine. Store proxies in
// a map keyed by the ipc port of the datanode.
// reader for the data file
// reader for the checksum file
/// <summary>The only way this object can be instantiated.</summary>
/// <remarks>
/// Resolves the local path of the block (consulting a per-datanode cache
/// first), opens the data file — and the metadata file when checksums are
/// enabled — and wraps them in a BlockReaderLocalLegacy. On IOException the
/// cached path entry is evicted; any streams opened for a reader that was
/// never constructed are closed in the finally block.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal static BlockReaderLocalLegacy NewBlockReader(DFSClient.Conf conf, UserGroupInformation
    userGroupInformation, Configuration configuration, string file, ExtendedBlock blk,
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token, DatanodeInfo
    node, long startOffset, long length, StorageType storageType)
{
    BlockReaderLocalLegacy.LocalDatanodeInfo localDatanodeInfo = GetLocalDatanodeInfo(node.GetIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.GetBlockLocalPathInfo(blk);
    if (pathinfo == null)
    {
        if (userGroupInformation == null)
        {
            userGroupInformation = UserGroupInformation.GetCurrentUser();
        }
        // Cache miss: ask the datanode (over IPC) for the local block path.
        pathinfo = GetBlockPathInfo(userGroupInformation, blk, node, configuration,
            conf.socketTimeout, token, conf.connectToDnViaHostname, storageType);
    }
    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will re-populate the cache.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocalLegacy localBlockReader = null;
    // Checksums are skipped when configured off or for transient storage.
    bool skipChecksumCheck = conf.skipShortCircuitChecksums || storageType.IsTransient();
    try
    {
        // get a local file system
        FilePath blkfile = new FilePath(pathinfo.GetBlockPath());
        dataIn = new FileInputStream(blkfile);
        if (Log.IsDebugEnabled())
        {
            Log.Debug("New BlockReaderLocalLegacy for file " + blkfile + " of size " + blkfile.Length()
                + " startOffset " + startOffset + " length " + length
                + " short circuit checksum " + !skipChecksumCheck);
        }
        if (!skipChecksumCheck)
        {
            // get the metadata file
            FilePath metafile = new FilePath(pathinfo.GetMetaPath());
            checksumIn = new FileInputStream(metafile);
            DataChecksum checksum = BlockMetadataHeader.ReadDataChecksum(new DataInputStream(checksumIn), blk);
            // Round startOffset down to the nearest chunk boundary.
            long firstChunkOffset = startOffset - (startOffset % checksum.GetBytesPerChecksum());
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset,
                length, pathinfo, checksum, true, dataIn, firstChunkOffset, checksumIn);
        }
        else
        {
            localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset,
                length, pathinfo, dataIn);
        }
    }
    catch (IOException e)
    {
        // remove from cache
        localDatanodeInfo.RemoveBlockLocalPathInfo(blk);
        DFSClient.Log.Warn("BlockReaderLocalLegacy: Removing " + blk + " from cache because local file "
            + pathinfo.GetBlockPath() + " could not be opened.");
        throw;
    }
    finally
    {
        // If no reader was constructed, nothing else owns the streams; close
        // them here to avoid leaking file handles.
        if (localBlockReader == null)
        {
            if (dataIn != null)
            {
                dataIn.Close();
            }
            if (checksumIn != null)
            {
                checksumIn.Close();
            }
        }
    }
    return(localBlockReader);
}