/// <summary>
/// Precondition for every test in this fixture: skips (does not fail) the run
/// unless native short-circuit prerequisites are met.
/// </summary>
public virtual void Before()
{
    // Requires libhadoop and a non-Windows platform.
    bool nativeOnUnix = NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows;
    Assume.AssumeThat(nativeOnUnix, CoreMatchers.EqualTo(true));
    // A null failure reason means UNIX domain sockets initialized cleanly.
    Assume.AssumeThat(DomainSocket.GetLoadingFailureReason(), CoreMatchers.EqualTo(null));
}
/// <summary>
/// Verifies that a short-circuit read emits the expected HTrace spans
/// (OpRequestShortCircuitAccessProto and ShortCircuitShmRequestProto).
/// </summary>
public virtual void TestShortCircuitTraceHooks()
{
    // Short-circuit reads need libhadoop and UNIX domain sockets (no Windows).
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
    conf = new Configuration();
    conf.Set(DFSConfigKeys.DfsClientHtracePrefix + SpanReceiverHost.SpanReceiversConfSuffix,
        typeof(TestTracing.SetSpanReceiver).FullName);
    conf.SetLong("dfs.blocksize", 100 * 1024);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
    // _PORT is substituted with the datanode's port by the mini-cluster.
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "testShortCircuitTraceHooks._PORT");
    conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    dfs = cluster.GetFileSystem();
    try
    {
        DFSTestUtil.CreateFile(dfs, TestPath, TestLength, (short)1, 5678L);
        TraceScope ts = Trace.StartSpan("testShortCircuitTraceHooks", Sampler.Always);
        // FIX: close the stream and the trace scope even when the read throws,
        // so a failed read does not leak the stream or leave the span open.
        try
        {
            FSDataInputStream stream = dfs.Open(TestPath);
            try
            {
                byte[] buf = new byte[TestLength];
                IOUtils.ReadFully(stream, buf, 0, TestLength);
            }
            finally
            {
                stream.Close();
            }
        }
        finally
        {
            ts.Close();
        }
        string[] expectedSpanNames = new string[] { "OpRequestShortCircuitAccessProto",
            "ShortCircuitShmRequestProto" };
        TestTracing.AssertSpanNamesFound(expectedSpanNames);
    }
    finally
    {
        dfs.Close();
        cluster.Shutdown();
    }
}
// Static initializer: records why native UNIX domain socket support is
// unavailable. loadingFailureReason stays null only when AnchorNative()
// succeeds; it is set for Windows, a missing libhadoop, or a native error.
// NOTE(review): this static constructor references `this`
// (new _Runnable_451(this)), which is not legal inside a C# static
// constructor — presumably a conversion artifact; the watcherThread
// assignment likely belongs in an instance constructor. Confirm against the
// original Java DomainSocketWatcher and split accordingly.
static DomainSocketWatcher() { watcherThread = new Thread(new _Runnable_451(this)); if (SystemUtils.IsOsWindows) { loadingFailureReason = "UNIX Domain sockets are not available on Windows."; } else { if (!NativeCodeLoader.IsNativeCodeLoaded()) { loadingFailureReason = "libhadoop cannot be loaded."; } else { string problem; try { AnchorNative(); problem = null; } catch (Exception t) { problem = "DomainSocketWatcher#anchorNative got error: " + t.Message; } loadingFailureReason = problem; } } }
// Static initializer: attempts to bring up the POSIX native IO layer.
// nativeLoaded remains false when libhadoop is absent or initialization fails.
static POSIX()
{
    // Native IO is strictly optional; without libhadoop we stay in
    // non-native mode.
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    try
    {
        Configuration conf = new Configuration();
        workaroundNonThreadSafePasswdCalls = conf.GetBoolean(WorkaroundNonThreadsafeCallsKey,
            WorkaroundNonThreadsafeCallsDefault);
        InitNative();
        nativeLoaded = true;
        // Configured in seconds, stored in milliseconds.
        cacheTimeout = conf.GetLong(CommonConfigurationKeys.HadoopSecurityUidNameCacheTimeoutKey,
            CommonConfigurationKeys.HadoopSecurityUidNameCacheTimeoutDefault) * 1000;
        Log.Debug("Initialized cache for IDs to User/Group mapping with a " + " cache timeout of "
            + cacheTimeout / 1000 + " seconds.");
    }
    catch (Exception t)
    {
        // This can happen if the user has an older version of libhadoop.so
        // installed - in this case we can continue without native IO
        // after warning
        PerformanceAdvisory.Log.Debug("Unable to initialize NativeIO libraries", t);
    }
}
/// <summary>
/// Probes whether snappy is usable: both the snappy shared library and the
/// native hadoop library must load. Returns false on any load failure.
/// </summary>
private static bool IsNativeSnappyLoadable()
{
    try
    {
        Runtime.LoadLibrary("snappy");
        logger.Warn("Snappy native library is available");
        // LoadLibrary succeeded, so usability now hinges only on libhadoop.
        bool usable = NativeCodeLoader.IsNativeCodeLoaded();
        if (usable)
        {
            logger.Info("Snappy native library loaded");
        }
        else
        {
            logger.Warn("Snappy native library not loaded");
        }
        return usable;
    }
    catch (Exception t)
    {
        logger.Warn("Failed to load snappy: ", t);
        return false;
    }
}
/// <summary>
/// Verifies that DoPreUpgrade surfaces an IOException when the storage
/// directory cannot be renamed (simulated by making it read-only).
/// </summary>
public virtual void TestDoPreUpgradeIOError()
{
    FilePath storageDir = new FilePath(TestEditLog.TestDir, "preupgradeioerror");
    IList<URI> editUris = Sharpen.Collections.SingletonList(storageDir.ToURI());
    NNStorage storage = TestEditLog.SetupEdits(editUris, 5);
    Storage.StorageDirectory sd = storage.DirIterator(NNStorage.NameNodeDirType.Edits).Next();
    NUnit.Framework.Assert.IsNotNull(sd);
    // Change storage directory so that renaming current to previous.tmp fails.
    FileUtil.SetWritable(storageDir, false);
    FileJournalManager journalManager = null;
    try
    {
        journalManager = new FileJournalManager(conf, sd, storage);
        exception.Expect(typeof(IOException));
        if (NativeCodeLoader.IsNativeCodeLoaded())
        {
            // Only the native rename path produces this specific message.
            exception.ExpectMessage("failure in native rename");
        }
        journalManager.DoPreUpgrade();
    }
    finally
    {
        IOUtils.Cleanup(Log, journalManager);
        // Restore permissions on storage directory and make sure we can delete.
        FileUtil.SetWritable(storageDir, true);
        FileUtil.FullyDelete(storageDir);
    }
}
// Static initializer: records why native UNIX domain socket support is
// unavailable. loadingFailureReason stays null only when AnchorNative()
// succeeds; it is set for Windows, a missing libhadoop, or a native error.
// NOTE(review): this static constructor references `this` (the
// inputStream/outputStream/channel assignments), which is not legal inside a
// C# static constructor — presumably a conversion artifact; those three
// assignments likely belong in an instance constructor. Confirm against the
// original Java DomainSocket and split accordingly.
static DomainSocket() { inputStream = new DomainSocket.DomainInputStream(this); outputStream = new DomainSocket.DomainOutputStream(this); channel = new DomainSocket.DomainChannel(this); if (SystemUtils.IsOsWindows) { loadingFailureReason = "UNIX Domain sockets are not available on Windows."; } else { if (!NativeCodeLoader.IsNativeCodeLoaded()) { loadingFailureReason = "libhadoop cannot be loaded."; } else { string problem; try { AnchorNative(); problem = null; } catch (Exception t) { problem = "DomainSocket#anchorNative got error: " + t.Message; } loadingFailureReason = problem; } } }
// Static initializer: this mapping has no non-JNI fallback, so fail fast
// when libhadoop is unavailable (callers that want a fallback use
// JniBasedUnixGroupsNetgroupMappingWithFallback instead).
static JniBasedUnixGroupsNetgroupMapping()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        throw new RuntimeException("Bailing out since native library couldn't be loaded");
    }
    Log.Debug("Using JniBasedUnixGroupsNetgroupMapping for Netgroup resolution");
}
// Static initializer: WSCE native support is optional; nativeLoaded stays
// false when libhadoop is absent or native initialization fails.
static Native()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    try
    {
        InitWsceNative();
        nativeLoaded = true;
    }
    catch (Exception t)
    {
        Log.Info("Unable to initialize WSCE Native libraries", t);
    }
}
// Static initializer: best-effort hookup of the native zlib decompressor.
// nativeZlibLoaded stays false when libhadoop is absent or InitIDs fails.
static ZlibDecompressor()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    try
    {
        // Initialize the native library
        InitIDs();
        nativeZlibLoaded = true;
    }
    catch
    {
        // Deliberately swallowed: a failed native init simply leaves the
        // pure-Java path in effect.
    }
}
/// <summary>
/// Picks the netgroup mapping implementation: JNI-based when libhadoop is
/// loaded, otherwise the shell-based fallback.
/// </summary>
public JniBasedUnixGroupsNetgroupMappingWithFallback()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        Log.Info("Falling back to shell based");
        this.impl = new ShellBasedUnixGroupsNetgroupMapping();
    }
    else
    {
        this.impl = new JniBasedUnixGroupsNetgroupMapping();
    }
    if (Log.IsDebugEnabled())
    {
        Log.Debug("Group mapping impl=" + impl.GetType().FullName);
    }
}
// Static initializer: native zlib counts as loaded only when BOTH the
// compressor and the decompressor halves initialized successfully.
static ZlibFactory()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    nativeZlibLoaded = ZlibCompressor.IsNativeZlibLoaded()
        && ZlibDecompressor.IsNativeZlibLoaded();
    if (nativeZlibLoaded)
    {
        Log.Info("Successfully loaded & initialized native-zlib library");
    }
    else
    {
        Log.Warn("Failed to load/initialize native-zlib library");
    }
}
/// <summary>
/// Round-trips a range of buffer sizes (1 byte to 1 MiB) through the direct
/// zlib compress/decompress path; skipped when libhadoop is not loaded.
/// </summary>
public virtual void TestZlibDirectCompressDecompress()
{
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded());
    int[] sizes = { 1, 4, 16, 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
    try
    {
        foreach (int bufferSize in sizes)
        {
            CompressDecompressLoop(bufferSize);
        }
    }
    catch (IOException ex)
    {
        NUnit.Framework.Assert.Fail("testZlibDirectCompressDecompress ex !!!" + ex);
    }
}
// HACK - Use this as a global lock in the JNI layer
// Static initializer: hooks up the native snappy decompressor only when
// libhadoop is present and was built with snappy support.
static SnappyDecompressor()
{
    if (!(NativeCodeLoader.IsNativeCodeLoaded() && NativeCodeLoader.BuildSupportsSnappy()))
    {
        return;
    }
    try
    {
        InitIDs();
        nativeSnappyLoaded = true;
    }
    catch (Exception t)
    {
        Log.Error("failed to load SnappyDecompressor", t);
    }
}
// Static initializer: best-effort native IO hookup; nativeLoaded stays false
// when libhadoop is absent or initialization fails.
static NativeIO()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    try
    {
        InitNative();
        nativeLoaded = true;
    }
    catch (Exception t)
    {
        // This can happen if the user has an older version of libhadoop.so
        // installed - in this case we can continue without native IO
        // after warning
        PerformanceAdvisory.Log.Debug("Unable to initialize NativeIO libraries", t);
    }
}
/// <summary>
/// Exercises the Lz4Codec with lz4hc disabled and then enabled; a no-op when
/// libhadoop is missing, but a hard failure when libhadoop is present
/// without lz4 support.
/// </summary>
public virtual void TestLz4Codec()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    if (!Lz4Codec.IsNativeCodeLoaded())
    {
        NUnit.Framework.Assert.Fail("Native hadoop library available but lz4 not");
    }
    string codecClass = "org.apache.hadoop.io.compress.Lz4Codec";
    // Run the same pair of codec tests with lz4hc off, then on.
    foreach (bool useLz4Hc in new bool[] { false, true })
    {
        conf.SetBoolean(CommonConfigurationKeys.IoCompressionCodecLz4Uselz4hcKey, useLz4Hc);
        CodecTest(conf, seed, 0, codecClass);
        CodecTest(conf, seed, count, codecClass);
    }
}
/// <summary>
/// Fails the build when libhadoop.so testing is required (per RequireTestJni)
/// but the library did not load; otherwise a no-op or a success log.
/// </summary>
public virtual void TestNativeCodeLoaded()
{
    if (!RequireTestJni())
    {
        Log.Info("TestNativeCodeLoader: libhadoop.so testing is not required.");
        return;
    }
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        // Include LD_LIBRARY_PATH in the failure message to aid debugging.
        string ldLibraryPath = Sharpen.Runtime.GetEnv()["LD_LIBRARY_PATH"] ?? string.Empty;
        NUnit.Framework.Assert.Fail("TestNativeCodeLoader: libhadoop.so testing was required, but "
            + "libhadoop.so was not loaded. LD_LIBRARY_PATH = " + ldLibraryPath);
    }
    Log.Info("TestHdfsNativeCodeLoader: libhadoop.so is loaded.");
}
/// <summary>
/// Throws unless the native snappy libraries are fully loaded and
/// initialized; returns normally when snappy is usable.
/// </summary>
/// <exception cref="RuntimeException">
/// when libhadoop was built without snappy support, or when either the
/// SnappyCompressor or SnappyDecompressor failed to load.
/// </exception>
public static void CheckNativeCodeLoaded()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded() || !NativeCodeLoader.BuildSupportsSnappy())
    {
        throw new RuntimeException("native snappy library not available: "
            + "this version of libhadoop was built without " + "snappy support.");
    }
    if (!SnappyCompressor.IsNativeCodeLoaded())
    {
        throw new RuntimeException("native snappy library not available: "
            + "SnappyCompressor has not been loaded.");
    }
    if (!SnappyDecompressor.IsNativeCodeLoaded())
    {
        throw new RuntimeException("native snappy library not available: "
            + "SnappyDecompressor has not been loaded.");
    }
}
/// <summary>
/// Test that when we have an uncache request, and the client refuses to release
/// the replica for a long time, we will un-mlock it.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestRevocation()
{
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
    BlockReaderTestUtil.EnableHdfsCachingTracing();
    BlockReaderTestUtil.EnableShortCircuitShmTracing();
    Configuration conf = GetDefaultConf();
    // Very short revocation timeout plus aggressive polling, so revocation
    // completes inside this test's one-second sleep window.
    conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
    conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    cluster.WaitActive();
    DistributedFileSystem dfs = cluster.GetFileSystem();
    // Create and cache a file.
    string testFile = "/test_file2";
    DFSTestUtil.CreateFile(dfs, new Path(testFile), BlockSize, (short)1,
        unchecked((int)(0xcafe)));
    dfs.AddCachePool(new CachePoolInfo("pool"));
    long directiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
        .SetPool("pool").SetPath(new Path(testFile)).SetReplication((short)1).Build());
    FsDatasetSpi<object> dataset = cluster.GetDataNodes()[0].GetFSDataset();
    DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, dataset);
    // Mmap the file so the client is holding the cached block.
    FSDataInputStream input = dfs.Open(new Path(testFile));
    ByteBuffer mapped = input.Read(null, BlockSize, EnumSet.NoneOf<ReadOption>());
    // Attempt to uncache file. The file should get uncached.
    Log.Info("removing cache directive {}", directiveId);
    dfs.RemoveCacheDirective(directiveId);
    Log.Info("finished removing cache directive {}", directiveId);
    Sharpen.Thread.Sleep(1000);
    DFSTestUtil.VerifyExpectedCacheUsage(0, 0, dataset);
    // Cleanup
    input.ReleaseBuffer(mapped);
    input.Close();
    cluster.Shutdown();
}
/// <summary>
/// Validates netgroup resolution in AccessControlList, before and after a
/// Groups refresh; skipped without native code or without a mapping class
/// supplied via -DTestAccessControlListGroupMapping.
/// </summary>
public virtual void TestNetgroups()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        Log.Info("Not testing netgroups, " + "this test only runs when native code is compiled");
        return;
    }
    string mappingClassName = Runtime.GetProperty("TestAccessControlListGroupMapping");
    if (mappingClassName == null)
    {
        Log.Info("Not testing netgroups, no group mapping class specified, "
            + "use -DTestAccessControlListGroupMapping=$className to specify "
            + "group mapping class (must implement GroupMappingServiceProvider "
            + "interface and support netgroups)");
        return;
    }
    Log.Info("Testing netgroups using: " + mappingClassName);
    Configuration conf = new Configuration();
    conf.Set(CommonConfigurationKeysPublic.HadoopSecurityGroupMapping, mappingClassName);
    Groups groups = Groups.GetUserToGroupsMappingService(conf);
    // create these ACLs to populate groups cache
    AccessControlList acl = new AccessControlList("ja my");       // plain
    acl = new AccessControlList("sinatra ratpack,@lasVegas");     // netgroup
    acl = new AccessControlList(" somegroup,@someNetgroup");      // no user
    // this ACL will be used for testing ACLs
    acl = new AccessControlList("carlPerkins ratpack,@lasVegas");
    acl.AddGroup("@memphis");
    // Netgroups must resolve identically before and after a refresh.
    ValidateNetgroups(groups, acl);
    groups.Refresh();
    ValidateNetgroups(groups, acl);
}
/// <summary>
/// Runs the SequenceFile codec tests against the system-native bzip2
/// implementation at several record counts; a no-op (with a warning) when
/// native bzip2 is unavailable.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.TypeLoadException"/>
/// <exception cref="InstantiationException"/>
/// <exception cref="System.MemberAccessException"/>
public virtual void TestSequenceFileBZip2NativeCodec()
{
    Configuration conf = new Configuration();
    conf.Set("io.compression.codec.bzip2.library", "system-native");
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    if (!Bzip2Factory.IsNativeBzip2Loaded(conf))
    {
        Log.Warn("Native hadoop library available but native bzip2 is not");
        return;
    }
    string codecClass = "org.apache.hadoop.io.compress.BZip2Codec";
    SequenceFileCodecTest(conf, 0, codecClass, 100);
    SequenceFileCodecTest(conf, 100, codecClass, 100);
    SequenceFileCodecTest(conf, 200000, codecClass, 1000000);
}
/// <summary>
/// Exercises the BZip2Codec first with the system-native library, then with
/// the pure-Java implementation; a no-op (with a warning) when native bzip2
/// is unavailable.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestBZip2NativeCodec()
{
    Configuration conf = new Configuration();
    conf.Set("io.compression.codec.bzip2.library", "system-native");
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        return;
    }
    if (!Bzip2Factory.IsNativeBzip2Loaded(conf))
    {
        Log.Warn("Native hadoop library available but native bzip2 is not");
        return;
    }
    string codecClass = "org.apache.hadoop.io.compress.BZip2Codec";
    // System-native pass.
    CodecTest(conf, seed, 0, codecClass);
    CodecTest(conf, seed, count, codecClass);
    // Pure-Java pass.
    conf.Set("io.compression.codec.bzip2.library", "java-builtin");
    CodecTest(conf, seed, 0, codecClass);
    CodecTest(conf, seed, count, codecClass);
}
// HACK - Use this as a global lock in the JNI layer
// Static initializer: best-effort lz4 hookup; logs an error when libhadoop
// itself is missing, and merely warns when lz4 initialization fails.
static Lz4Compressor()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        Log.Error("Cannot load " + typeof(Org.Apache.Hadoop.IO.Compress.Lz4.Lz4Compressor).FullName
            + " without native hadoop library!");
        return;
    }
    try
    {
        // Initialize the native library
        InitIDs();
    }
    catch (Exception t)
    {
        // Ignore failure to load/initialize lz4
        Log.Warn(t.ToString());
    }
}
/// <summary>
/// Availability check for a tester pair's compressor: decides, per concrete
/// compressor type, whether the matching native/builtin support is present.
/// </summary>
private static bool IsAvailable<T, E>(CompressDecompressTester.TesterPair<T, E> pair)
    where T : Compressor
    where E : Decompressor
{
    Compressor candidate = pair.compressor;
    // NOTE(review): the IsAssignableFrom direction below mirrors the upstream
    // Hadoop code; preserved as-is.
    System.Type compressorType = candidate.GetType();
    if (compressorType.IsAssignableFrom(typeof(Lz4Compressor))
        && NativeCodeLoader.IsNativeCodeLoaded())
    {
        return true;
    }
    if (compressorType.IsAssignableFrom(typeof(BuiltInZlibDeflater))
        && NativeCodeLoader.IsNativeCodeLoaded())
    {
        return true;
    }
    if (compressorType.IsAssignableFrom(typeof(ZlibCompressor)))
    {
        return ZlibFactory.IsNativeZlibLoaded(new Configuration());
    }
    if (compressorType.IsAssignableFrom(typeof(SnappyCompressor)) && IsNativeSnappyLoadable())
    {
        return true;
    }
    return false;
}
/// <summary>
/// Check if native-bzip2 code is loaded & initialized correctly and
/// can be loaded for this job.
/// </summary>
/// <param name="conf">configuration</param>
/// <returns>
/// <code>true</code> if native-bzip2 is loaded & initialized
/// and can be loaded for this job, else <code>false</code>
/// </returns>
public static bool IsNativeBzip2Loaded(Configuration conf)
{
    // Serialized on the factory type because the method caches its answer in
    // static fields keyed by the configured library name.
    lock (typeof(Bzip2Factory))
    {
        string requestedLib = conf.Get("io.compression.codec.bzip2.library", "system-native");
        if (bzip2LibraryName.Equals(requestedLib))
        {
            // Same library as last time: return the cached result.
            return nativeBzip2Loaded;
        }
        nativeBzip2Loaded = false;
        bzip2LibraryName = requestedLib;
        if (requestedLib.Equals("java-builtin"))
        {
            Log.Info("Using pure-Java version of bzip2 library");
            return nativeBzip2Loaded;
        }
        bool nativeAllowed = conf.GetBoolean(CommonConfigurationKeys.IoNativeLibAvailableKey,
            CommonConfigurationKeys.IoNativeLibAvailableDefault);
        if (nativeAllowed && NativeCodeLoader.IsNativeCodeLoaded())
        {
            try
            {
                // Initialize the native library.
                Bzip2Compressor.InitSymbols(requestedLib);
                Bzip2Decompressor.InitSymbols(requestedLib);
                nativeBzip2Loaded = true;
                Log.Info("Successfully loaded & initialized native-bzip2 library " + requestedLib);
            }
            catch
            {
                Log.Warn("Failed to load/initialize native-bzip2 library " + requestedLib
                    + ", will use pure-Java version");
            }
        }
        return nativeBzip2Loaded;
    }
}
/// <summary>
/// Validates proxy-user authorization when the superuser-group list is made
/// of netgroups; skipped without native code or without a mapping class
/// supplied via -DTestProxyUsersGroupMapping.
/// </summary>
public virtual void TestNetgroups()
{
    if (!NativeCodeLoader.IsNativeCodeLoaded())
    {
        Log.Info("Not testing netgroups, " + "this test only runs when native code is compiled");
        return;
    }
    string mappingClassName = Runtime.GetProperty("TestProxyUsersGroupMapping");
    if (mappingClassName == null)
    {
        Log.Info("Not testing netgroups, no group mapping class specified, "
            + "use -DTestProxyUsersGroupMapping=$className to specify "
            + "group mapping class (must implement GroupMappingServiceProvider "
            + "interface and support netgroups)");
        return;
    }
    Log.Info("Testing netgroups using: " + mappingClassName);
    Configuration conf = new Configuration();
    conf.Set(CommonConfigurationKeysPublic.HadoopSecurityGroupMapping, mappingClassName);
    // Allow RealUserName to proxy on behalf of the configured netgroups,
    // restricted to ProxyIp.
    conf.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey(RealUserName),
        StringUtils.Join(",", Arrays.AsList(NetgroupNames)));
    conf.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey(RealUserName),
        ProxyIp);
    ProxyUsers.RefreshSuperUserGroupsConfiguration(conf);
    Groups groups = Groups.GetUserToGroupsMappingService(conf);
    // try proxying a group that's allowed
    UserGroupInformation realUserUgi = UserGroupInformation.CreateRemoteUser(RealUserName);
    UserGroupInformation proxyUserUgi = UserGroupInformation.CreateProxyUserForTesting(
        ProxyUserName, realUserUgi, Collections.ToArray(groups.GetGroups(ProxyUserName),
            new string[groups.GetGroups(ProxyUserName).Count]));
    AssertAuthorized(proxyUserUgi, ProxyIp);
}
/// <summary>
/// Skips dependent tests (via JUnit assumption) when libhadoop is not loaded.
/// </summary>
public virtual void CheckLoaded()
{
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded());
}
/// <summary>
/// Reports whether the JNI-based native IO extensions are available.
/// </summary>
/// <returns>
/// <c>true</c> when libhadoop is loaded and native initialization succeeded.
/// </returns>
public static bool IsAvailable()
{
    return NativeCodeLoader.IsNativeCodeLoaded() && nativeLoaded;
}
// Static initializer: the return value is intentionally discarded —
// presumably referencing NativeCodeLoader here triggers its own static
// initialization (the libhadoop load attempt) before this codec is first
// used; confirm against NativeCodeLoader's initializer.
static Lz4Codec()
{
    NativeCodeLoader.IsNativeCodeLoaded();
}
/// <summary>
/// Reports whether the native hadoop library (and with it, lz4 support) has
/// been loaded and initialized.
/// </summary>
/// <returns>true if loaded & initialized, otherwise false</returns>
public static bool IsNativeCodeLoaded() => NativeCodeLoader.IsNativeCodeLoaded();