/// <summary>
/// Round-trips one container's logs through AggregatedLogFormat and verifies
/// what the reader gets back.  Writes three source files (stderr, stdout, and
/// a "logs" file inside a sub-directory), forces the stderr open to fail via a
/// Mockito spy, aggregates, then reads the aggregated file back and checks
/// that ONLY stdout was aggregated and that the rendered text has exactly the
/// expected length and content.
/// </summary>
/// <param name="logUploadedTime">
/// When true, the reader is invoked with an upload timestamp and the expected
/// output includes a "Log Upload Time:" line.
/// </param>
/// <exception cref="System.Exception"/>
private void TestReadAcontainerLog(bool logUploadedTime)
{
    Configuration conf = new Configuration();
    FilePath workDir = new FilePath(testWorkDir, "testReadAcontainerLogs1");
    Path remoteAppLogFile = new Path(workDir.GetAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.GetAbsolutePath(), "srcFiles");
    ContainerId testContainerId = TestContainerId.NewContainerId(1, 1, 1, 1);
    // Source layout mirrors a real container log dir: <root>/<appId>/<containerId>
    Path t = new Path(srcFileRoot, testContainerId.GetApplicationAttemptId().GetApplicationId().ToString());
    Path srcFilePath = new Path(t, testContainerId.ToString());
    int numChars = 80000;
    // Create a sub-folder under srcFilePath and create a "logs" file in it.
    // Aggregation only picks up top-level files, so this file must be ignored
    // by the writer (asserted below via !s.Contains("LogType:logs")).
    Path subDir = new Path(srcFilePath, "subDir");
    fs.Mkdirs(subDir);
    WriteSrcFile(subDir, "logs", numChars);
    // Create files stderr and stdout in the container log dir.
    WriteSrcFile(srcFilePath, "stderr", numChars);
    WriteSrcFile(srcFilePath, "stdout", numChars);
    UserGroupInformation ugi = UserGroupInformation.GetCurrentUser();
    AggregatedLogFormat.LogWriter logWriter = new AggregatedLogFormat.LogWriter(conf, remoteAppLogFile, ugi);
    AggregatedLogFormat.LogKey logKey = new AggregatedLogFormat.LogKey(testContainerId);
    AggregatedLogFormat.LogValue logValue = new AggregatedLogFormat.LogValue(Collections.SingletonList(srcFileRoot.ToString()), testContainerId, ugi.GetShortUserName());
    // Stub SecureOpenFile on a spy so that opening stderr throws IOException;
    // the writer should skip stderr and keep aggregating the remaining files.
    AggregatedLogFormat.LogValue spyLogValue = Org.Mockito.Mockito.Spy(logValue);
    FilePath errorFile = new FilePath((new Path(srcFilePath, "stderr")).ToString());
    Org.Mockito.Mockito.DoThrow(new IOException("Mock can not open FileInputStream")).When(spyLogValue).SecureOpenFile(errorFile);
    logWriter.Append(logKey, spyLogValue);
    logWriter.Close();
    // Make sure permissions are correct on the aggregated file.
    // 0x1a0 == 416 decimal == octal 0640 (rw-r-----).
    FileStatus fsStatus = fs.GetFileStatus(remoteAppLogFile);
    NUnit.Framework.Assert.AreEqual("permissions on log aggregation file are wrong", FsPermission.CreateImmutable((short)0x1a0), fsStatus.GetPermission());
    // Read the aggregated file back through the public reader API.
    AggregatedLogFormat.LogReader logReader = new AggregatedLogFormat.LogReader(conf, remoteAppLogFile);
    AggregatedLogFormat.LogKey rLogKey = new AggregatedLogFormat.LogKey();
    DataInputStream dis = logReader.Next(rLogKey);
    TextWriter writer = new StringWriter();
    if (logUploadedTime)
    {
        AggregatedLogFormat.LogReader.ReadAcontainerLogs(dis, writer, Runtime.CurrentTimeMillis());
    }
    else
    {
        AggregatedLogFormat.LogReader.ReadAcontainerLogs(dis, writer);
    }
    // Only stdout should have been aggregated: stderr failed to open (spy),
    // and "logs" lives in a sub-directory that aggregation skips.
    string s = writer.ToString();
    // Expected rendered length, mirroring the reader's output format:
    //   LogType:stdout [\nLog Upload Time:<ts>] \nLogLength:<n> \nLog Contents:\n <n chars> \n End of LogType:stdout\n
    // NOTE(review): the Times.Format(CurrentTimeMillis()) here is a fresh
    // timestamp, not the one passed to the reader — this only works because
    // both formatted strings have the same length.
    int expectedLength = "LogType:stdout".Length + (logUploadedTime ? ("\nLog Upload Time:" + Times.Format(Runtime.CurrentTimeMillis())).Length : 0) + ("\nLogLength:" + numChars).Length + "\nLog Contents:\n".Length + numChars + "\n".Length + "End of LogType:stdout\n".Length;
    NUnit.Framework.Assert.IsTrue("LogType not matched", s.Contains("LogType:stdout"));
    NUnit.Framework.Assert.IsTrue("log file:stderr should not be aggregated.", !s.Contains("LogType:stderr"));
    NUnit.Framework.Assert.IsTrue("log file:logs should not be aggregated.", !s.Contains("LogType:logs"));
    NUnit.Framework.Assert.IsTrue("LogLength not matched", s.Contains("LogLength:" + numChars));
    NUnit.Framework.Assert.IsTrue("Log Contents not matched", s.Contains("Log Contents"));
    // Rebuild the expected payload (numChars copies of the filler char) and
    // check both content and total rendered length.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < numChars; i++)
    {
        sb.Append(filler);
    }
    string expectedContent = sb.ToString();
    NUnit.Framework.Assert.IsTrue("Log content incorrect", s.Contains(expectedContent));
    NUnit.Framework.Assert.AreEqual(expectedLength, s.Length);
}
/// <summary>
/// HttpFS HTTP GET dispatcher.  Resolves the requested operation from the
/// query parameters and routes to the matching filesystem command, returning
/// either an octet-stream (OPEN) or a JSON response.  Unknown operations
/// produce an IOException.
/// </summary>
/// <param name="path">Request path; made absolute before use.</param>
/// <param name="op">The HttpFS operation to perform (switch discriminator).</param>
/// <param name="params">Typed accessor for the remaining query parameters.</param>
/// <param name="request">Servlet request; used only for the audit hostname.</param>
/// <returns>The JAX-RS <c>Response</c> for the chosen operation.</returns>
public virtual Response Get(string path, HttpFSParametersProvider.OperationParam op, Parameters @params, HttpServletRequest request)
{
    UserGroupInformation user = HttpUserGroupInformation.Get();
    Response response;
    path = MakeAbsolute(path);
    // Record operation and caller host in the logging context for audit lines.
    MDC.Put(HttpFSFileSystem.OpParam, op.Value().ToString());
    MDC.Put("hostname", request.GetRemoteAddr());
    switch (op.Value())
    {
        case HttpFSFileSystem.Operation.Open:
        {
            // Invoking the command directly using an unmanaged FileSystem that is
            // released by the FileSystemReleaseFilter (the stream must outlive
            // this method while the entity is written out, so no FsExecute here).
            FSOperations.FSOpen command = new FSOperations.FSOpen(path);
            FileSystem fs = CreateFileSystem(user);
            InputStream @is = command.Execute(fs);
            long offset = @params.Get<HttpFSParametersProvider.OffsetParam>(HttpFSParametersProvider.OffsetParam.Name);
            long len = @params.Get<HttpFSParametersProvider.LenParam>(HttpFSParametersProvider.LenParam.Name);
            AuditLog.Info("[{}] offset [{}] len [{}]", new object[] { path, offset, len });
            // Stream the requested byte range back as raw bytes.
            InputStreamEntity entity = new InputStreamEntity(@is, offset, len);
            response = Response.Ok(entity).Type(MediaType.ApplicationOctetStream).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getfilestatus:
        {
            FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("[{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Liststatus:
        {
            // Optional glob filter; "-" is logged when absent.
            string filter = @params.Get<HttpFSParametersProvider.FilterParam>(HttpFSParametersProvider.FilterParam.Name);
            FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Gethomedirectory:
        {
            // Only valid on the root path.
            EnforceRootPath(op.Value(), path);
            FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
            JSONObject json = FsExecute(user, command);
            AuditLog.Info(string.Empty);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Instrumentation:
        {
            // Admin-only: caller must belong to the configured admin group.
            EnforceRootPath(op.Value(), path);
            Groups groups = HttpFSServerWebApp.Get().Get<Groups>();
            IList<string> userGroups = groups.GetGroups(user.GetShortUserName());
            if (!userGroups.Contains(HttpFSServerWebApp.Get().GetAdminGroup()))
            {
                throw new AccessControlException("User not in HttpFSServer admin group");
            }
            Instrumentation instrumentation = HttpFSServerWebApp.Get().Get<Instrumentation>();
            IDictionary snapshot = instrumentation.GetSnapshot();
            response = Response.Ok(snapshot).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getcontentsummary:
        {
            FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("[{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getfilechecksum:
        {
            FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("[{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getfileblocklocations:
        {
            // Not supported over HttpFS; reject the request outright.
            response = Response.Status(Response.Status.BadRequest).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getaclstatus:
        {
            FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("ACL status for [{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Getxattrs:
        {
            // Fetch the requested extended attributes (all, if none named).
            IList<string> xattrNames = @params.GetValues<HttpFSParametersProvider.XAttrNameParam>(HttpFSParametersProvider.XAttrNameParam.Name);
            XAttrCodec encoding = @params.Get<HttpFSParametersProvider.XAttrEncodingParam>(HttpFSParametersProvider.XAttrEncodingParam.Name);
            FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("XAttrs for [{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        case HttpFSFileSystem.Operation.Listxattrs:
        {
            FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
            IDictionary json = FsExecute(user, command);
            AuditLog.Info("XAttr names for [{}]", path);
            response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
            break;
        }
        default:
        {
            throw new IOException(MessageFormat.Format("Invalid HTTP GET operation [{0}]", op.Value()));
        }
    }
    return (response);
}
/// <summary>
/// Exercises the full delegation-token lifecycle against an embedded
/// JobHistoryServer: fetch a token, use it, renew it, let it expire
/// (expecting failure), then fetch again and cancel — including cancellation
/// by a different renewer.  Timing windows are driven by wall-clock spin
/// loops keyed off the configured initial/max/renew intervals.
/// </summary>
public virtual void TestDelegationToken()
{
    Logger rootLogger = LogManager.GetRootLogger();
    rootLogger.SetLevel(Level.Debug);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    // Just a random principal — Kerberos auth is faked further below.
    conf.Set(JHAdminConfig.MrHistoryPrincipal, "RandomOrc/[email protected]");
    conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthentication, "kerberos");
    UserGroupInformation.SetConfiguration(conf);
    // Token timing knobs (ms): first expiry, absolute lifetime, renewal step.
    long initialInterval = 10000l;
    long maxLifetime = 20000l;
    long renewInterval = 10000l;
    JobHistoryServer jobHistoryServer = null;
    MRClientProtocol clientUsingDT = null;
    long tokenFetchTime;
    try
    {
        // Anonymous subclass (from Sharpen conversion) that wires the three
        // intervals into the token secret manager; no keytab login needed.
        jobHistoryServer = new _JobHistoryServer_87(initialInterval, maxLifetime, renewInterval);
        // no keytab based login
        // Don't need it, skip.;
        // final JobHistoryServer jobHistoryServer = jhServer;
        jobHistoryServer.Init(conf);
        jobHistoryServer.Start();
        MRClientProtocol hsService = jobHistoryServer.GetClientService().GetClientHandler();
        // Fake the authentication-method so token ops are permitted.
        UserGroupInformation loggedInUser = UserGroupInformation.CreateRemoteUser("*****@*****.**");
        NUnit.Framework.Assert.AreEqual("testrenewer", loggedInUser.GetShortUserName());
        // Default realm is APACHE.ORG
        loggedInUser.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos);
        Token token = GetDelegationToken(loggedInUser, hsService, loggedInUser.GetShortUserName());
        tokenFetchTime = Runtime.CurrentTimeMillis();
        Log.Info("Got delegation token at: " + tokenFetchTime);
        // Now try talking to JHS using the delegation token.
        clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress(), "TheDarkLord", conf);
        GetJobReportRequest jobReportRequest = Org.Apache.Hadoop.Yarn.Util.Records.NewRecord<GetJobReportRequest>();
        jobReportRequest.SetJobId(MRBuilderUtils.NewJobId(123456, 1, 1));
        // "Unknown job" proves the RPC was authenticated and dispatched —
        // the job simply doesn't exist, which is expected here and below.
        try
        {
            clientUsingDT.GetJobReport(jobReportRequest);
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.AreEqual("Unknown job job_123456_0001", e.Message);
        }
        // Renew after 50% of token age.
        while (Runtime.CurrentTimeMillis() < tokenFetchTime + initialInterval / 2)
        {
            Sharpen.Thread.Sleep(500l);
        }
        long nextExpTime = RenewDelegationToken(loggedInUser, hsService, token);
        long renewalTime = Runtime.CurrentTimeMillis();
        Log.Info("Renewed token at: " + renewalTime + ", NextExpiryTime: " + nextExpTime);
        // Wait for first expiry, but before renewed expiry.
        // NOTE(review): with `>` on the first clause this loop only spins once
        // the ORIGINAL expiry time has already passed — confirm intent.
        while (Runtime.CurrentTimeMillis() > tokenFetchTime + initialInterval && Runtime.CurrentTimeMillis() < nextExpTime)
        {
            Sharpen.Thread.Sleep(500l);
        }
        Sharpen.Thread.Sleep(50l);
        // Token is still valid because of the renewal.
        try
        {
            clientUsingDT.GetJobReport(jobReportRequest);
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.AreEqual("Unknown job job_123456_0001", e.Message);
        }
        // Wait for expiry of the renewed token.
        while (Runtime.CurrentTimeMillis() < renewalTime + renewInterval)
        {
            Sharpen.Thread.Sleep(500l);
        }
        Sharpen.Thread.Sleep(50l);
        Log.Info("At time: " + Runtime.CurrentTimeMillis() + ", token should be invalid");
        // Token should have expired — the RPC must now fail.
        try
        {
            clientUsingDT.GetJobReport(jobReportRequest);
            NUnit.Framework.Assert.Fail("Should not have succeeded with an expired token");
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.IsTrue(e.InnerException.Message.Contains("is expired"));
        }
        // Test cancellation.
        // Stop the existing proxy, start another.
        if (clientUsingDT != null)
        {
            // RPC.stopProxy(clientUsingDT);
            clientUsingDT = null;
        }
        token = GetDelegationToken(loggedInUser, hsService, loggedInUser.GetShortUserName());
        tokenFetchTime = Runtime.CurrentTimeMillis();
        Log.Info("Got delegation token at: " + tokenFetchTime);
        // Now try talking to HSService using the delegation token.
        clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress(), "loginuser2", conf);
        try
        {
            clientUsingDT.GetJobReport(jobReportRequest);
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.Fail("Unexpected exception" + e);
        }
        CancelDelegationToken(loggedInUser, hsService, token);
        // Testing a token with a different renewer: cancellation should work too.
        Token tokenWithDifferentRenewer = GetDelegationToken(loggedInUser, hsService, "yarn");
        CancelDelegationToken(loggedInUser, hsService, tokenWithDifferentRenewer);
        if (clientUsingDT != null)
        {
            // RPC.stopProxy(clientUsingDT);
            clientUsingDT = null;
        }
        // Creating a new connection with the cancelled token.
        clientUsingDT = GetMRClientProtocol(token, jobHistoryServer.GetClientService().GetBindAddress(), "loginuser2", conf);
        Log.Info("Cancelled delegation token at: " + Runtime.CurrentTimeMillis());
        // Verify cancellation worked: any IOException is acceptable here.
        try
        {
            clientUsingDT.GetJobReport(jobReportRequest);
            NUnit.Framework.Assert.Fail("Should not have succeeded with a cancelled delegation token");
        }
        catch (IOException)
        {
        }
    }
    finally
    {
        jobHistoryServer.Stop();
    }
}
/// <summary>
/// Lease-recovery scenario across two "machines":
/// (a) M1 creates a file, writes half a block, and hflushes without closing;
/// (b) M2 verifies the half block is readable;
/// (c) after the soft lease expires, a different user appends another half
///     block from M1 and closes the file;
/// (d) M2 re-opens the file and verifies a full block is readable.
/// </summary>
public virtual void Pipeline_02_03()
{
    Configuration clusterConf = new HdfsConfiguration();
    clusterConf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    // Spin up a 4-datanode mini cluster for the pipeline test.
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(clusterConf).NumDataNodes(4).Build();
    try
    {
        // Tighten the lease limits so expiry happens within the test run,
        // then wait until the cluster is fully up.
        dfsCluster.SetLeasePeriod(SoftLeaseLimit, HardLeaseLimit);
        dfsCluster.WaitActive();
        FileSystem fileSys = dfsCluster.GetFileSystem();
        Path filePath = new Path(Dir, "file1");
        int halfBlock = BlockSize / 2;
        {
            // (a) On machine M1: create the file, write half a block, and
            // hflush via the wrapped DFSOutputStream.  The file stays open.
            int bufferSize = fileSys.GetConf().GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096);
            FSDataOutputStream outStream = fileSys.Create(filePath, true, bufferSize, (short)3, BlockSize);
            Write(outStream, 0, halfBlock);
            DFSOutputStream wrapped = (DFSOutputStream)outStream.GetWrappedStream();
            wrapped.Hflush();
        }
        // (b) On machine M2: the flushed half block must be readable.
        CheckFile(filePath, halfBlock, clusterConf);
        // Kill the client-side lease renewer so the lease can actually lapse.
        AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
        DistributedFileSystem asDfs = (DistributedFileSystem)fileSys;
        asDfs.dfs.GetLeaseRenewer().InterruptAndJoin();
        {
            // (c) Back on M1: sleep past the soft lease limit, then append the
            // second half block as a different (test) user and close the file.
            Sharpen.Thread.Sleep(2 * SoftLeaseLimit);
            UserGroupInformation currentUser = UserGroupInformation.GetCurrentUser();
            string testUserName = currentUser.GetShortUserName() + "x";
            UserGroupInformation testUser = UserGroupInformation.CreateUserForTesting(testUserName, new string[] { "supergroup" });
            DistributedFileSystem userFs = testUser.DoAs(new _PrivilegedExceptionAction_102(clusterConf));
            FSDataOutputStream appendStream = Append(userFs, filePath);
            Write(appendStream, 0, halfBlock);
            appendStream.Close();
        }
        // (d) On M2: the file should now contain one full block.
        CheckFile(filePath, 2 * halfBlock, clusterConf);
    }
    finally
    {
        dfsCluster.Shutdown();
    }
}