Example #1
        public virtual Response Get(string path, HttpFSParametersProvider.OperationParam
                                    op, Parameters @params, HttpServletRequest request)
        {
            UserGroupInformation user = HttpUserGroupInformation.Get();
            Response             response;

            path = MakeAbsolute(path);
            MDC.Put(HttpFSFileSystem.OpParam, op.Value().ToString());
            MDC.Put("hostname", request.GetRemoteAddr());
            switch (op.Value())
            {
            case HttpFSFileSystem.Operation.Open:
            {
                // Invoking the command directly using an unmanaged FileSystem that is
                // released by the FileSystemReleaseFilter
                FSOperations.FSOpen command = new FSOperations.FSOpen(path);
                FileSystem          fs      = CreateFileSystem(user);
                InputStream         @is     = command.Execute(fs);
                long offset = @params.Get <HttpFSParametersProvider.OffsetParam>(HttpFSParametersProvider.OffsetParam
                                                                                 .Name);
                long len = @params.Get <HttpFSParametersProvider.LenParam>(HttpFSParametersProvider.LenParam
                                                                           .Name);
                AuditLog.Info("[{}] offset [{}] len [{}]", new object[] { path, offset, len });
                InputStreamEntity entity = new InputStreamEntity(@is, offset, len);
                response = Response.Ok(entity).Type(MediaType.ApplicationOctetStream).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getfilestatus:
            {
                FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("[{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Liststatus:
            {
                string filter = @params.Get <HttpFSParametersProvider.FilterParam>(HttpFSParametersProvider.FilterParam
                                                                                   .Name);
                FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Gethomedirectory:
            {
                EnforceRootPath(op.Value(), path);
                FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
                JSONObject             json    = FsExecute(user, command);
                AuditLog.Info(string.Empty);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Instrumentation:
            {
                EnforceRootPath(op.Value(), path);
                Groups         groups     = HttpFSServerWebApp.Get().Get <Groups>();
                IList <string> userGroups = groups.GetGroups(user.GetShortUserName());
                if (!userGroups.Contains(HttpFSServerWebApp.Get().GetAdminGroup()))
                {
                    throw new AccessControlException("User not in HttpFSServer admin group");
                }
                Instrumentation instrumentation = HttpFSServerWebApp.Get().Get <Instrumentation>();
                IDictionary     snapshot        = instrumentation.GetSnapshot();
                response = Response.Ok(snapshot).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getcontentsummary:
            {
                FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("[{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getfilechecksum:
            {
                FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("[{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getfileblocklocations:
            {
                response = Response.Status(Response.Status.BadRequest).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getaclstatus:
            {
                FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("ACL status for [{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Getxattrs:
            {
                IList <string> xattrNames = @params.GetValues <HttpFSParametersProvider.XAttrNameParam
                                                               >(HttpFSParametersProvider.XAttrNameParam.Name);
                XAttrCodec encoding = @params.Get <HttpFSParametersProvider.XAttrEncodingParam>(HttpFSParametersProvider.XAttrEncodingParam
                                                                                                .Name);
                FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames,
                                                                                encoding);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("XAttrs for [{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Listxattrs:
            {
                FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
                IDictionary json = FsExecute(user, command);
                AuditLog.Info("XAttr names for [{}]", path);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            default:
            {
                throw new IOException(MessageFormat.Format("Invalid HTTP GET operation [{0}]", op
                                                           .Value()));
            }
            }
            return(response);
        }
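
The handler above dispatches on the op query parameter the same way the WebHDFS/HttpFS REST API does. A minimal client-side sketch of exercising its Getfilestatus branch follows; the endpoint address, file path and user name are placeholders, not values taken from these examples.

using System;
using System.Net.Http;
using System.Threading.Tasks;

class HttpFsGetFileStatusDemo
{
    // Hypothetical HttpFS endpoint; adjust to your deployment.
    const string BaseUrl = "http://localhost:14000/webhdfs/v1";

    static async Task Main()
    {
        using var client = new HttpClient();
        // op=GETFILESTATUS maps to the Operation.Getfilestatus case above and
        // returns a JSON FileStatus object on success.
        string url = BaseUrl + "/tmp/sample.txt?op=GETFILESTATUS&user.name=hdfs";
        HttpResponseMessage resp = await client.GetAsync(url);
        Console.WriteLine((int)resp.StatusCode);
        Console.WriteLine(await resp.Content.ReadAsStringAsync());
    }
}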
Example #2
        public virtual Response Post(InputStream @is, UriInfo uriInfo, string path, HttpFSParametersProvider.OperationParam
                                     op, Parameters @params, HttpServletRequest request)
        {
            UserGroupInformation user = HttpUserGroupInformation.Get();
            Response             response;

            path = MakeAbsolute(path);
            MDC.Put(HttpFSFileSystem.OpParam, op.Value().ToString());
            MDC.Put("hostname", request.GetRemoteAddr());
            switch (op.Value())
            {
            case HttpFSFileSystem.Operation.Append:
            {
                bool hasData = @params.Get <HttpFSParametersProvider.DataParam>(HttpFSParametersProvider.DataParam
                                                                                .Name);
                if (!hasData)
                {
                    response = Response.TemporaryRedirect(CreateUploadRedirectionURL(uriInfo, HttpFSFileSystem.Operation
                                                                                     .Append)).Build();
                }
                else
                {
                    FSOperations.FSAppend command = new FSOperations.FSAppend(@is, path);
                    FsExecute(user, command);
                    AuditLog.Info("[{}]", path);
                    response = Response.Ok().Type(MediaType.ApplicationJson).Build();
                }
                break;
            }

            case HttpFSFileSystem.Operation.Concat:
            {
                System.Console.Out.WriteLine("HTTPFS SERVER CONCAT");
                string sources = @params.Get <HttpFSParametersProvider.SourcesParam>(HttpFSParametersProvider.SourcesParam
                                                                                     .Name);
                FSOperations.FSConcat command = new FSOperations.FSConcat(path, sources.Split(","
                                                                                              ));
                FsExecute(user, command);
                AuditLog.Info("[{}]", path);
                System.Console.Out.WriteLine("SENT RESPONSE");
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Truncate:
            {
                long newLength = @params.Get <HttpFSParametersProvider.NewLengthParam>(HttpFSParametersProvider.NewLengthParam
                                                                                       .Name);
                FSOperations.FSTruncate command = new FSOperations.FSTruncate(path, newLength);
                JSONObject json = FsExecute(user, command);
                AuditLog.Info("Truncate [{}] to length [{}]", path, newLength);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            default:
            {
                throw new IOException(MessageFormat.Format("Invalid HTTP POST operation [{0}]", op
                                                           .Value()));
            }
            }
            return(response);
        }
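
The Append branch above implements the usual two-step HttpFS write: a POST without the data parameter only returns a temporary redirect, and the client re-sends the payload to the redirected URL (which carries data=true). A hedged client sketch of that handshake follows; the endpoint, path and user name are placeholders.

using System;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;

class HttpFsAppendDemo
{
    static async Task Main()
    {
        // Disable automatic redirects so the Location header stays visible to us.
        using var client = new HttpClient(new HttpClientHandler { AllowAutoRedirect = false });
        string url = "http://localhost:14000/webhdfs/v1/tmp/sample.txt?op=APPEND&user.name=hdfs";

        // Step 1: no data parameter, so the handler answers with a temporary redirect.
        HttpResponseMessage step1 = await client.PostAsync(url, new ByteArrayContent(Array.Empty<byte>()));
        if (step1.StatusCode == HttpStatusCode.TemporaryRedirect)
        {
            // Step 2: re-send the POST with the payload to the redirect target,
            // which reaches the FSAppend command in the handler above.
            var body = new StringContent("appended bytes", Encoding.UTF8, "application/octet-stream");
            HttpResponseMessage step2 = await client.PostAsync(step1.Headers.Location, body);
            Console.WriteLine((int)step2.StatusCode);
        }
    }
}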
Example #3
        /// <summary>
        /// Store the timeline entities into the store and set the owner of them to the
        /// given user.
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException"/>
        /// <exception cref="System.IO.IOException"/>
        public virtual TimelinePutResponse PostEntities(TimelineEntities entities, UserGroupInformation
                                                        callerUGI)
        {
            if (entities == null)
            {
                return(new TimelinePutResponse());
            }
            IList <EntityIdentifier> entityIDs     = new AList <EntityIdentifier>();
            TimelineEntities         entitiesToPut = new TimelineEntities();
            IList <TimelinePutResponse.TimelinePutError> errors = new AList <TimelinePutResponse.TimelinePutError
                                                                             >();

            foreach (TimelineEntity entity in entities.GetEntities())
            {
                EntityIdentifier entityID = new EntityIdentifier(entity.GetEntityId(), entity.GetEntityType
                                                                     ());
                // if the domain id is not specified, the entity will be put into
                // the default domain
                if (entity.GetDomainId() == null || entity.GetDomainId().Length == 0)
                {
                    entity.SetDomainId(DefaultDomainId);
                }
                // check if there is existing entity
                TimelineEntity existingEntity = null;
                try
                {
                    existingEntity = store.GetEntity(entityID.GetId(), entityID.GetType(), EnumSet.Of
                                                         (TimelineReader.Field.PrimaryFilters));
                    if (existingEntity != null)
                    {
                        AddDefaultDomainIdIfAbsent(existingEntity);
                        if (!existingEntity.GetDomainId().Equals(entity.GetDomainId()))
                        {
                            throw new YarnException("The domain of the timeline entity " + entityID + " is not allowed to be changed."
                                                    );
                        }
                    }
                    if (!timelineACLsManager.CheckAccess(callerUGI, ApplicationAccessType.ModifyApp,
                                                         entity))
                    {
                        throw new YarnException(callerUGI + " is not allowed to put the timeline entity "
                                                + entityID + " into the domain " + entity.GetDomainId() + ".");
                    }
                }
                catch (Exception e)
                {
                    // Skip the entity which already exists and was put by others
                    Log.Error("Skip the timeline entity: " + entityID, e);
                    TimelinePutResponse.TimelinePutError error = new TimelinePutResponse.TimelinePutError
                                                                     ();
                    error.SetEntityId(entityID.GetId());
                    error.SetEntityType(entityID.GetType());
                    error.SetErrorCode(TimelinePutResponse.TimelinePutError.AccessDenied);
                    errors.AddItem(error);
                    continue;
                }
                entityIDs.AddItem(entityID);
                entitiesToPut.AddEntity(entity);
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Storing the entity " + entityID + ", JSON-style content: " + TimelineUtils
                              .DumpTimelineRecordtoJSON(entity));
                }
            }
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Storing entities: " + StringHelper.CsvJoiner.Join(entityIDs));
            }
            TimelinePutResponse response = store.Put(entitiesToPut);

            // add the errors of timeline system filter key conflict
            response.AddErrors(errors);
            return(response);
        }
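
A short caller sketch for the method above. The entity setters follow the Sharpen naming used elsewhere in these examples (SetEntityId, SetDomainId, ...), but the manager instance and the getters on TimelinePutError are assumptions, since only the server-side method is shown here.

// Hypothetical usage of TimelineDataManager.PostEntities; timelineDataManager is
// assumed to be an already initialized instance backed by a TimelineStore.
TimelineEntity entity = new TimelineEntity();
entity.SetEntityId("appattempt_0001_000001");
entity.SetEntityType("YARN_APPLICATION_ATTEMPT");
// The domain id may be left unset: PostEntities falls back to DefaultDomainId.

TimelineEntities entities = new TimelineEntities();
entities.AddEntity(entity);

UserGroupInformation callerUGI = UserGroupInformation.GetCurrentUser();
TimelinePutResponse putResponse = timelineDataManager.PostEntities(entities, callerUGI);

// Entities rejected by the domain or ACL checks come back as errors.
foreach (TimelinePutResponse.TimelinePutError error in putResponse.GetErrors())
{
    Log.Warn("Entity " + error.GetEntityId() + " rejected, error code " + error.GetErrorCode());
}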
Example #4
 public CheckAclImpl(TimelineDataManager _enclosing, UserGroupInformation callerUGI
                     )
 {
     this._enclosing = _enclosing;
     this.ugi        = callerUGI;
 }
Example #5
        public virtual void TestRMInitialsWithFileSystemBasedConfigurationProvider()
        {
            configuration.Set(YarnConfiguration.RmConfigurationProviderClass, "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"
                              );
            // upload configurations
            FilePath excludeHostsFile = new FilePath(tmpDir.ToString(), "excludeHosts");

            if (excludeHostsFile.Exists())
            {
                excludeHostsFile.Delete();
            }
            if (!excludeHostsFile.CreateNewFile())
            {
                NUnit.Framework.Assert.Fail("Can not create " + "excludeHosts");
            }
            PrintWriter fileWriter = new PrintWriter(excludeHostsFile);

            fileWriter.Write("0.0.0.0:123");
            fileWriter.Close();
            UploadToRemoteFileSystem(new Path(excludeHostsFile.GetAbsolutePath()));
            YarnConfiguration yarnConf = new YarnConfiguration();

            yarnConf.Set(YarnConfiguration.YarnAdminAcl, "world:anyone:rwcda");
            yarnConf.Set(YarnConfiguration.RmNodesExcludeFilePath, this.workingPath + "/excludeHosts"
                         );
            UploadConfiguration(yarnConf, "yarn-site.xml");
            CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();

            csConf.Set("yarn.scheduler.capacity.maximum-applications", "5000");
            UploadConfiguration(csConf, "capacity-scheduler.xml");
            string        aclsString = "alice,bob users,wheel";
            Configuration newConf    = new Configuration();

            newConf.Set("security.applicationclient.protocol.acl", aclsString);
            UploadConfiguration(newConf, "hadoop-policy.xml");
            Configuration conf = new Configuration();

            conf.SetBoolean(CommonConfigurationKeysPublic.HadoopSecurityAuthorization, true);
            conf.Set("hadoop.proxyuser.test.groups", "test_groups");
            conf.Set("hadoop.proxyuser.test.hosts", "test_hosts");
            conf.SetClass(CommonConfigurationKeys.HadoopSecurityGroupMapping, typeof(TestRMAdminService.MockUnixGroupsMapping
                                                                                     ), typeof(GroupMappingServiceProvider));
            UploadConfiguration(conf, "core-site.xml");
            // update the groups
            TestRMAdminService.MockUnixGroupsMapping.UpdateGroups();
            ResourceManager resourceManager = null;

            try
            {
                try
                {
                    resourceManager = new ResourceManager();
                    resourceManager.Init(configuration);
                    resourceManager.Start();
                }
                catch (Exception)
                {
                    NUnit.Framework.Assert.Fail("Should not get any exceptions");
                }
                // validate values for excludeHosts
                ICollection <string> excludeHosts = resourceManager.GetRMContext().GetNodesListManager
                                                        ().GetHostsReader().GetExcludedHosts();
                NUnit.Framework.Assert.IsTrue(excludeHosts.Count == 1);
                NUnit.Framework.Assert.IsTrue(excludeHosts.Contains("0.0.0.0:123"));
                // validate values for admin-acls
                string aclStringAfter = resourceManager.adminService.GetAccessControlList().GetAclString
                                            ().Trim();
                NUnit.Framework.Assert.AreEqual(aclStringAfter, "world:anyone:rwcda," + UserGroupInformation
                                                .GetCurrentUser().GetShortUserName());
                // validate values for queue configuration
                CapacityScheduler cs = (CapacityScheduler)resourceManager.GetRMContext().GetScheduler
                                           ();
                int maxAppsAfter = cs.GetConfiguration().GetMaximumSystemApplications();
                NUnit.Framework.Assert.AreEqual(maxAppsAfter, 5000);
                // verify service Acls for AdminService
                ServiceAuthorizationManager adminServiceServiceManager = resourceManager.adminService
                                                                         .GetServer().GetServiceAuthorizationManager();
                VerifyServiceACLsRefresh(adminServiceServiceManager, typeof(ApplicationClientProtocolPB
                                                                            ), aclsString);
                // verify service ACLs for ClientRMService
                ServiceAuthorizationManager clientRMServiceServiceManager = resourceManager.GetRMContext
                                                                                ().GetClientRMService().GetServer().GetServiceAuthorizationManager();
                VerifyServiceACLsRefresh(clientRMServiceServiceManager, typeof(ApplicationClientProtocolPB
                                                                               ), aclsString);
                // verify service ACLs for ApplicationMasterService
                ServiceAuthorizationManager appMasterService = resourceManager.GetRMContext().GetApplicationMasterService
                                                                   ().GetServer().GetServiceAuthorizationManager();
                VerifyServiceACLsRefresh(appMasterService, typeof(ApplicationClientProtocolPB), aclsString
                                         );
                // verify service ACLs for ResourceTrackerService
                ServiceAuthorizationManager RTService = resourceManager.GetRMContext().GetResourceTrackerService
                                                            ().GetServer().GetServiceAuthorizationManager();
                VerifyServiceACLsRefresh(RTService, typeof(ApplicationClientProtocolPB), aclsString
                                         );
                // verify ProxyUsers and ProxyHosts
                ProxyUsers.RefreshSuperUserGroupsConfiguration(configuration);
                NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyGroups
                                                  ()["hadoop.proxyuser.test.groups"].Count == 1);
                NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyGroups
                                                  ()["hadoop.proxyuser.test.groups"].Contains("test_groups"));
                NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyHosts
                                                  ()["hadoop.proxyuser.test.hosts"].Count == 1);
                NUnit.Framework.Assert.IsTrue(ProxyUsers.GetDefaultImpersonationProvider().GetProxyHosts
                                                  ()["hadoop.proxyuser.test.hosts"].Contains("test_hosts"));
                // verify UserToGroupsMappings
                IList <string> groupAfter = Groups.GetUserToGroupsMappingService(configuration).GetGroups
                                                (UserGroupInformation.GetCurrentUser().GetUserName());
                NUnit.Framework.Assert.IsTrue(groupAfter.Contains("test_group_D") && groupAfter.Contains
                                                  ("test_group_E") && groupAfter.Contains("test_group_F") && groupAfter.Count == 3
                                              );
            }
            finally
            {
                if (resourceManager != null)
                {
                    resourceManager.Stop();
                }
            }
        }
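
The test depends on UploadConfiguration/UploadToRemoteFileSystem helpers (not shown) to place the generated *.xml files where FileSystemBasedConfigurationProvider looks for them. A plausible sketch of such a helper follows, assuming Configuration exposes the usual WriteXml serialization in this translation; only the helper name and fields come from the test, the body is an assumption.

        // Hypothetical helper: serialize a Configuration to XML on local disk and copy
        // it to the directory watched by FileSystemBasedConfigurationProvider.
        private void UploadConfiguration(Configuration conf, string confFileName)
        {
            FilePath confFile = new FilePath(tmpDir.ToString(), confFileName);
            OutputStream output = new FileOutputStream(confFile);
            conf.WriteXml(output);
            output.Close();
            UploadToRemoteFileSystem(new Path(confFile.GetAbsolutePath()));
        }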
Example #6
            public virtual WebApp Build(WebApp webapp)
            {
                if (webapp == null)
                {
                    webapp = new _WebApp_171();
                }
                // Defaults should be fine in usual cases
                webapp.SetName(name);
                webapp.SetWebServices(wsName);
                string basePath = "/" + name;

                webapp.SetRedirectPath(basePath);
                IList <string> pathList = new AList <string>();

                if (basePath.Equals("/"))
                {
                    webapp.AddServePathSpec("/*");
                    pathList.AddItem("/*");
                }
                else
                {
                    webapp.AddServePathSpec(basePath);
                    webapp.AddServePathSpec(basePath + "/*");
                    pathList.AddItem(basePath + "/*");
                }
                if (wsName != null && !wsName.Equals(basePath))
                {
                    if (wsName.Equals("/"))
                    {
                        webapp.AddServePathSpec("/*");
                        pathList.AddItem("/*");
                    }
                    else
                    {
                        webapp.AddServePathSpec("/" + wsName);
                        webapp.AddServePathSpec("/" + wsName + "/*");
                        pathList.AddItem("/" + wsName + "/*");
                    }
                }
                if (conf == null)
                {
                    conf = new Configuration();
                }
                try
                {
                    if (application != null)
                    {
                        webapp.SetHostClass(application.GetType());
                    }
                    else
                    {
                        string cls = InferHostClass();
                        Log.Debug("setting webapp host class to {}", cls);
                        webapp.SetHostClass(Sharpen.Runtime.GetType(cls));
                    }
                    if (devMode)
                    {
                        if (port > 0)
                        {
                            try
                            {
                                new Uri("http://localhost:" + port + "/__stop").GetContent();
                                Log.Info("stopping existing webapp instance");
                                Sharpen.Thread.Sleep(100);
                            }
                            catch (ConnectException e)
                            {
                                Log.Info("no existing webapp instance found: {}", e.ToString());
                            }
                            catch (Exception e)
                            {
                                // should not be fatal
                                Log.Warn("error stopping existing instance: {}", e.ToString());
                            }
                        }
                        else
                        {
                            Log.Error("dev mode does NOT work with ephemeral port!");
                            System.Environment.Exit(1);
                        }
                    }
                    string httpScheme;
                    if (this.httpPolicy == null)
                    {
                        httpScheme = WebAppUtils.GetHttpSchemePrefix(conf);
                    }
                    else
                    {
                        httpScheme = (httpPolicy == HttpConfig.Policy.HttpsOnly) ? WebAppUtils.HttpsPrefix
                                                         : WebAppUtils.HttpPrefix;
                    }
                    HttpServer2.Builder builder = new HttpServer2.Builder().SetName(name).AddEndpoint
                                                      (URI.Create(httpScheme + bindAddress + ":" + port)).SetConf(conf).SetFindPort(findPort
                                                                                                                                    ).SetACL(new AccessControlList(conf.Get(YarnConfiguration.YarnAdminAcl, YarnConfiguration
                                                                                                                                                                            .DefaultYarnAdminAcl))).SetPathSpec(Sharpen.Collections.ToArray(pathList, new string
                                                                                                                                                                                                                                            [0]));
                    bool hasSpnegoConf = spnegoPrincipalKey != null && conf.Get(spnegoPrincipalKey) !=
                                         null && spnegoKeytabKey != null && conf.Get(spnegoKeytabKey) != null;
                    if (hasSpnegoConf)
                    {
                        builder.SetUsernameConfKey(spnegoPrincipalKey).SetKeytabConfKey(spnegoKeytabKey).
                        SetSecurityEnabled(UserGroupInformation.IsSecurityEnabled());
                    }
                    if (httpScheme.Equals(WebAppUtils.HttpsPrefix))
                    {
                        WebAppUtils.LoadSslConfiguration(builder);
                    }
                    HttpServer2 server = builder.Build();
                    foreach (WebApps.Builder.ServletStruct @struct in servlets)
                    {
                        server.AddServlet(@struct.name, @struct.spec, @struct.clazz);
                    }
                    foreach (KeyValuePair <string, object> entry in attributes)
                    {
                        server.SetAttribute(entry.Key, entry.Value);
                    }
                    HttpServer2.DefineFilter(server.GetWebAppContext(), "guice", typeof(GuiceFilter).
                                             FullName, null, new string[] { "/*" });
                    webapp.SetConf(conf);
                    webapp.SetHttpServer(server);
                }
                catch (TypeLoadException e)
                {
                    throw new WebAppException("Error starting http server", e);
                }
                catch (IOException e)
                {
                    throw new WebAppException("Error starting http server", e);
                }
                Injector injector = Guice.CreateInjector(webapp, new _AbstractModule_280(this));

                Log.Info("Registered webapp guice modules");
                // save a guice filter instance for webapp stop (mostly for unit tests)
                webapp.SetGuiceFilter(injector.GetInstance <GuiceFilter>());
                if (devMode)
                {
                    injector.GetInstance <Dispatcher>().SetDevMode(devMode);
                    Log.Info("in dev mode!");
                }
                return(webapp);
            }
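
The serve-path logic in Build derives the servlet path specs from the application name and the web-services name. A small self-contained helper mirroring only that derivation (nothing beyond the code above is assumed) shows what gets registered for, say, name = "cluster" and wsName = "ws".

using System;
using System.Collections.Generic;

static class ServePathDemo
{
    // Mirrors the path-spec derivation in WebApps.Builder.Build above.
    static IList<string> ServePathSpecs(string name, string wsName)
    {
        var specs = new List<string>();
        string basePath = "/" + name;
        if (basePath == "/")
        {
            specs.Add("/*");
        }
        else
        {
            specs.Add(basePath);
            specs.Add(basePath + "/*");
        }
        if (wsName != null && !wsName.Equals(basePath))
        {
            if (wsName.Equals("/"))
            {
                specs.Add("/*");
            }
            else
            {
                specs.Add("/" + wsName);
                specs.Add("/" + wsName + "/*");
            }
        }
        return specs;
    }

    static void Main()
    {
        // Prints: /cluster, /cluster/*, /ws, /ws/*
        Console.WriteLine(string.Join(", ", ServePathSpecs("cluster", "ws")));
    }
}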
Example #7
        /// <exception cref="System.Exception"/>
        private void TestDelegationTokenAuthenticatorCalls(bool useQS)
        {
            Org.Mortbay.Jetty.Server jetty = CreateJettyServer();
            Context context = new Context();

            context.SetContextPath("/foo");
            jetty.SetHandler(context);
            context.AddFilter(new FilterHolder(typeof(TestWebDelegationToken.AFilter)), "/*",
                              0);
            context.AddServlet(new ServletHolder(typeof(TestWebDelegationToken.PingServlet)),
                               "/bar");
            try
            {
                jetty.Start();
                Uri nonAuthURL = new Uri(GetJettyURL() + "/foo/bar");
                Uri authURL    = new Uri(GetJettyURL() + "/foo/bar?authenticated=foo");
                Uri authURL2   = new Uri(GetJettyURL() + "/foo/bar?authenticated=bar");
                DelegationTokenAuthenticatedURL.Token token = new DelegationTokenAuthenticatedURL.Token
                                                                  ();
                DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
                aUrl.SetUseQueryStringForDelegationToken(useQS);
                try
                {
                    aUrl.GetDelegationToken(nonAuthURL, token, FooUser);
                    NUnit.Framework.Assert.Fail();
                }
                catch (Exception ex)
                {
                    Assert.True(ex.Message.Contains("401"));
                }
                aUrl.GetDelegationToken(authURL, token, FooUser);
                NUnit.Framework.Assert.IsNotNull(token.GetDelegationToken());
                Assert.Equal(new Text("token-kind"), token.GetDelegationToken(
                                 ).GetKind());
                aUrl.RenewDelegationToken(authURL, token);
                try
                {
                    aUrl.RenewDelegationToken(nonAuthURL, token);
                    NUnit.Framework.Assert.Fail();
                }
                catch (Exception ex)
                {
                    Assert.True(ex.Message.Contains("401"));
                }
                aUrl.GetDelegationToken(authURL, token, FooUser);
                try
                {
                    aUrl.RenewDelegationToken(authURL2, token);
                    NUnit.Framework.Assert.Fail();
                }
                catch (Exception ex)
                {
                    Assert.True(ex.Message.Contains("403"));
                }
                aUrl.GetDelegationToken(authURL, token, FooUser);
                aUrl.CancelDelegationToken(authURL, token);
                aUrl.GetDelegationToken(authURL, token, FooUser);
                aUrl.CancelDelegationToken(nonAuthURL, token);
                aUrl.GetDelegationToken(authURL, token, FooUser);
                try
                {
                    aUrl.RenewDelegationToken(nonAuthURL, token);
                }
                catch (Exception ex)
                {
                    Assert.True(ex.Message.Contains("401"));
                }
                aUrl.GetDelegationToken(authURL, token, "foo");
                UserGroupInformation ugi = UserGroupInformation.GetCurrentUser();
                ugi.AddToken(token.GetDelegationToken());
                ugi.DoAs(new _PrivilegedExceptionAction_412(aUrl, nonAuthURL, useQS));
            }
            finally
            {
                jetty.Stop();
            }
        }
Example #8
 public abstract IList <QueueUserACLInfo> GetQueueUserAclInfo(UserGroupInformation
                                                              arg1);
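
Example #8 is only the abstract declaration. A trivial hypothetical override is sketched below; a real scheduler would build one QueueUserACLInfo per queue visible to the given user, but that record construction is omitted here because it does not appear in these examples.

 // Minimal hypothetical stub of the abstract method above: exposes no queues at all.
 public override IList <QueueUserACLInfo> GetQueueUserAclInfo(UserGroupInformation arg1)
 {
     return(new AList <QueueUserACLInfo>());
 }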
Example #9
        // Multiple datanodes could be running on the local machine. Store proxies in
        // a map keyed by the ipc port of the datanode.
        // reader for the data file
        // reader for the checksum file
        /// <summary>The only way this object can be instantiated.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static BlockReaderLocalLegacy NewBlockReader(DFSClient.Conf conf, UserGroupInformation
                                                              userGroupInformation, Configuration configuration, string file, ExtendedBlock blk
                                                              , Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token, DatanodeInfo
                                                              node, long startOffset, long length, StorageType storageType)
        {
            BlockReaderLocalLegacy.LocalDatanodeInfo localDatanodeInfo = GetLocalDatanodeInfo
                                                                             (node.GetIpcPort());
            // check the cache first
            BlockLocalPathInfo pathinfo = localDatanodeInfo.GetBlockLocalPathInfo(blk);

            if (pathinfo == null)
            {
                if (userGroupInformation == null)
                {
                    userGroupInformation = UserGroupInformation.GetCurrentUser();
                }
                pathinfo = GetBlockPathInfo(userGroupInformation, blk, node, configuration, conf.
                                            socketTimeout, token, conf.connectToDnViaHostname, storageType);
            }
            // check to see if the file exists. It may so happen that the
            // HDFS file has been deleted and this block-lookup is occurring
            // on behalf of a new HDFS file. This time, the block file could
            // be residing in a different portion of the fs.data.dir directory.
            // In this case, we remove this entry from the cache. The next
            // call to this method will re-populate the cache.
            FileInputStream        dataIn           = null;
            FileInputStream        checksumIn       = null;
            BlockReaderLocalLegacy localBlockReader = null;
            bool skipChecksumCheck = conf.skipShortCircuitChecksums || storageType.IsTransient
                                         ();

            try
            {
                // get a local file system
                FilePath blkfile = new FilePath(pathinfo.GetBlockPath());
                dataIn = new FileInputStream(blkfile);
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("New BlockReaderLocalLegacy for file " + blkfile + " of size " + blkfile
                              .Length() + " startOffset " + startOffset + " length " + length + " short circuit checksum "
                              + !skipChecksumCheck);
                }
                if (!skipChecksumCheck)
                {
                    // get the metadata file
                    FilePath metafile = new FilePath(pathinfo.GetMetaPath());
                    checksumIn = new FileInputStream(metafile);
                    DataChecksum checksum = BlockMetadataHeader.ReadDataChecksum(new DataInputStream(
                                                                                     checksumIn), blk);
                    long firstChunkOffset = startOffset - (startOffset % checksum.GetBytesPerChecksum
                                                               ());
                    localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset
                                                                  , length, pathinfo, checksum, true, dataIn, firstChunkOffset, checksumIn);
                }
                else
                {
                    localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token, startOffset
                                                                  , length, pathinfo, dataIn);
                }
            }
            catch (IOException e)
            {
                // remove from cache
                localDatanodeInfo.RemoveBlockLocalPathInfo(blk);
                DFSClient.Log.Warn("BlockReaderLocalLegacy: Removing " + blk + " from cache because local file "
                                   + pathinfo.GetBlockPath() + " could not be opened.");
                throw;
            }
            finally
            {
                if (localBlockReader == null)
                {
                    if (dataIn != null)
                    {
                        dataIn.Close();
                    }
                    if (checksumIn != null)
                    {
                        checksumIn.Close();
                    }
                }
            }
            return(localBlockReader);
        }
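
Before building the checksum-verifying reader, the method aligns the requested startOffset down to a checksum-chunk boundary (firstChunkOffset = startOffset - startOffset % bytesPerChecksum). A tiny self-contained example of that arithmetic, using the common 512-byte chunk size purely as an illustrative value:

using System;

class ChunkAlignmentDemo
{
    static void Main()
    {
        long startOffset = 70000;      // byte offset the caller asked for
        long bytesPerChecksum = 512;   // illustrative chunk size

        // Same alignment as in NewBlockReader: round down to the chunk start so the
        // checksum covering the first requested byte can be verified in full.
        long firstChunkOffset = startOffset - (startOffset % bytesPerChecksum);

        Console.WriteLine(firstChunkOffset);               // 69632
        Console.WriteLine(startOffset - firstChunkOffset); // 368 bytes to skip inside the chunk
    }
}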
Example #10
        /// <summary>test JobConf</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestNetworkedJob()
        {
            // mock creation
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient              client     = new JobClient(mr.GetConfig());
                RunningJob             rj         = client.SubmitJob(job);
                JobID                  jobId      = rj.GetID();
                JobClient.NetworkedJob runningJob = (JobClient.NetworkedJob)client.GetJob(jobId);
                runningJob.SetJobPriority(JobPriority.High.ToString());
                // test getters
                NUnit.Framework.Assert.IsTrue(runningJob.GetConfiguration().ToString().EndsWith("0001/job.xml"
                                                                                                ));
                NUnit.Framework.Assert.AreEqual(runningJob.GetID(), jobId);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobID(), jobId.ToString());
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobName(), "N/A");
                NUnit.Framework.Assert.IsTrue(runningJob.GetJobFile().EndsWith(".staging/" + runningJob
                                                                               .GetJobID() + "/job.xml"));
                NUnit.Framework.Assert.IsTrue(runningJob.GetTrackingURL().Length > 0);
                NUnit.Framework.Assert.IsTrue(runningJob.MapProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.ReduceProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.CleanupProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.SetupProgress() == 0.0f);
                TaskCompletionEvent[] tce = runningJob.GetTaskCompletionEvents(0);
                NUnit.Framework.Assert.AreEqual(tce.Length, 0);
                NUnit.Framework.Assert.AreEqual(runningJob.GetHistoryUrl(), string.Empty);
                NUnit.Framework.Assert.IsFalse(runningJob.IsRetired());
                NUnit.Framework.Assert.AreEqual(runningJob.GetFailureInfo(), string.Empty);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobStatus().GetJobName(), "N/A");
                NUnit.Framework.Assert.AreEqual(client.GetMapTaskReports(jobId).Length, 0);
                try
                {
                    client.GetSetupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_SETUP");
                }
                try
                {
                    client.GetCleanupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_CLEANUP");
                }
                NUnit.Framework.Assert.AreEqual(client.GetReduceTaskReports(jobId).Length, 0);
                // test ClusterStatus
                ClusterStatus status = client.GetClusterStatus(true);
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames().Count, 2);
                // this method is not implemented and always returns an empty array or null
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackers(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackerNames().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetMaxMapTasks(), 20);
                NUnit.Framework.Assert.AreEqual(status.GetMaxReduceTasks(), 4);
                NUnit.Framework.Assert.AreEqual(status.GetNumExcludedNodes(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetReduceTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetTaskTrackers(), 2);
                NUnit.Framework.Assert.AreEqual(status.GetTTExpiryInterval(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetGraylistedTrackers(), 0);
                // test read and write
                ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
                status.Write(new DataOutputStream(dataOut));
                ClusterStatus status2 = new ClusterStatus();
                status2.ReadFields(new DataInputStream(new ByteArrayInputStream(dataOut.ToByteArray
                                                                                    ())));
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames(), status2.GetActiveTrackerNames
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo(), status2.GetBlackListedTrackersInfo
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), status2.GetMapTasks());
                try
                {
                }
                catch (RuntimeException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.EndsWith("not found on CLASSPATH"));
                }
                // test taskStatusfilter
                JobClient.SetTaskOutputFilter(job, JobClient.TaskStatusFilter.All);
                NUnit.Framework.Assert.AreEqual(JobClient.GetTaskOutputFilter(job), JobClient.TaskStatusFilter
                                                .All);
                // runningJob.setJobPriority(JobPriority.HIGH.name());
                // test default map
                NUnit.Framework.Assert.AreEqual(client.GetDefaultMaps(), 20);
                NUnit.Framework.Assert.AreEqual(client.GetDefaultReduces(), 4);
                NUnit.Framework.Assert.AreEqual(client.GetSystemDir().GetName(), "jobSubmitDir");
                // test queue information
                JobQueueInfo[] rootQueueInfo = client.GetRootQueues();
                NUnit.Framework.Assert.AreEqual(rootQueueInfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(rootQueueInfo[0].GetQueueName(), "default");
                JobQueueInfo[] qinfo = client.GetQueues();
                NUnit.Framework.Assert.AreEqual(qinfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(qinfo[0].GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(client.GetChildQueues("default").Length, 0);
                NUnit.Framework.Assert.AreEqual(client.GetJobsFromQueue("default").Length, 1);
                NUnit.Framework.Assert.IsTrue(client.GetJobsFromQueue("default")[0].GetJobFile().
                                              EndsWith("/job.xml"));
                JobQueueInfo qi = client.GetQueueInfo("default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueState(), "running");
                QueueAclsInfo[] aai = client.GetQueueAclsForCurrentUser();
                NUnit.Framework.Assert.AreEqual(aai.Length, 2);
                NUnit.Framework.Assert.AreEqual(aai[0].GetQueueName(), "root");
                NUnit.Framework.Assert.AreEqual(aai[1].GetQueueName(), "default");
                // test token
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = client.
                                                                                           GetDelegationToken(new Text(UserGroupInformation.GetCurrentUser().GetShortUserName
                                                                                                                           ()));
                NUnit.Framework.Assert.AreEqual(token.GetKind().ToString(), "RM_DELEGATION_TOKEN"
                                                );
                // test JobClient
                // The following asserts read JobStatus twice and ensure the returned
                // JobStatus objects correspond to the same Job.
                NUnit.Framework.Assert.AreEqual("Expected matching JobIDs", jobId, ((JobID)client
                                                                                    .GetJob(jobId).GetJobStatus().GetJobID()));
                NUnit.Framework.Assert.AreEqual("Expected matching startTimes", rj.GetJobStatus()
                                                .GetStartTime(), client.GetJob(jobId).GetJobStatus().GetStartTime());
            }
            finally
            {
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }
Example #11
 /// <exception cref="System.IO.IOException"/>
 private UserProvider(Configuration conf)
     : base(conf)
 {
     user        = UserGroupInformation.GetCurrentUser();
     credentials = user.GetCredentials();
 }
Example #12
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.Exception"/>
 public static WebHdfsFileSystem GetWebHdfsFileSystemAs(UserGroupInformation ugi,
                                                        Configuration conf, string scheme)
 {
     return(ugi.DoAs(new _PrivilegedExceptionAction_75(conf)));
 }
Example #13
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.Exception"/>
 public static WebHdfsFileSystem GetWebHdfsFileSystemAs(UserGroupInformation ugi,
                                                        Configuration conf)
 {
     return(GetWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.Scheme));
 }
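
The Sharpen-generated anonymous class _PrivilegedExceptionAction_75 used by the two overloads above is not shown. A hedged sketch of what such an action typically looks like follows; the configuration key, the URI construction and the exact shape of PrivilegedExceptionAction in this translation are assumptions.

 // Hypothetical stand-in for the anonymous action: open the file system while
 // running as the supplied UGI, so later WebHDFS calls are issued as that user.
 private sealed class GetWebHdfsAction : PrivilegedExceptionAction<WebHdfsFileSystem>
 {
     private readonly Configuration conf;
     private readonly string        scheme;

     internal GetWebHdfsAction(Configuration conf, string scheme)
     {
         this.conf   = conf;
         this.scheme = scheme;
     }

     public WebHdfsFileSystem Run()
     {
         // "dfs.namenode.http-address" is used as a placeholder for the configured address.
         string addr = conf.Get("dfs.namenode.http-address");
         return((WebHdfsFileSystem)FileSystem.Get(URI.Create(scheme + "://" + addr), conf));
     }
 }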
Example #14
        public virtual Response Put(InputStream @is, UriInfo uriInfo, string path, HttpFSParametersProvider.OperationParam
                                    op, Parameters @params, HttpServletRequest request)
        {
            UserGroupInformation user = HttpUserGroupInformation.Get();
            Response             response;

            path = MakeAbsolute(path);
            MDC.Put(HttpFSFileSystem.OpParam, op.Value().ToString());
            MDC.Put("hostname", request.GetRemoteAddr());
            switch (op.Value())
            {
            case HttpFSFileSystem.Operation.Create:
            {
                bool hasData = @params.Get <HttpFSParametersProvider.DataParam>(HttpFSParametersProvider.DataParam
                                                                                .Name);
                if (!hasData)
                {
                    response = Response.TemporaryRedirect(CreateUploadRedirectionURL(uriInfo, HttpFSFileSystem.Operation
                                                                                     .Create)).Build();
                }
                else
                {
                    short permission = @params.Get <HttpFSParametersProvider.PermissionParam>(HttpFSParametersProvider.PermissionParam
                                                                                              .Name);
                    bool @override = @params.Get <HttpFSParametersProvider.OverwriteParam>(HttpFSParametersProvider.OverwriteParam
                                                                                           .Name);
                    short replication = @params.Get <HttpFSParametersProvider.ReplicationParam>(HttpFSParametersProvider.ReplicationParam
                                                                                                .Name);
                    long blockSize = @params.Get <HttpFSParametersProvider.BlockSizeParam>(HttpFSParametersProvider.BlockSizeParam
                                                                                           .Name);
                    FSOperations.FSCreate command = new FSOperations.FSCreate(@is, path, permission,
                                                                              @override, replication, blockSize);
                    FsExecute(user, command);
                    AuditLog.Info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]"
                                  , new object[] { path, permission, @override, replication, blockSize });
                    response = Response.Status(Response.Status.Created).Build();
                }
                break;
            }

            case HttpFSFileSystem.Operation.Setxattr:
            {
                string xattrName = @params.Get <HttpFSParametersProvider.XAttrNameParam>(HttpFSParametersProvider.XAttrNameParam
                                                                                         .Name);
                string xattrValue = @params.Get <HttpFSParametersProvider.XAttrValueParam>(HttpFSParametersProvider.XAttrValueParam
                                                                                           .Name);
                EnumSet <XAttrSetFlag> flag = @params.Get <HttpFSParametersProvider.XAttrSetFlagParam
                                                           >(HttpFSParametersProvider.XAttrSetFlagParam.Name);
                FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(path, xattrName, xattrValue
                                                                              , flag);
                FsExecute(user, command);
                AuditLog.Info("[{}] to xAttr [{}]", path, xattrName);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Removexattr:
            {
                string xattrName = @params.Get <HttpFSParametersProvider.XAttrNameParam>(HttpFSParametersProvider.XAttrNameParam
                                                                                         .Name);
                FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(path, xattrName
                                                                                    );
                FsExecute(user, command);
                AuditLog.Info("[{}] removed xAttr [{}]", path, xattrName);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Mkdirs:
            {
                short permission = @params.Get <HttpFSParametersProvider.PermissionParam>(HttpFSParametersProvider.PermissionParam
                                                                                          .Name);
                FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path, permission);
                JSONObject            json    = FsExecute(user, command);
                AuditLog.Info("[{}] permission [{}]", path, permission);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Rename:
            {
                string toPath = @params.Get <HttpFSParametersProvider.DestinationParam>(HttpFSParametersProvider.DestinationParam
                                                                                        .Name);
                FSOperations.FSRename command = new FSOperations.FSRename(path, toPath);
                JSONObject            json    = FsExecute(user, command);
                AuditLog.Info("[{}] to [{}]", path, toPath);
                response = Response.Ok(json).Type(MediaType.ApplicationJson).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Setowner:
            {
                string owner = @params.Get <HttpFSParametersProvider.OwnerParam>(HttpFSParametersProvider.OwnerParam
                                                                                 .Name);
                string group = @params.Get <HttpFSParametersProvider.GroupParam>(HttpFSParametersProvider.GroupParam
                                                                                 .Name);
                FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path, owner, group);
                FsExecute(user, command);
                AuditLog.Info("[{}] to (O/G)[{}]", path, owner + ":" + group);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Setpermission:
            {
                short permission = @params.Get <HttpFSParametersProvider.PermissionParam>(HttpFSParametersProvider.PermissionParam
                                                                                          .Name);
                FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path, permission
                                                                                        );
                FsExecute(user, command);
                AuditLog.Info("[{}] to [{}]", path, permission);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Setreplication:
            {
                short replication = @params.Get <HttpFSParametersProvider.ReplicationParam>(HttpFSParametersProvider.ReplicationParam
                                                                                            .Name);
                FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path, replication
                                                                                          );
                JSONObject json = FsExecute(user, command);
                AuditLog.Info("[{}] to [{}]", path, replication);
                response = Response.Ok(json).Build();
                break;
            }

            case HttpFSFileSystem.Operation.Settimes:
            {
                long modifiedTime = @params.Get <HttpFSParametersProvider.ModifiedTimeParam>(HttpFSParametersProvider.ModifiedTimeParam
                                                                                             .Name);
                long accessTime = @params.Get <HttpFSParametersProvider.AccessTimeParam>(HttpFSParametersProvider.AccessTimeParam
                                                                                         .Name);
                FSOperations.FSSetTimes command = new FSOperations.FSSetTimes(path, modifiedTime,
                                                                              accessTime);
                FsExecute(user, command);
                AuditLog.Info("[{}] to (M/A)[{}]", path, modifiedTime + ":" + accessTime);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Setacl:
            {
                string aclSpec = @params.Get <HttpFSParametersProvider.AclPermissionParam>(HttpFSParametersProvider.AclPermissionParam
                                                                                           .Name);
                FSOperations.FSSetAcl command = new FSOperations.FSSetAcl(path, aclSpec);
                FsExecute(user, command);
                AuditLog.Info("[{}] to acl [{}]", path, aclSpec);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Removeacl:
            {
                FSOperations.FSRemoveAcl command = new FSOperations.FSRemoveAcl(path);
                FsExecute(user, command);
                AuditLog.Info("[{}] removed acl", path);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Modifyaclentries:
            {
                string aclSpec = @params.Get <HttpFSParametersProvider.AclPermissionParam>(HttpFSParametersProvider.AclPermissionParam
                                                                                           .Name);
                FSOperations.FSModifyAclEntries command = new FSOperations.FSModifyAclEntries(path
                                                                                              , aclSpec);
                FsExecute(user, command);
                AuditLog.Info("[{}] modify acl entry with [{}]", path, aclSpec);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Removeaclentries:
            {
                string aclSpec = @params.Get <HttpFSParametersProvider.AclPermissionParam>(HttpFSParametersProvider.AclPermissionParam
                                                                                           .Name);
                FSOperations.FSRemoveAclEntries command = new FSOperations.FSRemoveAclEntries(path
                                                                                              , aclSpec);
                FsExecute(user, command);
                AuditLog.Info("[{}] remove acl entry [{}]", path, aclSpec);
                response = Response.Ok().Build();
                break;
            }

            case HttpFSFileSystem.Operation.Removedefaultacl:
            {
                FSOperations.FSRemoveDefaultAcl command = new FSOperations.FSRemoveDefaultAcl(path
                                                                                              );
                FsExecute(user, command);
                AuditLog.Info("[{}] remove default acl", path);
                response = Response.Ok().Build();
                break;
            }

            default:
            {
                throw new IOException(MessageFormat.Format("Invalid HTTP PUT operation [{0}]", op
                                                           .Value()));
            }
            }
            return(response);
        }
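A rough, hedged aside: the PUT cases above (Mkdirs, Rename, Setowner, Setpermission, and the ACL operations) are thin wrappers that forward to the underlying Hadoop FileSystem. Assuming the Sharpen-style member names Mkdirs and Rename (not shown above) and made-up paths, the equivalent direct calls would look roughly like:

    // Hedged sketch only: the Mkdirs/Rename member names are assumed from the
    // Sharpen-style port used in these examples; the paths are illustrative.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.Get(conf);
    Path dir = new Path("/tmp/example-dir");
    fs.Mkdirs(dir);                                     // HTTP PUT op=MKDIRS
    fs.Rename(dir, new Path("/tmp/example-renamed"));   // HTTP PUT op=RENAME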
Example #15
 /// <summary>Perform the given action as the daemon's current user.</summary>
 /// <remarks>
 /// Perform the given action as the daemon's current user. If an
 /// InterruptedException is thrown, it is converted to an IOException.
 /// </remarks>
 /// <param name="action">the action to perform</param>
 /// <returns>the result of the action</returns>
 /// <exception cref="System.IO.IOException">in the event of error</exception>
 public static T DoAsCurrentUser <T>(PrivilegedExceptionAction <T> action)
 {
     return(DoAsUser(UserGroupInformation.GetCurrentUser(), action));
 }
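A minimal usage sketch, assuming DoAsCurrentUser is called from (or qualified with) its declaring class and that PrivilegedExceptionAction<T> is the single-method interface from the port; the Run member name and the file-system call inside the action are assumptions for illustration:

    // Hedged sketch: a small action executed under the daemon's current user.
    internal sealed class GetHomeDirAction : PrivilegedExceptionAction<Path>
    {
        private readonly FileSystem fs;

        public GetHomeDirAction(FileSystem fs)
        {
            this.fs = fs;
        }

        // Assumed member name, mirroring the Java run() method.
        public Path Run()
        {
            return fs.GetHomeDirectory();
        }
    }

    // An InterruptedException thrown by the action surfaces as an IOException.
    Path home = DoAsCurrentUser(new GetHomeDirAction(fs));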
Example #16
 public virtual bool HasAccess(QueueACL acl, UserGroupInformation user)
 {
     return(authorizer.CheckPermission(SchedulerUtils.ToAccessType(acl), queueEntity,
                                       user));
 }
Example #17
 public virtual BlockReaderFactory SetUserGroupInformation(UserGroupInformation userGroupInformation
                                                           )
 {
     this.userGroupInformation = userGroupInformation;
     return(this);
 }
Example #18
        /// <summary>Main run function for the client</summary>
        /// <returns>true if application completed successfully</returns>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException"/>
        public virtual bool Run()
        {
            Log.Info("Running Client");
            yarnClient.Start();
            YarnClusterMetrics clusterMetrics = yarnClient.GetYarnClusterMetrics();

            Log.Info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics
                     .GetNumNodeManagers());
            IList <NodeReport> clusterNodeReports = yarnClient.GetNodeReports(NodeState.Running
                                                                              );

            Log.Info("Got Cluster node info from ASM");
            foreach (NodeReport node in clusterNodeReports)
            {
                Log.Info("Got node report from ASM for" + ", nodeId=" + node.GetNodeId() + ", nodeAddress"
                         + node.GetHttpAddress() + ", nodeRackName" + node.GetRackName() + ", nodeNumContainers"
                         + node.GetNumContainers());
            }
            QueueInfo queueInfo = yarnClient.GetQueueInfo(this.amQueue);

            Log.Info("Queue info" + ", queueName=" + queueInfo.GetQueueName() + ", queueCurrentCapacity="
                     + queueInfo.GetCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.GetMaximumCapacity
                         () + ", queueApplicationCount=" + queueInfo.GetApplications().Count + ", queueChildQueueCount="
                     + queueInfo.GetChildQueues().Count);
            IList <QueueUserACLInfo> listAclInfo = yarnClient.GetQueueAclsInfo();

            foreach (QueueUserACLInfo aclInfo in listAclInfo)
            {
                foreach (QueueACL userAcl in aclInfo.GetUserAcls())
                {
                    Log.Info("User ACL Info for Queue" + ", queueName=" + aclInfo.GetQueueName() + ", userAcl="
                             + userAcl.ToString());
                }
            }
            if (domainId != null && domainId.Length > 0 && toCreateDomain)
            {
                PrepareTimelineDomain();
            }
            // Get a new application id
            YarnClientApplication     app         = yarnClient.CreateApplication();
            GetNewApplicationResponse appResponse = app.GetNewApplicationResponse();
            // TODO get min/max resource capabilities from RM and change memory ask if needed
            // If we do not have min/max, we may not be able to correctly request
            // the required resources from the RM for the app master
            // Memory ask has to be a multiple of min and less than max.
            // Dump out information about cluster capability as seen by the resource manager
            int maxMem = appResponse.GetMaximumResourceCapability().GetMemory();

            Log.Info("Max mem capabililty of resources in this cluster " + maxMem);
            // A resource ask cannot exceed the max.
            if (amMemory > maxMem)
            {
                Log.Info("AM memory specified above max threshold of cluster. Using max value." +
                         ", specified=" + amMemory + ", max=" + maxMem);
                amMemory = maxMem;
            }
            int maxVCores = appResponse.GetMaximumResourceCapability().GetVirtualCores();

            Log.Info("Max virtual cores capabililty of resources in this cluster " + maxVCores
                     );
            if (amVCores > maxVCores)
            {
                Log.Info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                         + ", specified=" + amVCores + ", max=" + maxVCores);
                amVCores = maxVCores;
            }
            // set the application name
            ApplicationSubmissionContext appContext = app.GetApplicationSubmissionContext();
            ApplicationId appId = appContext.GetApplicationId();

            appContext.SetKeepContainersAcrossApplicationAttempts(keepContainers);
            appContext.SetApplicationName(appName);
            if (attemptFailuresValidityInterval >= 0)
            {
                appContext.SetAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
            }
            // set local resources for the application master
            // local files or archives as needed
            // In this scenario, the jar file for the application master is part of the local resources
            IDictionary <string, LocalResource> localResources = new Dictionary <string, LocalResource
                                                                                 >();

            Log.Info("Copy App Master jar from local filesystem and add to local environment"
                     );
            // Copy the application master jar to the filesystem
            // Create a local resource to point to the destination jar path
            FileSystem fs = FileSystem.Get(conf);

            AddToLocalResources(fs, appMasterJar, appMasterJarPath, appId.ToString(), localResources
                                , null);
            // Set the log4j properties if needed
            if (!log4jPropFile.IsEmpty())
            {
                AddToLocalResources(fs, log4jPropFile, log4jPath, appId.ToString(), localResources
                                    , null);
            }
            // The shell script has to be made available on the final container(s)
            // where it will be executed.
            // To do this, we need to first copy into the filesystem that is visible
            // to the yarn framework.
            // We do not need to set this as a local resource for the application
            // master as the application master does not need it.
            string hdfsShellScriptLocation  = string.Empty;
            long   hdfsShellScriptLen       = 0;
            long   hdfsShellScriptTimestamp = 0;

            if (!shellScriptPath.IsEmpty())
            {
                Path   shellSrc        = new Path(shellScriptPath);
                string shellPathSuffix = appName + "/" + appId.ToString() + "/" + ScriptPath;
                Path   shellDst        = new Path(fs.GetHomeDirectory(), shellPathSuffix);
                fs.CopyFromLocalFile(false, true, shellSrc, shellDst);
                hdfsShellScriptLocation = shellDst.ToUri().ToString();
                FileStatus shellFileStatus = fs.GetFileStatus(shellDst);
                hdfsShellScriptLen       = shellFileStatus.GetLen();
                hdfsShellScriptTimestamp = shellFileStatus.GetModificationTime();
            }
            if (!shellCommand.IsEmpty())
            {
                AddToLocalResources(fs, null, shellCommandPath, appId.ToString(), localResources,
                                    shellCommand);
            }
            if (shellArgs.Length > 0)
            {
                AddToLocalResources(fs, null, shellArgsPath, appId.ToString(), localResources, StringUtils
                                    .Join(shellArgs, " "));
            }
            // Set the necessary security tokens as needed
            //amContainer.setContainerTokens(containerToken);
            // Set the env variables to be setup in the env where the application master will be run
            Log.Info("Set the environment for the application master");
            IDictionary <string, string> env = new Dictionary <string, string>();

            // put location of shell script into env
            // using the env info, the application master will create the correct local resource for the
            // eventual containers that will be launched to execute the shell scripts
            env[DSConstants.Distributedshellscriptlocation]  = hdfsShellScriptLocation;
            env[DSConstants.Distributedshellscripttimestamp] = System.Convert.ToString(hdfsShellScriptTimestamp
                                                                                       );
            env[DSConstants.Distributedshellscriptlen] = System.Convert.ToString(hdfsShellScriptLen
                                                                                 );
            if (domainId != null && domainId.Length > 0)
            {
                env[DSConstants.Distributedshelltimelinedomain] = domainId;
            }
            // Add AppMaster.jar location to classpath
            // At some point we should not be required to add
            // the hadoop specific classpaths to the env.
            // It should be provided out of the box.
            // For now setting all required classpaths including
            // the classpath to "." for the application jar
            StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.Classpath
                                                           .$$()).Append(ApplicationConstants.ClassPathSeparator).Append("./*");

            foreach (string c in conf.GetStrings(YarnConfiguration.YarnApplicationClasspath,
                                                 YarnConfiguration.DefaultYarnCrossPlatformApplicationClasspath))
            {
                classPathEnv.Append(ApplicationConstants.ClassPathSeparator);
                classPathEnv.Append(c.Trim());
            }
            classPathEnv.Append(ApplicationConstants.ClassPathSeparator).Append("./log4j.properties"
                                                                                );
            // add the runtime classpath needed for tests to work
            if (conf.GetBoolean(YarnConfiguration.IsMiniYarnCluster, false))
            {
                classPathEnv.Append(':');
                classPathEnv.Append(Runtime.GetProperty("java.class.path"));
            }
            env["CLASSPATH"] = classPathEnv.ToString();
            // Set the necessary command to execute the application master
            Vector <CharSequence> vargs = new Vector <CharSequence>(30);

            // Set java executable command
            Log.Info("Setting up app master command");
            vargs.AddItem(ApplicationConstants.Environment.JavaHome.$$() + "/bin/java");
            // Set Xmx based on am memory size
            vargs.AddItem("-Xmx" + amMemory + "m");
            // Set class name
            vargs.AddItem(appMasterMainClass);
            // Set params for Application Master
            vargs.AddItem("--container_memory " + containerMemory.ToString());
            vargs.AddItem("--container_vcores " + containerVirtualCores.ToString());
            vargs.AddItem("--num_containers " + numContainers.ToString());
            if (null != nodeLabelExpression)
            {
                appContext.SetNodeLabelExpression(nodeLabelExpression);
            }
            vargs.AddItem("--priority " + shellCmdPriority.ToString());
            foreach (KeyValuePair <string, string> entry in shellEnv)
            {
                vargs.AddItem("--shell_env " + entry.Key + "=" + entry.Value);
            }
            if (debugFlag)
            {
                vargs.AddItem("--debug");
            }
            vargs.AddItem("1>" + ApplicationConstants.LogDirExpansionVar + "/AppMaster.stdout"
                          );
            vargs.AddItem("2>" + ApplicationConstants.LogDirExpansionVar + "/AppMaster.stderr"
                          );
            // Get final command
            StringBuilder command = new StringBuilder();

            foreach (CharSequence str in vargs)
            {
                command.Append(str).Append(" ");
            }
            Log.Info("Completed setting up app master command " + command.ToString());
            IList <string> commands = new AList <string>();

            commands.AddItem(command.ToString());
            // Set up the container launch context for the application master
            ContainerLaunchContext amContainer = ContainerLaunchContext.NewInstance(localResources
                                                                                    , env, commands, null, null, null);
            // Set up resource type requirements
            // For now, both memory and vcores are supported, so we set memory and
            // vcores requirements
            Resource capability = Resource.NewInstance(amMemory, amVCores);

            appContext.SetResource(capability);
            // Service data is a binary blob that can be passed to the application
            // Not needed in this scenario
            // amContainer.setServiceData(serviceData);
            // Setup security tokens
            if (UserGroupInformation.IsSecurityEnabled())
            {
                // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
                Credentials credentials  = new Credentials();
                string      tokenRenewer = conf.Get(YarnConfiguration.RmPrincipal);
                if (tokenRenewer == null || tokenRenewer.Length == 0)
                {
                    throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer"
                                          );
                }
                // For now, only getting tokens for the default file-system.
                Org.Apache.Hadoop.Security.Token.Token <object>[] tokens = fs.AddDelegationTokens(
                    tokenRenewer, credentials);
                if (tokens != null)
                {
                    foreach (Org.Apache.Hadoop.Security.Token.Token <object> token in tokens)
                    {
                        Log.Info("Got dt for " + fs.GetUri() + "; " + token);
                    }
                }
                DataOutputBuffer dob = new DataOutputBuffer();
                credentials.WriteTokenStorageToStream(dob);
                ByteBuffer fsTokens = ByteBuffer.Wrap(dob.GetData(), 0, dob.GetLength());
                amContainer.SetTokens(fsTokens);
            }
            appContext.SetAMContainerSpec(amContainer);
            // Set the priority for the application master
            // TODO - what is the range for priority? how to decide?
            Priority pri = Priority.NewInstance(amPriority);

            appContext.SetPriority(pri);
            // Set the queue to which this application is to be submitted in the RM
            appContext.SetQueue(amQueue);
            // Submit the application to the applications manager
            // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
            // Ignore the response as either a valid response object is returned on success
            // or an exception thrown to denote some form of a failure
            Log.Info("Submitting application to ASM");
            yarnClient.SubmitApplication(appContext);
            // TODO
            // Try submitting the same request again
            // app submission failure?
            // Monitor the application
            return(MonitorApplication(appId));
        }
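As a hedged driver sketch (the Client constructor and an Init member for argument parsing are assumptions; only Run() appears above), submission is typically kicked off like this:

    // Hedged sketch: the constructor and Init member are assumptions; Run()
    // submits the application master and monitors the application to completion.
    string[] args = new[] { "--jar", "AppMaster.jar", "--num_containers", "2" };  // illustrative
    Client client = new Client(new YarnConfiguration());
    bool ok = client.Init(args) && client.Run();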
Example #19
        /// <summary>Get the timeline entities that the given user have access to.</summary>
        /// <remarks>
        /// Get the timeline entities that the given user have access to. The meaning
        /// of each argument has been documented with
        /// <see cref="TimelineReader.GetEntities(string, long, long, long, string, long, NameValuePair, System.Collections.Generic.ICollection{E}, Sharpen.EnumSet{E}, CheckAcl)
        ///     "/>
        /// .
        /// </remarks>
        /// <seealso cref="TimelineReader.GetEntities(string, long, long, long, string, long, NameValuePair, System.Collections.Generic.ICollection{E}, Sharpen.EnumSet{E}, CheckAcl)
        ///     "/>
        /// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException"/>
        /// <exception cref="System.IO.IOException"/>
        public virtual TimelineEntities GetEntities(string entityType, NameValuePair primaryFilter
                                                    , ICollection <NameValuePair> secondaryFilter, long windowStart, long windowEnd,
                                                    string fromId, long fromTs, long limit, EnumSet <TimelineReader.Field> fields, UserGroupInformation
                                                    callerUGI)
        {
            TimelineEntities entities = null;

            entities = store.GetEntities(entityType, limit, windowStart, windowEnd, fromId, fromTs
                                         , primaryFilter, secondaryFilter, fields, new TimelineDataManager.CheckAclImpl(this
                                                                                                                        , callerUGI));
            if (entities == null)
            {
                return(new TimelineEntities());
            }
            return(entities);
        }
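A hedged call sketch: the dataManager instance, the entity-type string, and the window/limit values below are illustrative, and null is passed where an absent filter is intended.

    // Hedged sketch: read up to 100 entities of a made-up type, with no
    // primary or secondary filters, on behalf of the current user.
    TimelineEntities result = dataManager.GetEntities(
        "DS_APP_ATTEMPT",                       // entityType (illustrative)
        null,                                   // primaryFilter
        null,                                   // secondaryFilter
        0L,                                     // windowStart
        long.MaxValue,                          // windowEnd
        null,                                   // fromId
        0L,                                     // fromTs
        100L,                                   // limit
        null,                                   // fields (store default)
        UserGroupInformation.GetCurrentUser()); // callerUGI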
Example #20
        public virtual void TestConcat()
        {
            int               numFiles = 10;
            long              fileLen  = blockSize * 3;
            HdfsFileStatus    fStatus;
            FSDataInputStream stm;
            string            trg     = "/trg";
            Path              trgPath = new Path(trg);

            DFSTestUtil.CreateFile(dfs, trgPath, fileLen, ReplFactor, 1);
            fStatus = nn.GetFileInfo(trg);
            long trgLen    = fStatus.GetLen();
            long trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();

            Path[]   files = new Path[numFiles];
            // one buffer for the target file plus one per source file
            byte[][] bytes = new byte[numFiles + 1][];
            for (int b = 0; b < bytes.Length; b++)
            {
                bytes[b] = new byte[(int)fileLen];
            }
            LocatedBlocks[] lblocks = new LocatedBlocks[numFiles];
            long[]          lens    = new long[numFiles];
            stm = dfs.Open(trgPath);
            stm.ReadFully(0, bytes[0]);
            stm.Close();
            int i;

            for (i = 0; i < files.Length; i++)
            {
                files[i] = new Path("/file" + i);
                Path path = files[i];
                System.Console.Out.WriteLine("Creating file " + path);
                // make files with different content
                DFSTestUtil.CreateFile(dfs, path, fileLen, ReplFactor, i);
                fStatus = nn.GetFileInfo(path.ToUri().GetPath());
                lens[i] = fStatus.GetLen();
                NUnit.Framework.Assert.AreEqual(trgLen, lens[i]);
                // file of the same length.
                lblocks[i] = nn.GetBlockLocations(path.ToUri().GetPath(), 0, lens[i]);
                //read the file
                stm = dfs.Open(path);
                stm.ReadFully(0, bytes[i + 1]);
                //bytes[i][10] = 10;
                stm.Close();
            }
            // check permissions -try the operation with the "wrong" user
            UserGroupInformation user1 = UserGroupInformation.CreateUserForTesting("theDoctor"
                                                                                   , new string[] { "tardis" });
            DistributedFileSystem hdfs = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(user1
                                                                                            , conf);

            try
            {
                hdfs.Concat(trgPath, files);
                NUnit.Framework.Assert.Fail("Permission exception expected");
            }
            catch (IOException ie)
            {
                System.Console.Out.WriteLine("Got expected exception for permissions:" + ie.GetLocalizedMessage
                                                 ());
            }
            // expected
            // check count update
            ContentSummary cBefore = dfs.GetContentSummary(trgPath.GetParent());

            // resort file array, make INode id not sorted.
            for (int j = 0; j < files.Length / 2; j++)
            {
                Path tempPath = files[j];
                files[j] = files[files.Length - 1 - j];
                files[files.Length - 1 - j] = tempPath;
                byte[] tempBytes = bytes[1 + j];
                bytes[1 + j] = bytes[files.Length - 1 - j + 1];
                bytes[files.Length - 1 - j + 1] = tempBytes;
            }
            // now concatenate
            dfs.Concat(trgPath, files);
            // verify  count
            ContentSummary cAfter = dfs.GetContentSummary(trgPath.GetParent());

            NUnit.Framework.Assert.AreEqual(cBefore.GetFileCount(), cAfter.GetFileCount() + files
                                            .Length);
            // verify other stuff
            long totalLen    = trgLen;
            long totalBlocks = trgBlocks;

            for (i = 0; i < files.Length; i++)
            {
                totalLen    += lens[i];
                totalBlocks += lblocks[i].LocatedBlockCount();
            }
            System.Console.Out.WriteLine("total len=" + totalLen + "; totalBlocks=" + totalBlocks
                                         );
            fStatus = nn.GetFileInfo(trg);
            trgLen  = fStatus.GetLen();
            // new length
            // read the resulting file
            stm = dfs.Open(trgPath);
            byte[] byteFileConcat = new byte[(int)trgLen];
            stm.ReadFully(0, byteFileConcat);
            stm.Close();
            trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
            //verifications
            // 1. number of blocks
            NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks);
            // 2. file lengths
            NUnit.Framework.Assert.AreEqual(trgLen, totalLen);
            // 3. removal of the src file
            foreach (Path p in files)
            {
                fStatus = nn.GetFileInfo(p.ToUri().GetPath());
                NUnit.Framework.Assert.IsNull("File " + p + " still exists", fStatus);
                // file shouldn't exist
                // try to create a file with the same name
                DFSTestUtil.CreateFile(dfs, p, fileLen, ReplFactor, 1);
            }
            // 4. content
            CheckFileContent(byteFileConcat, bytes);
            // add a small file (less than a block)
            Path smallFile = new Path("/sfile");
            int  sFileLen  = 10;

            DFSTestUtil.CreateFile(dfs, smallFile, sFileLen, ReplFactor, 1);
            dfs.Concat(trgPath, new Path[] { smallFile });
            fStatus = nn.GetFileInfo(trg);
            trgLen  = fStatus.GetLen();
            // new length
            // check number of blocks
            trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
            NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks + 1);
            // and length
            NUnit.Framework.Assert.AreEqual(trgLen, totalLen + sFileLen);
        }
Example #21
        public virtual void TestDTManager()
        {
            Configuration conf = new Configuration(false);

            conf.SetLong(DelegationTokenManager.UpdateInterval, DayInSecs);
            conf.SetLong(DelegationTokenManager.MaxLifetime, DayInSecs);
            conf.SetLong(DelegationTokenManager.RenewInterval, DayInSecs);
            conf.SetLong(DelegationTokenManager.RemovalScanInterval, DayInSecs);
            conf.SetBoolean(DelegationTokenManager.EnableZkKey, enableZKKey);
            DelegationTokenManager tm = new DelegationTokenManager(conf, new Text("foo"));

            tm.Init();
            Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = (Org.Apache.Hadoop.Security.Token.Token
                                                                                        <DelegationTokenIdentifier>)tm.CreateToken(UserGroupInformation.GetCurrentUser()
                                                                                                                                   , "foo");
            NUnit.Framework.Assert.IsNotNull(token);
            tm.VerifyToken(token);
            Assert.True(tm.RenewToken(token, "foo") > Runtime.CurrentTimeMillis
                            ());
            tm.CancelToken(token, "foo");
            try
            {
                tm.VerifyToken(token);
                NUnit.Framework.Assert.Fail();
            }
            catch (IOException)
            {
            }
            catch (Exception)
            {
                //NOP
                NUnit.Framework.Assert.Fail();
            }
            tm.Destroy();
        }