Example #1
        public virtual void TestExceptionResultsInNormalError()
        {
            // In this test, we ensure that all handlers are called even if we throw an exception in one
            RefreshHandler exceptionalHandler = Org.Mockito.Mockito.Mock <RefreshHandler>();

            Org.Mockito.Mockito.Stub(exceptionalHandler.HandleRefresh(Org.Mockito.Mockito.AnyString
                                                                          (), Org.Mockito.Mockito.Any <string[]>())).ToThrow(new RuntimeException("Exceptional Handler Throws Exception"
                                                                                                                                                  ));
            RefreshHandler otherExceptionalHandler = Org.Mockito.Mockito.Mock <RefreshHandler>
                                                         ();

            Org.Mockito.Mockito.Stub(otherExceptionalHandler.HandleRefresh(Org.Mockito.Mockito
                                                                           .AnyString(), Org.Mockito.Mockito.Any <string[]>())).ToThrow(new RuntimeException
                                                                                                                                            ("More Exceptions"));
            RefreshRegistry.DefaultRegistry().Register("exceptional", exceptionalHandler);
            RefreshRegistry.DefaultRegistry().Register("exceptional", otherExceptionalHandler
                                                       );
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "exceptional" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual(-1, exitCode);
            // Exceptions result in a -1
            Org.Mockito.Mockito.Verify(exceptionalHandler).HandleRefresh("exceptional", new string
                                                                         [] {  });
            Org.Mockito.Mockito.Verify(otherExceptionalHandler).HandleRefresh("exceptional",
                                                                              new string[] {  });
            RefreshRegistry.DefaultRegistry().UnregisterAll("exceptional");
        }
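For reference, a concrete handler of the kind the mocks above stand in for could look like the sketch below. The HandleRefresh(string, string[]) signature and the RefreshResponse(int, string) constructor are taken from the calls in these examples; the class name and the zero status it returns are illustrative assumptions, not part of the original test.

        // Hedged sketch of a concrete RefreshHandler such as the mocks above imitate.
        // The signature mirrors the mocked HandleRefresh(string, string[]) calls;
        // the class name and the zero status are illustrative assumptions.
        internal class NoOpRefreshHandler : RefreshHandler
        {
            public virtual RefreshResponse HandleRefresh(string identifier, string[] args)
            {
                // A zero status makes `dfsadmin -refresh <host:port> <identifier>` report success.
                return new RefreshResponse(0, "No-op refresh for " + identifier);
            }
        }

Such a handler would be registered the same way as the mocks, e.g. RefreshRegistry.DefaultRegistry().Register("exceptional", new NoOpRefreshHandler()).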
Example #2
        public virtual void TestDFSAdminDatanodeUpgradeControlCommands()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                // check the datanode
                string   dnAddr = dn.GetDatanodeId().GetIpcAddr(false);
                string[] args1  = new string[] { "-getDatanodeInfo", dnAddr };
                RunCmd(dfsadmin, true, args1);
                // issue shutdown to the datanode.
                string[] args2 = new string[] { "-shutdownDatanode", dnAddr, "upgrade" };
                RunCmd(dfsadmin, true, args2);
                // the datanode should be down.
                Sharpen.Thread.Sleep(2000);
                NUnit.Framework.Assert.IsFalse("DataNode should exit", dn.IsDatanodeUp());
                // ping should fail.
                NUnit.Framework.Assert.AreEqual(-1, dfsadmin.Run(args1));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #3
        public virtual void TestFetchImage()
        {
            FetchedImageFile.Mkdirs();
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = null;

            try
            {
                DFSAdmin dfsAdmin = new DFSAdmin();
                dfsAdmin.SetConf(conf);
                RunFetchImage(dfsAdmin, cluster);
                fs = cluster.GetFileSystem();
                fs.Mkdirs(new Path("/foo"));
                fs.Mkdirs(new Path("/foo2"));
                fs.Mkdirs(new Path("/foo3"));
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                     false);
                cluster.GetNameNodeRpc().SaveNamespace();
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                     false);
                RunFetchImage(dfsAdmin, cluster);
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #4
        public virtual void TestRefresh()
        {
            NUnit.Framework.Assert.IsTrue("Mock queue should have been constructed", mockQueueConstructions
                                          > 0);
            NUnit.Framework.Assert.IsTrue("Puts are routed through MockQueue", CanPutInMockQueue
                                              ());
            int lastMockQueueConstructions = mockQueueConstructions;
            // Replace queue with the queue specified in core-site.xml, which would be the LinkedBlockingQueue
            DFSAdmin admin = new DFSAdmin(config);

            string[] args     = new string[] { "-refreshCallQueue" };
            int      exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should return 0", 0, exitCode);
            NUnit.Framework.Assert.AreEqual("Mock queue should have no additional constructions"
                                            , lastMockQueueConstructions, mockQueueConstructions);
            try
            {
                NUnit.Framework.Assert.IsFalse("Puts are routed through LBQ instead of MockQueue"
                                               , CanPutInMockQueue());
            }
            catch (IOException)
            {
                NUnit.Framework.Assert.Fail("Could not put into queue at all");
            }
        }
Example #5
        public virtual void TestRollbackCommand()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            Path           foo     = new Path("/foo");
            Path           bar     = new Path("/bar");

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade",
                                                                               "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                // check NNStorage
                NNStorage storage = cluster.GetNamesystem().GetFSImage().GetStorage();
                CheckNNStorage(storage, 3, -1);   // (startSegment, mkdir, endSegment)
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            NameNode nn = null;

            try
            {
                nn = NameNode.CreateNameNode(new string[] { "-rollingUpgrade", "rollback" }, conf
                                             );
                // make sure /foo is still there, but /bar is not
                INode fooNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(foo.ToString()
                                                                                   );
                NUnit.Framework.Assert.IsNotNull(fooNode);
                INode barNode = nn.GetNamesystem().GetFSDirectory().GetINode4Write(bar.ToString()
                                                                                   );
                NUnit.Framework.Assert.IsNull(barNode);
                // check the details of NNStorage
                NNStorage storage = nn.GetNamesystem().GetFSImage().GetStorage();
                // (startSegment, upgrade marker, mkdir, endSegment)
                CheckNNStorage(storage, 3, 7);
            }
            finally
            {
                if (nn != null)
                {
                    nn.Stop();
                    nn.Join();
                }
            }
        }
Example #6
        public virtual void TestMultipleReturnCodeMerging()
        {
            // Two handlers which return two non-zero values
            RefreshHandler handlerOne = Org.Mockito.Mockito.Mock <RefreshHandler>();

            Org.Mockito.Mockito.Stub(handlerOne.HandleRefresh(Org.Mockito.Mockito.AnyString()
                                                              , Org.Mockito.Mockito.Any <string[]>())).ToReturn(new RefreshResponse(23, "Twenty Three"
                                                                                                                                    ));
            RefreshHandler handlerTwo = Org.Mockito.Mockito.Mock <RefreshHandler>();

            Org.Mockito.Mockito.Stub(handlerTwo.HandleRefresh(Org.Mockito.Mockito.AnyString()
                                                              , Org.Mockito.Mockito.Any <string[]>())).ToReturn(new RefreshResponse(10, "Ten"));
            // Then registered to the same ID
            RefreshRegistry.DefaultRegistry().Register("shared", handlerOne);
            RefreshRegistry.DefaultRegistry().Register("shared", handlerTwo);
            // We refresh both
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "shared" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual(-1, exitCode);
            // We get -1 because of our logic for melding non-zero return codes
            // Verify we called both
            Org.Mockito.Mockito.Verify(handlerOne).HandleRefresh("shared", new string[] {  });
            Org.Mockito.Mockito.Verify(handlerTwo).HandleRefresh("shared", new string[] {  });
            RefreshRegistry.DefaultRegistry().UnregisterAll("shared");
        }
Example #7
        public virtual void SetUp()
        {
            Configuration conf = new Configuration();

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            admin    = new DFSAdmin();
            datanode = cluster.GetDataNodes()[0];
        }
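The fixture's teardown is not part of this listing; a minimal counterpart to the SetUp above, shutting down the MiniDFSCluster it builds, might look like the following sketch (the cluster field is the one assigned in SetUp; the method itself is an assumption).

        // Teardown sketch pairing with SetUp above (assumed; not shown in the source listing).
        public virtual void TearDown()
        {
            if (cluster != null)
            {
                cluster.Shutdown();
                cluster = null;
            }
        }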
Example #8
        public virtual void TestPipelineRecoveryOnRestartFailure()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientDatanodeRestartTimeoutKey, "5");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 2;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fileSys = cluster.GetFileSystem();
                Path       file    = new Path("dataprotocol3.dat");
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)2, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                string   dnAddr1  = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args1 = new string[] { "-shutdownDatanode", dnAddr1, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args1));
                Sharpen.Thread.Sleep(4000);
                // This should succeed without restarting the node. The restart will
                // expire and regular pipeline recovery will kick in.
                @out.Close();
                // At this point there is only one node in the cluster.
                @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                dn = cluster.GetDataNodes()[1];
                string dnAddr2 = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args2 = new string[] { "-shutdownDatanode", dnAddr2, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args2));
                Sharpen.Thread.Sleep(4000);
                try
                {
                    // close should fail
                    @out.Close();
                    System.Diagnostics.Debug.Assert(false);
                }
                catch (IOException)
                {
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #9
        public virtual void TestInvalidCommand()
        {
            DFSAdmin admin = new DFSAdmin(config);

            string[] args     = new string[] { "-refresh", "nn" };
            int      exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should fail due to bad args", -1, exitCode
                                            );
        }
Example #10
        public virtual void TestRollbackWithQJM()
        {
            Configuration      conf    = new HdfsConfiguration();
            MiniJournalCluster mjc     = null;
            MiniDFSCluster     cluster = null;
            Path foo = new Path("/foo");
            Path bar = new Path("/bar");

            try
            {
                mjc = new MiniJournalCluster.Builder(conf).NumJournalNodes(NumJournalNodes).Build
                          ();
                conf.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, mjc.GetQuorumJournalURI(JournalId)
                         .ToString());
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                DFSAdmin dfsadmin         = new DFSAdmin(conf);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(new string[] { "-rollingUpgrade",
                                                                               "prepare" }));
                dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                // create new directory
                dfs.Mkdirs(bar);
                dfs.Close();
                // rollback
                cluster.RestartNameNode("-rollingUpgrade", "rollback");
                // make sure /foo is still there, but /bar is not
                dfs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
                // check storage in JNs
                for (int i = 0; i < NumJournalNodes; i++)
                {
                    FilePath dir = mjc.GetCurrentDir(i, JournalId);
                    // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
                    // marker, mkdir, endSegment)
                    CheckJNStorage(dir, 4, 7);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (mjc != null)
                {
                    mjc.Shutdown();
                }
            }
        }
Example #11
 /// <exception cref="System.Exception"/>
 public static void RunCmd(DFSAdmin dfsadmin, bool success, params string[] args)
 {
     if (success)
     {
         NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args));
     }
     else
     {
         NUnit.Framework.Assert.IsTrue(dfsadmin.Run(args) != 0);
     }
 }
Example #12
        public virtual void TestInvalidIdentifier()
        {
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "unregisteredIdentity" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should fail due to no handler registered"
                                            , -1, exitCode);
        }
Example #13
        /// <exception cref="System.Exception"/>
        private void StartRollingUpgrade()
        {
            Log.Info("Starting rolling upgrade");
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            DFSAdmin dfsadmin = new DFSAdmin(conf);

            TestRollingUpgrade.RunCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
            TriggerHeartBeats();
            // Ensure datanode rolling upgrade is started
            NUnit.Framework.Assert.IsTrue(dn0.GetFSDataset().TrashEnabled(blockPoolId));
        }
Example #14
        public virtual void TestUnregistration()
        {
            RefreshRegistry.DefaultRegistry().UnregisterAll("firstHandler");
            // And now this should fail
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "firstHandler" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should return -1", -1, exitCode);
        }
Example #15
        /// <summary>
        /// Run `hdfs dfsadmin -fetchImage ...' and verify that the downloaded image is
        /// correct.
        /// </summary>
        /// <exception cref="System.Exception"/>
        private static void RunFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
        {
            int retVal = dfsAdmin.Run(new string[] { "-fetchImage", FetchedImageFile.GetPath(
                                                         ) });

            NUnit.Framework.Assert.AreEqual(0, retVal);
            FilePath highestImageOnNn = GetHighestFsImageOnCluster(cluster);
            MD5Hash  expected         = MD5FileUtils.ComputeMd5ForFile(highestImageOnNn);
            MD5Hash  actual           = MD5FileUtils.ComputeMd5ForFile(new FilePath(FetchedImageFile, highestImageOnNn
                                                                                    .GetName()));

            NUnit.Framework.Assert.AreEqual(expected, actual);
        }
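This helper (and TestFetchImage in Example #3) relies on a static FetchedImageFile directory whose declaration is not included in this listing. A plausible declaration, assuming Sharpen's FilePath wrapper and a temporary directory rather than whatever location the original test class uses, would be:

        // Assumed declaration of the FetchedImageFile directory used above and in
        // Example #3; the actual field in the source test class is not shown here.
        private static readonly FilePath FetchedImageFile =
            new FilePath(System.IO.Path.Combine(System.IO.Path.GetTempPath(), "TestFetchImage"));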
Example #16
        /// <exception cref="System.Exception"/>
        private void FinalizeRollingUpgrade()
        {
            Log.Info("Finalizing rolling upgrade");
            DFSAdmin dfsadmin = new DFSAdmin(conf);

            TestRollingUpgrade.RunCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
            TriggerHeartBeats();
            // Ensure the datanode's rolling upgrade has been finalized (trash disabled)
            NUnit.Framework.Assert.IsFalse(dn0.GetFSDataset().TrashEnabled(blockPoolId));
            BlockPoolSliceStorage bps = dn0.GetStorage().GetBPStorage(blockPoolId);

            NUnit.Framework.Assert.IsFalse(bps.TrashEnabled());
        }
Example #17
        /// <exception cref="System.Exception"/>
        private void RunCommand(DFSAdmin admin, string[] args, bool expectError)
        {
            int val = admin.Run(args);

            if (expectError)
            {
                NUnit.Framework.Assert.AreEqual(val, -1);
            }
            else
            {
                NUnit.Framework.Assert.IsTrue(val >= 0);
            }
        }
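Several of the quota examples below (for instance Example #27) also call a params-style RunCommand(admin, expectError, ...) overload that is not reproduced in this listing. A minimal sketch that simply delegates to the array-based helper above (only the call signature is taken from the examples; the delegation is an assumption):

        // Params-style overload used by the quota tests below; its body is assumed
        // to delegate to the array-based RunCommand above.
        private void RunCommand(DFSAdmin admin, bool expectError, params string[] args)
        {
            RunCommand(admin, args, expectError);
        }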
Example #18
            /// <exception cref="System.Exception"/>
            public object Run()
            {
                NUnit.Framework.Assert.AreEqual("Not running as new user", username, UserGroupInformation
                                                .GetCurrentUser().GetShortUserName());
                DFSAdmin userAdmin = new DFSAdmin(conf);

                args2[1] = "100";
                this._enclosing.RunCommand(userAdmin, args2, true);
                this._enclosing.RunCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
                string[] args3 = new string[] { "-clrQuota", parent.ToString() };
                this._enclosing.RunCommand(userAdmin, args3, true);
                this._enclosing.RunCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
                return(null);
            }
Example #19
        /// <exception cref="System.IO.IOException"/>
        private void CheckDFSAdminDecommissionStatus(IList <DatanodeDescriptor> expectedDecomm
                                                     , DistributedFileSystem dfs, DFSAdmin admin)
        {
            ByteArrayOutputStream baos   = new ByteArrayOutputStream();
            TextWriter            ps     = new TextWriter(baos);
            TextWriter            oldOut = System.Console.Out;

            Runtime.SetOut(ps);
            try
            {
                // Parse DFSAdmin just to check the count
                admin.Report(new string[] { "-decommissioning" }, 0);
                string[] lines = baos.ToString().Split("\n");
                int?     num   = null;
                int      count = 0;
                foreach (string line in lines)
                {
                    if (line.StartsWith("Decommissioning datanodes"))
                    {
                        // Pull out the "(num)" and parse it into an int
                        string temp = line.Split(" ")[2];
                        num = System.Convert.ToInt32((string)temp.SubSequence(1, temp.Length - 2));
                    }
                    if (line.Contains("Decommission in progress"))
                    {
                        count++;
                    }
                }
                NUnit.Framework.Assert.IsTrue("No decommissioning output", num != null);
                NUnit.Framework.Assert.AreEqual("Unexpected number of decomming DNs", expectedDecomm
                                                .Count, num);
                NUnit.Framework.Assert.AreEqual("Unexpected number of decomming DNs", expectedDecomm
                                                .Count, count);
                // Check Java API for correct contents
                IList <DatanodeInfo> decomming = new AList <DatanodeInfo>(Arrays.AsList(dfs.GetDataNodeStats
                                                                                            (HdfsConstants.DatanodeReportType.Decommissioning)));
                NUnit.Framework.Assert.AreEqual("Unexpected number of decomming DNs", expectedDecomm
                                                .Count, decomming.Count);
                foreach (DatanodeID id in expectedDecomm)
                {
                    NUnit.Framework.Assert.IsTrue("Did not find expected decomming DN " + id, decomming
                                                  .Contains(id));
                }
            }
            finally
            {
                Runtime.SetOut(oldOut);
            }
        }
Example #20
        public virtual void TestGroupMappingRefresh()
        {
            DFSAdmin admin = new DFSAdmin(config);

            string[] args   = new string[] { "-refreshUserToGroupsMappings" };
            Groups   groups = Groups.GetUserToGroupsMappingService(config);
            string   user   = UserGroupInformation.GetCurrentUser().GetUserName();

            System.Console.Out.WriteLine("first attempt:");
            IList <string> g1 = groups.GetGroups(user);

            string[] str_groups = new string[g1.Count];
            Sharpen.Collections.ToArray(g1, str_groups);
            System.Console.Out.WriteLine(Arrays.ToString(str_groups));
            System.Console.Out.WriteLine("second attempt, should be same:");
            IList <string> g2 = groups.GetGroups(user);

            Sharpen.Collections.ToArray(g2, str_groups);
            System.Console.Out.WriteLine(Arrays.ToString(str_groups));
            for (int i = 0; i < g2.Count; i++)
            {
                NUnit.Framework.Assert.AreEqual("Should be same group ", g1[i], g2[i]);
            }
            admin.Run(args);
            System.Console.Out.WriteLine("third attempt(after refresh command), should be different:"
                                         );
            IList <string> g3 = groups.GetGroups(user);

            Sharpen.Collections.ToArray(g3, str_groups);
            System.Console.Out.WriteLine(Arrays.ToString(str_groups));
            for (int i_1 = 0; i_1 < g3.Count; i_1++)
            {
                NUnit.Framework.Assert.IsFalse("Should be different group: " + g1[i_1] + " and "
                                               + g3[i_1], g1[i_1].Equals(g3[i_1]));
            }
            // test time out
            Sharpen.Thread.Sleep(groupRefreshTimeoutSec * 1100);
            System.Console.Out.WriteLine("fourth attempt(after timeout), should be different:"
                                         );
            IList <string> g4 = groups.GetGroups(user);

            Sharpen.Collections.ToArray(g4, str_groups);
            System.Console.Out.WriteLine(Arrays.ToString(str_groups));
            for (int i_2 = 0; i_2 < g4.Count; i_2++)
            {
                NUnit.Framework.Assert.IsFalse("Should be different group ", g3[i_2].Equals(g4[i_2
                                                                                            ]));
            }
        }
Example #21
        public virtual void TestValidIdentifier()
        {
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "firstHandler" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should succeed", 0, exitCode);
            Org.Mockito.Mockito.Verify(firstHandler).HandleRefresh("firstHandler", new string
                                                                   [] {  });
            // Second handler was never called
            Org.Mockito.Mockito.Verify(secondHandler, Org.Mockito.Mockito.Never()).HandleRefresh
                (Org.Mockito.Mockito.AnyString(), Org.Mockito.Mockito.Any <string[]>());
        }
Example #22
        public virtual void TestDfsAdminDeleteBlockPool()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                conf.Set(DFSConfigKeys.DfsNameservices, "namesServerId1,namesServerId2");
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleFederatedTopology
                                                                          (conf.Get(DFSConfigKeys.DfsNameservices))).NumDataNodes(1).Build();
                cluster.WaitActive();
                FileSystem fs1 = cluster.GetFileSystem(0);
                FileSystem fs2 = cluster.GetFileSystem(1);
                DFSTestUtil.CreateFile(fs1, new Path("/alpha"), 1024, (short)1, 54);
                DFSTestUtil.CreateFile(fs2, new Path("/beta"), 1024, (short)1, 54);
                DataNode      dn1            = cluster.GetDataNodes()[0];
                string        bpid1          = cluster.GetNamesystem(0).GetBlockPoolId();
                string        bpid2          = cluster.GetNamesystem(1).GetBlockPoolId();
                FilePath      dn1StorageDir1 = cluster.GetInstanceStorageDir(0, 0);
                FilePath      dn1StorageDir2 = cluster.GetInstanceStorageDir(0, 1);
                Configuration nn1Conf        = cluster.GetConfiguration(0);
                nn1Conf.Set(DFSConfigKeys.DfsNameservices, "namesServerId1");
                dn1.RefreshNamenodes(nn1Conf);
                NUnit.Framework.Assert.AreEqual(1, dn1.GetAllBpOs().Length);
                DFSAdmin admin      = new DFSAdmin(nn1Conf);
                string   dn1Address = dn1.GetDatanodeId().GetIpAddr() + ":" + dn1.GetIpcPort();
                string[] args       = new string[] { "-deleteBlockPool", dn1Address, bpid2 };
                int      ret        = admin.Run(args);
                NUnit.Framework.Assert.IsFalse(0 == ret);
                VerifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
                VerifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
                string[] forceArgs = new string[] { "-deleteBlockPool", dn1Address, bpid2, "force" };
                ret = admin.Run(forceArgs);
                NUnit.Framework.Assert.AreEqual(0, ret);
                VerifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
                VerifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
                //bpid1 remains good
                VerifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
                VerifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
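The VerifyBlockPoolDirectories helper used above is not part of this listing. A simplified sketch of what it checks is shown below; it assumes the usual DataNode layout in which block pool data lives under storageDir/current/bpid, and the real helper in the source may inspect additional sub-directories.

        // Simplified sketch of VerifyBlockPoolDirectories (assumed; the original helper
        // is not shown in this listing and may check more than bare existence).
        private static void VerifyBlockPoolDirectories(bool shouldExist, FilePath storageDir, string bpid)
        {
            // Block pool data for a DataNode storage directory lives under current/<bpid>.
            FilePath bpDir = new FilePath(new FilePath(storageDir, "current"), bpid);
            NUnit.Framework.Assert.AreEqual(shouldExist, bpDir.Exists());
        }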
Example #23
 /// <exception cref="System.Exception"/>
 private void SetUpHaCluster(bool security)
 {
     conf = new Configuration();
     conf.SetBoolean(CommonConfigurationKeys.HadoopSecurityAuthorization, security);
     cluster = new MiniQJMHACluster.Builder(conf).Build();
     SetHAConf(conf, cluster.GetDfsCluster().GetNameNode(0).GetHostAndPort(), cluster.
               GetDfsCluster().GetNameNode(1).GetHostAndPort());
     cluster.GetDfsCluster().GetNameNode(0).GetHostAndPort();
     admin = new DFSAdmin();
     admin.SetConf(conf);
     NUnit.Framework.Assert.IsTrue(HAUtil.IsHAEnabled(conf, "ns1"));
     originOut = System.Console.Out;
     originErr = System.Console.Error;
     Runtime.SetOut(new TextWriter(@out));
     Runtime.SetErr(new TextWriter(err));
 }
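A matching teardown for this HA setup would normally restore the console streams captured in originOut/originErr and shut the MiniQJMHACluster down. A minimal sketch under that assumption (the original fixture's teardown is not shown here):

 // Teardown sketch pairing with SetUpHaCluster above (assumed; not from the listing).
 private void TearDownHaCluster()
 {
     // Restore the console streams redirected in SetUpHaCluster.
     Runtime.SetOut(originOut);
     Runtime.SetErr(originErr);
     if (cluster != null)
     {
         cluster.Shutdown();
     }
 }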
Example #24
        public virtual void TestVariableArgs()
        {
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "secondHandler", "one" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should return 2", 2, exitCode);
            exitCode = admin.Run(new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                                    (), "secondHandler", "one", "two" });
            NUnit.Framework.Assert.AreEqual("DFSAdmin should now return 3", 3, exitCode);
            Org.Mockito.Mockito.Verify(secondHandler).HandleRefresh("secondHandler", new string
                                                                    [] { "one" });
            Org.Mockito.Mockito.Verify(secondHandler).HandleRefresh("secondHandler", new string
                                                                    [] { "one", "two" });
        }
Example #25
        public virtual void TestMultipleRegistration()
        {
            RefreshRegistry.DefaultRegistry().Register("sharedId", firstHandler);
            RefreshRegistry.DefaultRegistry().Register("sharedId", secondHandler);
            // this should trigger both
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refresh", "localhost:" + cluster.GetNameNodePort
                                               (), "sharedId", "one" };
            int exitCode = admin.Run(args);

            NUnit.Framework.Assert.AreEqual(-1, exitCode);
            // -1 because one of the responses is unregistered
            // verify we called both
            Org.Mockito.Mockito.Verify(firstHandler).HandleRefresh("sharedId", new string[] {
                "one"
            });
            Org.Mockito.Mockito.Verify(secondHandler).HandleRefresh("sharedId", new string[]
                                                                    { "one" });
            RefreshRegistry.DefaultRegistry().UnregisterAll("sharedId");
        }
Example #26
        public virtual void TestPipelineRecoveryOnOOB()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientDatanodeRestartTimeoutKey, "15");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 1;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fileSys = cluster.GetFileSystem();
                Path       file    = new Path("dataprotocol2.dat");
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                string   dnAddr   = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args1 = new string[] { "-shutdownDatanode", dnAddr, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args1));
                // Wait long enough to receive an OOB ack before closing the file.
                Sharpen.Thread.Sleep(4000);
                // Restart the datanode
                cluster.RestartDataNode(0, true);
                // The following forces a data packet and end of block packets to be sent.
                @out.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #27
        public virtual void TestQuotaCommands()
        {
            Configuration conf = new HdfsConfiguration();
            // set a smaller block size so that we can test with smaller
            // Space quotas
            int DefaultBlockSize = 512;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DFSAdmin admin            = new DFSAdmin(conf);

            try
            {
                int   fileLen     = 1024;
                short replication = 5;
                long  spaceQuota  = fileLen * replication * 15 / 8;
                // 1: create a directory /test and set its quota to be 3
                Path parent = new Path("/test");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(parent));
                string[] args = new string[] { "-setQuota", "3", parent.ToString() };
                RunCommand(admin, args, false);
                //try setting space quota with a 'binary prefix'
                RunCommand(admin, false, "-setSpaceQuota", "2t", parent.ToString());
                NUnit.Framework.Assert.AreEqual(2L << 40, dfs.GetContentSummary(parent).GetSpaceQuota
                                                    ());
                // set diskspace quota to 10000
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota), parent
                           .ToString());
                // 2: create directory /test/data0
                Path childDir0 = new Path(parent, "data0");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir0));
                // 3: create a file /test/datafile0
                Path childFile0 = new Path(parent, "datafile0");
                DFSTestUtil.CreateFile(fs, childFile0, fileLen, replication, 0);
                // 4: count -q /test
                ContentSummary c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 5: count -q /test/data0
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // check disk space consumed
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                // 6: create a directory /test/data1
                Path childDir1    = new Path(parent, "data1");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(childDir1));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                OutputStream fout;
                // 7: create a file /test/datafile1
                Path childFile1 = new Path(parent, "datafile1");
                hasException = false;
                try
                {
                    fout = dfs.Create(childFile1);
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 8: clear quota /test
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 9: clear quota /test/data0
                RunCommand(admin, new string[] { "-clrQuota", childDir0.ToString() }, false);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // 10: create a file /test/datafile1
                fout = dfs.Create(childFile1, replication);
                // 10.s: but writing fileLen bytes should result in an quota exception
                try
                {
                    fout.Write(new byte[fileLen]);
                    fout.Close();
                    NUnit.Framework.Assert.Fail();
                }
                catch (QuotaExceededException)
                {
                    IOUtils.CloseStream(fout);
                }
                //delete the file
                dfs.Delete(childFile1, false);
                // 9.s: clear diskspace quota
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), -1);
                // now creating childFile1 should succeed
                DFSTestUtil.CreateFile(dfs, childFile1, fileLen, replication, 0);
                // 11: set the quota of /test to be 1
                // HADOOP-5872 - we can set quota even if it is immediately violated
                args = new string[] { "-setQuota", "1", parent.ToString() };
                RunCommand(admin, args, false);
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(fileLen),
                           args[2]);
                // for space quota
                // 12: set the quota of /test/data0 to be 1
                args = new string[] { "-setQuota", "1", childDir0.ToString() };
                RunCommand(admin, args, false);
                // 13: not able create a directory under data0
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(new Path(childDir0, "in")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount() + c.GetFileCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 1);
                // 14a: set quota on a non-existent directory
                Path nonExistentPath = new Path("/test1");
                NUnit.Framework.Assert.IsFalse(dfs.Exists(nonExistentPath));
                args = new string[] { "-setQuota", "1", nonExistentPath.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.ToString());
                // for space quota
                // 14b: set quota on a file
                NUnit.Framework.Assert.IsTrue(dfs.IsFile(childFile0));
                args[1] = childFile0.ToString();
                RunCommand(admin, args, true);
                // same for space quota
                RunCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
                // 15a: clear quota on a file
                args[0] = "-clrQuota";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 15b: clear quota on a non-existent directory
                args[1] = nonExistentPath.ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 16a: set the quota of /test to be 0
                args = new string[] { "-setQuota", "0", parent.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "0", args[2]);
                // 16b: set the quota of /test to be -1
                args[1] = "-1";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16c: set the quota of /test to be Long.MAX_VALUE+1
                args[1] = (long.MaxValue + 1L).ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16d: set the quota of /test to be a non integer
                args[1] = "33aa1.5";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16e: set space quota with a value larger than Long.MAX_VALUE
                RunCommand(admin, true, "-setSpaceQuota", (long.MaxValue / 1024 / 1024 + 1024) +
                           "m", args[2]);
                // 17:  setQuota by a non-administrator
                string username          = "******";
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new
                                                                                     string[] { "groupyy" });
                string[] args2 = (string[])args.Clone();
                // need final ref for doAs block
                ugi.DoAs(new _PrivilegedExceptionAction_275(this, username, conf, args2, parent));
                // 18: clrQuota by a non-administrator
                // 19: clrQuota on the root directory ("/") should fail
                RunCommand(admin, true, "-clrQuota", "/");
                // 20: setQuota on the root directory ("/") should succeed
                RunCommand(admin, false, "-setQuota", "1000000", "/");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                // 2: create directory /test/data2
                Path childDir2 = new Path(parent, "data2");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir2));
                Path childFile2  = new Path(childDir2, "datafile2");
                Path childFile3  = new Path(childDir2, "datafile3");
                long spaceQuota2 = DefaultBlockSize * replication;
                long fileLen2    = DefaultBlockSize;
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                // clear space quota
                RunCommand(admin, false, "-clrSpaceQuota", childDir2.ToString());
                // create a file that is greater than the size of space quota
                DFSTestUtil.CreateFile(fs, childFile2, fileLen2, replication, 0);
                // now set space quota again. This should succeed
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile3, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // now test the same for root
                Path childFile4 = new Path("/", "datafile2");
                Path childFile5 = new Path("/", "datafile3");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                DFSTestUtil.CreateFile(fs, childFile4, fileLen2, replication, 0);
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile5, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #28
        public virtual void TestBlockAllocationAdjustsUsageConservatively()
        {
            Configuration conf      = new HdfsConfiguration();
            int           BlockSize = 6 * 1024;

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs         = cluster.GetFileSystem();
            DFSAdmin   admin      = new DFSAdmin(conf);
            string     nnAddr     = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
            string     webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;

            System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
            FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);

            try
            {
                Path dir           = new Path("/test");
                Path file1         = new Path("/test/test1");
                Path file2         = new Path("/test/test2");
                bool exceededQuota = false;
                int  QuotaSize     = 3 * BlockSize;
                // total space usage including
                // repl.
                int            FileSize = BlockSize / 2;
                ContentSummary c;
                // Create the directory and set the quota
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(QuotaSize)
                           , dir.ToString());
                // Creating a file should use half the quota
                DFSTestUtil.CreateFile(fs, file1, FileSize, (short)3, 1L);
                DFSTestUtil.WaitReplication(fs, file1, (short)3);
                c = fs.GetContentSummary(dir);
                CheckContentSummary(c, webhdfs.GetContentSummary(dir));
                NUnit.Framework.Assert.AreEqual("Quota is half consumed", QuotaSize / 2, c.GetSpaceConsumed
                                                    ());
                // We can not create the 2nd file because even though the total spaced
                // used by two files (2 * 3 * 512/2) would fit within the quota (3 * 512)
                // when a block for a file is created the space used is adjusted
                // conservatively (3 * block size, ie assumes a full block is written)
                // which will violate the quota (3 * block size) since we've already
                // used half the quota for the first file.
                try
                {
                    DFSTestUtil.CreateFile(fs, file2, FileSize, (short)3, 1L);
                }
                catch (QuotaExceededException)
                {
                    exceededQuota = true;
                }
                NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #29
        public virtual void TestMultipleFilesSmallerThanOneBlock()
        {
            Configuration conf      = new HdfsConfiguration();
            int           BlockSize = 6 * 1024;

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs         = cluster.GetFileSystem();
            DFSAdmin   admin      = new DFSAdmin(conf);
            string     nnAddr     = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
            string     webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;

            System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
            FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);

            try
            {
                Path           dir           = new Path("/test");
                bool           exceededQuota = false;
                ContentSummary c;
                // 1kb file
                // 6kb block
                // 192kb quota
                int FileSize  = 1024;
                int QuotaSize = 32 * (int)fs.GetDefaultBlockSize(dir);
                NUnit.Framework.Assert.AreEqual(6 * 1024, fs.GetDefaultBlockSize(dir));
                NUnit.Framework.Assert.AreEqual(192 * 1024, QuotaSize);
                // Create the dir and set the quota. We need to enable the quota before
                // writing the files as setting the quota afterwards will over-write
                // the cached disk space used for quota verification with the actual
                // amount used as calculated by INode#spaceConsumedInTree.
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(QuotaSize)
                           , dir.ToString());
                // We can create at most 59 files because block allocation is
                // conservative and initially assumes a full block is used, so we
                // need to leave at least 3 * BLOCK_SIZE free space when allocating
                // the last block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
                for (int i = 0; i < 59; i++)
                {
                    Path file = new Path("/test/test" + i);
                    DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
                    DFSTestUtil.WaitReplication(fs, file, (short)3);
                }
                // Should account for all 59 files (almost QUOTA_SIZE)
                c = fs.GetContentSummary(dir);
                CheckContentSummary(c, webhdfs.GetContentSummary(dir));
                NUnit.Framework.Assert.AreEqual("Invalid space consumed", 59 * FileSize * 3, c.GetSpaceConsumed
                                                    ());
                NUnit.Framework.Assert.AreEqual("Invalid space consumed", QuotaSize - (59 * FileSize
                                                                                       * 3), 3 * (fs.GetDefaultBlockSize(dir) - FileSize));
                // Now check that trying to create another file violates the quota
                try
                {
                    Path file = new Path("/test/test59");
                    DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
                    DFSTestUtil.WaitReplication(fs, file, (short)3);
                }
                catch (QuotaExceededException)
                {
                    exceededQuota = true;
                }
                NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
                NUnit.Framework.Assert.AreEqual(2, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #30
        public virtual void TestRefreshSuperUserGroupsConfiguration()
        {
            string SuperUser = "******";

            string[] GroupNames1 = new string[] { "gr1", "gr2" };
            string[] GroupNames2 = new string[] { "gr3", "gr4" };
            //keys in conf
            string userKeyGroups = DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey
                                       (SuperUser);
            string userKeyHosts = DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey
                                      (SuperUser);

            config.Set(userKeyGroups, "gr3,gr4,gr5");
            // superuser can proxy for this group
            config.Set(userKeyHosts, "127.0.0.1");
            ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
            UserGroupInformation ugi1  = Org.Mockito.Mockito.Mock <UserGroupInformation>();
            UserGroupInformation ugi2  = Org.Mockito.Mockito.Mock <UserGroupInformation>();
            UserGroupInformation suUgi = Org.Mockito.Mockito.Mock <UserGroupInformation>();

            Org.Mockito.Mockito.When(ugi1.GetRealUser()).ThenReturn(suUgi);
            Org.Mockito.Mockito.When(ugi2.GetRealUser()).ThenReturn(suUgi);
            Org.Mockito.Mockito.When(suUgi.GetShortUserName()).ThenReturn(SuperUser);
            // super user
            Org.Mockito.Mockito.When(suUgi.GetUserName()).ThenReturn(SuperUser + "L");
            // super user
            Org.Mockito.Mockito.When(ugi1.GetShortUserName()).ThenReturn("user1");
            Org.Mockito.Mockito.When(ugi2.GetShortUserName()).ThenReturn("user2");
            Org.Mockito.Mockito.When(ugi1.GetUserName()).ThenReturn("userL1");
            Org.Mockito.Mockito.When(ugi2.GetUserName()).ThenReturn("userL2");
            // set groups for users
            Org.Mockito.Mockito.When(ugi1.GetGroupNames()).ThenReturn(GroupNames1);
            Org.Mockito.Mockito.When(ugi2.GetGroupNames()).ThenReturn(GroupNames2);
            // check before
            try
            {
                ProxyUsers.Authorize(ugi1, "127.0.0.1");
                NUnit.Framework.Assert.Fail("first auth for " + ugi1.GetShortUserName() + " should've failed "
                                            );
            }
            catch (AuthorizationException)
            {
                // expected
                System.Console.Error.WriteLine("auth for " + ugi1.GetUserName() + " failed");
            }
            try
            {
                ProxyUsers.Authorize(ugi2, "127.0.0.1");
                System.Console.Error.WriteLine("auth for " + ugi2.GetUserName() + " succeeded");
            }
            catch (AuthorizationException e)
            {
                // expected
                NUnit.Framework.Assert.Fail("first auth for " + ugi2.GetShortUserName() + " should've succeeded: "
                                            + e.GetLocalizedMessage());
            }
            // refresh will look at configuration on the server side
            // add additional resource with the new value
            // so the server side will pick it up
            string rsrc = "testGroupMappingRefresh_rsrc.xml";

            AddNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");
            DFSAdmin admin = new DFSAdmin(config);

            string[] args = new string[] { "-refreshSuperUserGroupsConfiguration" };
            admin.Run(args);
            try
            {
                ProxyUsers.Authorize(ugi2, "127.0.0.1");
                NUnit.Framework.Assert.Fail("second auth for " + ugi2.GetShortUserName() + " should've failed "
                                            );
            }
            catch (AuthorizationException)
            {
                // expected
                System.Console.Error.WriteLine("auth for " + ugi2.GetUserName() + " failed");
            }
            try
            {
                ProxyUsers.Authorize(ugi1, "127.0.0.1");
                System.Console.Error.WriteLine("auth for " + ugi1.GetUserName() + " succeeded");
            }
            catch (AuthorizationException e)
            {
                // expected
                NUnit.Framework.Assert.Fail("second auth for " + ugi1.GetShortUserName() + " should've succeeded: "
                                            + e.GetLocalizedMessage());
            }
        }