Example #1
        public virtual void TestCloseTwice()
        {
            DistributedFileSystem fs = cluster.GetFileSystem();
            FSDataOutputStream os = fs.Create(new Path("/test"));
            DFSOutputStream dos = (DFSOutputStream)Whitebox.GetInternalState(os, "wrappedStream");
            AtomicReference<IOException> ex =
                (AtomicReference<IOException>)Whitebox.GetInternalState(dos, "lastException");

            NUnit.Framework.Assert.AreEqual(null, ex.Get());
            dos.Close();
            IOException dummy = new IOException("dummy");

            ex.Set(dummy);
            try
            {
                dos.Close();
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.AreEqual(e, dummy);
            }
            NUnit.Framework.Assert.AreEqual(null, ex.Get());
            dos.Close();
        }
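All of these snippets reference cluster, conf, and fs fields that each test class initializes in a fixture not shown by the excerpts. A minimal sketch of such a fixture, assuming NUnit-style attributes and mirroring the setup shown in Example #10 below:

        private static Configuration conf;
        private static MiniDFSCluster cluster;
        private static DistributedFileSystem fs;

        // Hypothetical fixture sketch: bring up a single-datanode mini cluster
        // before each test and tear it down afterwards.
        [SetUp]
        public void StartMiniCluster()
        {
            conf = new HdfsConfiguration();
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
        }

        [TearDown]
        public void StopMiniCluster()
        {
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }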
Example #2
        public virtual void TestFcResolveAfs()
        {
            Configuration conf           = new Configuration();
            FileContext   fcLocal        = FileContext.GetLocalFSFileContext();
            FileContext   fcHdfs         = FileContext.GetFileContext(cluster.GetFileSystem().GetUri());
            string        localTestRoot  = helper.GetAbsoluteTestRootDir(fcLocal);
            Path          alphaLocalPath = new Path(fcLocal.GetDefaultFileSystem().GetUri().ToString(),
                                                    new FilePath(localTestRoot, "alpha").GetAbsolutePath());

            DFSTestUtil.CreateFile(FileSystem.GetLocal(conf), alphaLocalPath, 16, (short)1, 2);
            Path linkTarget = new Path(fcLocal.GetDefaultFileSystem().GetUri().ToString(), localTestRoot);
            Path hdfsLink = new Path(fcHdfs.GetDefaultFileSystem().GetUri().ToString(), "/tmp/link");

            fcHdfs.CreateSymlink(linkTarget, hdfsLink, true);
            Path alphaHdfsPathViaLink = new Path(fcHdfs.GetDefaultFileSystem().GetUri().ToString()
                                                 + "/tmp/link/alpha");
            ICollection<AbstractFileSystem> afsList = fcHdfs.ResolveAbstractFileSystems(alphaHdfsPathViaLink);

            NUnit.Framework.Assert.AreEqual(2, afsList.Count);
            foreach (AbstractFileSystem afs in afsList)
            {
                if (!afs.Equals(fcHdfs.GetDefaultFileSystem()) && !afs.Equals(fcLocal.GetDefaultFileSystem()))
                {
                    NUnit.Framework.Assert.Fail("Failed to resolve AFS correctly");
                }
            }
        }
Example #3
        /// <summary>Create a file with a length of <code>filelen</code>.</summary>
        /// <exception cref="System.IO.IOException"/>
        private void CreateFile(string fileName, long filelen)
        {
            FileSystem fs       = mc.GetFileSystem();
            Path       filePath = new Path(fileName);

            DFSTestUtil.CreateFile(fs, filePath, filelen, (short)1, 0);
        }
Example #4
        public virtual void TestManualSafeMode()
        {
            fs = cluster.GetFileSystem();
            Path file1 = new Path("/tmp/testManualSafeMode/file1");
            Path file2 = new Path("/tmp/testManualSafeMode/file2");

            // create two files with one block each.
            DFSTestUtil.CreateFile(fs, file1, 1000, (short)1, 0);
            DFSTestUtil.CreateFile(fs, file2, 1000, (short)1, 0);
            fs.Close();
            cluster.Shutdown();
            // now bring up just the NameNode.
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(false).Build();
            cluster.WaitActive();
            dfs = cluster.GetFileSystem();
            NUnit.Framework.Assert.IsTrue("No datanode is started. Should be in SafeMode",
                                          dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeGet));
            // manually set safemode.
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            // now bring up the datanode and wait for it to be active.
            cluster.StartDataNodes(conf, 1, true, null, null);
            cluster.WaitActive();
            // wait longer than dfs.namenode.safemode.extension
            try
            {
                Sharpen.Thread.Sleep(2000);
            }
            catch (Exception)
            {
                // interruption of the sleep is harmless here; ignore it
            }
            NUnit.Framework.Assert.IsTrue("should still be in SafeMode",
                                          dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeGet));
            NUnit.Framework.Assert.IsFalse("should not be in SafeMode",
                                           dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave));
        }
Example #5
 public static void ClusterSetupAtBeginning()
 {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
         .NumDataNodes(2).Build();
     cluster.WaitClusterUp();
     fHdfs  = cluster.GetFileSystem(0);
     fHdfs2 = cluster.GetFileSystem(1);
 }
Example #6
 public static void ClusterSetupAtBeginning()
 {
     clusterConf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(2))
         .NumDataNodes(2).Build();
     cluster.WaitClusterUp();
     fHdfs  = cluster.GetFileSystem(0);
     fHdfs2 = cluster.GetFileSystem(1);
 }
Example #7
        public virtual void TestAddBlock()
        {
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path file1 = new Path("/file1");
            Path file2 = new Path("/file2");
            Path file3 = new Path("/file3");
            Path file4 = new Path("/file4");

            DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
            DFSTestUtil.CreateFile(fs, file2, Blocksize, Replication, 0L);
            DFSTestUtil.CreateFile(fs, file3, Blocksize * 2 - 1, Replication, 0L);
            DFSTestUtil.CreateFile(fs, file4, Blocksize * 2, Replication, 0L);
            // restart NameNode
            cluster.RestartNameNode(true);
            FSDirectory fsdir = cluster.GetNamesystem().GetFSDirectory();
            // check file1
            INodeFile file1Node = fsdir.GetINode4Write(file1.ToString()).AsFile();

            BlockInfoContiguous[] file1Blocks = file1Node.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, file1Blocks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, file1Blocks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file1Blocks[0].GetBlockUCState());
            // check file2
            INodeFile file2Node = fsdir.GetINode4Write(file2.ToString()).AsFile();

            BlockInfoContiguous[] file2Blocks = file2Node.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, file2Blocks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, file2Blocks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file2Blocks[0].GetBlockUCState());
            // check file3
            INodeFile file3Node = fsdir.GetINode4Write(file3.ToString()).AsFile();

            BlockInfoContiguous[] file3Blocks = file3Node.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, file3Blocks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, file3Blocks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file3Blocks[0].GetBlockUCState());
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, file3Blocks[1].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file3Blocks[1].GetBlockUCState());
            // check file4
            INodeFile file4Node = fsdir.GetINode4Write(file4.ToString()).AsFile();

            BlockInfoContiguous[] file4Blocks = file4Node.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, file4Blocks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, file4Blocks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file4Blocks[0].GetBlockUCState());
            NUnit.Framework.Assert.AreEqual(Blocksize, file4Blocks[1].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            file4Blocks[1].GetBlockUCState());
        }
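The four per-file checks above repeat one pattern. A small helper, hypothetical and not part of the original test, could state that pattern once:

        // Hypothetical helper: assert a block has the expected length and is COMPLETE.
        private static void AssertCompleteBlock(BlockInfoContiguous block, long expectedBytes)
        {
            NUnit.Framework.Assert.AreEqual(expectedBytes, block.GetNumBytes());
            NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete,
                                            block.GetBlockUCState());
        }

Each check in the test body would then collapse to a single call such as AssertCompleteBlock(file3Blocks[1], Blocksize - 1).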
Example #8
        /// <exception cref="System.Exception"/>
        private static void JoinAs(string jointype, Type map, Type reduce)
        {
            int           srcs  = 4;
            Configuration conf  = new Configuration();
            Path          @base = cluster.GetFileSystem().MakeQualified(new Path("/" + jointype));

            Path[] src = WriteSimpleSrc(@base, conf, srcs);
            conf.Set(CompositeInputFormat.JoinExpr,
                     CompositeInputFormat.Compose(jointype, typeof(SequenceFileInputFormat), src));
            conf.SetInt("testdatamerge.sources", srcs);
            Job job = Job.GetInstance(conf);

            job.SetInputFormatClass(typeof(CompositeInputFormat));
            FileOutputFormat.SetOutputPath(job, new Path(@base, "out"));
            job.SetMapperClass(map);
            job.SetReducerClass(reduce);
            job.SetOutputFormatClass(typeof(SequenceFileOutputFormat));
            job.SetOutputKeyClass(typeof(IntWritable));
            job.SetOutputValueClass(typeof(IntWritable));
            job.WaitForCompletion(true);
            NUnit.Framework.Assert.IsTrue("Job failed", job.IsSuccessful());
            if ("outer".Equals(jointype))
            {
                CheckOuterConsistency(job, src);
            }
            @base.GetFileSystem(conf).Delete(@base, true);
        }
Example #9
        public virtual void TestTruncate()
        {
            short repl               = 3;
            int   blockSize          = 1024;
            int   numOfBlocks        = 2;
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path dir  = GetTestRootPath(fc, "test/hadoop");
            Path file = GetTestRootPath(fc, "test/hadoop/file");

            byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
            FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);
            int  newLength = blockSize;
            bool isReady   = fc.Truncate(file, newLength);

            NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
            FileStatus fileStatus = fc.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(fileStatus.GetLen(), newLength);
            AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());
            ContentSummary cs = fs.GetContentSummary(dir);

            NUnit.Framework.Assert.AreEqual("Bad disk space usage",
                                            cs.GetSpaceConsumed(), newLength * repl);
            NUnit.Framework.Assert.IsTrue(fs.Delete(dir, true));
        }
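Truncate returns true here because newLength lands exactly on a block boundary, so no block recovery is needed. A hedged sketch of the other case, reusing the names from the example above and assuming IsFileClosed is available on DistributedFileSystem as in the Java API:

            // Sketch under assumptions: truncating to a mid-block length triggers
            // block recovery, and Truncate returns false until the file is closed again.
            bool done = fs.Truncate(file, newLength - 1);
            while (!done && !fs.IsFileClosed(file))
            {
                Sharpen.Thread.Sleep(100); // poll interval is arbitrary
            }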
Example #10
 public static void ClusterSetUp()
 {
     conf    = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).Build();
     cluster.WaitActive();
     fs = cluster.GetFileSystem();
 }
Example #11
 /// <param name="blockSize"/>
 /// <param name="perVolumeCapacity">
 /// limit the capacity of each volume to the given
 /// value. If negative, then don't limit.
 /// </param>
 /// <exception cref="System.IO.IOException"/>
 private void StartCluster(int blockSize, int numDatanodes, long perVolumeCapacity)
 {
     InitConfig(blockSize);
     cluster = new MiniDFSCluster.Builder(conf)
         .StoragesPerDatanode(StoragesPerDatanode)
         .NumDataNodes(numDatanodes).Build();
     fs     = cluster.GetFileSystem();
     client = fs.GetClient();
     cluster.WaitActive();
     if (perVolumeCapacity >= 0)
     {
         foreach (DataNode dn in cluster.GetDataNodes())
         {
             foreach (FsVolumeSpi volume in dn.GetFSDataset().GetVolumes())
             {
                 ((FsVolumeImpl)volume).SetCapacityForTesting(perVolumeCapacity);
             }
         }
     }
     if (numDatanodes == 1)
     {
         IList<FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
         Assert.AssertThat(volumes.Count, IS.Is(1));
         singletonVolume = ((FsVolumeImpl)volumes[0]);
     }
 }
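A hypothetical call to the helper above, bringing up one datanode with 1 MB blocks and no per-volume capacity limit (negative means no limit, per the doc comment):

 // Hypothetical usage: singletonVolume is populated because numDatanodes == 1.
 StartCluster(1024 * 1024, 1, -1);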
Example #12
 public virtual void ClusterSetUp()
 {
     conf    = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Repl).Build();
     cluster.WaitActive();
     fs = cluster.GetFileSystem();
 }
Example #13
            /// <summary>Create a file with a length of <code>fileLen</code>.</summary>
            /// <exception cref="System.IO.IOException"/>
            /// <exception cref="System.Exception"/>
            /// <exception cref="Sharpen.TimeoutException"/>
            private void CreateFile(int index, long len)
            {
                FileSystem fs = cluster.GetFileSystem(index);

                DFSTestUtil.CreateFile(fs, FilePath, len, replication, Random.NextLong());
                DFSTestUtil.WaitReplication(fs, FilePath, replication);
            }
Example #14
        public virtual void SetupCluster()
        {
            // must configure prior to instantiating the namesystem because it
            // will reconfigure the logger if async is enabled
            ConfigureAuditLogs();
            conf = new HdfsConfiguration();
            long precision = 1L;

            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, precision);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10000L);
            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAuditLogAsyncKey, useAsyncLog);
            util = new DFSTestUtil.Builder().SetName("TestAuditAllowed").SetNumFiles(20).Build();
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            fs      = cluster.GetFileSystem();
            util.CreateFiles(fs, fileName);
            // make sure the appender is what it's supposed to be
            Logger logger = ((Log4JLogger)FSNamesystem.auditLog).GetLogger();
            IList<Appender> appenders = Sharpen.Collections.List(logger.GetAllAppenders());

            NUnit.Framework.Assert.AreEqual(1, appenders.Count);
            NUnit.Framework.Assert.AreEqual(useAsyncLog, appenders[0] is AsyncAppender);
            fnames = util.GetFileNames(fileName);
            util.WaitReplication(fs, fileName, (short)3);
            userGroupInfo = UserGroupInformation.CreateUserForTesting(username, groups);
        }
Example #15
        public static void SetupCluster()
        {
            Configuration conf = new Configuration();

            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
                cluster.WaitActive();
                // change root permission to 777
                cluster.GetFileSystem().SetPermission(new Path("/"), new FsPermission((short)0x1ff));
                string uri = WebHdfsFileSystem.Scheme + "://"
                             + conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
                //get file system as a non-superuser
                UserGroupInformation current = UserGroupInformation.GetCurrentUser();
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(
                    current.GetShortUserName() + "x", new string[] { "user" });
                fileSystem = ugi.DoAs(new _PrivilegedExceptionAction_91(uri, conf));
                defaultWorkingDirectory = fileSystem.GetWorkingDirectory();
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
        }
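_PrivilegedExceptionAction_91 is a Sharpen-generated class whose source is not part of this excerpt; judging from the call site, it presumably opens the file system at the WebHDFS URI as the test user, roughly like this sketch:

        // Sketch (assumption): the generated action class behind ugi.DoAs(...) above.
        private sealed class _PrivilegedExceptionAction_91 : PrivilegedExceptionAction<FileSystem>
        {
            private readonly string uri;
            private readonly Configuration conf;

            public _PrivilegedExceptionAction_91(string uri, Configuration conf)
            {
                this.uri = uri;
                this.conf = conf;
            }

            /// <exception cref="System.Exception"/>
            public FileSystem Run()
            {
                return FileSystem.Get(new URI(uri), conf);
            }
        }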
Example #16
 public static void StartUp()
 {
     conf = new HdfsConfiguration();
     Init(conf);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum).Build();
     fs      = cluster.GetFileSystem();
 }
Example #17
        /// <exception cref="System.Exception"/>
        private void StartCluster(Configuration conf)
        {
            if (Runtime.GetProperty("hadoop.log.dir") == null)
            {
                Runtime.SetProperty("hadoop.log.dir", "target/test-dir");
            }
            conf.Set("dfs.block.access.token.enable", "false");
            conf.Set("dfs.permissions", "true");
            conf.Set("hadoop.security.authentication", "simple");
            string cp = conf.Get(YarnConfiguration.YarnApplicationClasspath,
                                 StringUtils.Join(",", YarnConfiguration.DefaultYarnCrossPlatformApplicationClasspath))
                        + FilePath.pathSeparator + classpathDir;

            conf.Set(YarnConfiguration.YarnApplicationClasspath, cp);
            dfsCluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem fileSystem = dfsCluster.GetFileSystem();

            fileSystem.Mkdirs(new Path("/tmp"));
            fileSystem.Mkdirs(new Path("/user"));
            fileSystem.Mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.SetPermission(new Path("/tmp"), FsPermission.ValueOf("-rwxrwxrwx"));
            fileSystem.SetPermission(new Path("/user"), FsPermission.ValueOf("-rwxrwxrwx"));
            fileSystem.SetPermission(new Path("/hadoop/mapred/system"), FsPermission.ValueOf("-rwx------"));
            FileSystem.SetDefaultUri(conf, fileSystem.GetUri());
            mrCluster = MiniMRClientClusterFactory.Create(this.GetType(), 1, conf);
            // so the minicluster conf is avail to the containers.
            TextWriter writer = new FileWriter(classpathDir + "/core-site.xml");

            mrCluster.GetConfig().WriteXml(writer);
            writer.Close();
        }
Example #18
        public static void SetUp()
        {
            conf = new HdfsConfiguration();
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, false);
            // Set up the hosts/exclude files.
            localFileSys = FileSystem.GetLocal(conf);
            Path workingDir = localFileSys.GetWorkingDirectory();

            dir = new Path(workingDir, "build/test/data/work-dir/decommission");
            NUnit.Framework.Assert.IsTrue(localFileSys.Mkdirs(dir));
            excludeFile = new Path(dir, "exclude");
            conf.Set(DFSConfigKeys.DfsHostsExclude, excludeFile.ToUri().GetPath());
            Path includeFile = new Path(dir, "include");

            conf.Set(DFSConfigKeys.DfsHosts, includeFile.ToUri().GetPath());
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 4);
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsNamenodeDecommissionIntervalKey, 1);
            conf.SetLong(DFSConfigKeys.DfsDatanodeBalanceBandwidthpersecKey, 1);
            WriteConfigFile(localFileSys, excludeFile, null);
            WriteConfigFile(localFileSys, includeFile, null);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            cluster.GetNamesystem().GetBlockManager().GetDatanodeManager().SetHeartbeatExpireInterval(3000);
            Logger.GetLogger(typeof(DecommissionManager)).SetLevel(Level.Debug);
        }
Example #19
        /// <exception cref="System.IO.IOException"/>
        private void TestXAttr(bool persistNamespace)
        {
            Path path = new Path("/p");
            DistributedFileSystem fs = cluster.GetFileSystem();

            fs.Create(path).Close();
            fs.SetXAttr(path, name1, value1, EnumSet.Of(XAttrSetFlag.Create));
            fs.SetXAttr(path, name2, value2, EnumSet.Of(XAttrSetFlag.Create));
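            // Assumption: HDFS stores a null xattr value as an empty byte array,
            // which is what the value3 comparisons below rely on (the value3 field
            // itself is not shown in this excerpt).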
            fs.SetXAttr(path, name3, null, EnumSet.Of(XAttrSetFlag.Create));
            Restart(fs, persistNamespace);
            IDictionary <string, byte[]> xattrs = fs.GetXAttrs(path);

            NUnit.Framework.Assert.AreEqual(xattrs.Count, 3);
            Assert.AssertArrayEquals(value1, xattrs[name1]);
            Assert.AssertArrayEquals(value2, xattrs[name2]);
            Assert.AssertArrayEquals(value3, xattrs[name3]);
            fs.SetXAttr(path, name1, newValue1, EnumSet.Of(XAttrSetFlag.Replace));
            Restart(fs, persistNamespace);
            xattrs = fs.GetXAttrs(path);
            NUnit.Framework.Assert.AreEqual(xattrs.Count, 3);
            Assert.AssertArrayEquals(newValue1, xattrs[name1]);
            Assert.AssertArrayEquals(value2, xattrs[name2]);
            Assert.AssertArrayEquals(value3, xattrs[name3]);
            fs.RemoveXAttr(path, name1);
            fs.RemoveXAttr(path, name2);
            fs.RemoveXAttr(path, name3);
            Restart(fs, persistNamespace);
            xattrs = fs.GetXAttrs(path);
            NUnit.Framework.Assert.AreEqual(xattrs.Count, 0);
        }
Example #20
        /// <exception cref="System.Exception"/>
        public virtual void TestNumInputs()
        {
            JobConf job = new JobConf(conf);

            dfs = NewDFSCluster(job);
            FileSystem fs = dfs.GetFileSystem();

            System.Console.Out.WriteLine("FileSystem " + fs.GetUri());
            Path   inputDir     = new Path("/foo/");
            int    numFiles     = 10;
            string fileNameBase = "part-0000";

            for (int i = 0; i < numFiles; ++i)
            {
                CreateInputs(fs, inputDir, fileNameBase + i.ToString());
            }
            CreateInputs(fs, inputDir, "_meta");
            CreateInputs(fs, inputDir, "_temp");
            // split it using a file input format
            TextInputFormat.AddInputPath(job, inputDir);
            TextInputFormat inFormat = new TextInputFormat();

            inFormat.Configure(job);
            InputSplit[] splits = inFormat.GetSplits(job, 1);
            NUnit.Framework.Assert.AreEqual("Expected value of " + FileInputFormat.NumInputFiles,
                                            numFiles, job.GetLong(FileInputFormat.NumInputFiles, 0));
        }
Example #21
        /// <exception cref="System.Exception"/>
        public static Counters RunJob(JobConf conf)
        {
            conf.SetMapperClass(typeof(TestReduceFetchFromPartialMem.MapMB));
            conf.SetReducerClass(typeof(TestReduceFetchFromPartialMem.MBValidate));
            conf.SetOutputKeyClass(typeof(Org.Apache.Hadoop.IO.Text));
            conf.SetOutputValueClass(typeof(Org.Apache.Hadoop.IO.Text));
            conf.SetNumReduceTasks(1);
            conf.SetInputFormat(typeof(TestReduceFetchFromPartialMem.FakeIF));
            conf.SetNumTasksToExecutePerJvm(1);
            conf.SetInt(JobContext.MapMaxAttempts, 0);
            conf.SetInt(JobContext.ReduceMaxAttempts, 0);
            FileInputFormat.SetInputPaths(conf, new Path("/in"));
            Path outp = new Path("/out");

            FileOutputFormat.SetOutputPath(conf, outp);
            RunningJob job = null;

            try
            {
                job = JobClient.RunJob(conf);
                NUnit.Framework.Assert.IsTrue(job.IsSuccessful());
            }
            finally
            {
                FileSystem fs = dfsCluster.GetFileSystem();
                if (fs.Exists(outp))
                {
                    fs.Delete(outp, true);
                }
            }
            return job.GetCounters();
        }
Example #22
        // also okay for HDFS.
        //the following are new tests (i.e. not over-riding the super class methods)
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestGetFileBlockLocations()
        {
            string f = "/test/testGetFileBlockLocations";

            CreateFile(Path(f));
            BlockLocation[] computed = fs.GetFileBlockLocations(new Path(f), 0L, 1L);
            BlockLocation[] expected = cluster.GetFileSystem().GetFileBlockLocations(new Path(f), 0L, 1L);
            NUnit.Framework.Assert.AreEqual(expected.Length, computed.Length);
            for (int i = 0; i < computed.Length; i++)
            {
                NUnit.Framework.Assert.AreEqual(expected[i].ToString(), computed[i].ToString());
                // Check names
                string[] names1 = expected[i].GetNames();
                string[] names2 = computed[i].GetNames();
                Arrays.Sort(names1);
                Arrays.Sort(names2);
                Assert.AssertArrayEquals("Names differ", names1, names2);
                // Check topology
                string[] topos1 = expected[i].GetTopologyPaths();
                string[] topos2 = computed[i].GetTopologyPaths();
                Arrays.Sort(topos1);
                Arrays.Sort(topos2);
                Assert.AssertArrayEquals("Topology differs", topos1, topos2);
            }
        }
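CreateFile(Path(f)) relies on a Path(string) helper from the base contract test class, not shown here; it presumably qualifies the string against the file system under test, along these lines:

        // Sketch (assumption): helper from the base test class, qualifying a raw
        // string path against the file system under test.
        protected Path Path(string pathString)
        {
            return new Path(pathString).MakeQualified(fs.GetUri(), fs.GetWorkingDirectory());
        }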
Example #23
        /// <exception cref="System.Exception"/>
        public virtual void TestWebHdfsDoAs()
        {
            WebHdfsTestUtil.Log.Info("START: testWebHdfsDoAs()");
            WebHdfsTestUtil.Log.Info("ugi.getShortUserName()=" + ugi.GetShortUserName());
            WebHdfsFileSystem webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystemAs(ugi, config,
                                                                               WebHdfsFileSystem.Scheme);
            Path root = new Path("/");

            cluster.GetFileSystem().SetPermission(root, new FsPermission((short)0x1ff));
            Whitebox.SetInternalState(webhdfs, "ugi", proxyUgi);
            {
                Path responsePath = webhdfs.GetHomeDirectory();
                WebHdfsTestUtil.Log.Info("responsePath=" + responsePath);
                NUnit.Framework.Assert.AreEqual(webhdfs.GetUri() + "/user/" + ProxyUser,
                                                responsePath.ToString());
            }
            Path f = new Path("/testWebHdfsDoAs/a.txt");
            {
                FSDataOutputStream @out = webhdfs.Create(f);
                @out.Write(Sharpen.Runtime.GetBytesForString("Hello, webhdfs user!"));
                @out.Close();
                FileStatus status = webhdfs.GetFileStatus(f);
                WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
                NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
            }
            {
                FSDataOutputStream @out = webhdfs.Append(f);
                @out.Write(Sharpen.Runtime.GetBytesForString("\nHello again!"));
                @out.Close();
                FileStatus status = webhdfs.GetFileStatus(f);
                WebHdfsTestUtil.Log.Info("status.getOwner()=" + status.GetOwner());
                WebHdfsTestUtil.Log.Info("status.getLen()  =" + status.GetLen());
                NUnit.Framework.Assert.AreEqual(ProxyUser, status.GetOwner());
            }
        }
Example #24
        public virtual void TestBlocksScheduledCounter()
        {
            cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
            // open a file and write a few bytes:
            FSDataOutputStream @out = fs.Create(new Path("/testBlockScheduledCounter"));

            for (int i = 0; i < 1024; i++)
            {
                @out.Write(i);
            }
            // flush to make sure a block is allocated.
            @out.Hflush();
            AList<DatanodeDescriptor> dnList = new AList<DatanodeDescriptor>();
            DatanodeManager dm = cluster.GetNamesystem().GetBlockManager().GetDatanodeManager();

            dm.FetchDatanodes(dnList, dnList, false);
            DatanodeDescriptor dn = dnList[0];

            NUnit.Framework.Assert.AreEqual(1, dn.GetBlocksScheduled());
            // close the file and the counter should go to zero.
            @out.Close();
            NUnit.Framework.Assert.AreEqual(0, dn.GetBlocksScheduled());
        }
Example #25
 /// <exception cref="System.Exception"/>
 internal TestContext(Configuration conf, int numNameServices)
 {
     this.numNameServices = numNameServices;
     MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf)
         .NumDataNodes(1).StoragesPerDatanode(1);
     if (numNameServices > 1)
     {
         bld.NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(numNameServices));
     }
     cluster = bld.Build();
     cluster.WaitActive();
     dfs = new DistributedFileSystem[numNameServices];
     for (int i = 0; i < numNameServices; i++)
     {
         dfs[i] = cluster.GetFileSystem(i);
     }
     bpids = new string[numNameServices];
     for (int i = 0; i < numNameServices; i++)
     {
         bpids[i] = cluster.GetNamesystem(i).GetBlockPoolId();
     }
     datanode     = cluster.GetDataNodes()[0];
     blockScanner = datanode.GetBlockScanner();
     for (int i = 0; i < numNameServices; i++)
     {
         dfs[i].Mkdirs(new Path("/test"));
     }
     data    = datanode.GetFSDataset();
     volumes = data.GetVolumes();
 }
Example #26
 public virtual void TestShortCircuitTraceHooks()
 {
     Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
     conf = new Configuration();
     conf.Set(DFSConfigKeys.DfsClientHtracePrefix + SpanReceiverHost.SpanReceiversConfSuffix,
              typeof(TestTracing.SetSpanReceiver).FullName);
     conf.SetLong("dfs.blocksize", 100 * 1024);
     conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
     conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
     conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "testShortCircuitTraceHooks._PORT");
     conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
     dfs     = cluster.GetFileSystem();
     try
     {
         DFSTestUtil.CreateFile(dfs, TestPath, TestLength, (short)1, 5678L);
         TraceScope        ts     = Trace.StartSpan("testShortCircuitTraceHooks", Sampler.Always);
         FSDataInputStream stream = dfs.Open(TestPath);
         byte[]            buf    = new byte[TestLength];
         IOUtils.ReadFully(stream, buf, 0, TestLength);
         stream.Close();
         ts.Close();
         string[] expectedSpanNames = new string[] { "OpRequestShortCircuitAccessProto", "ShortCircuitShmRequestProto" };
         TestTracing.AssertSpanNamesFound(expectedSpanNames);
     }
     finally
     {
         dfs.Close();
         cluster.Shutdown();
     }
 }
Example #27
        /// <summary>Initializes the cluster.</summary>
        /// <param name="numDataNodes">number of datanodes</param>
        /// <param name="storagesPerDatanode">number of storage locations on each datanode</param>
        /// <param name="failedVolumesTolerated">number of acceptable volume failures</param>
        /// <exception cref="System.Exception">if there is any failure</exception>
        private void InitCluster(int numDataNodes, int storagesPerDatanode, int failedVolumesTolerated)
        {
            conf = new HdfsConfiguration();
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 512L);

            /*
             * Lower the DN heartbeat, DF rate, and recheck interval to one second
             * so state about failures and datanode death propagates faster.
             */
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsDfIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, failedVolumesTolerated);
            cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(numDataNodes)
                .StoragesPerDatanode(storagesPerDatanode).Build();
            cluster.WaitActive();
            fs      = cluster.GetFileSystem();
            dataDir = cluster.GetDataDirectory();
            long dnCapacity = DFSTestUtil.GetDatanodeCapacity(
                cluster.GetNamesystem().GetBlockManager().GetDatanodeManager(), 0);

            volumeCapacity = dnCapacity / cluster.GetStoragesPerDatanode();
        }
Example #28
 public static void Setup()
 {
     try
     {
         dfsCluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(true).Racks(null).Build();
         remoteFs = dfsCluster.GetFileSystem();
     }
     catch (IOException io)
     {
         throw new RuntimeException("problem starting mini dfs cluster", io);
     }
     if (!(new FilePath(MiniMRYarnCluster.Appjar)).Exists())
     {
          Log.Info("MRAppJar " + MiniMRYarnCluster.Appjar + " not found. Not running test.");
         return;
     }
     if (mrCluster == null)
     {
         mrCluster = new MiniMRYarnCluster(typeof(TestMRJobs).FullName, NumNodeMgrs);
         Configuration conf = new Configuration();
         conf.Set("fs.defaultFS", remoteFs.GetUri().ToString());
         // use HDFS
         conf.Set(MRJobConfig.MrAmStagingDir, "/apps_staging_dir");
         mrCluster.Init(conf);
         mrCluster.Start();
     }
     // Copy MRAppJar and make it private. TODO: FIXME. This is a hack to
     // workaround the absent public discache.
     localFs.CopyFromLocalFile(new Path(MiniMRYarnCluster.Appjar), AppJar);
     localFs.SetPermission(AppJar, new FsPermission("700"));
 }
Example #29
        /// <summary>
        /// Test fsimage loading when 1) there is an empty file loaded from fsimage,
        /// and 2) there is later an append operation to be applied from edit log.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestLoadImageWithEmptyFile()
        {
            // create an empty file
            Path file = new Path(dir, "file");
            FSDataOutputStream @out = hdfs.Create(file);

            @out.Close();
            // save namespace
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            // append to the empty file
            @out = hdfs.Append(file);
            @out.Write(1);
            @out.Close();
            // restart cluster
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            hdfs = cluster.GetFileSystem();
            FileStatus status = hdfs.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(1, status.GetLen());
        }
Example #30
 /// <exception cref="System.IO.IOException"/>
 public virtual void SetUpMiniCluster(Configuration conf, bool manageNameDfsDirs)
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0)
         .ManageNameDfsDirs(manageNameDfsDirs)
         .CheckExitOnShutdown(false).Build();
     cluster.WaitActive();
     fs = cluster.GetFileSystem();
 }