Example #1
 /// <exception cref="System.IO.IOException"/>
 internal EventWriter(FSDataOutputStream @out)
 {
     this.@out = @out;
     @out.WriteBytes(Version);
     @out.WriteBytes("\n");
     @out.WriteBytes(Event.Schema$.ToString());
     @out.WriteBytes("\n");
     this.encoder = EncoderFactory.Get().JsonEncoder(Event.Schema$, @out);
 }
Example #2
        public virtual void TestFailedAppendBlockRejection()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            DistributedFileSystem fs      = null;

            try
            {
                fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes("hello\n");
                @out.Close();
                // stop one datanode
                MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
                string dnAddress = dnProp.datanode.GetXferAddress().ToString();
                if (dnAddress.StartsWith("/"))
                {
                    dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
                }
                // append again to bump genstamps
                for (int i = 0; i < 2; i++)
                {
                    @out = fs.Append(path);
                    @out.WriteBytes("helloagain\n");
                    @out.Close();
                }
                // re-open the file so its last block goes back to the under-construction state
                @out = fs.Append(path);
                cluster.RestartDataNode(dnProp, true);
                // wait till the block report comes
                Sharpen.Thread.Sleep(2000);
                // check the block locations; they should not contain the restarted datanode
                BlockLocation[] locations = fs.GetFileBlockLocations(path, 0, long.MaxValue);
                string[]        names     = locations[0].GetNames();
                foreach (string node in names)
                {
                    if (node.Equals(dnAddress))
                    {
                        NUnit.Framework.Assert.Fail("Failed append should not be present in latest block locations."
                                                    );
                    }
                }
                @out.Close();
            }
            finally
            {
                IOUtils.CloseStream(fs);
                cluster.Shutdown();
            }
        }
Example #3
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            ofs.WriteBytes("Some more stuff to write");
            ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
            IList <LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString
                                                                                     (), FileSize - 1, FileSize).GetLocatedBlocks();
            string bpid = cluster.GetNamesystem().GetBlockPoolId();

            foreach (DataNode dn in cluster.GetDataNodes())
            {
                Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId
                                                                   ());
                NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r !=
                                              null);
                NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()"
                                                , HdfsServerConstants.ReplicaState.Rbw, r.GetState());
            }
            ofs.Close();
        }
Example #4
        /// <exception cref="System.Exception"/>
        internal static void WriteFile(FileSystem fs, Path name)
        {
            FSDataOutputStream stm = fs.Create(name);

            stm.WriteBytes("42\n");
            stm.Close();
        }
Example #5
        public virtual void TestReadClosedStream()
        {
            Path testFile         = new Path("/testfile+2");
            FSDataOutputStream os = hdfs.Create(testFile, true);

            os.WriteBytes("0123456789");
            os.Close();
            // ByteRangeInputStream delays opening until the first read. Make sure it doesn't
            // open a closed stream that has never been opened
            FSDataInputStream @in = hftpFs.Open(testFile);

            @in.Close();
            CheckClosedStream(@in);
            CheckClosedStream(@in.GetWrappedStream());
            // force the stream to connect and then close it
            @in = hftpFs.Open(testFile);
            int ch = @in.Read();

            NUnit.Framework.Assert.AreEqual('0', ch);
            @in.Close();
            CheckClosedStream(@in);
            CheckClosedStream(@in.GetWrappedStream());
            // make sure seeking doesn't automagically reopen the stream
            @in.Seek(4);
            CheckClosedStream(@in);
            CheckClosedStream(@in.GetWrappedStream());
        }
Example #6
 /// <summary>
 /// To make sure that no more than one instance is running in an HDFS,
 /// the instance creates a file in HDFS, writes the hostname of the machine
 /// it is running on to that file, and does not close the file until it exits.
 /// </summary>
 /// <remarks>
 /// To make sure that no more than one instance is running in an HDFS,
 /// the instance creates a file in HDFS, writes the hostname of the machine
 /// it is running on to that file, and does not close the file until it exits.
 /// This prevents a second instance from running, because it cannot
 /// create the file while the first one is running.
 /// This method checks whether an instance is already running and, if not,
 /// marks this one as running. Note that this is an atomic operation.
 /// </remarks>
 /// <returns>
 /// null if there is a running instance;
 /// otherwise, the output stream to the newly created file.
 /// </returns>
 /// <exception cref="System.IO.IOException"/>
 private OutputStream CheckAndMarkRunning()
 {
     try
     {
         if (fs.Exists(idPath))
         {
             // try appending to it so that it will fail fast if another balancer is
             // running.
             IOUtils.CloseStream(fs.Append(idPath));
             fs.Delete(idPath, true);
         }
         FSDataOutputStream fsout = fs.Create(idPath, false);
         // mark balancer idPath to be deleted during filesystem closure
         fs.DeleteOnExit(idPath);
         if (write2IdFile)
         {
             fsout.WriteBytes(Sharpen.Runtime.GetLocalHost().GetHostName());
             fsout.Hflush();
         }
         return(fsout);
     }
     catch (RemoteException e)
     {
         if (typeof(AlreadyBeingCreatedException).FullName.Equals(e.GetClassName()))
         {
             return(null);
         }
         else
         {
             throw;
         }
     }
 }
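A minimal, hypothetical caller-side sketch (not part of the example above) of how the value returned by CheckAndMarkRunning() might be used; RunOneInstance() and ExitCodeAlreadyRunning are illustrative names only, not identifiers from the original source.

 private int RunExclusively()
 {
     // CheckAndMarkRunning() is the method shown above: it returns null when
     // another instance already holds the id file open.
     OutputStream marker = CheckAndMarkRunning();
     if (marker == null)
     {
         // another instance is running, so this one must not start
         return ExitCodeAlreadyRunning;
     }
     try
     {
         // do the real work while the marker file is held open
         return RunOneInstance();
     }
     finally
     {
         // closing the stream releases the file; DeleteOnExit removes the id path
         IOUtils.CloseStream(marker);
     }
 }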
Example #7
        public virtual void TestInvalidateOverReplicatedBlock()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                FSNamesystem       namesystem = cluster.GetNamesystem();
                BlockManager       bm         = namesystem.GetBlockManager();
                FileSystem         fs         = cluster.GetFileSystem();
                Path               p          = new Path(MiniDFSCluster.GetBaseDirectory(), "/foo1");
                FSDataOutputStream @out       = fs.Create(p, (short)2);
                @out.WriteBytes("HDFS-3119: " + p);
                @out.Hsync();
                fs.SetReplication(p, (short)1);
                @out.Close();
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, p);
                NUnit.Framework.Assert.AreEqual("Expected only one live replica for the block", 1
                                                , bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #8
        public virtual void TestGetPos()
        {
            Path testFile = new Path("/testfile+1");
            // Write a test file.
            FSDataOutputStream @out = hdfs.Create(testFile, true);

            @out.WriteBytes("0123456789");
            @out.Close();
            FSDataInputStream @in = hftpFs.Open(testFile);

            // Test read().
            for (int i = 0; i < 5; ++i)
            {
                NUnit.Framework.Assert.AreEqual(i, @in.GetPos());
                @in.Read();
            }
            // Test read(b, off, len).
            NUnit.Framework.Assert.AreEqual(5, @in.GetPos());
            byte[] buffer = new byte[10];
            NUnit.Framework.Assert.AreEqual(2, @in.Read(buffer, 0, 2));
            NUnit.Framework.Assert.AreEqual(7, @in.GetPos());
            // Test read(b).
            int bytesRead = @in.Read(buffer);

            NUnit.Framework.Assert.AreEqual(7 + bytesRead, @in.GetPos());
            // Test EOF.
            for (int i_1 = 0; i_1 < 100; ++i_1)
            {
                @in.Read();
            }
            NUnit.Framework.Assert.AreEqual(10, @in.GetPos());
            @in.Close();
        }
Example #9
        // Symlink target will have gone so can't use File.exists()
        /// <exception cref="System.IO.IOException"/>
        private Path CreateTempFile(string filename, string contents)
        {
            Path path             = new Path(TestRootDir, filename);
            FSDataOutputStream os = localFs.Create(path);

            os.WriteBytes(contents);
            os.Close();
            return(path);
        }
Example #10
        /// <exception cref="System.IO.IOException"/>
        private Path CreateTempFile(string filename, string contents)
        {
            Path               path = new Path(TestRootDir, filename);
            Configuration      conf = new Configuration();
            FSDataOutputStream os   = FileSystem.GetLocal(conf).Create(path);

            os.WriteBytes(contents);
            os.Close();
            return(path);
        }
Example #11
        /// <exception cref="System.IO.IOException"/>
        private void WriteConfigFile(Path name, AList <string> nodes)
        {
            // delete if it already exists
            if (localFileSys.Exists(name))
            {
                localFileSys.Delete(name, true);
            }
            FSDataOutputStream stm = localFileSys.Create(name);

            if (nodes != null)
            {
                for (IEnumerator <string> it = nodes.GetEnumerator(); it.HasNext();)
                {
                    string node = it.Next();
                    stm.WriteBytes(node);
                    stm.WriteBytes("\n");
                }
            }
            stm.Close();
        }
Example #12
 /// <exception cref="System.IO.IOException"/>
 internal virtual void Write(HistoryEvent @event)
 {
     lock (this)
     {
         Event wrapper = new Event();
         wrapper.type   = @event.GetEventType();
         wrapper.@event = @event.GetDatum();
         writer.Write(wrapper, encoder);
         encoder.Flush();
         @out.WriteBytes("\n");
     }
 }
Example #13
        public virtual void TestSeek()
        {
            Path testFile           = new Path("/testfile+1");
            FSDataOutputStream @out = hdfs.Create(testFile, true);

            @out.WriteBytes("0123456789");
            @out.Close();
            FSDataInputStream @in = hftpFs.Open(testFile);

            @in.Seek(7);
            NUnit.Framework.Assert.AreEqual('7', @in.Read());
            @in.Close();
        }
Example #14
        /// <exception cref="System.IO.IOException"/>
        private static void CreateFile(Path inFile, Configuration conf)
        {
            FileSystem fs = inFile.GetFileSystem(conf);

            if (fs.Exists(inFile))
            {
                return;
            }
            FSDataOutputStream @out = fs.Create(inFile);

            @out.WriteBytes("This is a test file");
            @out.Close();
        }
Example #15
        /// <summary>Test to verify the race between finalizeBlock and Lease recovery</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRaceBetweenReplicaRecoveryAndFinalizeBlock()
        {
            TearDown();
            // Stop the Mocked DN started in startup()
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeXceiverStopTimeoutMillisKey, "1000");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitClusterUp();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes("data");
                @out.Hsync();
                IList <LocatedBlock> blocks             = DFSTestUtil.GetAllBlocks(fs.Open(path));
                LocatedBlock         block              = blocks[0];
                DataNode             dataNode           = cluster.GetDataNodes()[0];
                AtomicBoolean        recoveryInitResult = new AtomicBoolean(true);
                Sharpen.Thread       recoveryThread     = new _Thread_612(block, dataNode, recoveryInitResult
                                                                          );
                recoveryThread.Start();
                try
                {
                    @out.Close();
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Writing should fail", e.Message.Contains("are bad. Aborting..."
                                                                                            ));
                }
                finally
                {
                    recoveryThread.Join();
                }
                NUnit.Framework.Assert.IsTrue("Recovery should be initiated successfully", recoveryInitResult
                                              .Get());
                dataNode.UpdateReplicaUnderRecovery(block.GetBlock(), block.GetBlock().GetGenerationStamp
                                                        () + 1, block.GetBlock().GetBlockId(), block.GetBlockSize());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
Example #16
        /// <exception cref="System.IO.IOException"/>
        private void TestPersistHelper(Configuration conf)
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem          fsn = cluster.GetNamesystem();
                DistributedFileSystem fs  = cluster.GetFileSystem();
                Path dir   = new Path("/abc/def");
                Path file1 = new Path(dir, "f1");
                Path file2 = new Path(dir, "f2");
                // create an empty file f1
                fs.Create(file1).Close();
                // create an under-construction file f2
                FSDataOutputStream @out = fs.Create(file2);
                @out.WriteBytes("hello");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                            .UpdateLength));
                // checkpoint
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                fs.SaveNamespace();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.RestartNameNode();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fs.IsDirectory(dir));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
                // check internals of file2
                INodeFile file2Node = fsn.dir.GetINode4Write(file2.ToString()).AsFile();
                NUnit.Framework.Assert.AreEqual("hello".Length, file2Node.ComputeFileSize());
                NUnit.Framework.Assert.IsTrue(file2Node.IsUnderConstruction());
                BlockInfoContiguous[] blks = file2Node.GetBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Length);
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction
                                                , blks[0].GetBlockUCState());
                // check lease manager
                LeaseManager.Lease lease = fsn.leaseManager.GetLeaseByPath(file2.ToString());
                NUnit.Framework.Assert.IsNotNull(lease);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        public virtual void TestBlockRecoveryWithLessMetafile()
        {
            Configuration conf = new Configuration();

            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey, UserGroupInformation.GetCurrentUser
                         ().GetShortUserName());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Path file = new Path("/testRecoveryFile");
            DistributedFileSystem dfs  = cluster.GetFileSystem();
            FSDataOutputStream    @out = dfs.Create(file);
            int count = 0;

            while (count < 2 * 1024 * 1024)
            {
                @out.WriteBytes("Data");
                count += 4;
            }
            @out.Hsync();
            // abort the original stream
            ((DFSOutputStream)@out.GetWrappedStream()).Abort();
            LocatedBlocks locations = cluster.GetNameNodeRpc().GetBlockLocations(file.ToString
                                                                                     (), 0, count);
            ExtendedBlock      block         = locations.Get(0).GetBlock();
            DataNode           dn            = cluster.GetDataNodes()[0];
            BlockLocalPathInfo localPathInfo = dn.GetBlockLocalPathInfo(block, null);
            FilePath           metafile      = new FilePath(localPathInfo.GetMetaPath());

            NUnit.Framework.Assert.IsTrue(metafile.Exists());
            // reduce the block meta file size
            RandomAccessFile raf = new RandomAccessFile(metafile, "rw");

            raf.SetLength(metafile.Length() - 20);
            raf.Close();
            // restart the DN so the replica becomes RWR
            MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
            cluster.RestartDataNode(dnProp, true);
            // try to recover the lease
            DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster
                                                                                         .GetConfiguration(0));

            count = 0;
            while (++count < 10 && !newdfs.RecoverLease(file))
            {
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.IsTrue("File should be closed", newdfs.RecoverLease(file));
        }
Example #18
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="System.TypeLoadException"/>
        public virtual void TestGetJobStatus()
        {
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient  client = new JobClient(mr.GetConfig());
                RunningJob rj     = client.SubmitJob(job);
                JobID      jobId  = rj.GetID();
                // The following asserts read JobStatus twice and ensure the returned
                // JobStatus objects correspond to the same Job.
                NUnit.Framework.Assert.AreEqual("Expected matching JobIDs", jobId, ((JobID)client
                                                                                    .GetJob(jobId).GetJobStatus().GetJobID()));
                NUnit.Framework.Assert.AreEqual("Expected matching startTimes", rj.GetJobStatus()
                                                .GetStartTime(), client.GetJob(jobId).GetJobStatus().GetStartTime());
            }
            finally
            {
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }
Example #19
        /// <exception cref="System.Exception"/>
        public virtual void TestTimeoutMetric()
        {
            Configuration              conf    = new HdfsConfiguration();
            Path                       path    = new Path("/test");
            MiniDFSCluster             cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            IList <FSDataOutputStream> streams = Lists.NewArrayList();

            try
            {
                FSDataOutputStream    @out     = cluster.GetFileSystem().Create(path, (short)2);
                DataNodeFaultInjector injector = Org.Mockito.Mockito.Mock <DataNodeFaultInjector>(
                    );
                Org.Mockito.Mockito.DoThrow(new IOException("mock IOException")).When(injector).WriteBlockAfterFlush
                    ();
                DataNodeFaultInjector.instance = injector;
                streams.AddItem(@out);
                @out.WriteBytes("old gs data\n");
                @out.Hflush();
                /* Test the metric. */
                MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(cluster.GetDataNodes()
                                                                           [0].GetMetrics().Name());
                MetricsAsserts.AssertCounter("DatanodeNetworkErrors", 1L, dnMetrics);
                /* Test JMX datanode network counts. */
                MBeanServer mbs        = ManagementFactory.GetPlatformMBeanServer();
                ObjectName  mxbeanName = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo"
                                                        );
                object dnc    = mbs.GetAttribute(mxbeanName, "DatanodeNetworkCounts");
                string allDnc = dnc.ToString();
                NUnit.Framework.Assert.IsTrue("expected to see loopback address", allDnc.IndexOf(
                                                  "127.0.0.1") >= 0);
                NUnit.Framework.Assert.IsTrue("expected to see networkErrors", allDnc.IndexOf("networkErrors"
                                                                                              ) >= 0);
            }
            finally
            {
                IOUtils.Cleanup(Log, Sharpen.Collections.ToArray(streams, new IDisposable[0]));
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                DataNodeFaultInjector.instance = new DataNodeFaultInjector();
            }
        }
Example #20
        /// <exception cref="System.IO.IOException"/>
        private void TestDataNodeRedirect(Path path)
        {
            // Create the file
            if (hdfs.Exists(path))
            {
                hdfs.Delete(path, true);
            }
            FSDataOutputStream @out = hdfs.Create(path, (short)1);

            @out.WriteBytes("0123456789");
            @out.Close();
            // Get the path's block location so we can determine
            // if we were redirected to the right DN.
            BlockLocation[] locations = hdfs.GetFileBlockLocations(path, 0, 10);
            string          xferAddr  = locations[0].GetNames()[0];
            // Connect to the NN to get redirected
            Uri u = hftpFs.GetNamenodeURL("/data" + ServletUtil.EncodePath(path.ToUri().GetPath
                                                                               ()), "ugi=userx,groupy");
            HttpURLConnection conn = (HttpURLConnection)u.OpenConnection();

            HttpURLConnection.SetFollowRedirects(true);
            conn.Connect();
            conn.GetInputStream();
            bool @checked = false;

            // Find the datanode that has the block according to locations
            // and check that the URL was redirected to this DN's info port
            foreach (DataNode node in cluster.GetDataNodes())
            {
                DatanodeRegistration dnR = DataNodeTestUtils.GetDNRegistrationForBP(node, blockPoolId
                                                                                    );
                if (dnR.GetXferAddr().Equals(xferAddr))
                {
                    @checked = true;
                    NUnit.Framework.Assert.AreEqual(dnR.GetInfoPort(), conn.GetURL().Port);
                }
            }
            NUnit.Framework.Assert.IsTrue("The test never checked that location of " + "the block and hftp desitnation are the same"
                                          , @checked);
        }
Example #21
        /// <exception cref="System.IO.IOException"/>
        private void DoWriteAndAbort(DistributedFileSystem fs, Path path)
        {
            fs.Mkdirs(path);
            fs.AllowSnapshot(path);
            DFSTestUtil.CreateFile(fs, new Path("/test/test1"), 100, (short)2, 100024L);
            DFSTestUtil.CreateFile(fs, new Path("/test/test2"), 100, (short)2, 100024L);
            Path file = new Path("/test/test/test2");
            FSDataOutputStream @out = fs.Create(file);

            for (int i = 0; i < 2; i++)
            {
                long count = 0;
                while (count < 1048576)
                {
                    @out.WriteBytes("hell");
                    count += 4;
                }
            }
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                        .UpdateLength));
            DFSTestUtil.AbortStream((DFSOutputStream)@out.GetWrappedStream());
            Path file2 = new Path("/test/test/test3");
            FSDataOutputStream out2 = fs.Create(file2);

            for (int i_1 = 0; i_1 < 2; i_1++)
            {
                long count = 0;
                while (count < 1048576)
                {
                    out2.WriteBytes("hell");
                    count += 4;
                }
            }
            ((DFSOutputStream)out2.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                        .UpdateLength));
            DFSTestUtil.AbortStream((DFSOutputStream)out2.GetWrappedStream());
            fs.CreateSnapshot(path, "s1");
        }
Example #22
        public virtual void TestAddBlockUC()
        {
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path file1 = new Path("/file1");

            DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
            FSDataOutputStream @out = null;

            try
            {
                // append files without closing the streams
                @out = fs.Append(file1);
                string appendContent = "appending-content";
                @out.WriteBytes(appendContent);
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                            .UpdateLength));
                // restart NN
                cluster.RestartNameNode(true);
                FSDirectory           fsdir      = cluster.GetNamesystem().GetFSDirectory();
                INodeFile             fileNode   = fsdir.GetINode4Write(file1.ToString()).AsFile();
                BlockInfoContiguous[] fileBlocks = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(2, fileBlocks.Length);
                NUnit.Framework.Assert.AreEqual(Blocksize, fileBlocks[0].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete, fileBlocks
                                                [0].GetBlockUCState());
                NUnit.Framework.Assert.AreEqual(appendContent.Length - 1, fileBlocks[1].GetNumBytes
                                                    ());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction
                                                , fileBlocks[1].GetBlockUCState());
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
            }
        }
Example #23
        /// <exception cref="System.IO.IOException"/>
        private static void CreateWordsFile(Path inpFile, Configuration conf)
        {
            FileSystem fs = inpFile.GetFileSystem(conf);

            if (fs.Exists(inpFile))
            {
                return;
            }
            FSDataOutputStream @out = fs.Create(inpFile);

            try
            {
                // 1024*4 unique words --- repeated 5 times => 5*2K words
                int    Replicas        = 5;
                int    Numlines        = 1024;
                int    Numwordsperline = 4;
                string Word            = "zymurgy";
                // 7 bytes + 4 id bytes
                Formatter fmt = new Formatter(new StringBuilder());
                for (int i = 0; i < Replicas; i++)
                {
                    for (int j = 1; j <= Numlines * Numwordsperline; j += Numwordsperline)
                    {
                        ((StringBuilder)fmt.Out()).Length = 0;
                        for (int k = 0; k < Numwordsperline; ++k)
                        {
                            fmt.Format("%s%04d ", Word, j + k);
                        }
                        ((StringBuilder)fmt.Out()).Append("\n");
                        @out.WriteBytes(fmt.ToString());
                    }
                }
            }
            finally
            {
                @out.Close();
            }
        }
Example #24
        public virtual void TestLeaseAfterRenameAndRecreate()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                Path   path1     = new Path("/test-file");
                string contents1 = "contents1";
                Path   path2     = new Path("/test-file-new-location");
                string contents2 = "contents2";
                // open a file to get a lease
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream out1 = fs.Create(path1);
                out1.WriteBytes(contents1);
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, path1));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                DistributedFileSystem fs2 = (DistributedFileSystem)FileSystem.NewInstance(fs.GetUri
                                                                                              (), fs.GetConf());
                fs2.Rename(path1, path2);
                FSDataOutputStream out2 = fs2.Create(path1);
                out2.WriteBytes(contents2);
                out2.Close();
                // The first file should still be open and valid
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, path2));
                out1.Close();
                // Contents should be as expected
                DistributedFileSystem fs3 = (DistributedFileSystem)FileSystem.NewInstance(fs.GetUri
                                                                                              (), fs.GetConf());
                NUnit.Framework.Assert.AreEqual(contents1, DFSTestUtil.ReadFile(fs3, path2));
                NUnit.Framework.Assert.AreEqual(contents2, DFSTestUtil.ReadFile(fs3, path1));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #25
 public virtual void TestFileNameEncoding()
 {
     foreach (Path p in TestPaths)
     {
         // Create and access the path (data and streamFile servlets)
         FSDataOutputStream @out = hdfs.Create(p, true);
         @out.WriteBytes("0123456789");
         @out.Close();
         FSDataInputStream @in = hftpFs.Open(p);
         NUnit.Framework.Assert.AreEqual('0', @in.Read());
         @in.Close();
         // Check the file status matches the path. Hftp returns a FileStatus
         // with the entire URI, extract the path part.
         NUnit.Framework.Assert.AreEqual(p, new Path(hftpFs.GetFileStatus(p).GetPath().ToUri
                                                         ().GetPath()));
         // Test list status (listPath servlet)
         NUnit.Framework.Assert.AreEqual(1, hftpFs.ListStatus(p).Length);
         // Test content summary (contentSummary servlet)
         NUnit.Framework.Assert.IsNotNull("No content summary", hftpFs.GetContentSummary(p
                                                                                         ));
         // Test checksums (fileChecksum and getFileChecksum servlets)
         NUnit.Framework.Assert.IsNotNull("No file checksum", hftpFs.GetFileChecksum(p));
     }
 }
Example #26
        /// <summary>test JobConf</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestNetworkedJob()
        {
            // mock creation
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient              client     = new JobClient(mr.GetConfig());
                RunningJob             rj         = client.SubmitJob(job);
                JobID                  jobId      = rj.GetID();
                JobClient.NetworkedJob runningJob = (JobClient.NetworkedJob)client.GetJob(jobId);
                runningJob.SetJobPriority(JobPriority.High.ToString());
                // test getters
                NUnit.Framework.Assert.IsTrue(runningJob.GetConfiguration().ToString().EndsWith("0001/job.xml"
                                                                                                ));
                NUnit.Framework.Assert.AreEqual(runningJob.GetID(), jobId);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobID(), jobId.ToString());
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobName(), "N/A");
                NUnit.Framework.Assert.IsTrue(runningJob.GetJobFile().EndsWith(".staging/" + runningJob
                                                                               .GetJobID() + "/job.xml"));
                NUnit.Framework.Assert.IsTrue(runningJob.GetTrackingURL().Length > 0);
                NUnit.Framework.Assert.IsTrue(runningJob.MapProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.ReduceProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.CleanupProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.SetupProgress() == 0.0f);
                TaskCompletionEvent[] tce = runningJob.GetTaskCompletionEvents(0);
                NUnit.Framework.Assert.AreEqual(tce.Length, 0);
                NUnit.Framework.Assert.AreEqual(runningJob.GetHistoryUrl(), string.Empty);
                NUnit.Framework.Assert.IsFalse(runningJob.IsRetired());
                NUnit.Framework.Assert.AreEqual(runningJob.GetFailureInfo(), string.Empty);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobStatus().GetJobName(), "N/A");
                NUnit.Framework.Assert.AreEqual(client.GetMapTaskReports(jobId).Length, 0);
                try
                {
                    client.GetSetupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_SETUP");
                }
                try
                {
                    client.GetCleanupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_CLEANUP");
                }
                NUnit.Framework.Assert.AreEqual(client.GetReduceTaskReports(jobId).Length, 0);
                // test ClusterStatus
                ClusterStatus status = client.GetClusterStatus(true);
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames().Count, 2);
                // this method is not implemented and always returns an empty array or null
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackers(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackerNames().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetMaxMapTasks(), 20);
                NUnit.Framework.Assert.AreEqual(status.GetMaxReduceTasks(), 4);
                NUnit.Framework.Assert.AreEqual(status.GetNumExcludedNodes(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetReduceTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetTaskTrackers(), 2);
                NUnit.Framework.Assert.AreEqual(status.GetTTExpiryInterval(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetGraylistedTrackers(), 0);
                // test read and write
                ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
                status.Write(new DataOutputStream(dataOut));
                ClusterStatus status2 = new ClusterStatus();
                status2.ReadFields(new DataInputStream(new ByteArrayInputStream(dataOut.ToByteArray
                                                                                    ())));
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames(), status2.GetActiveTrackerNames
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo(), status2.GetBlackListedTrackersInfo
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), status2.GetMapTasks());
                try
                {
                }
                catch (RuntimeException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.EndsWith("not found on CLASSPATH"));
                }
                // test taskStatusfilter
                JobClient.SetTaskOutputFilter(job, JobClient.TaskStatusFilter.All);
                NUnit.Framework.Assert.AreEqual(JobClient.GetTaskOutputFilter(job), JobClient.TaskStatusFilter
                                                .All);
                // runningJob.setJobPriority(JobPriority.HIGH.name());
                // test default map
                NUnit.Framework.Assert.AreEqual(client.GetDefaultMaps(), 20);
                NUnit.Framework.Assert.AreEqual(client.GetDefaultReduces(), 4);
                NUnit.Framework.Assert.AreEqual(client.GetSystemDir().GetName(), "jobSubmitDir");
                // test queue information
                JobQueueInfo[] rootQueueInfo = client.GetRootQueues();
                NUnit.Framework.Assert.AreEqual(rootQueueInfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(rootQueueInfo[0].GetQueueName(), "default");
                JobQueueInfo[] qinfo = client.GetQueues();
                NUnit.Framework.Assert.AreEqual(qinfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(qinfo[0].GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(client.GetChildQueues("default").Length, 0);
                NUnit.Framework.Assert.AreEqual(client.GetJobsFromQueue("default").Length, 1);
                NUnit.Framework.Assert.IsTrue(client.GetJobsFromQueue("default")[0].GetJobFile().
                                              EndsWith("/job.xml"));
                JobQueueInfo qi = client.GetQueueInfo("default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueState(), "running");
                QueueAclsInfo[] aai = client.GetQueueAclsForCurrentUser();
                NUnit.Framework.Assert.AreEqual(aai.Length, 2);
                NUnit.Framework.Assert.AreEqual(aai[0].GetQueueName(), "root");
                NUnit.Framework.Assert.AreEqual(aai[1].GetQueueName(), "default");
                // test token
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = client.
                                                                                           GetDelegationToken(new Text(UserGroupInformation.GetCurrentUser().GetShortUserName
                                                                                                                           ()));
                NUnit.Framework.Assert.AreEqual(token.GetKind().ToString(), "RM_DELEGATION_TOKEN"
                                                );
                // test JobClient
                // The following asserts read JobStatus twice and ensure the returned
                // JobStatus objects correspond to the same Job.
                NUnit.Framework.Assert.AreEqual("Expected matching JobIDs", jobId, ((JobID)client
                                                                                    .GetJob(jobId).GetJobStatus().GetJobID()));
                NUnit.Framework.Assert.AreEqual("Expected matching startTimes", rj.GetJobStatus()
                                                .GetStartTime(), client.GetJob(jobId).GetJobStatus().GetStartTime());
            }
            finally
            {
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }
Example #27
        public virtual void TestMultiAppend2()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            DistributedFileSystem fs      = null;
            string hello = "hello\n";

            try
            {
                fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes(hello);
                @out.Close();
                // stop one datanode
                MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
                string dnAddress = dnProp.datanode.GetXferAddress().ToString();
                if (dnAddress.StartsWith("/"))
                {
                    dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
                }
                // append again to bump genstamps
                for (int i = 0; i < 2; i++)
                {
                    @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                     null);
                    @out.WriteBytes(hello);
                    @out.Close();
                }
                // re-open the file so its last block goes back to the under-construction state
                @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                 null);
                cluster.RestartDataNode(dnProp, true);
                // wait till the block report comes
                Sharpen.Thread.Sleep(2000);
                @out.WriteBytes(hello);
                @out.Close();
                // check the block locations
                LocatedBlocks blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block.GetBlockSize());
                }
                StringBuilder sb = new StringBuilder();
                for (int i_1 = 0; i_1 < 4; i_1++)
                {
                    sb.Append(hello);
                }
                byte[] content = Sharpen.Runtime.GetBytesForString(sb.ToString());
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                // restart namenode to make sure the editlog can be properly applied
                cluster.RestartNameNode(true);
                cluster.WaitActive();
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block_1 in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block_1.GetBlockSize());
                }
            }
            finally
            {
                IOUtils.CloseStream(fs);
                cluster.Shutdown();
            }
        }
Example #28
        public virtual void TestMigrateOpenFileToArchival()
        {
            Log.Info("testMigrateOpenFileToArchival");
            Path fooDir = new Path("/foo");
            IDictionary <Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();

            policyMap[fooDir] = Cold;
            TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
                Arrays.AsList(fooDir), null, BlockSize, null, policyMap);
            TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme
                                                               (DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
            TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(this, clusterScheme
                                                                                     , nsScheme);
            test.SetupCluster();
            // create an open file
            Banner("writing to file /foo/bar");
            Path barFile = new Path(fooDir, "bar");

            DFSTestUtil.CreateFile(test.dfs, barFile, BlockSize, (short)1, 0L);
            FSDataOutputStream @out = test.dfs.Append(barFile);

            @out.WriteBytes("hello, ");
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
            try
            {
                Banner("start data migration");
                test.SetStoragePolicy();
                // set /foo to COLD
                test.Migrate();
                // make sure the under construction block has not been migrated
                LocatedBlocks lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize
                                                                          );
                Log.Info("Locations: " + lbs);
                IList <LocatedBlock> blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish the migration, continue writing");
                // make sure the writing can continue
                @out.WriteBytes("world!");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
                IOUtils.Cleanup(Log, @out);
                lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
                Log.Info("Locations: " + lbs);
                blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish writing, starting reading");
                // check the content of /foo/bar
                FSDataInputStream @in = test.dfs.Open(barFile);
                byte[]            buf = new byte[13];
                // read from offset 1024
                @in.ReadFully(BlockSize, buf, 0, buf.Length);
                IOUtils.Cleanup(Log, @in);
                NUnit.Framework.Assert.AreEqual("hello, world!", Sharpen.Runtime.GetStringForBytes
                                                    (buf));
            }
            finally
            {
                test.ShutdownCluster();
            }
        }
Example #29
        public virtual void TestLeaseAfterRename()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                Path p  = new Path("/test-file");
                Path d  = new Path("/test-d");
                Path d2 = new Path("/test-d-other");
                // open a file to get a lease
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(p);
                @out.WriteBytes("something");
                //out.hsync();
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, p));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // just to ensure first fs doesn't have any logic to twiddle leases
                DistributedFileSystem fs2 = (DistributedFileSystem)FileSystem.NewInstance(fs.GetUri
                                                                                              (), fs.GetConf());
                // rename the file into an existing dir
                Log.Info("DMS: rename file into dir");
                Path pRenamed = new Path(d, p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(p, pRenamed);
                NUnit.Framework.Assert.IsFalse(p + " exists", fs2.Exists(p));
                NUnit.Framework.Assert.IsTrue(pRenamed + " not found", fs2.Exists(pRenamed));
                NUnit.Framework.Assert.IsFalse("has lease for " + p, HasLease(cluster, p));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                   ));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to a new non-existent dir
                Log.Info("DMS: rename parent dir");
                Path pRenamedAgain = new Path(d2, pRenamed.GetName());
                fs2.Rename(d, d2);
                // src gone
                NUnit.Framework.Assert.IsFalse(d + " exists", fs2.Exists(d));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to existing dir
                // NOTE: rename w/o options moves paths into existing dir
                Log.Info("DMS: rename parent again");
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(new Path(d, d2.GetName()), p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(d2, d);
                // src gone
                NUnit.Framework.Assert.IsFalse(d2 + " exists", fs2.Exists(d2));
                NUnit.Framework.Assert.IsFalse("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                    ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to non-existent dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d2, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d2, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " not found", fs2.Exists(pRenamed
                                                                                               .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to existing dir
                // NOTE: rename with options will not move paths into the existing dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " not found", fs2.Exists(pRenamed
                                                                                               .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #30
        /// <summary>test run from command line JobQueueClient</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestJobQueueClient()
        {
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;
            TextWriter          oldOut  = System.Console.Out;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient client = new JobClient(mr.GetConfig());
                client.SubmitJob(job);
                JobQueueClient        jobClient = new JobQueueClient(job);
                ByteArrayOutputStream bytes     = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg = new string[] { "-list" };
                jobClient.Run(arg);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg1 = new string[] { "-showacls" };
                jobClient.Run(arg1);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue acls for user :"******"root  ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"
                                                                        ));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("default  ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"
                                                                        ));
                // test for info and default queue
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg2 = new string[] { "-info", "default" };
                jobClient.Run(arg2);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Scheduling Info"));
                // test for info , default queue and jobs
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg3 = new string[] { "-info", "default", "-showJobs" };
                jobClient.Run(arg3);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Scheduling Info"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("job_1"));
                string[] arg4 = new string[] {  };
                jobClient.Run(arg4);
            }
            finally
            {
                Runtime.SetOut(oldOut);
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }