Example #1
        public virtual void TestMultiAppend2()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            DistributedFileSystem fs      = null;
            string hello = "hello\n";

            try
            {
                fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes(hello);
                @out.Close();
                // stop one datanode
                MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
                string dnAddress = dnProp.datanode.GetXferAddress().ToString();
                if (dnAddress.StartsWith("/"))
                {
                    dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
                }
                // append again to bump genstamps
                for (int i = 0; i < 2; i++)
                {
                    @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                     null);
                    @out.WriteBytes(hello);
                    @out.Close();
                }
                // re-open the file so that its last block is left in the under-construction state
                @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                 null);
                cluster.RestartDataNode(dnProp, true);
                // wait until the block report arrives
                Sharpen.Thread.Sleep(2000);
                @out.WriteBytes(hello);
                @out.Close();
                // check the block locations
                LocatedBlocks blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block.GetBlockSize());
                }
                StringBuilder sb = new StringBuilder();
                for (int i_1 = 0; i_1 < 4; i_1++)
                {
                    sb.Append(hello);
                }
                byte[] content = Sharpen.Runtime.GetBytesForString(sb.ToString());
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                // restart namenode to make sure the editlog can be properly applied
                cluster.RestartNameNode(true);
                cluster.WaitActive();
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block_1 in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block_1.GetBlockSize());
                }
            }
            finally
            {
                IOUtils.CloseStream(fs);
                cluster.Shutdown();
            }
        }
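A note on the fixed Sharpen.Thread.Sleep(2000) above: sleeping for a hard-coded interval before asserting on block locations is a common source of flakiness. A hedged alternative is to poll until the NameNode reports the expected block count. The sketch below uses only the DistributedFileSystem and LocatedBlocks calls already shown in this example; the helper name is illustrative, not part of the Hadoop API.

        private static void WaitForBlockCount(DistributedFileSystem fs, Path path, int expected)
        {
            // Poll up to ~5 seconds (50 x 100 ms) for the expected block count.
            for (int attempt = 0; attempt < 50; attempt++)
            {
                LocatedBlocks lbs = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                if (lbs.GetLocatedBlocks().Count == expected)
                {
                    return;
                }
                Sharpen.Thread.Sleep(100);
            }
            NUnit.Framework.Assert.Fail("Timed out waiting for " + expected + " blocks");
        }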
Example #2
        /// <summary>
        /// Test that an append with no locations fails with an exception
        /// showing insufficient locations.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendInsufficientLocations()
        {
            Configuration conf = new Configuration();

            // lower heartbeat interval for fast recognition of DN
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 3000);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file with replication 2
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
                // Shut down all DNs that have the last block location for the file
                LocatedBlocks lbs = fileSystem.dfs.GetNamenode().GetBlockLocations("/testAppend",
                                                                                   0, long.MaxValue);
                IList <DataNode> dnsOfCluster     = cluster.GetDataNodes();
                DatanodeInfo[]   dnsWithLocations = lbs.GetLastLocatedBlock().GetLocations();
                foreach (DataNode dn in dnsOfCluster)
                {
                    foreach (DatanodeInfo loc in dnsWithLocations)
                    {
                        if (dn.GetDatanodeId().Equals(loc))
                        {
                            dn.Shutdown();
                            DFSTestUtil.WaitForDatanodeDeath(dn);
                        }
                    }
                }
                // Wait till 0 replication is recognized
                DFSTestUtil.WaitReplication(fileSystem, f, (short)0);
                // Append to the file. At this point the remaining live DNs
                // do not hold the block.
                try
                {
                    fileSystem.Append(f);
                    NUnit.Framework.Assert.Fail("Append should fail because insufficient locations");
                }
                catch (IOException e)
                {
                    Log.Info("Expected exception: ", e);
                }
                FSDirectory dir   = cluster.GetNamesystem().GetFSDirectory();
                INodeFile   inode = INodeFile.ValueOf(dir.GetINode("/testAppend"), "/testAppend");
                NUnit.Framework.Assert.IsTrue("File should remain closed", !inode.IsUnderConstruction
                                                  ());
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
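The nested loops that shut down the DataNodes hosting the last block generalize into a small helper. This is a sketch assuming the same MiniDFSCluster, DataNode, and DatanodeInfo APIs used in the test above; the helper name is illustrative.

        private static void ShutdownDataNodesHostingLastBlock(MiniDFSCluster cluster, LocatedBlocks lbs)
        {
            // A DataNode hosts the last block iff its ID matches one of the
            // block's recorded locations.
            DatanodeInfo[] locs = lbs.GetLastLocatedBlock().GetLocations();
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                foreach (DatanodeInfo loc in locs)
                {
                    if (dn.GetDatanodeId().Equals(loc))
                    {
                        dn.Shutdown();
                        DFSTestUtil.WaitForDatanodeDeath(dn);
                    }
                }
            }
        }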
Example #3
 //
 // writes to file but does not close it
 //
 /// <exception cref="System.IO.IOException"/>
 private void WriteFile(FSDataOutputStream stm)
 {
     byte[] buffer = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
     stm.Write(buffer);
 }
        /// <summary>
        /// Regression test for HDFS-4799, a case where, upon restart, if there
        /// were RWR replicas with out-of-date genstamps, the NN could accidentally
        /// delete good replicas instead of the bad replicas.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRWRInvalidation()
        {
            Configuration conf = new HdfsConfiguration();

            // Set the deletion policy to be randomized rather than the default.
            // The default is based on disk space, which isn't controllable
            // in the context of the test, whereas a random one is more accurate
            // to what is seen in real clusters (nodes have random amounts of free
            // space)
            conf.SetClass(DFSConfigKeys.DfsBlockReplicatorClassnameKey, typeof(TestDNFencing.RandomDeleterPolicy
                                                                               ), typeof(BlockPlacementPolicy));
            // Speed up the test a bit with faster heartbeats.
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // Test with a bunch of separate files, since otherwise the test may
            // pass just due to "good luck", even if a bug is present.
            IList <Path> testPaths = Lists.NewArrayList();

            for (int i = 0; i < 10; i++)
            {
                testPaths.AddItem(new Path("/test" + i));
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                IList <FSDataOutputStream> streams = Lists.NewArrayList();
                try
                {
                    // Open the test files and write some data to each
                    foreach (Path path in testPaths)
                    {
                        FSDataOutputStream @out = cluster.GetFileSystem().Create(path, (short)2);
                        streams.AddItem(@out);
                        @out.WriteBytes("old gs data\n");
                        @out.Hflush();
                    }
                    // Shutdown one of the nodes in the pipeline
                    MiniDFSCluster.DataNodeProperties oldGenstampNode = cluster.StopDataNode(0);
                    // Write some more data and flush again. This data will only
                    // be in the latter genstamp copy of the blocks.
                    for (int i_1 = 0; i_1 < streams.Count; i_1++)
                    {
                        Path path_1             = testPaths[i_1];
                        FSDataOutputStream @out = streams[i_1];
                        @out.WriteBytes("new gs data\n");
                        @out.Hflush();
                        // Set replication so that only one node is necessary for this block,
                        // and close it.
                        cluster.GetFileSystem().SetReplication(path_1, (short)1);
                        @out.Close();
                    }
                    // Upon restart, there will be two replicas, one with an old genstamp
                    // and one current copy. This test wants to ensure that the old genstamp
                    // copy is the one that is deleted.
                    Log.Info("=========================== restarting cluster");
                    MiniDFSCluster.DataNodeProperties otherNode = cluster.StopDataNode(0);
                    cluster.RestartNameNode();
                    // Restart the datanode with the corrupt replica first.
                    cluster.RestartDataNode(oldGenstampNode);
                    cluster.WaitActive();
                    // Then the other node
                    cluster.RestartDataNode(otherNode);
                    cluster.WaitActive();
                    // Compute and send invalidations, waiting until they're fully processed.
                    cluster.GetNameNode().GetNamesystem().GetBlockManager().ComputeInvalidateWork(2);
                    cluster.TriggerHeartbeats();
                    HATestUtil.WaitForDNDeletions(cluster);
                    cluster.TriggerDeletionReports();
                    // Make sure we can still read the blocks.
                    foreach (Path path_2 in testPaths)
                    {
                        string ret = DFSTestUtil.ReadFile(cluster.GetFileSystem(), path_2);
                        NUnit.Framework.Assert.AreEqual("old gs data\n" + "new gs data\n", ret);
                    }
                }
                finally
                {
                    IOUtils.Cleanup(Log, Sharpen.Collections.ToArray(streams, new IDisposable[0]));
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
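This test leans on Hflush(): data flushed that way is visible to new readers before the stream is closed, even though the file is still under construction. A minimal sketch of that visibility contract, assuming fs is the cluster's DistributedFileSystem as in the tests above (the /hflush-demo path is made up for illustration; GetVisibleLength() is the same call used in Example #7 below):

            FSDataOutputStream stream = fs.Create(new Path("/hflush-demo"));
            stream.WriteBytes("visible before close\n");
            stream.Hflush();
            // A fresh reader sees the flushed bytes although the writer is still open.
            HdfsDataInputStream reader = (HdfsDataInputStream)fs.Open(new Path("/hflush-demo"));
            NUnit.Framework.Assert.AreEqual("visible before close\n".Length, reader.GetVisibleLength());
            reader.Close();
            stream.Close();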
Example #5
 /// <exception cref="System.IO.IOException"/>
 private static void WriteSplitHeader(FSDataOutputStream @out)
 {
     @out.Write(SplitFileHeader);
     @out.WriteInt(splitVersion);
 }
 OrcRecordUpdater(Path path,
                  AcidOutputFormat.Options options)
 {
     this.options = options;
     this.bucket.set(options.getBucket());
     this.path = AcidUtils.createFilename(path, options);
     FileSystem fs = options.getFilesystem();
     if (fs == null)
     {
         fs = path.getFileSystem(options.getConfiguration());
     }
     this.fs = fs;
     try
     {
         FSDataOutputStream strm = fs.create(new Path(path, ACID_FORMAT), false);
         strm.writeInt(ORC_ACID_VERSION);
         strm.close();
     }
     catch (IOException ioe)
     {
         if (LOG.isDebugEnabled())
         {
             LOG.debug("Failed to create " + path + "/" + ACID_FORMAT + " with " +
                 ioe);
         }
     }
     if (options.getMinimumTransactionId() != options.getMaximumTransactionId()
         && !options.isWritingBase())
     {
         flushLengths = fs.create(getSideFile(this.path), true, 8,
             options.getReporter());
     }
     else
     {
         flushLengths = null;
     }
     OrcFile.WriterOptions writerOptions = null;
     if (options is OrcOptions)
     {
         writerOptions = ((OrcOptions)options).getOrcOptions();
     }
     if (writerOptions == null)
     {
         writerOptions = OrcFile.writerOptions( /* options.getTableProperties(), */
             options.getConfiguration());
     }
     writerOptions.fileSystem(fs).callback(indexBuilder);
     if (!options.isWritingBase())
     {
         writerOptions.blockPadding(false);
         writerOptions.bufferSize(DELTA_BUFFER_SIZE);
         writerOptions.stripeSize(DELTA_STRIPE_SIZE);
     }
     rowInspector = (StructObjectInspector)options.getInspector();
     writerOptions.inspector(createEventSchema(findRecId(options.getInspector(),
         options.getRecordIdColumn())));
     this.writer = OrcFile.createWriter(this.path, writerOptions);
     item = new OrcStruct(FIELDS);
     item.setFieldValue(OPERATION, operation);
     item.setFieldValue(CURRENT_TRANSACTION, currentTransaction);
     item.setFieldValue(ORIGINAL_TRANSACTION, originalTransaction);
     item.setFieldValue(BUCKET, bucket);
     item.setFieldValue(ROW_ID, rowId);
 }
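The constructor above also illustrates a small idiom worth naming: a version-marker ("format") file written with overwrite=false, where the IOException is treated as "another writer already created it". A hedged sketch of just that idiom, reusing the ACID_FORMAT and ORC_ACID_VERSION constants referenced above (the helper name is illustrative):

 private static void writeFormatMarker(FileSystem fs, Path dir)
 {
     try
     {
         // overwrite=false: lose the race harmlessly if the marker already exists
         FSDataOutputStream strm = fs.create(new Path(dir, ACID_FORMAT), false);
         strm.writeInt(ORC_ACID_VERSION);
         strm.close();
     }
     catch (IOException ioe)
     {
         // another task already wrote the marker; nothing to do
         if (LOG.isDebugEnabled())
         {
             LOG.debug("Format marker already present in " + dir + ": " + ioe);
         }
     }
 }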
Example #7
        public virtual void TestLeaseAfterRename()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                Path p  = new Path("/test-file");
                Path d  = new Path("/test-d");
                Path d2 = new Path("/test-d-other");
                // open a file to get a lease
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(p);
                @out.WriteBytes("something");
                //out.hsync();
                NUnit.Framework.Assert.IsTrue(HasLease(cluster, p));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // just to ensure first fs doesn't have any logic to twiddle leases
                DistributedFileSystem fs2 = (DistributedFileSystem)FileSystem.NewInstance(fs.GetUri
                                                                                              (), fs.GetConf());
                // rename the file into an existing dir
                Log.Info("DMS: rename file into dir");
                Path pRenamed = new Path(d, p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(p, pRenamed);
                NUnit.Framework.Assert.IsFalse(p + " exists", fs2.Exists(p));
                NUnit.Framework.Assert.IsTrue(pRenamed + " not found", fs2.Exists(pRenamed));
                NUnit.Framework.Assert.IsFalse("has lease for " + p, HasLease(cluster, p));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                   ));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to a new non-existent dir
                Log.Info("DMS: rename parent dir");
                Path pRenamedAgain = new Path(d2, pRenamed.GetName());
                fs2.Rename(d, d2);
                // src gone
                NUnit.Framework.Assert.IsFalse(d + " exists", fs2.Exists(d));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename the parent dir to existing dir
                // NOTE: rename w/o options moves paths into existing dir
                Log.Info("DMS: rename parent again");
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(new Path(d, d2.GetName()), p.GetName());
                fs2.Mkdirs(d);
                fs2.Rename(d2, d);
                // src gone
                NUnit.Framework.Assert.IsFalse(d2 + " exists", fs2.Exists(d2));
                NUnit.Framework.Assert.IsFalse("no lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                    ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to non-existent dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d2, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d2, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " exists", fs2.Exists(pRenamed
                                                                                            .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d2 + " not found", fs2.Exists(d2));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                // rename with opts to existing dir
                // NOTE: rename with options will not move paths into the existing dir
                pRenamed      = pRenamedAgain;
                pRenamedAgain = new Path(d, p.GetName());
                fs2.Rename(pRenamed.GetParent(), d, Options.Rename.Overwrite);
                // src gone
                NUnit.Framework.Assert.IsFalse(pRenamed.GetParent() + " exists", fs2.Exists(pRenamed
                                                                                            .GetParent()));
                NUnit.Framework.Assert.IsFalse("has lease for " + pRenamed, HasLease(cluster, pRenamed
                                                                                     ));
                // dst checks
                NUnit.Framework.Assert.IsTrue(d + " not found", fs2.Exists(d));
                NUnit.Framework.Assert.IsTrue(pRenamedAgain + " not found", fs2.Exists(pRenamedAgain
                                                                                       ));
                NUnit.Framework.Assert.IsTrue("no lease for " + pRenamedAgain, HasLease(cluster,
                                                                                        pRenamedAgain));
                NUnit.Framework.Assert.AreEqual(1, LeaseCount(cluster));
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
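HasLease and LeaseCount are helpers defined elsewhere in this test class. A plausible sketch of HasLease, built on the NameNodeAdapter.GetLeaseHolderForPath call that appears later in this example; the real helper may be implemented differently:

        private static bool HasLease(MiniDFSCluster cluster, Path src)
        {
            // A path has a lease iff the NameNode knows a lease holder for it.
            return NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode(),
                                                         src.ToString()) != null;
        }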
        /// <exception cref="System.Exception"/>
        public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
        {
            if (size < 0)
            {
                size = AppendTestUtil.NextInt(FileSize + 1);
            }
            //create a file
            string fileStr = "/hardLeaseRecovery";

            AppendTestUtil.Log.Info("filestr=" + fileStr);
            Path filePath          = new Path(fileStr);
            FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
            // write bytes into the file.
            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode
                                                                                   (), fileStr);

            NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder
                                           .Equals(HdfsServerConstants.NamenodeLeaseHolder));
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            // check visible length
            HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);

            NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
            @in.Close();
            if (doRename)
            {
                fileStr += ".renamed";
                Path renamedPath = new Path(fileStr);
                NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
                filePath = renamedPath;
            }
            // kill the lease renewal thread
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // Make sure the DNs don't send a heartbeat for a while, so the blocks
            // won't actually get completed during lease recovery.
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
            }
            // set the hard limit to be 1 second
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // Make sure lease recovery begins.
            Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
            CheckLease(fileStr, size);
            cluster.RestartNameNode(false);
            CheckLease(fileStr, size);
            // Let the DNs send heartbeats again.
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
            }
            cluster.WaitActive();
            // set the hard limit to be 1 second, to initiate lease recovery.
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // wait for lease recovery to complete
            LocatedBlocks locatedBlocks;

            do
            {
                Sharpen.Thread.Sleep(ShortLeasePeriod);
                locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
            }while (locatedBlocks.IsUnderConstruction());
            NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
            // make sure that the client can't write data anymore.
            try
            {
                stm.Write('b');
                stm.Hflush();
                NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expceted exception on write/hflush", e);
            }
            try
            {
                stm.Close();
                NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expected exception on close", e);
            }
            // verify data
            AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
        }
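HardLeaseRecoveryRestartHelper is parameterized so callers can exercise hard-limit lease recovery with and without an intervening rename. Hypothetical callers (the method names are illustrative, though the negative-size handling at the top of the helper supports exactly this use of -1 to request a random size):

        public virtual void TestHardLeaseRecoveryAfterNameNodeRestart()
        {
            HardLeaseRecoveryRestartHelper(false, -1);
        }

        public virtual void TestHardLeaseRecoveryWithRenameAfterNameNodeRestart()
        {
            HardLeaseRecoveryRestartHelper(true, -1);
        }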
        public virtual void TestFileCreationDeleteParent()
        {
            Configuration conf        = new HdfsConfiguration();
            int           MaxIdleTime = 2000; // 2s

            conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // create cluster
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = null;

            try
            {
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                int nnport = cluster.GetNameNodePort();
                // create file1.
                Path dir   = new Path("/foo");
                Path file1 = new Path(dir, "file1");
                FSDataOutputStream stm1 = TestFileCreation.CreateFile(fs, file1, 1);
                System.Console.Out.WriteLine("testFileCreationDeleteParent: " + "Created file " +
                                             file1);
                TestFileCreation.WriteFile(stm1, 1000);
                stm1.Hflush();
                // create file2.
                Path file2 = new Path("/file2");
                FSDataOutputStream stm2 = TestFileCreation.CreateFile(fs, file2, 1);
                System.Console.Out.WriteLine("testFileCreationDeleteParent: " + "Created file " +
                                             file2);
                TestFileCreation.WriteFile(stm2, 1000);
                stm2.Hflush();
                // rm dir
                fs.Delete(dir, true);
                // restart cluster with the same namenode port as before.
                // This ensures that leases are persisted in fsimage.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(2 * MaxIdleTime);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build
                              ();
                cluster.WaitActive();
                // restart cluster yet again. This triggers the code to read in
                // persistent leases from fsimage.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build
                              ();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(!fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                cluster.Shutdown();
            }
        }
Example #10
 /// <exception cref="System.IO.IOException"/>
 public Writer(Configuration conf, FSDataOutputStream @out, Type keyClass, Type valueClass
               , CompressionCodec codec, Counters.Counter writesCounter)
     : this(conf, @out, keyClass, valueClass, codec, writesCounter, false)
 {
 }
Example #11
        public virtual void TestSoftLeaseRecovery()
        {
            IDictionary <string, string[]> u2g_map = new Dictionary <string, string[]>(1);

            u2g_map[fakeUsername] = new string[] { fakeGroup };
            DFSTestUtil.UpdateConfWithFakeGroupMapping(conf, u2g_map);
            // Reset default lease periods
            cluster.SetLeasePeriod(HdfsConstants.LeaseSoftlimitPeriod, HdfsConstants.LeaseHardlimitPeriod
                                   );
            // create a file with a random name
            string filestr = "/foo" + AppendTestUtil.NextInt();

            AppendTestUtil.Log.Info("filestr=" + filestr);
            Path filepath          = new Path(filestr);
            FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            // write random number of bytes into it.
            int size = AppendTestUtil.NextInt(FileSize);

            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // set the soft limit to be 1 second so that the
            // namenode triggers lease recovery on next attempt to write-for-open.
            cluster.SetLeasePeriod(ShortLeasePeriod, LongLeasePeriod);
            {
                // try to re-open the file before closing the previous handle. This
                // should fail but will trigger lease recovery.
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(fakeUsername
                                                                                     , new string[] { fakeGroup });
                FileSystem dfs2 = DFSTestUtil.GetFileSystemAs(ugi, conf);
                bool       done = false;
                for (int i = 0; i < 10 && !done; i++)
                {
                    AppendTestUtil.Log.Info("i=" + i);
                    try
                    {
                        dfs2.Create(filepath, false, BufSize, ReplicationNum, BlockSize);
                        NUnit.Framework.Assert.Fail("Creation of an existing file should never succeed.");
                    }
                    catch (FileAlreadyExistsException)
                    {
                        done = true;
                    }
                    catch (AlreadyBeingCreatedException ex)
                    {
                        AppendTestUtil.Log.Info("GOOD! got " + ex.Message);
                    }
                    catch (IOException ioe)
                    {
                        AppendTestUtil.Log.Warn("UNEXPECTED IOException", ioe);
                    }
                    if (!done)
                    {
                        AppendTestUtil.Log.Info("sleep " + 5000 + "ms");
                        try
                        {
                            Sharpen.Thread.Sleep(5000);
                        }
                        catch (Exception)
                        {
                        }
                    }
                }
                NUnit.Framework.Assert.IsTrue(done);
            }
            AppendTestUtil.Log.Info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."
                                    );
            // verify that file-size matches
            long fileSize = dfs.GetFileStatus(filepath).GetLen();

            NUnit.Framework.Assert.IsTrue("File should be " + size + " bytes, but is actually "
                                          + " found to be " + fileSize + " bytes", fileSize == size);
            // verify data
            AppendTestUtil.Log.Info("File size is good. " + "Now validating data and sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
        }
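The size check above hand-builds its failure message around IsTrue. The message-first AreEqual overload used elsewhere in these tests (for example in Example #13 below) reports both values automatically; an equivalent, as a sketch:

            // Equivalent assertion using the message-first AreEqual overload seen
            // elsewhere in these tests; it prints expected and actual on failure.
            NUnit.Framework.Assert.AreEqual("File length mismatch for " + filepath,
                                            (long)size, fileSize);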
Example #12
        /// <summary>test JobConf</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestNetworkedJob()
        {
            // mock creation
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient              client     = new JobClient(mr.GetConfig());
                RunningJob             rj         = client.SubmitJob(job);
                JobID                  jobId      = rj.GetID();
                JobClient.NetworkedJob runningJob = (JobClient.NetworkedJob)client.GetJob(jobId);
                runningJob.SetJobPriority(JobPriority.High.ToString());
                // test getters
                NUnit.Framework.Assert.IsTrue(runningJob.GetConfiguration().ToString().EndsWith("0001/job.xml"
                                                                                                ));
                NUnit.Framework.Assert.AreEqual(runningJob.GetID(), jobId);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobID(), jobId.ToString());
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobName(), "N/A");
                NUnit.Framework.Assert.IsTrue(runningJob.GetJobFile().EndsWith(".staging/" + runningJob
                                                                               .GetJobID() + "/job.xml"));
                NUnit.Framework.Assert.IsTrue(runningJob.GetTrackingURL().Length > 0);
                NUnit.Framework.Assert.IsTrue(runningJob.MapProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.ReduceProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.CleanupProgress() == 0.0f);
                NUnit.Framework.Assert.IsTrue(runningJob.SetupProgress() == 0.0f);
                TaskCompletionEvent[] tce = runningJob.GetTaskCompletionEvents(0);
                NUnit.Framework.Assert.AreEqual(tce.Length, 0);
                NUnit.Framework.Assert.AreEqual(runningJob.GetHistoryUrl(), string.Empty);
                NUnit.Framework.Assert.IsFalse(runningJob.IsRetired());
                NUnit.Framework.Assert.AreEqual(runningJob.GetFailureInfo(), string.Empty);
                NUnit.Framework.Assert.AreEqual(runningJob.GetJobStatus().GetJobName(), "N/A");
                NUnit.Framework.Assert.AreEqual(client.GetMapTaskReports(jobId).Length, 0);
                try
                {
                    client.GetSetupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_SETUP");
                }
                try
                {
                    client.GetCleanupTaskReports(jobId);
                }
                catch (YarnRuntimeException e)
                {
                    NUnit.Framework.Assert.AreEqual(e.Message, "Unrecognized task type: JOB_CLEANUP");
                }
                NUnit.Framework.Assert.AreEqual(client.GetReduceTaskReports(jobId).Length, 0);
                // test ClusterStatus
                ClusterStatus status = client.GetClusterStatus(true);
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames().Count, 2);
                // this method is not implemented and always returns an empty array or null
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackers(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlacklistedTrackerNames().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo().Count, 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetMaxMapTasks(), 20);
                NUnit.Framework.Assert.AreEqual(status.GetMaxReduceTasks(), 4);
                NUnit.Framework.Assert.AreEqual(status.GetNumExcludedNodes(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetReduceTasks(), 1);
                NUnit.Framework.Assert.AreEqual(status.GetTaskTrackers(), 2);
                NUnit.Framework.Assert.AreEqual(status.GetTTExpiryInterval(), 0);
                NUnit.Framework.Assert.AreEqual(status.GetJobTrackerStatus(), Cluster.JobTrackerStatus
                                                .Running);
                NUnit.Framework.Assert.AreEqual(status.GetGraylistedTrackers(), 0);
                // test read and write
                ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
                status.Write(new DataOutputStream(dataOut));
                ClusterStatus status2 = new ClusterStatus();
                status2.ReadFields(new DataInputStream(new ByteArrayInputStream(dataOut.ToByteArray
                                                                                    ())));
                NUnit.Framework.Assert.AreEqual(status.GetActiveTrackerNames(), status2.GetActiveTrackerNames
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetBlackListedTrackersInfo(), status2.GetBlackListedTrackersInfo
                                                    ());
                NUnit.Framework.Assert.AreEqual(status.GetMapTasks(), status2.GetMapTasks());
                // NOTE: the body of this try block is empty, so the catch clause
                // below can never fire.
                try
                {
                }
                catch (RuntimeException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.EndsWith("not found on CLASSPATH"));
                }
                // test taskStatusfilter
                JobClient.SetTaskOutputFilter(job, JobClient.TaskStatusFilter.All);
                NUnit.Framework.Assert.AreEqual(JobClient.GetTaskOutputFilter(job), JobClient.TaskStatusFilter
                                                .All);
                // runningJob.setJobPriority(JobPriority.HIGH.name());
                // test default map
                NUnit.Framework.Assert.AreEqual(client.GetDefaultMaps(), 20);
                NUnit.Framework.Assert.AreEqual(client.GetDefaultReduces(), 4);
                NUnit.Framework.Assert.AreEqual(client.GetSystemDir().GetName(), "jobSubmitDir");
                // test queue information
                JobQueueInfo[] rootQueueInfo = client.GetRootQueues();
                NUnit.Framework.Assert.AreEqual(rootQueueInfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(rootQueueInfo[0].GetQueueName(), "default");
                JobQueueInfo[] qinfo = client.GetQueues();
                NUnit.Framework.Assert.AreEqual(qinfo.Length, 1);
                NUnit.Framework.Assert.AreEqual(qinfo[0].GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(client.GetChildQueues("default").Length, 0);
                NUnit.Framework.Assert.AreEqual(client.GetJobsFromQueue("default").Length, 1);
                NUnit.Framework.Assert.IsTrue(client.GetJobsFromQueue("default")[0].GetJobFile().
                                              EndsWith("/job.xml"));
                JobQueueInfo qi = client.GetQueueInfo("default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueName(), "default");
                NUnit.Framework.Assert.AreEqual(qi.GetQueueState(), "running");
                QueueAclsInfo[] aai = client.GetQueueAclsForCurrentUser();
                NUnit.Framework.Assert.AreEqual(aai.Length, 2);
                NUnit.Framework.Assert.AreEqual(aai[0].GetQueueName(), "root");
                NUnit.Framework.Assert.AreEqual(aai[1].GetQueueName(), "default");
                // test token
                Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = client.
                                                                                           GetDelegationToken(new Text(UserGroupInformation.GetCurrentUser().GetShortUserName
                                                                                                                           ()));
                NUnit.Framework.Assert.AreEqual(token.GetKind().ToString(), "RM_DELEGATION_TOKEN"
                                                );
                // test JobClient
                // The following asserts read JobStatus twice and ensure the returned
                // JobStatus objects correspond to the same Job.
                NUnit.Framework.Assert.AreEqual("Expected matching JobIDs", jobId, ((JobID)client
                                                                                    .GetJob(jobId).GetJobStatus().GetJobID()));
                NUnit.Framework.Assert.AreEqual("Expected matching startTimes", rj.GetJobStatus()
                                                .GetStartTime(), client.GetJob(jobId).GetJobStatus().GetStartTime());
            }
            finally
            {
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }
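A caution on this example: most two-argument assertions here are written as AreEqual(actual, expected), e.g. AreEqual(status.GetMaxMapTasks(), 20). Assuming the JUnit-style (expected, actual) convention that the message-first calls in this file follow, failure output will label the two values backwards. A corrected form of a few of the checks above, as a sketch:

                // (expected, actual) order, so failure messages label values correctly
                NUnit.Framework.Assert.AreEqual(2, status.GetActiveTrackerNames().Count);
                NUnit.Framework.Assert.AreEqual(20, status.GetMaxMapTasks());
                NUnit.Framework.Assert.AreEqual(4, status.GetMaxReduceTasks());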
Example #13
        /// <summary>test run from command line JobQueueClient</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestJobQueueClient()
        {
            MiniMRClientCluster mr      = null;
            FileSystem          fileSys = null;
            TextWriter          oldOut  = System.Console.Out;

            try
            {
                mr = CreateMiniClusterWithCapacityScheduler();
                JobConf job = new JobConf(mr.GetConfig());
                fileSys = FileSystem.Get(job);
                fileSys.Delete(testDir, true);
                FSDataOutputStream @out = fileSys.Create(inFile, true);
                @out.WriteBytes("This is a test file");
                @out.Close();
                FileInputFormat.SetInputPaths(job, inFile);
                FileOutputFormat.SetOutputPath(job, outDir);
                job.SetInputFormat(typeof(TextInputFormat));
                job.SetOutputFormat(typeof(TextOutputFormat));
                job.SetMapperClass(typeof(IdentityMapper));
                job.SetReducerClass(typeof(IdentityReducer));
                job.SetNumReduceTasks(0);
                JobClient client = new JobClient(mr.GetConfig());
                client.SubmitJob(job);
                JobQueueClient        jobClient = new JobQueueClient(job);
                ByteArrayOutputStream bytes     = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg = new string[] { "-list" };
                jobClient.Run(arg);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg1 = new string[] { "-showacls" };
                jobClient.Run(arg1);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue acls for user :"******"root  ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"
                                                                        ));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("default  ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"
                                                                        ));
                // test for info and default queue
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg2 = new string[] { "-info", "default" };
                jobClient.Run(arg2);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Scheduling Info"));
                // test for info, default queue and jobs
                bytes = new ByteArrayOutputStream();
                Runtime.SetOut(new TextWriter(bytes));
                string[] arg3 = new string[] { "-info", "default", "-showJobs" };
                jobClient.Run(arg3);
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue Name : default"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Queue State : running"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("Scheduling Info"));
                NUnit.Framework.Assert.IsTrue(bytes.ToString().Contains("job_1"));
                string[] arg4 = new string[] {  };
                jobClient.Run(arg4);
            }
            finally
            {
                Runtime.SetOut(oldOut);
                if (fileSys != null)
                {
                    fileSys.Delete(testDir, true);
                }
                if (mr != null)
                {
                    mr.Stop();
                }
            }
        }
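The capture-stdout idiom in this test (swap System.Console.Out for a buffer, run the client, assert on the buffer, restore the writer in finally) can be factored out. A hedged sketch using the same Sharpen Runtime.SetOut / TextWriter / ByteArrayOutputStream shims the test already uses; the helper name is illustrative:

        private static string CaptureStdout(JobQueueClient client, string[] args)
        {
            TextWriter oldOut = System.Console.Out;
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try
            {
                Runtime.SetOut(new TextWriter(bytes));
                client.Run(args);
                return bytes.ToString();
            }
            finally
            {
                // always restore the original writer, even if Run throws
                Runtime.SetOut(oldOut);
            }
        }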
        /// <summary>
        /// Test when a block's replica is removed from RBW folder in one of the
        /// datanode, namenode should ask to invalidate that corrupted block and
        /// schedule replication for one more replica for that under replicated block.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestBlockInvalidationWhenRBWReplicaMissedInDN()
        {
            // This test cannot pass on Windows due to file locking enforcement.  It will
            // reject the attempt to delete the block file from the RBW folder.
            Assume.AssumeTrue(!Path.Windows);
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 2);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 300);
            conf.SetLong(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            MiniDFSCluster     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FSDataOutputStream @out    = null;

            try
            {
                FSNamesystem namesystem = cluster.GetNamesystem();
                FileSystem   fs         = cluster.GetFileSystem();
                Path         testPath   = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
                @out = fs.Create(testPath, (short)2);
                @out.WriteBytes("HDFS-3157: " + testPath);
                @out.Hsync();
                cluster.StartDataNodes(conf, 1, true, null, null, null);
                string        bpid  = namesystem.GetBlockPoolId();
                ExtendedBlock blk   = DFSTestUtil.GetFirstBlock(fs, testPath);
                Block         block = blk.GetLocalBlock();
                DataNode      dn    = cluster.GetDataNodes()[0];
                // Delete partial block and its meta information from the RBW folder
                // of first datanode.
                FilePath blockFile = DataNodeTestUtils.GetBlockFile(dn, bpid, block);
                FilePath metaFile  = DataNodeTestUtils.GetMetaFile(dn, bpid, block);
                NUnit.Framework.Assert.IsTrue("Could not delete the block file from the RBW folder"
                                              , blockFile.Delete());
                NUnit.Framework.Assert.IsTrue("Could not delete the block meta file from the RBW folder"
                                              , metaFile.Delete());
                @out.Close();
                int liveReplicas = 0;
                while (true)
                {
                    if ((liveReplicas = CountReplicas(namesystem, blk).LiveReplicas()) < 2)
                    {
                        // This confirms we have a corrupt replica
                        Log.Info("Live Replicas after corruption: " + liveReplicas);
                        break;
                    }
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual("There should be less than 2 replicas in the " +
                                                "liveReplicasMap", 1, liveReplicas);
                while (true)
                {
                    if ((liveReplicas = CountReplicas(namesystem, blk).LiveReplicas()) > 1)
                    {
                        // Wait until the live replica count equals the replication factor
                        Log.Info("Live Replicas after Rereplication: " + liveReplicas);
                        break;
                    }
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual("There should be two live replicas", 2, liveReplicas
                                                );
                while (true)
                {
                    Sharpen.Thread.Sleep(100);
                    if (CountReplicas(namesystem, blk).CorruptReplicas() == 0)
                    {
                        Log.Info("Corrupt Replicas becomes 0");
                        break;
                    }
                }
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
                cluster.Shutdown();
            }
        }
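The three poll loops in this test share one shape: sleep, re-count replicas, stop on a condition. A generic local form, using only the standard .NET Func<bool> delegate plus the Sharpen sleep already used above (an illustrative helper, not a Hadoop API):

        private static void WaitUntil(Func<bool> condition)
        {
            // Re-check the condition every 100 ms until it holds.
            while (!condition())
            {
                Sharpen.Thread.Sleep(100);
            }
        }

        // e.g. the last loop above becomes:
        // WaitUntil(() => CountReplicas(namesystem, blk).CorruptReplicas() == 0);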
Example #15
        public virtual void TestCopyOnWrite()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            IPEndPoint     addr    = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient      client  = new DFSClient(addr, conf);

            try
            {
                // create a new file, write to it and close it.
                //
                Path file1             = new Path("/filestatus.dat");
                FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                WriteFile(stm);
                stm.Close();
                // Get a handle to the datanode
                DataNode[] dn = cluster.ListDataNodes();
                NUnit.Framework.Assert.IsTrue("There should be only one datanode but found " + dn
                                              .Length, dn.Length == 1);
                LocatedBlocks locations = client.GetNamenode().GetBlockLocations(file1.ToString()
                                                                                 , 0, long.MaxValue);
                IList <LocatedBlock> blocks = locations.GetLocatedBlocks();
                //
                // Create hard links for a few of the blocks
                //
                for (int i = 0; i < blocks.Count; i = i + 2)
                {
                    ExtendedBlock b = blocks[i].GetBlock();
                    FilePath      f = DataNodeTestUtils.GetFile(dn[0], b.GetBlockPoolId(), b.GetLocalBlock
                                                                    ().GetBlockId());
                    FilePath link = new FilePath(f.ToString() + ".link");
                    System.Console.Out.WriteLine("Creating hardlink for File " + f + " to " + link);
                    HardLink.CreateHardLink(f, link);
                }
                //
                // Detach all blocks. This should remove hardlinks (if any)
                //
                for (int i_1 = 0; i_1 < blocks.Count; i_1++)
                {
                    ExtendedBlock b = blocks[i_1].GetBlock();
                    System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
                    NUnit.Framework.Assert.IsTrue("Detaching block " + b + " should have returned true"
                                                  , DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
                }
                // Since the blocks were already detached earlier, these calls should
                // return false
                //
                for (int i_2 = 0; i_2 < blocks.Count; i_2++)
                {
                    ExtendedBlock b = blocks[i_2].GetBlock();
                    System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
                    NUnit.Framework.Assert.IsTrue("Detaching block " + b + " should have returned false"
                                                  , !DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
                }
            }
            finally
            {
                client.Close();
                fs.Close();
                cluster.Shutdown();
            }
        }
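UnlinkBlock is the copy-on-write operation under test: if a block file has extra hard links, detaching replaces the DataNode's copy with a private one. A hedged sketch of verifying that directly; HardLink.GetLinkCount is assumed to mirror Hadoop's HardLink.getLinkCount and should be treated as illustrative:

                // Assumes dn[0], b (an ExtendedBlock), and the utils from the test above.
                FilePath blockFile = DataNodeTestUtils.GetFile(dn[0], b.GetBlockPoolId(),
                                                               b.GetLocalBlock().GetBlockId());
                HardLink.CreateHardLink(blockFile, new FilePath(blockFile.ToString() + ".link"));
                NUnit.Framework.Assert.IsTrue(DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
                // After detaching, the DataNode's copy should be singly linked again.
                // (HardLink.GetLinkCount is an assumption; see the note above.)
                NUnit.Framework.Assert.AreEqual(1, HardLink.GetLinkCount(blockFile));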
Example #16
        public virtual void TestMultipleAppendsDuringCatchupTailing()
        {
            Configuration conf = new Configuration();

            // Set a lengthy edits tailing period, and explicit rolling, so we can
            // control the ingest of edits by the standby for this test.
            conf.Set(DFSConfigKeys.DfsHaTaileditsPeriodKey, "5000");
            conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, -1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
                                                                                 .SimpleHATopology()).NumDataNodes(3).Build();
            FileSystem fs = null;

            try
            {
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                Path   fileToAppend   = new Path("/FileToAppend");
                Path   fileToTruncate = new Path("/FileToTruncate");
                byte[] data           = new byte[1 << 16];
                DFSUtil.GetRandom().NextBytes(data);
                int[] appendPos   = AppendTestUtil.RandomFilePartition(data.Length, Count);
                int[] truncatePos = AppendTestUtil.RandomFilePartition(data.Length, 1);
                // Create file, write some data, and hflush so that the first
                // block is in the edit log prior to roll.
                FSDataOutputStream @out         = CreateAndHflush(fs, fileToAppend, data, appendPos[0]);
                FSDataOutputStream out4Truncate = CreateAndHflush(fs, fileToTruncate, data, data.
                                                                  Length);
                // Let the standby NameNode catch up with the creation of the file.
                cluster.GetNameNode(0).GetRpcServer().RollEditLog();
                cluster.GetNameNode(1).GetNamesystem().GetEditLogTailer().DoTailEdits();
                @out.Close();
                out4Truncate.Close();
                // Append and re-close a few times, so that many block entries are queued.
                for (int i = 0; i < Count; i++)
                {
                    int end = i < Count - 1 ? appendPos[i + 1] : data.Length;
                    @out = fs.Append(fileToAppend);
                    @out.Write(data, appendPos[i], end - appendPos[i]);
                    @out.Close();
                }
                bool isTruncateReady = fs.Truncate(fileToTruncate, truncatePos[0]);
                // Ensure that blocks have been reported to the SBN ahead of the edits
                // arriving.
                cluster.TriggerBlockReports();
                // Failover the current standby to active.
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                // Check that fsck doesn't detect any bad blocks on the SBN.
                int rc = ToolRunner.Run(new DFSck(cluster.GetConfiguration(1)), new string[] { "/"
                                                                                               , "-files", "-blocks" });
                NUnit.Framework.Assert.AreEqual(0, rc);
                NUnit.Framework.Assert.AreEqual("CorruptBlocks should be empty.", 0, cluster.GetNameNode
                                                    (1).GetNamesystem().GetCorruptReplicaBlocks());
                AppendTestUtil.CheckFullFile(fs, fileToAppend, data.Length, data, fileToAppend.ToString
                                                 ());
                if (!isTruncateReady)
                {
                    TestFileTruncate.CheckBlockRecovery(fileToTruncate, cluster.GetFileSystem(1));
                }
                AppendTestUtil.CheckFullFile(fs, fileToTruncate, truncatePos[0], data, fileToTruncate
                                             .ToString());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                }
                if (null != fs)
                {
                    fs.Close();
                }
            }
        }
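Example #16 leans on a CreateAndHflush helper that is not shown in this excerpt. A plausible implementation, inferred from the call sites and using only APIs that already appear above (Create, Write, Hflush), might look like this:

        // Sketch of the CreateAndHflush helper (signature inferred from the
        // call sites above): create the file, write the first `length` bytes,
        // and hflush so the data reaches the datanodes while the stream stays
        // open for further appends.
        private static FSDataOutputStream CreateAndHflush(FileSystem fs, Path file,
                                                          byte[] data, int length)
        {
            FSDataOutputStream @out = fs.Create(file);
            @out.Write(data, 0, length);
            @out.Hflush();
            return @out;
        }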
Example #17
        internal override IList <OperationOutput> Run(FileSystem fs)
        {
            // Operation
            IList <OperationOutput> @out = base.Run(fs);
            FSDataOutputStream      os   = null;

            try
            {
                Path         fn                = GetCreateFile();
                Range <long> writeSizeRange    = GetConfig().GetWriteSize();
                long         writeSize         = 0;
                long         blockSize         = DetermineBlockSize();
                short        replicationAmount = DetermineReplication();
                if (GetConfig().ShouldWriteUseBlockSize())
                {
                    writeSizeRange = GetConfig().GetBlockSize();
                }
                writeSize = Range.BetweenPositive(GetRandom(), writeSizeRange);
                long       bytesWritten = 0;
                long       timeTaken    = 0;
                int        bufSize      = GetBufferSize();
                bool       overWrite    = false;
                DataWriter writer       = new DataWriter(GetRandom());
                Log.Info("Attempting to create file at " + fn + " of size " + Helper.ToByteInfo(writeSize
                                                                                                ) + " using blocksize " + Helper.ToByteInfo(blockSize) + " and replication amount "
                         + replicationAmount);
                {
                    // open & create
                    long startTime = Timer.Now();
                    os         = fs.Create(fn, overWrite, bufSize, replicationAmount, blockSize);
                    timeTaken += Timer.Elapsed(startTime);
                    // write the given length
                    DataWriter.GenerateOutput stats = writer.WriteSegment(writeSize, os);
                    bytesWritten += stats.GetBytesWritten();
                    timeTaken    += stats.GetTimeTaken();
                    // capture close time
                    startTime = Timer.Now();
                    os.Close();
                    os         = null;
                    timeTaken += Timer.Elapsed(startTime);
                }
                Log.Info("Created file at " + fn + " of size " + Helper.ToByteInfo(bytesWritten)
                         + " bytes using blocksize " + Helper.ToByteInfo(blockSize) + " and replication amount "
                         + replicationAmount + " in " + timeTaken + " milliseconds");
                // collect all the stats
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .OkTimeTaken, timeTaken));
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .BytesWritten, bytesWritten));
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .Successes, 1L));
            }
            catch (IOException e)
            {
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .Failures, 1L));
                Log.Warn("Error with creating", e);
            }
            finally
            {
                if (os != null)
                {
                    try
                    {
                        os.Close();
                    }
                    catch (IOException e)
                    {
                        Log.Warn("Error closing create stream", e);
                    }
                }
            }
            return(@out);
        }
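The timing pattern in Example #17 (snapshot a start time, then accumulate elapsed intervals separately for open, write, and close) does not depend on the harness's Timer helper; a minimal sketch of the same pattern with the standard Stopwatch:

        // Sketch: accumulate per-phase wall-clock time with
        // System.Diagnostics.Stopwatch instead of Timer.Now()/Timer.Elapsed().
        System.Diagnostics.Stopwatch sw = System.Diagnostics.Stopwatch.StartNew();
        // ... open & create ...
        long timeTaken = sw.ElapsedMilliseconds;
        sw.Restart();
        // ... write the segment ...
        timeTaken += sw.ElapsedMilliseconds;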
Example #18
        /// <exception cref="System.IO.IOException"/>
        protected internal override OutputStream GetOutputStreamForKeystore()
        {
            FSDataOutputStream @out = FileSystem.Create(fs, GetPath(), permissions);

            return(@out);
        }
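Example #18 hands a pre-built permissions object to the static FileSystem.Create overload. A sketch of how that object might be constructed, assuming the port mirrors org.apache.hadoop.fs.permission.FsPermission (the 0600 mode is illustrative, chosen because keystore files are normally owner-only):

        // Sketch: restrict the keystore to its owner. 0x180 is octal 0600
        // (rw-------); FsPermission(short) is assumed to mirror the Java
        // org.apache.hadoop.fs.permission.FsPermission constructor.
        FsPermission permissions = new FsPermission((short)0x180);
        FSDataOutputStream @out = FileSystem.Create(fs, GetPath(), permissions);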
Example #19
        public virtual void TestMigrateOpenFileToArchival()
        {
            Log.Info("testMigrateOpenFileToArchival");
            Path fooDir = new Path("/foo");
            IDictionary <Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();

            policyMap[fooDir] = Cold;
            TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
                Arrays.AsList(fooDir), null, BlockSize, null, policyMap);
            TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme
                                                               (DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
            TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(this, clusterScheme
                                                                                     , nsScheme);
            test.SetupCluster();
            // create an open file
            Banner("writing to file /foo/bar");
            Path barFile = new Path(fooDir, "bar");

            DFSTestUtil.CreateFile(test.dfs, barFile, BlockSize, (short)1, 0L);
            FSDataOutputStream @out = test.dfs.Append(barFile);

            @out.WriteBytes("hello, ");
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
            try
            {
                Banner("start data migration");
                test.SetStoragePolicy();
                // set /foo to COLD
                test.Migrate();
                // make sure the under-construction block has not been migrated
                LocatedBlocks lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize
                                                                          );
                Log.Info("Locations: " + lbs);
                IList <LocatedBlock> blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish the migration, continue writing");
                // make sure the writing can continue
                @out.WriteBytes("world!");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
                IOUtils.Cleanup(Log, @out);
                lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
                Log.Info("Locations: " + lbs);
                blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish writing, starting reading");
                // check the content of /foo/bar
                FSDataInputStream @in = test.dfs.Open(barFile);
                byte[]            buf = new byte[13];
                // read the appended bytes starting at offset BlockSize (1024)
                @in.ReadFully(BlockSize, buf, 0, buf.Length);
                IOUtils.Cleanup(Log, @in);
                NUnit.Framework.Assert.AreEqual("hello, world!", Sharpen.Runtime.GetStringForBytes
                                                    (buf));
            }
            finally
            {
                test.ShutdownCluster();
            }
        }
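Example #19 drives the policy change through the MigrationTest harness. Outside the harness, the same migration can be expressed directly, assuming the port mirrors DistributedFileSystem.SetStoragePolicy (available since Hadoop 2.6):

        // Sketch: tag /foo with the COLD policy, then let the Mover relocate
        // replicas to ARCHIVE storage. SetStoragePolicy is assumed to mirror
        // DistributedFileSystem#setStoragePolicy(Path, String).
        DistributedFileSystem dfs = test.dfs;  // the harness's DFS handle
        dfs.SetStoragePolicy(new Path("/foo"), "COLD");
        // CLI equivalent:
        //   hdfs storagepolicies -setStoragePolicy -path /foo -policy COLD
        //   hdfs mover -p /foo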
Example #20
        public virtual void TestAppend()
        {
            MiniDFSCluster cluster      = null;
            int            numDataNodes = 2;
            Configuration  conf         = GetConf(numDataNodes);

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
                NameNode                nn = cluster.GetNameNode();
                BlockManager            bm = nn.GetNamesystem().GetBlockManager();
                BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
                // set a short token lifetime (1 second)
                SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
                Path       fileToAppend = new Path(FileToAppend);
                FileSystem fs           = cluster.GetFileSystem();
                // write a one-byte file
                FSDataOutputStream stm = WriteFile(fs, fileToAppend, (short)numDataNodes, BlockSize
                                                   );
                stm.Write(rawData, 0, 1);
                stm.Close();
                // open the file again for append
                stm = fs.Append(fileToAppend);
                int mid = rawData.Length - 1;
                stm.Write(rawData, 1, mid - 1);
                stm.Hflush();

                /*
                 * wait till token used in stm expires
                 */
                Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token = DFSTestUtil.
                                                                                      GetBlockToken(stm);
                while (!SecurityTestUtil.IsBlockTokenExpired(token))
                {
                    try
                    {
                        Sharpen.Thread.Sleep(10);
                    }
                    catch (Exception)
                    {
                    }
                }
                // remove a datanode to force re-establishing the pipeline
                cluster.StopDataNode(0);
                // append the rest of the file
                stm.Write(rawData, mid, rawData.Length - mid);
                stm.Close();
                // check if append is successful
                FSDataInputStream in5 = fs.Open(fileToAppend);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in5));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
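The one-second lifetime above is forced through SecurityTestUtil purely so the token expires mid-write. On a real cluster, block tokens are governed by configuration; a sketch of the relevant keys (the lifetime is expressed in minutes):

        // Sketch: enable block access tokens and set their lifetime.
        // 600 minutes (10 hours) is the stock default for
        // dfs.block.access.token.lifetime.
        Configuration conf = new Configuration();
        conf.SetBoolean("dfs.block.access.token.enable", true);
        conf.SetLong("dfs.block.access.token.lifetime", 600L);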
Example #21
        public virtual void TestLeaseAbort()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            try
            {
                cluster.WaitActive();
                NamenodeProtocols  preSpyNN = cluster.GetNameNodeRpc();
                NamenodeProtocols  spyNN    = Org.Mockito.Mockito.Spy(preSpyNN);
                DFSClient          dfs      = new DFSClient(null, spyNN, conf, null);
                byte[]             buf      = new byte[1024];
                FSDataOutputStream c_out    = CreateFsOut(dfs, dirString + "c");
                c_out.Write(buf, 0, 1024);
                c_out.Close();
                DFSInputStream     c_in  = dfs.Open(dirString + "c");
                FSDataOutputStream d_out = CreateFsOut(dfs, dirString + "d");
                // stub the renew method.
                Org.Mockito.Mockito.DoThrow(new RemoteException(typeof(SecretManager.InvalidToken
                                                                       ).FullName, "Your token is worthless")).When(spyNN).RenewLease(Matchers.AnyString
                                                                                                                                          ());
                // We don't need to wait for the lease renewer thread to act;
                // we call RenewLease() manually.
                // make it look like the soft limit has been exceeded.
                LeaseRenewer originalRenewer = dfs.GetLeaseRenewer();
                dfs.lastLeaseRenewal = Time.MonotonicNow() - HdfsConstants.LeaseSoftlimitPeriod -
                                       1000;
                try
                {
                    dfs.RenewLease();
                }
                catch (IOException)
                {
                }
                // Things should continue to work while only the soft limit has
                // been exceeded and the hard limit has not yet passed.
                try
                {
                    d_out.Write(buf, 0, 1024);
                    Log.Info("Write worked beyond the soft limit as expected.");
                }
                catch (IOException)
                {
                    NUnit.Framework.Assert.Fail("Write failed.");
                }
                // make it look like the hard limit has been exceeded.
                dfs.lastLeaseRenewal = Time.MonotonicNow() - HdfsConstants.LeaseHardlimitPeriod -
                                       1000;
                dfs.RenewLease();
                // this should not work.
                try
                {
                    d_out.Write(buf, 0, 1024);
                    d_out.Close();
                    NUnit.Framework.Assert.Fail("Write did not fail even after the fatal lease renewal failure"
                                                );
                }
                catch (IOException e)
                {
                    Log.Info("Write failed as expected. ", e);
                }
                // If aborted, the renewer should be empty. (no reference to clients)
                Sharpen.Thread.Sleep(1000);
                NUnit.Framework.Assert.IsTrue(originalRenewer.IsEmpty());
                // unstub
                Org.Mockito.Mockito.DoNothing().When(spyNN).RenewLease(Matchers.AnyString());
                // existing input streams should work
                try
                {
                    int num = c_in.Read(buf, 0, 1);
                    if (num != 1)
                    {
                        NUnit.Framework.Assert.Fail("Failed to read 1 byte");
                    }
                    c_in.Close();
                }
                catch (IOException e)
                {
                    Log.Error("Read failed with ", e);
                    NUnit.Framework.Assert.Fail("Read after lease renewal failure failed");
                }
                // new file writes should work.
                try
                {
                    c_out = CreateFsOut(dfs, dirString + "c");
                    c_out.Write(buf, 0, 1024);
                    c_out.Close();
                }
                catch (IOException e)
                {
                    Log.Error("Write failed with ", e);
                    NUnit.Framework.Assert.Fail("Write failed");
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
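Once a writer's lease has been aborted like this, another client does not have to wait out the hard limit before taking over the file; a sketch, assuming the port mirrors DistributedFileSystem.RecoverLease:

        // Sketch: force lease recovery on a file whose writer has gone away.
        // RecoverLease is assumed to mirror
        // DistributedFileSystem#recoverLease(Path); it returns true once the
        // file has been closed and its last block finalized.
        DistributedFileSystem dfs = (DistributedFileSystem)cluster.GetFileSystem();
        bool recovered = dfs.RecoverLease(new Path(dirString + "d"));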
Example #22
        /// <exception cref="System.Exception"/>
        public static void Launch()
        {
            Configuration conf            = new Configuration();
            FileSystem    fs              = FileSystem.Get(conf);
            int           numOfInputLines = 20;
            Path          OutputDir       = new Path("build/test/output_for_aggregates_test");
            Path          InputDir        = new Path("build/test/input_for_aggregates_test");
            string        inputFile       = "input.txt";

            fs.Delete(InputDir, true);
            fs.Mkdirs(InputDir);
            fs.Delete(OutputDir, true);
            StringBuilder inputData      = new StringBuilder();
            StringBuilder expectedOutput = new StringBuilder();

            expectedOutput.Append("max\t19\n");
            expectedOutput.Append("min\t1\n");
            FSDataOutputStream fileOut = fs.Create(new Path(InputDir, inputFile));

            for (int i = 1; i < numOfInputLines; i++)
            {
                expectedOutput.Append("count_").Append(idFormat.Format(i));
                expectedOutput.Append("\t").Append(i).Append("\n");
                inputData.Append(idFormat.Format(i));
                for (int j = 1; j < i; j++)
                {
                    inputData.Append(" ").Append(idFormat.Format(i));
                }
                inputData.Append("\n");
            }
            expectedOutput.Append("value_as_string_max\t9\n");
            expectedOutput.Append("value_as_string_min\t1\n");
            expectedOutput.Append("uniq_count\t15\n");
            fileOut.Write(Sharpen.Runtime.GetBytesForString(inputData.ToString(), "utf-8"));
            fileOut.Close();
            System.Console.Out.WriteLine("inputData:");
            System.Console.Out.WriteLine(inputData.ToString());
            conf.SetInt(ValueAggregatorJobBase.DescriptorNum, 1);
            conf.Set(ValueAggregatorJobBase.Descriptor + ".0", "UserDefined,org.apache.hadoop.mapreduce.lib.aggregate.AggregatorTests"
                     );
            conf.SetLong(UniqValueCount.MaxNumUniqueValues, 14);
            Job job = Job.GetInstance(conf);

            FileInputFormat.SetInputPaths(job, InputDir);
            job.SetInputFormatClass(typeof(TextInputFormat));
            FileOutputFormat.SetOutputPath(job, OutputDir);
            job.SetOutputFormatClass(typeof(TextOutputFormat));
            job.SetMapOutputKeyClass(typeof(Org.Apache.Hadoop.IO.Text));
            job.SetMapOutputValueClass(typeof(Org.Apache.Hadoop.IO.Text));
            job.SetOutputKeyClass(typeof(Org.Apache.Hadoop.IO.Text));
            job.SetOutputValueClass(typeof(Org.Apache.Hadoop.IO.Text));
            job.SetNumReduceTasks(1);
            job.SetMapperClass(typeof(ValueAggregatorMapper));
            job.SetReducerClass(typeof(ValueAggregatorReducer));
            job.SetCombinerClass(typeof(ValueAggregatorCombiner));
            job.WaitForCompletion(true);
            NUnit.Framework.Assert.IsTrue(job.IsSuccessful());
            //
            // Finally, we compare the reconstructed answer key with the
            // original one.  Remember, we need to ignore zero-count items
            // in the original key.
            //
            string outdata = MapReduceTestUtil.ReadOutput(OutputDir, conf);

            System.Console.Out.WriteLine("full out data:");
            System.Console.Out.WriteLine(outdata);
            outdata = Sharpen.Runtime.Substring(outdata, 0, expectedOutput.ToString().Length);
            NUnit.Framework.Assert.AreEqual(expectedOutput.ToString(), outdata);
            fs.Delete(OutputDir, true);
            fs.Delete(InputDir, true);
        }
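Example #22 plugs its aggregation logic in through the "UserDefined,&lt;class&gt;" descriptor string rather than through code. A minimal sketch of what such a descriptor could look like, assuming the port mirrors org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor (the class name and all member names here are assumptions):

        // Sketch of a user-defined descriptor: GenerateKeyValPairs turns each
        // input record into (aggregatorType + separator + id, value) entries,
        // which the aggregate framework's combiner and reducer fold by id,
        // e.g. summing LongValueSum entries into the "count_*" lines asserted
        // above. All names assume the port mirrors the Java
        // ValueAggregatorBaseDescriptor API.
        public class WordCountDescriptor : ValueAggregatorBaseDescriptor
        {
            public override AList<KeyValuePair<Text, Text>> GenerateKeyValPairs(object key, object val)
            {
                AList<KeyValuePair<Text, Text>> entries = new AList<KeyValuePair<Text, Text>>();
                foreach (string word in val.ToString().Split(' '))
                {
                    // one summed counter per distinct token
                    entries.AddItem(GenerateEntry(LongValueSum, "count_" + word, One));
                }
                return entries;
            }
        }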