Example #1
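        // Verifies hsync with SyncFlag.EndBlock: each EndBlock sync closes the block being
        // written, so the next write starts a new block and the NameNode-visible length and
        // located-block count advance exactly as asserted below.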
        public virtual void HSyncEndBlock_00()
        {
            int           preferredBlockSize = 1024;
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, preferredBlockSize);
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataOutputStream    stm        = null;

            try
            {
                Path path = new Path("/" + fName);
                stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
                System.Console.Out.WriteLine("Created file " + path.ToString());
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
                LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(0, blocks.GetLocatedBlocks().Count);
                // write a block and call hsync(end_block) at the block boundary
                stm.Write(new byte[preferredBlockSize]);
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(1, blocks.GetLocatedBlocks().Count);
                // call hsync then call hsync(end_block) immediately
                stm.Write(new byte[preferredBlockSize / 2]);
                stm.Hsync();
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(2, blocks.GetLocatedBlocks().Count);
                stm.Write(new byte[preferredBlockSize / 4]);
                stm.Hsync();
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize / 4, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(3, blocks.GetLocatedBlocks().Count);
            }
            finally
            {
                IOUtils.Cleanup(null, stm, fileSystem);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #2
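        // Appends to an existing file and hflushes without closing; every datanode in the
        // pipeline should then hold the last block as an RBW (replica being written) replica.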
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            ofs.WriteBytes("Some more stuff to write");
            ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
            IList<LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString(), FileSize - 1, FileSize).GetLocatedBlocks();
            string bpid = cluster.GetNamesystem().GetBlockPoolId();

            foreach (DataNode dn in cluster.GetDataNodes())
            {
                Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId());
                NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r != null);
                NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()", HdfsServerConstants.ReplicaState.Rbw, r.GetState());
            }
            ofs.Close();
        }
Example #3
 // create a bunch of files. Write to them and then verify.
 public override void Run()
 {
     System.Console.Out.WriteLine("Workload starting ");
     for (int i = 0; i < numberOfFiles; i++)
     {
         Path filename = new Path(id + "." + i);
         try
         {
             System.Console.Out.WriteLine("Workload processing file " + filename);
             FSDataOutputStream stm      = CreateFile(fs, filename, replication);
             DFSOutputStream    dfstream = (DFSOutputStream)(stm.GetWrappedStream());
             dfstream.SetArtificialSlowdown(1000);
             WriteFile(stm, myseed);
             stm.Close();
             CheckFile(fs, filename, replication, numBlocks, fileSize, myseed);
         }
         catch (Exception e)
         {
             System.Console.Out.WriteLine("Workload exception " + e);
             NUnit.Framework.Assert.IsTrue(e.ToString(), false);
         }
         // increment the stamp to indicate that another file is done.
         lock (this)
         {
             stamp++;
         }
     }
 }
Example #4
        public virtual void TestAbandonBlock()
        {
            string src = FileNamePrefix + "foo";
            // Start writing a file but do not close it
            FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);

            for (int i = 0; i < 1024; i++)
            {
                fout.Write(123);
            }
            fout.Hflush();
            long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
            // Now abandon the last block
            DFSClient     dfsclient = DFSClientAdapter.GetDFSClient(fs);
            LocatedBlocks blocks    = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
            int          originalNumBlocks = blocks.LocatedBlockCount();
            LocatedBlock b = blocks.GetLastLocatedBlock();

            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
            // call abandonBlock again to make sure the operation is idempotent
            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
            // And close the file
            fout.Close();
            // Close cluster and check the block has been abandoned after restart
            cluster.RestartNameNode();
            blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
            NUnit.Framework.Assert.AreEqual("Blocks " + b + " has not been abandoned.", orginalNumBlocks
                                            , blocks.LocatedBlockCount() + 1);
        }
Example #5
        /// <summary>Test NN crash and client crash/stuck immediately after block allocation</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestOpenFileWhenNNAndClientCrashAfterAddBlock()
        {
            cluster.GetConfiguration(0).Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "1.0f");
            string testData = "testData";

            // make sure we write a full block before creating the dummy block at the NN.
            cluster.GetConfiguration(0).SetInt("io.bytes.per.checksum", testData.Length);
            cluster.RestartNameNode(0);
            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                cluster.TransitionToStandby(1);
                DistributedFileSystem dfs     = cluster.GetFileSystem(0);
                string             pathString = "/tmp1.txt";
                Path               filePath   = new Path(pathString);
                FSDataOutputStream create     = dfs.Create(filePath, FsPermission.GetDefault(), true, 1024, (short)3, testData.Length, null);
                create.Write(Sharpen.Runtime.GetBytesForString(testData));
                create.Hflush();
                long       fileId     = ((DFSOutputStream)create.GetWrappedStream()).GetFileId();
                FileStatus fileStatus = dfs.GetFileStatus(filePath);
                DFSClient  client     = DFSClientAdapter.GetClient(dfs);
                // add one dummy block at NN, but not write to DataNode
                ExtendedBlock previousBlock = DFSClientAdapter.GetPreviousBlock(client, fileId);
                DFSClientAdapter.GetNamenode(client).AddBlock(pathString, client.GetClientName(),
                                                              new ExtendedBlock(previousBlock), new DatanodeInfo[0],
                                                              DFSClientAdapter.GetFileId((DFSOutputStream)create.GetWrappedStream()), null);
                cluster.RestartNameNode(0, true);
                cluster.RestartDataNode(0);
                cluster.TransitionToActive(0);
                // let the block reports be processed.
                Sharpen.Thread.Sleep(2000);
                FSDataInputStream @is = dfs.Open(filePath);
                @is.Close();
                dfs.RecoverLease(filePath);
                // initiate recovery
                NUnit.Framework.Assert.IsTrue("Recovery also should be success", dfs.RecoverLease
                                                  (filePath));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #6
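        // A fault injector makes the first datanode swallow heartbeat packets; after the
        // client socket timeout the pipeline is rebuilt without that datanode, while the
        // second datanode is expected to remain in the new pipeline.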
        public virtual void TestPacketTransmissionDelay()
        {
            // Make the first datanode not relay heartbeat packets.
            DataNodeFaultInjector dnFaultInjector = new _DataNodeFaultInjector_171();
            DataNodeFaultInjector oldDnInjector   = DataNodeFaultInjector.Get();

            DataNodeFaultInjector.Set(dnFaultInjector);
            // Setting the timeout to be 3 seconds. Normally heartbeat packet
            // would be sent every 1.5 seconds if there is no data traffic.
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientSocketTimeoutKey, "3000");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 2;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(new Path("noheartbeat.dat"), (short)2);
                @out.Write(unchecked ((int)(0x31)));
                @out.Hflush();
                DFSOutputStream dfsOut = (DFSOutputStream)@out.GetWrappedStream();
                // original pipeline
                DatanodeInfo[] orgNodes = dfsOut.GetPipeline();
                // Cause the second datanode to timeout on reading packet
                Sharpen.Thread.Sleep(3500);
                @out.Write(unchecked ((int)(0x32)));
                @out.Hflush();
                // new pipeline
                DatanodeInfo[] newNodes = dfsOut.GetPipeline();
                @out.Close();
                bool contains = false;
                for (int i = 0; i < newNodes.Length; i++)
                {
                    if (orgNodes[0].GetXferAddr().Equals(newNodes[i].GetXferAddr()))
                    {
                        throw new IOException("The first datanode should have been replaced.");
                    }
                    if (orgNodes[1].GetXferAddr().Equals(newNodes[i].GetXferAddr()))
                    {
                        contains = true;
                    }
                }
                NUnit.Framework.Assert.IsTrue(contains);
            }
            finally
            {
                DataNodeFaultInjector.Set(oldDnInjector);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #7
        public virtual void Pipeline_02_03()
        {
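            // Scenario: M1 writes and hflushes half a block without closing; M2 verifies it
            // can read that half block; after the soft lease expires, a second client appends
            // the other half, and M2 verifies the full block.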
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // create cluster
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();

            try
            {
                //change the lease limits.
                cluster.SetLeasePeriod(SoftLeaseLimit, HardLeaseLimit);
                //wait for the cluster
                cluster.WaitActive();
                FileSystem fs   = cluster.GetFileSystem();
                Path       p    = new Path(Dir, "file1");
                int        half = BlockSize / 2;
                {
                    //a. On Machine M1, Create file. Write half block of data.
                    //   Invoke DFSOutputStream.hflush() on the dfs file handle.
                    //   Do not close file yet.
                    FSDataOutputStream @out = fs.Create(p, true, fs.GetConf().GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096), (short)3, BlockSize);
                    Write(@out, 0, half);
                    //hflush
                    ((DFSOutputStream)@out.GetWrappedStream()).Hflush();
                }
                //b. On another machine M2, open file and verify that the half-block
                //   of data can be read successfully.
                CheckFile(p, half, conf);
                AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
                ((DistributedFileSystem)fs).dfs.GetLeaseRenewer().InterruptAndJoin();
                {
                    //c. On M1, append another half block of data.  Close file on M1.
                    // sleep to let the lease expire.
                    Sharpen.Thread.Sleep(2 * SoftLeaseLimit);
                    UserGroupInformation current = UserGroupInformation.GetCurrentUser();
                    UserGroupInformation ugi     = UserGroupInformation.CreateUserForTesting(current.GetShortUserName() + "x", new string[] { "supergroup" });
                    DistributedFileSystem dfs  = ugi.DoAs(new _PrivilegedExceptionAction_102(conf));
                    FSDataOutputStream    @out = Append(dfs, p);
                    Write(@out, 0, half);
                    @out.Close();
                }
                //d. On M2, open file and read 1 block of data from it. Close file.
                CheckFile(p, 2 * half, conf);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #8
        /// <exception cref="System.IO.IOException"/>
        private void DoWriteAndAbort(DistributedFileSystem fs, Path path)
        {
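            // Write about 2 MB to each of two files, hsync with UpdateLength, and abort both
            // streams so the files stay under construction before the snapshot is taken.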
            fs.Mkdirs(path);
            fs.AllowSnapshot(path);
            DFSTestUtil.CreateFile(fs, new Path("/test/test1"), 100, (short)2, 100024L);
            DFSTestUtil.CreateFile(fs, new Path("/test/test2"), 100, (short)2, 100024L);
            Path file = new Path("/test/test/test2");
            FSDataOutputStream @out = fs.Create(file);

            for (int i = 0; i < 2; i++)
            {
                long count = 0;
                while (count < 1048576)
                {
                    @out.WriteBytes("hell");
                    count += 4;
                }
            }
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            DFSTestUtil.AbortStream((DFSOutputStream)@out.GetWrappedStream());
            Path file2 = new Path("/test/test/test3");
            FSDataOutputStream out2 = fs.Create(file2);

            for (int i_1 = 0; i_1 < 2; i_1++)
            {
                long count = 0;
                while (count < 1048576)
                {
                    out2.WriteBytes("hell");
                    count += 4;
                }
            }
            ((DFSOutputStream)out2.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            DFSTestUtil.AbortStream((DFSOutputStream)out2.GetWrappedStream());
            fs.CreateSnapshot(path, "s1");
        }
Example #9
        /// <exception cref="System.IO.IOException"/>
        private void TestPersistHelper(Configuration conf)
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                FSNamesystem          fsn = cluster.GetNamesystem();
                DistributedFileSystem fs  = cluster.GetFileSystem();
                Path dir   = new Path("/abc/def");
                Path file1 = new Path(dir, "f1");
                Path file2 = new Path(dir, "f2");
                // create an empty file f1
                fs.Create(file1).Close();
                // create an under-construction file f2
                FSDataOutputStream @out = fs.Create(file2);
                @out.WriteBytes("hello");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                // checkpoint
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                fs.SaveNamespace();
                fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.RestartNameNode();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                NUnit.Framework.Assert.IsTrue(fs.IsDirectory(dir));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
                // check internals of file2
                INodeFile file2Node = fsn.dir.GetINode4Write(file2.ToString()).AsFile();
                NUnit.Framework.Assert.AreEqual("hello".Length, file2Node.ComputeFileSize());
                NUnit.Framework.Assert.IsTrue(file2Node.IsUnderConstruction());
                BlockInfoContiguous[] blks = file2Node.GetBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Length);
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction, blks[0].GetBlockUCState());
                // check lease manager
                LeaseManager.Lease lease = fsn.leaseManager.GetLeaseByPath(file2.ToString());
                NUnit.Framework.Assert.IsNotNull(lease);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #10
        public virtual void TestBlockRecoveryWithLessMetafile()
        {
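            // Shrinks a replica's on-disk meta file, restarts the datanode so the replica is
            // reported as RWR, and verifies that lease recovery can still close the file.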
            Configuration conf = new Configuration();

            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey, UserGroupInformation.GetCurrentUser().GetShortUserName());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Path file = new Path("/testRecoveryFile");
            DistributedFileSystem dfs  = cluster.GetFileSystem();
            FSDataOutputStream    @out = dfs.Create(file);
            int count = 0;

            while (count < 2 * 1024 * 1024)
            {
                @out.WriteBytes("Data");
                count += 4;
            }
            @out.Hsync();
            // abort the original stream
            ((DFSOutputStream)@out.GetWrappedStream()).Abort();
            LocatedBlocks locations = cluster.GetNameNodeRpc().GetBlockLocations(file.ToString(), 0, count);
            ExtendedBlock      block         = locations.Get(0).GetBlock();
            DataNode           dn            = cluster.GetDataNodes()[0];
            BlockLocalPathInfo localPathInfo = dn.GetBlockLocalPathInfo(block, null);
            FilePath           metafile      = new FilePath(localPathInfo.GetMetaPath());

            NUnit.Framework.Assert.IsTrue(metafile.Exists());
            // reduce the block meta file size
            RandomAccessFile raf = new RandomAccessFile(metafile, "rw");

            raf.SetLength(metafile.Length() - 20);
            raf.Close();
            // restart DN to make replica to RWR
            MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
            cluster.RestartDataNode(dnProp, true);
            // try to recover the lease
            DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));

            count = 0;
            while (++count < 10 && !newdfs.RecoverLease(file))
            {
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.IsTrue("File should be closed", newdfs.RecoverLease(file));
        }
Example #11
        public virtual void TestLeaseRecoveryAndAppend()
        {
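            // Abort a stream so its lease is still held by the dead client; append must fail
            // until RecoverLease succeeds, after which appending works again.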
            Configuration conf = new Configuration();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                Path file = new Path("/testLeaseRecovery");
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a file with 0 bytes
                FSDataOutputStream @out = dfs.Create(file);
                @out.Hflush();
                @out.Hsync();
                // abort the original stream
                ((DFSOutputStream)@out.GetWrappedStream()).Abort();
                DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));
                // Appending to a file whose lease is held by another client should fail
                try
                {
                    newdfs.Append(file);
                    NUnit.Framework.Assert.Fail("Append to a file(lease is held by another client) should fail");
                }
                catch (RemoteException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("file lease is currently owned"));
                }
                // Lease recovery on first try should be successful
                bool recoverLease = newdfs.RecoverLease(file);
                NUnit.Framework.Assert.IsTrue(recoverLease);
                FSDataOutputStream append = newdfs.Append(file);
                append.Write(Sharpen.Runtime.GetBytesForString("test"));
                append.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
Example #12
        /// <summary>
        /// Ensure that reserved space is released when the client goes away
        /// unexpectedly.
        /// </summary>
        /// <remarks>
        /// Ensure that reserved space is released when the client goes away
        /// unexpectedly.
        /// The verification is done for each replica in the write pipeline.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        public virtual void TestSpaceReleasedOnUnexpectedEof()
        {
            short replication = 3;

            StartCluster(BlockSize, replication, -1);
            string methodName = GenericTestUtils.GetMethodName();
            Path   file       = new Path("/" + methodName + ".01.dat");
            // Write 1 byte to the file and kill the writer.
            FSDataOutputStream os = fs.Create(file, replication);

            os.Write(new byte[1]);
            os.Hsync();
            DFSTestUtil.AbortStream((DFSOutputStream)os.GetWrappedStream());
            // Ensure all space reserved for the replica was released on each
            // DataNode.
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                FsVolumeImpl volume = (FsVolumeImpl)dn.GetFSDataset().GetVolumes()[0];
                GenericTestUtils.WaitFor(new _Supplier_276(volume), 500, int.MaxValue);
            }
        }
Example #13
        /// <summary>
        /// Test if the quota can be correctly updated when file length is updated
        /// through fsync
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateQuotaForFSync()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            DFSTestUtil.CreateFile(dfs, bar, Blocksize, Replication, 0L);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            FSDataOutputStream @out = dfs.Append(bar);

            @out.Write(new byte[Blocksize / 4]);
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
            QuotaCounts    quota   = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            long           ns      = quota.GetNameSpace();
            long           ds      = quota.GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 * Replication, ds);
            // file is under construction
            @out.Write(new byte[Blocksize / 4]);
            @out.Close();
            fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
            quota   = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns      = quota.GetNameSpace();
            ds      = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            NUnit.Framework.Assert.AreEqual((Blocksize + Blocksize / 2) * Replication, ds);
            // append another block
            DFSTestUtil.AppendFile(dfs, bar, Blocksize);
            quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns    = quota.GetNameSpace();
            ds    = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual((Blocksize * 2 + Blocksize / 2) * Replication, ds);
        }
Example #14
        public virtual void TestAddBlockUC()
        {
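            // Append without closing, hsync with UpdateLength, then restart the NameNode and
            // verify the appended block was persisted in the under-construction state.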
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path file1 = new Path("/file1");

            DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
            FSDataOutputStream @out = null;

            try
            {
                // append files without closing the streams
                @out = fs.Append(file1);
                string appendContent = "appending-content";
                @out.WriteBytes(appendContent);
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                // restart NN
                cluster.RestartNameNode(true);
                FSDirectory           fsdir      = cluster.GetNamesystem().GetFSDirectory();
                INodeFile             fileNode   = fsdir.GetINode4Write(file1.ToString()).AsFile();
                BlockInfoContiguous[] fileBlocks = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(2, fileBlocks.Length);
                NUnit.Framework.Assert.AreEqual(Blocksize, fileBlocks[0].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete, fileBlocks[0].GetBlockUCState());
                NUnit.Framework.Assert.AreEqual(appendContent.Length - 1, fileBlocks[1].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction, fileBlocks[1].GetBlockUCState());
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
            }
        }
Example #15
        public virtual void HSyncUpdateLength_00()
        {
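            // hsync with SyncFlag.UpdateLength on a freshly created, empty stream: the
            // NameNode-visible file length should still be zero.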
            Configuration         conf       = new HdfsConfiguration();
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem fileSystem = cluster.GetFileSystem();

            try
            {
                Path path = new Path(fName);
                FSDataOutputStream stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
                System.Console.Out.WriteLine("Created file " + path.ToString());
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
                stm.Close();
            }
            finally
            {
                fileSystem.Close();
                cluster.Shutdown();
            }
        }
Example #16
        /// <summary>
        /// The method starts a new cluster with the given Configuration; creates a file
        /// with the specified block_size and writes 10 equal sections to it; it also calls
        /// hflush/hsync after each write and throws an IOException in case of an error.
        /// </summary>
        /// <param name="conf">cluster configuration</param>
        /// <param name="fileName">of the file to be created and processed as required</param>
        /// <param name="block_size">value to be used for the file's creation</param>
        /// <param name="replicas">is the number of replicas</param>
        /// <param name="isSync">hsync or hflush</param>
        /// <param name="syncFlags">specify the semantic of the sync/flush</param>
        /// <exception cref="System.IO.IOException">in case of any errors</exception>
        public static void DoTheJob(Configuration conf, string fileName, long block_size,
                                    short replicas, bool isSync, EnumSet <HdfsDataOutputStream.SyncFlag> syncFlags)
        {
            byte[] fileContent;
            int    Sections = 10;

            fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).Build();
            // Make sure we work with DFS in order to utilize all its functionality
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataInputStream     @is;

            try
            {
                Path   path     = new Path(fileName);
                string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath();
                FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size);
                System.Console.Out.WriteLine("Created file " + fileName);
                int tenth    = AppendTestUtil.FileSize / Sections;
                int rounding = AppendTestUtil.FileSize - tenth * Sections;
                for (int i = 0; i < Sections; i++)
                {
                    System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)
                                                                                      ) + " section to file " + fileName);
                    // write to the file
                    stm.Write(fileContent, tenth * i, tenth);
                    // Wait while hflush/hsync pushes all packets through built pipeline
                    if (isSync)
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
                    }
                    else
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
                    }
                    // Check file length if updatelength is required
                    if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
                    {
                        long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                        NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length"
                                                        , tenth * (i + 1), currentFileLength);
                    }
                    else
                    {
                        if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                        {
                            LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                            NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                        }
                    }
                    byte[] toRead   = new byte[tenth];
                    byte[] expected = new byte[tenth];
                    System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
                    // Open the same file for read. Need to create new reader after every write operation(!)
                    @is = fileSystem.Open(path);
                    @is.Seek(tenth * i);
                    int readBytes = @is.Read(toRead, 0, tenth);
                    System.Console.Out.WriteLine("Has read " + readBytes);
                    NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes
                                                                                                  <= tenth));
                    @is.Close();
                    CheckData(toRead, 0, readBytes, expected, "Partial verification");
                }
                System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth *
                                                                                         Sections + rounding) + " section to file " + fileName);
                stm.Write(fileContent, tenth * Sections, rounding);
                stm.Close();
                NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize
                                                , fileSystem.GetFileStatus(path).GetLen());
                AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()"
                                             );
            }
            finally
            {
                fileSystem.Close();
                cluster.Shutdown();
            }
        }
Example #17
        public virtual void TestMigrateOpenFileToArchival()
        {
            Log.Info("testMigrateOpenFileToArchival");
            Path fooDir = new Path("/foo");
            IDictionary <Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();

            policyMap[fooDir] = Cold;
            TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
                Arrays.AsList(fooDir), null, BlockSize, null, policyMap);
            TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
                DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
            TestStorageMover.MigrationTest test = new TestStorageMover.MigrationTest(
                this, clusterScheme, nsScheme);
            test.SetupCluster();
            // create an open file
            Banner("writing to file /foo/bar");
            Path barFile = new Path(fooDir, "bar");

            DFSTestUtil.CreateFile(test.dfs, barFile, BlockSize, (short)1, 0L);
            FSDataOutputStream @out = test.dfs.Append(barFile);

            @out.WriteBytes("hello, ");
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
            try
            {
                Banner("start data migration");
                test.SetStoragePolicy();
                // set /foo to COLD
                test.Migrate();
                // make sure the under construction block has not been migrated
                LocatedBlocks lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
                Log.Info("Locations: " + lbs);
                IList <LocatedBlock> blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish the migration, continue writing");
                // make sure the writing can continue
                @out.WriteBytes("world!");
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
                IOUtils.Cleanup(Log, @out);
                lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
                Log.Info("Locations: " + lbs);
                blks = lbs.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, blks.Count);
                NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
                Banner("finish writing, starting reading");
                // check the content of /foo/bar
                FSDataInputStream @in = test.dfs.Open(barFile);
                byte[]            buf = new byte[13];
                // read from offset 1024
                @in.ReadFully(BlockSize, buf, 0, buf.Length);
                IOUtils.Cleanup(Log, @in);
                NUnit.Framework.Assert.AreEqual("hello, world!", Sharpen.Runtime.GetStringForBytes
                                                    (buf));
            }
            finally
            {
                test.ShutdownCluster();
            }
        }
Example #18
        /// <summary>
        /// Write to one file, then kill one datanode in the pipeline and then
        /// close the file.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        private void SimpleTest(int datanodeToKill)
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 5000);
            int myMaxNodes = 5;

            System.Console.Out.WriteLine("SimpleTest starting with DataNode to Kill " + datanodeToKill
                                         );
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(myMaxNodes
                                                                                   ).Build();

            cluster.WaitActive();
            FileSystem fs       = cluster.GetFileSystem();
            short      repl     = 3;
            Path       filename = new Path("simpletest.dat");

            try
            {
                // create a file and write one block of data
                System.Console.Out.WriteLine("SimpleTest creating file " + filename);
                FSDataOutputStream stm      = CreateFile(fs, filename, repl);
                DFSOutputStream    dfstream = (DFSOutputStream)(stm.GetWrappedStream());
                // these are test settings
                dfstream.SetChunksPerPacket(5);
                dfstream.SetArtificialSlowdown(3000);
                long   myseed = AppendTestUtil.NextLong();
                byte[] buffer = AppendTestUtil.RandomBytes(myseed, fileSize);
                int    mid    = fileSize / 4;
                stm.Write(buffer, 0, mid);
                DatanodeInfo[] targets = dfstream.GetPipeline();
                int            count   = 5;
                while (count-- > 0 && targets == null)
                {
                    try
                    {
                        System.Console.Out.WriteLine("SimpleTest: Waiting for pipeline to be created.");
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                    targets = dfstream.GetPipeline();
                }
                if (targets == null)
                {
                    int victim = AppendTestUtil.NextInt(myMaxNodes);
                    System.Console.Out.WriteLine("SimpleTest stopping datanode random " + victim);
                    cluster.StopDataNode(victim);
                }
                else
                {
                    int victim = datanodeToKill;
                    System.Console.Out.WriteLine("SimpleTest stopping datanode " + targets[victim]);
                    cluster.StopDataNode(targets[victim].GetXferAddr());
                }
                System.Console.Out.WriteLine("SimpleTest stopping datanode complete");
                // write some more data to file, close and verify
                stm.Write(buffer, mid, fileSize - mid);
                stm.Close();
                CheckFile(fs, filename, repl, numBlocks, fileSize, myseed);
            }
            catch (Exception e)
            {
                System.Console.Out.WriteLine("Simple Workload exception " + e);
                Sharpen.Runtime.PrintStackTrace(e);
                NUnit.Framework.Assert.IsTrue(e.ToString(), false);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
Example #19
        public virtual void TestRoundTripAckMetric()
        {
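            // Write through a two-datanode pipeline slowly enough to observe it, then check
            // that the head datanode recorded PacketAckRoundTripTimeNanos metrics.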
            int           datanodeCount = 2;
            int           interval      = 1;
            Configuration conf          = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey, string.Empty + interval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(datanodeCount).Build();

            try
            {
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // Open a file and get the head of the pipeline
                Path testFile            = new Path("/testRoundTripAckMetric.txt");
                FSDataOutputStream fsout = fs.Create(testFile, (short)datanodeCount);
                DFSOutputStream    dout  = (DFSOutputStream)fsout.GetWrappedStream();
                // Slow down the writes to catch the write pipeline
                dout.SetChunksPerPacket(5);
                dout.SetArtificialSlowdown(3000);
                fsout.Write(new byte[10000]);
                DatanodeInfo[] pipeline = null;
                int            count    = 0;
                while (pipeline == null && count < 5)
                {
                    pipeline = dout.GetPipeline();
                    System.Console.Out.WriteLine("Waiting for pipeline to be created.");
                    Sharpen.Thread.Sleep(1000);
                    count++;
                }
                // Get the head node that should be receiving downstream acks
                DatanodeInfo headInfo = pipeline[0];
                DataNode     headNode = null;
                foreach (DataNode datanode in cluster.GetDataNodes())
                {
                    if (datanode.GetDatanodeId().Equals(headInfo))
                    {
                        headNode = datanode;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsNotNull("Could not find the head of the datanode write pipeline"
                                                 , headNode);
                // Close the file and wait for the metrics to rollover
                Sharpen.Thread.Sleep((interval + 1) * 1000);
                // Check the ack was received
                MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(headNode.GetMetrics().Name());
                NUnit.Framework.Assert.IsTrue("Expected non-zero number of acks", MetricsAsserts.GetLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
                MetricsAsserts.AssertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }