Example #1
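Start writing a file without closing it, abandon its last block through the NameNode protocol (calling AbandonBlock twice to verify the operation is idempotent), then restart the NameNode and assert that the located-block count is one lower than before the abandon.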
        public virtual void TestAbandonBlock()
        {
            string src = FileNamePrefix + "foo";
            // Start writing a file but do not close it
            FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);

            for (int i = 0; i < 1024; i++)
            {
                fout.Write(123);
            }
            fout.Hflush();
            long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
            // Now abandon the last block
            DFSClient dfsclient = DFSClientAdapter.GetDFSClient(fs);
            LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
            int originalNumBlocks = blocks.LocatedBlockCount();
            LocatedBlock b = blocks.GetLastLocatedBlock();

            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
            // Call abandonBlock again to make sure the operation is idempotent
            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
            // And close the file
            fout.Close();
            // Restart the NameNode and check that the last block has been abandoned
            cluster.RestartNameNode();
            blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
            NUnit.Framework.Assert.AreEqual("Block " + b + " has not been abandoned.",
                                            originalNumBlocks, blocks.LocatedBlockCount() + 1);
        }
Example #2
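With block access tokens enabled and a 512-byte block size, write a kilobyte to a file, poll until the NameNode reports a last located block, and assert that the block token attached to that block has the expected kind.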
        public virtual void TestBlockTokenInLastLocatedBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            try
            {
                FileSystem         fs       = cluster.GetFileSystem();
                string             fileName = "/testBlockTokenInLastLocatedBlock";
                Path               filePath = new Path(fileName);
                FSDataOutputStream @out     = fs.Create(filePath, (short)1);
                @out.Write(new byte[1000]);
                // ensure that the first block is written out (see FSOutputSummer#flush)
                @out.Flush();
                LocatedBlocks locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
                while (locatedBlocks.GetLastLocatedBlock() == null)
                {
                    Sharpen.Thread.Sleep(100);
                    locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
                }
                Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token =
                    locatedBlocks.GetLastLocatedBlock().GetBlockToken();
                NUnit.Framework.Assert.AreEqual(BlockTokenIdentifier.KindName, token.GetKind());
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
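A polling predicate from an HA test: it fetches the locations of the last block, checks that every replica location is non-null, triggers deletion reports when the block is over-replicated, nudges the cluster with heartbeats, and returns true once the replica count matches the expected value. An IOException simply means no locations are available yet.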
 public bool Get()
 {
     try
     {
         LocatedBlocks  locs = NameNodeAdapter.GetBlockLocations(nn, path, 0, 1000);
         DatanodeInfo[] dnis = locs.GetLastLocatedBlock().GetLocations();
         foreach (DatanodeInfo dni in dnis)
         {
             NUnit.Framework.Assert.IsNotNull(dni);
         }
         int numReplicas = dnis.Length;
         Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Info(
             "Got " + numReplicas + " locs: " + locs);
         if (numReplicas > expectedReplicas)
         {
             cluster.TriggerDeletionReports();
         }
         cluster.TriggerHeartbeats();
         return numReplicas == expectedReplicas;
     }
     catch (IOException e)
     {
         Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Warn(
             "No block locations yet: " + e.Message);
         return false;
     }
 }
Example #4
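A WebHDFS-style JSON helper: it flattens a LocatedBlocks instance, including its last located block and construction-state flags, into a sorted map and serializes it to a JSON string.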
        /// <summary>Convert LocatedBlocks to a Json string.</summary>
        /// <exception cref="System.IO.IOException"/>
        public static string ToJsonString(LocatedBlocks locatedblocks)
        {
            if (locatedblocks == null)
            {
                return null;
            }
            IDictionary<string, object> m = new SortedDictionary<string, object>();

            m["fileLength"]          = locatedblocks.GetFileLength();
            m["isUnderConstruction"] = locatedblocks.IsUnderConstruction();
            m["locatedBlocks"]       = ToJsonArray(locatedblocks.GetLocatedBlocks());
            m["lastLocatedBlock"]    = ToJsonMap(locatedblocks.GetLastLocatedBlock());
            m["isLastBlockComplete"] = locatedblocks.IsLastBlockComplete();
            return ToJsonString(typeof(LocatedBlocks), m);
        }
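Example #5
This test exercises GetBlockLocations against snapshot paths: snapshots must keep the file length and block list frozen at snapshot time, while GetLastLocatedBlock on the live, still-open file reports the growing tail block with the correct offset, size, and under-construction state.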
        public virtual void TestGetBlockLocations()
        {
            Path root = new Path("/");
            Path file = new Path("/file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // take a snapshot on root
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s1");
            Path fileInSnapshot = SnapshotTestHelper.GetSnapshotPath(root, "s1", file.GetName());
            FileStatus status = hdfs.GetFileStatus(fileInSnapshot);

            // make sure we record the size for the file
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // append data to file
            DFSTestUtil.AppendFile(hdfs, file, Blocksize - 1);
            status = hdfs.GetFileStatus(fileInSnapshot);
            // the size of snapshot file should still be BLOCKSIZE
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // the size of the file should be (2 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // call DFSClient#callGetBlockLocations for the file in snapshot
            LocatedBlocks blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot.ToString(), 0, long.MaxValue);
            IList<LocatedBlock> blockList = blocks.GetLocatedBlocks();

            // should be only one block
            NUnit.Framework.Assert.AreEqual(Blocksize, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check the last block
            LocatedBlock lastBlock = blocks.GetLastLocatedBlock();

            NUnit.Framework.Assert.AreEqual(0, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            // take another snapshot
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s2");
            Path fileInSnapshot2 = SnapshotTestHelper.GetSnapshotPath(root, "s2", file.GetName());
            // append data to file without closing
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            status = hdfs.GetFileStatus(fileInSnapshot2);
            // the size of snapshot file should be BLOCKSIZE*2-1
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // the size of the file should be (3 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 3 - 1, status.GetLen());
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot2.ToString(), 0, long.MaxValue);
            NUnit.Framework.Assert.IsFalse(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsTrue(blocks.IsLastBlockComplete());
            blockList = blocks.GetLocatedBlocks();
            // should be 2 blocks
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(2, blockList.Count);
            // check the last block
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), fileInSnapshot2.ToString(), Blocksize, 0);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check blocks for file being written
            blocks = DFSClientAdapter.CallGetBlockLocations(
                cluster.GetNameNodeRpc(), file.ToString(), 0, long.MaxValue);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(3, blockList.Count);
            NUnit.Framework.Assert.IsTrue(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsFalse(blocks.IsLastBlockComplete());
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, lastBlock.GetBlockSize());
            @out.Close();
        }
Example #6
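This test abandons the last block of an open file, restarts the NameNode without closing the file, and verifies both that the persisted file is exactly one block shorter and that the data written before the restart, minus the abandoned block, reads back byte-for-byte.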
        public virtual void TestRestartDfsWithAbandonedBlock()
        {
            Configuration conf = new HdfsConfiguration();

            // Turn off persistent IPC, so that the DFSClient can survive NN restart
            conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
            MiniDFSCluster     cluster = null;
            long               len     = 0;
            FSDataOutputStream stream;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                FileSystem fs = cluster.GetFileSystem();
                // Create the file with a 4096-byte block size so that multiple blocks are written
                stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
                stream.Write(DataBeforeRestart);
                stream.Hflush();
                // Wait for all of the blocks to get through
                while (len < BlockSize * (NumBlocks - 1))
                {
                    FileStatus status = fs.GetFileStatus(FilePath);
                    len = status.GetLen();
                    Sharpen.Thread.Sleep(100);
                }
                // Abandon the last block
                DFSClient dfsclient = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
                HdfsFileStatus fileStatus = dfsclient.GetNamenode().GetFileInfo(FileName);
                LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(
                    FileName, 0, BlockSize * NumBlocks);
                NUnit.Framework.Assert.AreEqual(NumBlocks, blocks.GetLocatedBlocks().Count);
                LocatedBlock b = blocks.GetLastLocatedBlock();
                dfsclient.GetNamenode().AbandonBlock(
                    b.GetBlock(), fileStatus.GetFileId(), FileName, dfsclient.clientName);
                // explicitly do NOT close the file.
                cluster.RestartNameNode();
                // Check that the file is exactly one block shorter than before the restart:
                // everything except the abandoned block was persisted to the edit log
                FileStatus status_1 = fs.GetFileStatus(FilePath);
                NUnit.Framework.Assert.IsTrue("Length incorrect: " + status_1.GetLen(),
                                              status_1.GetLen() == len - BlockSize);
                // Verify the data showed up from before restart, sans abandoned block.
                FSDataInputStream readStream = fs.Open(FilePath);
                try
                {
                    byte[] verifyBuf = new byte[DataBeforeRestart.Length - BlockSize];
                    IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
                    byte[] expectedBuf = new byte[DataBeforeRestart.Length - BlockSize];
                    System.Array.Copy(DataBeforeRestart, 0, expectedBuf, 0, expectedBuf.Length);
                    Assert.AssertArrayEquals(expectedBuf, verifyBuf);
                }
                finally
                {
                    IOUtils.CloseStream(readStream);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #7
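Create a file with two replicas, shut down every DataNode that holds the file's last block, and verify that a subsequent append fails with an "insufficient locations" IOException while the inode stays closed (not under construction).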
        /// <summary>
        /// Test that an append with no locations fails with an exception
        /// showing insufficient locations.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendInsufficientLocations()
        {
            Configuration conf = new Configuration();

            // lower heartbeat interval for fast recognition of DN
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 3000);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file with replication 2
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
                // Shut down all DNs that have the last block location for the file
                LocatedBlocks lbs = fileSystem.dfs.GetNamenode().GetBlockLocations(
                    "/testAppend", 0, long.MaxValue);
                IList<DataNode> dnsOfCluster = cluster.GetDataNodes();
                DatanodeInfo[] dnsWithLocations = lbs.GetLastLocatedBlock().GetLocations();
                foreach (DataNode dn in dnsOfCluster)
                {
                    foreach (DatanodeInfo loc in dnsWithLocations)
                    {
                        if (dn.GetDatanodeId().Equals(loc))
                        {
                            dn.Shutdown();
                            DFSTestUtil.WaitForDatanodeDeath(dn);
                        }
                    }
                }
                // Wait till 0 replication is recognized
                DFSTestUtil.WaitReplication(fileSystem, f, (short)0);
                // Append to the file; at this point two DNs are still live (of the
                // original four), but neither of them holds the block.
                try
                {
                    fileSystem.Append(f);
                    NUnit.Framework.Assert.Fail("Append should fail because of insufficient locations");
                }
                catch (IOException e)
                {
                    Log.Info("Expected exception: ", e);
                }
                FSDirectory dir = cluster.GetNamesystem().GetFSDirectory();
                INodeFile inode = INodeFile.ValueOf(dir.GetINode("/testAppend"), "/testAppend");
                NUnit.Framework.Assert.IsTrue("File should remain closed",
                                              !inode.IsUnderConstruction());
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
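Taken together, the examples share one pattern: ask the NameNode for a LocatedBlocks view of a byte range, then interrogate GetLastLocatedBlock() for the file's tail. Below is a minimal sketch of that pattern, assuming the same Sharpen-converted HDFS client API used in the examples above; the method name InspectTailBlock and the "/demo/file" path are illustrative only.

        /// <summary>
        /// Minimal sketch: inspect the tail block of a file. Assumes the same
        /// Sharpen-converted HDFS API as the examples above; InspectTailBlock
        /// and the "/demo/file" path are hypothetical.
        /// </summary>
        public static void InspectTailBlock(DFSClient dfsclient)
        {
            string src = "/demo/file"; // hypothetical path
            // Ask the NameNode for all located blocks in the file's byte range
            LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, long.MaxValue);
            LocatedBlock last = blocks.GetLastLocatedBlock();
            if (last == null)
            {
                return; // no block materialized yet (see the polling loop in Example #2)
            }
            if (blocks.IsUnderConstruction() && !blocks.IsLastBlockComplete())
            {
                // The tail block is still being written; its reported size may lag.
                // List the DataNodes currently holding a replica of it.
                foreach (DatanodeInfo loc in last.GetLocations())
                {
                    System.Console.WriteLine("tail replica on " + loc);
                }
            }
            System.Console.WriteLine("tail block starts at " + last.GetStartOffset() +
                                     ", size " + last.GetBlockSize());
        }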