Example #1
        public virtual void TestMissingBlock()
        {
            // Create a file with a single block that has one replica
            Path file = GetTestPath("testMissingBlocks");

            CreateFile(file, 100, (short)1);
            // Corrupt the only replica of the block to result in a missing block
            LocatedBlock block = NameNodeAdapter.GetBlockLocations(
                cluster.GetNameNode(), file.ToString(), 0, 1).Get(0);

            cluster.GetNamesystem().WriteLock();
            try
            {
                bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0],
                                             "STORAGE_ID", "TEST");
            }
            finally
            {
                cluster.GetNamesystem().WriteUnlock();
            }
            UpdateMetrics();
            MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);

            MetricsAsserts.AssertGauge("UnderReplicatedBlocks", 1L, rb);
            MetricsAsserts.AssertGauge("MissingBlocks", 1L, rb);
            MetricsAsserts.AssertGauge("MissingReplOneBlocks", 1L, rb);
            fs.Delete(file, true);
            WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
        }
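The gauge assertions above only see the values captured by the preceding UpdateMetrics() call; anything that happens asynchronously (such as the deletion at the end) has to be polled for, which is what WaitForDnMetricValue does. A minimal polling helper in the same spirit might look like the sketch below; the helper name and the use of MetricsAsserts.GetLongGauge are assumptions made for illustration, not members of this test class.

        // Hypothetical sketch: poll a NameNode gauge until it reaches the expected
        // value or the retry budget is exhausted. Assumes MetricsAsserts.GetLongGauge
        // mirrors the Java getLongGauge(String, MetricsRecordBuilder) helper.
        private static void WaitForGauge(string source, string gauge, long expected)
        {
            long actual = -1;
            for (int i = 0; i < 10; i++)
            {
                MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(source);
                actual = MetricsAsserts.GetLongGauge(gauge, rb);
                if (actual == expected)
                {
                    return;                      // gauge has converged
                }
                Sharpen.Thread.Sleep(1000);      // give the NameNode time to update
            }
            NUnit.Framework.Assert.AreEqual(expected, actual);
        }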
Example #2
 public bool Get()
 {
     try
     {
         LocatedBlocks  locs = NameNodeAdapter.GetBlockLocations(nn, path, 0, 1000);
         DatanodeInfo[] dnis = locs.GetLastLocatedBlock().GetLocations();
         foreach (DatanodeInfo dni in dnis)
         {
             NUnit.Framework.Assert.IsNotNull(dni);
         }
         int numReplicas = dnis.Length;
         Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Info(
             "Got " + numReplicas + " locs: " + locs);
         if (numReplicas > expectedReplicas)
         {
             cluster.TriggerDeletionReports();
         }
         cluster.TriggerHeartbeats();
         return numReplicas == expectedReplicas;
     }
     catch (IOException e)
     {
          Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Warn(
              "No block locations yet: " + e.Message);
          return false;
     }
 }
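Get() is written as a boolean predicate: it returns true once the last block of path reports exactly expectedReplicas locations, nudging the cluster with deletion reports and heartbeats along the way. In the surrounding test such a predicate is normally polled until it turns true or a timeout expires; a minimal, self-contained sketch of that pattern (using only standard .NET types, with illustrative timing values) is shown below.

 // Hypothetical polling loop for a predicate like Get() above.
 public static void WaitFor(System.Func<bool> check, int checkEveryMillis, int waitForMillis)
 {
     System.DateTime deadline = System.DateTime.UtcNow.AddMilliseconds(waitForMillis);
     while (!check())
     {
         if (System.DateTime.UtcNow > deadline)
         {
             throw new System.TimeoutException("Timed out waiting for condition");
         }
         System.Threading.Thread.Sleep(checkEveryMillis);
     }
 }

 // Usage (illustrative): poll until the replica count matches.
 //   WaitFor(() => checker.Get(), 500, 20000);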
Example #3
        public virtual void TestReplicationError()
        {
            // create a file of replication factor of 1
            Path fileName = new Path("/test.txt");
            int  fileLen  = 1;

            DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 1L);
            DFSTestUtil.WaitReplication(fs, fileName, (short)1);
            // get the block that belongs to the created file
            LocatedBlocks blocks = NameNodeAdapter.GetBlockLocations(
                cluster.GetNameNode(), fileName.ToString(), 0, (long)fileLen);

            NUnit.Framework.Assert.AreEqual("Should only find 1 block",
                                            blocks.LocatedBlockCount(), 1);
            LocatedBlock block = blocks.Get(0);

            // bring up a second datanode
            cluster.StartDataNodes(conf, 1, true, null, null);
            cluster.WaitActive();
            int      sndNode  = 1;
            DataNode datanode = cluster.GetDataNodes()[sndNode];
            // replicate the block to the second datanode
            IPEndPoint target = datanode.GetXferAddress();
            Socket     s      = Sharpen.Extensions.CreateSocket(target.Address, target.Port);
            // write the header.
            DataOutputStream @out     = new DataOutputStream(s.GetOutputStream());
            DataChecksum     checksum = DataChecksum.NewDataChecksum(DataChecksum.Type.Crc32, 512);

            new Sender(@out).WriteBlock(block.GetBlock(), StorageType.Default,
                                        BlockTokenSecretManager.DummyToken, string.Empty,
                                        new DatanodeInfo[0], new StorageType[0], null,
                                        BlockConstructionStage.PipelineSetupCreate, 1, 0L, 0L, 0L,
                                        checksum, CachingStrategy.NewDefaultStrategy(), false, false, null);
            @out.Flush();
            // close the connection before sending the content of the block
            @out.Close();
            // the temporary block & meta files should be deleted
            string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
            FilePath storageDir = cluster.GetInstanceStorageDir(sndNode, 0);
            FilePath dir1       = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            storageDir = cluster.GetInstanceStorageDir(sndNode, 1);
            FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            while (dir1.ListFiles().Length != 0 || dir2.ListFiles().Length != 0)
            {
                Sharpen.Thread.Sleep(100);
            }
            // then increase the file's replication factor
            fs.SetReplication(fileName, (short)2);
            // replication to the second datanode should succeed
            DFSTestUtil.WaitReplication(fs, fileName, (short)2);
            // clean up the file
            fs.Delete(fileName, false);
        }
Example #4
        /// <summary>
        /// Check that the blocks of the dst file are cleaned up after a rename with
        /// overwrite, and restart the NN to verify that the rename was persisted.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRenameWithOverwrite()
        {
            short          replFactor = 2;
            long           blockSize  = 512;
            Configuration  conf       = new Configuration();
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(replFactor).Build();
            DistributedFileSystem dfs = cluster.GetFileSystem();

            try
            {
                long   fileLen = blockSize * 3;
                string src     = "/foo/src";
                string dst     = "/foo/dst";
                Path   srcPath = new Path(src);
                Path   dstPath = new Path(dst);
                DFSTestUtil.CreateFile(dfs, srcPath, fileLen, replFactor, 1);
                DFSTestUtil.CreateFile(dfs, dstPath, fileLen, replFactor, 1);
                LocatedBlocks lbs = NameNodeAdapter.GetBlockLocations(
                    cluster.GetNameNode(), dst, 0, fileLen);
                BlockManager bm = NameNodeAdapter.GetNamesystem(cluster.GetNameNode()).GetBlockManager();
                NUnit.Framework.Assert.IsTrue(
                    bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) != null);
                dfs.Rename(srcPath, dstPath, Options.Rename.Overwrite);
                NUnit.Framework.Assert.IsTrue(
                    bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) == null);
                // Restart the NN and verify that the rename was persisted
                cluster.RestartNameNodes();
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
            }
            finally
            {
                if (dfs != null)
                {
                    dfs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #5
        public virtual void TestPendingAndInvalidate()
        {
            Configuration Conf = new HdfsConfiguration();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
            Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(DatanodeCount).Build();

            cluster.WaitActive();
            FSNamesystem          namesystem = cluster.GetNamesystem();
            BlockManager          bm         = namesystem.GetBlockManager();
            DistributedFileSystem fs         = cluster.GetFileSystem();

            try
            {
                // 1. create a file
                Path filePath = new Path("/tmp.txt");
                DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
                // 2. disable the heartbeats
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
                }
                // 3. mark two replicas of the block as corrupt
                LocatedBlock block = NameNodeAdapter.GetBlockLocations(
                    cluster.GetNameNode(), filePath.ToString(), 0, 1).Get(0);
                cluster.GetNamesystem().WriteLock();
                try
                {
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0],
                                                 "STORAGE_ID", "TEST");
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1],
                                                 "STORAGE_ID", "TEST");
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
                BlockManagerTestUtil.ComputeAllPendingWork(bm);
                BlockManagerTestUtil.UpdateState(bm);
                NUnit.Framework.Assert.AreEqual(bm.GetPendingReplicationBlocksCount(), 1L);
                NUnit.Framework.Assert.AreEqual(
                    bm.pendingReplications.GetNumReplicas(block.GetBlock().GetLocalBlock()), 2);
                // 4. delete the file
                fs.Delete(filePath, true);
                // retry at most 10 times, each time sleep for 1s. Note that 10s is much
                // less than the default pending record timeout (5~10min)
                int  retries    = 10;
                long pendingNum = bm.GetPendingReplicationBlocksCount();
                while (pendingNum != 0 && retries-- > 0)
                {
                    Sharpen.Thread.Sleep(1000);
                    // let NN do the deletion
                    BlockManagerTestUtil.UpdateState(bm);
                    pendingNum = bm.GetPendingReplicationBlocksCount();
                }
                NUnit.Framework.Assert.AreEqual(pendingNum, 0L);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #6
        public virtual void TestDataLocality()
        {
            Configuration conf = WebHdfsTestUtil.CreateConf();

            string[] racks      = new string[] { Rack0, Rack0, Rack1, Rack1, Rack2, Rack2 };
            int      nDataNodes = racks.Length;

            Log.Info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.AsList(racks));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(nDataNodes).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                NameNode        namenode  = cluster.GetNameNode();
                DatanodeManager dm        = namenode.GetNamesystem().GetBlockManager().GetDatanodeManager();
                Log.Info("dm=" + dm);
                long   blocksize = DFSConfigKeys.DfsBlockSizeDefault;
                string f         = "/foo";
                {
                    //test CREATE
                    for (int i = 0; i < nDataNodes; i++)
                    {
                        //set client address to a particular datanode
                        DataNode dn     = cluster.GetDataNodes()[i];
                        string   ipAddr = dm.GetDatanode(dn.GetDatanodeId()).GetIpAddr();
                        //(assumes the port exposes SetRemoteAddress, mirroring the Java test hook)
                        NamenodeWebHdfsMethods.SetRemoteAddress(ipAddr);
                        //The chosen datanode must be the same as the client address
                        DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                            namenode, f, PutOpParam.OP.Create, -1L, blocksize, null);
                        NUnit.Framework.Assert.AreEqual(ipAddr, chosen.GetIpAddr());
                    }
                }
                //create a file with one replica.
                Path p = new Path(f);
                FSDataOutputStream @out = dfs.Create(p, (short)1);
                @out.Write(1);
                @out.Close();
                //get replica location.
                LocatedBlocks locatedblocks = NameNodeAdapter.GetBlockLocations(namenode, f, 0, 1);
                IList<LocatedBlock> lb = locatedblocks.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, lb.Count);
                DatanodeInfo[] locations = lb[0].GetLocations();
                NUnit.Framework.Assert.AreEqual(1, locations.Length);
                DatanodeInfo expected = locations[0];
                {
                    //For GETFILECHECKSUM, OPEN and APPEND,
                    //the chosen datanode must be the same as the replica location.
                    //test GETFILECHECKSUM
                    DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                        namenode, f, GetOpParam.OP.Getfilechecksum, -1L, blocksize, null);
                    NUnit.Framework.Assert.AreEqual(expected, chosen);
                }
                {
                    //test OPEN
                    DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                        namenode, f, GetOpParam.OP.Open, 0, blocksize, null);
                    NUnit.Framework.Assert.AreEqual(expected, chosen);
                }
                {
                    //test APPEND
                    DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                        namenode, f, PostOpParam.OP.Append, -1L, blocksize, null);
                    NUnit.Framework.Assert.AreEqual(expected, chosen);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #7
        public virtual void TestExcludeDataNodes()
        {
            Configuration conf = WebHdfsTestUtil.CreateConf();

            string[] racks = new string[] { Rack0, Rack0, Rack1, Rack1, Rack2, Rack2 };
            string[] hosts = new string[] { "DataNode1", "DataNode2", "DataNode3",
                                            "DataNode4", "DataNode5", "DataNode6" };
            int nDataNodes = hosts.Length;

            Log.Info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.AsList(racks) + ", hosts="
                     + Arrays.AsList(hosts));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Hosts(hosts)
                                         .NumDataNodes(nDataNodes).Racks(racks).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                NameNode        namenode  = cluster.GetNameNode();
                DatanodeManager dm        = namenode.GetNamesystem().GetBlockManager().GetDatanodeManager();
                Log.Info("dm=" + dm);
                long   blocksize = DFSConfigKeys.DfsBlockSizeDefault;
                string f         = "/foo";
                //create a file with three replicas.
                Path p = new Path(f);
                FSDataOutputStream @out = dfs.Create(p, (short)3);
                @out.Write(1);
                @out.Close();
                //get replica location.
                LocatedBlocks locatedblocks = NameNodeAdapter.GetBlockLocations(namenode, f, 0, 1);
                IList<LocatedBlock> lb = locatedblocks.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, lb.Count);
                DatanodeInfo[] locations = lb[0].GetLocations();
                NUnit.Framework.Assert.AreEqual(3, locations.Length);
                //For GETFILECHECKSUM, OPEN and APPEND,
                //the chosen datanode must be different from the excluded nodes.
                StringBuilder sb = new StringBuilder();
                for (int i = 0; i < 2; i++)
                {
                    sb.Append(locations[i].GetXferAddr());
                    {
                        // test GETFILECHECKSUM
                        DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                            namenode, f, GetOpParam.OP.Getfilechecksum, -1L, blocksize, sb.ToString());
                        for (int j = 0; j <= i; j++)
                        {
                            Assert.AssertNotEquals(locations[j].GetHostName(), chosen.GetHostName());
                        }
                    }
                    {
                        // test OPEN
                        DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                            namenode, f, GetOpParam.OP.Open, 0, blocksize, sb.ToString());
                        for (int j = 0; j <= i; j++)
                        {
                            Assert.AssertNotEquals(locations[j].GetHostName(), chosen.GetHostName());
                        }
                    }
                    {
                        // test APPEND
                        DatanodeInfo chosen = NamenodeWebHdfsMethods.ChooseDatanode(
                            namenode, f, PostOpParam.OP.Append, -1L, blocksize, sb.ToString());
                        for (int j = 0; j <= i; j++)
                        {
                            Assert.AssertNotEquals(locations[j].GetHostName(), chosen.GetHostName());
                        }
                    }
                    sb.Append(",");
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }