Example #1
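        /// <summary>
        /// Verify that a block report in which one DataNode claims a replica of every
        /// block on each of its storages does not trip a NameNode assertion, and that
        /// each block still resolves to one replica per DataNode afterwards.
        /// </summary>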
        public virtual void TestBlockHasMultipleReplicasOnSameDN()
        {
            string filename = MakeFileName(GenericTestUtils.GetMethodName());
            Path   filePath = new Path(filename);

            // Write out a file with a few blocks.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(filePath.ToString(), 0,
                                                                   BlockSize * NumBlocks);
            // Generate a fake block report from one of the DataNodes, such
            // that it reports one copy of each block on either storage.
            DataNode             dn    = cluster.GetDataNodes()[0];
            DatanodeRegistration dnReg = dn.GetDNRegistrationForBP(bpid);

            StorageBlockReport[] reports = new StorageBlockReport[cluster.GetStoragesPerDatanode()];
            AList<Replica> blocks = new AList<Replica>();

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Block localBlock = locatedBlock.GetBlock().GetLocalBlock();
                blocks.AddItem(new FinalizedReplica(localBlock, null, null));
            }
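            // Encode the replica list into the block report format.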
            BlockListAsLongs bll = BlockListAsLongs.Encode(blocks);

            for (int i = 0; i < cluster.GetStoragesPerDatanode(); ++i)
            {
                FsVolumeSpi     v   = dn.GetFSDataset().GetVolumes()[i];
                DatanodeStorage dns = new DatanodeStorage(v.GetStorageID());
                reports[i] = new StorageBlockReport(dns, bll);
            }
            // Should not assert!
            cluster.GetNameNodeRpc().BlockReport(dnReg, bpid, reports,
                                                 new BlockReportContext(1, 0, Runtime.NanoTime()));
            // Get the block locations once again.
            locatedBlocks = client.GetLocatedBlocks(filename, 0, BlockSize * NumBlocks);
            // Make sure that each block has two replicas, one on each DataNode.
            foreach (LocatedBlock locatedBlock_1 in locatedBlocks.GetLocatedBlocks())
            {
                DatanodeInfo[] locations = locatedBlock_1.GetLocations();
                Assert.AssertThat(locations.Length, IS.Is((int)NumDatanodes));
                Assert.AssertThat(locations[0].GetDatanodeUuid(),
                                  CoreMatchers.Not(locations[1].GetDatanodeUuid()));
            }
        }
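        /// <summary>Fetch the block locations of the test file and assert it has exactly one block.</summary>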
        /// <exception cref="System.IO.IOException"/>
        private LocatedBlock GetLocatedBlock()
        {
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(Path.ToString(), 0, BlockSize);

            Assert.AssertThat(locatedBlocks.GetLocatedBlocks().Count, CoreMatchers.Is(1));
            return Iterables.GetOnlyElement(locatedBlocks.GetLocatedBlocks());
        }
Example #3
        /// <summary>create a file with a length of <code>fileLen</code></summary>
        /// <exception cref="System.IO.IOException"/>
        private IList<LocatedBlock> CreateFile(string fileNamePrefix, long fileLen,
                                               bool isLazyPersist)
        {
            FileSystem fs       = cluster.GetFileSystem();
            Path       filePath = new Path("/" + fileNamePrefix + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, isLazyPersist, 1024, fileLen, BlockLength,
                                   (short)1, r.NextLong(), false);
            return client.GetLocatedBlocks(filePath.ToString(), 0, fileLen).GetLocatedBlocks();
        }
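        /// <summary>Create a multi-block file and return its block locations, verifying nothing is under-replicated.</summary>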
        /// <exception cref="System.IO.IOException"/>
        private LocatedBlocks CreateFileGetBlocks(string filenamePrefix)
        {
            Path filePath = new Path("/" + filenamePrefix + ".dat");

            // Write out a file with a few blocks, get block locations.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks blocks = client.GetLocatedBlocks(filePath.ToString(), 0,
                                                           BlockSize * NumBlocks);

            Assert.AssertThat(cluster.GetNamesystem().GetUnderReplicatedBlocks(), IS.Is(0L));
            return blocks;
        }
Example #5
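        /// <summary>Verify that each block of <code>path</code> reports the expected storage type and return the file's block locations.</summary>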
        /// <exception cref="System.IO.IOException"/>
        protected internal LocatedBlocks EnsureFileReplicasOnStorageType(Path path,
                                                                         StorageType storageType)
        {
            // Ensure that the returned block locations are correct!
            Log.Info("Ensure path: " + path + " is on StorageType: " + storageType);
            Assert.AssertThat(fs.Exists(path), IS.Is(true));
            long          fileLength    = client.GetFileInfo(path.ToString()).GetLen();
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(path.ToString(), 0, fileLength);

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Assert.AssertThat(locatedBlock.GetStorageTypes()[0], IS.Is(storageType));
            }
            return locatedBlocks;
        }
Example #6
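        /// <summary>
        /// Verify that with stale-DataNode avoidance enabled for reads, a node whose
        /// heartbeats have stopped is ordered last in the block locations returned to clients.
        /// </summary>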
        public virtual void TestReadSelectNonStaleDatanode()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
            long staleInterval = 30 * 1000 * 60;

            conf.SetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, staleInterval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NumDataNodes(numDatanodes).Racks(racks).Build();

            cluster.WaitActive();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);
            IList<DatanodeDescriptor> nodeInfoList = cluster.GetNameNode().GetNamesystem()
                .GetBlockManager().GetDatanodeManager()
                .GetDatanodeListForReport(HdfsConstants.DatanodeReportType.Live);

            NUnit.Framework.Assert.AreEqual("Unexpected number of datanodes", numDatanodes, nodeInfoList
                                            .Count);
            FileSystem         fileSys = cluster.GetFileSystem();
            FSDataOutputStream stm     = null;

            try
            {
                // do the writing but do not close the FSDataOutputStream
                // in order to mimic the ongoing writing
                Path fileName = new Path("/file1");
                stm = fileSys.Create(fileName, true,
                                     fileSys.GetConf().GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
                                     (short)3, blockSize);
                stm.Write(new byte[(blockSize * 3) / 2]);
                // We do not close the stream so that
                // the writing seems to be still ongoing
                stm.Hflush();
                LocatedBlocks blocks = client.GetNamenode()
                    .GetBlockLocations(fileName.ToString(), 0, blockSize);
                DatanodeInfo[] nodes = blocks.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                DataNode           staleNode     = null;
                DatanodeDescriptor staleNodeInfo = null;
                // stop the heartbeat of the first node
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the first node as stale
                staleNodeInfo = cluster.GetNameNode().GetNamesystem().GetBlockManager()
                    .GetDatanodeManager().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, -(staleInterval + 1));
                LocatedBlocks blocksAfterStale = client.GetNamenode()
                    .GetBlockLocations(fileName.ToString(), 0, blockSize);
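                // The stale node should now be ordered last among the block's locations.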
                DatanodeInfo[] nodesAfterStale = blocksAfterStale.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(),
                                                nodes[0].GetHostName());
                // restart the staleNode's heartbeat
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(staleNode, false);
                // reset the first node as non-stale, so as to avoid two stale nodes
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, 0);
                LocatedBlock lastBlock = client
                    .GetLocatedBlocks(fileName.ToString(), 0, long.MaxValue).GetLastLocatedBlock();
                nodes = lastBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                // stop the heartbeat of the first node for the last block
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the node as stale
                DatanodeDescriptor dnDesc = cluster.GetNameNode().GetNamesystem()
                    .GetBlockManager().GetDatanodeManager().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
                LocatedBlock lastBlockAfterStale = client
                    .GetLocatedBlocks(fileName.ToString(), 0, long.MaxValue).GetLastLocatedBlock();
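                // Again, the stale node should be moved to the end of the location list.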
                nodesAfterStale = lastBlockAfterStale.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(),
                                                nodes[0].GetHostName());
            }
            finally
            {
                if (stm != null)
                {
                    stm.Close();
                }
                client.Close();
                cluster.Shutdown();
            }
        }
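
All of the examples above share the same flow: write a file with DFSTestUtil.CreateFile, ask the client for its LocatedBlocks, and then inspect each LocatedBlock. The sketch below condenses that pattern into a single helper. It is illustrative only; the helper name InspectBlockLocations is made up here, and it assumes a running MiniDFSCluster with an initialized DFSClient (client), a FileSystem (fs), and the test constants (BlockSize, NumBlocks, NumDatanodes, seed) used in the examples above.

        /// <summary>Illustrative sketch only: the shared pattern condensed into one helper.</summary>
        /// <exception cref="System.IO.IOException"/>
        private void InspectBlockLocations(string fileNamePrefix)
        {
            Path filePath = new Path("/" + fileNamePrefix + ".dat");

            // Write out a multi-block file.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Ask the NameNode for the block locations of the whole file.
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(filePath.ToString(), 0,
                                                                  BlockSize * NumBlocks);
            // Inspect each block's replica locations and storage types.
            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                DatanodeInfo[] locations = locatedBlock.GetLocations();
                StorageType[]  types     = locatedBlock.GetStorageTypes();
                // ...assert whatever the test cares about, e.g. locations.Length == NumDatanodes
            }
        }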