Example #1
        public virtual void TestBasicFunctionality()
        {
            BPOfferService bpos = SetupBPOSForNNs(mockNN1, mockNN2);

            bpos.Start();
            try
            {
                WaitForInitialization(bpos);
                // The DN should have registered with both NNs.
                Org.Mockito.Mockito.Verify(mockNN1).RegisterDatanode(Org.Mockito.Mockito.Any<DatanodeRegistration>());
                Org.Mockito.Mockito.Verify(mockNN2).RegisterDatanode(Org.Mockito.Mockito.Any<DatanodeRegistration>());
                // Should get block reports from both NNs
                WaitForBlockReport(mockNN1);
                WaitForBlockReport(mockNN2);
                // When we receive a block, it should report it to both NNs
                bpos.NotifyNamenodeReceivedBlock(FakeBlock, string.Empty, string.Empty);
                ReceivedDeletedBlockInfo[] ret = WaitForBlockReceived(FakeBlock, mockNN1);
                NUnit.Framework.Assert.AreEqual(1, ret.Length);
                NUnit.Framework.Assert.AreEqual(FakeBlock.GetLocalBlock(), ret[0].GetBlock());
                ret = WaitForBlockReceived(FakeBlock, mockNN2);
                NUnit.Framework.Assert.AreEqual(1, ret.Length);
                NUnit.Framework.Assert.AreEqual(FakeBlock.GetLocalBlock(), ret[0].GetBlock());
            }
            finally
            {
                bpos.Stop();
            }
        }
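The wait helpers used above (WaitForInitialization, WaitForBlockReport, WaitForBlockReceived) are defined elsewhere in the test class. As a rough sketch, WaitForBlockReport can be built from the GenericTestUtils.WaitFor/Supplier pattern that appears in Example #14; the FakeBpid constant and the supplier wrapper class below are assumptions, not the upstream code:

        // Sketch: poll until BlockReport() has been invoked on the mock NN.
        private void WaitForBlockReport(DatanodeProtocolClientSideTranslatorPB mockNN)
        {
            GenericTestUtils.WaitFor(new _BlockReportSupplier(mockNN), 500, 10000);
        }

        private sealed class _BlockReportSupplier : Supplier<bool>
        {
            private readonly DatanodeProtocolClientSideTranslatorPB mockNN;

            public _BlockReportSupplier(DatanodeProtocolClientSideTranslatorPB mockNN)
            {
                this.mockNN = mockNN;
            }

            public bool Get()
            {
                try
                {
                    // Succeeds once a block report has reached this NN (FakeBpid is assumed).
                    Org.Mockito.Mockito.Verify(mockNN).BlockReport(
                        Org.Mockito.Mockito.Any<DatanodeRegistration>(),
                        Org.Mockito.Mockito.Eq(FakeBpid),
                        Org.Mockito.Mockito.Any<StorageBlockReport[]>());
                    return true;
                }
                catch (Exception)
                {
                    return false;
                }
            }
        }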
Example #2
        /// <exception cref="System.Exception"/>
        public virtual void TestSetrepIncWithUnderReplicatedBlocks()
        {
            // 1 min timeout
            Configuration  conf = new HdfsConfiguration();
            short          ReplicationFactor = 2;
            string         FileName          = "/testFile";
            Path           FilePath          = new Path(FileName);
            MiniDFSCluster cluster           = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplicationFactor + 1).Build();

            try
            {
                // create a file with one block with a replication factor of 2
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, FilePath, 1L, ReplicationFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, ReplicationFactor);
                // remove one replica from the blocksMap so block becomes under-replicated
                // but the block does not get put into the under-replicated blocks queue
                BlockManager       bm = cluster.GetNamesystem().GetBlockManager();
                ExtendedBlock      b  = DFSTestUtil.GetFirstBlock(fs, FilePath);
                DatanodeDescriptor dn = bm.blocksMap.GetStorages(b.GetLocalBlock()).GetEnumerator().Next().GetDatanodeDescriptor();
                bm.AddToInvalidates(b.GetLocalBlock(), dn);
                Sharpen.Thread.Sleep(5000);
                bm.blocksMap.RemoveNode(b.GetLocalBlock(), dn);
                // increment this file's replication factor
                FsShell shell = new FsShell(conf);
                NUnit.Framework.Assert.AreEqual(0, shell.Run(new string[] { "-setrep", "-w", Sharpen.Extensions.ToString(1 + ReplicationFactor), FileName }));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
        public virtual void TestInvalidateOverReplicatedBlock()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            try
            {
                FSNamesystem       namesystem = cluster.GetNamesystem();
                BlockManager       bm         = namesystem.GetBlockManager();
                FileSystem         fs         = cluster.GetFileSystem();
                Path               p          = new Path(MiniDFSCluster.GetBaseDirectory(), "/foo1");
                FSDataOutputStream @out       = fs.Create(p, (short)2);
                @out.WriteBytes("HDFS-3119: " + p);
                @out.Hsync();
                fs.SetReplication(p, (short)1);
                @out.Close();
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, p);
                NUnit.Framework.Assert.AreEqual("Expected only one live replica for the block", 1
                                                , bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
        public virtual void TestDeletingBlocks()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();

            try
            {
                cluster.WaitActive();
                DataNode      dn  = cluster.GetDataNodes()[0];
                FsDatasetImpl ds  = (FsDatasetImpl)DataNodeTestUtils.GetFSDataset(dn);
                FsVolumeImpl  vol = ds.GetVolumes()[0];
                ExtendedBlock eb;
                ReplicaInfo   info;
                IList <Block> blockList = new AList <Block>();
                for (int i = 1; i <= 63; i++)
                {
                    eb   = new ExtendedBlock(Blockpool, i, 1, 1000 + i);
                    info = new FinalizedReplica(eb.GetLocalBlock(), vol, vol.GetCurrentDir().GetParentFile());
                    ds.volumeMap.Add(Blockpool, info);
                    info.GetBlockFile().CreateNewFile();
                    info.GetMetaFile().CreateNewFile();
                    blockList.AddItem(info);
                }
                ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                    // Nothing to do
                }
                NUnit.Framework.Assert.IsTrue(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
                blockList.Clear();
                eb   = new ExtendedBlock(Blockpool, 64, 1, 1064);
                info = new FinalizedReplica(eb.GetLocalBlock(), vol, vol.GetCurrentDir().GetParentFile());
                ds.volumeMap.Add(Blockpool, info);
                info.GetBlockFile().CreateNewFile();
                info.GetMetaFile().CreateNewFile();
                blockList.AddItem(info);
                ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                    // Nothing to do
                }
                NUnit.Framework.Assert.IsFalse(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #5
 /// <summary>
 /// Override createRbw to verify that the block length that is passed
 /// is correct.
 /// </summary>
 /// <remarks>
 /// Override createRbw to verify that the block length that is passed
 /// is correct. This requires both DFSOutputStream and BlockReceiver to
 /// correctly propagate the hint to FsDatasetSpi.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 public override ReplicaHandler CreateRbw(StorageType storageType, ExtendedBlock b
                                          , bool allowLazyPersist)
 {
     lock (this)
     {
         Assert.AssertThat(b.GetLocalBlock().GetNumBytes(), IS.Is(ExpectedBlockLength));
         return(base.CreateRbw(storageType, b, allowLazyPersist));
     }
 }
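For context, a test would typically exercise this override by installing the checking dataset, recording the expected hint, and then writing a file so that every CreateRbw call in the write pipeline is verified. This is a sketch only; FsDatasetChecker.SetFactory and the /checkedWrite path are assumptions:

     FsDatasetChecker.SetFactory(conf);   // hypothetical: register the checking dataset implementation
     ExpectedBlockLength = BlockSize;     // each CreateRbw call is checked against this hint
     DFSTestUtil.CreateFile(fs, new Path("/checkedWrite"), BlockSize, (short)1, 0x1BADB002);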
Example #6
 /// <summary>
 /// Delete the block file and meta file from the disk asynchronously, adjust
 /// dfsUsed statistics accordingly.
 /// </summary>
 internal virtual void DeleteAsync(FsVolumeReference volumeRef, FilePath blockFile
                                   , FilePath metaFile, ExtendedBlock block, string trashDirectory)
 {
     Log.Info("Scheduling " + block.GetLocalBlock() + " file " + blockFile + " for deletion"
              );
     FsDatasetAsyncDiskService.ReplicaFileDeleteTask deletionTask = new FsDatasetAsyncDiskService.ReplicaFileDeleteTask
                                                                        (this, volumeRef, blockFile, metaFile, block, trashDirectory);
     Execute(((FsVolumeImpl)volumeRef.GetVolume()).GetCurrentDir(), deletionTask);
 }
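A caller such as FsDatasetImpl.Invalidate would schedule each replica roughly as below. This is a sketch: holding a volume reference while the deletion is pending follows the upstream pattern, but the exact helper calls here are assumptions.

     // Sketch: schedule one replica's files for async deletion, keeping a volume
     // reference so the volume cannot be removed while the deletion is pending.
     asyncDiskService.DeleteAsync(volume.ObtainReference(), blockFile, metaFile,
                                  new ExtendedBlock(bpid, invalidBlks[i]),
                                  dataStorage.GetTrashDirectoryForBlockFile(bpid, blockFile));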
Example #7
        internal virtual void NotifyNamenodeReceivingBlock(ExtendedBlock block, string storageUuid)
        {
            CheckBlock(block);
            ReceivedDeletedBlockInfo bInfo = new ReceivedDeletedBlockInfo(block.GetLocalBlock(), ReceivedDeletedBlockInfo.BlockStatus.ReceivingBlock, null);

            foreach (BPServiceActor actor in bpServices)
            {
                actor.NotifyNamenodeBlock(bInfo, storageUuid, false);
            }
        }
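CheckBlock is not shown on this page; in BPOfferService it is a simple guard that the block belongs to this service's block pool. A minimal sketch, assuming the Sharpen'd Guava Preconditions class:

        // Sketch: reject blocks that belong to a different block pool.
        private void CheckBlock(ExtendedBlock block)
        {
            Preconditions.CheckArgument(block != null, "block is null");
            Preconditions.CheckArgument(block.GetBlockPoolId().Equals(GetBlockPoolId()),
                "block belongs to BP %s instead of BP %s",
                block.GetBlockPoolId(), GetBlockPoolId());
        }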
Example #8
        /// <summary>Corrupt a block on a data node.</summary>
        /// <remarks>
        /// Corrupt a block on a data node by replacing the block file content
        /// with the byte sequence 0, 1, ..., BLOCK_SIZE-1.
        /// </remarks>
        /// <param name="block">the ExtendedBlock to be corrupted</param>
        /// <param name="dn">the data node where the block needs to be corrupted</param>
        /// <exception cref="System.IO.FileNotFoundException"/>
        /// <exception cref="System.IO.IOException"/>
        private static void CorruptBlock(ExtendedBlock block, DataNode dn)
        {
            FilePath f = DataNodeTestUtils.GetBlockFile(dn, block.GetBlockPoolId(), block.GetLocalBlock());
            RandomAccessFile raFile = new RandomAccessFile(f, "rw");

            byte[] bytes = new byte[(int)BlockSize];
            for (int i = 0; i < BlockSize; i++)
            {
                bytes[i] = unchecked ((byte)(i));
            }
            raFile.Write(bytes);
            raFile.Close();
        }
Example #9
        /// <summary>TC7: Corrupted replicas are present.</summary>
        /// <exception cref="System.IO.IOException">an exception might be thrown</exception>
        /// <exception cref="System.Exception"/>
        private void TestTC7(bool appendToNewBlock)
        {
            short repl = 2;
            Path  p    = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with replication factor of 2. Write half block of data. Close file.
            int len1 = (int)(BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            DFSTestUtil.WaitReplication(fs, p, repl);
            //b. Log into one datanode that has one replica of this block.
            //   Find the block file on this datanode and truncate it to zero size.
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString(), 0L, len1);

            NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
            LocatedBlock  lb  = locatedblocks.Get(0);
            ExtendedBlock blk = lb.GetBlock();

            NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
            DatanodeInfo[] datanodeinfos = lb.GetLocations();
            NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
            DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
            FilePath f  = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
            RandomAccessFile raf = new RandomAccessFile(f, "rw");

            AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
            NUnit.Framework.Assert.AreEqual(len1, raf.Length());
            raf.SetLength(0);
            raf.Close();
            //c. Open file in "append mode".  Append a new block worth of data. Close file.
            int len2 = (int)BlockSize;

            {
                FSDataOutputStream @out = appendToNewBlock
                    ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                    : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //d. Reopen file and read two blocks worth of data.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #10
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, DatanodeStorage.State.ReadOnlyShared);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(
                    BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                    i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
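GetLocatedBlock and WaitForLocations are helpers of the same test class. A sketch of GetLocatedBlock, assuming the client, Path, and BlockSize fields set up above:

        // Sketch: fetch the single block of the test file through the DFSClient.
        private LocatedBlock GetLocatedBlock()
        {
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(Path.ToString(), 0, BlockSize);
            Assert.AssertThat(locatedBlocks.GetLocatedBlocks().Count, CoreMatchers.Is(1));
            return locatedBlocks.GetLocatedBlocks()[0];
        }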
Example #11
        /// <exception cref="System.Exception"/>
        public virtual void TestVerifyBlockChecksumCommand()
        {
            DFSTestUtil.CreateFile(fs, new Path("/bar"), 1234, (short)1, unchecked ((int)(0xdeadbeef
                                                                                          )));
            FsDatasetSpi <object> fsd   = datanode.GetFSDataset();
            ExtendedBlock         block = DFSTestUtil.GetFirstBlock(fs, new Path("/bar"));
            FilePath blockFile          = FsDatasetTestUtil.GetBlockFile(fsd, block.GetBlockPoolId(),
                                                                         block.GetLocalBlock());

            NUnit.Framework.Assert.AreEqual("ret: 1, You must specify a meta file with -meta"
                                            , RunCmd(new string[] { "verify", "-block", blockFile.GetAbsolutePath() }));
            FilePath metaFile = FsDatasetTestUtil.GetMetaFile(fsd, block.GetBlockPoolId(), block.GetLocalBlock());

            NUnit.Framework.Assert.AreEqual("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)"
                                            , RunCmd(new string[] { "verify", "-meta", metaFile.GetAbsolutePath() }));
            NUnit.Framework.Assert.AreEqual("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)"
                                            + "Checksum verification succeeded on block file " + blockFile.GetAbsolutePath(
                                                ), RunCmd(new string[] { "verify", "-meta", metaFile.GetAbsolutePath(), "-block"
                                                                         , blockFile.GetAbsolutePath() }));
        }
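The asserted strings come from the RunCmd helper, which prefixes the DebugAdmin exit code to the command's console output. A sketch, under the assumption that the tool instance is held in an admin field and writes its messages to stderr:

        // Sketch: run the command, capture stderr, and fold it into "ret: N, ..." form.
        private string RunCmd(string[] args)
        {
            System.IO.StringWriter captured = new System.IO.StringWriter();
            System.IO.TextWriter oldErr = System.Console.Error;
            System.Console.SetError(captured);
            int ret;
            try
            {
                ret = admin.Run(args);
            }
            finally
            {
                System.Console.SetError(oldErr);
            }
            return "ret: " + ret + ", " + captured.ToString().Replace("\r", string.Empty).Replace("\n", string.Empty);
        }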
Example #12
        /// <summary>
        /// The test verifies that the number of outstanding replication requests for
        /// a given DN doesn't exceed the limit set by the configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// </summary>
        /// <remarks>
        /// The test verifies that the number of outstanding replication requests for
        /// a given DN doesn't exceed the limit set by the configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// The test does the following:
        /// 1. Create a mini cluster with 2 DNs. Set a large heartbeat interval so that
        /// replication requests won't be picked up by any DN right away.
        /// 2. Create a file with 10 blocks and replication factor 2. Thus each
        /// of the 2 DNs has one replica of each block.
        /// 3. Add a DN to the cluster for later replication.
        /// 4. Remove a DN that has data.
        /// 5. Ask BlockManager to compute the replication work. This will assign
        /// replication requests to the only DN that has data.
        /// 6. Make sure the number of pending replication requests of that DN
        /// doesn't exceed the limit.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestNumberOfBlocksToBeReplicated()
        {
            // 1 min timeout
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeMinBlockSizeKey, 0);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 1);
            // Large value to make sure the pending replication request can stay in
            // DatanodeDescriptor.replicateBlocks before test timeout.
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 100);
            // Make sure BlockManager can pull all blocks from UnderReplicatedBlocks via
            // chooseUnderReplicatedBlocks at once.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationWorkMultiplierPerIteration, 5);
            int            NumOfBlocks = 10;
            short          RepFactor   = 2;
            string         FileName    = "/testFile";
            Path           FilePath    = new Path(FileName);
            MiniDFSCluster cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(RepFactor).Build();

            try
            {
                // create a file with 10 blocks with a replication factor of 2
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, FilePath, NumOfBlocks, RepFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, RepFactor);
                cluster.StartDataNodes(conf, 1, true, null, null, null, null);
                BlockManager  bm = cluster.GetNamesystem().GetBlockManager();
                ExtendedBlock b  = DFSTestUtil.GetFirstBlock(fs, FilePath);
                IEnumerator <DatanodeStorageInfo> storageInfos = bm.blocksMap.GetStorages(b.GetLocalBlock()).GetEnumerator();
                DatanodeDescriptor firstDn  = storageInfos.Next().GetDatanodeDescriptor();
                DatanodeDescriptor secondDn = storageInfos.Next().GetDatanodeDescriptor();
                bm.GetDatanodeManager().RemoveDatanode(firstDn);
                NUnit.Framework.Assert.AreEqual(NumOfBlocks, bm.GetUnderReplicatedNotMissingBlocks());
                bm.ComputeDatanodeWork();
                NUnit.Framework.Assert.IsTrue("The number of blocks to be replicated should be less than "
                                              + "or equal to " + bm.replicationStreamsHardLimit, secondDn.GetNumberOfBlocksToBeReplicated
                                                  () <= bm.replicationStreamsHardLimit);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #13
        public virtual void TestCopyOnWrite()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            IPEndPoint     addr    = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient      client  = new DFSClient(addr, conf);

            try
            {
                // create a new file, write to it and close it.
                //
                Path file1             = new Path("/filestatus.dat");
                FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                WriteFile(stm);
                stm.Close();
                // Get a handle to the datanode
                DataNode[] dn = cluster.ListDataNodes();
                NUnit.Framework.Assert.IsTrue("There should be only one datanode but found " + dn
                                              .Length, dn.Length == 1);
                LocatedBlocks locations = client.GetNamenode().GetBlockLocations(file1.ToString(), 0, long.MaxValue);
                IList <LocatedBlock> blocks = locations.GetLocatedBlocks();
                //
                // Create hard links for a few of the blocks
                //
                for (int i = 0; i < blocks.Count; i = i + 2)
                {
                    ExtendedBlock b = blocks[i].GetBlock();
                    FilePath      f = DataNodeTestUtils.GetFile(dn[0], b.GetBlockPoolId(), b.GetLocalBlock().GetBlockId());
                    FilePath link = new FilePath(f.ToString() + ".link");
                    System.Console.Out.WriteLine("Creating hardlink for File " + f + " to " + link);
                    HardLink.CreateHardLink(f, link);
                }
                //
                // Detach all blocks. This should remove hardlinks (if any)
                //
                for (int i_1 = 0; i_1 < blocks.Count; i_1++)
                {
                    ExtendedBlock b = blocks[i_1].GetBlock();
                    System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
                    NUnit.Framework.Assert.IsTrue("Detaching block " + b + " should have returned true"
                                                  , DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
                }
                // Since the blocks were already detached earlier, these calls should
                // return false
                //
                for (int i_2 = 0; i_2 < blocks.Count; i_2++)
                {
                    ExtendedBlock b = blocks[i_2].GetBlock();
                    System.Console.Out.WriteLine("testCopyOnWrite detaching block " + b);
                    NUnit.Framework.Assert.IsTrue("Detaching block " + b + " should have returned false"
                                                  , !DataNodeTestUtils.UnlinkBlock(dn[0], b, 1));
                }
            }
            finally
            {
                client.Close();
                fs.Close();
                cluster.Shutdown();
            }
        }
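WriteFile is a helper of the same test class; a minimal sketch, assuming a fileSize field and the upstream AppendTestUtil buffer initializer:

        // Sketch: fill the stream with fileSize test bytes.
        private void WriteFile(FSDataOutputStream stm)
        {
            byte[] buffer = AppendTestUtil.InitBuffer(fileSize);
            stm.Write(buffer);
        }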
Example #14
        /// <summary>
        /// Regression test for HDFS-7960.<p/>
        /// Shutting down a datanode, removing a storage directory, and restarting
        /// the DataNode should not produce zombie storages.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRemovingStorageDoesNotProduceZombies()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            int            NumStoragesPerDn = 2;
            MiniDFSCluster cluster          = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StoragesPerDatanode(NumStoragesPerDn).Build();

            try
            {
                cluster.WaitActive();
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, cluster.GetNamesystem().GetBlockManager()
                                                        .GetDatanodeManager().GetDatanode(dn.GetDatanodeId()).GetStorageInfos().Length);
                }
                // Create a file which will end up on all 3 datanodes.
                Path TestPath            = new Path("/foo1");
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, 1024, (short)3, unchecked((int)(0xcafecafe)));
                foreach (DataNode dn_1 in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.TriggerBlockReport(dn_1);
                }
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path("/foo1"));
                cluster.GetNamesystem().WriteLock();
                string storageIdToRemove;
                string datanodeUuid;
                // Find the first storage which this block is in.
                try
                {
                    IEnumerator <DatanodeStorageInfo> storageInfoIter = cluster.GetNamesystem().GetBlockManager().GetStorages(block.GetLocalBlock()).GetEnumerator();
                    NUnit.Framework.Assert.IsTrue(storageInfoIter.HasNext());
                    DatanodeStorageInfo info = storageInfoIter.Next();
                    storageIdToRemove = info.GetStorageID();
                    datanodeUuid      = info.GetDatanodeDescriptor().GetDatanodeUuid();
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
                // Find the DataNode which holds that first storage.
                DataNode datanodeToRemoveStorageFrom;
                int      datanodeToRemoveStorageFromIdx = 0;
                while (true)
                {
                    if (datanodeToRemoveStorageFromIdx >= cluster.GetDataNodes().Count)
                    {
                        NUnit.Framework.Assert.Fail("failed to find datanode with uuid " + datanodeUuid);
                        datanodeToRemoveStorageFrom = null;
                        break;
                    }
                    DataNode dn_2 = cluster.GetDataNodes()[datanodeToRemoveStorageFromIdx];
                    if (dn_2.GetDatanodeUuid().Equals(datanodeUuid))
                    {
                        datanodeToRemoveStorageFrom = dn_2;
                        break;
                    }
                    datanodeToRemoveStorageFromIdx++;
                }
                // Find the volume within the datanode which holds that first storage.
                IList <FsVolumeSpi> volumes = datanodeToRemoveStorageFrom.GetFSDataset().GetVolumes();
                NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, volumes.Count);
                string volumeDirectoryToRemove = null;
                foreach (FsVolumeSpi volume in volumes)
                {
                    if (volume.GetStorageID().Equals(storageIdToRemove))
                    {
                        volumeDirectoryToRemove = volume.GetBasePath();
                    }
                }
                // Shut down the datanode and remove the volume.
                // Replace the volume directory with a regular file, which will
                // cause a volume failure.  (If we merely removed the directory,
                // it would be re-initialized with a new storage ID.)
                NUnit.Framework.Assert.IsNotNull(volumeDirectoryToRemove);
                datanodeToRemoveStorageFrom.Shutdown();
                FileUtil.FullyDelete(new FilePath(volumeDirectoryToRemove));
                FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
                try
                {
                    fos.Write(1);
                }
                finally
                {
                    fos.Close();
                }
                cluster.RestartDataNode(datanodeToRemoveStorageFromIdx);
                // Wait for the NameNode to remove the storage.
                Log.Info("waiting for the datanode to remove " + storageIdToRemove);
                GenericTestUtils.WaitFor(new _Supplier_227(cluster, datanodeToRemoveStorageFrom, storageIdToRemove, NumStoragesPerDn), 10, 30000);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
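_Supplier_227 is a Sharpen-generated class wrapping the wait condition. A plausible reconstruction (not verbatim) checks that the NameNode no longer lists the removed storage for that datanode:

        private sealed class _Supplier_227 : Supplier<bool>
        {
            private readonly MiniDFSCluster cluster;
            private readonly DataNode dn;
            private readonly string storageIdToRemove;
            private readonly int numStoragesPerDn;

            public _Supplier_227(MiniDFSCluster cluster, DataNode dn, string storageIdToRemove, int numStoragesPerDn)
            {
                this.cluster = cluster;
                this.dn = dn;
                this.storageIdToRemove = storageIdToRemove;
                this.numStoragesPerDn = numStoragesPerDn;
            }

            public bool Get()
            {
                DatanodeDescriptor descriptor = cluster.GetNamesystem().GetBlockManager()
                    .GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
                foreach (DatanodeStorageInfo info in descriptor.GetStorageInfos())
                {
                    if (info.GetStorageID().Equals(storageIdToRemove))
                    {
                        return false; // the storage is still known to the NN; keep waiting
                    }
                }
                // Zombie storage pruned; only the surviving storages remain.
                return descriptor.GetStorageInfos().Length == numStoragesPerDn - 1;
            }
        }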
Example #15
        public virtual void TestDataTransferProtocol()
        {
            Random        random       = new Random();
            int           oneMil       = 1024 * 1024;
            Path          file         = new Path("dataprotocol.dat");
            int           numDataNodes = 1;
            Configuration conf         = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, numDataNodes);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();

            try
            {
                cluster.WaitActive();
                datanode = cluster.GetFileSystem().GetDataNodeStats(HdfsConstants.DatanodeReportType.Live)[0];
                dnAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
                FileSystem fileSys = cluster.GetFileSystem();
                int        fileLen = Math.Min(conf.GetInt(DFSConfigKeys.DfsBlockSizeKey, 4096), 4096);
                CreateFile(fileSys, file, fileLen);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                string        poolId     = firstBlock.GetBlockPoolId();
                long          newBlockId = firstBlock.GetBlockId() + 1;
                recvBuf.Reset();
                sendBuf.Reset();
                // bad version
                recvOut.WriteShort((short)(DataTransferProtocol.DataTransferVersion - 1));
                sendOut.WriteShort((short)(DataTransferProtocol.DataTransferVersion - 1));
                SendRecvData("Wrong Version", true);
                // bad ops
                sendBuf.Reset();
                sendOut.WriteShort((short)DataTransferProtocol.DataTransferVersion);
                sendOut.WriteByte(OP.WriteBlock.code - 1);
                SendRecvData("Wrong Op Code", true);
                /* Test OP_WRITE_BLOCK */
                sendBuf.Reset();
                DataChecksum badChecksum = Org.Mockito.Mockito.Spy(DefaultChecksum);
                Org.Mockito.Mockito.DoReturn(-1).When(badChecksum).GetBytesPerChecksum();
                WriteBlock(poolId, newBlockId, badChecksum);
                recvBuf.Reset();
                SendResponse(DataTransferProtos.Status.Error, null, null, recvOut);
                SendRecvData("wrong bytesPerChecksum while writing", true);
                sendBuf.Reset();
                recvBuf.Reset();
                WriteBlock(poolId, ++newBlockId, DefaultChecksum);
                PacketHeader hdr = new PacketHeader(
                    4,                        // size of packet
                    0,                        // offset in block
                    100,                      // seqno
                    false,                    // last packet
                    -1 - random.Next(oneMil), // bad datalen
                    false);
                hdr.Write(sendOut);
                SendResponse(DataTransferProtos.Status.Success, string.Empty, null, recvOut);
                new PipelineAck(100, new int[] { PipelineAck.CombineHeader(PipelineAck.ECN.Disabled, DataTransferProtos.Status.Error) }).Write(recvOut);
                SendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, true);
                // test for writing a valid zero size block
                sendBuf.Reset();
                recvBuf.Reset();
                WriteBlock(poolId, ++newBlockId, DefaultChecksum);
                hdr = new PacketHeader(
                    8,     // size of packet
                    0,     // OffsetInBlock
                    100,   // sequencenumber
                    true,  // lastPacketInBlock
                    0,     // chunk length
                    false);
                hdr.Write(sendOut);
                sendOut.WriteInt(0); // zero checksum
                sendOut.Flush();
                //ok finally write a block with 0 len
                SendResponse(DataTransferProtos.Status.Success, string.Empty, null, recvOut);
                new PipelineAck(100, new int[] { PipelineAck.CombineHeader(PipelineAck.ECN.Disabled, DataTransferProtos.Status.Success) }).Write(recvOut);
                SendRecvData("Writing a zero len block blockid " + newBlockId, false);
                /* Test OP_READ_BLOCK */
                string        bpid  = cluster.GetNamesystem().GetBlockPoolId();
                ExtendedBlock blk   = new ExtendedBlock(bpid, firstBlock.GetLocalBlock());
                long          blkid = blk.GetBlockId();
                // bad block id
                sendBuf.Reset();
                recvBuf.Reset();
                blk.SetBlockId(blkid - 1);
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen, true
                                 , CachingStrategy.NewDefaultStrategy());
                SendRecvData("Wrong block ID " + newBlockId + " for read", false);
                // negative block start offset -1L
                sendBuf.Reset();
                blk.SetBlockId(blkid);
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", -1L, fileLen, true
                                 , CachingStrategy.NewDefaultStrategy());
                SendRecvData("Negative start-offset for read for block " + firstBlock.GetBlockId(
                                 ), false);
                // bad block start offset
                sendBuf.Reset();
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", fileLen, fileLen,
                                 true, CachingStrategy.NewDefaultStrategy());
                SendRecvData("Wrong start-offset for reading block " + firstBlock.GetBlockId(), false
                             );
                // negative length is ok. Datanode assumes we want to read the whole block.
                recvBuf.Reset();
                ((DataTransferProtos.BlockOpResponseProto)DataTransferProtos.BlockOpResponseProto.NewBuilder()
                    .SetStatus(DataTransferProtos.Status.Success)
                    .SetReadOpChecksumInfo(DataTransferProtos.ReadOpChecksumInfoProto.NewBuilder()
                        .SetChecksum(DataTransferProtoUtil.ToProto(DefaultChecksum))
                        .SetChunkOffset(0L))
                    .Build()).WriteDelimitedTo(recvOut);
                sendBuf.Reset();
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, -1L - random.
                                 Next(oneMil), true, CachingStrategy.NewDefaultStrategy());
                SendRecvData("Negative length for reading block " + firstBlock.GetBlockId(), false
                             );
                // length is more than size of block.
                recvBuf.Reset();
                SendResponse(DataTransferProtos.Status.Error, null, "opReadBlock " + firstBlock +
                             " received exception java.io.IOException:  " + "Offset 0 and length 4097 don't match block "
                             + firstBlock + " ( blockLen 4096 )", recvOut);
                sendBuf.Reset();
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen + 1,
                                 true, CachingStrategy.NewDefaultStrategy());
                SendRecvData("Wrong length for reading block " + firstBlock.GetBlockId(), false);
                //At the end of all this, read the file to make sure that succeeds finally.
                sendBuf.Reset();
                sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen, true
                                 , CachingStrategy.NewDefaultStrategy());
                ReadFile(fileSys, file, fileLen);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #16
 private static NumberReplicas CountReplicas(FSNamesystem namesystem, ExtendedBlock block)
 {
     return(namesystem.GetBlockManager().CountNodes(block.GetLocalBlock()));
 }
Example #17
        public virtual void TestProcesOverReplicateBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, 100L);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)3, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)3);
                // corrupt the block on datanode 0
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // remove block scanner log to trigger block scanning
                FilePath scanCursor = new FilePath(
                    new FilePath(MiniDFSCluster.GetFinalizedDir(cluster.GetInstanceStorageDir(0, 0),
                                                                cluster.GetNamesystem().GetBlockPoolId()).GetParent()).GetParent(),
                    "scanner.cursor");
                // wait up to one minute for the deletion to succeed
                for (int i = 0; !scanCursor.Delete(); i++)
                {
                    NUnit.Framework.Assert.IsTrue("Could not delete " + scanCursor.GetAbsolutePath()
                                                  + " in one minute", i < 60);
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                }
                // restart the datanode so the corrupt replica will be detected
                cluster.RestartDataNode(dnProps);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                string     blockPoolId     = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeID corruptDataNode = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[2], blockPoolId);
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
                try
                {
                    namesystem.WriteLock();
                    lock (hm)
                    {
                        // set live datanode's remaining space to be 0
                        // so they will be chosen to be deleted when over-replication occurs
                        string corruptMachineName = corruptDataNode.GetXferAddr();
                        foreach (DatanodeDescriptor datanode in hm.GetDatanodes())
                        {
                            if (!corruptMachineName.Equals(datanode.GetXferAddr()))
                            {
                                datanode.GetStorageInfos()[0].SetUtilizationForTesting(100L, 100L, 0, 100L);
                                datanode.UpdateHeartbeat(BlockManagerTestUtil.GetStorageReportsForDatanode(datanode), 0L, 0L, 0, 0, null);
                            }
                        }
                        // decrease the replication factor to 1;
                        NameNodeAdapter.SetReplication(namesystem, fileName.ToString(), (short)1);
                        // corrupt one won't be chosen to be excess one
                        // without 4910 the number of live replicas would be 0: block gets lost
                        NUnit.Framework.Assert.AreEqual(1, bm.CountNodes(block.GetLocalBlock()).LiveReplicas());
                    }
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #18
        public virtual void TestNodeCount()
        {
            // start a mini dfs cluster of 2 nodes
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplicationFactor).Build();

            try
            {
                FSNamesystem     namesystem = cluster.GetNamesystem();
                BlockManager     bm         = namesystem.GetBlockManager();
                HeartbeatManager hm         = bm.GetDatanodeManager().GetHeartbeatManager();
                FileSystem       fs         = cluster.GetFileSystem();
                // populate the cluster with a one block file
                Path FilePath = new Path("/testfile");
                DFSTestUtil.CreateFile(fs, FilePath, 1L, ReplicationFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, ReplicationFactor);
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, FilePath);
                // keep a copy of all datanode descriptor
                DatanodeDescriptor[] datanodes = hm.GetDatanodes();
                // start two new nodes
                cluster.StartDataNodes(conf, 2, true, null, null);
                cluster.WaitActive();
                // bring down first datanode
                DatanodeDescriptor datanode = datanodes[0];
                MiniDFSCluster.DataNodeProperties dnprop = cluster.StopDataNode(datanode.GetXferAddr());
                // make sure that NN detects that the datanode is down
                BlockManagerTestUtil.NoticeDeadDatanode(cluster.GetNameNode(), datanode.GetXferAddr());
                // the block will be replicated
                DFSTestUtil.WaitReplication(fs, FilePath, ReplicationFactor);
                // restart the first datanode
                cluster.RestartDataNode(dnprop);
                cluster.WaitActive();
                // check if excessive replica is detected (transient)
                InitializeTimeout(Timeout);
                while (CountNodes(block.GetLocalBlock(), namesystem).ExcessReplicas() == 0)
                {
                    CheckTimeout("excess replicas not detected");
                }
                // find out a non-excess node
                DatanodeDescriptor nonExcessDN = null;
                foreach (DatanodeStorageInfo storage in bm.blocksMap.GetStorages(block.GetLocalBlock()))
                {
                    DatanodeDescriptor  dn     = storage.GetDatanodeDescriptor();
                    ICollection <Block> blocks = bm.excessReplicateMap[dn.GetDatanodeUuid()];
                    if (blocks == null || !blocks.Contains(block.GetLocalBlock()))
                    {
                        nonExcessDN = dn;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsTrue(nonExcessDN != null);
                // bring down non excessive datanode
                dnprop = cluster.StopDataNode(nonExcessDN.GetXferAddr());
                // make sure that NN detects that the datanode is down
                BlockManagerTestUtil.NoticeDeadDatanode(cluster.GetNameNode(), nonExcessDN.GetXferAddr());
                // The block should be replicated
                InitializeTimeout(Timeout);
                while (CountNodes(block.GetLocalBlock(), namesystem).LiveReplicas() != ReplicationFactor)
                {
                    CheckTimeout("live replica count not correct", 1000);
                }
                // restart the first datanode
                cluster.RestartDataNode(dnprop);
                cluster.WaitActive();
                // check if excessive replica is detected (transient)
                InitializeTimeout(Timeout);
                while (CountNodes(block.GetLocalBlock(), namesystem).ExcessReplicas() != 2)
                {
                    CheckTimeout("excess replica count not equal to 2");
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
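InitializeTimeout, CheckTimeout, and CountNodes belong to the shared test base. A sketch of the timeout pair, assuming the upstream Time.MonotonicNow clock helper:

        private long failtime;

        private void InitializeTimeout(long timeoutMs)
        {
            failtime = Time.MonotonicNow() + timeoutMs;
        }

        // Fail the wait loop once the deadline passes; otherwise optionally sleep
        // before the caller re-polls the replica counts.
        private void CheckTimeout(string testLabel, long sleepMs = 0)
        {
            if (Time.MonotonicNow() > failtime)
            {
                throw new TimeoutException(testLabel);
            }
            if (sleepMs > 0)
            {
                Sharpen.Thread.Sleep(sleepMs);
            }
        }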
Example #19
        /// <summary>
        /// Test that when a block's replica is removed from the RBW folder on one of
        /// the datanodes, the namenode asks to invalidate that corrupted block and
        /// schedules replication of one more replica for the under-replicated block.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestBlockInvalidationWhenRBWReplicaMissedInDN()
        {
            // This test cannot pass on Windows due to file locking enforcement.  It will
            // reject the attempt to delete the block file from the RBW folder.
            Assume.AssumeTrue(!Path.Windows);
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 2);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 300);
            conf.SetLong(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            MiniDFSCluster     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FSDataOutputStream @out    = null;

            try
            {
                FSNamesystem namesystem = cluster.GetNamesystem();
                FileSystem   fs         = cluster.GetFileSystem();
                Path         testPath   = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
                @out = fs.Create(testPath, (short)2);
                @out.WriteBytes("HDFS-3157: " + testPath);
                @out.Hsync();
                cluster.StartDataNodes(conf, 1, true, null, null, null);
                string        bpid  = namesystem.GetBlockPoolId();
                ExtendedBlock blk   = DFSTestUtil.GetFirstBlock(fs, testPath);
                Block         block = blk.GetLocalBlock();
                DataNode      dn    = cluster.GetDataNodes()[0];
                // Delete partial block and its meta information from the RBW folder
                // of first datanode.
                FilePath blockFile = DataNodeTestUtils.GetBlockFile(dn, bpid, block);
                FilePath metaFile  = DataNodeTestUtils.GetMetaFile(dn, bpid, block);
                NUnit.Framework.Assert.IsTrue("Could not delete the block file from the RBW folder"
                                              , blockFile.Delete());
                NUnit.Framework.Assert.IsTrue("Could not delete the block meta file from the RBW folder"
                                              , metaFile.Delete());
                @out.Close();
                int liveReplicas = 0;
                while (true)
                {
                    if ((liveReplicas = CountReplicas(namesystem, blk).LiveReplicas()) < 2)
                    {
                        // This confirms we have a corrupt replica
                        Log.Info("Live Replicas after corruption: " + liveReplicas);
                        break;
                    }
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual("There should be less than 2 replicas in the " +
                                                "liveReplicasMap", 1, liveReplicas);
                while (true)
                {
                    if ((liveReplicas = CountReplicas(namesystem, blk).LiveReplicas()) > 1)
                    {
                        //Wait till the live replica count becomes equal to Replication Factor
                        Log.Info("Live Replicas after Rereplication: " + liveReplicas);
                        break;
                    }
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual("There should be two live replicas", 2, liveReplicas
                                                );
                while (true)
                {
                    Sharpen.Thread.Sleep(100);
                    if (CountReplicas(namesystem, blk).CorruptReplicas() == 0)
                    {
                        Log.Info("Corrupt Replicas becomes 0");
                        break;
                    }
                }
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
                cluster.Shutdown();
            }
        }