Example #1
        public virtual void TestBlockIdGeneration()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // Create a file that is 10 blocks long.
                Path path = new Path("testBlockIdGeneration.dat");
                DFSTestUtil.CreateFile(fs, path, IoSize, BlockSize * 10, BlockSize, Replication, Seed);
                IList<LocatedBlock> blocks = DFSTestUtil.GetAllBlocks(fs, path);
                Log.Info("Block0 id is " + blocks[0].GetBlock().GetBlockId());
                long nextBlockExpectedId = blocks[0].GetBlock().GetBlockId() + 1;
                // Ensure that the block IDs are sequentially increasing.
                for (int i = 1; i < blocks.Count; ++i)
                {
                    long nextBlockId = blocks[i].GetBlock().GetBlockId();
                    Log.Info("Block" + i + " id is " + nextBlockId);
                    Assert.AssertThat(nextBlockId, CoreMatchers.Is(nextBlockExpectedId));
                    ++nextBlockExpectedId;
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
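
Most of the examples on this page share the same scaffold: build an HdfsConfiguration, bring up a MiniDFSCluster through its Builder, and release it in a finally block. A minimal sketch of that lifecycle, using only the calls the examples themselves use (the method name is illustrative, not from the original suite):

        public virtual void MiniClusterLifecycleSketch()
        {
            Configuration conf = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            try
            {
                // Block until the NameNode is up and the DataNode has registered.
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // ... exercise fs here ...
            }
            finally
            {
                // Always shut down, even on assertion failure, to free ports and storage dirs.
                cluster.Shutdown();
            }
        }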
Example #2
        public virtual void TestBlockTokenInLastLocatedBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            try
            {
                FileSystem         fs       = cluster.GetFileSystem();
                string             fileName = "/testBlockTokenInLastLocatedBlock";
                Path               filePath = new Path(fileName);
                FSDataOutputStream @out     = fs.Create(filePath, (short)1);
                @out.Write(new byte[1000]);
                // ensure that the first block is written out (see FSOutputSummer#flush)
                @out.Flush();
                LocatedBlocks locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
                while (locatedBlocks.GetLastLocatedBlock() == null)
                {
                    Sharpen.Thread.Sleep(100);
                    locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
                }
                Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token =
                    locatedBlocks.GetLastLocatedBlock().GetBlockToken();
                NUnit.Framework.Assert.AreEqual(BlockTokenIdentifier.KindName, token.GetKind());
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
        public virtual void TestFinalizedReplicas()
        {
            // bring up a cluster of 3
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();

            try
            {
                // test finalized replicas
                string      TopDir = "/test";
                DFSTestUtil util   = new DFSTestUtil.Builder().SetName("TestDatanodeRestart").SetNumFiles(2).Build();
                util.CreateFiles(fs, TopDir, (short)3);
                util.WaitReplication(fs, TopDir, (short)3);
                util.CheckFiles(fs, TopDir);
                cluster.RestartDataNodes();
                cluster.WaitActive();
                util.CheckFiles(fs, TopDir);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
        public virtual void TestServerSaslNoClientSasl()
        {
            HdfsConfiguration clusterConf = CreateSecureConfig("authentication,integrity,privacy");

            // Set short retry timeouts so this test runs faster
            clusterConf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
            StartCluster(clusterConf);
            HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);

            clientConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, string.Empty);
            GenericTestUtils.LogCapturer logs =
                GenericTestUtils.LogCapturer.CaptureLogs(LogFactory.GetLog(typeof(DataNode)));
            try
            {
                DoTest(clientConf);
                NUnit.Framework.Assert.Fail("Should fail if SASL data transfer protection is not "
                                            + "configured or not supported in client");
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertMatches(e.Message, "could only be replicated to 0 nodes");
            }
            finally
            {
                logs.StopCapturing();
            }
            GenericTestUtils.AssertMatches(logs.GetOutput(), "Failed to read expected SASL data transfer protection "
                                           + "handshake from client at");
        }
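
Example #4 exercises only the failure path. A hypothetical companion test for the success path would give the client a QOP the server advertises; a sketch, assuming the same CreateSecureConfig, StartCluster, and DoTest helpers from this test class:

        public virtual void TestServerSaslClientSaslSketch()
        {
            HdfsConfiguration clusterConf = CreateSecureConfig("authentication,integrity,privacy");
            StartCluster(clusterConf);
            HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
            // Pick any QOP the server supports; here the weakest of the three.
            clientConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, "authentication");
            DoTest(clientConf);   // expected to succeed, unlike the empty-QOP case above
        }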
Example #5
        /// <summary>
        /// The test verifies that the number of outstanding replication requests for a
        /// given DN doesn't exceed the limit set by the configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// </summary>
        /// <remarks>
        /// The test verifies that the number of outstanding replication requests for a
        /// given DN doesn't exceed the limit set by the configuration property
        /// dfs.namenode.replication.max-streams-hard-limit.
        /// The test does the following:
        /// 1. Create a mini cluster with 2 DNs. Set a large heartbeat interval so that
        /// replication requests won't be picked up by any DN right away.
        /// 2. Create a file with 10 blocks and replication factor 2. Thus each
        /// of the 2 DNs has one replica of each block.
        /// 3. Add a DN to the cluster for later replication.
        /// 4. Remove a DN that has data.
        /// 5. Ask BlockManager to compute the replication work. This will assign
        /// replication requests to the only DN that has data.
        /// 6. Make sure the number of pending replication requests of that DN doesn't
        /// exceed the limit.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestNumberOfBlocksToBeReplicated()
        {
            // 1 min timeout
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsNamenodeMinBlockSizeKey, 0);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 1);
            // Large value to make sure the pending replication request can stay in
            // DatanodeDescriptor.replicateBlocks before test timeout.
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 100);
            // Make sure BlockManager can pull all blocks from UnderReplicatedBlocks via
            // chooseUnderReplicatedBlocks at once.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationWorkMultiplierPerIteration, 5);
            int            NumOfBlocks = 10;
            short          RepFactor   = 2;
            string         FileName    = "/testFile";
            Path           FilePath    = new Path(FileName);
            MiniDFSCluster cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(RepFactor).Build();

            try
            {
                // create a file with 10 blocks with a replication factor of 2
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, FilePath, NumOfBlocks, RepFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, RepFactor);
                cluster.StartDataNodes(conf, 1, true, null, null, null, null);
                BlockManager  bm = cluster.GetNamesystem().GetBlockManager();
                ExtendedBlock b  = DFSTestUtil.GetFirstBlock(fs, FilePath);
                IEnumerator<DatanodeStorageInfo> storageInfos =
                    bm.blocksMap.GetStorages(b.GetLocalBlock()).GetEnumerator();
                DatanodeDescriptor firstDn  = storageInfos.Next().GetDatanodeDescriptor();
                DatanodeDescriptor secondDn = storageInfos.Next().GetDatanodeDescriptor();
                bm.GetDatanodeManager().RemoveDatanode(firstDn);
                NUnit.Framework.Assert.AreEqual(NumOfBlocks, bm.GetUnderReplicatedNotMissingBlocks());
                bm.ComputeDatanodeWork();
                NUnit.Framework.Assert.IsTrue("The number of blocks to be replicated should be less than "
                                              + "or equal to " + bm.replicationStreamsHardLimit,
                                              secondDn.GetNumberOfBlocksToBeReplicated() <= bm.replicationStreamsHardLimit);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #6
        public static void Setup()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsListLimit, ListLimit);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            cluster.WaitClusterUp();
            fs = cluster.GetFileSystem();
        }
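
A Setup like this one is normally paired with a teardown that releases the static cluster; a minimal sketch (the method name is an assumption):

        public static void TearDown()
        {
            if (cluster != null)
            {
                // Stops the NameNode and DataNodes and removes the test storage dirs.
                cluster.Shutdown();
            }
        }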
Example #7
        public virtual void TestDeadDatanode()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            cluster.WaitActive();
            string poolId = cluster.GetNamesystem().GetBlockPoolId();
            // wait for datanode to be marked live
            DataNode             dn  = cluster.GetDataNodes()[0];
            DatanodeRegistration reg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);

            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), true, 20000);
            // Shutdown and wait for datanode to be marked dead
            dn.Shutdown();
            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), false, 20000);
            DatanodeProtocol dnp = cluster.GetNameNodeRpc();

            ReceivedDeletedBlockInfo[] blocks = new ReceivedDeletedBlockInfo[] {
                new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null) };
            StorageReceivedDeletedBlocks[] storageBlocks = new StorageReceivedDeletedBlocks[] {
                new StorageReceivedDeletedBlocks(reg.GetDatanodeUuid(), blocks) };
            // Ensure blockReceived call from dead datanode is rejected with IOException
            try
            {
                dnp.BlockReceivedAndDeleted(reg, poolId, storageBlocks);
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure blockReport from dead datanode is rejected with IOException
            StorageBlockReport[] report = new StorageBlockReport[] {
                new StorageBlockReport(new DatanodeStorage(reg.GetDatanodeUuid()), BlockListAsLongs.Empty) };
            try
            {
                dnp.BlockReport(reg, poolId, report, new BlockReportContext(1, 0, Runtime.NanoTime()));
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure heartbeat from dead datanode is rejected with a command
            // that asks datanode to register again
            StorageReport[] rep = new StorageReport[] {
                new StorageReport(new DatanodeStorage(reg.GetDatanodeUuid()), false, 0, 0, 0, 0) };
            DatanodeCommand[] cmd = dnp.SendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null).GetCommands();
            NUnit.Framework.Assert.AreEqual(1, cmd.Length);
            NUnit.Framework.Assert.AreEqual(cmd[0].GetAction(), RegisterCommand.Register.GetAction());
        }
Example #8
        /// <exception cref="System.IO.IOException"/>
        public static void CreateCluster()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.AddResource(ContractHdfsXml);
            // hack in a 256-byte block size
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            cluster.WaitClusterUp();
        }
Example #9
        public virtual void TestRecoverReplicas()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            cluster.WaitActive();
            try
            {
                FileSystem fs = cluster.GetFileSystem();
                for (int i = 0; i < 4; i++)
                {
                    Path fileName = new Path("/test" + i);
                    DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 0L);
                    DFSTestUtil.WaitReplication(fs, fileName, (short)1);
                }
                string   bpid = cluster.GetNamesystem().GetBlockPoolId();
                DataNode dn   = cluster.GetDataNodes()[0];
                IEnumerator<ReplicaInfo> replicasItor = Dataset(dn).volumeMap.Replicas(bpid).GetEnumerator();
                ReplicaInfo replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, true);   // rename block file
                CreateUnlinkTmpFile(replica, false, true);  // rename meta file
                replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, false);  // copy block file
                CreateUnlinkTmpFile(replica, false, false); // copy meta file
                replica = replicasItor.Next();
                CreateUnlinkTmpFile(replica, true, true);   // rename block file
                CreateUnlinkTmpFile(replica, false, false); // copy meta file
                cluster.RestartDataNodes();
                cluster.WaitActive();
                dn = cluster.GetDataNodes()[0];
                // check volumeMap: 4 finalized replicas
                ICollection<ReplicaInfo> replicas = Dataset(dn).volumeMap.Replicas(bpid);
                NUnit.Framework.Assert.AreEqual(4, replicas.Count);
                replicasItor = replicas.GetEnumerator();
                while (replicasItor.HasNext())
                {
                    NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Finalized,
                                                    replicasItor.Next().GetState());
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #10
        /*
         * Return a configuration object with low timeouts for testing and
         * a topology script set (which enables rack awareness).
         */
        private Configuration GetConf()
        {
            Configuration conf = new HdfsConfiguration();

            // Lower the heartbeat interval so the NN quickly learns of dead
            // or decommissioned DNs, and issues replication and invalidation
            // commands quickly (as replies to heartbeats)
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            // Have the NN ReplicationMonitor compute the replication and
            // invalidation commands to send DNs every second.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1);
            // Have the NN check for pending replications every second so it
            // quickly schedules additional replicas as they are identified.
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 1);
            // The DNs report blocks every second.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            // Indicates we have multiple racks
            conf.Set(DFSConfigKeys.NetTopologyScriptFileNameKey, "xyz");
            return conf;
        }
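
GetConf() only prepares the configuration; a caller would feed it straight into the cluster builder. A hedged usage sketch (the wrapper method name is illustrative):

        private void GetConfUsageSketch()
        {
            // Hypothetical call site: a 3-DN cluster that picks up the fast heartbeat,
            // replication, and topology settings prepared by GetConf().
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(GetConf()).NumDataNodes(3).Build();
            cluster.WaitActive();
        }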
Example #11
        public static void BeforeClassSetup()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            conf.Set(FsPermission.UmaskLabel, "000");
            conf.SetInt(DFSConfigKeys.DfsNamenodeMaxComponentLengthKey, 0);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            webhdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
            dfs     = cluster.GetFileSystem();
        }
Example #12
        public virtual void TestSingleRequiredFailedEditsDirOnSetReadyToFlush()
        {
            // Set one of the edits dirs to be required.
            string[] editsDirs = cluster.GetConfiguration(0).GetTrimmedStrings(DFSConfigKeys.DfsNamenodeNameDirKey);
            ShutDownMiniCluster();
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNamenodeEditsDirRequiredKey, editsDirs[0]);
            conf.SetInt(DFSConfigKeys.DfsNamenodeEditsDirMinimumKey, 0);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckedVolumesMinimumKey, 0);
            SetUpMiniCluster(conf, true);
            NUnit.Framework.Assert.IsTrue(DoAnEdit());
            // Invalidate the one required edits journal.
            InvalidateEditsDirAtIndex(0, false, false);
            JournalSet.JournalAndStream nonRequiredJas = GetJournalAndStream(1);
            EditLogFileOutputStream     nonRequiredSpy = SpyOnStream(nonRequiredJas);

            // The NN has not terminated (no ExitException thrown),
            // and the other stream is still active.
            NUnit.Framework.Assert.IsTrue(nonRequiredJas.IsActive());
            try
            {
                DoAnEdit();
                NUnit.Framework.Assert.Fail("A single failure of a required journal should have halted the NN"
                                            );
            }
            catch (RemoteException re)
            {
                NUnit.Framework.Assert.IsTrue(re.GetClassName().Contains("ExitException"));
                GenericTestUtils.AssertExceptionContains("setReadyToFlush failed for required journal"
                                                         , re);
            }
            // Since the required directory failed setReadyToFlush, and that
            // directory was listed prior to the non-required directory,
            // we should not call setReadyToFlush on the non-required
            // directory. Regression test for HDFS-2874.
            Org.Mockito.Mockito.Verify(nonRequiredSpy, Org.Mockito.Mockito.Never()).SetReadyToFlush();
            NUnit.Framework.Assert.IsFalse(nonRequiredJas.IsActive());
        }
Example #13
        public virtual void TestReplicationAdjusted()
        {
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            // Replicate and heartbeat fast to shave a few seconds off test
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // Create a file with replication count 1
                Path p = new Path("/testfile");
                DFSTestUtil.CreateFile(fs, p, 10, /*repl*/ (short)1, 1);
                DFSTestUtil.WaitReplication(fs, p, (short)1);
                // Shut down and restart cluster with new minimum replication of 2
                cluster.Shutdown();
                cluster = null;
                conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationMinKey, 2);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(false).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                // The file should get adjusted to replication 2 when
                // the edit log is replayed.
                DFSTestUtil.WaitReplication(fs, p, (short)2);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #14
        public static void Init()
        {
            baseDir = new FilePath(Runtime.GetProperty("test.build.dir", "target/test-dir"),
                                   typeof(TestSecureNNWithQJM).Name);
            FileUtil.FullyDelete(baseDir);
            NUnit.Framework.Assert.IsTrue(baseDir.Mkdirs());
            Properties kdcConf = MiniKdc.CreateConf();

            kdc = new MiniKdc(kdcConf, baseDir);
            kdc.Start();
            baseConf = new HdfsConfiguration();
            SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, baseConf);
            UserGroupInformation.SetConfiguration(baseConf);
            NUnit.Framework.Assert.IsTrue("Expected configuration to enable security",
                                          UserGroupInformation.IsSecurityEnabled());
            string   userName   = UserGroupInformation.GetLoginUser().GetShortUserName();
            FilePath keytabFile = new FilePath(baseDir, userName + ".keytab");
            string   keytab     = keytabFile.GetAbsolutePath();
            // Windows will not reverse name lookup "127.0.0.1" to "localhost".
            string krbInstance = Path.Windows ? "127.0.0.1" : "localhost";

            kdc.CreatePrincipal(keytabFile, userName + "/" + krbInstance, "HTTP/" + krbInstance);
            string hdfsPrincipal   = userName + "/" + krbInstance + "@" + kdc.GetRealm();
            string spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.GetRealm();

            baseConf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, hdfsPrincipal);
            baseConf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, keytab);
            baseConf.Set(DFSConfigKeys.DfsDatanodeKerberosPrincipalKey, hdfsPrincipal);
            baseConf.Set(DFSConfigKeys.DfsDatanodeKeytabFileKey, keytab);
            baseConf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosPrincipalKey, spnegoPrincipal);
            baseConf.Set(DFSConfigKeys.DfsJournalnodeKeytabFileKey, keytab);
            baseConf.Set(DFSConfigKeys.DfsJournalnodeKerberosPrincipalKey, hdfsPrincipal);
            baseConf.Set(DFSConfigKeys.DfsJournalnodeKerberosInternalSpnegoPrincipalKey, spnegoPrincipal);
            baseConf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            baseConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, "authentication");
            baseConf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
            baseConf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
            baseConf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
            baseConf.Set(DFSConfigKeys.DfsJournalnodeHttpsAddressKey, "localhost:0");
            baseConf.SetInt(CommonConfigurationKeys.IpcClientConnectMaxRetriesOnSaslKey, 10);
            string keystoresDir = baseDir.GetAbsolutePath();
            string sslConfDir   = KeyStoreTestUtil.GetClasspathDir(typeof(TestSecureNNWithQJM));

            KeyStoreTestUtil.SetupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
        }
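
Init() leaves a MiniKdc and a scratch directory behind, so a companion shutdown would stop the KDC and delete baseDir; a sketch (the method name and the kdc.Stop() call are assumptions based on the MiniKdc lifecycle):

        public static void Destroy()
        {
            if (kdc != null)
            {
                kdc.Stop();   // stop the embedded Kerberos KDC (assumed Sharpen name)
            }
            // Remove keytabs, keystores, and SSL config written under baseDir.
            FileUtil.FullyDelete(baseDir);
        }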
Example #15
        public static void SetUp()
        {
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            // High value of replication interval
            // so that blocks remain under-replicated
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            conf.SetLong(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            nnRpc   = cluster.GetNameNodeRpc();
        }
Example #16
        public virtual void Setup()
        {
            StaticMapping.ResetMap();
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
            string[] hosts = new string[] { "/host0", "/host1", "/host2", "/host3", "/host4" };
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Racks(racks).Hosts(hosts).Build();
            cluster.WaitActive();
            nameNodeRpc = cluster.GetNameNodeRpc();
            namesystem  = cluster.GetNamesystem();
            perm        = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission.GetDefault());
        }
Example #17
        public static HdfsConfiguration InitZeroCopyTest()
        {
            Assume.AssumeTrue(NativeIO.IsAvailable());
            Assume.AssumeTrue(SystemUtils.IsOsUnix);
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetInt(DFSConfigKeys.DfsClientMmapCacheSize, 3);
            conf.SetLong(DFSConfigKeys.DfsClientMmapCacheTimeoutMs, 100);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey,
                     new FilePath(sockDir.GetDir(), "TestRequestMmapAccess._PORT.sock").GetAbsolutePath());
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, true);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 1000);
            return conf;
        }
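
The configuration returned by InitZeroCopyTest() is meant to seed a cluster for short-circuit, zero-copy reads; a hedged usage sketch (the wrapper method name is illustrative):

        public virtual void ZeroCopyConfigUsageSketch()
        {
            // Hypothetical caller: the domain socket and mmap cache settings only take
            // effect once a cluster is built from the conf returned above.
            HdfsConfiguration conf = InitZeroCopyTest();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
        }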
Example #18
        public virtual void TestMultipleRedundantFailedEditsDirOnSetReadyToFlush()
        {
            // Set up 4 name/edits dirs.
            ShutDownMiniCluster();
            Configuration conf = new HdfsConfiguration();

            string[] nameDirs = new string[4];
            for (int i = 0; i < nameDirs.Length; i++)
            {
                FilePath nameDir = new FilePath(PathUtils.GetTestDir(GetType()), "name-dir" + i);
                nameDir.Mkdirs();
                nameDirs[i] = nameDir.GetAbsolutePath();
            }
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, StringUtils.Join(nameDirs, ","));
            // Keep running unless fewer than 2 edits dirs remain.
            conf.SetInt(DFSConfigKeys.DfsNamenodeEditsDirMinimumKey, 2);
            SetUpMiniCluster(conf, false);
            // All journals active.
            NUnit.Framework.Assert.IsTrue(DoAnEdit());
            // The NN has not terminated (no ExitException thrown)
            // Invalidate 1/4 of the redundant journals.
            InvalidateEditsDirAtIndex(0, false, false);
            NUnit.Framework.Assert.IsTrue(DoAnEdit());
            // The NN has not terminated (no ExitException thrown)
            // Invalidate 2/4 of the redundant journals.
            InvalidateEditsDirAtIndex(1, false, false);
            NUnit.Framework.Assert.IsTrue(DoAnEdit());
            // The NN has not terminated (no ExitException thrown)
            // Invalidate 3/4 of the redundant journals.
            InvalidateEditsDirAtIndex(2, false, false);
            try
            {
                DoAnEdit();
                NUnit.Framework.Assert.Fail("A failure of more than the minimum number of redundant journals "
                                            + "should have halted ");
            }
            catch (RemoteException re)
            {
                NUnit.Framework.Assert.IsTrue(re.GetClassName().Contains("ExitException"));
                GenericTestUtils.AssertExceptionContains("Could not sync enough journals to persistent storage due to "
                                                         + "setReadyToFlush failed for too many journals. " + "Unsynced transactions: 1"
                                                         , re);
            }
        }
Example #19
        public virtual void TestTailer()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                         .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();

            cluster.WaitActive();
            cluster.TransitionToActive(0);
            NameNode nn1 = cluster.GetNameNode(0);
            NameNode nn2 = cluster.GetNameNode(1);

            try
            {
                for (int i = 0; i < DirsToMake / 2; i++)
                {
                    NameNodeAdapter.Mkdirs(nn1, GetDirPath(i),
                                           new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
                }
                HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
                for (int i_1 = 0; i_1 < DirsToMake / 2; i_1++)
                {
                    NUnit.Framework.Assert.IsTrue(NameNodeAdapter.GetFileInfo(nn2, GetDirPath(i_1), false).IsDir());
                }
                for (int i_2 = DirsToMake / 2; i_2 < DirsToMake; i_2++)
                {
                    NameNodeAdapter.Mkdirs(nn1, GetDirPath(i_2),
                                           new PermissionStatus("test", "test", new FsPermission((short)0x1ed)), true);
                }
                HATestUtil.WaitForStandbyToCatchUp(nn1, nn2);
                for (int i_3 = DirsToMake / 2; i_3 < DirsToMake; i_3++)
                {
                    NUnit.Framework.Assert.IsTrue(NameNodeAdapter.GetFileInfo(nn2, GetDirPath(i_3), false).IsDir());
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #20
        // test rbw replicas persist across DataNode restarts
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestRbwReplicas()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024L);
            conf.SetInt(DFSConfigKeys.DfsClientWritePacketSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            cluster.WaitActive();
            try
            {
                TestRbwReplicas(cluster, false);
                TestRbwReplicas(cluster, true);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #21
        public virtual void TestTriggerBlockIdCollision()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                FileSystem   fs         = cluster.GetFileSystem();
                FSNamesystem fsn        = cluster.GetNamesystem();
                int          blockCount = 10;
                // Create a file with a few blocks to rev up the global block ID
                // counter.
                Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
                DFSTestUtil.CreateFile(fs, path1, IoSize, BlockSize * blockCount, BlockSize, Replication, Seed);
                IList<LocatedBlock> blocks1 = DFSTestUtil.GetAllBlocks(fs, path1);
                // Rewind the block ID counter in the name system object. This will result
                // in block ID collisions when we try to allocate new blocks.
                SequentialBlockIdGenerator blockIdGenerator = fsn.GetBlockIdManager().GetBlockIdGenerator();
                blockIdGenerator.SetCurrentValue(blockIdGenerator.GetCurrentValue() - 5);
                // Trigger collisions by creating a new file.
                Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
                DFSTestUtil.CreateFile(fs, path2, IoSize, BlockSize * blockCount, BlockSize, Replication, Seed);
                IList<LocatedBlock> blocks2 = DFSTestUtil.GetAllBlocks(fs, path2);
                Assert.AssertThat(blocks2.Count, CoreMatchers.Is(blockCount));
                // Make sure that file2 block IDs start immediately after file1
                Assert.AssertThat(blocks2[0].GetBlock().GetBlockId(),
                                  CoreMatchers.Is(blocks1[9].GetBlock().GetBlockId() + 1));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #22
        public virtual void TestStartNNWithTrashEmptier()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = new HdfsConfiguration();

            // enable both trash emptier and dropping response
            conf.SetLong("fs.trash.interval", 360);
            conf.SetInt(DFSConfigKeys.DfsClientTestDropNamenodeResponseNumKey, 2);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                              .NumDataNodes(0).Build();
                cluster.WaitActive();
                cluster.TransitionToActive(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #23
        /// <summary>Creates configuration for starting a secure cluster.</summary>
        /// <param name="dataTransferProtection">supported QOPs</param>
        /// <returns>configuration for starting a secure cluster</returns>
        /// <exception cref="System.Exception">if there is any failure</exception>
        protected internal virtual HdfsConfiguration CreateSecureConfig(string dataTransferProtection)
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            SecurityUtil.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos, conf);
            conf.Set(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey, hdfsPrincipal);
            conf.Set(DFSConfigKeys.DfsNamenodeKeytabFileKey, keytab);
            conf.Set(DFSConfigKeys.DfsDatanodeKerberosPrincipalKey, hdfsPrincipal);
            conf.Set(DFSConfigKeys.DfsDatanodeKeytabFileKey, keytab);
            conf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosPrincipalKey, spnegoPrincipal);
            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, dataTransferProtection);
            conf.Set(DFSConfigKeys.DfsHttpPolicyKey, HttpConfig.Policy.HttpsOnly.ToString());
            conf.Set(DFSConfigKeys.DfsNamenodeHttpsAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsDatanodeHttpsAddressKey, "localhost:0");
            conf.SetInt(CommonConfigurationKeys.IpcClientConnectMaxRetriesOnSaslKey, 10);
            string keystoresDir = baseDir.GetAbsolutePath();
            string sslConfDir   = KeyStoreTestUtil.GetClasspathDir(this.GetType());

            KeyStoreTestUtil.SetupSSLConfig(keystoresDir, sslConfDir, conf, false);
            return conf;
        }
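
Example #4 above shows the intended call pattern for this helper; condensed, a matched client/server run looks like this (a sketch reusing the StartCluster and DoTest helpers from the same class, with an illustrative method name):

        public virtual void SecureClusterUsageSketch()
        {
            HdfsConfiguration clusterConf = CreateSecureConfig("privacy");
            StartCluster(clusterConf);
            HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
            // The client requests the same QOP the server was configured with.
            clientConf.Set(DFSConfigKeys.DfsDataTransferProtectionKey, "privacy");
            DoTest(clientConf);   // data transfer should succeed under SASL "privacy"
        }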
Example #24
        public virtual void TestChangedStorageId()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                                         .NnTopology(MiniDFSNNTopology.SimpleHATopology()).Build();

            try
            {
                cluster.TransitionToActive(0);
                FileSystem   fs   = HATestUtil.ConfigureFailoverFs(cluster, conf);
                OutputStream @out = fs.Create(filePath);
                @out.Write(Sharpen.Runtime.GetBytesForString("foo bar baz"));
                @out.Close();
                HATestUtil.WaitForStandbyToCatchUp(cluster.GetNameNode(0), cluster.GetNameNode(1));
                // Change the gen stamp of the block on datanode to go back in time (gen
                // stamps start at 1000)
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, filePath);
                NUnit.Framework.Assert.IsTrue(cluster.ChangeGenStampOfBlock(0, block, 900));
                // Stop the DN so the replica with the changed gen stamp will be reported
                // when this DN starts up.
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // Restart the namenode so that when the DN comes up it will see an initial
                // block report.
                cluster.RestartNameNode(1, false);
                NUnit.Framework.Assert.IsTrue(cluster.RestartDataNode(dnProps, true));
                // Wait until the standby NN queues up the corrupt block in the pending DN
                // message queue.
                while (cluster.GetNamesystem(1).GetBlockManager().GetPendingDataNodeMessageCount() < 1)
                {
                    ThreadUtil.SleepAtLeastIgnoreInterrupts(1000);
                }
                NUnit.Framework.Assert.AreEqual(1,
                                                cluster.GetNamesystem(1).GetBlockManager().GetPendingDataNodeMessageCount());
                string oldStorageId = GetRegisteredDatanodeUid(cluster, 1);
                // Reformat/restart the DN.
                NUnit.Framework.Assert.IsTrue(WipeAndRestartDn(cluster, 0));
                // Give the DN time to start up and register, which will cause the
                // DatanodeManager to dissociate the old storage ID from the DN xfer addr.
                string newStorageId = string.Empty;
                do
                {
                    ThreadUtil.SleepAtLeastIgnoreInterrupts(1000);
                    newStorageId = GetRegisteredDatanodeUid(cluster, 1);
                    System.Console.Out.WriteLine("====> oldStorageId: " + oldStorageId + " newStorageId: "
                                                 + newStorageId);
                }while (newStorageId.Equals(oldStorageId));
                NUnit.Framework.Assert.AreEqual(0,
                                                cluster.GetNamesystem(1).GetBlockManager().GetPendingDataNodeMessageCount());
                // Now try to fail over.
                cluster.TransitionToStandby(0);
                cluster.TransitionToActive(1);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #25
        /// <summary>Check that listCorruptFileBlocks works while the namenode is still in safemode.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFileBlocksInSafeMode()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode sends block reports
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // never leave safemode automatically
                conf.SetFloat(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, 1.5f);
                // start populating repl queues immediately
                conf.SetFloat(DFSConfigKeys.DfsNamenodeReplQueueThresholdPctKey, 0f);
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).WaitSafeMode(false).Build();
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testListCorruptFileBlocksInSafeMode"
                                                                     ).SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                ICollection<FSNamesystem.CorruptFileBlockInfo> badFiles =
                    cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None.",
                                              badFiles.Count == 0);
                // Now deliberately corrupt one block
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 0);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir,
                                                                     cluster.GetNamesystem().GetBlockPoolId());
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList<FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // restart namenode
                cluster.RestartNameNode(0);
                fs = cluster.GetFileSystem();
                // wait until replication queues have been initialized
                while (!cluster.GetNameNode().namesystem.IsPopulatingReplQueues())
                {
                    try
                    {
                        Log.Info("waiting for replication queues");
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                }
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // check that we are still in safe mode
                NUnit.Framework.Assert.IsTrue("Namenode is not in safe mode", cluster.GetNameNode
                                                  ().IsInSafeMode());
                // now leave safe mode so that we can clean up
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
                util.Cleanup(fs, "/srcdat10");
            }
            catch (Exception e)
            {
                Log.Error(StringUtils.StringifyException(e));
                throw;
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #26
        /// <summary>Check if nn.getCorruptFiles() returns a file that has corrupted blocks</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFilesCorruptedBlock()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // datanode sends block reports
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testCorruptFilesCorruptedBlock"
                                                                     ).SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                NameNode namenode = cluster.GetNameNode();
                ICollection<FSNamesystem.CorruptFileBlockInfo> badFiles =
                    namenode.GetNamesystem().ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None.",
                                              badFiles.Count == 0);
                // Now deliberately corrupt one block
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 1);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList<FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                util.Cleanup(fs, "/srcdat10");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #27
        /// <summary>
        /// Regression test for HDFS-7960.<p/>
        /// Shutting down a datanode, removing a storage directory, and restarting
        /// the DataNode should not produce zombie storages.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRemovingStorageDoesNotProduceZombies()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            int            NumStoragesPerDn = 2;
            MiniDFSCluster cluster          = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
                                                  .StoragesPerDatanode(NumStoragesPerDn).Build();

            try
            {
                cluster.WaitActive();
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    NUnit.Framework.Assert.AreEqual(NumStoragesPerDn,
                                                    cluster.GetNamesystem().GetBlockManager().GetDatanodeManager()
                                                        .GetDatanode(dn.GetDatanodeId()).GetStorageInfos().Length);
                }
                // Create a file which will end up on all 3 datanodes.
                Path TestPath            = new Path("/foo1");
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, 1024, (short)3, unchecked((int)(0xcafecafe)));
                foreach (DataNode dn_1 in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.TriggerBlockReport(dn_1);
                }
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path("/foo1"));
                cluster.GetNamesystem().WriteLock();
                string storageIdToRemove;
                string datanodeUuid;
                // Find the first storage which this block is in.
                try
                {
                    IEnumerator<DatanodeStorageInfo> storageInfoIter =
                        cluster.GetNamesystem().GetBlockManager().GetStorages(block.GetLocalBlock()).GetEnumerator();
                    NUnit.Framework.Assert.IsTrue(storageInfoIter.HasNext());
                    DatanodeStorageInfo info = storageInfoIter.Next();
                    storageIdToRemove = info.GetStorageID();
                    datanodeUuid      = info.GetDatanodeDescriptor().GetDatanodeUuid();
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
                // Find the DataNode which holds that first storage.
                DataNode datanodeToRemoveStorageFrom;
                int      datanodeToRemoveStorageFromIdx = 0;
                while (true)
                {
                    if (datanodeToRemoveStorageFromIdx >= cluster.GetDataNodes().Count)
                    {
                        NUnit.Framework.Assert.Fail("failed to find datanode with uuid " + datanodeUuid);
                        datanodeToRemoveStorageFrom = null;
                        break;
                    }
                    DataNode dn_2 = cluster.GetDataNodes()[datanodeToRemoveStorageFromIdx];
                    if (dn_2.GetDatanodeUuid().Equals(datanodeUuid))
                    {
                        datanodeToRemoveStorageFrom = dn_2;
                        break;
                    }
                    datanodeToRemoveStorageFromIdx++;
                }
                // Find the volume within the datanode which holds that first storage.
                IList <FsVolumeSpi> volumes = datanodeToRemoveStorageFrom.GetFSDataset().GetVolumes
                                                  ();
                NUnit.Framework.Assert.AreEqual(NumStoragesPerDn, volumes.Count);
                string volumeDirectoryToRemove = null;
                foreach (FsVolumeSpi volume in volumes)
                {
                    if (volume.GetStorageID().Equals(storageIdToRemove))
                    {
                        volumeDirectoryToRemove = volume.GetBasePath();
                    }
                }
                // Shut down the datanode and remove the volume.
                // Replace the volume directory with a regular file, which will
                // cause a volume failure.  (If we merely removed the directory,
                // it would be re-initialized with a new storage ID.)
                NUnit.Framework.Assert.IsNotNull(volumeDirectoryToRemove);
                datanodeToRemoveStorageFrom.Shutdown();
                FileUtil.FullyDelete(new FilePath(volumeDirectoryToRemove));
                FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
                try
                {
                    fos.Write(1);
                }
                finally
                {
                    fos.Close();
                }
                cluster.RestartDataNode(datanodeToRemoveStorageFromIdx);
                // Wait for the NameNode to remove the storage.
                Log.Info("waiting for the datanode to remove " + storageIdToRemove);
                GenericTestUtils.WaitFor(new _Supplier_227(cluster, datanodeToRemoveStorageFrom,
                                                           storageIdToRemove, NumStoragesPerDn), 10, 30000);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
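
The _Supplier_227 used above is an anonymous Supplier<bool> that the Sharpen conversion lifted into a named class; its body does not appear in this listing. Below is a minimal sketch of what it plausibly checks, assuming it inspects the NameNode's DatanodeManager until the removed storage ID disappears (the field and constructor shapes are assumptions for illustration, not the converted source):

        internal sealed class _Supplier_227 : Supplier<bool>
        {
            private readonly MiniDFSCluster cluster;
            private readonly DataNode datanode;
            private readonly string storageIdToRemove;
            private readonly int numStoragesPerDn;

            internal _Supplier_227(MiniDFSCluster cluster, DataNode datanode,
                                   string storageIdToRemove, int numStoragesPerDn)
            {
                this.cluster = cluster;
                this.datanode = datanode;
                this.storageIdToRemove = storageIdToRemove;
                this.numStoragesPerDn = numStoragesPerDn;
            }

            public bool Get()
            {
                // Ask the NameNode which storages it still tracks for this datanode.
                DatanodeStorageInfo[] infos = cluster.GetNamesystem().GetBlockManager()
                    .GetDatanodeManager().GetDatanode(datanode.GetDatanodeId()).GetStorageInfos();
                foreach (DatanodeStorageInfo info in infos)
                {
                    if (info.GetStorageID().Equals(storageIdToRemove))
                    {
                        // The removed storage is still registered; keep waiting.
                        return false;
                    }
                }
                // Only the surviving storages should remain once the zombie is pruned.
                return infos.Length == numStoragesPerDn - 1;
            }
        }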
Example No. 28
        public virtual void TestPendingAndInvalidate()
        {
            Configuration Conf = new HdfsConfiguration();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
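            // Shorten the heartbeat and replication-check intervals so replication work is scheduled quickly.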
            Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
            Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(DatanodeCount).Build();

            cluster.WaitActive();
            FSNamesystem          namesystem = cluster.GetNamesystem();
            BlockManager          bm         = namesystem.GetBlockManager();
            DistributedFileSystem fs         = cluster.GetFileSystem();

            try
            {
                // 1. create a file
                Path filePath = new Path("/tmp.txt");
                DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
                // 2. disable the heartbeats
                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
                }
                // 3. mark a couple of blocks as corrupt
                LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(),
                                                                       filePath.ToString(), 0, 1).Get(0);
                cluster.GetNamesystem().WriteLock();
                try
                {
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
                    bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1], "STORAGE_ID", "TEST");
                }
                finally
                {
                    cluster.GetNamesystem().WriteUnlock();
                }
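                // Force the BlockManager to compute replication work now and refresh its counters.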
                BlockManagerTestUtil.ComputeAllPendingWork(bm);
                BlockManagerTestUtil.UpdateState(bm);
                NUnit.Framework.Assert.AreEqual(1L, bm.GetPendingReplicationBlocksCount());
                NUnit.Framework.Assert.AreEqual(2, bm.pendingReplications.GetNumReplicas(block.GetBlock().GetLocalBlock()));
                // 4. delete the file
                fs.Delete(filePath, true);
                // retry at most 10 times, each time sleep for 1s. Note that 10s is much
                // less than the default pending record timeout (5~10min)
                int  retries    = 10;
                long pendingNum = bm.GetPendingReplicationBlocksCount();
                while (pendingNum != 0 && retries-- > 0)
                {
                    Sharpen.Thread.Sleep(1000);
                    // let NN do the deletion
                    BlockManagerTestUtil.UpdateState(bm);
                    pendingNum = bm.GetPendingReplicationBlocksCount();
                }
                NUnit.Framework.Assert.AreEqual(0L, pendingNum);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example No. 29
        public virtual void TestFileLimit()
        {
            Configuration conf       = new HdfsConfiguration();
            int           maxObjects = 5;

            conf.SetLong(DFSConfigKeys.DfsNamenodeMaxObjectsKey, maxObjects);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            int currentNodes = 0;

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            FSNamesystem   namesys = cluster.GetNamesystem();

            try
            {
                //
                // check that / exists
                //
                Path path = new Path("/");
                NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory
                                                  ());
                currentNodes = 1;   // root inode
                //
                // verify that we can create the specified number of files. We leave
                // one for the "/". Each file takes an inode and a block.
                //
                for (int i = 0; i < maxObjects / 2; i++)
                {
                    Path file = new Path("/filestatus" + i);
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                    currentNodes += 2;   // two more objects for this creation.
                }
                // verify that creating another file fails
                bool hitException = false;
                try
                {
                    Path file = new Path("/filestatus");
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed file limit", hitException);
                // delete one file
                Path file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0);
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // now, we should be able to create a new file
                CreateFile(fs, file0);
                System.Console.Out.WriteLine("Created file " + file0 + " again.");
                currentNodes += 2;
                // delete the file again
                file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0 + " again.");
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // create two directories in place of the file that we deleted
                Path dir = new Path("/dir0/dir1");
                fs.Mkdirs(dir);
                System.Console.Out.WriteLine("Created directories " + dir);
                currentNodes += 2;
                WaitForLimit(namesys, currentNodes);
                // verify that creating another directory fails
                hitException = false;
                try
                {
                    fs.Mkdirs(new Path("dir.fail"));
                    System.Console.Out.WriteLine("Created directory should not have succeeded.");
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed dir limit", hitException);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
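
TestFileLimit relies on two fixture helpers, CreateFile and WaitForLimit, that are defined outside this excerpt. A rough sketch under the assumption that each helper file costs exactly one inode plus one block and that WaitForLimit polls the namesystem's totals (the blockSize and fileSize fields and the namesys.dir.TotalInodes() accessor are assumptions for illustration):

        // Sketch only: the real helpers live elsewhere in this fixture.
        private void CreateFile(FileSystem fileSys, Path name)
        {
            // Write less than one block of data so each file costs one inode and one block.
            FSDataOutputStream stm = fileSys.Create(name, true, 4096, (short)1, blockSize);
            stm.Write(new byte[fileSize]);
            stm.Close();
        }

        private void WaitForLimit(FSNamesystem namesys, long num)
        {
            // Poll until the namesystem reports the expected number of inodes plus blocks.
            for (int i = 0; i < 30; i++)
            {
                long total = namesys.GetBlocksTotal() + namesys.dir.TotalInodes();
                if (total == num)
                {
                    return;
                }
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.Fail("Timed out waiting for object count to reach " + num);
        }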
        /// <summary>Test if NN.listCorruptFiles() returns the right number of results.</summary>
        /// <remarks>
        /// Test if NN.listCorruptFiles() returns the right number of results.
        /// The corrupt blocks are detected by the BlockPoolSliceScanner.
        /// Also, test that DFS.listCorruptFileBlocks can make multiple successive
        /// calls.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestMaxCorruptFiles()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                // make the datanode send block reports frequently
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem fs = cluster.GetFileSystem();
                int        maxCorruptFileBlocks = FSNamesystem.DefaultMaxCorruptFileblocksReturned;
                // create 3 * maxCorruptFileBlocks files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testMaxCorruptFiles")
                                       .SetNumFiles(maxCorruptFileBlocks * 3).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat2", (short)1);
                util.WaitReplication(fs, "/srcdat2", (short)1);
                // verify that there are no bad blocks.
                NameNode namenode = cluster.GetNameNode();
                ICollection <FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.GetNamesystem(
                    ).ListCorruptFileBlocks("/srcdat2", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting none."
                                              , badFiles.Count == 0);
                // Now deliberately remove blocks from all files
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j <= 1; j++)
                    {
                        FilePath storageDir = cluster.GetInstanceStorageDir(i, j);
                        FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                        Log.Info("Removing files from " + data_dir);
                        IList <FilePath> metadataFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                        if (metadataFiles == null)
                        {
                            continue;
                        }
                        foreach (FilePath metadataFile in metadataFiles)
                        {
                            FilePath blockFile = Block.MetaToBlockFile(metadataFile);
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blockFile.Delete());
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", metadataFile.Delete());
                        }
                    }
                }
                // Occasionally the BlockPoolSliceScanner can run before we have removed
                // the blocks. Restart the Datanode to trigger the scanner into running
                // once more.
                Log.Info("Restarting Datanode to trigger BlockPoolSliceScanner");
                cluster.RestartDataNodes();
                cluster.WaitActive();
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
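                // Block reports may lag; wait until the namenode has noticed all maxCorruptFileBlocks corrupt files.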
                while (badFiles.Count < maxCorruptFileBlocks)
                {
                    Log.Info("# of corrupt files is: " + badFiles.Count);
                    Sharpen.Thread.Sleep(10000);
                    badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
                }
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/srcdat2", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting "
                                              + maxCorruptFileBlocks + ".", badFiles.Count == maxCorruptFileBlocks);
                CorruptFileBlockIterator iter = (CorruptFileBlockIterator)fs.ListCorruptFileBlocks(new Path("/srcdat2"));
                int corruptPaths = CountPaths(iter);
                NUnit.Framework.Assert.IsTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got "
                                              + corruptPaths, corruptPaths > maxCorruptFileBlocks);
                NUnit.Framework.Assert.IsTrue("Iterator should have made more than 1 call but made "
                                              + iter.GetCallsMade(), iter.GetCallsMade() > 1);
                util.Cleanup(fs, "/srcdat2");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
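
CountPaths, used at the end of TestMaxCorruptFiles, is another fixture helper not shown in this excerpt. A plausible sketch, assuming it simply drains the corrupt-file-block iterator and counts the paths it returns:

        // Sketch only: drains the iterator returned by fs.ListCorruptFileBlocks.
        private static int CountPaths(RemoteIterator<Path> iter)
        {
            int count = 0;
            while (iter.HasNext())
            {
                Log.Info("corrupt file block path: " + iter.Next());
                count++;
            }
            return count;
        }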