Example #1
        /// <summary>Add volumes to the first DataNode.</summary>
        /// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
        /// <exception cref="System.IO.IOException"/>
        private void AddVolumes(int numNewVolumes)
        {
            FilePath dataDir = new FilePath(cluster.GetDataDirectory());
            DataNode dn      = cluster.GetDataNodes()[0];  // First DataNode.
            Configuration    conf          = dn.GetConf();
            string           oldDataDir    = conf.Get(DFSConfigKeys.DfsDatanodeDataDirKey);
            IList <FilePath> newVolumeDirs = new AList <FilePath>();
            StringBuilder    newDataDirBuf = new StringBuilder(oldDataDir);
            int startIdx = oldDataDir.Split(",").Length + 1;

            // Find the first available (non-taken) directory name for data volume.
            while (true)
            {
                FilePath volumeDir = new FilePath(dataDir, "data" + startIdx);
                if (!volumeDir.Exists())
                {
                    break;
                }
                startIdx++;
            }
            for (int i = startIdx; i < startIdx + numNewVolumes; i++)
            {
                FilePath volumeDir = new FilePath(dataDir, "data" + i.ToString());
                newVolumeDirs.AddItem(volumeDir);
                volumeDir.Mkdirs();
                newDataDirBuf.Append(",");
                newDataDirBuf.Append(StorageLocation.Parse(volumeDir.ToString()).ToString());
            }
            string newDataDir = newDataDirBuf.ToString();

            dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, newDataDir);
            // Verify the configuration value is appropriately set.
            string[] effectiveDataDirs = conf.Get(DFSConfigKeys.DfsDatanodeDataDirKey).Split(",");
            string[] expectDataDirs = newDataDir.Split(",");
            NUnit.Framework.Assert.AreEqual(expectDataDirs.Length, effectiveDataDirs.Length);
            for (int i_1 = 0; i_1 < expectDataDirs.Length; i_1++)
            {
                StorageLocation expectLocation    = StorageLocation.Parse(expectDataDirs[i_1]);
                StorageLocation effectiveLocation = StorageLocation.Parse(effectiveDataDirs[i_1]);
                NUnit.Framework.Assert.AreEqual(expectLocation.GetStorageType(),
                                                effectiveLocation.GetStorageType());
                NUnit.Framework.Assert.AreEqual(expectLocation.GetFile().GetCanonicalFile(),
                                                effectiveLocation.GetFile().GetCanonicalFile());
            }
            // Check that all newly created volumes are appropriately formatted.
            foreach (FilePath volumeDir_1 in newVolumeDirs)
            {
                FilePath curDir = new FilePath(volumeDir_1, "current");
                NUnit.Framework.Assert.IsTrue(curDir.Exists());
                NUnit.Framework.Assert.IsTrue(curDir.IsDirectory());
            }
        }
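
As a usage illustration (not part of the original example), a caller in the same test fixture might invoke the helper and then confirm that writes still succeed. The test name, file path, size, and seed below are hypothetical, and the sketch assumes the same `cluster` field that AddVolumes uses plus the standard DFSTestUtil helpers carried over by the port.

        /// <summary>Hypothetical caller: add one volume, then verify a small file can still be written.</summary>
        public virtual void TestWriteAfterAddingVolume()
        {
            AddVolumes(1);
            FileSystem fs = cluster.GetFileSystem();
            Path testFile = new Path("/test-after-add-volume");
            // 1 KB file, replication 1, fixed seed; the values are illustrative only.
            DFSTestUtil.CreateFile(fs, testFile, 1024L, (short)1, 0L);
            DFSTestUtil.WaitReplication(fs, testFile, (short)1);
        }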
Example #2
        /// <summary>Initializes the cluster.</summary>
        /// <param name="numDataNodes">number of datanodes</param>
        /// <param name="storagesPerDatanode">number of storage locations on each datanode</param>
        /// <param name="failedVolumesTolerated">number of acceptable volume failures</param>
        /// <exception cref="System.Exception">if there is any failure</exception>
        private void InitCluster(int numDataNodes, int storagesPerDatanode, int failedVolumesTolerated)
        {
            conf = new HdfsConfiguration();
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 512L);

            /*
             * Lower the DN heartbeat, DF rate, and recheck interval to one second
             * so state about failures and datanode death propagates faster.
             */
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsDfIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, failedVolumesTolerated);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes)
                          .StoragesPerDatanode(storagesPerDatanode).Build();
            cluster.WaitActive();
            fs      = cluster.GetFileSystem();
            dataDir = cluster.GetDataDirectory();
            long dnCapacity = DFSTestUtil.GetDatanodeCapacity(
                cluster.GetNamesystem().GetBlockManager().GetDatanodeManager(), 0);

            volumeCapacity = dnCapacity / cluster.GetStoragesPerDatanode();
        }
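
For context, a test built on this initializer would typically just call it with the desired topology and then use the fields it populates. The concrete arguments and the assertions below are illustrative, not taken from the original suite.

        /// <summary>Hypothetical usage: one datanode, two storage locations, no tolerated volume failures.</summary>
        public virtual void TestInitClusterPopulatesFields()
        {
            InitCluster(1, 2, 0);
            // InitCluster leaves conf, cluster, fs, dataDir, and volumeCapacity ready for the test body.
            NUnit.Framework.Assert.IsNotNull(fs);
            NUnit.Framework.Assert.IsTrue(volumeCapacity > 0);
        }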
Example #3
        /// <summary>Test that a full block report is sent after hot swapping volumes</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
        public virtual void TestFullBlockReportAfterRemovingVolumes()
        {
            Configuration conf = new Configuration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            // Similar to TestTriggerBlockReport, set really long values for
            // dfs.blockreport.intervalMsec and dfs.heartbeat.interval, so that
            // incremental block reports and heartbeats won't be sent during this
            // test unless they're triggered manually.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10800000L);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1080L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            cluster.WaitActive();
            DataNode dn = cluster.GetDataNodes()[0];
            DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(
                dn, cluster.GetNameNode());
            // Remove a data dir from the datanode by reconfiguring it to keep only data1.
            FilePath dataDirToKeep = new FilePath(cluster.GetDataDirectory(), "data1");

            dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, dataDirToKeep.ToString());
            // We should get exactly one full block report after the reconfiguration.
            Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(1)).BlockReport(
                Matchers.Any <DatanodeRegistration>(), Matchers.AnyString(),
                Matchers.Any <StorageBlockReport[]>(), Matchers.Any <BlockReportContext>());
        }
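
Because the block-report and heartbeat intervals above are set so high, any additional reports in a longer test would have to be triggered by hand. The fragment below is a hedged sketch of how that might look, assuming the Sharpen port keeps the upstream DataNodeTestUtils trigger helpers; it is not needed for the verification above, where the full report is produced by the reconfiguration itself.

            // Hypothetical manual triggers, assuming TriggerHeartbeat/TriggerBlockReport
            // exist in this port as they do in the upstream Java test utilities.
            DataNodeTestUtils.TriggerHeartbeat(dn);
            DataNodeTestUtils.TriggerBlockReport(dn);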
Example #4
        /// <summary>Bring up two clusters and assert that they are in different directories.</summary>
        /// <exception cref="System.Exception">on a failure</exception>
        public virtual void TestDualClusters()
        {
            FilePath      testDataCluster2 = new FilePath(testDataPath, Cluster2);
            FilePath      testDataCluster3 = new FilePath(testDataPath, Cluster3);
            Configuration conf             = new HdfsConfiguration();
            string        c2Path           = testDataCluster2.GetAbsolutePath();

            conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, c2Path);
            MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).Build();
            MiniDFSCluster cluster3 = null;

            try
            {
                string dataDir2 = cluster2.GetDataDirectory();
                NUnit.Framework.Assert.AreEqual(new FilePath(c2Path + "/data"), new FilePath(dataDir2));
                // Change the data dir.
                conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, testDataCluster3.GetAbsolutePath());
                MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
                cluster3 = builder.Build();
                string dataDir3 = cluster3.GetDataDirectory();
                NUnit.Framework.Assert.IsTrue("Clusters are bound to the same directory: " + dataDir2
                                              , !dataDir2.Equals(dataDir3));
            }
            finally
            {
                MiniDFSCluster.ShutdownCluster(cluster3);
                MiniDFSCluster.ShutdownCluster(cluster2);
            }
        }
Example #5
        public virtual void SetUp()
        {
            // Bring up a cluster of 2 datanodes.
            conf = new HdfsConfiguration();
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, block_size);
            // Allow a single volume failure (there are two volumes).
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(dn_num).Build();
            cluster.WaitActive();
            fs      = cluster.GetFileSystem();
            dataDir = new FilePath(cluster.GetDataDirectory());
        }
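
A matching TearDown for this fixture is not shown in the excerpt; one possibility, mirroring the TearDown in the next example and assuming the same `cluster` and `dataDir` fields, is sketched below.

        public virtual void TearDown()
        {
            // Mirrors the TearDown in the next example: stop the cluster, then remove its
            // data directory so later tests start from a clean state.
            if (cluster != null && cluster.IsClusterUp())
            {
                cluster.Shutdown();
            }
            if (dataDir.Exists() && !FileUtil.FullyDelete(dataDir))
            {
                throw new IOException("Could not delete hdfs directory in tearDown '" + dataDir + "'");
            }
        }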
Example #6
        public virtual void TearDown()
        {
            if (cluster.IsClusterUp())
            {
                cluster.Shutdown();
            }
            FilePath data_dir = new FilePath(cluster.GetDataDirectory());

            if (data_dir.Exists() && !FileUtil.FullyDelete(data_dir))
            {
                throw new IOException("Could not delete hdfs directory in tearDown '" + data_dir
                                      + "'");
            }
        }
Example #7
        public virtual void SetUp()
        {
            conf = new HdfsConfiguration();
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 512L);

            /*
             * Lower the DN heartbeat, DF rate, and recheck interval to one second
             * so state about failures and datanode death propagates faster.
             */
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsDfIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            // Allow a single volume failure (there are two volumes)
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            fs      = cluster.GetFileSystem();
            dataDir = cluster.GetDataDirectory();
        }
Example #8
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestGetReconfigureStatus()
        {
            ReconfigurationUtil ru = Org.Mockito.Mockito.Mock <ReconfigurationUtil>();

            datanode.SetReconfigurationUtil(ru);
            IList <ReconfigurationUtil.PropertyChange> changes = new AList <ReconfigurationUtil.PropertyChange>();
            FilePath newDir = new FilePath(cluster.GetDataDirectory(), "data_new");

            newDir.Mkdirs();
            changes.AddItem(new ReconfigurationUtil.PropertyChange(
                                DFSConfigKeys.DfsDatanodeDataDirKey, newDir.ToString(),
                                datanode.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey)));
            changes.AddItem(new ReconfigurationUtil.PropertyChange("randomKey", "new123", "old456"));
            Org.Mockito.Mockito.When(ru.ParseChangedProperties(Matchers.Any <Configuration>(),
                                                               Matchers.Any <Configuration>())).ThenReturn(changes);
            int    port    = datanode.GetIpcPort();
            string address = "localhost:" + port;

            Assert.AssertThat(admin.StartReconfiguration("datanode", address), CoreMatchers.Is(0));
            IList <string> outputs = null;
            int            count   = 100;

            while (count > 0)
            {
                outputs = GetReconfigureStatus("datanode", address);
                if (!outputs.IsEmpty() && outputs[0].Contains("finished"))
                {
                    break;
                }
                count--;
                Sharpen.Thread.Sleep(100);
            }
            NUnit.Framework.Assert.IsTrue(count > 0);
            // 8 output lines: 1 status header + 3 for the SUCCESS change + 4 for the FAILED change.
            Assert.AssertThat(outputs.Count, CoreMatchers.Is(8));
            IList <StorageLocation> locations = DataNode.GetStorageLocations(datanode.GetConf());

            Assert.AssertThat(locations.Count, CoreMatchers.Is(1));
            Assert.AssertThat(locations[0].GetFile(), CoreMatchers.Is(newDir));
            // Verify the directory is appropriately formatted.
            NUnit.Framework.Assert.IsTrue(new FilePath(newDir, Storage.StorageDirCurrent).IsDirectory());
            int successOffset = outputs[1].StartsWith("SUCCESS:") ? 1 : 5;
            int failedOffset  = outputs[1].StartsWith("FAILED:") ? 1 : 4;

            Assert.AssertThat(outputs[successOffset],
                              CoreMatchers.ContainsString("Change property " + DFSConfigKeys.DfsDatanodeDataDirKey));
            Assert.AssertThat(outputs[successOffset + 1],
                              CoreMatchers.Is(CoreMatchers.AllOf(CoreMatchers.ContainsString("From:"),
                                                                 CoreMatchers.ContainsString("data1"),
                                                                 CoreMatchers.ContainsString("data2"))));
            Assert.AssertThat(outputs[successOffset + 2],
                              CoreMatchers.Is(CoreMatchers.Not(CoreMatchers.AnyOf(CoreMatchers.ContainsString("data1"),
                                                                                  CoreMatchers.ContainsString("data2")))));
            Assert.AssertThat(outputs[successOffset + 2],
                              CoreMatchers.Is(CoreMatchers.AllOf(CoreMatchers.ContainsString("To"),
                                                                 CoreMatchers.ContainsString("data_new"))));
            Assert.AssertThat(outputs[failedOffset], CoreMatchers.ContainsString("Change property randomKey"));
            Assert.AssertThat(outputs[failedOffset + 1], CoreMatchers.ContainsString("From: \"old456\""));
            Assert.AssertThat(outputs[failedOffset + 2], CoreMatchers.ContainsString("To: \"new123\""));
        }
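
The test above relies on fixture fields (`conf`, `cluster`, `datanode`, `admin`) and a `GetReconfigureStatus` helper that are not included in this excerpt. A hypothetical SetUp that would provide those fields is sketched below; the single-datanode topology and the DFSAdmin constructor argument are assumptions, and the status-polling helper is omitted because its output-capturing code is not shown here.

        /// <summary>Hypothetical fixture setup for the reconfiguration test above.</summary>
        public virtual void SetUp()
        {
            conf = new Configuration();
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            datanode = cluster.GetDataNodes()[0];
            admin    = new DFSAdmin(conf);
        }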