Example #1
        public virtual void TestGetNNUris()
        {
            HdfsConfiguration conf        = new HdfsConfiguration();
            string            Ns1Nn1Addr  = "ns1-nn1.example.com:8020";
            string            Ns1Nn2Addr  = "ns1-nn2.example.com:8020";
            string            Ns2NnAddr   = "ns2-nn.example.com:8020";
            string            Nn1Addr     = "nn.example.com:8020";
            string            Nn1SrvcAddr = "nn.example.com:8021";
            string            Nn2Addr     = "nn2.example.com:8020";

            conf.Set(DFSConfigKeys.DfsNameservices, "ns1,ns2");
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsHaNamenodesKeyPrefix, "ns1"), "nn1,nn2"
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns1", "nn1"
                                            ), Ns1Nn1Addr);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns1", "nn2"
                                            ), Ns1Nn2Addr);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey, "ns2"
                                            ), Ns2NnAddr);
            conf.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, "hdfs://" + Nn1Addr);
            conf.Set(CommonConfigurationKeys.FsDefaultNameKey, "hdfs://" + Nn2Addr);
            ICollection <URI> uris = DFSUtil.GetNameServiceUris(conf, DFSConfigKeys.DfsNamenodeServiceRpcAddressKey
                                                                , DFSConfigKeys.DfsNamenodeRpcAddressKey);

            NUnit.Framework.Assert.AreEqual(4, uris.Count);
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://ns1")));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Ns2NnAddr)));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Nn1Addr)));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Nn2Addr)));
            // Make sure that non-HDFS URIs in fs.defaultFS don't get included.
            conf.Set(CommonConfigurationKeys.FsDefaultNameKey, "viewfs://vfs-name.example.com"
                     );
            uris = DFSUtil.GetNameServiceUris(conf, DFSConfigKeys.DfsNamenodeServiceRpcAddressKey
                                              , DFSConfigKeys.DfsNamenodeRpcAddressKey);
            NUnit.Framework.Assert.AreEqual(3, uris.Count);
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://ns1")));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Ns2NnAddr)));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Nn1Addr)));
            // Make sure that an HA URI being the default URI doesn't result in multiple
            // entries being returned.
            conf.Set(CommonConfigurationKeys.FsDefaultNameKey, "hdfs://ns1");
            uris = DFSUtil.GetNameServiceUris(conf, DFSConfigKeys.DfsNamenodeServiceRpcAddressKey
                                              , DFSConfigKeys.DfsNamenodeRpcAddressKey);
            NUnit.Framework.Assert.AreEqual(3, uris.Count);
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://ns1")));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Ns2NnAddr)));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Nn1Addr)));
            // Make sure that when a service RPC address is used that is distinct from
            // the client RPC address, and that client RPC address is also used as the
            // default URI, that the client URI does not end up in the set of URIs
            // returned.
            conf = new HdfsConfiguration();
            conf.Set(CommonConfigurationKeys.FsDefaultNameKey, "hdfs://" + Nn1Addr);
            conf.Set(DFSConfigKeys.DfsNamenodeRpcAddressKey, Nn1Addr);
            conf.Set(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey, Nn1SrvcAddr);
            uris = DFSUtil.GetNameServiceUris(conf, DFSConfigKeys.DfsNamenodeServiceRpcAddressKey
                                              , DFSConfigKeys.DfsNamenodeRpcAddressKey);
            NUnit.Framework.Assert.AreEqual(1, uris.Count);
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://" + Nn1SrvcAddr)));
        }
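The suffixed keys above follow the dotted HDFS key-naming convention. A minimal sketch of how the expansion is assumed to work (AddKeySuffixes joining the base key and each suffix with a dot; the literal key names in the comments are illustrative, not asserted by this test):

            // Assumed expansions of the suffixed keys used above:
            //   dfs.ha.namenodes.ns1             -> "nn1,nn2"
            //   dfs.namenode.rpc-address.ns1.nn1 -> Ns1Nn1Addr
            string haNnsKey = DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsHaNamenodesKeyPrefix, "ns1");
            string rpcKey   = DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns1", "nn1");
            conf.Set(haNnsKey, "nn1,nn2");
            conf.Set(rpcKey, Ns1Nn1Addr);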
Example #2
        public virtual void TestGetNNServiceRpcAddressesForNsIds()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameservices, "nn1,nn2");
            conf.Set(DFSConfigKeys.DfsInternalNameservicesKey, "nn1");
            // Test that the configured list of namenodes is returned
            string Nn1Address = "localhost:9000";
            string Nn2Address = "localhost:9001";

            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "nn1"), Nn1Address
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "nn2"), Nn2Address
                     );
            IDictionary <string, IDictionary <string, IPEndPoint> > nnMap = DFSUtil.GetNNServiceRpcAddressesForCluster
                                                                                (conf);

            NUnit.Framework.Assert.AreEqual(1, nnMap.Count);
            NUnit.Framework.Assert.IsTrue(nnMap.Contains("nn1"));
            conf.Set(DFSConfigKeys.DfsInternalNameservicesKey, "nn3");
            try
            {
                DFSUtil.GetNNServiceRpcAddressesForCluster(conf);
                NUnit.Framework.Assert.Fail("Should fail for misconfiguration");
            }
            catch (IOException)
            {
            }
        }
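A short sketch (not part of the original test) of how the nested map returned by GetNNServiceRpcAddressesForCluster could be inspected; the outer key is the nameservice ID and the inner key the namenode ID:

            foreach (KeyValuePair<string, IDictionary<string, IPEndPoint>> ns in nnMap)
            {
                foreach (KeyValuePair<string, IPEndPoint> nn in ns.Value)
                {
                    System.Console.Out.WriteLine(ns.Key + "/" + nn.Key + " -> " + nn.Value);
                }
            }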
Example #3
        /// <summary>Verify secondary namenode port usage.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSecondaryNodePorts()
        {
            NameNode nn = null;

            try
            {
                nn = StartNameNode();
                // bind http server to the same port as name-node
                Configuration conf2 = new HdfsConfiguration(config);
                conf2.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, config.Get(DFSConfigKeys
                                                                                       .DfsNamenodeHttpAddressKey));
                Log.Info("= Starting 1 on: " + conf2.Get(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey
                                                         ));
                bool started = CanStartSecondaryNode(conf2);
                NUnit.Framework.Assert.IsFalse(started); // should fail
                // bind http server to a different port
                conf2.Set(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, ThisHost);
                Log.Info("= Starting 2 on: " + conf2.Get(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey
                                                         ));
                started = CanStartSecondaryNode(conf2);
                NUnit.Framework.Assert.IsTrue(started); // should start now
            }
            finally
            {
                StopNameNode(nn);
            }
        }
Example #4
        /// <summary>Verify BackupNode port usage.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestBackupNodePorts()
        {
            NameNode nn = null;

            try
            {
                nn = StartNameNode();
                Configuration backup_config = new HdfsConfiguration(config);
                backup_config.Set(DFSConfigKeys.DfsNamenodeBackupAddressKey, ThisHost);
                // bind http server to the same port as name-node
                backup_config.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, backup_config.Get
                                      (DFSConfigKeys.DfsNamenodeHttpAddressKey));
                Log.Info("= Starting 1 on: " + backup_config.Get(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey
                                                                 ));
                NUnit.Framework.Assert.IsFalse("Backup started on same port as Namenode", CanStartBackupNode
                                                   (backup_config));
                // should fail
                // bind http server to a different port
                backup_config.Set(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey, ThisHost);
                Log.Info("= Starting 2 on: " + backup_config.Get(DFSConfigKeys.DfsNamenodeBackupHttpAddressKey
                                                                 ));
                bool started = CanStartBackupNode(backup_config);
                NUnit.Framework.Assert.IsTrue("Backup Namenode should've started", started);
            }
            finally
            {
                // should start now
                StopNameNode(nn);
            }
        }
Example #5
        /// <exception cref="System.IO.IOException"/>
        internal static void Setrep(int fromREP, int toREP, bool simulatedStorage)
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsReplicationKey, string.Empty + fromREP);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString
                         (2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build(
                );
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            try
            {
                Path root = TestDFSShell.Mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP
                                                            ));
                Path f = TestDFSShell.WriteFile(fs, new Path(root, "foo"));
                {
                    // Verify setrep for changing replication
                    string[] args = new string[] { "-setrep", "-w", string.Empty + toREP, string.Empty
                                                   + f };
                    FsShell shell = new FsShell();
                    shell.SetConf(conf);
                    try
                    {
                        NUnit.Framework.Assert.AreEqual(0, shell.Run(args));
                    }
                    catch (Exception e)
                    {
                        NUnit.Framework.Assert.IsTrue("-setrep " + e, false);
                    }
                }
                //get fs again since the old one may be closed
                fs = cluster.GetFileSystem();
                FileStatus file = fs.GetFileStatus(f);
                long       len  = file.GetLen();
                foreach (BlockLocation locations in fs.GetFileBlockLocations(file, 0, len))
                {
                    NUnit.Framework.Assert.IsTrue(locations.GetHosts().Length == toREP);
                }
                TestDFSShell.Show("done setrep waiting: " + root);
            }
            finally
            {
                try
                {
                    fs.Close();
                }
                catch (Exception)
                {
                }
                cluster.Shutdown();
            }
        }
Example #6
        /// <summary>Bring up two clusters and assert that they are in different directories.</summary>
        /// <exception cref="System.Exception">on a failure</exception>
        public virtual void TestDualClusters()
        {
            FilePath      testDataCluster2 = new FilePath(testDataPath, Cluster2);
            FilePath      testDataCluster3 = new FilePath(testDataPath, Cluster3);
            Configuration conf             = new HdfsConfiguration();
            string        c2Path           = testDataCluster2.GetAbsolutePath();

            conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, c2Path);
            MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).Build();
            MiniDFSCluster cluster3 = null;

            try
            {
                string dataDir2 = cluster2.GetDataDirectory();
                NUnit.Framework.Assert.AreEqual(new FilePath(c2Path + "/data"), new FilePath(dataDir2
                                                                                             ));
                //change the data dir
                conf.Set(MiniDFSCluster.HdfsMinidfsBasedir, testDataCluster3.GetAbsolutePath());
                MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
                cluster3 = builder.Build();
                string dataDir3 = cluster3.GetDataDirectory();
                NUnit.Framework.Assert.IsTrue("Clusters are bound to the same directory: " + dataDir2
                                              , !dataDir2.Equals(dataDir3));
            }
            finally
            {
                MiniDFSCluster.ShutdownCluster(cluster3);
                MiniDFSCluster.ShutdownCluster(cluster2);
            }
        }
Example #7
        public virtual void TestSomeConfsNNSpecificSomeNSSpecific()
        {
            HdfsConfiguration conf = new HdfsConfiguration();
            string            key  = DFSConfigKeys.DfsNamenodeSharedEditsDirKey;

            conf.Set(key, "global-default");
            conf.Set(key + ".ns1", "ns1-override");
            conf.Set(key + ".ns1.nn1", "nn1-override");
            // A namenode in another nameservice should get the global default.
            Configuration newConf = new Configuration(conf);

            NameNode.InitializeGenericKeys(newConf, "ns2", "nn1");
            NUnit.Framework.Assert.AreEqual("global-default", newConf.Get(key));
            // A namenode in another non-HA nameservice should get global default.
            newConf = new Configuration(conf);
            NameNode.InitializeGenericKeys(newConf, "ns2", null);
            NUnit.Framework.Assert.AreEqual("global-default", newConf.Get(key));
            // A namenode in the same nameservice should get the ns setting
            newConf = new Configuration(conf);
            NameNode.InitializeGenericKeys(newConf, "ns1", "nn2");
            NUnit.Framework.Assert.AreEqual("ns1-override", newConf.Get(key));
            // The nn with the nn-specific setting should get its own override
            newConf = new Configuration(conf);
            NameNode.InitializeGenericKeys(newConf, "ns1", "nn1");
            NUnit.Framework.Assert.AreEqual("nn1-override", newConf.Get(key));
        }
Example #8
        private Configuration SetupAddress(string key)
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameservices, "nn1");
            conf.Set(DFSUtil.AddKeySuffixes(key, "nn1"), "localhost:9000");
            return(conf);
        }
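A hypothetical call site for this helper, building one configuration keyed by the client RPC address and one keyed by the service RPC address (variable names are illustrative only):

            Configuration clientConf  = SetupAddress(DFSConfigKeys.DfsNamenodeRpcAddressKey);
            Configuration serviceConf = SetupAddress(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey);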
Example #9
        public virtual void TestGetOnlyNameServiceIdOrNull()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameservices, "ns1,ns2");
            NUnit.Framework.Assert.IsNull(DFSUtil.GetOnlyNameServiceIdOrNull(conf));
            conf.Set(DFSConfigKeys.DfsNameservices, string.Empty);
            NUnit.Framework.Assert.IsNull(DFSUtil.GetOnlyNameServiceIdOrNull(conf));
            conf.Set(DFSConfigKeys.DfsNameservices, "ns1");
            NUnit.Framework.Assert.AreEqual("ns1", DFSUtil.GetOnlyNameServiceIdOrNull(conf));
        }
Example #10
        public static void SetUp()
        {
            ClearBaseDir();
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeHttpsPortKey, 0);
            conf.Set(DFSConfigKeys.DfsDatanodeAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsDatanodeIpcAddressKey, "localhost:0");
            conf.Set(DFSConfigKeys.DfsDatanodeHttpAddressKey, "localhost:0");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
            cluster.WaitActive();
        }
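A matching teardown sketch, assuming the static cluster field assigned in SetUp is released once per fixture (the method name is illustrative):

        public static void TearDown()
        {
            if (cluster != null)
            {
                cluster.Shutdown();
                cluster = null;
            }
        }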
Example #11
        /// <summary>
        /// Test <see cref="DFSUtil.GetNamenodeNameServiceId(Org.Apache.Hadoop.Conf.Configuration)"/>
        /// to ensure an exception is thrown when multiple RPC addresses match the
        /// local node's address.
        /// </summary>
        public virtual void TestGetNameServiceIdException()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameservices, "nn1,nn2");
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "nn1"), "localhost:9000"
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "nn2"), "localhost:9001"
                     );
            DFSUtil.GetNamenodeNameServiceId(conf);
            NUnit.Framework.Assert.Fail("Expected exception is not thrown");
        }
Example #12
        public virtual void TestRegistrationWithDifferentSoftwareVersions()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeMinSupportedNamenodeVersionKey, "3.0.0");
            conf.Set(DFSConfigKeys.DfsNamenodeMinSupportedDatanodeVersionKey, "3.0.0");
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
                long        nnCTime         = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
                StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock <StorageInfo>();
                Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
                DatanodeRegistration mockDnReg = Org.Mockito.Mockito.Mock <DatanodeRegistration>();
                Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockDnReg)
                .GetVersion();
                Org.Mockito.Mockito.DoReturn("127.0.0.1").When(mockDnReg).GetIpAddr();
                Org.Mockito.Mockito.DoReturn(123).When(mockDnReg).GetXferPort();
                Org.Mockito.Mockito.DoReturn("fake-storage-id").When(mockDnReg).GetDatanodeUuid();
                Org.Mockito.Mockito.DoReturn(mockStorageInfo).When(mockDnReg).GetStorageInfo();
                // Should succeed when software versions are the same.
                Org.Mockito.Mockito.DoReturn("3.0.0").When(mockDnReg).GetSoftwareVersion();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should succeed when software version of DN is above minimum required by NN.
                Org.Mockito.Mockito.DoReturn("4.0.0").When(mockDnReg).GetSoftwareVersion();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should fail when software version of DN is below minimum required by NN.
                Org.Mockito.Mockito.DoReturn("2.0.0").When(mockDnReg).GetSoftwareVersion();
                try
                {
                    rpcServer.RegisterDatanode(mockDnReg);
                    NUnit.Framework.Assert.Fail("Should not have been able to register DN with too-low version."
                                                );
                }
                catch (IncorrectVersionException ive)
                {
                    GenericTestUtils.AssertExceptionContains("The reported DataNode version is too low"
                                                             , ive);
                    Log.Info("Got expected exception", ive);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #13
        public virtual void TestEarlierVersionEditLog()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop10MultiblockTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-1.0");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            FilePath dataDir = new FilePath(dfsDir, "data");

            GenericTestUtils.AssertExists(dataDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).NumDataNodes(1).StartupOption
                                         (HdfsServerConstants.StartupOption.Upgrade).Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/user/todd/4blocks");
                // Read it without caring about the actual data within - we just need
                // to make sure that the block states and locations are OK.
                DFSTestUtil.ReadFile(fs, testPath);
                // Ensure that we can append to it - if the blocks were in some funny
                // state we'd get some kind of issue here.
                FSDataOutputStream stm = fs.Append(testPath);
                try
                {
                    stm.Write(1);
                }
                finally
                {
                    IOUtils.CloseStream(stm);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #14
        public static void SetupCluster()
        {
            DFSInputStream.tcpReadsDisabledForTesting = true;
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, string.Empty);
            conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientDomainSocketDataTraffic, false);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey, UserGroupInformation.GetCurrentUser
                         ().GetShortUserName());
            DomainSocket.DisableBindPathValidation();
            SetupCluster(1, conf);
        }
Example #15
        public virtual void GetNameServiceId()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameserviceId, "nn1");
            NUnit.Framework.Assert.AreEqual("nn1", DFSUtil.GetNamenodeNameServiceId(conf));
        }
Example #16
        /// <summary>Tests resolution of the user home directory in DFS.</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestHomeDirectory()
        {
            string[]      homeBases = new string[] { "/home", "/home/user" };
            Configuration conf      = new HdfsConfiguration();

            foreach (string homeBase in homeBases)
            {
                conf.Set(DFSConfigKeys.DfsUserHomeDirPrefixKey, homeBase);
                MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem     fileSys = cluster.GetFileSystem();
                try
                {
                    // test home directory
                    Path home = fileSys.MakeQualified(new Path(homeBase + "/" + GetUserName(fileSys))
                                                      );
                    Path fsHome = fileSys.GetHomeDirectory();
                    NUnit.Framework.Assert.AreEqual(home, fsHome);
                }
                finally
                {
                    fileSys.Close();
                    cluster.Shutdown();
                }
            }
        }
Example #17
        private static Configuration CreateWebHDFSHAConfiguration(string logicalHostName,
                                                                  string nnaddr1, string nnaddr2)
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsNameservices, "ns1");
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsHaNamenodesKeyPrefix, "ns1"), "nn1,nn2"
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeHttpAddressKey, "ns1", "nn1"
                                            ), nnaddr1);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeHttpAddressKey, "ns1", "nn2"
                                            ), nnaddr2);
            conf.Set(DFSConfigKeys.DfsClientFailoverProxyProviderKeyPrefix + "." + logicalHostName
                     , typeof(ConfiguredFailoverProxyProvider).FullName);
            return(conf);
        }
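A hypothetical call site for the helper above; the logical host name doubles as the suffix of the failover proxy provider key, and the two addresses are the HTTP endpoints of the two namenodes (host names and ports below are made up for illustration):

            Configuration conf = CreateWebHDFSHAConfiguration("ns1", "nn1.example.com:50070",
                                                              "nn2.example.com:50070");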
Example #18
        /// <exception cref="System.Exception"/>
        private void DoTestFSOutputSummer(string checksumType)
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, BytesPerChecksum);
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, checksumType);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes
                                                                                   ).Build();

            fileSys = cluster.GetFileSystem();
            try
            {
                Path   file = new Path("try.dat");
                Random rand = new Random(seed);
                rand.NextBytes(expected);
                WriteFile1(file);
                WriteFile2(file);
                WriteFile3(file);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
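Hypothetical call sites for the helper above; "CRC32" and "CRC32C" are the checksum type names HDFS normally accepts for dfs.checksum.type (assumed unchanged in this port):

            DoTestFSOutputSummer("CRC32");
            DoTestFSOutputSummer("CRC32C");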
Example #19
        public static void SetupCluster()
        {
            if (DomainSocket.GetLoadingFailureReason() != null)
            {
                return;
            }
            sockDir = new TemporarySocketDirectory();
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "TestParallelShortCircuitReadUnCached._PORT.sock"
                                                                        ).GetAbsolutePath());
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            // Enabling data transfer encryption should have no effect when using
            // short-circuit local reads.  This is a regression test for HDFS-5353.
            conf.SetBoolean(DFSConfigKeys.DfsEncryptDataTransferKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            conf.SetBoolean(DFSConfigKeys.DfsClientDomainSocketDataTraffic, true);
            // We want to test reading from stale sockets.
            conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 1);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 5 * 60 * 1000);
            conf.SetInt(DFSConfigKeys.DfsClientSocketCacheCapacityKey, 32);
            // Avoid using the FileInputStreamCache.
            conf.SetInt(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheSizeKey, 0);
            DomainSocket.DisableBindPathValidation();
            DFSInputStream.tcpReadsDisabledForTesting = true;
            SetupCluster(1, conf);
        }
Example #20
        /// <exception cref="System.Exception"/>
        public virtual void TestWriteConf()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 4096);
            System.Console.Out.WriteLine("Setting conf in: " + Runtime.IdentityHashCode(conf)
                                         );
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            FileSystem     fs      = null;
            OutputStream   os      = null;

            try
            {
                fs = cluster.GetFileSystem();
                Path filePath = new Path("/testWriteConf.xml");
                os = fs.Create(filePath);
                StringBuilder longString = new StringBuilder();
                for (int i = 0; i < 100000; i++)
                {
                    longString.Append("hello");
                }
                // 500KB
                conf.Set("foobar", longString.ToString());
                conf.WriteXml(os);
                os.Close();
                os = null;
                fs.Close();
                fs = null;
            }
            finally
            {
                IOUtils.Cleanup(null, os, fs);
                cluster.Shutdown();
            }
        }
Example #21
        public virtual void TestSmallBlock()
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsBytesPerChecksumKey, "1");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fileSys = cluster.GetFileSystem();

            try
            {
                Path file1 = new Path("smallblocktest.dat");
                WriteFile(fileSys, file1);
                CheckFile(fileSys, file1);
                CleanupFile(fileSys, file1);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
Example #22
        public virtual void TestLoadLogsFromBuggyEarlierVersions()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop23BrokenAppendTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-with-buggy-append");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).NumDataNodes(0).WaitSafeMode
                                         (false).StartupOption(HdfsServerConstants.StartupOption.Upgrade).Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/tmp/io_data/test_io_0");
                NUnit.Framework.Assert.AreEqual(2 * 1024 * 1024, fs.GetFileStatus(testPath).GetLen
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #23
        public virtual void TestGetSpnegoKeytabKey()
        {
            HdfsConfiguration conf       = new HdfsConfiguration();
            string            defaultKey = "default.spengo.key";

            conf.Unset(DFSConfigKeys.DfsWebAuthenticationKerberosKeytabKey);
            NUnit.Framework.Assert.AreEqual("Test spnego key in config is null", defaultKey,
                                            DFSUtil.GetSpnegoKeytabKey(conf, defaultKey));
            conf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosKeytabKey, string.Empty);
            NUnit.Framework.Assert.AreEqual("Test spnego key is empty", defaultKey, DFSUtil.GetSpnegoKeytabKey
                                                (conf, defaultKey));
            string spengoKey = "spengo.key";

            conf.Set(DFSConfigKeys.DfsWebAuthenticationKerberosKeytabKey, spengoKey);
            NUnit.Framework.Assert.AreEqual("Test spnego key is NOT null", DFSConfigKeys.DfsWebAuthenticationKerberosKeytabKey
                                            , DFSUtil.GetSpnegoKeytabKey(conf, defaultKey));
        }
Example #24
        public static void SetupCluster()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 4096);
            conf.Set("fs.hdfs.impl.disable.cache", "true");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            fs      = cluster.GetFileSystem();
        }
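A matching teardown sketch for this fixture, reusing the null-safe MiniDFSCluster.ShutdownCluster helper seen in the TestDualClusters example (the method name is illustrative):

        public static void TearDownCluster()
        {
            MiniDFSCluster.ShutdownCluster(cluster);
        }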
Example #25
        public virtual void TestPipelineRecoveryOnRestartFailure()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientDatanodeRestartTimeoutKey, "5");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 2;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fileSys = cluster.GetFileSystem();
                Path       file    = new Path("dataprotocol3.dat");
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)2, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                string   dnAddr1  = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args1 = new string[] { "-shutdownDatanode", dnAddr1, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args1));
                Sharpen.Thread.Sleep(4000);
                // This should succeed without restarting the node. The restart will
                // expire and regular pipeline recovery will kick in.
                @out.Close();
                // At this point there is only one node in the cluster.
                @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                dn = cluster.GetDataNodes()[1];
                string dnAddr2 = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args2 = new string[] { "-shutdownDatanode", dnAddr2, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args2));
                Sharpen.Thread.Sleep(4000);
                try
                {
                    // close should fail
                    @out.Close();
                    NUnit.Framework.Assert.Fail("Close should have failed after the second datanode was shut down.");
                }
                catch (IOException)
                {
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #26
        public virtual void TestPacketTransmissionDelay()
        {
            // Make the first datanode not relay the heartbeat packet.
            DataNodeFaultInjector dnFaultInjector = new _DataNodeFaultInjector_171();
            DataNodeFaultInjector oldDnInjector   = DataNodeFaultInjector.Get();

            DataNodeFaultInjector.Set(dnFaultInjector);
            // Setting the timeout to be 3 seconds. Normally heartbeat packet
            // would be sent every 1.5 seconds if there is no data traffic.
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientSocketTimeoutKey, "3000");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 2;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(new Path("noheartbeat.dat"), (short)2);
                @out.Write(unchecked ((int)(0x31)));
                @out.Hflush();
                DFSOutputStream dfsOut = (DFSOutputStream)@out.GetWrappedStream();
                // original pipeline
                DatanodeInfo[] orgNodes = dfsOut.GetPipeline();
                // Cause the second datanode to timeout on reading packet
                Sharpen.Thread.Sleep(3500);
                @out.Write(unchecked ((int)(0x32)));
                @out.Hflush();
                // new pipeline
                DatanodeInfo[] newNodes = dfsOut.GetPipeline();
                @out.Close();
                bool contains = false;
                for (int i = 0; i < newNodes.Length; i++)
                {
                    if (orgNodes[0].GetXferAddr().Equals(newNodes[i].GetXferAddr()))
                    {
                        throw new IOException("The first datanode should have been replaced.");
                    }
                    if (orgNodes[1].GetXferAddr().Equals(newNodes[i].GetXferAddr()))
                    {
                        contains = true;
                    }
                }
                NUnit.Framework.Assert.IsTrue(contains);
            }
            finally
            {
                DataNodeFaultInjector.Set(oldDnInjector);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #27
        public virtual void TestHANameNodesWithFederation()
        {
            HdfsConfiguration conf       = new HdfsConfiguration();
            string            Ns1Nn1Host = "ns1-nn1.example.com:8020";
            string            Ns1Nn2Host = "ns1-nn2.example.com:8020";
            string            Ns2Nn1Host = "ns2-nn1.example.com:8020";
            string            Ns2Nn2Host = "ns2-nn2.example.com:8020";

            conf.Set(CommonConfigurationKeys.FsDefaultNameKey, "hdfs://ns1");
            // Two nameservices, each with two NNs.
            conf.Set(DFSConfigKeys.DfsNameservices, "ns1,ns2");
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsHaNamenodesKeyPrefix, "ns1"), "ns1-nn1,ns1-nn2"
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsHaNamenodesKeyPrefix, "ns2"), "ns2-nn1,ns2-nn2"
                     );
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns1", "ns1-nn1"
                                            ), Ns1Nn1Host);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns1", "ns1-nn2"
                                            ), Ns1Nn2Host);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns2", "ns2-nn1"
                                            ), Ns2Nn1Host);
            conf.Set(DFSUtil.AddKeySuffixes(DFSConfigKeys.DfsNamenodeRpcAddressKey, "ns2", "ns2-nn2"
                                            ), Ns2Nn2Host);
            IDictionary <string, IDictionary <string, IPEndPoint> > map = DFSUtil.GetHaNnRpcAddresses
                                                                              (conf);

            NUnit.Framework.Assert.IsTrue(HAUtil.IsHAEnabled(conf, "ns1"));
            NUnit.Framework.Assert.IsTrue(HAUtil.IsHAEnabled(conf, "ns2"));
            NUnit.Framework.Assert.IsFalse(HAUtil.IsHAEnabled(conf, "ns3"));
            NUnit.Framework.Assert.AreEqual(Ns1Nn1Host, map["ns1"]["ns1-nn1"].ToString());
            NUnit.Framework.Assert.AreEqual(Ns1Nn2Host, map["ns1"]["ns1-nn2"].ToString());
            NUnit.Framework.Assert.AreEqual(Ns2Nn1Host, map["ns2"]["ns2-nn1"].ToString());
            NUnit.Framework.Assert.AreEqual(Ns2Nn2Host, map["ns2"]["ns2-nn2"].ToString());
            NUnit.Framework.Assert.AreEqual(Ns1Nn1Host, DFSUtil.GetNamenodeServiceAddr(conf,
                                                                                       "ns1", "ns1-nn1"));
            NUnit.Framework.Assert.AreEqual(Ns1Nn2Host, DFSUtil.GetNamenodeServiceAddr(conf,
                                                                                       "ns1", "ns1-nn2"));
            NUnit.Framework.Assert.AreEqual(Ns2Nn1Host, DFSUtil.GetNamenodeServiceAddr(conf,
                                                                                       "ns2", "ns2-nn1"));
            // No nameservice was given and we can't determine which service addr
            // to use as two nameservices could share a namenode ID.
            NUnit.Framework.Assert.AreEqual(null, DFSUtil.GetNamenodeServiceAddr(conf, null,
                                                                                 "ns1-nn1"));
            // Ditto for nameservice IDs, if multiple are defined
            NUnit.Framework.Assert.AreEqual(null, DFSUtil.GetNamenodeNameServiceId(conf));
            NUnit.Framework.Assert.AreEqual(null, DFSUtil.GetSecondaryNameServiceId(conf));
            ICollection <URI> uris = DFSUtil.GetNameServiceUris(conf, DFSConfigKeys.DfsNamenodeRpcAddressKey
                                                                );

            NUnit.Framework.Assert.AreEqual(2, uris.Count);
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://ns1")));
            NUnit.Framework.Assert.IsTrue(uris.Contains(new URI("hdfs://ns2")));
        }
Example #28
        /// <summary>Verify namenode port usage.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void RunTestNameNodePorts(bool withService)
        {
            NameNode nn = null;

            try
            {
                nn = StartNameNode(withService);
                // start another namenode on the same port
                Configuration conf2 = new HdfsConfiguration(config);
                conf2.Set(DFSConfigKeys.DfsNamenodeNameDirKey, Util.FileAsURI(new FilePath(hdfsDir
                                                                                           , "name2")).ToString());
                DFSTestUtil.FormatNameNode(conf2);
                bool started = CanStartNameNode(conf2);
                NUnit.Framework.Assert.IsFalse(started); // should fail
                // start on a different main port
                FileSystem.SetDefaultUri(conf2, "hdfs://" + ThisHost);
                started = CanStartNameNode(conf2);
                NUnit.Framework.Assert.IsFalse(started); // should fail again
                // reset conf2 since NameNode modifies it
                FileSystem.SetDefaultUri(conf2, "hdfs://" + ThisHost);
                // different http port
                conf2.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, ThisHost);
                started = CanStartNameNode(conf2);
                if (withService)
                {
                    NUnit.Framework.Assert.IsFalse("Should've failed on service port", started);
                    // reset conf2 since NameNode modifies it
                    FileSystem.SetDefaultUri(conf2, "hdfs://" + ThisHost);
                    conf2.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, ThisHost);
                    // Set Service address
                    conf2.Set(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey, ThisHost);
                    started = CanStartNameNode(conf2);
                }
                NUnit.Framework.Assert.IsTrue(started);
            }
            finally
            {
                StopNameNode(nn);
            }
        }
Example #29
        public virtual void TestRegistrationWithDifferentSoftwareVersionsDuringUpgrade()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeMinSupportedNamenodeVersionKey, "1.0.0");
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
                long        nnCTime         = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
                StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock <StorageInfo>();
                Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
                DatanodeRegistration mockDnReg = Org.Mockito.Mockito.Mock <DatanodeRegistration>();
                Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockDnReg)
                .GetVersion();
                Org.Mockito.Mockito.DoReturn("fake-storage-id").When(mockDnReg).GetDatanodeUuid();
                Org.Mockito.Mockito.DoReturn(mockStorageInfo).When(mockDnReg).GetStorageInfo();
                // Should succeed when software versions are the same and CTimes are the
                // same.
                Org.Mockito.Mockito.DoReturn(VersionInfo.GetVersion()).When(mockDnReg).GetSoftwareVersion
                    ();
                Org.Mockito.Mockito.DoReturn("127.0.0.1").When(mockDnReg).GetIpAddr();
                Org.Mockito.Mockito.DoReturn(123).When(mockDnReg).GetXferPort();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should succeed when software versions are the same and CTimes are
                // different.
                Org.Mockito.Mockito.DoReturn(nnCTime + 1).When(mockStorageInfo).GetCTime();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should fail when software version of DN is different from NN and CTimes
                // are different.
                Org.Mockito.Mockito.DoReturn(VersionInfo.GetVersion() + ".1").When(mockDnReg).GetSoftwareVersion
                    ();
                try
                {
                    rpcServer.RegisterDatanode(mockDnReg);
                    NUnit.Framework.Assert.Fail("Should not have been able to register DN with different software"
                                                + " versions and CTimes");
                }
                catch (IncorrectVersionException ive)
                {
                    GenericTestUtils.AssertExceptionContains("does not match CTime of NN", ive);
                    Log.Info("Got expected exception", ive);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #30
        public virtual void TestDeprecatedKeys()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("topology.script.file.name", "xyz");
            string scriptFile = conf.Get(DFSConfigKeys.NetTopologyScriptFileNameKey);
            NUnit.Framework.Assert.IsTrue(scriptFile.Equals("xyz"));
            conf.SetInt("dfs.replication.interval", 1);
            string alpha       = DFSConfigKeys.DfsNamenodeReplicationIntervalKey;
            int    repInterval = conf.GetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 3);
            NUnit.Framework.Assert.IsTrue(repInterval == 1);
        }