Example #1
0
        /// <summary>
        /// Runs "dfsadmin -refresh" against the NameNode with an identifier for
        /// which no refresh handler is registered, and expects a failure exit code.
        /// </summary>
        public virtual void TestInvalidIdentifier()
        {
            DFSAdmin dfsAdmin = new DFSAdmin(config);

            // Target the running NameNode, but name an identity nothing handles.
            string   nnHostPort = "localhost:" + cluster.GetNameNodePort();
            string[] cmdArgs    = new string[] { "-refresh", nnHostPort, "unregisteredIdentity" };
            int      result     = dfsAdmin.Run(cmdArgs);

            NUnit.Framework.Assert.AreEqual("DFSAdmin should fail due to no handler registered"
                                            , -1, result);
        }
Example #2
0
 // nothing
 /// <summary>Starts DFS and MR clusters, as specified in member-variable options.</summary>
 /// <remarks>
 /// Starts DFS and MR clusters, as specified in member-variable options. Also
 /// writes out configuration and details, if requested.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.IO.FileNotFoundException"/>
 /// <exception cref="Sharpen.URISyntaxException"/>
 public virtual void Start()
 {
     if (!noDFS)
     {
         dfs = new MiniDFSCluster.Builder(conf).NameNodePort(nnPort).NumDataNodes(numDataNodes).StartupOption(dfsOpts).Build();
         Log.Info("Started MiniDFSCluster -- namenode on port " + dfs.GetNameNodePort());
     }
     if (!noMR)
     {
         // Default filesystem URI: prefer the DFS cluster just started;
         // otherwise fall back to a unique local path.
         if (fs == null && dfs != null)
         {
             fs = dfs.GetFileSystem().GetUri().ToString();
         }
         else if (fs == null)
         {
             fs = "file:///tmp/minimr-" + Runtime.NanoTime();
         }
         FileSystem.SetDefaultUri(conf, new URI(fs));
         // Instruct the minicluster to use fixed ports, so user will know which
         // ports to use when communicating with the cluster.
         conf.SetBoolean(YarnConfiguration.YarnMiniclusterFixedPorts, true);
         conf.SetBoolean(JHAdminConfig.MrHistoryMiniclusterFixedPorts, true);
         conf.Set(YarnConfiguration.RmAddress, MiniYARNCluster.GetHostname() + ":" + this.rmPort);
         conf.Set(JHAdminConfig.MrHistoryAddress, MiniYARNCluster.GetHostname() + ":" + this.jhsPort);
         mr = MiniMRClientClusterFactory.Create(this.GetType(), numNodeManagers, conf);
         Log.Info("Started MiniMRCluster");
     }
     if (writeConfig != null)
     {
         // Close the stream even if WriteXml throws, so the file handle is
         // not leaked on failure.
         FileOutputStream fos = new FileOutputStream(new FilePath(writeConfig));
         try
         {
             conf.WriteXml(fos);
         }
         finally
         {
             fos.Close();
         }
     }
     if (writeDetails != null)
     {
         IDictionary<string, object> map = new SortedDictionary<string, object>();
         if (dfs != null)
         {
             map["namenode_port"] = dfs.GetNameNodePort();
         }
         if (mr != null)
         {
             // RmAddress is "host:port"; index 1 of the split is the port.
             map["resourcemanager_port"] = mr.GetConfig().Get(YarnConfiguration.RmAddress).Split(":")[1];
         }
         // Same leak guard as above for the details file.
         FileWriter fw = new FileWriter(new FilePath(writeDetails));
         try
         {
             fw.Write(new JSON().ToJSON(map));
         }
         finally
         {
             fw.Close();
         }
     }
 }
Example #3
0
 /// <summary>
 /// Brings up a zero-datanode HA mini-cluster and wires a DFSHAAdmin tool to
 /// it, capturing the tool's error output and recording the first NN's port.
 /// </summary>
 public virtual void Setup()
 {
     conf = new Configuration();

     MiniDFSNNTopology haTopology = MiniDFSNNTopology.SimpleHATopology();
     cluster = new MiniDFSCluster.Builder(conf).NnTopology(haTopology).NumDataNodes(0).Build();

     tool = new DFSHAAdmin();
     tool.SetConf(conf);
     tool.SetErrOut(new TextWriter(errOutBytes));

     cluster.WaitActive();
     nn1Port = cluster.GetNameNodePort(0);
 }
 /// <summary>
 /// Starts a mini DFS cluster and caches the handles the tests use: the
 /// filesystem, a DFSClient to the NameNode, the first datanode, its block
 /// pool id, and its registration for that pool.
 /// </summary>
 public virtual void StartUpCluster()
 {
     conf    = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).Build();
     fs      = cluster.GetFileSystem();

     IPEndPoint nnAddr = new IPEndPoint("localhost", cluster.GetNameNodePort());
     client = new DFSClient(nnAddr, cluster.GetConfiguration(0));

     dn0    = cluster.GetDataNodes()[0];
     poolId = cluster.GetNamesystem().GetBlockPoolId();
     dn0Reg = dn0.GetDNRegistrationForBP(poolId);
 }
        /// <summary>
        /// Creates a single-block file, corrupts one replica by changing its length
        /// by <paramref name="lenDelta"/>, then raises the replication factor and
        /// polls the NameNode until the corruption is detected (truncation) or the
        /// block is re-replicated (extension). Cleans up the file at the end.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        private void ChangeBlockLen(MiniDFSCluster cluster, int lenDelta)
        {
            Path       fileName          = new Path("/file1");
            short      ReplicationFactor = (short)1;
            FileSystem fs      = cluster.GetFileSystem();
            int        fileLen = fs.GetConf().GetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 512);

            // One-checksum-chunk file with a single replica so exactly one
            // datanode holds the block we are about to corrupt.
            DFSTestUtil.CreateFile(fs, fileName, fileLen, ReplicationFactor, 0);
            DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);

            // Change the length of a replica
            // (only one datanode actually has it; stop at the first success).
            for (int i = 0; i < cluster.GetDataNodes().Count; i++)
            {
                if (DFSTestUtil.ChangeReplicaLength(cluster, block, i, lenDelta))
                {
                    break;
                }
            }
            // increase the file's replication factor
            fs.SetReplication(fileName, (short)(ReplicationFactor + 1));
            // block replication triggers corrupt block detection
            DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort
                                                                   ()), fs.GetConf());
            LocatedBlocks blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString
                                                                                 (), 0, fileLen);

            if (lenDelta < 0)
            {
                // replica truncated
                // Poll until the NameNode both marks the block corrupt and drops
                // back to the original location count.
                while (!blocks.Get(0).IsCorrupt() || ReplicationFactor != blocks.Get(0).GetLocations
                           ().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen
                                                                       );
                }
            }
            else
            {
                // no corruption detected; block replicated
                // Poll until the extra replica requested above appears.
                while (ReplicationFactor + 1 != blocks.Get(0).GetLocations().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen
                                                                       );
                }
            }
            fs.Delete(fileName, true);
        }
Example #6
0
 // nothing
 /// <summary>Starts DFS as specified in member-variable options.</summary>
 /// <remarks>
 /// Starts DFS as specified in member-variable options. Also writes out
 /// configuration and details, if requested.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.IO.FileNotFoundException"/>
 public virtual void Start()
 {
     dfs = new MiniDFSCluster.Builder(conf).NameNodePort(nameNodePort).NumDataNodes(numDataNodes).StartupOption(dfsOpts).Format(format).Build();
     dfs.WaitActive();
     Log.Info("Started MiniDFSCluster -- namenode on port " + dfs.GetNameNodePort());
     if (writeConfig != null)
     {
         // Close the stream even if WriteXml throws, so the file handle is
         // not leaked on failure.
         FileOutputStream fos = new FileOutputStream(new FilePath(writeConfig));
         try
         {
             conf.WriteXml(fos);
         }
         finally
         {
             fos.Close();
         }
     }
     if (writeDetails != null)
     {
         IDictionary<string, object> map = new SortedDictionary<string, object>();
         if (dfs != null)
         {
             map["namenode_port"] = dfs.GetNameNodePort();
         }
         // Same leak guard as above for the details file.
         FileWriter fw = new FileWriter(new FilePath(writeDetails));
         try
         {
             fw.Write(new JSON().ToJSON(map));
         }
         finally
         {
             fw.Close();
         }
     }
 }
        /// <summary>
        /// Brings up a simulated-storage mini cluster in which one datanode
        /// (RoNodeIndex) exposes READ_ONLY_SHARED storage, creates a one-block
        /// file on a normal node, injects that block into the read-only node,
        /// and waits until the NameNode reports both locations.
        /// </summary>
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            // Per-datanode configuration overlays: only the read-only node gets
            // a non-default storage state.
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    // The guard above already selects the read-only node, so the
                    // value is unconditionally ReadOnlyShared (the original's
                    // redundant conditional on i == RoNodeIndex is dropped).
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, DatanodeStorage.State.ReadOnlyShared);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
            // Sanity-check that every datanode reports the expected storage state.
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                                     i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
        /// <summary>
        /// Verifies that a non-idempotent RPC still succeeds via failover when the
        /// connection to the first NameNode times out at the socket level.
        /// </summary>
        public virtual void TestFailoverOnConnectTimeout()
        {
            // Install a socket factory that injects ConnectTimeoutException for
            // connections to the first NN's port.
            conf.SetClass(CommonConfigurationKeysPublic.HadoopRpcSocketFactoryClassDefaultKey,
                          typeof(TestDFSClientFailover.InjectingSocketFactory), typeof(SocketFactory));
            TestDFSClientFailover.InjectingSocketFactory.portToInjectOn = cluster.GetNameNodePort(0);

            FileSystem failoverFs = HATestUtil.ConfigureFailoverFs(cluster, conf);

            // Make the second NN the active one.
            cluster.ShutdownNameNode(0);
            cluster.TransitionToActive(1);

            // Call a non-idempotent method (create), and ensure the failover of
            // the call proceeds successfully.
            IOUtils.CloseStream(failoverFs.Create(TestFile));
        }
Example #9
0
        /// <summary>
        /// Exercises the datanode block-replacement protocol: creates a one-block
        /// file replicated across three racks, adds a fourth datanode on an
        /// existing rack, then drives four replaceBlock cases (bad proxy, duplicate
        /// destination, the valid move, and an invalid delete hint) and checks the
        /// resulting block locations each time.
        /// </summary>
        public virtual void TestBlockReplacement()
        {
            Configuration Conf = new HdfsConfiguration();

            string[] InitialRacks      = new string[] { "/RACK0", "/RACK1", "/RACK2" };
            string[] NewRacks          = new string[] { "/RACK2" };
            short    ReplicationFactor = (short)3;
            int      DefaultBlockSize  = 1024;
            Random   r = new Random();

            // Small block size and a fast block-report interval so the test's
            // location checks converge quickly.
            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            Conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            Conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 500);
            cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor).Racks(
                InitialRacks).Build();
            try
            {
                cluster.WaitActive();
                FileSystem fs       = cluster.GetFileSystem();
                Path       fileName = new Path("/tmp.txt");
                // create a file with one block
                DFSTestUtil.CreateFile(fs, fileName, DefaultBlockSize, ReplicationFactor, r.NextLong
                                           ());
                DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
                // get all datanodes
                IPEndPoint           addr          = new IPEndPoint("localhost", cluster.GetNameNodePort());
                DFSClient            client        = new DFSClient(addr, Conf);
                IList <LocatedBlock> locatedBlocks = client.GetNamenode().GetBlockLocations("/tmp.txt"
                                                                                            , 0, DefaultBlockSize).GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(1, locatedBlocks.Count);
                LocatedBlock   block    = locatedBlocks[0];
                DatanodeInfo[] oldNodes = block.GetLocations();
                NUnit.Framework.Assert.AreEqual(oldNodes.Length, 3);
                ExtendedBlock b = block.GetBlock();
                // add a fourth datanode to the cluster
                cluster.StartDataNodes(Conf, 1, true, null, NewRacks);
                cluster.WaitActive();
                DatanodeInfo[] datanodes = client.DatanodeReport(HdfsConstants.DatanodeReportType
                                                                 .All);
                // find out the new node
                // (the one in the report that matches none of the original three)
                DatanodeInfo newNode = null;
                foreach (DatanodeInfo node in datanodes)
                {
                    bool isNewNode = true;
                    foreach (DatanodeInfo oldNode in oldNodes)
                    {
                        if (node.Equals(oldNode))
                        {
                            isNewNode = false;
                            break;
                        }
                    }
                    if (isNewNode)
                    {
                        newNode = node;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsTrue(newNode != null);
                // Partition the old nodes: the one sharing newNode's rack becomes
                // the replacement "source"; the other two are candidate proxies.
                DatanodeInfo         source  = null;
                AList <DatanodeInfo> proxies = new AList <DatanodeInfo>(2);
                foreach (DatanodeInfo node_1 in datanodes)
                {
                    if (node_1 != newNode)
                    {
                        if (node_1.GetNetworkLocation().Equals(newNode.GetNetworkLocation()))
                        {
                            source = node_1;
                        }
                        else
                        {
                            proxies.AddItem(node_1);
                        }
                    }
                }
                //current state: the newNode is on RACK2, and "source" is the other dn on RACK2.
                //the two datanodes on RACK0 and RACK1 are in "proxies".
                //"source" and both "proxies" all contain the block, while newNode doesn't yet.
                NUnit.Framework.Assert.IsTrue(source != null && proxies.Count == 2);
                // start to replace the block
                // case 1: proxySource does not contain the block
                Log.Info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, newNode, proxies[0]));
                // case 2: destination already contains the block
                Log.Info("Testcase 2: Destination " + proxies[1] + " contains the block " + b);
                NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, proxies[0], proxies[1]));
                // case 3: correct case
                Log.Info("Testcase 3: Source=" + source + " Proxy=" + proxies[0] + " Destination="
                         + newNode);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, source, proxies[0], newNode));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies and newNode
                // but not source
                CheckBlocks(new DatanodeInfo[] { newNode, proxies[0], proxies[1] }, fileName.ToString
                                (), DefaultBlockSize, ReplicationFactor, client);
                // case 4: proxies.get(0) is not a valid del hint
                // expect either source or newNode replica to be deleted instead
                Log.Info("Testcase 4: invalid del hint " + proxies[0]);
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, proxies[0], proxies[1], source));
                // after cluster has time to resolve the over-replication,
                // block locations should contain two proxies,
                // and either source or newNode, but not both.
                CheckBlocks(Sharpen.Collections.ToArray(proxies, new DatanodeInfo[proxies.Count])
                            , fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #10
0
        /// <summary>
        /// Decommissions the cluster's datanodes one at a time and verifies the
        /// reported decommissioning status after each step — both via the
        /// DatanodeManager and via dfsadmin — using one complete file and one
        /// file left with an open (under-construction) block. Finally clears the
        /// exclude file to restore the nodes.
        /// </summary>
        public virtual void TestDecommissionStatus()
        {
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live
                                                        );
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", 2, info.Length);
            DistributedFileSystem fileSys = cluster.GetFileSystem();
            DFSAdmin admin    = new DFSAdmin(cluster.GetConfiguration(0));
            short    replicas = numDatanodes;
            //
            // Decommission one node. Verify the decommission status
            //
            Path file1 = new Path("decommission.dat");

            WriteFile(fileSys, file1, replicas);
            // file2 is deliberately left open so one block stays under
            // construction during decommissioning.
            Path file2             = new Path("decommission1.dat");
            FSDataOutputStream st1 = WriteIncompleteFile(fileSys, file2, replicas);

            // Force block reports so the NameNode has current replica info.
            foreach (DataNode d in cluster.GetDataNodes())
            {
                DataNodeTestUtils.TriggerBlockReport(d);
            }
            FSNamesystem    fsn = cluster.GetNamesystem();
            DatanodeManager dm  = fsn.GetBlockManager().GetDatanodeManager();

            for (int iteration = 0; iteration < numDatanodes; iteration++)
            {
                string downnode = DecommissionNode(fsn, client, localFileSys, iteration);
                dm.RefreshNodes(conf);
                decommissionedNodes.AddItem(downnode);
                BlockManagerTestUtil.RecheckDecommissionState(dm);
                IList <DatanodeDescriptor> decommissioningNodes = dm.GetDecommissioningNodes();
                if (iteration == 0)
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 1);
                    DatanodeDescriptor decommNode = decommissioningNodes[0];
                    CheckDecommissionStatus(decommNode, 3, 0, 1);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 1), fileSys, admin
                                                    );
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 2);
                    DatanodeDescriptor decommNode1 = decommissioningNodes[0];
                    DatanodeDescriptor decommNode2 = decommissioningNodes[1];
                    // This one is still 3,3,1 since it passed over the UC block
                    // earlier, before node 2 was decommed
                    CheckDecommissionStatus(decommNode1, 3, 3, 1);
                    // This one is 4,4,2 since it has the full state
                    CheckDecommissionStatus(decommNode2, 4, 4, 2);
                    CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 2), fileSys, admin
                                                    );
                }
            }
            // Call refreshNodes on FSNamesystem with empty exclude file.
            // This will remove the datanodes from decommissioning list and
            // make them available again.
            WriteConfigFile(localFileSys, excludeFile, null);
            dm.RefreshNodes(conf);
            st1.Close();
            CleanupFile(fileSys, file1);
            CleanupFile(fileSys, file2);
        }
Example #11
0
        /// <summary>Get the DFSClient.</summary>
        /// <returns>A fresh client connected to the cluster's NameNode on localhost.</returns>
        /// <exception cref="System.IO.IOException"/>
        public virtual DFSClient GetDFSClient()
        {
            int nnPort = cluster.GetNameNodePort();

            return(new DFSClient(new IPEndPoint("localhost", nnPort), conf));
        }
Example #12
0
 // Formats the given cluster's NameNode endpoint as a "host:port" string
 // using the loopback address.
 private string GetHostPortForNN(MiniDFSCluster cluster)
 {
     int nnPort = cluster.GetNameNodePort();

     return("127.0.0.1:" + nnPort);
 }