Code example #1
        /// <summary>Tests a given combination of volumes tolerated and volumes failed.</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        private void TestVolumeConfig(int volumesTolerated, int volumesFailed, bool expectedBPServiceState
                                      , bool manageDfsDirs)
        {
            Assume.AssumeTrue(!Runtime.GetProperty("os.name").StartsWith("Windows"));
            int dnIndex = 0;

            // Fail the current directory since invalid storage directory perms
            // get fixed up automatically on datanode startup.
            FilePath[] dirs = new FilePath[] {
                new FilePath(cluster.GetInstanceStorageDir(dnIndex, 0), "current"),
                new FilePath(cluster.GetInstanceStorageDir(dnIndex, 1), "current")
            };
            try
            {
                for (int i = 0; i < volumesFailed; i++)
                {
                    PrepareDirToFail(dirs[i]);
                }
                RestartDatanodes(volumesTolerated, manageDfsDirs);
                NUnit.Framework.Assert.AreEqual(expectedBPServiceState,
                    cluster.GetDataNodes()[0].IsBPServiceAlive(cluster.GetNamesystem().GetBlockPoolId()));
            }
            finally
            {
                foreach (FilePath dir in dirs)
                {
                    FileUtil.Chmod(dir.ToString(), "755");
                }
            }
        }
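
For context, here is a hedged sketch of how TestVolumeConfig might be driven and of the PrepareDirToFail helper it calls. The test methods, the tolerated/failed combinations, and the body of PrepareDirToFail are illustrative assumptions; only the TestVolumeConfig signature and the Chmod-based cleanup come from the listing above.

        // Hypothetical callers (assumed, not part of the listing): each case pairs a
        // number of tolerated volume failures with a number of actually failed volumes
        // and the expected liveness of the datanode's block pool service.
        public virtual void TestValidVolumesAtStartup()
        {
            TestVolumeConfig(0, 0, true, true);   // nothing failed, nothing tolerated -> alive
        }

        public virtual void TestVolumeAndTolerableConfiguration()
        {
            TestVolumeConfig(0, 1, false, true);  // one volume failed, none tolerated -> dead
            TestVolumeConfig(1, 1, true, true);   // one volume failed, one tolerated  -> alive
        }

        // Assumed shape of the PrepareDirToFail helper used above: drop all permissions
        // on the directory so the datanode cannot use it; the finally block in
        // TestVolumeConfig restores 755 afterwards.
        private void PrepareDirToFail(FilePath dir)
        {
            dir.Mkdirs();
            NUnit.Framework.Assert.AreEqual(0, FileUtil.Chmod(dir.ToString(), "000"));
        }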
Code example #2
        public virtual void TestShutdown()
        {
            if (Runtime.GetProperty("os.name").StartsWith("Windows"))
            {
                return;
            }
            // Bring up two more datanodes
            cluster.StartDataNodes(conf, 2, true, null, null);
            cluster.WaitActive();
            int      dnIndex    = 0;
            string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
            FilePath storageDir = cluster.GetInstanceStorageDir(dnIndex, 0);
            FilePath dir1       = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            storageDir = cluster.GetInstanceStorageDir(dnIndex, 1);
            FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            try
            {
                // make the data directories of the first datanode read-only
                NUnit.Framework.Assert.IsTrue("Couldn't chmod local vol", dir1.SetReadOnly());
                NUnit.Framework.Assert.IsTrue("Couldn't chmod local vol", dir2.SetReadOnly());
                // create files and make sure the first datanode goes down
                DataNode dn = cluster.GetDataNodes()[dnIndex];
                for (int i = 0; dn.IsDatanodeUp(); i++)
                {
                    Path fileName = new Path("/test.txt" + i);
                    DFSTestUtil.CreateFile(fs, fileName, 1024, (short)2, 1L);
                    DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                    fs.Delete(fileName, true);
                }
            }
            finally
            {
                // restore its old permission
                FileUtil.SetWritable(dir1, true);
                FileUtil.SetWritable(dir2, true);
            }
        }
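
The listings rely on fixture fields (conf, cluster, fs, dn_num) that are defined elsewhere. Below is a minimal sketch of what that setup could look like, assuming a MiniDFSCluster-based fixture; the configuration keys, values, and method names here are assumptions, not taken from the listings.

        // Assumed fixture state (declared elsewhere in the real test class): the
        // mini cluster, its configuration, a client FileSystem, and the datanode count.
        private Configuration conf;
        private MiniDFSCluster cluster;
        private FileSystem fs;
        private readonly int dn_num = 2;

        public virtual void SetUp()
        {
            conf = new HdfsConfiguration();
            // Small blocks and frequent heartbeats keep volume-failure tests fast
            // (key names follow the Sharpen naming convention and are assumptions).
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 512L);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(dn_num).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
        }

        public virtual void TearDown()
        {
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }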
Code example #3
        /// <summary>
        /// Looks for real blocks by counting *.meta files in all the storage dirs.
        /// </summary>
        /// <param name="map">map from block id to BlockLocs; updated with the number of files seen for each block</param>
        /// <returns>total number of block metadata files found</returns>
        private int CountRealBlocks(IDictionary<string, TestDataNodeVolumeFailure.BlockLocs> map)
        {
            int    total = 0;
            string bpid  = cluster.GetNamesystem().GetBlockPoolId();

            for (int i = 0; i < dn_num; i++)
            {
                for (int j = 0; j <= 1; j++)
                {
                    FilePath storageDir = cluster.GetInstanceStorageDir(i, j);
                    FilePath dir        = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                    if (dir == null)
                    {
                        System.Console.Out.WriteLine("dir is null for dn=" + i + " and data_dir=" + j);
                        continue;
                    }
                    IList <FilePath> res = MiniDFSCluster.GetAllBlockMetadataFiles(dir);
                    if (res == null)
                    {
                        System.Console.Out.WriteLine("res is null for dir = " + dir + " i=" + i + " and j="
                                                     + j);
                        continue;
                    }
                    //System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");
                    //int ii = 0;
                    foreach (FilePath f in res)
                    {
                        string s = f.GetName();
                        // cut off "blk_-" at the beginning and ".meta" at the end
                        NUnit.Framework.Assert.IsNotNull("Block file name should not be null", s);
                        string bid = Sharpen.Runtime.Substring(s, s.IndexOf("_") + 1, s.LastIndexOf("_"));
                        //System.out.println(ii++ + ". block " + s + "; id=" + bid);
                        TestDataNodeVolumeFailure.BlockLocs val = map[bid];
                        if (val == null)
                        {
                            val = new TestDataNodeVolumeFailure.BlockLocs(this);
                        }
                        val.num_files++; // one more file for the block
                        map[bid] = val;
                    }
                    //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
                    //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);
                    total += res.Count;
                }
            }
            return total;
        }
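
CountRealBlocks stores its per-block counts in a TestDataNodeVolumeFailure.BlockLocs value that is not shown. Below is a minimal sketch of that nested type, following the Sharpen enclosing-instance pattern visible in new TestDataNodeVolumeFailure.BlockLocs(this); the num_locs field is an assumption and is not used in the listing above.

        // Assumed shape of the nested BlockLocs counter used by CountRealBlocks.
        internal class BlockLocs
        {
            // number of *.meta block files seen for this block id (used above)
            internal int num_files = 0;
            // number of reported block locations (assumed companion counter, unused above)
            internal int num_locs = 0;

            internal BlockLocs(TestDataNodeVolumeFailure enclosing)
            {
                this._enclosing = enclosing;
            }

            private readonly TestDataNodeVolumeFailure _enclosing;
        }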