Example #1
        public virtual void TestTimesAtClose()
        {
            Configuration conf        = new HdfsConfiguration();
            int           MaxIdleTime = 2000; // 2s
            int           replicas    = 1;

            // parameter initialization
            conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();

            cluster.WaitActive();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
            FileSystem fileSys = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue(fileSys is DistributedFileSystem);
            try
            {
                // create a new file and write to it
                Path file1             = new Path("/simple.dat");
                FSDataOutputStream stm = WriteFile(fileSys, file1, replicas);
                System.Console.Out.WriteLine("Created and wrote file simple.dat");
                FileStatus statBeforeClose  = fileSys.GetFileStatus(file1);
                long       mtimeBeforeClose = statBeforeClose.GetModificationTime();
                string     mdateBeforeClose = dateForm.Format(Sharpen.Extensions.CreateDate(mtimeBeforeClose));
                System.Console.Out.WriteLine("mtime on " + file1 + " before close is " + mdateBeforeClose
                                             + " (" + mtimeBeforeClose + ")");
                NUnit.Framework.Assert.IsTrue(mtimeBeforeClose != 0);
                //close file after writing
                stm.Close();
                System.Console.Out.WriteLine("Closed file.");
                FileStatus statAfterClose  = fileSys.GetFileStatus(file1);
                long       mtimeAfterClose = statAfterClose.GetModificationTime();
                string     mdateAfterClose = dateForm.Format(Sharpen.Extensions.CreateDate(mtimeAfterClose));
                System.Console.Out.WriteLine("mtime on " + file1 + " after close is " + mdateAfterClose
                                             + " (" + mtimeAfterClose + ")");
                NUnit.Framework.Assert.IsTrue(mtimeAfterClose != 0);
                NUnit.Framework.Assert.IsTrue(mtimeBeforeClose != mtimeAfterClose);
                CleanupFile(fileSys, file1);
            }
            catch (IOException e)
            {
                info = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
                PrintDatanodeReport(info);
                throw;
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
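The test above leans on two fixture helpers, WriteFile and CleanupFile, that are not shown. A minimal sketch of what they might look like; the block size, buffer size, and payload here are illustrative assumptions, not the fixture's actual values:

        // Hypothetical sketch of the helpers used by TestTimesAtClose; the real
        // implementations live elsewhere in the test class.
        private FSDataOutputStream WriteFile(FileSystem fileSys, Path name, int repl)
        {
            // Create the file with the requested replication and write a small payload.
            FSDataOutputStream stm = fileSys.Create(name, true, 4096, (short)repl, 65536L);
            byte[] buffer = new byte[1024];
            for (int i = 0; i < buffer.Length; i++)
            {
                buffer[i] = (byte)('a' + i % 26);
            }
            stm.Write(buffer);
            return stm;
        }

        private void CleanupFile(FileSystem fileSys, Path name)
        {
            NUnit.Framework.Assert.IsTrue(fileSys.Exists(name));
            fileSys.Delete(name, true);
            NUnit.Framework.Assert.IsTrue(!fileSys.Exists(name));
        }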
Example #2
        /// <exception cref="System.Exception"/>
        private void TestStatistics(bool isShortCircuit)
        {
            Assume.AssumeTrue(DomainSocket.GetLoadingFailureReason() == null);
            HdfsConfiguration        conf    = new HdfsConfiguration();
            TemporarySocketDirectory sockDir = null;

            if (isShortCircuit)
            {
                DFSInputStream.tcpReadsDisabledForTesting = true;
                sockDir = new TemporarySocketDirectory();
                conf.Set(DFSConfigKeys.DfsDomainSocketPathKey,
                         new FilePath(sockDir.GetDir(), "TestStatisticsForLocalRead.%d.sock").GetAbsolutePath());
                conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
                DomainSocket.DisableBindPathValidation();
            }
            else
            {
                conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            }
            MiniDFSCluster    cluster    = null;
            Path              TestPath   = new Path("/a");
            long              RandomSeed = 4567L;
            FSDataInputStream fsIn       = null;

            byte[]     original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
            FileSystem fs       = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                       (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: " + e);
                }
                catch (Exception e)
                {
                    // The broad Exception clause (Sharpen's translation of InterruptedException)
                    // must come after the more specific TimeoutException clause, or the
                    // latter is unreachable and the file does not compile.
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: " + e);
                }
                fsIn = fs.Open(TestPath);
                IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength);
                HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
                NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                                dfsIn.GetReadStatistics().GetTotalBytesRead());
                NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                                dfsIn.GetReadStatistics().GetTotalLocalBytesRead());
                if (isShortCircuit)
                {
                    NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                                    dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead());
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(0, dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead());
                }
                fsIn.Close();
                fsIn = null;
            }
            finally
            {
                DFSInputStream.tcpReadsDisabledForTesting = false;
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (sockDir != null)
                {
                    sockDir.Close();
                }
            }
        }
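The interesting part of this harness is the short-circuit configuration branch. Condensed into a standalone helper, the setup looks like the following sketch; the socket file name is illustrative:

        // Sketch: the minimal configuration for short-circuit local reads,
        // condensed from the isShortCircuit branch above.
        private static HdfsConfiguration ShortCircuitConf(TemporarySocketDirectory sockDir)
        {
            HdfsConfiguration conf = new HdfsConfiguration();
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey,
                     new FilePath(sockDir.GetDir(), "shortcircuit.%d.sock").GetAbsolutePath());
            return conf;
        }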
Example #3
        public virtual void TestPendingReplicationRetry()
        {
            MiniDFSCluster cluster      = null;
            int            numDataNodes = 4;
            string         testFile     = "/replication-test-file";
            Path           testPath     = new Path(testFile);

            byte[] buffer = new byte[1024];
            for (int i = 0; i < buffer.Length; i++)
            {
                buffer[i] = (byte)('1');
            }
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes));
                //first time format
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
                OutputStream @out = cluster.GetFileSystem().Create(testPath);
                @out.Write(buffer);
                @out.Close();
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
                // get first block of the file.
                ExtendedBlock block = dfsClient.GetNamenode().GetBlockLocations(testFile, 0, long.MaxValue).Get(0).GetBlock();
                cluster.Shutdown();
                for (int i_1 = 0; i_1 < 25; i_1++)
                {
                    buffer[i_1] = (byte)('0');
                }
                int fileCount = 0;
                // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
                for (int dnIndex = 0; dnIndex < 3; dnIndex++)
                {
                    FilePath blockFile = cluster.GetBlockFile(dnIndex, block);
                    Log.Info("Checking for file " + blockFile);
                    if (blockFile != null && blockFile.Exists())
                    {
                        if (fileCount == 0)
                        {
                            Log.Info("Deleting file " + blockFile);
                            NUnit.Framework.Assert.IsTrue(blockFile.Delete());
                        }
                        else
                        {
                            // corrupt it.
                            Log.Info("Corrupting file " + blockFile);
                            long len = blockFile.Length();
                            NUnit.Framework.Assert.IsTrue(len > 50);
                            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                            try
                            {
                                blockOut.Seek(len / 3);
                                blockOut.Write(buffer, 0, 25);
                            }
                            finally
                            {
                                blockOut.Close();
                            }
                        }
                        fileCount++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(3, fileCount);

                /* Restart the MiniDFSCluster with more datanodes, since once a writeBlock
                 * to a datanode fails, the same block cannot be written to it
                 * immediately. In our case some replication attempts will fail.
                 */
                Log.Info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
                conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes));
                conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
                conf.Set("dfs.datanode.block.write.timeout.sec", Sharpen.Extensions.ToString(5));
                conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.75f");
                // only 3 copies exist
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes * 2).Format(false).Build();
                cluster.WaitActive();
                dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
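WaitForBlockReplication is another fixture helper that is not shown. A rough sketch of the polling loop it presumably runs; the names and timeout semantics are assumptions, with -1 treated as "wait forever":

        // Rough sketch of a WaitForBlockReplication-style helper: poll the
        // namenode until every block of the file reports at least the expected
        // number of replica locations, or the (optional) timeout expires.
        private void WaitForBlockReplication(string filename, ClientProtocol namenode,
                                             int expected, long maxWaitSec)
        {
            long start = Time.Now();
            while (true)
            {
                bool replOk = true;
                LocatedBlocks blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    if (block.GetLocations().Length < expected)
                    {
                        replOk = false;
                        break;
                    }
                }
                if (replOk)
                {
                    return;
                }
                if (maxWaitSec > 0 && (Time.Now() - start) > maxWaitSec * 1000)
                {
                    throw new IOException("Timed out waiting for replication of " + filename);
                }
                Sharpen.Thread.Sleep(500);
            }
        }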
Example #4
        // Root scratch directory on local filesystem
        // The singleton master storage directory for Namenode
        // A checksum of the contents in namenodeStorage directory
        // The namespaceId of the namenodeStorage directory
        // The clusterId of the namenodeStorage directory
        // The blockpoolId of the namenodeStorage directory
        // The fsscTime of the namenodeStorage directory
        // The singleton master storage directory for Datanode
        // A checksum of the contents in datanodeStorage directory
        // A checksum of the contents in blockpool storage directory
        // A checksum of the contents in blockpool finalize storage directory
        // A checksum of the contents in blockpool rbw storage directory
        /// <summary>Initialize the data structures used by this class.</summary>
        /// <remarks>
        /// Initialize the data structures used by this class.
        /// IMPORTANT NOTE: This method must be called once before calling
        /// any other public method on this class.
        /// <p>
        /// Creates a singleton master populated storage
        /// directory for a Namenode (contains edits, fsimage,
        /// version, and time files) and a Datanode (contains version and
        /// block files).  This can be a lengthy operation.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public static void Initialize()
        {
            CreateEmptyDirs(new string[] { TestRootDir.ToString() });
            Configuration config = new HdfsConfiguration();

            config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, namenodeStorage.ToString());
            config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, namenodeStorage.ToString());
            config.Set(DFSConfigKeys.DfsDatanodeDataDirKey, datanodeStorage.ToString());
            MiniDFSCluster cluster = null;
            string         bpid    = null;

            try
            {
                // format data-node
                CreateEmptyDirs(new string[] { datanodeStorage.ToString() });
                // format and start NameNode and start DataNode
                DFSTestUtil.FormatNameNode(config);
                cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1)
                          .StartupOption(HdfsServerConstants.StartupOption.Regular)
                          .Format(false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Build();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                namenodeStorageNamespaceID = namenode.VersionRequest().GetNamespaceID();
                namenodeStorageFsscTime    = namenode.VersionRequest().GetCTime();
                namenodeStorageClusterID   = namenode.VersionRequest().GetClusterID();
                namenodeStorageBlockPoolID = namenode.VersionRequest().GetBlockPoolID();
                FileSystem fs      = FileSystem.Get(config);
                Path       baseDir = new Path("/TestUpgrade");
                fs.Mkdirs(baseDir);
                // write some files
                int    bufferSize = 4096;
                byte[] buffer     = new byte[bufferSize];
                for (int i = 0; i < bufferSize; i++)
                {
                    buffer[i] = unchecked ((byte)((byte)('0') + i % 50));
                }
                WriteFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
                WriteFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
                // save image
                namenode.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
                namenode.SaveNamespace();
                namenode.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
                // write more files
                WriteFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
                WriteFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
                bpid = cluster.GetNamesystem(0).GetBlockPoolId();
            }
            finally
            {
                // shutdown
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                FileUtil.FullyDelete(new FilePath(namenodeStorage, "in_use.lock"));
                FileUtil.FullyDelete(new FilePath(datanodeStorage, "in_use.lock"));
            }
            namenodeStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.NameNode,
                                                       new FilePath(namenodeStorage, "current"), false);
            FilePath dnCurDir = new FilePath(datanodeStorage, "current");

            datanodeStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                       dnCurDir, false);
            FilePath bpCurDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir), "current");

            blockPoolStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                        bpCurDir, false);
            FilePath bpCurFinalizeDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir),
                                                     "current/" + DataStorage.StorageDirFinalized);

            blockPoolFinalizedStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                                 bpCurFinalizeDir, true);
            FilePath bpCurRbwDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir),
                                                "current/" + DataStorage.StorageDirRbw);

            blockPoolRbwStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                           bpCurRbwDir, false);
        }
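Per the remarks, Initialize() must run exactly once before any other public method on the class. In an NUnit fixture that contract might be honored with wiring like this hypothetical sketch (not from the original):

        // Hypothetical one-time wiring for the contract described in the remarks:
        // Initialize() runs once, before any other public method is used.
        [NUnit.Framework.TestFixtureSetUp]
        public void SetUpOnce()
        {
            Initialize();
        }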
Example #5
        /// <exception cref="System.IO.IOException"/>
        public virtual void RunBlockReaderLocalTest(TestBlockReaderLocal.BlockReaderLocalTest test,
                                                    bool checksum, long readahead)
        {
            Assume.AssumeThat(DomainSocket.GetLoadingFailureReason(), CoreMatchers.EqualTo(null));
            MiniDFSCluster    cluster = null;
            HdfsConfiguration conf    = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, !checksum);
            conf.SetLong(DFSConfigKeys.DfsBytesPerChecksumKey,
                         TestBlockReaderLocal.BlockReaderLocalTest.BytesPerChecksum);
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
            conf.SetLong(DFSConfigKeys.DfsClientCacheReadahead, readahead);
            test.SetConfiguration(conf);
            FileInputStream   dataIn           = null;
            FileInputStream   metaIn           = null;
            Path              TestPath         = new Path("/a");
            long              RandomSeed       = 4567L;
            BlockReaderLocal  blockReaderLocal = null;
            FSDataInputStream fsIn             = null;

            byte[]           original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
            FileSystem       fs       = null;
            ShortCircuitShm  shm      = null;
            RandomAccessFile raf      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                       (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: " + e);
                }
                catch (Exception e)
                {
                    // As above: the broad Exception clause (translated InterruptedException)
                    // must follow the more specific TimeoutException clause.
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: " + e);
                }
                fsIn = fs.Open(TestPath);
                IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength);
                fsIn.Close();
                fsIn = null;
                ExtendedBlock     block             = DFSTestUtil.GetFirstBlock(fs, TestPath);
                FilePath          dataFile          = cluster.GetBlockFile(0, block);
                FilePath          metaFile          = cluster.GetBlockMetadataFile(0, block);
                ShortCircuitCache shortCircuitCache = ClientContext.GetFromConf(conf).GetShortCircuitCache();
                cluster.Shutdown();
                cluster = null;
                test.Setup(dataFile, checksum);
                FileInputStream[] streams = new FileInputStream[] { new FileInputStream(dataFile),
                                                                    new FileInputStream(metaFile) };
                dataIn = streams[0];
                metaIn = streams[1];
                ExtendedBlockId key = new ExtendedBlockId(block.GetBlockId(), block.GetBlockPoolId());
                raf = new RandomAccessFile(new FilePath(sockDir.GetDir().GetAbsolutePath(),
                                                        UUID.RandomUUID().ToString()), "rw");
                raf.SetLength(8192);
                FileInputStream shmStream = new FileInputStream(raf.GetFD());
                shm = new ShortCircuitShm(ShortCircuitShm.ShmId.CreateRandom(), shmStream);
                ShortCircuitReplica replica = new ShortCircuitReplica(key, dataIn, metaIn,
                                                                      shortCircuitCache, Time.Now(),
                                                                      shm.AllocAndRegisterSlot(ExtendedBlockId.FromExtendedBlock(block)));
                blockReaderLocal = new BlockReaderLocal.Builder(new DFSClient.Conf(conf))
                                   .SetFilename(TestPath.GetName())
                                   .SetBlock(block)
                                   .SetShortCircuitReplica(replica)
                                   .SetCachingStrategy(new CachingStrategy(false, readahead))
                                   .SetVerifyChecksum(checksum)
                                   .Build();
                dataIn = null;
                metaIn = null;
                test.DoTest(blockReaderLocal, original);
                // BlockReaderLocal should not alter the file position.
                NUnit.Framework.Assert.AreEqual(0, streams[0].GetChannel().Position());
                NUnit.Framework.Assert.AreEqual(0, streams[1].GetChannel().Position());
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (dataIn != null)
                {
                    dataIn.Close();
                }
                if (metaIn != null)
                {
                    metaIn.Close();
                }
                if (blockReaderLocal != null)
                {
                    blockReaderLocal.Close();
                }
                if (shm != null)
                {
                    shm.Free();
                }
                if (raf != null)
                {
                    raf.Close();
                }
            }
        }
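The harness is parameterized by a BlockReaderLocalTest strategy object whose hooks (SetConfiguration, Setup, DoTest) are driven above. A bare-bones implementation might look like the following sketch, assuming the base class supplies no-op defaults for the hooks that are not overridden:

        // Sketch of a concrete test strategy for RunBlockReaderLocalTest. The
        // hook signatures are inferred from the calls above; assume the base
        // class provides no-op defaults for Setup and SetConfiguration.
        private class SimpleReadTest : TestBlockReaderLocal.BlockReaderLocalTest
        {
            public override void DoTest(BlockReaderLocal reader, byte[] original)
            {
                // Read the whole block back through the local reader and compare.
                byte[] buf = new byte[original.Length];
                int total = 0;
                while (total < buf.Length)
                {
                    int n = reader.Read(buf, total, buf.Length - total);
                    NUnit.Framework.Assert.IsTrue(n > 0);
                    total += n;
                }
                NUnit.Framework.Assert.AreEqual(original, buf);
            }
        }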
Example #6
        public virtual void TestSimpleAppend()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                {
                    // test appending to a file.
                    // create a new file.
                    Path file1             = new Path("/simpleAppend.dat");
                    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                    System.Console.Out.WriteLine("Created file simpleAppend.dat");
                    // write to file
                    int mid = 186; // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
                    stm.Write(fileContents, 0, mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed first part of file.");
                    // write to file
                    int mid2 = 607; // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + (mid2 - mid) + " bytes to file " + file1);
                    stm = fs.Append(file1);
                    stm.Write(fileContents, mid, mid2 - mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // write the remainder of the file
                    stm = fs.Append(file1);
                    // ensure getPos is set to reflect existing size of the file
                    NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
                    System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                                                 + file1);
                    stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
                    System.Console.Out.WriteLine("Written second part of file");
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // verify that entire file is good
                    AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
                }
                {
                    // test appending to a non-existent file.
                    FSDataOutputStream @out = null;
                    try
                    {
                        @out = fs.Append(new Path("/non-existing.dat"));
                        NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
                    }
                    catch (FileNotFoundException fnfe)
                    {
                        System.Console.Out.WriteLine("Good: got " + fnfe);
                        Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
                {
                    // test append permission.
                    //set root to all writable
                    Path root = new Path("/");
                    fs.SetPermission(root, new FsPermission((short)0x1ff));
                    fs.Close();
                    // login as a different user
                    UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
                    string username = "******";
                    string group    = "testappendgroup";
                    NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
                    NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(group));
                    UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username,
                                                                                                new string[] { group });
                    fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
                    // create a file
                    Path dir = new Path(root, GetType().Name);
                    Path foo = new Path(dir, "foo.dat");
                    FSDataOutputStream @out = null;
                    int offset = 0;
                    try
                    {
                        @out = fs.Create(foo);
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // change dir and foo to minimal permissions.
                    fs.SetPermission(dir, new FsPermission((short)0x40));
                    fs.SetPermission(foo, new FsPermission((short)0x80));
                    // try append, should succeed
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo);
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // change dir and foo to all but no write on foo.
                    fs.SetPermission(foo, new FsPermission((short)0x17f));
                    fs.SetPermission(dir, new FsPermission((short)0x1ff));
                    // try append, should fail
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo);
                        NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
                    }
                    catch (AccessControlException ace)
                    {
                        System.Console.Out.WriteLine("Good: got " + ace);
                        Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Exception :" + e);
                throw;
            }
            catch (Exception e)
            {
                System.Console.Out.WriteLine("Throwable :" + e);
                Sharpen.Runtime.PrintStackTrace(e);
                throw new IOException("Throwable : " + e);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
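A note on the hex permission literals in the third block above: FsPermission takes a POSIX mode, so they decode as follows (the octal/rwx readings are simple arithmetic, added here for clarity):

        // Decoding the hex permission masks used in the append-permission test:
        FsPermission worldWritable = new FsPermission((short)0x1ff); // 0777 rwxrwxrwx: root open to all
        FsPermission traverseOnly  = new FsPermission((short)0x40);  // 0100 --x------: dir only traversable by owner
        FsPermission writeOnly     = new FsPermission((short)0x80);  // 0200 -w-------: foo only writable (appendable) by owner
        FsPermission noOwnerWrite  = new FsPermission((short)0x17f); // 0577 r-xrwxrwx: everything except owner write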
Example #7
        public virtual void TestInjection()
        {
            MiniDFSCluster cluster  = null;
            string         testFile = "/replication-test-file";
            Path           testPath = new Path(testFile);

            byte[] buffer = new byte[1024];
            for (int i = 0; i < buffer.Length; i++)
            {
                buffer[i] = (byte)('1');
            }
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes));
                conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, checksumSize);
                SimulatedFSDataset.SetFactory(conf);
                //first time format
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                string    bpid      = cluster.GetNamesystem().GetBlockPoolId();
                DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
                WriteFile(cluster.GetFileSystem(), testPath, numDataNodes);
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, 20);
                IList<IDictionary<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.GetAllBlockReports(bpid);
                cluster.Shutdown();
                cluster = null;

                /* Restart the MiniDFSCluster with more datanodes, since once a writeBlock
                 * to a datanode fails, the same block cannot be written to it
                 * immediately. In our case some replication attempts will fail.
                 */
                Log.Info("Restarting minicluster");
                conf = new HdfsConfiguration();
                SimulatedFSDataset.SetFactory(conf);
                conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.0f");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes * 2).Format(false).Build();
                cluster.WaitActive();
                ICollection<Block> uniqueBlocks = new HashSet<Block>();
                foreach (IDictionary<DatanodeStorage, BlockListAsLongs> map in blocksList)
                {
                    foreach (BlockListAsLongs blockList in map.Values)
                    {
                        foreach (Block b in blockList)
                        {
                            uniqueBlocks.AddItem(new Block(b));
                        }
                    }
                }
                // Insert all the blocks in the first data node
                Log.Info("Inserting " + uniqueBlocks.Count + " blocks");
                cluster.InjectBlocks(0, uniqueBlocks, null);
                dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #8
        /// <exception cref="System.Exception"/>
        public static void Main(string[] args)
        {
            int           numDataNodes            = 0;
            int           numRacks                = 0;
            bool          inject                  = false;
            long          startingBlockId         = 1;
            int           numBlocksPerDNtoInject  = 0;
            int           replication             = 1;
            bool          checkDataNodeAddrConfig = false;
            long          simulatedCapacityPerDn  = SimulatedFSDataset.DefaultCapacity;
            string        bpid = null;
            Configuration conf = new HdfsConfiguration();

            for (int i = 0; i < args.Length; i++)
            {
                // parse command line
                if (args[i].Equals("-n"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("missing number of nodes");
                    }
                    numDataNodes = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-racks"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing number of racks");
                    }
                    numRacks = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-r"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing replication factor");
                    }
                    replication = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-d"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing datanode dirs parameter");
                    }
                    dataNodeDirs = args[i];
                }
                else if (args[i].Equals("-simulated"))
                {
                    SimulatedFSDataset.SetFactory(conf);
                    if ((i + 1) < args.Length && !args[i + 1].StartsWith("-"))
                    {
                        simulatedCapacityPerDn = long.Parse(args[++i]);
                    }
                }
                else if (args[i].Equals("-bpid"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing blockpoolid parameter");
                    }
                    bpid = args[i];
                }
                else if (args[i].Equals("-inject"))
                {
                    if (!FsDatasetSpi.Factory.GetFactory(conf).IsSimulated())
                    {
                        System.Console.Out.Write("-inject is valid only for simulated");
                        PrintUsageExit();
                    }
                    inject = true;
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing starting block and number of blocks per DN to inject");
                    }
                    startingBlockId = System.Convert.ToInt32(args[i]);
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing number of blocks to inject");
                    }
                    numBlocksPerDNtoInject = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-checkDataNodeAddrConfig"))
                {
                    checkDataNodeAddrConfig = true;
                }
                else
                {
                    PrintUsageExit();
                }
            }
            if (numDataNodes <= 0 || replication <= 0)
            {
                PrintUsageExit("numDataNodes and replication have to be greater than zero");
            }
            if (replication > numDataNodes)
            {
                PrintUsageExit("Replication must be less than or equal to numDataNodes");
            }
            if (bpid == null)
            {
                PrintUsageExit("BlockPoolId must be provided");
            }
            string nameNodeAdr = FileSystem.GetDefaultUri(conf).GetAuthority();

            if (nameNodeAdr == null)
            {
                System.Console.Out.WriteLine("No name node address and port in config");
                System.Environment.Exit(-1);
            }
            bool simulated = FsDatasetSpi.Factory.GetFactory(conf).IsSimulated();

            System.Console.Out.WriteLine("Starting " + numDataNodes + (simulated ? " Simulated "
                                 : " ") + " Data Nodes that will connect to Name Node at " + nameNodeAdr);
            Runtime.SetProperty("test.build.data", dataNodeDirs);
            long[] simulatedCapacities = new long[numDataNodes];
            for (int i_1 = 0; i_1 < numDataNodes; ++i_1)
            {
                simulatedCapacities[i_1] = simulatedCapacityPerDn;
            }
            MiniDFSCluster mc = new MiniDFSCluster();

            try
            {
                mc.FormatDataNodeDirs();
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Error formating data node dirs:" + e);
            }
            string[] rack4DataNode = null;
            if (numRacks > 0)
            {
                System.Console.Out.WriteLine("Using " + numRacks + " racks: ");
                string rackPrefix = GetUniqueRackPrefix();
                rack4DataNode = new string[numDataNodes];
                for (int i_2 = 0; i_2 < numDataNodes; ++i_2)
                {
                    //rack4DataNode[i] = racks[i%numRacks];
                    rack4DataNode[i_2] = rackPrefix + "-" + i_2 % numRacks;
                    System.Console.Out.WriteLine("Data Node " + i_2 + " using " + rack4DataNode[i_2]);
                }
            }
            try
            {
                mc.StartDataNodes(conf, numDataNodes, true, HdfsServerConstants.StartupOption.Regular
                                  , rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
                // Give the DNs some time to connect to the NN and init storage directories.
                Sharpen.Thread.Sleep(10 * 1000);
                if (inject)
                {
                    long blockSize = 10;
                    System.Console.Out.WriteLine("Injecting " + numBlocksPerDNtoInject + " blocks in each DN starting at blockId "
                                                 + startingBlockId + " with blocksize of " + blockSize);
                    Block[] blocks = new Block[numBlocksPerDNtoInject];
                    long    blkid  = startingBlockId;
                    for (int i_dn = 0; i_dn < numDataNodes; ++i_dn)
                    {
                        for (int i_2 = 0; i_2 < blocks.Length; ++i_2)
                        {
                            blocks[i_2] = new Block(blkid++, blockSize, CreateEditsLog.BlockGenerationStamp);
                        }
                        for (int i_3 = 1; i_3 <= replication; ++i_3)
                        {
                            // inject blocks for dn_i into dn_i and replica in dn_i's neighbors
                            mc.InjectBlocks((i_dn + i_3 - 1) % numDataNodes, Arrays.AsList(blocks), bpid);
                            System.Console.Out.WriteLine("Injecting blocks of dn " + i_dn + " into dn"
                                                         + ((i_dn + i_3 - 1) % numDataNodes));
                        }
                    }
                    System.Console.Out.WriteLine("Created blocks from Bids " + startingBlockId + " to "
                                                 + (blkid - 1));
                }
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Error creating data node:" + e);
            }
        }
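PrintUsageExit is referenced throughout the argument parsing but not shown; a plausible sketch follows. The usage string is reconstructed from the flags Main accepts above, not copied from the original:

        // Plausible sketch of the usage helpers driven by the parser above.
        private static void PrintUsageExit(string err)
        {
            System.Console.Out.WriteLine(err);
            PrintUsageExit();
        }

        private static void PrintUsageExit()
        {
            System.Console.Out.WriteLine("Usage: datanodecluster -n <numDataNodes> -bpid <blockPoolId>"
                                         + " [-racks <numRacks>] [-r <replication>] [-d <dataDirs>]"
                                         + " [-simulated [<capacityPerDn>]]"
                                         + " [-inject <startingBlockId> <blocksPerDNtoInject>]"
                                         + " [-checkDataNodeAddrConfig]");
            System.Environment.Exit(-1);
        }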
Example #9
        public virtual void TestGetBlocks()
        {
            Configuration Conf = new HdfsConfiguration();
            short         ReplicationFactor = (short)2;
            int           DefaultBlockSize  = 1024;
            Random        r = new Random();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor).Build();

            try
            {
                cluster.WaitActive();
                // create a file with two blocks
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(new Path("/tmp.txt"), ReplicationFactor);
                byte[]             data = new byte[1024];
                long fileLen            = 2 * DefaultBlockSize;
                long bytesToWrite       = fileLen;
                while (bytesToWrite > 0)
                {
                    r.NextBytes(data);
                    int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : (int)bytesToWrite;
                    @out.Write(data, 0, bytesToWriteNext);
                    bytesToWrite -= bytesToWriteNext;
                }
                @out.Close();
                // get blocks & data nodes
                IList <LocatedBlock> locatedBlocks;
                DatanodeInfo[]       dataNodes = null;
                bool notWritten;
                do
                {
                    DFSClient dfsclient = new DFSClient(NameNode.GetAddress(Conf), Conf);
                    locatedBlocks = dfsclient.GetNamenode().GetBlockLocations("/tmp.txt", 0, fileLen).GetLocatedBlocks();
                    NUnit.Framework.Assert.AreEqual(2, locatedBlocks.Count);
                    notWritten = false;
                    for (int i = 0; i < 2; i++)
                    {
                        dataNodes = locatedBlocks[i].GetLocations();
                        if (dataNodes.Length != ReplicationFactor)
                        {
                            notWritten = true;
                            try
                            {
                                Sharpen.Thread.Sleep(10);
                            }
                            catch (Exception)
                            {
                            }
                            break;
                        }
                    }
                }while (notWritten);
                // get RPC client to namenode
                IPEndPoint       addr     = new IPEndPoint("localhost", cluster.GetNameNodePort());
                NamenodeProtocol namenode = NameNodeProxies.CreateProxy<NamenodeProtocol>(Conf,
                                                                                          NameNode.GetUri(addr)).GetProxy();
                // get blocks of size fileLen from dataNodes[0]
                BlocksWithLocations.BlockWithLocations[] locs;
                locs = namenode.GetBlocks(dataNodes[0], fileLen).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 2);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                NUnit.Framework.Assert.AreEqual(locs[1].GetStorageIDs().Length, 2);
                // get blocks of size BlockSize from dataNodes[0]
                locs = namenode.GetBlocks(dataNodes[0], DefaultBlockSize).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 1);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                // get blocks of size 1 from dataNodes[0]
                locs = namenode.GetBlocks(dataNodes[0], 1).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 1);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                // get blocks of size 0 from dataNodes[0]
                GetBlocksWithException(namenode, dataNodes[0], 0);
                // get blocks of size -1 from dataNodes[0]
                GetBlocksWithException(namenode, dataNodes[0], -1);
                // get blocks of size 2 from a non-existent datanode
                DatanodeInfo info = DFSTestUtil.GetDatanodeInfo("1.2.3.4");
                GetBlocksWithException(namenode, info, 2);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
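GetBlocksWithException is a negative-path helper that is not shown; a sketch under the assumption that the namenode surfaces the rejection as a RemoteException:

        // Sketch of the negative-path helper used above: the request is expected
        // to be rejected, so the call must throw (assumed to be a RemoteException).
        private void GetBlocksWithException(NamenodeProtocol namenode, DatanodeInfo datanode, long size)
        {
            bool getException = false;
            try
            {
                namenode.GetBlocks(datanode, size);
            }
            catch (RemoteException)
            {
                getException = true;
            }
            NUnit.Framework.Assert.IsTrue(getException);
        }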
Example #10
        public virtual void TestWhileOpenRenameToNonExistentDirectory()
        {
            Configuration conf        = new HdfsConfiguration();
            int           MaxIdleTime = 2000; // 2s

            conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, 1);
            System.Console.Out.WriteLine("Test 4************************************");
            // create cluster
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = null;

            try
            {
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                int nnport = cluster.GetNameNodePort();
                // create file1.
                Path dir1  = new Path("/user/dir1");
                Path file1 = new Path(dir1, "file1");
                FSDataOutputStream stm1 = TestFileCreation.CreateFile(fs, file1, 1);
                System.Console.Out.WriteLine("testFileCreationDeleteParent: " + "Created file " +
                                             file1);
                TestFileCreation.WriteFile(stm1);
                stm1.Hflush();
                Path dir2 = new Path("/user/dir2");
                fs.Rename(file1, dir2);
                // restart cluster with the same namenode port as before.
                // This ensures that leases are persisted in fsimage.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(2 * MaxIdleTime);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build();
                cluster.WaitActive();
                // restart cluster yet again. This triggers the code to read in
                // persistent leases from fsimage.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                Path newfile = new Path("/user", "dir2");
                NUnit.Framework.Assert.IsTrue(!fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(newfile));
                CheckFullFile(fs, newfile);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
 /// <exception cref="System.IO.IOException"/>
 public virtual void StartDataNodes(Configuration conf, int numDataNodes,
                                    StorageType[][] storageTypes, bool manageDfsDirs,
                                    HdfsServerConstants.StartupOption operation, string[] racks,
                                    string[] nodeGroups, string[] hosts, long[][] storageCapacities,
                                    long[] simulatedCapacities, bool setupHostsFile,
                                    bool checkDataNodeAddrConfig, bool checkDataNodeHostConfig)
 {
     lock (this)
     {
          System.Diagnostics.Debug.Assert(storageCapacities == null || simulatedCapacities == null);
          System.Diagnostics.Debug.Assert(storageTypes == null || storageTypes.Length == numDataNodes);
          System.Diagnostics.Debug.Assert(storageCapacities == null || storageCapacities.Length == numDataNodes);
         if (operation == HdfsServerConstants.StartupOption.Recover)
         {
             return;
         }
         if (checkDataNodeHostConfig)
         {
             conf.SetIfUnset(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         else
         {
             conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         int curDatanodesNum = dataNodes.Count;
          // for miniclusters, the default initialDelay for block reports (BRs) is 0
         if (conf.Get(DFSConfigKeys.DfsBlockreportInitialDelayKey) == null)
         {
             conf.SetLong(DFSConfigKeys.DfsBlockreportInitialDelayKey, 0);
         }
          // If the minicluster's name node is null, assume the conf has been
          // set with the right address:port of the name node.
         if (racks != null && numDataNodes > racks.Length)
         {
             throw new ArgumentException("The length of racks [" + racks.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (nodeGroups != null && numDataNodes > nodeGroups.Length)
         {
             throw new ArgumentException("The length of nodeGroups [" + nodeGroups.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (hosts != null && numDataNodes > hosts.Length)
         {
             throw new ArgumentException("The length of hosts [" + hosts.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         //Generate some hostnames if required
         if (racks != null && hosts == null)
         {
             hosts = new string[numDataNodes];
             for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
             {
                 hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
             }
         }
         if (simulatedCapacities != null && numDataNodes > simulatedCapacities.Length)
         {
             throw new ArgumentException("The length of simulatedCapacities [" + simulatedCapacities
                                         .Length + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
          string[] dnArgs = (operation == null || operation != HdfsServerConstants.StartupOption.Rollback)
                            ? null : new string[] { operation.GetName() };
         DataNode[] dns = new DataNode[numDataNodes];
         for (int i_1 = curDatanodesNum; i_1 < curDatanodesNum + numDataNodes; i_1++)
         {
             Configuration dnConf = new HdfsConfiguration(conf);
             // Set up datanode address
             SetupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
             if (manageDfsDirs)
             {
                 string dirs = MakeDataNodeDirs(i_1, storageTypes == null ? null : storageTypes[i_1
                                                ]);
                 dnConf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
                 conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
             }
             if (simulatedCapacities != null)
             {
                 SimulatedFSDataset.SetFactory(dnConf);
                 dnConf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity, simulatedCapacities[i_1
                                                                                               - curDatanodesNum]);
             }
             Log.Info("Starting DataNode " + i_1 + " with " + DFSConfigKeys.DfsDatanodeDataDirKey
                      + ": " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey));
             if (hosts != null)
             {
                 dnConf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, hosts[i_1 - curDatanodesNum]);
                 Log.Info("Starting DataNode " + i_1 + " with hostname set to: " + dnConf.Get(DFSConfigKeys
                                                                                              .DfsDatanodeHostNameKey));
             }
             if (racks != null)
             {
                 string name = hosts[i_1 - curDatanodesNum];
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with hostname : " + name + " to rack " + racks[i_1 - curDatanodesNum
                              ]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[
                                  i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum] + nodeGroups[i_1 -
                                                                                                 curDatanodesNum]);
                 }
             }
             Configuration newconf = new HdfsConfiguration(dnConf);
             // save config
             if (hosts != null)
             {
                 NetUtils.AddStaticResolution(hosts[i_1 - curDatanodesNum], "localhost");
             }
             SecureDataNodeStarter.SecureResources secureResources = null;
             if (UserGroupInformation.IsSecurityEnabled())
             {
                 try
                 {
                     secureResources = SecureDataNodeStarter.GetSecureResources(dnConf);
                 }
                 catch (Exception ex)
                 {
                     Sharpen.Runtime.PrintStackTrace(ex);
                 }
             }
             DataNode dn = DataNode.InstantiateDataNode(dnArgs, dnConf, secureResources);
             if (dn == null)
             {
                 throw new IOException("Cannot start DataNode in " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey
                                                                                ));
             }
              // since HDFS does things based on IP:port, we need to add the
              // mapping for IP:port to rackId as well
             string ipAddr = dn.GetXferAddress().Address.GetHostAddress();
             if (racks != null)
             {
                 int port = dn.GetXferAddress().Port;
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks
                              [i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
                              nodeGroups[i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]
                              );
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum] + nodeGroups
                                                 [i_1 - curDatanodesNum]);
                 }
             }
             dn.RunDatanodeDaemon();
             dataNodes.AddItem(new MiniDFSCluster.DataNodeProperties(this, dn, newconf, dnArgs
                                                                     , secureResources, dn.GetIpcPort()));
             dns[i_1 - curDatanodesNum] = dn;
         }
         curDatanodesNum   += numDataNodes;
         this.numDataNodes += numDataNodes;
         WaitActive();
         if (storageCapacities != null)
         {
              // curDatanodesNum was advanced above, so dns and storageCapacities
              // are indexed relative to the start of this batch (0-based)
              for (int i = 0; i < numDataNodes; ++i)
              {
                  IList<FsVolumeSpi> volumes = dns[i].GetFSDataset().GetVolumes();
                  System.Diagnostics.Debug.Assert(volumes.Count == storagesPerDatanode);
                  for (int j = 0; j < volumes.Count; ++j)
                  {
                      FsVolumeImpl volume = (FsVolumeImpl)volumes[j];
                      volume.SetCapacityForTesting(storageCapacities[i][j]);
                  }
             }
         }
     }
 }
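
The recurring "i_1 - curDatanodesNum" arithmetic above maps a cluster-wide datanode index onto the per-batch arrays (hosts, racks, nodeGroups, simulatedCapacities), which are always indexed from zero. A minimal, self-contained sketch of that mapping; the class and values here are illustrative, not part of the MiniDFSCluster API:

using System;

class BatchIndexDemo
{
    static void Main()
    {
        int curDatanodesNum = 3;   // datanodes already running in the cluster
        int numDataNodes    = 2;   // size of the batch being started now
        // per-batch array: length == numDataNodes, indexed from zero
        string[] hosts = { "host3.foo.com", "host4.foo.com" };

        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
        {
            // i is the cluster-wide index; i - curDatanodesNum is the batch index
            Console.WriteLine("DataNode " + i + " -> " + hosts[i - curDatanodesNum]);
        }
    }
}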
Example #12
        public virtual void TestWhileOpenRenameParent()
        {
            Configuration conf        = new HdfsConfiguration();
            int           MaxIdleTime = 2000;

            // 2s
            conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, 1);
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, TestFileCreation.blockSize);
            // create cluster
            System.Console.Out.WriteLine("Test 1*****************************");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = null;

            try
            {
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                // Normally, the in-progress edit log would be finalized by
                // FSEditLog#endCurrentLogSegment.  For testing purposes, we
                // disable that here.
                FSEditLog spyLog = Org.Mockito.Mockito.Spy(cluster.GetNameNode().GetFSImage().GetEditLog
                                                               ());
                Org.Mockito.Mockito.DoNothing().When(spyLog).EndCurrentLogSegment(Org.Mockito.Mockito
                                                                                  .AnyBoolean());
                DFSTestUtil.SetEditLogForTesting(cluster.GetNamesystem(), spyLog);
                int nnport = cluster.GetNameNodePort();
                // create file1.
                Path dir1  = new Path("/user/a+b/dir1");
                Path file1 = new Path(dir1, "file1");
                FSDataOutputStream stm1 = TestFileCreation.CreateFile(fs, file1, 1);
                System.Console.Out.WriteLine("testFileCreationDeleteParent: " + "Created file " +
                                             file1);
                TestFileCreation.WriteFile(stm1);
                stm1.Hflush();
                // create file2.
                Path dir2  = new Path("/user/dir2");
                Path file2 = new Path(dir2, "file2");
                FSDataOutputStream stm2 = TestFileCreation.CreateFile(fs, file2, 1);
                System.Console.Out.WriteLine("testFileCreationDeleteParent: " + "Created file " +
                                             file2);
                TestFileCreation.WriteFile(stm2);
                stm2.Hflush();
                // move dir1 while file1 is open
                Path dir3 = new Path("/user/dir3");
                fs.Mkdirs(dir3);
                fs.Rename(dir1, dir3);
                // create file3
                Path file3 = new Path(dir3, "file3");
                FSDataOutputStream stm3 = fs.Create(file3);
                fs.Rename(file3, new Path(dir3, "bozo"));
                // Get a new block for the file.
                TestFileCreation.WriteFile(stm3, TestFileCreation.blockSize + 1);
                stm3.Hflush();
                // Stop the NameNode before closing the files.
                // This will ensure that the write leases are still active and present
                // in the edit log.  Similarly, there should be a pending ADD_BLOCK_OP
                // for file3, since we just added a block to that file.
                cluster.GetNameNode().Stop();
                // Restart cluster with the same namenode port as before.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(2 * MaxIdleTime);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build
                              ();
                cluster.WaitActive();
                // restart cluster yet again. This triggers the code to read in
                // persistent leases from the edit log.
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build
                              ();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                Path newfile = new Path("/user/dir3/dir1", "file1");
                NUnit.Framework.Assert.IsTrue(!fs.Exists(file1));
                NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
                NUnit.Framework.Assert.IsTrue(fs.Exists(newfile));
                CheckFullFile(fs, newfile);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
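
Example #12 depends on restarting the cluster on the same NameNode port with Format(false), so the persisted edit log (including the still-open leases) is replayed at startup. A minimal sketch of that restart pattern, assuming the Sharpen-converted MiniDFSCluster API used throughout these examples; the helper name is illustrative:

// Sketch: bring a cluster back on the same address:port without
// reformatting, so existing name/edit directories are reused.
private static MiniDFSCluster RestartPreservingState(Configuration conf, int nnport)
{
    MiniDFSCluster restarted = new MiniDFSCluster.Builder(conf)
        .NameNodePort(nnport)   // reuse the old NameNode port
        .Format(false)          // keep the on-disk state, replay edits
        .Build();
    restarted.WaitActive();     // block until NN and DNs are serving
    return restarted;
}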
        /// <summary>Tests all FsEditLogOps that are converted to inotify events.</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.URISyntaxException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Inotify.MissingEventsException"/>
        public virtual void TestBasic()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
            // so that we can get an atime change
            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 1);
            MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
            builder.GetDfsBuilder().NumDataNodes(2);
            MiniQJMHACluster cluster = builder.Build();

            try
            {
                cluster.GetDfsCluster().WaitActive();
                cluster.GetDfsCluster().TransitionToActive(0);
                DFSClient client = new DFSClient(cluster.GetDfsCluster().GetNameNode(0).GetNameNodeAddress
                                                     (), conf);
                FileSystem fs = cluster.GetDfsCluster().GetFileSystem(0);
                DFSTestUtil.CreateFile(fs, new Path("/file"), BlockSize, (short)1, 0L);
                DFSTestUtil.CreateFile(fs, new Path("/file3"), BlockSize, (short)1, 0L);
                DFSTestUtil.CreateFile(fs, new Path("/file5"), BlockSize, (short)1, 0L);
                DFSInotifyEventInputStream eis = client.GetInotifyEventStream();
                client.Rename("/file", "/file4", null);
                // RenameOp -> RenameEvent
                client.Rename("/file4", "/file2");
                // RenameOldOp -> RenameEvent
                // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
                OutputStream os = client.Create("/file2", true, (short)2, BlockSize);
                os.Write(new byte[BlockSize]);
                os.Close();
                // CloseOp -> CloseEvent
                // AddOp -> AppendEvent
                os = client.Append("/file2", BlockSize, EnumSet.Of(CreateFlag.Append), null, null
                                   );
                os.Write(new byte[BlockSize]);
                os.Close();
                // CloseOp -> CloseEvent
                Sharpen.Thread.Sleep(10);
                // so that the atime will get updated on the next line
                client.Open("/file2").Read(new byte[1]);
                // TimesOp -> MetadataUpdateEvent
                // SetReplicationOp -> MetadataUpdateEvent
                client.SetReplication("/file2", (short)1);
                // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
                client.Concat("/file2", new string[] { "/file3" });
                client.Delete("/file2", false);
                // DeleteOp -> UnlinkEvent
                client.Mkdirs("/dir", null, false);
                // MkdirOp -> CreateEvent
                // SetPermissionsOp -> MetadataUpdateEvent
                client.SetPermission("/dir", FsPermission.ValueOf("-rw-rw-rw-"));
                // SetOwnerOp -> MetadataUpdateEvent
                client.SetOwner("/dir", "username", "groupname");
                client.CreateSymlink("/dir", "/dir2", false);
                // SymlinkOp -> CreateEvent
                client.SetXAttr("/file5", "user.field", Sharpen.Runtime.GetBytesForString("value"
                                                                                          ), EnumSet.Of(XAttrSetFlag.Create));
                // SetXAttrOp -> MetadataUpdateEvent
                // RemoveXAttrOp -> MetadataUpdateEvent
                client.RemoveXAttr("/file5", "user.field");
                // SetAclOp -> MetadataUpdateEvent
                client.SetAcl("/file5", AclEntry.ParseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---"
                                                              , true));
                client.RemoveAcl("/file5");
                // SetAclOp -> MetadataUpdateEvent
                client.Rename("/file5", "/dir");
                // RenameOldOp -> RenameEvent
                EventBatch batch = null;
                // RenameOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                long txid = batch.GetTxid();
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual("/file4", re.GetDstPath());
                NUnit.Framework.Assert.AreEqual("/file", re.GetSrcPath());
                NUnit.Framework.Assert.IsTrue(re.GetTimestamp() > 0);
                long eventsBehind = eis.GetTxidsBehindEstimate();
                // RenameOldOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re2 = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(re2.GetDstPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(re2.GetSrcPath().Equals("/file4"));
                NUnit.Framework.Assert.IsTrue(re2.GetTimestamp() > 0);
                // AddOp with overwrite
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce.GetiNodeType() == Event.CreateEvent.INodeType.File
                                              );
                NUnit.Framework.Assert.IsTrue(ce.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce.GetReplication() > 0);
                NUnit.Framework.Assert.IsTrue(ce.GetSymlinkTarget() == null);
                NUnit.Framework.Assert.IsTrue(ce.GetOverwrite());
                NUnit.Framework.Assert.AreEqual(BlockSize, ce.GetDefaultBlockSize());
                // CloseOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                Event.CloseEvent ce2 = (Event.CloseEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce2.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce2.GetFileSize() > 0);
                NUnit.Framework.Assert.IsTrue(ce2.GetTimestamp() > 0);
                // AppendOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Append);
                Event.AppendEvent append2 = (Event.AppendEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual("/file2", append2.GetPath());
                NUnit.Framework.Assert.IsFalse(append2.ToNewBlock());
                // CloseOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                NUnit.Framework.Assert.IsTrue(((Event.CloseEvent)batch.GetEvents()[0]).GetPath().
                                              Equals("/file2"));
                // TimesOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(mue.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Times);
                // SetReplicationOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue2.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(mue2.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Replication);
                NUnit.Framework.Assert.IsTrue(mue2.GetReplication() == 1);
                // ConcatDeleteOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(3, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Append);
                NUnit.Framework.Assert.IsTrue(((Event.AppendEvent)batch.GetEvents()[0]).GetPath()
                                              .Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[1].GetEventType() == Event.EventType
                                              .Unlink);
                Event.UnlinkEvent ue2 = (Event.UnlinkEvent)batch.GetEvents()[1];
                NUnit.Framework.Assert.IsTrue(ue2.GetPath().Equals("/file3"));
                NUnit.Framework.Assert.IsTrue(ue2.GetTimestamp() > 0);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[2].GetEventType() == Event.EventType
                                              .Close);
                Event.CloseEvent ce3 = (Event.CloseEvent)batch.GetEvents()[2];
                NUnit.Framework.Assert.IsTrue(ce3.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce3.GetTimestamp() > 0);
                // DeleteOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Unlink);
                Event.UnlinkEvent ue = (Event.UnlinkEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ue.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ue.GetTimestamp() > 0);
                // MkdirOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce4 = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce4.GetiNodeType() == Event.CreateEvent.INodeType.Directory
                                              );
                NUnit.Framework.Assert.IsTrue(ce4.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(ce4.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce4.GetReplication() == 0);
                NUnit.Framework.Assert.IsTrue(ce4.GetSymlinkTarget() == null);
                // SetPermissionsOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue3.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(mue3.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Perms);
                NUnit.Framework.Assert.IsTrue(mue3.GetPerms().ToString().Contains("rw-rw-rw-"));
                // SetOwnerOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue4.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(mue4.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Owner);
                NUnit.Framework.Assert.IsTrue(mue4.GetOwnerName().Equals("username"));
                NUnit.Framework.Assert.IsTrue(mue4.GetGroupName().Equals("groupname"));
                // SymlinkOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce5 = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce5.GetiNodeType() == Event.CreateEvent.INodeType.Symlink
                                              );
                NUnit.Framework.Assert.IsTrue(ce5.GetPath().Equals("/dir2"));
                NUnit.Framework.Assert.IsTrue(ce5.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce5.GetReplication() == 0);
                NUnit.Framework.Assert.IsTrue(ce5.GetSymlinkTarget().Equals("/dir"));
                // SetXAttrOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue5.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue5.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Xattrs);
                NUnit.Framework.Assert.IsTrue(mue5.GetxAttrs().Count == 1);
                NUnit.Framework.Assert.IsTrue(mue5.GetxAttrs()[0].GetName().Contains("field"));
                NUnit.Framework.Assert.IsTrue(!mue5.IsxAttrsRemoved());
                // RemoveXAttrOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue6.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue6.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Xattrs);
                NUnit.Framework.Assert.IsTrue(mue6.GetxAttrs().Count == 1);
                NUnit.Framework.Assert.IsTrue(mue6.GetxAttrs()[0].GetName().Contains("field"));
                NUnit.Framework.Assert.IsTrue(mue6.IsxAttrsRemoved());
                // SetAclOp (1)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue7.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue7.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Acls);
                NUnit.Framework.Assert.IsTrue(mue7.GetAcls().Contains(AclEntry.ParseAclEntry("user::rwx"
                                                                                             , true)));
                // SetAclOp (2)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue8.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue8.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Acls);
                NUnit.Framework.Assert.IsTrue(mue8.GetAcls() == null);
                // RenameOp (2)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re3 = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(re3.GetDstPath().Equals("/dir/file5"));
                NUnit.Framework.Assert.IsTrue(re3.GetSrcPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(re3.GetTimestamp() > 0);
                // Returns null when there are no further events
                NUnit.Framework.Assert.IsTrue(eis.Poll() == null);
                // make sure the estimate hasn't changed since the above assertion
                // tells us that we are fully caught up to the current namesystem state
                // and we should not have been behind at all when eventsBehind was set
                // either, since there were few enough events that they should have all
                // been read to the client during the first poll() call
                NUnit.Framework.Assert.IsTrue(eis.GetTxidsBehindEstimate() == eventsBehind);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
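
TestBasic consumes one event batch per namesystem operation through WaitForNextEvents, which is not shown in this listing. One plausible shape for it, assuming only the Poll() and Sleep() calls that appear above; this is a sketch, not the actual helper from TestDFSInotifyEventInputStream:

// Sketch: spin until the inotify stream yields the next batch.
// Poll() returns null while the reader is caught up with the edit log.
private static EventBatch WaitForNextEvents(DFSInotifyEventInputStream eis)
{
    EventBatch batch;
    while ((batch = eis.Poll()) == null)
    {
        Sharpen.Thread.Sleep(10);   // back off briefly before polling again
    }
    return batch;
}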
Example #14
        public virtual void TestTimes()
        {
            Configuration conf        = new HdfsConfiguration();
            int           MaxIdleTime = 2000;

            // 2s
            conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes
                                                                                   ).Build();

            cluster.WaitActive();
            int        nnport = cluster.GetNameNodePort();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live
                                                        );
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length
                                            );
            FileSystem fileSys  = cluster.GetFileSystem();
            int        replicas = 1;

            NUnit.Framework.Assert.IsTrue(fileSys is DistributedFileSystem);
            try
            {
                //
                // create file and record atime/mtime
                //
                System.Console.Out.WriteLine("Creating testdir1 and testdir1/test1.dat.");
                Path dir1               = new Path("testdir1");
                Path file1              = new Path(dir1, "test1.dat");
                FSDataOutputStream stm  = WriteFile(fileSys, file1, replicas);
                FileStatus         stat = fileSys.GetFileStatus(file1);
                long   atimeBeforeClose = stat.GetAccessTime();
                string adate            = dateForm.Format(Sharpen.Extensions.CreateDate(atimeBeforeClose));
                System.Console.Out.WriteLine("atime on " + file1 + " before close is " + adate +
                                             " (" + atimeBeforeClose + ")");
                NUnit.Framework.Assert.IsTrue(atimeBeforeClose != 0);
                stm.Close();
                stat = fileSys.GetFileStatus(file1);
                long atime1 = stat.GetAccessTime();
                long mtime1 = stat.GetModificationTime();
                adate = dateForm.Format(Sharpen.Extensions.CreateDate(atime1));
                string mdate = dateForm.Format(Sharpen.Extensions.CreateDate(mtime1));
                System.Console.Out.WriteLine("atime on " + file1 + " is " + adate + " (" + atime1
                                             + ")");
                System.Console.Out.WriteLine("mtime on " + file1 + " is " + mdate + " (" + mtime1
                                             + ")");
                NUnit.Framework.Assert.IsTrue(atime1 != 0);
                //
                // record dir times
                //
                stat = fileSys.GetFileStatus(dir1);
                long mdir1 = stat.GetAccessTime();
                NUnit.Framework.Assert.IsTrue(mdir1 == 0);
                // set the access time to be one day in the past
                long atime2 = atime1 - (24L * 3600L * 1000L);
                fileSys.SetTimes(file1, -1, atime2);
                // check new access time on file
                stat = fileSys.GetFileStatus(file1);
                long   atime3 = stat.GetAccessTime();
                string adate3 = dateForm.Format(Sharpen.Extensions.CreateDate(atime3));
                System.Console.Out.WriteLine("new atime on " + file1 + " is " + adate3 + " (" + atime3
                                             + ")");
                NUnit.Framework.Assert.IsTrue(atime2 == atime3);
                NUnit.Framework.Assert.IsTrue(mtime1 == stat.GetModificationTime());
                // set the modification time to be 1 hour in the past
                long mtime2 = mtime1 - (3600L * 1000L);
                fileSys.SetTimes(file1, mtime2, -1);
                // check new modification time on file
                stat = fileSys.GetFileStatus(file1);
                long   mtime3 = stat.GetModificationTime();
                string mdate3 = dateForm.Format(Sharpen.Extensions.CreateDate(mtime3));
                System.Console.Out.WriteLine("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3
                                             + ")");
                NUnit.Framework.Assert.IsTrue(atime2 == stat.GetAccessTime());
                NUnit.Framework.Assert.IsTrue(mtime2 == mtime3);
                long mtime4 = Time.Now() - (3600L * 1000L);
                long atime4 = Time.Now();
                fileSys.SetTimes(dir1, mtime4, atime4);
                // check new modification time on file
                stat = fileSys.GetFileStatus(dir1);
                NUnit.Framework.Assert.IsTrue("Not matching the modification times", mtime4 == stat
                                              .GetModificationTime());
                NUnit.Framework.Assert.IsTrue("Not matching the access times", atime4 == stat.GetAccessTime
                                                  ());
                Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
                try
                {
                    fileSys.SetTimes(nonExistingDir, mtime4, atime4);
                    NUnit.Framework.Assert.Fail("Expecting FileNotFoundException");
                }
                catch (FileNotFoundException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("File/Directory " + nonExistingDir
                                                                     .ToString() + " does not exist."));
                }
                // shutdown cluster and restart
                cluster.Shutdown();
                try
                {
                    Sharpen.Thread.Sleep(2 * MaxIdleTime);
                }
                catch (Exception)
                {
                }
                cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build
                              ();
                cluster.WaitActive();
                fileSys = cluster.GetFileSystem();
                // verify that access times and modification times persist after a
                // cluster restart.
                System.Console.Out.WriteLine("Verifying times after cluster restart");
                stat = fileSys.GetFileStatus(file1);
                NUnit.Framework.Assert.IsTrue(atime2 == stat.GetAccessTime());
                NUnit.Framework.Assert.IsTrue(mtime3 == stat.GetModificationTime());
                CleanupFile(fileSys, file1);
                CleanupFile(fileSys, dir1);
            }
            catch (IOException e)
            {
                info = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
                PrintDatanodeReport(info);
                throw;
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
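
TestTimes leans on the FileSystem.SetTimes convention that a negative argument leaves the corresponding timestamp untouched, which is why "fileSys.SetTimes(file1, -1, atime2)" changes only the access time. Two hedged wrappers spelling out that convention; the helper names are illustrative:

// Sketch: in SetTimes(path, mtime, atime), passing -1 for a field
// means "do not change it", as the assertions above verify.
private static void SetAccessTimeOnly(FileSystem fs, Path p, long atime)
{
    fs.SetTimes(p, -1, atime);   // modification time is preserved
}

private static void SetModificationTimeOnly(FileSystem fs, Path p, long mtime)
{
    fs.SetTimes(p, mtime, -1);   // access time is preserved
}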
        public virtual void TestPreserveEditLogs()
        {
            UnpackStorage(Hadoop252Image, HadoopDfsDirTxt);
            Configuration conf = new HdfsConfiguration();

            conf = UpgradeUtilities.InitializeStorageStateConf(1, conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).StartupOption(HdfsServerConstants.StartupOption
                                                                                       .Upgrade).Build();
            DFSInotifyEventInputStream ieis = cluster.GetFileSystem().GetInotifyEventStream(0
                                                                                            );
            EventBatch batch;

            Event.CreateEvent ce;
            Event.RenameEvent re;
            // mkdir /input
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Create);
            ce = (Event.CreateEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input");
            // mkdir /input/dir1~5
            for (int i = 1; i <= 5; i++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                ce = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(ce.GetPath(), "/input/dir" + i);
            }
            // copyFromLocal randome_file_1~2 /input/dir1~2
            for (int i_1 = 1; i_1 <= 2; i_1++)
            {
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                if (batch.GetEvents()[0].GetEventType() != Event.EventType.Create)
                {
                    FSImage.Log.Debug(string.Empty);
                }
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                // copyFromLocal randome_file_1 /input/dir1, CLOSE
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                // copyFromLocal randome_file_1 /input/dir1, RENAME from the ._COPYING_ temp file
                batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                re = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir" + i_1 + "/randome_file_"
                                                + i_1);
            }
            // mv /input/dir1/randome_file_1 /input/dir3/randome_file_3
            long txIDBeforeRename = batch.GetTxid();

            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // rmdir /input/dir1
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Unlink);
            NUnit.Framework.Assert.AreEqual(((Event.UnlinkEvent)batch.GetEvents()[0]).GetPath
                                                (), "/input/dir1");
            long lastTxID = batch.GetTxid();

            // Start inotify from the tx before rename /input/dir1/randome_file_1
            ieis  = cluster.GetFileSystem().GetInotifyEventStream(txIDBeforeRename);
            batch = TestDFSInotifyEventInputStream.WaitForNextEvents(ieis);
            NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
            NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                          .Rename);
            re = (Event.RenameEvent)batch.GetEvents()[0];
            NUnit.Framework.Assert.AreEqual(re.GetDstPath(), "/input/dir3/randome_file_3");
            // Try to read beyond available edits
            ieis = cluster.GetFileSystem().GetInotifyEventStream(lastTxID + 1);
            NUnit.Framework.Assert.IsNull(ieis.Poll());
            cluster.Shutdown();
        }
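
Resuming at txIDBeforeRename works because every EventBatch carries the transaction ID it was read at, and GetInotifyEventStream(long) starts reading from an arbitrary txid. A hedged helper that drains whatever is currently readable and reports the last txid seen; the method is illustrative, not part of the test class:

// Sketch: drain the available batches; Poll() returns null once the
// reader is caught up, as the lastTxID + 1 probe above demonstrates.
private static long DrainAndReturnLastTxid(DFSInotifyEventInputStream ieis)
{
    long last = -1;
    EventBatch batch;
    while ((batch = ieis.Poll()) != null)
    {
        last = batch.GetTxid();
        System.Console.Out.WriteLine("batch txid=" + last + " events="
                                     + batch.GetEvents().Length);
    }
    return last;
}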
Example #16
        public virtual void TestReadSelectNonStaleDatanode()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
            long staleInterval = 30 * 1000 * 60;

            conf.SetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, staleInterval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes
                                                                                   ).Racks(racks).Build();

            cluster.WaitActive();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);
            IList <DatanodeDescriptor> nodeInfoList = cluster.GetNameNode().GetNamesystem().GetBlockManager
                                                          ().GetDatanodeManager().GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                                                           .Live);

            NUnit.Framework.Assert.AreEqual("Unexpected number of datanodes", numDatanodes, nodeInfoList
                                            .Count);
            FileSystem         fileSys = cluster.GetFileSystem();
            FSDataOutputStream stm     = null;

            try
            {
                // do the writing but do not close the FSDataOutputStream
                // in order to mimic the ongoing writing
                Path fileName = new Path("/file1");
                stm = fileSys.Create(fileName, true, fileSys.GetConf().GetInt(CommonConfigurationKeys
                                                                              .IoFileBufferSizeKey, 4096), (short)3, blockSize);
                stm.Write(new byte[(blockSize * 3) / 2]);
                // We do not close the stream so that
                // the writing seems to be still ongoing
                stm.Hflush();
                LocatedBlocks blocks = client.GetNamenode().GetBlockLocations(fileName.ToString()
                                                                              , 0, blockSize);
                DatanodeInfo[] nodes = blocks.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                DataNode           staleNode     = null;
                DatanodeDescriptor staleNodeInfo = null;
                // stop the heartbeat of the first node
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the first node as stale
                staleNodeInfo = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager
                                    ().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, -(staleInterval + 1));
                LocatedBlocks blocksAfterStale = client.GetNamenode().GetBlockLocations(fileName.
                                                                                        ToString(), 0, blockSize);
                DatanodeInfo[] nodesAfterStale = blocksAfterStale.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(), nodes[0].GetHostName
                                                    ());
                // restart the staleNode's heartbeat
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(staleNode, false);
                // reset the first node as non-stale, so as to avoid two stale nodes
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, 0);
                LocatedBlock lastBlock = client.GetLocatedBlocks(fileName.ToString(), 0, long.MaxValue
                                                                 ).GetLastLocatedBlock();
                nodes = lastBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                // stop the heartbeat of the first node for the last block
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the node as stale
                DatanodeDescriptor dnDesc = cluster.GetNameNode().GetNamesystem().GetBlockManager
                                                ().GetDatanodeManager().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
                LocatedBlock lastBlockAfterStale = client.GetLocatedBlocks(fileName.ToString(), 0
                                                                           , long.MaxValue).GetLastLocatedBlock();
                nodesAfterStale = lastBlockAfterStale.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(), nodes[0].GetHostName
                                                    ());
            }
            finally
            {
                if (stm != null)
                {
                    stm.Close();
                }
                client.Close();
                cluster.Shutdown();
            }
        }
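
The stale/non-stale toggling in Example #16 comes down to shifting a datanode's last-update timestamp relative to the configured DfsNamenodeStaleDatanodeIntervalKey: older than the interval means stale, and an offset of 0 makes it fresh again. Two hedged wrappers around the DFSTestUtil call used above; the names are illustrative:

// Sketch: push the last update past the stale threshold (marks stale),
// or reset it to "now" (marks fresh) so reads prefer the node again.
private static void MarkStale(DatanodeDescriptor dn, long staleIntervalMs)
{
    DFSTestUtil.ResetLastUpdatesWithOffset(dn, -(staleIntervalMs + 1));
}

private static void MarkFresh(DatanodeDescriptor dn)
{
    DFSTestUtil.ResetLastUpdatesWithOffset(dn, 0);
}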
Example #17
        /// <summary>Test that appends to files at random offsets.</summary>
        /// <exception cref="System.IO.IOException"/>
        private void TestComplexAppend(bool appendToNewBlock)
        {
            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 2);
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 30000);
            conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, 30000);
            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes
                                                                                   ).Build();

            cluster.WaitActive();
            FileSystem fs = cluster.GetFileSystem();

            try
            {
                // create a bunch of test files with random replication factors.
                // Insert them into a linked list.
                //
                for (int i = 0; i < numberOfFiles; i++)
                {
                    int  replication       = AppendTestUtil.NextInt(numDatanodes - 2) + 1;
                    Path testFile          = new Path("/" + i + ".dat");
                    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, testFile, replication);
                    stm.Close();
                    testFiles.AddItem(testFile);
                }
                // Create threads and make them run workload concurrently.
                workload = new TestFileAppend2.Workload[numThreads];
                for (int i_1 = 0; i_1 < numThreads; i_1++)
                {
                    workload[i_1] = new TestFileAppend2.Workload(this, cluster, i_1, appendToNewBlock
                                                                 );
                    workload[i_1].Start();
                }
                // wait for all transactions to get over
                for (int i_2 = 0; i_2 < numThreads; i_2++)
                {
                    try
                    {
                        System.Console.Out.WriteLine("Waiting for thread " + i_2 + " to complete...");
                        workload[i_2].Join();
                        System.Console.Out.WriteLine("Waiting for thread " + i_2 + " complete.");
                    }
                    catch (Exception)
                    {
                        i_2--; // retry the interrupted join
                    }
                }
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
            // If any of the worker thread failed in their job, indicate that
            // this test failed.
            //
            NUnit.Framework.Assert.IsTrue("testComplexAppend Worker encountered exceptions.",
                                          globalStatus);
        }
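
The join loop above decrements the counter when a join is interrupted, so every worker thread is re-joined until it has really finished. The same idea as a standalone helper in plain .NET threading, independent of the Hadoop test harness:

using System.Threading;

static class Joiner
{
    // Sketch: join a thread to completion, retrying if the wait
    // is interrupted; mirrors the i_2-- retry pattern above.
    public static void JoinUninterruptibly(Thread t)
    {
        while (true)
        {
            try
            {
                t.Join();
                return;
            }
            catch (ThreadInterruptedException)
            {
                // interrupted before the thread finished: join again
            }
        }
    }
}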
Example #18
        public virtual void TestNamespaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem dfs     = cluster.GetFileSystem();

            try
            {
                // 1: create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // 2: set the quota of /nqdir0/qdir1 to be 6
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, 6, HdfsConstants.QuotaDontSet);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
                Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir2, 7, HdfsConstants.QuotaDontSet);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
                Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir3));
                dfs.SetQuota(quotaDir3, 2, HdfsConstants.QuotaDontSet);
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir3, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 6: Create directory /nqdir0/qdir1/qdir21/nqdir33
                tempPath = new Path(quotaDir3, "nqdir33");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(quotaDir3);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 7: Create directory /nqdir0/qdir1/qdir20/nqdir31
                tempPath = new Path(quotaDir2, "nqdir31");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 8: Create directory /nqdir0/qdir1/qdir20/nqdir33
                tempPath     = new Path(quotaDir2, "nqdir33");
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 9: Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
                tempPath = new Path(quotaDir2, "nqdir30");
                dfs.Rename(new Path(quotaDir3, "nqdir32"), tempPath);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 10: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(tempPath, quotaDir3));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(tempPath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(new Path(quotaDir3, "nqdir30")));
                // 10.a: Rename /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/nqdir32
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(tempPath, new Path(quotaDir3, "nqdir32"
                                                                                 )));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(tempPath));
                NUnit.Framework.Assert.IsFalse(dfs.Exists(new Path(quotaDir3, "nqdir32")));
                // 11: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0
                NUnit.Framework.Assert.IsTrue(dfs.Rename(tempPath, new Path("/nqdir0")));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 12: Create directory /nqdir0/nqdir30/nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
                // 13: Move /nqdir0/nqdir30 to /nqdir0/qdir1/qdir20/nqdir30
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(new Path("/nqdir0/nqdir30"), tempPath));
                }
                catch (NSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 14: Move /nqdir0/qdir1/qdir21 /nqdir0/qdir1/qdir20
                NUnit.Framework.Assert.IsTrue(dfs.Rename(quotaDir3, quotaDir2));
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 4);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                tempPath = new Path(quotaDir2, "qdir21");
                c        = dfs.GetContentSummary(tempPath);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 2);
                // 15: Delete /nqdir0/qdir1/qdir20/qdir21
                dfs.Delete(tempPath, true);
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 2);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                // 16: Move /nqdir0/nqdir30 to /nqdir0/qdir1/qdir20
                NUnit.Framework.Assert.IsTrue(dfs.Rename(new Path("/nqdir0/nqdir30"), quotaDir2));
                c = dfs.GetContentSummary(quotaDir2);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 5);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 7);
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount(), 6);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 6);
                NUnit.Framework.Assert.AreEqual(14, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
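The walkthrough above reduces to three calls: Mkdirs to create a directory, SetQuota with HdfsConstants.QuotaDontSet in the space slot to change only the namespace quota, and GetContentSummary to read counts and quotas back. Below is a minimal sketch of that round trip (not part of the original test; the method name and the /quotaDemo path are made up for illustration). Note that a directory counts against its own namespace quota.

        // Minimal namespace-quota round trip (illustrative sketch; assumes the
        // same Sharpen-generated HDFS API and a running cluster as in the tests).
        private static void NamespaceQuotaSketch(DistributedFileSystem dfs)
        {
            Path dir = new Path("/quotaDemo");
            NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(dir));
            // namespace quota of 3 = the directory itself plus two children
            dfs.SetQuota(dir, 3, HdfsConstants.QuotaDontSet);
            NUnit.Framework.Assert.AreEqual(3, dfs.GetContentSummary(dir).GetQuota());
            NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path(dir, "a")));
            NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path(dir, "b")));
            try
            {
                // a third child would exceed the namespace quota
                dfs.Mkdirs(new Path(dir, "c"));
                NUnit.Framework.Assert.Fail("expected NSQuotaExceededException");
            }
            catch (NSQuotaExceededException)
            {
            }
        }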
Example #19
        public virtual void TestRestartDualPortDFS()
        {
            Configuration conf = new HdfsConfiguration();

            RunTests(conf, true);
        }
Example #20
        public virtual void TestSpaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // set a smaller block size so that we can test with smaller
            // diskspace quotas
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            try
            {
                int   fileLen     = 1024;
                short replication = 3;
                int   fileSpace   = fileLen * replication;
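                // fileSpace = raw bytes consumed by one fully replicated file
                // (length x replication); the space quotas below are multiples of it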
                // create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // set the quota of /nqdir0/qdir1 to 4 * fileSpace
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 4 * fileSpace);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
                Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 6 * fileSpace);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 6 * fileSpace);
                // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
                Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir21));
                dfs.SetQuota(quotaDir21, HdfsConstants.QuotaDontSet, 2 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                // create a file under nqdir32/fileDir
                DFSTestUtil.CreateFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication
                                       , 0);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // Create a larger file /nqdir0/qdir1/qdir21/nqdir33/file2
                bool hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication
                                           , 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // delete nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Delete(new Path(quotaDir21, "nqdir33"), true));
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // Verify space before the move:
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Move /nqdir0/qdir1/qdir21/nqdir32 to /nqdir0/qdir1/qdir20/nqdir30
                Path dstPath = new Path(quotaDir20, "nqdir30");
                Path srcPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Rename(srcPath, dstPath));
                // verify space after the move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for its parent
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for source for the move
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                Path file2    = new Path(dstPath, "fileDir/file2");
                int  file2Len = 2 * fileLen;
                // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
                DFSTestUtil.CreateFile(dfs, file2, file2Len, replication, 0);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(dstPath, srcPath));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // make sure no intermediate directories left by failed rename
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                // directory should exist
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
                // verify space after the failed move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Test Append :
                // verify space quota
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // verify space before append;
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                OutputStream @out = dfs.Append(file2);
                // appending 1 fileLen should succeed
                @out.Write(new byte[fileLen]);
                @out.Close();
                file2Len += fileLen;
                // after append
                // verify space after append;
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 4 * fileSpace);
                // now increase the quota for quotaDir1
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 5 * fileSpace);
                // Now, appending more than 1 fileLen should result in an error
                @out         = dfs.Append(file2);
                hasException = false;
                try
                {
                    @out.Write(new byte[fileLen + 1024]);
                    @out.Flush();
                    @out.Close();
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                    IOUtils.CloseStream(@out);
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                file2Len += fileLen;
                // after partial append
                // verify space after partial append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace);
                // Test set replication :
                // first reduce the replication
                dfs.SetReplication(file2, (short)(replication - 1));
                // verify that space is reduced by file2Len
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now try to increase the replication and expect an error.
                hasException = false;
                try
                {
                    dfs.SetReplication(file2, (short)(replication + 1));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // verify space consumed remains unchanged.
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now increase the quota for quotaDir1 and quotaDir20
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                // then increasing replication should be ok.
                dfs.SetReplication(file2, (short)(replication + 1));
                // verify increase in space
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace + file2Len);
                // Test HDFS-2053 :
                // Create directory /hdfs-2053
                Path quotaDir2053 = new Path("/hdfs-2053");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053));
                // Create subdirectories /hdfs-2053/{A,B,C}
                Path quotaDir2053_A = new Path(quotaDir2053, "A");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_A));
                Path quotaDir2053_B = new Path(quotaDir2053, "B");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_B));
                Path quotaDir2053_C = new Path(quotaDir2053, "C");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_C));
                // Factors to vary the sizes of test files created in each subdir.
                // The actual factors are not really important but they allow us to create
                // identifiable file sizes per subdir, which helps during debugging.
                int sizeFactorA = 1;
                int sizeFactorB = 2;
                int sizeFactorC = 4;
                // Set space quota for subdirectory C
                dfs.SetQuota(quotaDir2053_C, HdfsConstants.QuotaDontSet, (sizeFactorC + 1) * fileSpace
                             );
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), (sizeFactorC + 1) * fileSpace);
                // Create a file under subdirectory A
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_A);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorA * fileSpace);
                // Create a file under subdirectory B
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_B);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorB * fileSpace);
                // Create a file under subdirectory C (which has a space quota)
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen
                                       , replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorC * fileSpace);
                // Check space consumed for /hdfs-2053
                c = dfs.GetContentSummary(quotaDir2053);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), (sizeFactorA + sizeFactorB
                                                                       + sizeFactorC) * fileSpace);
                NUnit.Framework.Assert.AreEqual(20, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #21
        public virtual void TestAppendRestart()
        {
            Configuration conf = new HdfsConfiguration();

            // Turn off persistent IPC, so that the DFSClient can survive NN restart
            conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
            MiniDFSCluster     cluster = null;
            FSDataOutputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                FileSystem fs      = cluster.GetFileSystem();
                FilePath   editLog = new FilePath(FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0
                                                                                         )[0], NNStorage.GetInProgressEditsFileName(1));
                EnumMap <FSEditLogOpCodes, Holder <int> > counts;
                Path p1 = new Path("/block-boundaries");
                WriteAndAppend(fs, p1, BlockSize, BlockSize);
                counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
                // OP_ADD to create file
                // OP_ADD_BLOCK for first block
                // OP_CLOSE to close file
                // OP_APPEND to reopen file
                // OP_ADD_BLOCK for second block
                // OP_CLOSE to close file
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAdd].held);
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAppend].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAddBlock].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpClose].held);
                Path p2 = new Path("/not-block-boundaries");
                WriteAndAppend(fs, p2, BlockSize / 2, BlockSize);
                counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
                // OP_ADD to create file
                // OP_ADD_BLOCK for first block
                // OP_CLOSE to close file
                // OP_APPEND to re-establish the lease
                // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
                // OP_ADD_BLOCK at the start of the second block
                // OP_CLOSE to close file
                // Total: 1 OP_ADD, 1 OP_APPEND, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs,
                //        and 2 OP_CLOSEs in addition to the ones above
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAdd].held);
                NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAppend].held);
                NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpUpdateBlocks].held
                                                );
                NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpAddBlock].held
                                                );
                NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpClose].held
                                                );
                cluster.RestartNameNode();
                AppendTestUtil.Check(fs, p1, 2 * BlockSize);
                AppendTestUtil.Check(fs, p2, 3 * BlockSize / 2);
            }
            finally
            {
                IOUtils.CloseStream(stream);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
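TestAppendRestart calls a WriteAndAppend helper that this listing omits. Judging from the call sites and the edit-log ops the test expects (one OP_ADD and one OP_APPEND per file), it writes an initial chunk, closes, then reopens for append and writes a second chunk. The sketch below is a reconstruction under that assumption; AppendTestUtil.Write is assumed to exist in the Sharpen'd utilities alongside the Check and NextInt calls used elsewhere in this listing.

        /// <summary>
        /// Sketch of the WriteAndAppend helper used above (a reconstruction, not
        /// the original source): write an initial chunk and close, which logs
        /// OP_ADD/OP_ADD_BLOCK/OP_CLOSE, then reopen for append and write a
        /// second chunk, which logs OP_APPEND and friends.
        /// </summary>
        private static void WriteAndAppend(FileSystem fs, Path p, int lengthForCreate
                                           , int lengthForAppend)
        {
            FSDataOutputStream stm = fs.Create(p);
            try
            {
                AppendTestUtil.Write(stm, 0, lengthForCreate);
                stm.Close();
                stm = fs.Append(p);
                AppendTestUtil.Write(stm, lengthForCreate, lengthForAppend);
                stm.Close();
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
        }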
Example #22
        public virtual void TestQuotaCommands()
        {
            Configuration conf = new HdfsConfiguration();
            // set a smaller block size so that we can test with smaller
            // Space quotas
            int DefaultBlockSize = 512;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DFSAdmin admin            = new DFSAdmin(conf);

            try
            {
                int   fileLen     = 1024;
                short replication = 5;
                long  spaceQuota  = fileLen * replication * 15 / 8;
                // 1: create a directory /test and set its quota to be 3
                Path parent = new Path("/test");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(parent));
                string[] args = new string[] { "-setQuota", "3", parent.ToString() };
                RunCommand(admin, args, false);
                //try setting space quota with a 'binary prefix'
                RunCommand(admin, false, "-setSpaceQuota", "2t", parent.ToString());
                NUnit.Framework.Assert.AreEqual(2L << 40, dfs.GetContentSummary(parent).GetSpaceQuota
                                                    ());
                // set diskspace quota to spaceQuota (9600 bytes here)
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota), parent
                           .ToString());
                // 2: create directory /test/data0
                Path childDir0 = new Path(parent, "data0");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir0));
                // 3: create a file /test/datafile0
                Path childFile0 = new Path(parent, "datafile0");
                DFSTestUtil.CreateFile(fs, childFile0, fileLen, replication, 0);
                // 4: count -q /test
                ContentSummary c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 5: count -q /test/data0
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // check disk space consumed
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                // 6: create a directory /test/data1
                Path childDir1    = new Path(parent, "data1");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(childDir1));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                OutputStream fout;
                // 7: create a file /test/datafile1
                Path childFile1 = new Path(parent, "datafile1");
                hasException = false;
                try
                {
                    fout = dfs.Create(childFile1);
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 8: clear quota /test
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 9: clear quota /test/data0
                RunCommand(admin, new string[] { "-clrQuota", childDir0.ToString() }, false);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // 10: create a file /test/datafile1
                fout = dfs.Create(childFile1, replication);
                // 10.s: but writing fileLen bytes should result in a quota exception
                try
                {
                    fout.Write(new byte[fileLen]);
                    fout.Close();
                    NUnit.Framework.Assert.Fail();
                }
                catch (QuotaExceededException)
                {
                    IOUtils.CloseStream(fout);
                }
                //delete the file
                dfs.Delete(childFile1, false);
                // 9.s: clear diskspace quota
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), -1);
                // now creating childFile1 should succeed
                DFSTestUtil.CreateFile(dfs, childFile1, fileLen, replication, 0);
                // 11: set the quota of /test to be 1
                // HADOOP-5872 - we can set quota even if it is immediately violated
                args = new string[] { "-setQuota", "1", parent.ToString() };
                RunCommand(admin, args, false);
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(fileLen),
                           args[2]);
                // for space quota
                // 12: set the quota of /test/data0 to be 1
                args = new string[] { "-setQuota", "1", childDir0.ToString() };
                RunCommand(admin, args, false);
                // 13: not able to create a directory under data0
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(new Path(childDir0, "in")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount() + c.GetFileCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 1);
                // 14a: set quota on a non-existent directory
                Path nonExistentPath = new Path("/test1");
                NUnit.Framework.Assert.IsFalse(dfs.Exists(nonExistentPath));
                args = new string[] { "-setQuota", "1", nonExistentPath.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.ToString());
                // for space quota
                // 14b: set quota on a file
                NUnit.Framework.Assert.IsTrue(dfs.IsFile(childFile0));
                args[1] = childFile0.ToString();
                RunCommand(admin, args, true);
                // same for space quota
                RunCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
                // 15a: clear quota on a file
                args[0] = "-clrQuota";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 15b: clear quota on a non-existent directory
                args[1] = nonExistentPath.ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 16a: set the quota of /test to be 0
                args = new string[] { "-setQuota", "0", parent.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "0", args[2]);
                // 16b: set the quota of /test to be -1
                args[1] = "-1";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16c: set the quota of /test to be Long.MAX_VALUE+1
                args[1] = (long.MaxValue + 1L).ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16d: set the quota of /test to be a non integer
                args[1] = "33aa1.5";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16e: set space quota with a value larger than Long.MAX_VALUE
                RunCommand(admin, true, "-setSpaceQuota", (long.MaxValue / 1024 / 1024 + 1024) +
                           "m", args[2]);
                // 17:  setQuota by a non-administrator
                string username          = "******";
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new
                                                                                     string[] { "groupyy" });
                string[] args2 = (string[])args.Clone();
                // need final ref for doAs block
                ugi.DoAs(new _PrivilegedExceptionAction_275(this, username, conf, args2, parent));
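                // (the Sharpen-generated _PrivilegedExceptionAction_275 closure
                // reruns the quota commands as this unprivileged user and expects
                // an access error; case 18 below is presumably exercised inside it)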
                // 18: clrQuota by a non-administrator
                // 19: clrQuota on the root directory ("/") should fail
                RunCommand(admin, true, "-clrQuota", "/");
                // 20: setQuota on the root directory ("/") should succeed
                RunCommand(admin, false, "-setQuota", "1000000", "/");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                // 2: create directory /test/data2
                Path childDir2 = new Path(parent, "data2");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir2));
                Path childFile2  = new Path(childDir2, "datafile2");
                Path childFile3  = new Path(childDir2, "datafile3");
                long spaceQuota2 = DefaultBlockSize * replication;
                long fileLen2    = DefaultBlockSize;
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                // clear space quota
                RunCommand(admin, false, "-clrSpaceQuota", childDir2.ToString());
                // create a file that is greater than the size of space quota
                DFSTestUtil.CreateFile(fs, childFile2, fileLen2, replication, 0);
                // now set space quota again. This should succeed
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile3, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // now test the same for root
                Path childFile4 = new Path("/", "datafile2");
                Path childFile5 = new Path("/", "datafile3");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                DFSTestUtil.CreateFile(fs, childFile4, fileLen2, replication, 0);
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile5, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
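RunCommand, called throughout these quota examples in two overloads (flag-first with varargs, and array-plus-flag), is another helper the listing never shows. It is presumably a thin wrapper over DFSAdmin.Run that asserts on the exit code; a sketch of the varargs form under that assumption:

        /// <summary>
        /// Sketch of the RunCommand helper (a reconstruction): run a DFSAdmin
        /// command line and assert that it failed (negative exit code) or
        /// succeeded (non-negative), according to expectError.
        /// </summary>
        private static void RunCommand(DFSAdmin admin, bool expectError, params string
                                       [] args)
        {
            int ret = admin.Run(args);
            if (expectError)
            {
                NUnit.Framework.Assert.AreEqual(-1, ret);
            }
            else
            {
                NUnit.Framework.Assert.IsTrue(ret >= 0);
            }
        }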
Example #23
        /// <summary>
        /// Test case for data corruption during data transmission for
        /// create/write.
        /// </summary>
        /// <remarks>
        /// Test case for data corruption during data transmission for
        /// create/write. To recover from corruption while writing, at
        /// least two replicas are needed.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestCorruptionDuringWrt()
        {
            Configuration conf = new HdfsConfiguration();

            // Set short retry timeouts so this test runs faster
            conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build();
                cluster.WaitActive();
                FileSystem         fs   = cluster.GetFileSystem();
                Path               file = new Path("/test_corruption_file");
                FSDataOutputStream @out = fs.Create(file, true, 8192, (short)3, (long)(128 * 1024
                                                                                       * 1024));
                byte[] data = new byte[65536];
                for (int i = 0; i < 65536; i++)
                {
                    data[i] = unchecked ((byte)(i % 256));
                }
                for (int i_1 = 0; i_1 < 5; i_1++)
                {
                    @out.Write(data, 0, 65535);
                }
                @out.Hflush();
                // corrupt the packet once
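                // ThenReturn(true, false) stubs consecutive calls: corrupt the
                // first packet inspected, then behave normally afterwards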
                Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(true, false);
                Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(true, false);
                for (int i_2 = 0; i_2 < 5; i_2++)
                {
                    @out.Write(data, 0, 65535);
                }
                @out.Close();
                // read should succeed
                FSDataInputStream @in = fs.Open(file);
                for (int c; (c = @in.Read()) != -1;)
                {
                }
                @in.Close();
                // test the retry limit
                @out = fs.Create(file, true, 8192, (short)3, (long)(128 * 1024 * 1024));
                // corrupt the packet once and never fix it.
                Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(true, false);
                Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(false);
                // the client should give up pipeline reconstruction after retries.
                try
                {
                    for (int i_3 = 0; i_3 < 5; i_3++)
                    {
                        @out.Write(data, 0, 65535);
                    }
                    @out.Close();
                    NUnit.Framework.Assert.Fail("Write did not fail");
                }
                catch (IOException ioe)
                {
                    // we should get an ioe
                    DFSClient.Log.Info("Got expected exception", ioe);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(false);
                Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(false);
            }
        }
Example #24
        public virtual void TestBlockAllocationAdjustsUsageConservatively()
        {
            Configuration conf      = new HdfsConfiguration();
            int           BlockSize = 6 * 1024;

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs         = cluster.GetFileSystem();
            DFSAdmin   admin      = new DFSAdmin(conf);
            string     nnAddr     = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
            string     webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;

            System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
            FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);

            try
            {
                Path dir           = new Path("/test");
                Path file1         = new Path("/test/test1");
                Path file2         = new Path("/test/test2");
                bool exceededQuota = false;
                int  QuotaSize     = 3 * BlockSize;
                // total space usage including
                // repl.
                int            FileSize = BlockSize / 2;
                ContentSummary c;
                // Create the directory and set the quota
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(QuotaSize)
                           , dir.ToString());
                // Creating a file should use half the quota
                DFSTestUtil.CreateFile(fs, file1, FileSize, (short)3, 1L);
                DFSTestUtil.WaitReplication(fs, file1, (short)3);
                c = fs.GetContentSummary(dir);
                CheckContentSummary(c, webhdfs.GetContentSummary(dir));
                NUnit.Framework.Assert.AreEqual("Quota is half consumed", QuotaSize / 2, c.GetSpaceConsumed
                                                    ());
                // We can not create the 2nd file because even though the total space
                // used by two files (2 * 3 * BlockSize/2) would fit within the quota (3 * BlockSize)
                // when a block for a file is created the space used is adjusted
                // conservatively (3 * block size, ie assumes a full block is written)
                // which will violate the quota (3 * block size) since we've already
                // used half the quota for the first file.
                try
                {
                    DFSTestUtil.CreateFile(fs, file2, FileSize, (short)3, 1L);
                }
                catch (QuotaExceededException)
                {
                    exceededQuota = true;
                }
                NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
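CheckContentSummary, used here and in example #26 to cross-check the summary served over WebHDFS against the native client's, is also not shown in this listing. In all likelihood it simply compares the two ContentSummary objects; a sketch under that assumption:

        /// <summary>
        /// Sketch of CheckContentSummary (a reconstruction): the summary
        /// computed via WebHDFS should match the one from the DFS client.
        /// ContentSummary.ToString() folds in the counts and quotas, so
        /// comparing the rendered strings covers every field at once.
        /// </summary>
        private static void CheckContentSummary(ContentSummary expected, ContentSummary
                                                computed)
        {
            NUnit.Framework.Assert.AreEqual(expected.ToString(), computed.ToString());
        }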
Example #25
 public override void SetConfiguration(HdfsConfiguration conf)
 {
     conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "NULL");
 }
Example #26
        public virtual void TestMultipleFilesSmallerThanOneBlock()
        {
            Configuration conf      = new HdfsConfiguration();
            int           BlockSize = 6 * 1024;

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsWebhdfsEnabledKey, true);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();

            cluster.WaitActive();
            FileSystem fs         = cluster.GetFileSystem();
            DFSAdmin   admin      = new DFSAdmin(conf);
            string     nnAddr     = conf.Get(DFSConfigKeys.DfsNamenodeHttpAddressKey);
            string     webhdfsuri = WebHdfsFileSystem.Scheme + "://" + nnAddr;

            System.Console.Out.WriteLine("webhdfsuri=" + webhdfsuri);
            FileSystem webhdfs = new Path(webhdfsuri).GetFileSystem(conf);

            try
            {
                Path           dir           = new Path("/test");
                bool           exceededQuota = false;
                ContentSummary c;
                // 1kb file
                // 6kb block
                // 192kb quota
                int FileSize  = 1024;
                int QuotaSize = 32 * (int)fs.GetDefaultBlockSize(dir);
                NUnit.Framework.Assert.AreEqual(6 * 1024, fs.GetDefaultBlockSize(dir));
                NUnit.Framework.Assert.AreEqual(192 * 1024, QuotaSize);
                // Create the dir and set the quota. We need to enable the quota before
                // writing the files as setting the quota afterwards will over-write
                // the cached disk space used for quota verification with the actual
                // amount used as calculated by INode#spaceConsumedInTree.
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(QuotaSize)
                           , dir.ToString());
                // We can create at most 59 files because block allocation is
                // conservative and initially assumes a full block is used, so we
                // need to leave at least 3 * BLOCK_SIZE free space when allocating
                // the last block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
                for (int i = 0; i < 59; i++)
                {
                    Path file = new Path("/test/test" + i);
                    DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
                    DFSTestUtil.WaitReplication(fs, file, (short)3);
                }
                // Should account for all 59 files (almost QUOTA_SIZE)
                c = fs.GetContentSummary(dir);
                CheckContentSummary(c, webhdfs.GetContentSummary(dir));
                NUnit.Framework.Assert.AreEqual("Invalid space consumed", 59 * FileSize * 3, c.GetSpaceConsumed
                                                    ());
                NUnit.Framework.Assert.AreEqual("Invalid space consumed", QuotaSize - (59 * FileSize
                                                                                       * 3), 3 * (fs.GetDefaultBlockSize(dir) - FileSize));
                // Now check that trying to create another file violates the quota
                try
                {
                    Path file = new Path("/test/test59");
                    DFSTestUtil.CreateFile(fs, file, FileSize, (short)3, 1L);
                    DFSTestUtil.WaitReplication(fs, file, (short)3);
                }
                catch (QuotaExceededException)
                {
                    exceededQuota = true;
                }
                NUnit.Framework.Assert.IsTrue("Quota not exceeded", exceededQuota);
                NUnit.Framework.Assert.AreEqual(2, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
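The 59-file bound is pure arithmetic: the first 58 closed files are charged 3 * 1kb each, and allocating one more block is charged conservatively at a full 3 * 6kb, which lands exactly on the 192kb quota; a 60th file would overshoot it. A tiny self-check of those numbers (illustrative only, no HDFS involved):

        // Self-check of the conservative-allocation arithmetic above.
        private static void CheckFileCountBound()
        {
            int fileSize = 1024, blockSize = 6 * 1024, repl = 3, quota = 192 * 1024;
            // 58 closed files plus one conservatively charged block == quota,
            // so the 59th file is admitted...
            NUnit.Framework.Assert.AreEqual(quota, 58 * repl * fileSize + repl * blockSize);
            // ...but a 60th allocation (59 closed files + a full block) exceeds it.
            NUnit.Framework.Assert.IsTrue(59 * repl * fileSize + repl * blockSize > quota);
        }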
Example #27
 public virtual void SetConfiguration(HdfsConfiguration conf)
 {
 }
Example #28
        public virtual void TestDFSAddressConfig()
        {
            Configuration conf = new HdfsConfiguration();

            /*-------------------------------------------------------------------------
            * By default, the DataNode socket address should be localhost (127.0.0.1).
            *------------------------------------------------------------------------*/
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            cluster.WaitActive();
            AList <DataNode> dns            = cluster.GetDataNodes();
            DataNode         dn             = dns[0];
            string           selfSocketAddr = dn.GetXferAddress().ToString();

            System.Console.Out.WriteLine("DN Self Socket Addr == " + selfSocketAddr);
            NUnit.Framework.Assert.IsTrue(selfSocketAddr.Contains("/127.0.0.1:"));

            /*-------------------------------------------------------------------------
            * Shut down the datanodes, reconfigure, and bring them back up.
            * Even if told to use the configuration properties for dfs.datanode,
            * MiniDFSCluster.startDataNodes() should use localhost as the default if
            * the dfs.datanode properties are not set.
            *------------------------------------------------------------------------*/
            for (int i = 0; i < dns.Count; i++)
            {
                MiniDFSCluster.DataNodeProperties dnp = cluster.StopDataNode(i);
                NUnit.Framework.Assert.IsNotNull("Should have been able to stop simulated datanode"
                                                 , dnp);
            }
            conf.Unset(DFSConfigKeys.DfsDatanodeAddressKey);
            conf.Unset(DFSConfigKeys.DfsDatanodeHttpAddressKey);
            conf.Unset(DFSConfigKeys.DfsDatanodeIpcAddressKey);
            cluster.StartDataNodes(conf, 1, true, HdfsServerConstants.StartupOption.Regular,
                                   null, null, null, false, true);
            dns            = cluster.GetDataNodes();
            dn             = dns[0];
            selfSocketAddr = dn.GetXferAddress().ToString();
            System.Console.Out.WriteLine("DN Self Socket Addr == " + selfSocketAddr);
            // assert that default self socket address is 127.0.0.1
            NUnit.Framework.Assert.IsTrue(selfSocketAddr.Contains("/127.0.0.1:"));

            /*-------------------------------------------------------------------------
            * Shut down the datanodes, reconfigure, and bring them back up.
            * This time, modify the dfs.datanode properties and make sure that they
            * are used to configure sockets by MiniDFSCluster.startDataNodes().
            *------------------------------------------------------------------------*/
            for (int i_1 = 0; i_1 < dns.Count; i_1++)
            {
                MiniDFSCluster.DataNodeProperties dnp = cluster.StopDataNode(i_1);
                NUnit.Framework.Assert.IsNotNull("Should have been able to stop simulated datanode"
                                                 , dnp);
            }
            conf.Set(DFSConfigKeys.DfsDatanodeAddressKey, "0.0.0.0:0");
            conf.Set(DFSConfigKeys.DfsDatanodeHttpAddressKey, "0.0.0.0:0");
            conf.Set(DFSConfigKeys.DfsDatanodeIpcAddressKey, "0.0.0.0:0");
            cluster.StartDataNodes(conf, 1, true, HdfsServerConstants.StartupOption.Regular,
                                   null, null, null, false, true);
            dns            = cluster.GetDataNodes();
            dn             = dns[0];
            selfSocketAddr = dn.GetXferAddress().ToString();
            System.Console.Out.WriteLine("DN Self Socket Addr == " + selfSocketAddr);
            // assert that default self socket address is 0.0.0.0
            NUnit.Framework.Assert.IsTrue(selfSocketAddr.Contains("/0.0.0.0:"));
            cluster.Shutdown();
        }
Example #29
        /// <summary>
        /// Test that blocks get re-replicated when some replicas are corrupted,
        /// as long as the number of good replicas is at least the minimum
        /// replication. Corrupt replicas are simulated by planting dummy block
        /// copies, followed by a DN restart so they are detected promptly.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestReplicationWhenBlockCorruption()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetLong(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                FileSystem         fs     = cluster.GetFileSystem();
                FSDataOutputStream create = fs.Create(new Path("/test"));
                fs.SetReplication(new Path("/test"), (short)1);
                create.Write(new byte[1024]);
                create.Close();
                IList <FilePath> nonParticipatedNodeDirs = new AList <FilePath>();
                FilePath         participatedNodeDirs    = null;
                for (int i = 0; i < cluster.GetDataNodes().Count; i++)
                {
                    FilePath storageDir = cluster.GetInstanceStorageDir(i, 0);
                    string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                    FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                    if (data_dir.ListFiles().Length == 0)
                    {
                        nonParticipatedNodeDirs.AddItem(data_dir);
                    }
                    else
                    {
                        participatedNodeDirs = data_dir;
                    }
                }
                string     blockFile = null;
                FilePath[] listFiles = participatedNodeDirs.ListFiles();
                foreach (FilePath file in listFiles)
                {
                    if (file.GetName().StartsWith(Block.BlockFilePrefix) && !file.GetName().EndsWith(
                            "meta"))
                    {
                        blockFile = file.GetName();
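                        // plant zero-length copies of the block and a matching
                        // genstamp-1000 meta file on the DNs that hold no real
                        // replica; on restart their reports flag these as corrupt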
                        foreach (FilePath file1 in nonParticipatedNodeDirs)
                        {
                            file1.Mkdirs();
                            new FilePath(file1, blockFile).CreateNewFile();
                            new FilePath(file1, blockFile + "_1000.meta").CreateNewFile();
                        }
                        break;
                    }
                }
                fs.SetReplication(new Path("/test"), (short)3);
                cluster.RestartDataNodes();
                // restarting makes every DN rescan its storage and report the
                // dummy (corrupt) block copies
                cluster.WaitActive();
                cluster.TriggerBlockReports();
                DFSTestUtil.WaitReplication(fs, new Path("/test"), (short)3);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #30
        public virtual void TestClientTriggeredLeaseRecovery()
        {
            int           Replication = 3;
            Configuration conf        = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 1);
            conf.SetInt(DFSConfigKeys.DfsReplicationKey, Replication);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication
                                                                                   ).Build();

            try
            {
                FileSystem fs  = cluster.GetFileSystem();
                Path       dir = new Path("/wrwelkj");
                TestFileCreationClient.SlowWriter[] slowwriters = new TestFileCreationClient.SlowWriter
                                                                  [10];
                for (int i = 0; i < slowwriters.Length; i++)
                {
                    slowwriters[i] = new TestFileCreationClient.SlowWriter(fs, new Path(dir, "file" +
                                                                                        i));
                }
                try
                {
                    for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
                    {
                        slowwriters[i_1].Start();
                    }
                    Sharpen.Thread.Sleep(1000);
                    // let writers get started
                    //stop a datanode; the affected writers should then need lease recovery.
                    cluster.StopDataNode(AppendTestUtil.NextInt(Replication));
                    //let the slow writers write for a few more seconds
                    System.Console.Out.WriteLine("Wait a few seconds");
                    Sharpen.Thread.Sleep(5000);
                }
                finally
                {
                    for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
                    {
                        if (slowwriters[i_1] != null)
                        {
                            slowwriters[i_1].running = false;
                            slowwriters[i_1].Interrupt();
                        }
                    }
                    for (int i_2 = 0; i_2 < slowwriters.Length; i_2++)
                    {
                        if (slowwriters[i_2] != null)
                        {
                            slowwriters[i_2].Join();
                        }
                    }
                }
                //Verify the file
                System.Console.Out.WriteLine("Verify the file");
                for (int i_3 = 0; i_3 < slowwriters.Length; i_3++)
                {
                    System.Console.Out.WriteLine(slowwriters[i_3].filepath + ": length=" + fs.GetFileStatus
                                                     (slowwriters[i_3].filepath).GetLen());
                    FSDataInputStream @in = null;
                    try
                    {
                        @in = fs.Open(slowwriters[i_3].filepath);
                        for (int j = 0, x; (x = @in.Read()) != -1; j++)
                        {
                            NUnit.Framework.Assert.AreEqual(j, x);
                        }
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
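Finally, TestFileCreationClient.SlowWriter, which this last example drives, is absent from the listing too. The verification loop requires byte j of each file to equal j, so the writer evidently emits sequential byte values with a delay between writes; a sketch along those lines (a reconstruction; the sleep interval and logging are assumptions):

        /// <summary>
        /// Sketch of TestFileCreationClient.SlowWriter (a reconstruction): a
        /// thread that slowly writes sequential byte values, so that byte j of
        /// the file equals j, which is exactly what the verification loop reads
        /// back. The running flag and Interrupt() are how the test stops it.
        /// </summary>
        internal class SlowWriter : Sharpen.Thread
        {
            internal readonly Path filepath;
            internal volatile bool running = true;
            private readonly FileSystem fs;

            internal SlowWriter(FileSystem fs, Path filepath)
            {
                this.fs = fs;
                this.filepath = filepath;
            }

            public override void Run()
            {
                FSDataOutputStream @out = null;
                try
                {
                    @out = fs.Create(filepath);
                    for (int i = 0; running; i++)
                    {
                        @out.Write(i);
                        // hflush so readers (and lease recovery) see the bytes
                        @out.Hflush();
                        Sharpen.Thread.Sleep(100);
                    }
                }
                catch (Exception e)
                {
                    System.Console.Out.WriteLine(filepath + " got exception " + e);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
        }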