        // Truncate a two-block file down to one block at an exact block
        // boundary, so no block recovery should be needed. "cluster" and
        // "fc" (a FileContext) are fields of the enclosing test class.
        public virtual void TestTruncate()
        {
            short repl               = 3;
            int   blockSize          = 1024;
            int   numOfBlocks        = 2;
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path dir  = GetTestRootPath(fc, "test/hadoop");
            Path file = GetTestRootPath(fc, "test/hadoop/file");

            byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
            FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);
            int  newLength = blockSize;
            bool isReady   = fc.Truncate(file, newLength);

            NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
            FileStatus fileStatus = fc.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(newLength, fileStatus.GetLen());
            AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());
            ContentSummary cs = fs.GetContentSummary(dir);

            NUnit.Framework.Assert.AreEqual("Bad disk space usage", cs.GetSpaceConsumed(), newLength
                                            * repl);
            NUnit.Framework.Assert.IsTrue(fs.Delete(dir, true));
        }
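When Truncate() cuts a file on a non-block boundary it returns false and HDFS recovers the last block in the background; the call above returns true only because the new length is an exact block boundary. A minimal sketch of how a caller can wait for that recovery, assuming the translated DistributedFileSystem exposes IsFileClosed() as the Java API does (the helper name and polling interval are assumptions, not part of the tests on this page):

        private static void TruncateAndWait(DistributedFileSystem fs, Path file, long newLength)
        {
            bool isReady = fs.Truncate(file, newLength);
            if (!isReady)
            {
                // Block recovery is asynchronous; poll until the NameNode
                // has closed the file again at its new length.
                while (!fs.IsFileClosed(file))
                {
                    System.Threading.Thread.Sleep(100);
                }
            }
        }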
Example #2
 /// <summary>
 /// The same truncate scenario, run against the FileSystem built from
 /// GetProxiedFSConf(). IsLocalFS(), GetProxiedFSConf() and
 /// GetProxiedFSTestDir() are helpers on the enclosing test base class;
 /// the local file system is skipped.
 /// </summary>
 /// <exception cref="System.Exception"/>
 private void TestTruncate()
 {
     if (!IsLocalFS())
     {
         short      repl        = 3;
         int        blockSize   = 1024;
         int        numOfBlocks = 2;
         FileSystem fs          = FileSystem.Get(GetProxiedFSConf());
         fs.Mkdirs(GetProxiedFSTestDir());
         Path   file = new Path(GetProxiedFSTestDir(), "foo.txt");
         byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
         FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);
         int  newLength = blockSize;
         bool isReady   = fs.Truncate(file, newLength);
         NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
         FileStatus fileStatus = fs.GetFileStatus(file);
         NUnit.Framework.Assert.AreEqual(newLength, fileStatus.GetLen());
         AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());
         fs.Close();
     }
 }
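Both examples build their test payload with FileSystemTestHelper.GetFileData and write it with FileSystemTestHelper.CreateFile. A sketch of equivalent behavior, useful when those helpers are not available (the name MakeFileData and the exact fill pattern are assumptions; any deterministic pattern works for the length and content checks):

 private static byte[] MakeFileData(int numOfBlocks, int blockSize)
 {
     // numOfBlocks * blockSize bytes of a repeating, deterministic pattern,
     // so verifiers such as CheckFullFile can recompute the expected bytes.
     byte[] data = new byte[numOfBlocks * blockSize];
     for (int i = 0; i < data.Length; i++)
     {
         data[i] = (byte)(i % 10);
     }
     return data;
 }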
Example #3
        // Interleave appends and a truncate on an HA cluster while the
        // standby is tailing edits, then fail over and verify that no
        // blocks are corrupt. "Count" is a constant on the enclosing
        // test class: the number of append/close rounds to perform.
        public virtual void TestMultipleAppendsDuringCatchupTailing()
        {
            Configuration conf = new Configuration();

            // Use a long edits-tailing period and disable automatic log
            // rolling, so the test controls when the standby ingests edits.
            conf.Set(DFSConfigKeys.DfsHaTaileditsPeriodKey, "5000");
            conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, -1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology())
                .NumDataNodes(3)
                .Build();
            FileSystem fs = null;

            try
            {
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                Path   fileToAppend   = new Path("/FileToAppend");
                Path   fileToTruncate = new Path("/FileToTruncate");
                byte[] data           = new byte[1 << 16];
                DFSUtil.GetRandom().NextBytes(data);
                int[] appendPos   = AppendTestUtil.RandomFilePartition(data.Length, Count);
                int[] truncatePos = AppendTestUtil.RandomFilePartition(data.Length, 1);
                // Create file, write some data, and hflush so that the first
                // block is in the edit log prior to roll.
                FSDataOutputStream @out         = CreateAndHflush(fs, fileToAppend, data, appendPos[0]);
                FSDataOutputStream out4Truncate = CreateAndHflush(fs, fileToTruncate, data,
                                                                  data.Length);
                // Let the standby NameNode catch up with the creation of the files.
                cluster.GetNameNode(0).GetRpcServer().RollEditLog();
                cluster.GetNameNode(1).GetNamesystem().GetEditLogTailer().DoTailEdits();
                @out.Close();
                out4Truncate.Close();
                // Append and re-close a few times, so that many block entries are queued.
                for (int i = 0; i < Count; i++)
                {
                    int end = i < Count - 1 ? appendPos[i + 1] : data.Length;
                    @out = fs.Append(fileToAppend);
                    @out.Write(data, appendPos[i], end - appendPos[i]);
                    @out.Close();
                }
                // Truncate at a random offset; this is rarely a block
                // boundary, so last-block recovery will usually be needed
                // and isTruncateReady will be false.
                bool isTruncateReady = fs.Truncate(fileToTruncate, truncatePos[0]);
                // Ensure that blocks have been reported to the SBN ahead of the edits
                // arriving.
                cluster.TriggerBlockReports();
                // Failover the current standby to active.
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                // Check that fsck doesn't detect any bad blocks on the new active.
                int rc = ToolRunner.Run(new DFSck(cluster.GetConfiguration(1)),
                                        new string[] { "/", "-files", "-blocks" });
                NUnit.Framework.Assert.AreEqual(0, rc);
                NUnit.Framework.Assert.AreEqual("CorruptBlocks should be empty.", 0, cluster.GetNameNode
                                                    (1).GetNamesystem().GetCorruptReplicaBlocks());
                AppendTestUtil.CheckFullFile(fs, fileToAppend, data.Length, data,
                                             fileToAppend.ToString());
                if (!isTruncateReady)
                {
                    // The truncate required block recovery; wait for it to
                    // complete on the new active before verifying contents.
                    TestFileTruncate.CheckBlockRecovery(fileToTruncate, cluster.GetFileSystem(1));
                }
                AppendTestUtil.CheckFullFile(fs, fileToTruncate, truncatePos[0], data,
                                             fileToTruncate.ToString());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                }
                if (null != fs)
                {
                    fs.Close();
                }
            }
        }
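Example #3 relies on a CreateAndHflush helper that is not shown on this page. A minimal sketch consistent with how it is called above (the create options, such as buffer size, replication and block size, are assumptions):

        private static FSDataOutputStream CreateAndHflush(FileSystem fs, Path file,
                                                          byte[] data, int length)
        {
            FSDataOutputStream @out = fs.Create(file);
            @out.Write(data, 0, length);
            // Hflush pushes the written bytes into the DataNode pipeline, so
            // the first block exists before the edit log is rolled.
            @out.Hflush();
            return @out;
        }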