/// <exception cref="System.IO.IOException"/>
            private bool Truncate(long newLength, StringBuilder b)
            {
                // Shrink the local copy first so it keeps serving as the expected contents.
                RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
                raf.SetLength(newLength);
                raf.Close();
                // Request the truncate on DFS; false means block recovery on the
                // last block is still in progress.
                bool isReady = dfs.Truncate(file, newLength);
                b.Append(", newLength=").Append(newLength).Append(", isReady=").Append(isReady);
                if (!isReady)
                {
                    TestFileTruncate.CheckBlockRecovery(file, dfs, 100, 300L);
                }
                return isReady;
            }
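For reference, a driver for this helper might shrink the file in steps and reuse the StringBuilder log. The sketch below is hypothetical: it assumes the same enclosing dfs/file/localFile fields plus a BlockSize constant that is not part of this snippet.

// Hypothetical driver (not from the source): truncate in half-block steps,
// letting the helper wait for block recovery whenever isReady is false.
StringBuilder b = new StringBuilder("truncate steps");
for (long n = 2 * BlockSize; n >= 0; n -= BlockSize / 2)
{
    Truncate(n, b);    // appends ", newLength=..., isReady=..." to b
}
System.Console.WriteLine(b);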
 /// <exception cref="System.Exception"/>
 public virtual void Prepare()
 {
     // original size: 2.5 blocks
     DFSTestUtil.CreateFile(this._enclosing.dfs, TestTruncateQuotaUpdate.file,
                            TestTruncateQuotaUpdate.Blocksize * 2 + TestTruncateQuotaUpdate.Blocksize / 2,
                            TestTruncateQuotaUpdate.Replication, 0L);
     SnapshotTestHelper.CreateSnapshot(this._enclosing.dfs, TestTruncateQuotaUpdate.dir, "s1");
     // truncate to 1.5 blocks
     this._enclosing.dfs.Truncate(TestTruncateQuotaUpdate.file,
                                  TestTruncateQuotaUpdate.Blocksize + TestTruncateQuotaUpdate.Blocksize / 2);
     TestFileTruncate.CheckBlockRecovery(TestTruncateQuotaUpdate.file, this._enclosing.dfs);
     // append another block
     DFSTestUtil.AppendFile(this._enclosing.dfs, TestTruncateQuotaUpdate.file, TestTruncateQuotaUpdate.Blocksize);
 }
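Net effect of Prepare(): the file is created at 2.5 blocks, snapshot s1 pins that state, the truncate cuts it to 1.5 blocks, and the append brings it back to 2.5 blocks. A sanity check along these lines (hypothetical, reusing the same TestTruncateQuotaUpdate constants) could follow:

// Hypothetical assertion after Prepare(): 1.5 blocks after the truncate
// plus one appended block puts the live file back at 2.5 blocks.
long expectedLen = TestTruncateQuotaUpdate.Blocksize * 2 + TestTruncateQuotaUpdate.Blocksize / 2;
NUnit.Framework.Assert.AreEqual(expectedLen,
    this._enclosing.dfs.GetFileStatus(TestTruncateQuotaUpdate.file).GetLen());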
        /// <exception cref="System.IO.IOException"/>
        private static void StartRollingUpgrade(Path foo, Path bar, Path file, byte[] data, MiniDFSCluster cluster)
        {
            DistributedFileSystem dfs = cluster.GetFileSystem();

            // start the rolling upgrade: enter safe mode, run the prepare action,
            // then leave safe mode
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            dfs.Mkdirs(bar);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            // truncate the file to a random length in [1, data.Length - 1]
            int newLength = DFSUtil.GetRandom().Next(data.Length - 1) + 1;

            dfs.Truncate(file, newLength);
            TestFileTruncate.CheckBlockRecovery(file, dfs);
            AppendTestUtil.CheckFullFile(dfs, file, newLength, data);
        }
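This helper only covers the prepare side of the upgrade. A matching finish step, not part of this snippet, would look roughly like the sketch below, assuming the Sharpen bindings expose a Finalize action alongside the Prepare action used above.

// Hypothetical counterpart (assumes RollingUpgradeAction.Finalize exists
// in these bindings): finalize the upgrade once the truncate is verified.
private static void FinalizeRollingUpgrade(MiniDFSCluster cluster)
{
    DistributedFileSystem dfs = cluster.GetFileSystem();
    dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
}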
        /// <exception cref="System.Exception"/>
        private void TestTruncate(long newLength, long expectedDiff, long expectedUsage)
        {
            // before doing the real truncation, make sure the computation is correct
            INodesInPath iip      = fsdir.GetINodesInPath4Write(file.ToString());
            INodeFile    fileNode = iip.GetLastINode().AsFile();

            // Record the file state in the latest snapshot's diff so the quota
            // computation below accounts for snapshot-referenced blocks.
            fileNode.RecordModification(iip.GetLatestSnapshotId(), true);
            long diff = fileNode.ComputeQuotaDeltaForTruncate(newLength);

            NUnit.Framework.Assert.AreEqual(expectedDiff, diff);
            // do the real truncation
            dfs.Truncate(file, newLength);
            // wait for truncate to finish
            TestFileTruncate.CheckBlockRecovery(file, dfs);
            INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
            long spaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace();
            long diskUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Disk);

            NUnit.Framework.Assert.AreEqual(expectedUsage, spaceUsed);
            NUnit.Framework.Assert.AreEqual(expectedUsage, diskUsed);
        }
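An illustrative invocation (parameter values are hypothetical, not taken from the source): with no snapshot holding the truncated block, cutting one full block should change the quota by -Blocksize * Replication and leave newLen * Replication bytes consumed.

// Hypothetical call, assuming the file starts at 2.5 blocks and no
// snapshot references the block being removed.
long oldLen = Blocksize * 2 + Blocksize / 2;   // 2.5 blocks
long newLen = oldLen - Blocksize;              // drop one full block
TestTruncate(newLen, -Blocksize * Replication, newLen * Replication);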
        public virtual void TestMultipleAppendsDuringCatchupTailing()
        {
            Configuration conf = new Configuration();

            // Set a long edits tailing period and disable automatic log rolling,
            // so the test can control the ingest of edits by the standby.
            conf.Set(DFSConfigKeys.DfsHaTaileditsPeriodKey, "5000");
            conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, -1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(3).Build();
            FileSystem fs = null;

            try
            {
                cluster.TransitionToActive(0);
                fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
                Path   fileToAppend   = new Path("/FileToAppend");
                Path   fileToTruncate = new Path("/FileToTruncate");
                byte[] data           = new byte[1 << 16];
                DFSUtil.GetRandom().NextBytes(data);
                int[] appendPos   = AppendTestUtil.RandomFilePartition(data.Length, Count);
                int[] truncatePos = AppendTestUtil.RandomFilePartition(data.Length, 1);
                // Create file, write some data, and hflush so that the first
                // block is in the edit log prior to roll.
                FSDataOutputStream @out = CreateAndHflush(fs, fileToAppend, data, appendPos[0]);
                FSDataOutputStream out4Truncate = CreateAndHflush(fs, fileToTruncate, data, data.Length);
                // Let the StandbyNode catch the creation of the file.
                cluster.GetNameNode(0).GetRpcServer().RollEditLog();
                cluster.GetNameNode(1).GetNamesystem().GetEditLogTailer().DoTailEdits();
                @out.Close();
                out4Truncate.Close();
                // Append and re-close a few times, so that many block entries are queued.
                for (int i = 0; i < Count; i++)
                {
                    int end = i < Count - 1 ? appendPos[i + 1] : data.Length;
                    @out = fs.Append(fileToAppend);
                    @out.Write(data, appendPos[i], end - appendPos[i]);
                    @out.Close();
                }
                bool isTruncateReady = fs.Truncate(fileToTruncate, truncatePos[0]);
                // Ensure that blocks have been reported to the SBN ahead of the edits
                // arriving.
                cluster.TriggerBlockReports();
                // Failover the current standby to active.
                cluster.ShutdownNameNode(0);
                cluster.TransitionToActive(1);
                // Check that fsck doesn't detect any bad blocks on the SBN.
                int rc = ToolRunner.Run(new DFSck(cluster.GetConfiguration(1)), new string[] { "/", "-files", "-blocks" });
                NUnit.Framework.Assert.AreEqual(0, rc);
                NUnit.Framework.Assert.AreEqual("CorruptBlocks should be empty.", 0, cluster.GetNameNode(1).GetNamesystem().GetCorruptReplicaBlocks());
                AppendTestUtil.CheckFullFile(fs, fileToAppend, data.Length, data, fileToAppend.ToString());
                if (!isTruncateReady)
                {
                    TestFileTruncate.CheckBlockRecovery(fileToTruncate, cluster.GetFileSystem(1));
                }
                AppendTestUtil.CheckFullFile(fs, fileToTruncate, truncatePos[0], data, fileToTruncate.ToString());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                }
                if (null != fs)
                {
                    fs.Close();
                }
            }
        }
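The test depends on a CreateAndHflush helper that is not shown in this snippet. A plausible reconstruction (an assumption, using only the standard FileSystem.Create, Write, and Hflush calls) is:

// Plausible reconstruction of the missing helper (not from this snippet):
// create the file, write the first `length` bytes, and hflush so the data
// reaches the datanode pipeline while the stream stays open.
private static FSDataOutputStream CreateAndHflush(FileSystem fs, Path file, byte[] data, int length)
{
    FSDataOutputStream @out = fs.Create(file);
    @out.Write(data, 0, length);
    @out.Hflush();
    return @out;
}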