Example #1
0
        /// <summary>
        /// Block recovery with one Finalized and one Rwr replica: only the
        /// datanode holding the finalized replica may be asked to update its
        /// replica; the Rwr holder must never be contacted. Checked both when
        /// the two replicas have equal lengths and when they differ.
        /// </summary>
        public virtual void TestFinalizedRwrReplicas()
        {
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + GenericTestUtils.GetMethodName());
            }

            // Case 1: the rwr and finalized replicas have the same length.
            ReplicaRecoveryInfo finalizedReplica = new ReplicaRecoveryInfo(
                BlockId, ReplicaLen1, GenStamp - 1, HdfsServerConstants.ReplicaState.Finalized);
            ReplicaRecoveryInfo rwrReplica = new ReplicaRecoveryInfo(
                BlockId, ReplicaLen1, GenStamp - 2, HdfsServerConstants.ReplicaState.Rwr);
            InterDatanodeProtocol finalizedDn = Org.Mockito.Mockito.Mock<InterDatanodeProtocol>();
            InterDatanodeProtocol rwrDn = Org.Mockito.Mockito.Mock<InterDatanodeProtocol>();

            TestSyncReplicas(finalizedReplica, rwrReplica, finalizedDn, rwrDn, ReplicaLen1);
            // Only the finalized-replica holder is updated; the Rwr holder is skipped.
            Org.Mockito.Mockito.Verify(finalizedDn).UpdateReplicaUnderRecovery(
                block, RecoveryId, BlockId, ReplicaLen1);
            Org.Mockito.Mockito.Verify(rwrDn, Org.Mockito.Mockito.Never()).UpdateReplicaUnderRecovery(
                block, RecoveryId, BlockId, ReplicaLen1);

            // Case 2: the rbw replica has a different length from the finalized one.
            finalizedReplica = new ReplicaRecoveryInfo(
                BlockId, ReplicaLen1, GenStamp - 1, HdfsServerConstants.ReplicaState.Finalized);
            rwrReplica = new ReplicaRecoveryInfo(
                BlockId, ReplicaLen2, GenStamp - 2, HdfsServerConstants.ReplicaState.Rbw);
            finalizedDn = Org.Mockito.Mockito.Mock<InterDatanodeProtocol>();
            rwrDn = Org.Mockito.Mockito.Mock<InterDatanodeProtocol>();

            TestSyncReplicas(finalizedReplica, rwrReplica, finalizedDn, rwrDn, ReplicaLen1);
            Org.Mockito.Mockito.Verify(finalizedDn).UpdateReplicaUnderRecovery(
                block, RecoveryId, BlockId, ReplicaLen1);
            Org.Mockito.Mockito.Verify(rwrDn, Org.Mockito.Mockito.Never()).UpdateReplicaUnderRecovery(
                block, RecoveryId, BlockId, ReplicaLen1);
        }
Example #2
0
        /// <summary>
        /// Block recovery with two Finalized replicas: when their lengths agree
        /// both datanodes must be told to update their replicas; when the
        /// finalized lengths differ, recovery must fail with an IOException.
        /// </summary>
        public virtual void TestFinalizedReplicas()
        {
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + GenericTestUtils.GetMethodName());
            }
            // Case 1: both finalized replicas have the same length.
            ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BlockId, ReplicaLen1, GenStamp
                                                                   - 1, HdfsServerConstants.ReplicaState.Finalized);
            ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BlockId, ReplicaLen1, GenStamp
                                                                   - 2, HdfsServerConstants.ReplicaState.Finalized);
            InterDatanodeProtocol dn1 = Org.Mockito.Mockito.Mock <InterDatanodeProtocol>();
            InterDatanodeProtocol dn2 = Org.Mockito.Mockito.Mock <InterDatanodeProtocol>();

            TestSyncReplicas(replica1, replica2, dn1, dn2, ReplicaLen1);
            // Both replica holders must be updated to the agreed length.
            Org.Mockito.Mockito.Verify(dn1).UpdateReplicaUnderRecovery(block, RecoveryId, BlockId
                                                                       , ReplicaLen1);
            Org.Mockito.Mockito.Verify(dn2).UpdateReplicaUnderRecovery(block, RecoveryId, BlockId
                                                                       , ReplicaLen1);
            // Case 2: two finalized replicas have different lengths; recovery
            // must be rejected with an IOException.
            replica1 = new ReplicaRecoveryInfo(BlockId, ReplicaLen1, GenStamp - 1, HdfsServerConstants.ReplicaState
                                               .Finalized);
            replica2 = new ReplicaRecoveryInfo(BlockId, ReplicaLen2, GenStamp - 2, HdfsServerConstants.ReplicaState
                                               .Finalized);
            try
            {
                TestSyncReplicas(replica1, replica2, dn1, dn2, ReplicaLen1);
                // Fixed typo in the failure message: "lengthes" -> "lengths".
                NUnit.Framework.Assert.Fail("Two finalized replicas should not have different lengths!");
            }
            catch (IOException e)
            {
                NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Inconsistent size of finalized replicas. "
                                                                   ));
            }
        }
        /// <summary>
        /// Verifies that the block-length hint supplied at file creation reaches
        /// the datanode; FsDatasetChecker#createRbw asserts during block creation
        /// and fails the test if it does not.
        /// </summary>
        public virtual void BlockLengthHintIsPropagated()
        {
            string        MethodName = GenericTestUtils.GetMethodName();
            Path          path       = new Path("/" + MethodName + ".dat");
            Configuration conf       = new HdfsConfiguration();

            TestWriteBlockGetsBlockLengthHint.FsDatasetChecker.SetFactory(conf);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockLength);
            // -1 disables the periodic datanode block scanner for this test.
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                // FsDatasetChecker#createRbw asserts during block creation if the test
                // fails.
                // 4096 is the buffer size (this comment was displaced into the
                // finally block in the original source).
                DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 4096, ExpectedBlockLength,
                                       ExpectedBlockLength, (short)1, unchecked ((int)(0x1BAD5EED)));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
0
 /// <summary>
 /// Rolling upgrade with rollback: delete a file so its block moves to the
 /// datanode trash, roll back the upgrade, and verify the block is restored
 /// from trash with identical file contents.
 /// </summary>
 /// <exception cref="System.Exception"/>
 public virtual void TestDatanodeRollingUpgradeWithRollback()
 {
     try
     {
         StartCluster();
         // Create files in DFS.
         Path testFile1 = new Path("/" + GenericTestUtils.GetMethodName() + ".01.dat");
         DFSTestUtil.CreateFile(fs, testFile1, FileSize, ReplFactor, Seed);
         string fileContents1 = DFSTestUtil.ReadFile(fs, testFile1);
         StartRollingUpgrade();
         FilePath blockFile = GetBlockForFile(testFile1, true);
         FilePath trashFile = GetTrashFileForBlock(blockFile, false);
         DeleteAndEnsureInTrash(testFile1, blockFile, trashFile);
         // Now perform a rollback to restore DFS to the pre-rollback state.
         RollbackRollingUpgrade();
         // Ensure that block was restored from trash
         EnsureTrashRestored(blockFile, trashFile);
         // Ensure that files exist and restored file contents are the same.
         // Use an NUnit assertion instead of Debug.Assert: Debug.Assert is
         // compiled out of release builds, which would silently skip the check.
         NUnit.Framework.Assert.IsTrue(fs.Exists(testFile1));
         string fileContents2 = DFSTestUtil.ReadFile(fs, testFile1);
         Assert.AssertThat(fileContents1, IS.Is(fileContents2));
     }
     finally
     {
         ShutdownCluster();
     }
 }
Example #5
0
        /// <summary>
        /// Writes a file to RAM disk and verifies short-circuit reads are used:
        /// both total bytes read and short-circuit bytes read must equal the
        /// number of bytes requested.
        /// </summary>
        public virtual void TestRamDiskShortCircuitRead()
        {
            StartUpCluster(ReplFactor, new StorageType[] { StorageType.RamDisk, StorageType.Default }, 2 * BlockSize - 1, true);
            // 1 replica + delta, SCR read
            string MethodName = GenericTestUtils.GetMethodName();
            int    Seed       = unchecked ((int)(0xFADED));
            Path   path       = new Path("/" + MethodName + ".dat");

            MakeRandomTestFile(path, BlockSize, true, Seed);
            EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
            // Sleep for a short time to allow the lazy writer thread to do its job
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            //assertThat(verifyReadRandomFile(path, BLOCK_SIZE, SEED), is(true));

            // Verify SCR read counters. Open the stream once inside the try
            // block; the original opened it twice and leaked the first handle.
            FSDataInputStream fis = null;
            try
            {
                fis = fs.Open(path);
                byte[] buf = new byte[BufferLength];
                fis.Read(0, buf, 0, BufferLength);
                HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
                NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalBytesRead
                                                    ());
                NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead
                                                    ());
            }
            finally
            {
                if (fis != null)
                {
                    fis.Close();
                }
            }
        }
Example #6
0
        /// <summary>
        /// Writes a 5-block lazy-persist file to a cluster whose RAM disk holds
        /// only ~2 blocks and verifies the overflow blocks fell back to DEFAULT
        /// (disk) storage.
        /// </summary>
        public virtual void TestFallbackToDiskPartial()
        {
            StartUpCluster(true, 2);
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path       = new Path("/" + MethodName + ".dat");

            MakeTestFile(path, BlockSize * 5, true);
            // Sleep for a short time to allow the lazy writer thread to do its job
            Sharpen.Thread.Sleep(6 * LazyWriterIntervalSec * 1000);
            TriggerBlockReport();
            int           numBlocksOnRamDisk = 0;
            int           numBlocksOnDisk    = 0;
            long          fileLength         = client.GetFileInfo(path.ToString()).GetLen();
            LocatedBlocks locatedBlocks      = client.GetLocatedBlocks(path.ToString(), 0, fileLength
                                                                       );

            // Count blocks by the storage type of their first replica.
            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                if (locatedBlock.GetStorageTypes()[0] == StorageType.RamDisk)
                {
                    numBlocksOnRamDisk++;
                }
                else if (locatedBlock.GetStorageTypes()[0] == StorageType.Default)
                {
                    numBlocksOnDisk++;
                }
            }
            // Since eviction is asynchronous, depending on the timing of eviction
            // wrt writes, we may get 2 or less blocks on RAM disk.
            // NUnit assertions replace Debug.Assert, which is compiled out of
            // release builds and would silently skip these checks.
            NUnit.Framework.Assert.IsTrue(numBlocksOnRamDisk <= 2);
            NUnit.Framework.Assert.IsTrue(numBlocksOnDisk >= 3);
        }
Example #7
0
        /// <summary>
        /// Starts several reader threads against a single RAM-disk resident file
        /// and verifies that no reader observed a failure.
        /// </summary>
        public virtual void TestConcurrentRead()
        {
            StartUpCluster(true, 2);
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path1      = new Path("/" + MethodName + ".dat");
            int    Seed       = unchecked ((int)(0xFADED));
            int    NumTasks   = 5;

            MakeRandomTestFile(path1, BlockSize, true, Seed);
            EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
            //Read from multiple clients
            CountDownLatch latch          = new CountDownLatch(NumTasks);
            AtomicBoolean  testFailed     = new AtomicBoolean(false);
            // NOTE(review): _Runnable_564 is the Sharpen-generated anonymous
            // reader class; presumably it sets testFailed on a verification
            // error and counts down the latch -- confirm in the generated class.
            Runnable       readerRunnable = new _Runnable_564(this, path1, Seed, testFailed, latch);

            Sharpen.Thread[] threads = new Sharpen.Thread[NumTasks];
            for (int i = 0; i < NumTasks; i++)
            {
                threads[i] = new Sharpen.Thread(readerRunnable);
                threads[i].Start();
            }
            // Give the readers a head start, then wait for every thread to exit.
            Sharpen.Thread.Sleep(500);
            for (int i_1 = 0; i_1 < NumTasks; i_1++)
            {
                Uninterruptibles.JoinUninterruptibly(threads[i_1]);
            }
            NUnit.Framework.Assert.IsFalse(testFailed.Get());
        }
Example #8
0
        /// <summary>
        /// On a cluster with space for only one full block + meta, writing to a
        /// second concurrently-open file must fail block allocation with a
        /// RemoteException (registered via the 'thrown' ExpectedException rule).
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestWithLimitedSpace()
        {
            // Cluster with just enough space for a full block + meta.
            StartCluster(BlockSize, 1, 2 * BlockSize - 1);
            string methodName = GenericTestUtils.GetMethodName();
            Path   file1      = new Path("/" + methodName + ".01.dat");
            Path   file2      = new Path("/" + methodName + ".02.dat");
            // Create two files.
            FSDataOutputStream os1 = null;
            FSDataOutputStream os2 = null;

            try
            {
                os1 = fs.Create(file1);
                os2 = fs.Create(file2);
                // Write one byte to the first file.
                byte[] data = new byte[1];
                os1.Write(data);
                os1.Hsync();
                // Try to write one byte to the second file.
                // The block allocation must fail.
                thrown.Expect(typeof(RemoteException));
                os2.Write(data);
                os2.Hsync();
            }
            finally
            {
                if (os1 != null)
                {
                    os1.Close();
                }
                if (os2 != null)
                {
                    // os2 was leaked in the original code. Swallow close errors
                    // so they cannot mask the expected RemoteException.
                    try
                    {
                        os2.Close();
                    }
                    catch (IOException)
                    {
                    }
                }
            }
        }
Example #9
0
        /// <summary>
        /// Similar to BlockReport_03() but works with two DNs
        /// Test writes a file and closes it.
        /// </summary>
        /// <remarks>
        /// Similar to BlockReport_03() but works with two DNs
        /// Test writes a file and closes it.
        /// The second datanode is started in the cluster.
        /// As soon as the replication process is completed test finds a block from
        /// the second DN and sets its GS to be &lt; of original one.
        /// this is the markBlockAsCorrupt case 3 so we expect one pending deletion
        /// Block report is forced and the check for # of corrupted blocks is performed.
        /// Another block is chosen and its length is set to a lesser than original.
        /// A check for another corrupted block is performed after yet another
        /// BlockReport
        /// </remarks>
        /// <exception cref="System.IO.IOException">in case of an error</exception>
        /// <exception cref="System.Exception"/>
        public virtual void BlockReport_07()
        {
            string MethodName = GenericTestUtils.GetMethodName();
            Path   filePath   = new Path("/" + MethodName + ".dat");
            int    DnN1       = DnN0 + 1;

            // write file and start second node to be "older" than the original
            WriteFile(MethodName, FileSize, filePath);
            StartDNandWait(filePath, true);
            // all blocks belong to the same file, hence same BP
            DataNode             dn     = cluster.GetDataNodes()[DnN1];
            string               poolId = cluster.GetNamesystem().GetBlockPoolId();
            DatanodeRegistration dnR    = dn.GetDNRegistrationForBP(poolId);

            // First report -- per the remarks this corrupts a generation stamp
            // (third flag); presumably GetBlockReports(dn, poolId, true, false)
            // does that -- confirm against GetBlockReports.
            StorageBlockReport[] reports = GetBlockReports(dn, poolId, true, false);
            SendBlockReports(dnR, poolId, reports);
            PrintStats();
            Assert.AssertThat("Wrong number of corrupt blocks", cluster.GetNamesystem().GetCorruptReplicaBlocks
                                  (), IS.Is(0L));
            Assert.AssertThat("Wrong number of PendingDeletion blocks", cluster.GetNamesystem
                                  ().GetPendingDeletionBlocks(), IS.Is(1L));
            Assert.AssertThat("Wrong number of PendingReplication blocks", cluster.GetNamesystem
                                  ().GetPendingReplicationBlocks(), IS.Is(0L));
            // Second report -- per the remarks this shortens a block length
            // (fourth flag), which must surface as one corrupt replica.
            reports = GetBlockReports(dn, poolId, false, true);
            SendBlockReports(dnR, poolId, reports);
            PrintStats();
            Assert.AssertThat("Wrong number of corrupt blocks", cluster.GetNamesystem().GetCorruptReplicaBlocks
                                  (), IS.Is(1L));
            Assert.AssertThat("Wrong number of PendingDeletion blocks", cluster.GetNamesystem
                                  ().GetPendingDeletionBlocks(), IS.Is(1L));
            Assert.AssertThat("Wrong number of PendingReplication blocks", cluster.GetNamesystem
                                  ().GetPendingReplicationBlocks(), IS.Is(0L));
            // NOTE(review): this trailing PrintStats() duplicates the one above
            // and looks redundant.
            PrintStats();
        }
Example #10
0
        /// <summary>
        /// After append()/write()/hflush() every datanode in the pipeline must
        /// hold an RBW (replica-being-written) replica of the last block.
        /// </summary>
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            // Close the stream even when an assertion below throws; the original
            // only closed it on the success path and leaked it on failure.
            try
            {
                ofs.WriteBytes("Some more stuff to write");
                ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
                IList <LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString
                                                                                         (), FileSize - 1, FileSize).GetLocatedBlocks();
                string bpid = cluster.GetNamesystem().GetBlockPoolId();

                foreach (DataNode dn in cluster.GetDataNodes())
                {
                    Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId
                                                                       ());
                    NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r !=
                                                  null);
                    NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()"
                                                    , HdfsServerConstants.ReplicaState.Rbw, r.GetState());
                }
            }
            finally
            {
                ofs.Close();
            }
        }
Example #11
0
        /// <summary>
        /// Runs several concurrent writers on a thread pool, each creating
        /// multiple lazy-persist files, then verifies that no writer flagged a
        /// failure.
        /// </summary>
        public virtual void TestConcurrentWrites()
        {
            StartUpCluster(true, 9);
            string MethodName     = GenericTestUtils.GetMethodName();
            int    Seed           = unchecked ((int)(0xFADED));
            int    NumWriters     = 4;
            int    NumWriterPaths = 5;

            // Allocate the per-writer rows once; the original used an array
            // initializer and then immediately overwrote every row in the loop,
            // allocating each inner array twice.
            Path[][] paths = new Path[NumWriters][];
            for (int i = 0; i < NumWriters; i++)
            {
                paths[i] = new Path[NumWriterPaths];
                for (int j = 0; j < NumWriterPaths; j++)
                {
                    paths[i][j] = new Path("/" + MethodName + ".Writer" + i + ".File." + j + ".dat");
                }
            }
            CountDownLatch  latch      = new CountDownLatch(NumWriters);
            AtomicBoolean   testFailed = new AtomicBoolean(false);
            ExecutorService executor   = Executors.NewFixedThreadPool(ThreadpoolSize);

            for (int i_1 = 0; i_1 < NumWriters; i_1++)
            {
                Runnable writer = new TestLazyPersistFiles.WriterRunnable(this, i_1, paths[i_1],
                                                                          Seed, latch, testFailed);
                executor.Execute(writer);
            }
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            TriggerBlockReport();
            // Stop executor from adding new tasks to finish existing threads in queue
            latch.Await();
            Assert.AssertThat(testFailed.Get(), IS.Is(false));
        }
Example #12
0
        /// <summary>
        /// Verifies that RAM-disk eviction is LRU: the first NumPaths files are
        /// read in a random order, then an equal number of new files is created;
        /// the old files must be evicted to disk in exactly the order they were
        /// read. RAM-disk JMX metrics are checked at the end.
        /// </summary>
        public virtual void TestRamDiskEvictionIsLru()
        {
            int NumPaths = 5;

            StartUpCluster(true, NumPaths + EvictionLowWatermark);
            string MethodName = GenericTestUtils.GetMethodName();

            Path[] paths = new Path[NumPaths * 2];
            for (int i = 0; i < paths.Length; i++)
            {
                paths[i] = new Path("/" + MethodName + "." + i + ".dat");
            }
            // First half of the paths become the initial RAM-disk resident files.
            for (int i_1 = 0; i_1 < NumPaths; i_1++)
            {
                MakeTestFile(paths[i_1], BlockSize, true);
            }
            // Sleep for a short time to allow the lazy writer thread to do its job.
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            for (int i_2 = 0; i_2 < NumPaths; ++i_2)
            {
                EnsureFileReplicasOnStorageType(paths[i_2], StorageType.RamDisk);
            }
            // Open the files for read in a random order.
            AList <int> indexes = new AList <int>(NumPaths);

            for (int i_3 = 0; i_3 < NumPaths; ++i_3)
            {
                indexes.AddItem(i_3);
            }
            Collections.Shuffle(indexes);
            for (int i_4 = 0; i_4 < NumPaths; ++i_4)
            {
                Log.Info("Touching file " + paths[indexes[i_4]]);
                DFSTestUtil.ReadFile(fs, paths[indexes[i_4]]);
            }
            // Create an equal number of new files ensuring that the previous
            // files are evicted in the same order they were read.
            for (int i_5 = 0; i_5 < NumPaths; ++i_5)
            {
                MakeTestFile(paths[i_5 + NumPaths], BlockSize, true);
                TriggerBlockReport();
                Sharpen.Thread.Sleep(3000);
                EnsureFileReplicasOnStorageType(paths[i_5 + NumPaths], StorageType.RamDisk);
                // The least-recently-read file must now be on disk...
                EnsureFileReplicasOnStorageType(paths[indexes[i_5]], StorageType.Default);
                // ...while every more-recently-read file is still on RAM disk.
                for (int j = i_5 + 1; j < NumPaths; ++j)
                {
                    EnsureFileReplicasOnStorageType(paths[indexes[j]], StorageType.RamDisk);
                }
            }
            VerifyRamDiskJMXMetric("RamDiskBlocksWrite", NumPaths * 2);
            VerifyRamDiskJMXMetric("RamDiskBlocksWriteFallback", 0);
            VerifyRamDiskJMXMetric("RamDiskBytesWrite", BlockSize * NumPaths * 2);
            VerifyRamDiskJMXMetric("RamDiskBlocksReadHits", NumPaths);
            VerifyRamDiskJMXMetric("RamDiskBlocksEvicted", NumPaths);
            VerifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 0);
            VerifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 0);
        }
Example #13
0
        /// <summary>
        /// A lazy-persist file created on a RAM-disk-enabled cluster must have
        /// its replicas placed on RAM_DISK storage.
        /// </summary>
        public virtual void TestPlacementOnRamDisk()
        {
            StartUpCluster(true, -1);

            string methodName = GenericTestUtils.GetMethodName();
            Path   testFile   = new Path("/" + methodName + ".dat");

            MakeTestFile(testFile, BlockSize, true);
            EnsureFileReplicasOnStorageType(testFile, StorageType.RamDisk);
        }
Example #14
0
        /// <summary>
        /// When the cluster has no RAM disk configured, a lazy-persist file must
        /// fall back to DEFAULT (disk) storage.
        /// </summary>
        public virtual void TestFallbackToDisk()
        {
            StartUpCluster(false, -1);

            string methodName = GenericTestUtils.GetMethodName();
            Path   testFile   = new Path("/" + methodName + ".dat");

            MakeTestFile(testFile, BlockSize, true);
            EnsureFileReplicasOnStorageType(testFile, StorageType.Default);
        }
Example #15
0
        /// <summary>Test write a file, verifies and closes it.</summary>
        /// <remarks>
        /// Test write a file, verifies and closes it. Then the length of the blocks
        /// are messed up and BlockReport is forced.
        /// The modification of blocks' length has to be ignored
        /// </remarks>
        /// <exception cref="System.IO.IOException">on an error</exception>
        public virtual void BlockReport_01()
        {
            string        MethodName = GenericTestUtils.GetMethodName();
            Path          filePath   = new Path("/" + MethodName + ".dat");
            AList <Block> blocks     = PrepareForRide(filePath, MethodName, FileSize);

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Number of blocks allocated " + blocks.Count);
            }
            // Remember the original lengths, then overwrite each block's length
            // with a random value; the block report must not propagate them.
            long[] oldLengths = new long[blocks.Count];
            int    tempLen;

            for (int i = 0; i < blocks.Count; i++)
            {
                Block b = blocks[i];
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " before\t" + "Size " + b.GetNumBytes());
                }
                oldLengths[i] = b.GetNumBytes();
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Setting new length");
                }
                tempLen = rand.Next(BlockSize);
                b.Set(b.GetBlockId(), tempLen, b.GetGenerationStamp());
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " after\t " + "Size " + b.GetNumBytes());
                }
            }
            // all blocks belong to the same file, hence same BP
            DataNode             dn     = cluster.GetDataNodes()[DnN0];
            string               poolId = cluster.GetNamesystem().GetBlockPoolId();
            DatanodeRegistration dnR    = dn.GetDNRegistrationForBP(poolId);

            StorageBlockReport[] reports = GetBlockReports(dn, poolId, false, false);
            SendBlockReports(dnR, poolId, reports);
            IList <LocatedBlock> blocksAfterReport = DFSTestUtil.GetAllBlocks(fs.Open(filePath
                                                                                      ));

            if (Log.IsDebugEnabled())
            {
                Log.Debug("After mods: Number of blocks allocated " + blocksAfterReport.Count);
            }
            // Every block must still report its pre-modification length.
            for (int i_1 = 0; i_1 < blocksAfterReport.Count; i_1++)
            {
                ExtendedBlock b = blocksAfterReport[i_1].GetBlock();
                NUnit.Framework.Assert.AreEqual("Length of " + i_1 + "th block is incorrect", oldLengths
                                                [i_1], b.GetNumBytes());
            }
        }
Example #16
0
        /// <summary>
        /// Creating a file with the lazy-persist flag set must surface the
        /// LAZY_PERSIST storage policy id when the file is stat'ed.
        /// </summary>
        public virtual void TestPolicyPropagation()
        {
            StartUpCluster(false, -1);

            string methodName = GenericTestUtils.GetMethodName();
            Path   testFile   = new Path("/" + methodName + ".dat");

            MakeTestFile(testFile, 0, true);
            // Stat the file and check that the lazyPersist flag is returned back.
            HdfsFileStatus status = client.GetFileInfo(testFile.ToString());
            Assert.AssertThat(status.GetStoragePolicy(), IS.Is(LazyPersistPolicyId));
        }
Example #17
0
        /// <summary>
        /// Verifies that a per-storage-type (SSD) quota survives an fsimage
        /// checkpoint and namenode restart: the quota value, the quota flags and
        /// the consumed SSD space must all be identical after the restart.
        /// </summary>
        public virtual void TestQuotaByStorageTypePersistenceInFsImage()
        {
            string MethodName   = GenericTestUtils.GetMethodName();
            Path   testDir      = new Path(dir, MethodName);
            Path   createdFile1 = new Path(testDir, "created_file1.data");

            dfs.Mkdirs(testDir);
            // set storage policy on testDir to ONESSD
            dfs.SetStoragePolicy(testDir, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on testDir
            long SsdQuota = Blocksize * 4;

            dfs.SetQuotaByStorageType(testDir, StorageType.Ssd, SsdQuota);
            INode testDirNode = fsdir.GetINode4Write(testDir.ToString());

            NUnit.Framework.Assert.IsTrue(testDirNode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under testDir
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication
                                   , seed);
            // Verify SSD consumed before namenode restart
            long ssdConsumed = testDirNode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed
                                   ().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Restart the namenode with checkpoint to make sure fsImage is correct
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.SaveNamespace();
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.RestartNameNode(true);
            RefreshClusterState();
            INode testDirNodeAfterNNRestart = fsdir.GetINode4Write(testDir.ToString());

            // Bug fix: the original asserted on the stale pre-restart
            // testDirNode here; the post-restart inode is the one that proves
            // the fsimage round-trip worked.
            NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsQuotaSet());
            QuotaCounts qc = testDirNodeAfterNNRestart.GetQuotaCounts();

            NUnit.Framework.Assert.AreEqual(SsdQuota, qc.GetTypeSpace(StorageType.Ssd));
            // All other quota-supporting storage types must remain unset.
            foreach (StorageType t in StorageType.GetTypesSupportingQuota())
            {
                if (t != StorageType.Ssd)
                {
                    NUnit.Framework.Assert.AreEqual(HdfsConstants.QuotaReset, qc.GetTypeSpace(t));
                }
            }
            long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.AsDirectory().GetDirectoryWithQuotaFeature
                                                 ().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumedAfterNNRestart);
        }
Example #18
0
        /// <summary>
        /// Support for layout version change with rolling upgrade was
        /// added by HDFS-6800 and HDFS-6981.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestWithLayoutChangeAndFinalize()
        {
            long seed = unchecked ((int)(0x600DF00D));

            try
            {
                StartCluster();
                // NOTE(review): both arrays are sized 3 but only indices 0 and 1
                // are ever used.
                Path[]     paths      = new Path[3];
                FilePath[] blockFiles = new FilePath[3];
                // Create two files in DFS.
                for (int i = 0; i < 2; ++i)
                {
                    paths[i] = new Path("/" + GenericTestUtils.GetMethodName() + "." + i + ".dat");
                    DFSTestUtil.CreateFile(fs, paths[i], BlockSize, (short)2, seed);
                }
                StartRollingUpgrade();
                // Delete the first file. The DN will save its block files in trash.
                blockFiles[0] = GetBlockForFile(paths[0], true);
                FilePath trashFile0 = GetTrashFileForBlock(blockFiles[0], false);
                DeleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);
                // Restart the DN with a new layout version to trigger layout upgrade.
                Log.Info("Shutting down the Datanode");
                MiniDFSCluster.DataNodeProperties dnprop = cluster.StopDataNode(0);
                DFSTestUtil.AddDataNodeLayoutVersion(DataNodeLayoutVersion.CurrentLayoutVersion -
                                                     1, "Test Layout for TestDataNodeRollingUpgrade");
                Log.Info("Restarting the DataNode");
                cluster.RestartDataNode(dnprop, true);
                cluster.WaitActive();
                dn0 = cluster.GetDataNodes()[0];
                Log.Info("The DN has been restarted");
                // After the layout upgrade the trash copy must be gone and trash
                // must no longer be allowed for this block pool.
                NUnit.Framework.Assert.IsFalse(trashFile0.Exists());
                NUnit.Framework.Assert.IsFalse(dn0.GetStorage().GetBPStorage(blockPoolId).IsTrashAllowed
                                                   (blockFiles[0]));
                // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
                NUnit.Framework.Assert.IsTrue(IsBlockFileInPrevious(blockFiles[0]));
                NUnit.Framework.Assert.IsFalse(IsTrashRootPresent());
                // Delete the second file. Ensure that its block file is in previous.
                blockFiles[1] = GetBlockForFile(paths[1], true);
                fs.Delete(paths[1], false);
                NUnit.Framework.Assert.IsTrue(IsBlockFileInPrevious(blockFiles[1]));
                NUnit.Framework.Assert.IsFalse(IsTrashRootPresent());
                // Finalize and ensure that neither block file exists in trash or previous.
                FinalizeRollingUpgrade();
                NUnit.Framework.Assert.IsFalse(IsTrashRootPresent());
                NUnit.Framework.Assert.IsFalse(IsBlockFileInPrevious(blockFiles[0]));
                NUnit.Framework.Assert.IsFalse(IsBlockFileInPrevious(blockFiles[1]));
            }
            finally
            {
                ShutdownCluster();
            }
        }
Example #19
0
        /// <summary>
        /// A file created without the lazy-persist flag must NOT report the
        /// LAZY_PERSIST storage policy id when stat'ed.
        /// </summary>
        public virtual void TestPolicyNotSetByDefault()
        {
            StartUpCluster(false, -1);

            string methodName = GenericTestUtils.GetMethodName();
            Path   testFile   = new Path("/" + methodName + ".dat");

            MakeTestFile(testFile, 0, false);
            // Stat the file and check that the LAZY_PERSIST policy is not
            // returned back.
            HdfsFileStatus status = client.GetFileInfo(testFile.ToString());
            Assert.AssertThat(status.GetStoragePolicy(), IsNot.Not(LazyPersistPolicyId));
        }
예제 #20
0
        // return the initial state of the configuration
        /// <summary>
        /// Test for the case where one of the DNs in the pipeline is in the
        /// process of doing a block report exactly when the block is closed.
        /// </summary>
        /// <remarks>
        /// Test for the case where one of the DNs in the pipeline is in the
        /// process of doing a block report exactly when the block is closed.
        /// In this case, the block report becomes delayed until after the
        /// block is marked completed on the NN, and hence it reports an RBW
        /// replica for a COMPLETE block. Such a report should not be marked
        /// corrupt.
        /// This is a regression test for HDFS-2791.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestOneReplicaRbwReportArrivesAfterBlockCompleted()
        {
            // Released by the delayed answer once the DN's block report RPC has
            // actually completed on the NN side.
            CountDownLatch brFinished = new CountDownLatch(1);

            // Answer that holds the DN's block report RPC until Proceed() is called.
            GenericTestUtils.DelayAnswer delayer = new _DelayAnswer_579(brFinished, Log);
            // inform the test that our block report went through.
            string MethodName = GenericTestUtils.GetMethodName();
            Path   filePath   = new Path("/" + MethodName + ".dat");

            // Start a second DN for this test -- we're checking
            // what happens when one of the DNs is slowed for some reason.
            ReplFactor = 2;
            StartDNandWait(null, false);
            NameNode           nn   = cluster.GetNameNode();
            FSDataOutputStream @out = fs.Create(filePath, ReplFactor);

            try
            {
                // Write some data and hflush so DN 0 holds an RBW replica.
                AppendTestUtil.Write(@out, 0, 10);
                @out.Hflush();
                // Set up a spy so that we can delay the block report coming
                // from this node.
                DataNode dn = cluster.GetDataNodes()[0];
                DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dn,
                                                                                             nn);
                Org.Mockito.Mockito.DoAnswer(delayer).When(spy).BlockReport(Org.Mockito.Mockito.AnyObject
                                                                            <DatanodeRegistration>(), Org.Mockito.Mockito.AnyString(), Org.Mockito.Mockito.AnyObject
                                                                            <StorageBlockReport[]>(), Org.Mockito.Mockito.AnyObject <BlockReportContext>());
                // Force a block report to be generated. The block report will have
                // an RBW replica in it. Wait for the RPC to be sent, but block
                // it before it gets to the NN.
                dn.ScheduleAllBlockReport(0);
                delayer.WaitForCall();
            }
            finally
            {
                // Close the stream even if the report never got scheduled.
                IOUtils.CloseStream(@out);
            }
            // Now that the stream is closed, the NN will have the block in COMPLETE
            // state.
            delayer.Proceed();
            brFinished.Await();
            // Verify that no replicas are marked corrupt, and that the
            // file is still readable.
            BlockManagerTestUtil.UpdateState(nn.GetNamesystem().GetBlockManager());
            NUnit.Framework.Assert.AreEqual(0, nn.GetNamesystem().GetCorruptReplicaBlocks());
            DFSTestUtil.ReadFile(fs, filePath);
            // Ensure that the file is readable even from the DN that we futzed with.
            cluster.StopDataNode(1);
            DFSTestUtil.ReadFile(fs, filePath);
        }
예제 #21
0
        public virtual void TestDnRestartWithUnsavedReplicas()
        {
            // Write a lazy-persist replica while the lazy writer is stopped so it
            // is never saved to disk, restart the DataNode, and confirm the
            // replica is still served from transient (RAM_DISK) storage.
            StartUpCluster(true, 1);
            FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
            string methodName = GenericTestUtils.GetMethodName();
            Path testPath = new Path("/" + methodName + ".01.dat");
            MakeTestFile(testPath, BlockSize, true);
            EnsureFileReplicasOnStorageType(testPath, StorageType.RamDisk);
            Log.Info("Restarting the DataNode");
            cluster.RestartDataNode(0, true);
            cluster.WaitActive();
            // The unsaved replica must have survived the restart on RAM_DISK.
            EnsureFileReplicasOnStorageType(testPath, StorageType.RamDisk);
        }
예제 #22
0
        /// <summary>
        /// Sends a fake full block report from one DataNode that claims a copy of
        /// every block on each of its storages, then verifies the NameNode still
        /// reports the expected number of distinct replica locations per block.
        /// </summary>
        public virtual void TestBlockHasMultipleReplicasOnSameDN()
        {
            string filename = MakeFileName(GenericTestUtils.GetMethodName());
            Path   filePath = new Path(filename);

            // Write out a file with a few blocks.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(filePath.ToString(), 0, BlockSize
                                                                  * NumBlocks);
            // Generate a fake block report from one of the DataNodes, such
            // that it reports one copy of each block on either storage.
            DataNode             dn    = cluster.GetDataNodes()[0];
            DatanodeRegistration dnReg = dn.GetDNRegistrationForBP(bpid);

            // One report entry per storage on the DN.
            StorageBlockReport[] reports = new StorageBlockReport[cluster.GetStoragesPerDatanode
                                                                      ()];
            AList <Replica> blocks = new AList <Replica>();

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Block localBlock = locatedBlock.GetBlock().GetLocalBlock();
                blocks.AddItem(new FinalizedReplica(localBlock, null, null));
            }
            // The same encoded block list is deliberately reported for every
            // storage, so each block appears once per storage on this DN.
            BlockListAsLongs bll = BlockListAsLongs.Encode(blocks);

            for (int i = 0; i < cluster.GetStoragesPerDatanode(); ++i)
            {
                FsVolumeSpi     v   = dn.GetFSDataset().GetVolumes()[i];
                DatanodeStorage dns = new DatanodeStorage(v.GetStorageID());
                reports[i] = new StorageBlockReport(dns, bll);
            }
            // Should not assert!
            cluster.GetNameNodeRpc().BlockReport(dnReg, bpid, reports, new BlockReportContext
                                                     (1, 0, Runtime.NanoTime()));
            // Get the block locations once again.
            locatedBlocks = client.GetLocatedBlocks(filename, 0, BlockSize * NumBlocks);
            // Make sure that each block has two replicas, one on each DataNode.
            foreach (LocatedBlock locatedBlock_1 in locatedBlocks.GetLocatedBlocks())
            {
                DatanodeInfo[] locations = locatedBlock_1.GetLocations();
                Assert.AssertThat(locations.Length, IS.Is((int)NumDatanodes));
                Assert.AssertThat(locations[0].GetDatanodeUuid(), CoreMatchers.Not(locations[1].GetDatanodeUuid
                                                                                       ()));
            }
        }
        /// <summary>
        /// Sends fake incremental block reports to the NameNode marking one block
        /// per storage of DN 0 as deleted, either as one combined report or (when
        /// <paramref name="splitReports"/> is true) as one report per storage, and
        /// asserts the NameNode's missing-block count equals the report count.
        /// </summary>
        /// <param name="splitReports">whether to send one report per storage.</param>
        /// <exception cref="System.IO.IOException"/>
        public virtual void VerifyIncrementalBlockReports(bool splitReports)
        {
            // Get the block list for the file with the block locations.
            LocatedBlocks blocks = CreateFileGetBlocks(GenericTestUtils.GetMethodName());

            // We will send 'fake' incremental block reports to the NN that look
            // like they originated from DN 0.
            StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[dn0.GetFSDataset
                                                                                          ().GetVolumes().Count];
            // Lie to the NN that one block on each storage has been deleted.
            for (int i = 0; i < reports.Length; ++i)
            {
                FsVolumeSpi volume = dn0.GetFSDataset().GetVolumes()[i];
                bool        foundBlockOnStorage = false;
                ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
                // Find the first block on this storage and mark it as deleted for the
                // report.
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    if (block.GetStorageIDs()[0].Equals(volume.GetStorageID()))
                    {
                        rdbi[0] = new ReceivedDeletedBlockInfo(block.GetBlock().GetLocalBlock(), ReceivedDeletedBlockInfo.BlockStatus
                                                               .DeletedBlock, null);
                        foundBlockOnStorage = true;
                        break;
                    }
                }
                // Every storage must hold at least one block of the test file.
                NUnit.Framework.Assert.IsTrue(foundBlockOnStorage);
                reports[i] = new StorageReceivedDeletedBlocks(volume.GetStorageID(), rdbi);
                if (splitReports)
                {
                    // If we are splitting reports then send the report for this storage now.
                    StorageReceivedDeletedBlocks[] singletonReport = new StorageReceivedDeletedBlocks
                                                                     [] { reports[i] };
                    cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
                }
            }
            if (!splitReports)
            {
                // Send a combined report.
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, reports);
            }
            // Make sure that the deleted block from each storage was picked up
            // by the NameNode.
            Assert.AssertThat(cluster.GetNamesystem().GetMissingBlocksCount(), IS.Is((long)reports
                                                                                     .Length));
        }
예제 #24
0
        public virtual void TestErrorReplicas()
        {
            // A failure in InitReplicaRecovery must abort block recovery before
            // SyncBlock is ever attempted.
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + GenericTestUtils.GetMethodName());
            }
            DataNode spyDN = Org.Mockito.Mockito.Spy(dn);
            // Make replica-recovery initialization throw for every recovering block.
            Org.Mockito.Mockito.DoThrow(new IOException()).When(spyDN).InitReplicaRecovery(
                Matchers.Any<BlockRecoveryCommand.RecoveringBlock>());
            Daemon recoveryDaemon = spyDN.RecoverBlocks("fake NN", InitRecoveringBlocks());
            recoveryDaemon.Join();
            // Since initialization threw, SyncBlock must never have been reached.
            Org.Mockito.Mockito.Verify(spyDN, Org.Mockito.Mockito.Never()).SyncBlock(
                Matchers.Any<BlockRecoveryCommand.RecoveringBlock>(),
                Matchers.AnyListOf<DataNode.BlockRecord>());
        }
예제 #25
0
        /// <summary>
        /// Exercises short-circuit reads of a lazy-persist file before and after
        /// its replica is evicted from RAM_DISK to DEFAULT storage, and checks
        /// that legacy short-circuit reads were not disabled along the way.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        private void DoShortCircuitReadAfterEvictionTest()
        {
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path1      = new Path("/" + MethodName + ".01.dat");
            Path   path2      = new Path("/" + MethodName + ".02.dat");
            // Fixed seed so the file contents are reproducible across re-reads.
            int    Seed       = unchecked ((int)(0xFADED));

            MakeRandomTestFile(path1, BlockSize, true, Seed);
            // Verify short-circuit read from RAM_DISK.
            EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
            FilePath metaFile = cluster.GetBlockMetadataFile(0, DFSTestUtil.GetFirstBlock(fs,
                                                                                          path1));

            // While on RAM_DISK the meta file holds no checksums (header only).
            NUnit.Framework.Assert.IsTrue(metaFile.Length() <= BlockMetadataHeader.GetHeaderSize
                                              ());
            NUnit.Framework.Assert.IsTrue(VerifyReadRandomFile(path1, BlockSize, Seed));
            // Sleep for a short time to allow the lazy writer thread to do its job.
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            // Verify short-circuit read from RAM_DISK once again.
            EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
            metaFile = cluster.GetBlockMetadataFile(0, DFSTestUtil.GetFirstBlock(fs, path1));
            NUnit.Framework.Assert.IsTrue(metaFile.Length() <= BlockMetadataHeader.GetHeaderSize
                                              ());
            NUnit.Framework.Assert.IsTrue(VerifyReadRandomFile(path1, BlockSize, Seed));
            // Create another file with a replica on RAM_DISK, which evicts the first.
            MakeRandomTestFile(path2, BlockSize, true, Seed);
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            TriggerBlockReport();
            // Verify short-circuit read still works from DEFAULT storage.  This time,
            // we'll have a checksum written during lazy persistence.
            EnsureFileReplicasOnStorageType(path1, StorageType.Default);
            metaFile = cluster.GetBlockMetadataFile(0, DFSTestUtil.GetFirstBlock(fs, path1));
            NUnit.Framework.Assert.IsTrue(metaFile.Length() > BlockMetadataHeader.GetHeaderSize
                                              ());
            NUnit.Framework.Assert.IsTrue(VerifyReadRandomFile(path1, BlockSize, Seed));
            // In the implementation of legacy short-circuit reads, any failure is
            // trapped silently, reverts back to a remote read, and also disables all
            // subsequent legacy short-circuit reads in the ClientContext.  If the test
            // uses legacy, then assert that it didn't get disabled.
            ClientContext clientContext = client.GetClientContext();

            if (clientContext.GetUseLegacyBlockReaderLocal())
            {
                NUnit.Framework.Assert.IsFalse(clientContext.GetDisableLegacyBlockReaderLocal());
            }
        }
예제 #26
0
        /// <summary>
        /// Without the lazyPersist flag, block placement must not fall back to
        /// RAM_DISK; file creation is expected to throw.
        /// </summary>
        public virtual void TestRamDiskNotChosenByDefault()
        {
            StartUpCluster(true, -1);
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path       = new Path("/" + MethodName + ".dat");

            try
            {
                MakeTestFile(path, BlockSize, false);
                NUnit.Framework.Assert.Fail("Block placement to RAM_DISK should have failed without lazyPersist flag"
                                            );
            }
            catch (NUnit.Framework.AssertionException)
            {
                // Bug fix: the generic handler below used to swallow the
                // AssertionException thrown by Assert.Fail, so the test could
                // never fail. Re-throw it so the failure is reported.
                throw;
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
        }
예제 #27
0
        public virtual void TestPolicyPersistenceInFsImage()
        {
            // The LAZY_PERSIST policy must survive a checkpoint followed by a
            // NameNode restart.
            StartUpCluster(false, -1);
            string methodName = GenericTestUtils.GetMethodName();
            Path testPath = new Path("/" + methodName + ".dat");
            MakeTestFile(testPath, 0, true);
            // Force a checkpoint so the policy is written into the fsimage.
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            fs.SaveNamespace();
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.RestartNameNode(true);
            // After the restart the lazyPersist flag must still be reported.
            HdfsFileStatus status = client.GetFileInfo(testPath.ToString());
            Assert.AssertThat(status.GetStoragePolicy(), IS.Is(LazyPersistPolicyId));
        }
예제 #28
0
        public virtual void TestDeleteBeforePersist()
        {
            // Delete a lazy-persist file before the lazy writer can save it, then
            // verify the blocks are gone and the JMX counter was bumped.
            StartUpCluster(true, -1);
            string methodName = GenericTestUtils.GetMethodName();
            // Keep the lazy writer idle so the replica is never persisted.
            FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
            Path testPath = new Path("/" + methodName + ".dat");
            MakeTestFile(testPath, BlockSize, true);
            LocatedBlocks locatedBlocks =
                EnsureFileReplicasOnStorageType(testPath, StorageType.RamDisk);
            // Delete while the replica still lives only on RAM_DISK.
            client.Delete(testPath.ToString(), false);
            NUnit.Framework.Assert.IsFalse(fs.Exists(testPath));
            Assert.AssertThat(VerifyDeletedBlocks(locatedBlocks), IS.Is(true));
            // The early deletion must be reflected in the RAM disk metrics.
            VerifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
        }
예제 #29
0
        /// <summary>
        /// Truncating a lazy-persist file must be rejected.
        /// </summary>
        public virtual void TestTruncateIsDenied()
        {
            StartUpCluster(true, -1);
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path       = new Path("/" + MethodName + ".dat");

            MakeTestFile(path, BlockSize, true);
            try
            {
                client.Truncate(path.ToString(), BlockSize / 2);
                NUnit.Framework.Assert.Fail("Truncate to LazyPersist file did not fail as expected"
                                            );
            }
            catch (NUnit.Framework.AssertionException)
            {
                // Bug fix: the generic handler below used to swallow the
                // AssertionException thrown by Assert.Fail, so the test could
                // never fail. Re-throw it so the failure is reported.
                throw;
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
        }
예제 #30
0
        public virtual void TestLazyPersistBlocksAreSaved()
        {
            // A multi-block lazy-persist file must eventually be copied from
            // RAM_DISK to persistent storage by the lazy writer thread.
            StartUpCluster(true, -1);
            string methodName = GenericTestUtils.GetMethodName();
            Path testPath = new Path("/" + methodName + ".dat");
            // Create a ten-block test file.
            MakeTestFile(testPath, BlockSize * 10, true);
            LocatedBlocks locatedBlocks =
                EnsureFileReplicasOnStorageType(testPath, StorageType.RamDisk);
            // Give the lazy writer thread time to persist every replica.
            Sharpen.Thread.Sleep(6 * LazyWriterIntervalSec * 1000);
            Log.Info("Verifying copy was saved to lazyPersist/");
            // Each block must now have a saved copy on persistent storage.
            EnsureLazyPersistBlocksAreSaved(locatedBlocks);
        }