Example #1
 /// <exception cref="System.IO.IOException"/>
 public static FilePath GetMetaFile<_T0>(FsDatasetSpi<_T0> fsd, string bpid, Block b)
     where _T0 : FsVolumeSpi
 {
     return FsDatasetUtil.GetMetaFile(GetBlockFile(fsd, bpid, b), b.GetGenerationStamp());
 }
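A minimal usage sketch (hypothetical; fsd, bpid, and block are assumed to come from a running MiniDFSCluster, as in the later examples). The meta file returned above lives next to the block file and encodes the generation stamp in its name:

     // Hypothetical sketch: locate both on-disk files for a finalized replica.
     FilePath blockFile = FsDatasetTestUtil.GetBlockFile(fsd, bpid, block);
     FilePath metaFile  = FsDatasetTestUtil.GetMetaFile(fsd, bpid, block);
     NUnit.Framework.Assert.IsTrue(blockFile.Exists());
     NUnit.Framework.Assert.IsTrue(metaFile.Exists());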
Example #2
        public virtual void TestAddVolumeFailures()
        {
            StartDFSCluster(1, 1);
            string         dataDir    = cluster.GetDataDirectory();
            DataNode       dn         = cluster.GetDataNodes()[0];
            IList <string> newDirs    = Lists.NewArrayList();
            int            NumNewDirs = 4;

            for (int i = 0; i < NumNewDirs; i++)
            {
                FilePath newVolume = new FilePath(dataDir, "new_vol" + i);
                newDirs.AddItem(newVolume.ToString());
                if (i % 2 == 0)
                {
                    // Make addVolume() fail.
                    newVolume.CreateNewFile();
                }
            }
            string newValue = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey) + "," +
                              Joiner.On(",").Join(newDirs);

            try
            {
                dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, newValue);
                NUnit.Framework.Assert.Fail("Expect to throw IOException.");
            }
            catch (ReconfigurationException e)
            {
                string   errorMessage = e.InnerException.Message;
                string[] messages     = errorMessage.Split("\\r?\\n");
                NUnit.Framework.Assert.AreEqual(2, messages.Length);
                Assert.AssertThat(messages[0], CoreMatchers.ContainsString("new_vol0"));
                Assert.AssertThat(messages[1], CoreMatchers.ContainsString("new_vol2"));
            }
            // Make sure that vol0 and vol2's metadata are not left in memory.
            FsDatasetSpi <object> dataset = dn.GetFSDataset();

            foreach (FsVolumeSpi volume in dataset.GetVolumes())
            {
                Assert.AssertThat(volume.GetBasePath(),
                                  IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
            DataStorage storage = dn.GetStorage();

            for (int i_1 = 0; i_1 < storage.GetNumStorageDirs(); i_1++)
            {
                Storage.StorageDirectory sd = storage.GetStorageDir(i_1);
                Assert.AssertThat(sd.GetRoot().ToString(),
                                  IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
            // The newly effective conf does not have vol0 and vol2.
            string[] effectiveVolumes = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey).Split(",");
            NUnit.Framework.Assert.AreEqual(4, effectiveVolumes.Length);
            foreach (string ev in effectiveVolumes)
            {
                Assert.AssertThat(StorageLocation.Parse(ev).GetFile().GetCanonicalPath(),
                                  IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
        }
Example #3
 /// <exception cref="System.Exception"/>
 internal TestContext(Configuration conf, int numNameServices)
 {
     this.numNameServices = numNameServices;
     MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StoragesPerDatanode(1);
     if (numNameServices > 1)
     {
         bld.NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(numNameServices));
     }
     cluster = bld.Build();
     cluster.WaitActive();
     dfs = new DistributedFileSystem[numNameServices];
     for (int i = 0; i < numNameServices; i++)
     {
         dfs[i] = cluster.GetFileSystem(i);
     }
     bpids = new string[numNameServices];
     for (int i_1 = 0; i_1 < numNameServices; i_1++)
     {
         bpids[i_1] = cluster.GetNamesystem(i_1).GetBlockPoolId();
     }
     datanode     = cluster.GetDataNodes()[0];
     blockScanner = datanode.GetBlockScanner();
     for (int i_2 = 0; i_2 < numNameServices; i_2++)
     {
         dfs[i_2].Mkdirs(new Path("/test"));
     }
     data    = datanode.GetFSDataset();
     volumes = data.GetVolumes();
 }
Example #4
        /// <exception cref="System.IO.IOException"/>
        public static bool UnlinkBlock<_T0>(FsDatasetSpi<_T0> fsd, ExtendedBlock block, int numLinks)
            where _T0 : FsVolumeSpi
        {
            ReplicaInfo info = ((FsDatasetImpl)fsd).GetReplicaInfo(block);

            return info.UnlinkBlock(numLinks);
        }
Example #5
        /// <summary>
        /// Test removing a data volume from a particular DataNode while the
        /// volume is actively being written to.
        /// </summary>
        /// <param name="dataNodeIdx">the index of the DataNode from which to remove a volume.</param>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.BrokenBarrierException"/>
        private void TestRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
        {
            // Starts DFS cluster with 3 DataNodes to form a pipeline.
            StartDFSCluster(1, 3);
            short              Replication            = 3;
            DataNode           dn                     = cluster.GetDataNodes()[dataNodeIdx];
            FileSystem         fs                     = cluster.GetFileSystem();
            Path               testFile               = new Path("/test");
            long               lastTimeDiskErrorCheck = dn.GetLastDiskErrorCheck();
            FSDataOutputStream @out                   = fs.Create(testFile, Replication);
            Random             rb                     = new Random(0);

            byte[] writeBuf = new byte[BlockSize / 2]; // half of the block
            rb.NextBytes(writeBuf);
            @out.Write(writeBuf);
            @out.Hflush();
            // Make FsDatasetSpi#finalizeBlock a time-consuming operation. So if the
            // BlockReceiver releases volume reference before finalizeBlock(), the blocks
            // on the volume will be removed, and finalizeBlock() throws IOE.
            FsDatasetSpi <FsVolumeSpi> data = dn.data;

            dn.data = Org.Mockito.Mockito.Spy(data);
            Org.Mockito.Mockito.DoAnswer(new _Answer_599(data)).When(dn.data)
                .FinalizeBlock(Matchers.Any<ExtendedBlock>());
            // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
            // the block is not removed, since the volume reference should not
            // be released at this point.
            CyclicBarrier  barrier = new CyclicBarrier(2);
            IList<string> oldDirs = GetDataDirs(dn);
            // Remove the first volume by keeping only the second one.
            string newDirs = oldDirs[1];
            IList <Exception> exceptions = new AList <Exception>();

            Sharpen.Thread reconfigThread = new _Thread_616(barrier, dn, newDirs, exceptions);
            reconfigThread.Start();
            barrier.Await();
            rb.NextBytes(writeBuf);
            @out.Write(writeBuf);
            @out.Hflush();
            @out.Close();
            reconfigThread.Join();
            // Verify the file has sufficient replication.
            DFSTestUtil.WaitReplication(fs, testFile, Replication);
            // Read the content back
            byte[] content = DFSTestUtil.ReadFileBuffer(fs, testFile);
            NUnit.Framework.Assert.AreEqual(BlockSize, content.Length);
            // If an IOException is thrown from BlockReceiver#run, it triggers
            // DataNode#checkDiskError(). So we can check whether checkDiskError()
            // was called, to see whether BlockReceiver#run() hit an IOException.
            NUnit.Framework.Assert.AreEqual(lastTimeDiskErrorCheck, dn.GetLastDiskErrorCheck());
            if (!exceptions.IsEmpty())
            {
                throw new IOException(exceptions[0].InnerException);
            }
        }
Example #6
 /// <summary>Is the given volume still valid in the dataset?</summary>
 private static bool IsValid<_T0>(FsDatasetSpi<_T0> dataset, FsVolumeSpi volume)
     where _T0 : FsVolumeSpi
 {
     foreach (FsVolumeSpi vol in dataset.GetVolumes())
     {
         if (vol == volume)
         {
             return true;
         }
     }
     return false;
 }
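A short hedged sketch of how IsValid is typically exercised (the dataset and volume references are assumptions). Since IsValid compares volumes by reference identity, the reference must be captured before any removal:

     // Hypothetical sketch: capture a volume reference, then re-check it later.
     FsVolumeSpi someVolume = null;
     foreach (FsVolumeSpi vol in dataset.GetVolumes())
     {
         someVolume = vol;
         break;
     }
     NUnit.Framework.Assert.IsTrue(IsValid(dataset, someVolume));
     // ... after removing that volume (e.g. via ReconfigurePropertyImpl as in
     // Example #2), IsValid(dataset, someVolume) would be expected to be false.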
Example #7
        /// <returns>the FileInputStream for the meta data of the given block.</returns>
        /// <exception cref="System.IO.FileNotFoundException">if the file is not found.</exception>
        /// <exception cref="System.InvalidCastException">if the underlying input stream is not a FileInputStream.</exception>
        /// <exception cref="System.IO.IOException"/>
        public static FileInputStream GetMetaDataInputStream<_T0>(ExtendedBlock b, FsDatasetSpi<_T0> data)
            where _T0 : FsVolumeSpi
        {
            LengthInputStream lin = data.GetMetaDataInputStream(b);

            if (lin == null)
            {
                throw new FileNotFoundException("Meta file for " + b + " not found.");
            }
            return (FileInputStream)lin.GetWrappedStream();
        }
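A hedged usage sketch (block and fsd are assumptions, and Available() is assumed to exist on the Sharpen stream port): open the meta stream and verify it is readable before handing it to a checksum reader:

        // Hypothetical sketch: the helper either returns a usable stream or throws.
        FileInputStream metaIn = GetMetaDataInputStream(block, fsd);
        try
        {
            NUnit.Framework.Assert.IsTrue(metaIn.Available() > 0);
        }
        finally
        {
            metaIn.Close();
        }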
Example #8
        internal DirectoryScanner(DataNode datanode, FsDatasetSpi<object> dataset, Configuration conf)
        {
            this.datanode = datanode;
            this.dataset  = dataset;
            int interval = conf.GetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey,
                                       DFSConfigKeys.DfsDatanodeDirectoryscanIntervalDefault);

            scanPeriodMsecs = interval * 1000L; // msec
            int threads = conf.GetInt(DFSConfigKeys.DfsDatanodeDirectoryscanThreadsKey,
                                      DFSConfigKeys.DfsDatanodeDirectoryscanThreadsDefault);

            reportCompileThreadPool = Executors.NewFixedThreadPool(threads, new Daemon.DaemonFactory());
            masterThread = new ScheduledThreadPoolExecutor(1, new Daemon.DaemonFactory());
        }
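The two keys read above can be set before constructing the scanner. A minimal sketch, assuming the usual Configuration setters (datanode and dataset are placeholders for objects obtained from a MiniDFSCluster, as in the other examples):

        // Hypothetical sketch: scan every 300 seconds with 4 report-compiler threads.
        Configuration conf = new Configuration();
        conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 300);
        conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanThreadsKey, 4);
        DirectoryScanner scanner = new DirectoryScanner(datanode, dataset, conf);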
Example #9
 public virtual void SetUp()
 {
     conf = new HdfsConfiguration();
     conf.SetLong(DFSConfigKeys.DfsNamenodePathBasedCacheRefreshIntervalMs, 100);
     conf.SetLong(DFSConfigKeys.DfsCachereportIntervalMsecKey, 500);
     conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
     conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, CacheCapacity);
     conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
     prevCacheManipulator = NativeIO.POSIX.GetCacheManipulator();
     NativeIO.POSIX.SetCacheManipulator(new NativeIO.POSIX.NoMlockCacheManipulator());
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
     cluster.WaitActive();
     fs      = cluster.GetFileSystem();
     nn      = cluster.GetNameNode();
     fsImage = nn.GetFSImage();
     dn      = cluster.GetDataNodes()[0];
     fsd     = dn.GetFSDataset();
     spyNN   = DataNodeTestUtils.SpyOnBposToNN(dn, nn);
 }
Example #10
        /// <summary>
        /// Test that when we have an uncache request, and the client refuses to release
        /// the replica for a long time, we will un-mlock it.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRevocation()
        {
            Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            Configuration conf = GetDefaultConf();

            // Set a really short revocation timeout.
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
            // Poll very often
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
            MiniDFSCluster cluster = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            DistributedFileSystem dfs = cluster.GetFileSystem();
            // Create and cache a file.
            string TestFile = "/test_file2";

            DFSTestUtil.CreateFile(dfs, new Path(TestFile), BlockSize, (short)1, unchecked((int)0xcafe));
            dfs.AddCachePool(new CachePoolInfo("pool"));
            long cacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPool("pool").SetPath(new Path(TestFile)).SetReplication((short)1).Build());
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();

            DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, fsd);
            // Mmap the file.
            FSDataInputStream @in = dfs.Open(new Path(TestFile));
            ByteBuffer        buf = @in.Read(null, BlockSize, EnumSet.NoneOf <ReadOption>());

            // Attempt to uncache file.  The file should get uncached.
            Log.Info("removing cache directive {}", cacheDirectiveId);
            dfs.RemoveCacheDirective(cacheDirectiveId);
            Log.Info("finished removing cache directive {}", cacheDirectiveId);
            Sharpen.Thread.Sleep(1000);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            // Cleanup
            @in.ReleaseBuffer(buf);
            @in.Close();
            cluster.Shutdown();
        }
Example #11
        /// <exception cref="System.Exception"/>
        public virtual void TestVerifyBlockChecksumCommand()
        {
            DFSTestUtil.CreateFile(fs, new Path("/bar"), 1234, (short)1, unchecked((int)0xdeadbeef));
            FsDatasetSpi <object> fsd   = datanode.GetFSDataset();
            ExtendedBlock         block = DFSTestUtil.GetFirstBlock(fs, new Path("/bar"));
            FilePath blockFile = FsDatasetTestUtil.GetBlockFile(fsd, block.GetBlockPoolId(),
                                                                block.GetLocalBlock());

            NUnit.Framework.Assert.AreEqual("ret: 1, You must specify a meta file with -meta"
                                            , RunCmd(new string[] { "verify", "-block", blockFile.GetAbsolutePath() }));
            FilePath metaFile = FsDatasetTestUtil.GetMetaFile(fsd, block.GetBlockPoolId(), block
                                                              .GetLocalBlock());

            NUnit.Framework.Assert.AreEqual("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)"
                                            , RunCmd(new string[] { "verify", "-meta", metaFile.GetAbsolutePath() }));
            NUnit.Framework.Assert.AreEqual("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)"
                                            + "Checksum verification succeeded on block file " + blockFile.GetAbsolutePath(
                                                ), RunCmd(new string[] { "verify", "-meta", metaFile.GetAbsolutePath(), "-block"
                                                                         , blockFile.GetAbsolutePath() }));
        }
Example #12
        public virtual void SetupMocks()
        {
            mockNN1 = SetupNNMock(0);
            mockNN2 = SetupNNMock(1);
            // Set up a mock DN with the bare-bones configuration
            // objects, etc.
            mockDn = Org.Mockito.Mockito.Mock <DataNode>();
            Org.Mockito.Mockito.DoReturn(true).When(mockDn).ShouldRun();
            Configuration conf      = new Configuration();
            FilePath      dnDataDir = new FilePath(new FilePath(TestBuildData, "dfs"), "data");

            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dnDataDir.ToURI().ToString());
            Org.Mockito.Mockito.DoReturn(conf).When(mockDn).GetConf();
            Org.Mockito.Mockito.DoReturn(new DNConf(conf)).When(mockDn).GetDnConf();
            Org.Mockito.Mockito.DoReturn(DataNodeMetrics.Create(conf, "fake dn"))
                .When(mockDn).GetMetrics();
            // Set up a simulated dataset with our fake BP
            mockFSDataset = Org.Mockito.Mockito.Spy(new SimulatedFSDataset(null, conf));
            mockFSDataset.AddBlockPool(FakeBpid, conf);
            // Wire the dataset to the DN.
            Org.Mockito.Mockito.DoReturn(mockFSDataset).When(mockDn).GetFSDataset();
        }
Example #13
 /// <exception cref="System.Exception"/>
 public virtual void TestDeleteBlockOnTransientStorage()
 {
     cluster = new MiniDFSCluster.Builder(Conf)
               .StorageTypes(new StorageType[] { StorageType.RamDisk, StorageType.Default })
               .NumDataNodes(1).Build();
     try
     {
         cluster.WaitActive();
         bpid = cluster.GetNamesystem().GetBlockPoolId();
         DataNode dataNode = cluster.GetDataNodes()[0];
         fds     = DataNodeTestUtils.GetFSDataset(cluster.GetDataNodes()[0]);
         client  = cluster.GetFileSystem().GetClient();
         scanner = new DirectoryScanner(dataNode, fds, Conf);
         scanner.SetRetainDiffs(true);
         FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
          // Create a file on RAM_DISK
          IList<LocatedBlock> blocks = CreateFile(GenericTestUtils.GetMethodName(), BlockLength, true);
         // Ensure no difference between volumeMap and disk.
         Scan(1, 0, 0, 0, 0, 0);
         // Make a copy of the block on DEFAULT storage and ensure that it is
         // picked up by the scanner.
         DuplicateBlock(blocks[0].GetBlock().GetBlockId());
         Scan(2, 1, 0, 0, 0, 0, 1);
         // Ensure that the copy on RAM_DISK was deleted.
         VerifyStorageType(blocks[0].GetBlock().GetBlockId(), false);
         Scan(1, 0, 0, 0, 0, 0);
     }
     finally
     {
         if (scanner != null)
         {
             scanner.Shutdown();
             scanner = null;
         }
         cluster.Shutdown();
         cluster = null;
     }
 }
Example #14
        /// <summary>Checks a DataNode for correct reporting of failed volumes.</summary>
        /// <param name="dn">DataNode to check</param>
        /// <param name="expectedVolumeFailuresCounter">
        /// metric counter value for
        /// VolumeFailures.  The current implementation actually counts the number
        /// of failed disk checker cycles, which may be different from the length of
        /// expectedFailedVolumes if multiple disks fail in the same disk checker
        /// cycle
        /// </param>
        /// <param name="expectCapacityKnown">
        /// if true, then expect that the capacities of the
        /// volumes were known before the failures, and therefore the lost capacity
        /// can be reported
        /// </param>
        /// <param name="expectedFailedVolumes">expected locations of failed volumes</param>
        /// <exception cref="System.Exception">if there is any failure</exception>
        private void CheckFailuresAtDataNode(DataNode dn, long expectedVolumeFailuresCounter,
                                             bool expectCapacityKnown, params string[] expectedFailedVolumes)
        {
            MetricsAsserts.AssertCounter("VolumeFailures", expectedVolumeFailuresCounter,
                                         MetricsAsserts.GetMetrics(dn.GetMetrics().Name()));
            FsDatasetSpi <object> fsd = dn.GetFSDataset();

            NUnit.Framework.Assert.AreEqual(expectedFailedVolumes.Length, fsd.GetNumFailedVolumes());
            Assert.AssertArrayEquals(expectedFailedVolumes, fsd.GetFailedStorageLocations());
            if (expectedFailedVolumes.Length > 0)
            {
                NUnit.Framework.Assert.IsTrue(fsd.GetLastVolumeFailureDate() > 0);
                long expectedCapacityLost = GetExpectedCapacityLost(expectCapacityKnown,
                                                                    expectedFailedVolumes.Length);
                NUnit.Framework.Assert.AreEqual(expectedCapacityLost, fsd.GetEstimatedCapacityLostTotal());
            }
            else
            {
                NUnit.Framework.Assert.AreEqual(0, fsd.GetLastVolumeFailureDate());
                NUnit.Framework.Assert.AreEqual(0, fsd.GetEstimatedCapacityLostTotal());
            }
        }
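A hedged usage sketch for the checker above (dn and dn0Vol1 are placeholders, as in Example #17): after injecting a failure into one data directory and letting the disk checker run once, the expected call would look like this:

            // Hypothetical sketch: one disk-checker cycle observed one failed
            // volume whose capacity was known before the failure.
            CheckFailuresAtDataNode(dn, 1, true, dn0Vol1.GetAbsolutePath());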
Example #15
        /// <summary>
        /// Test that we can zero-copy read cached data even without disabling
        /// checksums.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestZeroCopyReadOfCachedData()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            int  TestFileLength    = BlockSize;
            Path TestPath          = new Path("/a");
            int  RandomSeed        = 23453;
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            string Context = "testZeroCopyReadOfCachedData";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey,
                         DFSTestUtil.RoundUpToMultiple(TestFileLength,
                             (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize()));
            MiniDFSCluster cluster = null;
            ByteBuffer     result  = null;
            ByteBuffer     result2 = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
            DistributedFileSystem fs  = cluster.GetFileSystem();

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength);
            // Prior to caching, the file can't be read via zero-copy
            FSDataInputStream fsIn = fs.Open(TestPath);

            try
            {
                result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf <ReadOption>());
                NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
            }
            catch (NotSupportedException)
            {
                // expected
            }
            // Cache the file
            fs.AddCachePool(new CachePoolInfo("pool1"));
            long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                .SetPath(TestPath).SetReplication((short)1).SetPool("pool1").Build());
            int numBlocks = (int)Math.Ceiling((double)TestFileLength / BlockSize);

            DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength, BlockSize),
                                                 numBlocks, cluster.GetDataNodes()[0].GetFSDataset());
            try
            {
                result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray(result));
            // Test that files opened after the cache operation has finished
            // still get the benefits of zero-copy (regression test for HDFS-6086)
            FSDataInputStream fsIn2 = fs.Open(TestPath);

            try
            {
                result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray(result2));
            fsIn2.ReleaseBuffer(result2);
            fsIn2.Close();
            // check that the replica is anchored
            ExtendedBlock     firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
            ShortCircuitCache cache = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache();

            WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
            // Uncache the replica
            fs.RemoveCacheDirective(directiveId);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
            fsIn.ReleaseBuffer(result);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            fsIn.Close();
            fs.Close();
            cluster.Shutdown();
        }
Example #16
 public _Answer_599(FsDatasetSpi <FsVolumeSpi> data)
 {
     this.data = data;
 }
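The generated _Answer_599 body is not shown in these examples; Example #5 only relies on it making FinalizeBlock slow while still delegating to the real dataset. A plausible sketch of such an Answer method (the delay and the delegation are assumptions, not the original code):

     // Hypothetical sketch of the Answer body: stall, then call through to the
     // real FinalizeBlock so the replica is still finalized afterwards.
     public object Answer(InvocationOnMock invocation)
     {
         Sharpen.Thread.Sleep(1000); // make finalizeBlock time-consuming
         this.data.FinalizeBlock((ExtendedBlock)invocation.GetArguments()[0]);
         return null;
     }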
Example #17
        /// <summary>
        /// Test that DataStorage and BlockPoolSliceStorage remove the failed volume
        /// after failure.
        /// </summary>
        /// <exception cref="System.Exception"/>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        public virtual void TestFailedVolumeBeingRemovedFromDataNode()
        {
            Path file1 = new Path("/test1");

            DFSTestUtil.CreateFile(fs, file1, 1024, (short)2, 1L);
            DFSTestUtil.WaitReplication(fs, file1, (short)2);
            FilePath dn0Vol1 = new FilePath(dataDir, "data" + (2 * 0 + 1));

            DataNodeTestUtils.InjectDataDirFailure(dn0Vol1);
            DataNode dn0 = cluster.GetDataNodes()[0];
            long     lastDiskErrorCheck = dn0.GetLastDiskErrorCheck();

            dn0.CheckDiskErrorAsync();
            // Wait for the checkDiskError thread to finish discovering the volume failure.
            while (dn0.GetLastDiskErrorCheck() == lastDiskErrorCheck)
            {
                Sharpen.Thread.Sleep(100);
            }
            // Verify dn0Vol1 has been completely removed from DN0.
            // 1. dn0Vol1 is removed from DataStorage.
            DataStorage storage = dn0.GetStorage();

            NUnit.Framework.Assert.AreEqual(1, storage.GetNumStorageDirs());
            for (int i = 0; i < storage.GetNumStorageDirs(); i++)
            {
                Storage.StorageDirectory sd = storage.GetStorageDir(i);
                NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(dn0Vol1.GetAbsolutePath()));
            }
            string bpid = cluster.GetNamesystem().GetBlockPoolId();
            BlockPoolSliceStorage bpsStorage = storage.GetBPStorage(bpid);

            NUnit.Framework.Assert.AreEqual(1, bpsStorage.GetNumStorageDirs());
            for (int i_1 = 0; i_1 < bpsStorage.GetNumStorageDirs(); i_1++)
            {
                Storage.StorageDirectory sd = bpsStorage.GetStorageDir(i_1);
                NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(dn0Vol1.GetAbsolutePath()));
            }
            // 2. dn0Vol1 is removed from FsDataset
            FsDatasetSpi <FsVolumeSpi> data = dn0.GetFSDataset();

            foreach (FsVolumeSpi volume in data.GetVolumes())
            {
                Assert.AssertNotEquals(new FilePath(volume.GetBasePath()).GetAbsoluteFile(),
                                       dn0Vol1.GetAbsoluteFile());
            }
            // 3. all blocks on dn0Vol1 have been removed.
            foreach (ReplicaInfo replica in FsDatasetTestUtil.GetReplicas(data, bpid))
            {
                NUnit.Framework.Assert.IsNotNull(replica.GetVolume());
                Assert.AssertNotEquals(new FilePath(replica.GetVolume().GetBasePath()).GetAbsoluteFile(),
                                       dn0Vol1.GetAbsoluteFile());
            }
            // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
            string[] dataDirStrs = dn0.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey).Split(",");
            NUnit.Framework.Assert.AreEqual(1, dataDirStrs.Length);
            NUnit.Framework.Assert.IsFalse(dataDirStrs[0].Contains(dn0Vol1.GetAbsolutePath()));
        }
Example #18
 public static FilePath GetFile<_T0>(FsDatasetSpi<_T0> fsd, string bpid, long bid)
     where _T0 : FsVolumeSpi
 {
     return ((FsDatasetImpl)fsd).GetFile(bpid, bid, false);
 }
Example #19
 /// <exception cref="System.Exception"/>
 public virtual void RunTest(int parallelism)
 {
     cluster = new MiniDFSCluster.Builder(Conf).Build();
     try
     {
         cluster.WaitActive();
         bpid   = cluster.GetNamesystem().GetBlockPoolId();
         fds    = DataNodeTestUtils.GetFSDataset(cluster.GetDataNodes()[0]);
         client = cluster.GetFileSystem().GetClient();
         Conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanThreadsKey, parallelism);
         DataNode dataNode = cluster.GetDataNodes()[0];
         scanner = new DirectoryScanner(dataNode, fds, Conf);
         scanner.SetRetainDiffs(true);
         // Add files with 100 blocks
         CreateFile(GenericTestUtils.GetMethodName(), BlockLength * 100, false);
         long totalBlocks = 100;
         // Test1: No difference between volumeMap and disk
         Scan(100, 0, 0, 0, 0, 0);
         // Test2: block metafile is missing
         long blockId = DeleteMetaFile();
         Scan(totalBlocks, 1, 1, 0, 0, 1);
         VerifyGenStamp(blockId, GenerationStamp.GrandfatherGenerationStamp);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test3: block file is missing
         blockId = DeleteBlockFile();
         Scan(totalBlocks, 1, 0, 1, 0, 0);
         totalBlocks--;
         VerifyDeletion(blockId);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test4: A block file exists for which there is no metafile and
         // a block in memory
         blockId = CreateBlockFile();
         totalBlocks++;
         Scan(totalBlocks, 1, 1, 0, 1, 0);
         VerifyAddition(blockId, GenerationStamp.GrandfatherGenerationStamp, 0);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test5: A metafile exists for which there is no block file and
         // a block in memory
         blockId = CreateMetaFile();
         Scan(totalBlocks + 1, 1, 0, 1, 1, 0);
         FilePath metafile = new FilePath(GetMetaFile(blockId));
         NUnit.Framework.Assert.IsTrue(!metafile.Exists());
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test6: A block file and metafile exists for which there is no block in
         // memory
         blockId = CreateBlockMetaFile();
         totalBlocks++;
         Scan(totalBlocks, 1, 0, 0, 1, 0);
         VerifyAddition(blockId, DefaultGenStamp, 0);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test7: Delete bunch of metafiles
         for (int i = 0; i < 10; i++)
         {
             blockId = DeleteMetaFile();
         }
         Scan(totalBlocks, 10, 10, 0, 0, 10);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test8: Delete bunch of block files
         for (int i_1 = 0; i_1 < 10; i_1++)
         {
             blockId = DeleteBlockFile();
         }
         Scan(totalBlocks, 10, 0, 10, 0, 0);
         totalBlocks -= 10;
         Scan(totalBlocks, 0, 0, 0, 0, 0);
          // Test9: create a bunch of block files
         for (int i_2 = 0; i_2 < 10; i_2++)
         {
             blockId = CreateBlockFile();
         }
         totalBlocks += 10;
         Scan(totalBlocks, 10, 10, 0, 10, 0);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test10: create a bunch of metafiles
         for (int i_3 = 0; i_3 < 10; i_3++)
         {
             blockId = CreateMetaFile();
         }
         Scan(totalBlocks + 10, 10, 0, 10, 10, 0);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
          // Test11: create a bunch of block files and meta files
         for (int i_4 = 0; i_4 < 10; i_4++)
         {
             blockId = CreateBlockMetaFile();
         }
         totalBlocks += 10;
         Scan(totalBlocks, 10, 0, 0, 10, 0);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test12: truncate block files to test block length mismatch
         for (int i_5 = 0; i_5 < 10; i_5++)
         {
             TruncateBlockFile();
         }
         Scan(totalBlocks, 10, 0, 0, 0, 10);
         Scan(totalBlocks, 0, 0, 0, 0, 0);
         // Test13: all the conditions combined
         CreateMetaFile();
         CreateBlockFile();
         CreateBlockMetaFile();
         DeleteMetaFile();
         DeleteBlockFile();
         TruncateBlockFile();
         Scan(totalBlocks + 3, 6, 2, 2, 3, 2);
         Scan(totalBlocks + 1, 0, 0, 0, 0, 0);
         // Test14: validate clean shutdown of DirectoryScanner
         ////assertTrue(scanner.getRunStatus()); //assumes "real" FSDataset, not sim
         scanner.Shutdown();
         NUnit.Framework.Assert.IsFalse(scanner.GetRunStatus());
     }
     finally
     {
         if (scanner != null)
         {
             scanner.Shutdown();
             scanner = null;
         }
         cluster.Shutdown();
     }
 }
Example #20
 /// <exception cref="System.IO.IOException"/>
 public static FilePath GetBlockFile<_T0>(FsDatasetSpi<_T0> fsd, string bpid, Block b)
     where _T0 : FsVolumeSpi
 {
     return ((FsDatasetImpl)fsd).GetBlockFile(bpid, b.GetBlockId());
 }
Example #21
        public virtual void TestUpdateReplicaUnderRecovery()
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                cluster.WaitActive();
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                //create a file
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string filestr            = "/foo";
                Path   filepath           = new Path(filestr);
                DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
                //get block info
                LocatedBlock locatedblock = GetLastLocatedBlock(
                    DFSClientAdapter.GetDFSClient(dfs).GetNamenode(), filestr);
                DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
                NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
                //get DataNode and FSDataset objects
                DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanode != null);
                //initReplicaRecovery
                ExtendedBlock         b          = locatedblock.GetBlock();
                long                  recoveryid = b.GetGenerationStamp() + 1;
                long                  newlength  = b.GetNumBytes() - 1;
                FsDatasetSpi <object> fsdataset  = DataNodeTestUtils.GetFSDataset(datanode);
                ReplicaRecoveryInfo rri = fsdataset.InitReplicaRecovery(
                    new BlockRecoveryCommand.RecoveringBlock(b, null, recoveryid));
                //check replica
                ReplicaInfo replica = FsDatasetTestUtil.FetchReplicaInfo(fsdataset, bpid, b.GetBlockId());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rur, replica.GetState());
                //check meta data before update
                FsDatasetImpl.CheckReplicaFiles(replica);
                {
                    //case "THIS IS NOT SUPPOSED TO HAPPEN"
                    //with (block length) != (stored replica's on disk length).
                    //create a block with same id and gs but different length.
                    ExtendedBlock tmp = new ExtendedBlock(b.GetBlockPoolId(), rri.GetBlockId(),
                                                          rri.GetNumBytes() - 1, rri.GetGenerationStamp());
                    try
                    {
                        //update should fail
                        fsdataset.UpdateReplicaUnderRecovery(tmp, recoveryid, tmp.GetBlockId(), newlength);
                        NUnit.Framework.Assert.Fail();
                    }
                    catch (IOException ioe)
                    {
                        System.Console.Out.WriteLine("GOOD: getting " + ioe);
                    }
                }
                //update
                string storageID = fsdataset.UpdateReplicaUnderRecovery(
                    new ExtendedBlock(b.GetBlockPoolId(), rri), recoveryid, rri.GetBlockId(), newlength);
                NUnit.Framework.Assert.IsTrue(storageID != null);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #22
 public static ReplicaInfo FetchReplicaInfo<_T0>(FsDatasetSpi<_T0> fsd, string bpid, long blockId)
     where _T0 : FsVolumeSpi
 {
     return ((FsDatasetImpl)fsd).FetchReplicaInfo(bpid, blockId);
 }
Example #23
 public static ICollection<ReplicaInfo> GetReplicas<_T0>(FsDatasetSpi<_T0> fsd, string bpid)
     where _T0 : FsVolumeSpi
 {
     return ((FsDatasetImpl)fsd).volumeMap.Replicas(bpid);
 }
Example #24
 public static long GetPendingAsyncDeletions<_T0>(FsDatasetSpi<_T0> fsd)
     where _T0 : FsVolumeSpi
 {
     return ((FsDatasetImpl)fsd).asyncDiskService.CountPendingDeletions();
 }
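A common test pattern around this helper (a hedged sketch; dn is a placeholder and the timeout-free polling loop is an assumption) is to wait until the async disk service has drained its deletion queue before asserting on-disk state:

     // Hypothetical sketch: poll until all pending async deletions have run.
     FsDatasetSpi<object> fsd = dn.GetFSDataset();
     while (FsDatasetTestUtil.GetPendingAsyncDeletions(fsd) > 0)
     {
         Sharpen.Thread.Sleep(100);
     }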