Example #1
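        /// <summary>
        /// Writes an unfinalized (RBW) replica, optionally truncates its block file on
        /// disk to simulate corruption, restarts the datanode, and verifies that the
        /// replica is recovered in the RWR state with the expected length.
        /// </summary>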
        /// <exception cref="System.IO.IOException"/>
        private void TestRbwReplicas(MiniDFSCluster cluster, bool isCorrupt)
        {
            FSDataOutputStream @out = null;
            FileSystem         fs   = cluster.GetFileSystem();
            Path src = new Path("/test.txt");

            try
            {
                int fileLen = 515;
                // create some rbw replicas on disk
                byte[] writeBuf = new byte[fileLen];
                new Random().NextBytes(writeBuf);
                @out = fs.Create(src);
                @out.Write(writeBuf);
                @out.Hflush();
                DataNode dn = cluster.GetDataNodes()[0];
                foreach (FsVolumeSpi v in Dataset(dn).GetVolumes())
                {
                    FsVolumeImpl volume     = (FsVolumeImpl)v;
                    FilePath     currentDir = volume.GetCurrentDir().GetParentFile().GetParentFile();
                    FilePath     rbwDir     = new FilePath(currentDir, "rbw");
                    foreach (FilePath file in rbwDir.ListFiles())
                    {
                        if (isCorrupt && Block.IsBlockFilename(file))
                        {
                            new RandomAccessFile(file, "rw").SetLength(fileLen - 1); // corrupt
                        }
                    }
                }
                cluster.RestartDataNodes();
                cluster.WaitActive();
                dn = cluster.GetDataNodes()[0];
                // check volumeMap: one rwr replica
                string     bpid     = cluster.GetNamesystem().GetBlockPoolId();
                ReplicaMap replicas = Dataset(dn).volumeMap;
                NUnit.Framework.Assert.AreEqual(1, replicas.Size(bpid));
                ReplicaInfo replica = replicas.Replicas(bpid).GetEnumerator().Next();
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rwr, replica.GetState());
                if (isCorrupt)
                {
                    NUnit.Framework.Assert.AreEqual((fileLen - 1) / 512 * 512, replica.GetNumBytes());
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(fileLen, replica.GetNumBytes());
                }
                Dataset(dn).Invalidate(bpid, new Block[] { replica });
            }
            finally
            {
                IOUtils.CloseStream(@out);
                if (fs.Exists(src))
                {
                    fs.Delete(src, false);
                }
                fs.Close();
            }
        }
Example #2
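        /// <summary>
        /// Keeps the first configured data directory, appends two new paths, and checks
        /// that ParseChangedVolumes classifies the locations as new, deactivated, and
        /// unchanged as expected.
        /// </summary>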
        public virtual void TestParseChangedVolumes()
        {
            StartDFSCluster(1, 1);
            DataNode                dn           = cluster.GetDataNodes()[0];
            Configuration           conf         = dn.GetConf();
            string                  oldPaths     = conf.Get(DFSConfigKeys.DfsDatanodeDataDirKey);
            IList <StorageLocation> oldLocations = new AList <StorageLocation>();

            foreach (string path in oldPaths.Split(","))
            {
                oldLocations.AddItem(StorageLocation.Parse(path));
            }
            NUnit.Framework.Assert.IsFalse(oldLocations.IsEmpty());
            string newPaths = oldLocations[0].GetFile().GetAbsolutePath() + ",/foo/path1,/foo/path2";

            DataNode.ChangedVolumes changedVolumes = dn.ParseChangedVolumes(newPaths);
            IList <StorageLocation> newVolumes     = changedVolumes.newLocations;

            NUnit.Framework.Assert.AreEqual(2, newVolumes.Count);
            NUnit.Framework.Assert.AreEqual(new FilePath("/foo/path1").GetAbsolutePath(), newVolumes[0].GetFile().GetAbsolutePath());
            NUnit.Framework.Assert.AreEqual(new FilePath("/foo/path2").GetAbsolutePath(), newVolumes[1].GetFile().GetAbsolutePath());
            IList <StorageLocation> removedVolumes = changedVolumes.deactivateLocations;

            NUnit.Framework.Assert.AreEqual(1, removedVolumes.Count);
            NUnit.Framework.Assert.AreEqual(oldLocations[1].GetFile(), removedVolumes[0].GetFile());
            NUnit.Framework.Assert.AreEqual(1, changedVolumes.unchangedLocations.Count);
            NUnit.Framework.Assert.AreEqual(oldLocations[0].GetFile(), changedVolumes.unchangedLocations[0].GetFile());
        }
Example #3
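 /// <summary>
 /// Starts a MiniDFSCluster with the given block size and datanode count, and
 /// records the single volume when only one datanode is requested.
 /// </summary>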
 /// <param name="blockSize"/>
 /// <param name="perVolumeCapacity">
 /// limit the capacity of each volume to the given
 /// value. If negative, then don't limit.
 /// </param>
 /// <exception cref="System.IO.IOException"/>
 private void StartCluster(int blockSize, int numDatanodes, long perVolumeCapacity)
 {
     InitConfig(blockSize);
     cluster = new MiniDFSCluster.Builder(conf).StoragesPerDatanode(StoragesPerDatanode).NumDataNodes(numDatanodes).Build();
     fs     = cluster.GetFileSystem();
     client = fs.GetClient();
     cluster.WaitActive();
     if (perVolumeCapacity >= 0)
     {
         foreach (DataNode dn in cluster.GetDataNodes())
         {
             foreach (FsVolumeSpi volume in dn.GetFSDataset().GetVolumes())
             {
                 ((FsVolumeImpl)volume).SetCapacityForTesting(perVolumeCapacity);
             }
         }
     }
     if (numDatanodes == 1)
     {
         IList <FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
         Assert.AssertThat(volumes.Count, IS.Is(1));
         singletonVolume = ((FsVolumeImpl)volumes[0]);
     }
 }
Example #4
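 /// <summary>
 /// Marks two datanodes stale by rewinding their last-heartbeat timestamps,
 /// asserts the StaleDataNodes gauge reads 2, then restores the heartbeats and
 /// asserts the gauge drops back to 0.
 /// </summary>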
 public virtual void TestStaleNodes()
 {
     // Set two datanodes as stale
     for (int i = 0; i < 2; i++)
     {
         DataNode dn = cluster.GetDataNodes()[i];
         DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
         long staleInterval = Conf.GetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalDefault);
         DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
         DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
     }
     // Let the HeartbeatManager check heartbeats
     BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
     MetricsAsserts.AssertGauge("StaleDataNodes", 2, MetricsAsserts.GetMetrics(NsMetrics));
     // Reset stale datanodes
     for (int i_1 = 0; i_1 < 2; i_1++)
     {
         DataNode dn = cluster.GetDataNodes()[i_1];
         DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, false);
         DatanodeDescriptor dnDes = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager().GetDatanode(dn.GetDatanodeId());
         DFSTestUtil.ResetLastUpdatesWithOffset(dnDes, 0);
     }
     // Let the HeartbeatManager refresh
     BlockManagerTestUtil.CheckHeartbeat(cluster.GetNameNode().GetNamesystem().GetBlockManager());
     MetricsAsserts.AssertGauge("StaleDataNodes", 0, MetricsAsserts.GetMetrics(NsMetrics));
 }
Example #5
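        /// <summary>
        /// Verifies that once a datanode is marked dead, its blockReceivedAndDeleted
        /// and blockReport RPCs are rejected with an IOException, and that a heartbeat
        /// from it is answered with a command to re-register.
        /// </summary>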
        public virtual void TestDeadDatanode()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            cluster.WaitActive();
            string poolId = cluster.GetNamesystem().GetBlockPoolId();
            // wait for datanode to be marked live
            DataNode             dn  = cluster.GetDataNodes()[0];
            DatanodeRegistration reg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);

            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), true, 20000);
            // Shutdown and wait for datanode to be marked dead
            dn.Shutdown();
            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), false, 20000);
            DatanodeProtocol dnp = cluster.GetNameNodeRpc();

            ReceivedDeletedBlockInfo[] blocks = new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null) };
            StorageReceivedDeletedBlocks[] storageBlocks = new StorageReceivedDeletedBlocks[] { new StorageReceivedDeletedBlocks(reg.GetDatanodeUuid(), blocks) };
            // Ensure blockReceived call from dead datanode is rejected with IOException
            try
            {
                dnp.BlockReceivedAndDeleted(reg, poolId, storageBlocks);
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure blockReport from dead datanode is rejected with IOException
            StorageBlockReport[] report = new StorageBlockReport[] { new StorageBlockReport(new DatanodeStorage(reg.GetDatanodeUuid()), BlockListAsLongs.Empty) };
            try
            {
                dnp.BlockReport(reg, poolId, report, new BlockReportContext(1, 0, Runtime.NanoTime()));
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure heartbeat from dead datanode is rejected with a command
            // that asks datanode to register again
            StorageReport[] rep = new StorageReport[] { new StorageReport(new DatanodeStorage(reg.GetDatanodeUuid()), false, 0, 0, 0, 0) };
            DatanodeCommand[] cmd = dnp.SendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null).GetCommands();
            NUnit.Framework.Assert.AreEqual(1, cmd.Length);
            NUnit.Framework.Assert.AreEqual(cmd[0].GetAction(), RegisterCommand.Register.GetAction());
        }
Example #6
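        /// <summary>
        /// Checks that the datanode stays up while at least one block pool service
        /// thread remains, and exits once all of them are stopped.
        /// </summary>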
        public virtual void TestBPServiceExit()
        {
            DataNode dn = cluster.GetDataNodes()[0];

            StopBPServiceThreads(1, dn);
            NUnit.Framework.Assert.IsTrue("DataNode should not exit", dn.IsDatanodeUp());
            StopBPServiceThreads(2, dn);
            NUnit.Framework.Assert.IsFalse("DataNode should exit", dn.IsDatanodeUp());
        }
Example #7
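        /// <summary>
        /// Fills the namenode's invalidateBlocks queue by deleting a replicated file
        /// while all datanodes are down, then re-registers each datanode and checks
        /// that its pending invalidation work is wiped.
        /// </summary>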
        /// <exception cref="System.Exception"/>
        public virtual void TestDatanodeReRegistration()
        {
            // Create a test file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            Path path = new Path("/testRR");

            // Create a file and shutdown the DNs, which populates InvalidateBlocks
            DFSTestUtil.CreateFile(dfs, path, dfs.GetDefaultBlockSize(), (short)NumOfDatanodes, unchecked((int)(0xED0ED0)));
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                dn.Shutdown();
            }
            dfs.Delete(path, false);
            namesystem.WriteLock();
            InvalidateBlocks invalidateBlocks;
            int expected = NumOfDatanodes;

            try
            {
                invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
                NUnit.Framework.Assert.AreEqual("Expected invalidate blocks to be the number of DNs", (long)expected, invalidateBlocks.NumBlocks());
            }
            finally
            {
                namesystem.WriteUnlock();
            }
            // Re-register each DN and see that it wipes the invalidation work
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DatanodeID           did = dn_1.GetDatanodeId();
                DatanodeRegistration reg = new DatanodeRegistration(new DatanodeID(UUID.RandomUUID().ToString(), did),
                                                                    new StorageInfo(HdfsServerConstants.NodeType.DataNode), new ExportedBlockKeys(), VersionInfo.GetVersion());
                namesystem.WriteLock();
                try
                {
                    bm.GetDatanodeManager().RegisterDatanode(reg);
                    expected--;
                    NUnit.Framework.Assert.AreEqual("Expected number of invalidate blocks to decrease"
                                                    , (long)expected, invalidateBlocks.NumBlocks());
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
        }
Example #8
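        /// <summary>
        /// Triggers a heartbeat against a spied NN RPC translator and verifies that
        /// the captured storage reports carry the expected storage type and the
        /// NORMAL storage state.
        /// </summary>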
        public virtual void TestStorageReportHasStorageTypeAndState()
        {
            // Make sure we are not testing with the default type, that would not
            // be a very good test.
            NUnit.Framework.Assert.AreNotSame(storageType, StorageType.Default);
            NameNode nn = cluster.GetNameNode();
            DataNode dn = cluster.GetDataNodes()[0];
            // Insert a spy object for the NN RPC.
            DatanodeProtocolClientSideTranslatorPB nnSpy = DataNodeTestUtils.SpyOnBposToNN(dn, nn);

            // Trigger a heartbeat so there is an interaction with the spy
            // object.
            DataNodeTestUtils.TriggerHeartbeat(dn);
            // Verify that the callback passed in the expected parameters.
            ArgumentCaptor <StorageReport[]> captor = ArgumentCaptor.ForClass <StorageReport[]>();

            Org.Mockito.Mockito.Verify(nnSpy).SendHeartbeat(
                Matchers.Any <DatanodeRegistration>(), captor.Capture(),
                Matchers.AnyLong(), Matchers.AnyLong(), Matchers.AnyInt(),
                Matchers.AnyInt(), Matchers.AnyInt(),
                Org.Mockito.Mockito.Any <VolumeFailureSummary>());
            StorageReport[] reports = captor.GetValue();
            foreach (StorageReport report in reports)
            {
                Assert.AssertThat(report.GetStorage().GetStorageType(), IS.Is(storageType));
                Assert.AssertThat(report.GetStorage().GetState(), IS.Is(DatanodeStorage.State.Normal));
            }
        }
Example #9
        /// <summary>Test that a full block report is sent after hot swapping volumes</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
        public virtual void TestFullBlockReportAfterRemovingVolumes()
        {
            Configuration conf = new Configuration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            // Similar to TestTriggerBlockReport, set a really long value for
            // dfs.heartbeat.interval, so that incremental block reports and heartbeats
            // won't be sent during this test unless they're triggered
            // manually.
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 10800000L);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1080L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            cluster.WaitActive();
            DataNode dn = cluster.GetDataNodes()[0];
            DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dn, cluster.GetNameNode());
            // Remove a data dir from datanode
            FilePath dataDirToKeep = new FilePath(cluster.GetDataDirectory(), "data1");

            dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, dataDirToKeep.ToString());
            // We should get 1 full report
            Org.Mockito.Mockito.Verify(spy, Org.Mockito.Mockito.Timeout(60000).Times(1)).BlockReport(
                Matchers.Any <DatanodeRegistration>(), Matchers.AnyString(),
                Matchers.Any <StorageBlockReport[]>(), Matchers.Any <BlockReportContext>());
        }
Example #10
        /// <summary>
        /// Test for the case where the client begins to read a long block, but doesn't
        /// read bytes off the stream quickly.
        /// </summary>
        /// <remarks>
        /// Test for the case where the client begins to read a long block, but doesn't
        /// read bytes off the stream quickly. The datanode should time out sending the
        /// chunks and the transceiver should die, even if it has a long keepalive.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestSlowReader()
        {
            // Set a client socket cache expiry time much longer than
            // the datanode-side expiration time.
            long          ClientExpiryMs = 600000L;
            Configuration clientConf     = new Configuration(conf);

            clientConf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, ClientExpiryMs);
            clientConf.Set(DFSConfigKeys.DfsClientContext, "testSlowReader");
            DistributedFileSystem fs = (DistributedFileSystem)FileSystem.Get(cluster.GetURI(), clientConf);

            // Restart the DN with a shorter write timeout.
            MiniDFSCluster.DataNodeProperties props = cluster.StopDataNode(0);
            props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, WriteTimeout);
            props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 120000);
            NUnit.Framework.Assert.IsTrue(cluster.RestartDataNode(props, true));
            dn = cluster.GetDataNodes()[0];
            // Wait for heartbeats to avoid a startup race where we
            // try to write the block while the DN is still starting.
            cluster.TriggerHeartbeats();
            DFSTestUtil.CreateFile(fs, TestFile, 1024 * 1024 * 8L, (short)1, 0L);
            FSDataInputStream stm = fs.Open(TestFile);

            stm.Read();
            AssertXceiverCount(1);
            GenericTestUtils.WaitFor(new _Supplier_193(this), 500, 50000);
            // DN should time out in sendChunks, and this should force
            // the xceiver to exit.
            IOUtils.CloseStream(stm);
        }
Example #11
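        /// <summary>Asserts that the FsyncCount metric of the given datanode matches the expected value.</summary>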
        private void CheckSyncMetric(MiniDFSCluster cluster, int dn, long value)
        {
            DataNode datanode = cluster.GetDataNodes()[dn];

            MetricsAsserts.AssertCounter("FsyncCount", value, MetricsAsserts.GetMetrics(datanode.GetMetrics().Name()));
        }
Example #12
 /// <exception cref="System.Exception"/>
 internal TestContext(Configuration conf, int numNameServices)
 {
     this.numNameServices = numNameServices;
     MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StoragesPerDatanode(1);
     if (numNameServices > 1)
     {
         bld.NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(numNameServices));
     }
     cluster = bld.Build();
     cluster.WaitActive();
     dfs = new DistributedFileSystem[numNameServices];
     for (int i = 0; i < numNameServices; i++)
     {
         dfs[i] = cluster.GetFileSystem(i);
     }
     bpids = new string[numNameServices];
     for (int i_1 = 0; i_1 < numNameServices; i_1++)
     {
         bpids[i_1] = cluster.GetNamesystem(i_1).GetBlockPoolId();
     }
     datanode     = cluster.GetDataNodes()[0];
     blockScanner = datanode.GetBlockScanner();
     for (int i_2 = 0; i_2 < numNameServices; i_2++)
     {
         dfs[i_2].Mkdirs(new Path("/test"));
     }
     data    = datanode.GetFSDataset();
     volumes = data.GetVolumes();
 }
Example #13
 /// <exception cref="System.Exception"/>
 private void RollbackRollingUpgrade()
 {
     // Shutdown datanodes and namenodes
     // Restart the namenode with rolling upgrade rollback
     Log.Info("Starting rollback of the rolling upgrade");
     MiniDFSCluster.DataNodeProperties dnprop = cluster.StopDataNode(0);
     dnprop.SetDnArgs("-rollback");
     cluster.ShutdownNameNodes();
     cluster.RestartNameNode("-rollingupgrade", "rollback");
     cluster.RestartDataNode(dnprop);
     cluster.WaitActive();
     nn  = cluster.GetNameNode(0);
     dn0 = cluster.GetDataNodes()[0];
     TriggerHeartBeats();
     Log.Info("The cluster is active after rollback");
 }
Example #14
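        /// <summary>
        /// Writes a partially filled block under a tight diskspace quota, shuts down
        /// one datanode to force block abandonment, and verifies that closing the
        /// stream does not raise a QuotaExceededException.
        /// </summary>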
        public virtual void TestQuotaUpdatedWhenBlockAbandoned()
        {
            // Setting diskspace quota to 3MB
            fs.SetQuota(new Path("/"), HdfsConstants.QuotaDontSet, 3 * 1024 * 1024);
            // Start writing a file with 2 replicas to ensure each datanode has one.
            // Block Size is 1MB.
            string             src  = FileNamePrefix + "test_quota1";
            FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)2, 1024 * 1024);

            for (int i = 0; i < 1024; i++)
            {
                fout.WriteByte(123);
            }
            // Shutdown one datanode, causing the block abandonment.
            cluster.GetDataNodes()[0].Shutdown();
            // Close the file, new block will be allocated with 2MB pending size.
            try
            {
                fout.Close();
            }
            catch (QuotaExceededException)
            {
                NUnit.Framework.Assert.Fail("Unexpected quota exception when closing fout");
            }
        }
Example #15
 public virtual void Setup()
 {
     conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, KeepaliveTimeout);
     conf.SetInt(DFSConfigKeys.DfsClientMaxBlockAcquireFailuresKey, 0);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
     dn      = cluster.GetDataNodes()[0];
 }
Example #16
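        /// <summary>
        /// Opens an existing file for append, writes and hflushes some bytes, and
        /// verifies that every datanode in the pipeline reports the block as an RBW
        /// replica.
        /// </summary>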
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            ofs.WriteBytes("Some more stuff to write");
            ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
            IList <LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString(), FileSize - 1, FileSize).GetLocatedBlocks();
            string bpid = cluster.GetNamesystem().GetBlockPoolId();

            foreach (DataNode dn in cluster.GetDataNodes())
            {
                Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId());
                NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r != null);
                NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",
                                                HdfsServerConstants.ReplicaState.Rbw, r.GetState());
            }
            ofs.Close();
        }
Example #17
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, i == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                                     i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
Example #18
        public virtual void TestPendingDeleteUnknownBlocks()
        {
            int fileNum = 5; // 5 files

            Path[] files = new Path[fileNum];
            MiniDFSCluster.DataNodeProperties[] dnprops = new MiniDFSCluster.DataNodeProperties[Replication];
            // create a group of files, each file contains 1 block
            for (int i = 0; i < fileNum; i++)
            {
                files[i] = new Path("/file" + i);
                DFSTestUtil.CreateFile(dfs, files[i], Blocksize, Replication, i);
            }
            // wait until all DataNodes have replicas
            WaitForReplication();
            for (int i_1 = Replication - 1; i_1 >= 0; i_1--)
            {
                dnprops[i_1] = cluster.StopDataNode(i_1);
            }
            Sharpen.Thread.Sleep(2000);
            // delete 2 files, we still have 3 files remaining so that we can cover
            // every DN storage
            for (int i_2 = 0; i_2 < 2; i_2++)
            {
                dfs.Delete(files[i_2], true);
            }
            // restart NameNode
            cluster.RestartNameNode(false);
            InvalidateBlocks invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
            InvalidateBlocks mockIb = Org.Mockito.Mockito.Spy(invalidateBlocks);

            Org.Mockito.Mockito.DoReturn(1L).When(mockIb).GetInvalidationDelay();
            Whitebox.SetInternalState(cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks", mockIb);
            NUnit.Framework.Assert.AreEqual(0L, cluster.GetNamesystem().GetPendingDeletionBlocks());
            // restart DataNodes
            for (int i_3 = 0; i_3 < Replication; i_3++)
            {
                cluster.RestartDataNode(dnprops[i_3], true);
            }
            cluster.WaitActive();
            for (int i_4 = 0; i_4 < Replication; i_4++)
            {
                DataNodeTestUtils.TriggerBlockReport(cluster.GetDataNodes()[i_4]);
            }
            Sharpen.Thread.Sleep(2000);
            // make sure we have received block reports by checking the total block #
            NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
            NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetPendingDeletionBlocks());
            cluster.RestartNameNode(true);
            Sharpen.Thread.Sleep(6000);
            NUnit.Framework.Assert.AreEqual(3, cluster.GetNamesystem().GetBlocksTotal());
            NUnit.Framework.Assert.AreEqual(0, cluster.GetNamesystem().GetPendingDeletionBlocks());
        }
Example #19
        public static void SetUp()
        {
            cluster   = (new MiniDFSCluster.Builder(conf)).NumDataNodes(1).Build();
            nnAddress = cluster.GetNameNode().GetNameNodeAddress();
            DataNode dn = cluster.GetDataNodes()[0];

            dnAddress = new IPEndPoint(dn.GetDatanodeId().GetIpAddr(), dn.GetIpcPort());
        }
Example #20
 /// <summary>Attempts to start a DataNode with the given operation.</summary>
 /// <remarks>
 /// Attempts to start a DataNode with the given operation. Starting
 /// the given block pool should fail.
 /// </remarks>
 /// <param name="operation">startup option</param>
 /// <param name="bpid">block pool Id that should fail to start</param>
 /// <exception cref="System.IO.IOException"></exception>
 internal virtual void StartBlockPoolShouldFail(HdfsServerConstants.StartupOption operation, string bpid)
 {
     cluster.StartDataNodes(conf, 1, false, operation, null); // should fail
     NUnit.Framework.Assert.IsFalse("Block pool " + bpid + " should have failed to start",
                                    cluster.GetDataNodes()[0].IsBPServiceAlive(bpid));
 }
Example #21
 public static void SetUpBeforeClass()
 {
     conf    = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
     cluster.WaitClusterUp();
     dfs       = cluster.GetFileSystem();
     datanodes = cluster.GetDataNodes();
 }
Example #22
 private static void LowerKeyUpdateIntervalAndClearKeys(MiniDFSCluster cluster)
 {
     LowerKeyUpdateIntervalAndClearKeys(cluster.GetNamesystem(0));
     LowerKeyUpdateIntervalAndClearKeys(cluster.GetNamesystem(1));
     foreach (DataNode dn in cluster.GetDataNodes())
     {
         dn.ClearAllBlockSecretKeys();
     }
 }
Example #23
        public virtual void SetUp()
        {
            Configuration conf = new Configuration();

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            admin    = new DFSAdmin();
            datanode = cluster.GetDataNodes()[0];
        }
Example #24
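        /// <summary>Returns the sum of GetDfsUsed() over all datanodes in the cluster.</summary>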
        /// <exception cref="System.IO.IOException"/>
        internal static long GetTotalDfsUsed(MiniDFSCluster cluster)
        {
            long total = 0;

            foreach (DataNode node in cluster.GetDataNodes())
            {
                total += DataNodeTestUtils.GetFSDataset(node).GetDfsUsed();
            }
            return(total);
        }
Example #25
        public virtual void TestValidVolumesAtStartup()
        {
            Assume.AssumeTrue(!Runtime.GetProperty("os.name").StartsWith("Windows"));
            // Make sure no DNs are running.
            cluster.ShutdownDataNodes();
            // Bring up a datanode with two default data dirs, but with one bad one.
            conf.SetInt(DFSConfigKeys.DfsDatanodeFailedVolumesToleratedKey, 1);
            // We use subdirectories 0 and 1 in order to have only a single
            // data dir's parent inject a failure.
            FilePath tld            = new FilePath(MiniDFSCluster.GetBaseDirectory(), "badData");
            FilePath dataDir1       = new FilePath(tld, "data1");
            FilePath dataDir1Actual = new FilePath(dataDir1, "1");

            dataDir1Actual.Mkdirs();
            // Force an IOE to occur on one of the dfs.data.dir.
            FilePath dataDir2 = new FilePath(tld, "data2");

            PrepareDirToFail(dataDir2);
            FilePath dataDir2Actual = new FilePath(dataDir2, "2");

            // Start one DN, with manually managed DN dir
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir1Actual.GetPath() + "," + dataDir2Actual.GetPath());
            cluster.StartDataNodes(conf, 1, false, null, null);
            cluster.WaitActive();
            try
            {
                NUnit.Framework.Assert.IsTrue("The DN should have started up fine.", cluster.IsDataNodeUp
                                                  ());
                DataNode dn = cluster.GetDataNodes()[0];
                string   si = DataNodeTestUtils.GetFSDataset(dn).GetStorageInfo();
                NUnit.Framework.Assert.IsTrue("The DN should have started with this directory", si
                                              .Contains(dataDir1Actual.GetPath()));
                NUnit.Framework.Assert.IsFalse("The DN shouldn't have a bad directory.", si.Contains
                                                   (dataDir2Actual.GetPath()));
            }
            finally
            {
                cluster.ShutdownDataNodes();
                FileUtil.Chmod(dataDir2.ToString(), "755");
            }
        }
Example #26
 public virtual void StartCluster()
 {
     conf        = new HdfsConfiguration();
     cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(DnCount).Build();
     fs          = cluster.GetFileSystem();
     singletonNn = cluster.GetNameNode();
     singletonDn = cluster.GetDataNodes()[0];
     bpos        = singletonDn.GetAllBpOs()[0];
     actor       = bpos.GetBPServiceActors()[0];
     storageUuid = singletonDn.GetFSDataset().GetVolumes()[0].GetStorageID();
 }
Example #27
 public virtual void StartUpCluster()
 {
     conf    = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).Build();
     fs      = cluster.GetFileSystem();
     client  = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
     dn0    = cluster.GetDataNodes()[0];
     poolId = cluster.GetNamesystem().GetBlockPoolId();
     dn0Reg = dn0.GetDNRegistrationForBP(poolId);
 }
Example #28
 /// <summary>Stop the heartbeat of a datanode in the MiniDFSCluster</summary>
 /// <param name="cluster">The MiniDFSCluster</param>
 /// <param name="hostName">The hostName of the datanode to be stopped</param>
 /// <returns>The DataNode whose heartbeat has been stopped</returns>
 private DataNode StopDataNodeHeartbeat(MiniDFSCluster cluster, string hostName)
 {
     foreach (DataNode dn in cluster.GetDataNodes())
     {
         if (dn.GetDatanodeId().GetHostName().Equals(hostName))
         {
             DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
             return(dn);
         }
     }
     return(null);
 }
Example #29
 /// <exception cref="System.IO.IOException"/>
 private void StartCluster()
 {
     conf = new HdfsConfiguration();
     conf.SetInt("dfs.blocksize", 1024 * 1024);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplFactor).Build();
     cluster.WaitActive();
     fs = cluster.GetFileSystem();
     nn = cluster.GetNameNode(0);
     NUnit.Framework.Assert.IsNotNull(nn);
     dn0 = cluster.GetDataNodes()[0];
     NUnit.Framework.Assert.IsNotNull(dn0);
     blockPoolId = cluster.GetNameNode(0).GetNamesystem().GetBlockPoolId();
 }
Example #30
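        /// <summary>
        /// Makes both storage directories of the first datanode read-only and keeps
        /// creating files until that datanode shuts itself down.
        /// </summary>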
        public virtual void TestShutdown()
        {
            if (Runtime.GetProperty("os.name").StartsWith("Windows"))
            {
                return;
            }
            // Bring up two more datanodes
            cluster.StartDataNodes(conf, 2, true, null, null);
            cluster.WaitActive();
            int      dnIndex    = 0;
            string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
            FilePath storageDir = cluster.GetInstanceStorageDir(dnIndex, 0);
            FilePath dir1       = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            storageDir = cluster.GetInstanceStorageDir(dnIndex, 1);
            FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            try
            {
                // make the data directory of the first datanode to be readonly
                NUnit.Framework.Assert.IsTrue("Couldn't chmod local vol", dir1.SetReadOnly());
                NUnit.Framework.Assert.IsTrue("Couldn't chmod local vol", dir2.SetReadOnly());
                // create files and make sure that first datanode will be down
                DataNode dn = cluster.GetDataNodes()[dnIndex];
                for (int i = 0; dn.IsDatanodeUp(); i++)
                {
                    Path fileName = new Path("/test.txt" + i);
                    DFSTestUtil.CreateFile(fs, fileName, 1024, (short)2, 1L);
                    DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                    fs.Delete(fileName, true);
                }
            }
            finally
            {
                // restore its old permission
                FileUtil.SetWritable(dir1, true);
                FileUtil.SetWritable(dir2, true);
            }
        }