/// <exception cref="System.IO.IOException"/>
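        /// <summary>
        /// Writes a file at replication fromREP, runs "-setrep -w" through FsShell to
        /// change it to toREP, and verifies that every block location reports toREP hosts.
        /// </summary>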
        internal static void Setrep(int fromREP, int toREP, bool simulatedStorage)
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsReplicationKey, string.Empty + fromREP);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build();
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            try
            {
                Path root = TestDFSShell.Mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP));
                Path f = TestDFSShell.WriteFile(fs, new Path(root, "foo"));
                {
                    // Verify setrep for changing replication
                    string[] args = new string[] { "-setrep", "-w", string.Empty + toREP, string.Empty + f };
                    FsShell shell = new FsShell();
                    shell.SetConf(conf);
                    try
                    {
                        NUnit.Framework.Assert.AreEqual(0, shell.Run(args));
                    }
                    catch (Exception e)
                    {
                        NUnit.Framework.Assert.IsTrue("-setrep " + e, false);
                    }
                }
                //get fs again since the old one may be closed
                fs = cluster.GetFileSystem();
                FileStatus file = fs.GetFileStatus(f);
                long       len  = file.GetLen();
                foreach (BlockLocation locations in fs.GetFileBlockLocations(file, 0, len))
                {
                    NUnit.Framework.Assert.IsTrue(locations.GetHosts().Length == toREP);
                }
                TestDFSShell.Show("done setrep waiting: " + root);
            }
            finally
            {
                try
                {
                    fs.Close();
                }
                catch (Exception)
                {
                }
                cluster.Shutdown();
            }
        }
        /// <exception cref="System.IO.IOException"/>
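        /// <summary>
        /// Runs the pread tests against a 3-datanode cluster with a 4 KB block size,
        /// optionally disabling transferTo and checksum verification.
        /// </summary>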
        private void DfsPreadTest(Configuration conf, bool disableTransferTo, bool verifyChecksum)
        {
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 4096);
            conf.SetLong(DFSConfigKeys.DfsClientReadPrefetchSizeKey, 4096);
            // Set short retry timeouts so this test runs faster
            conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 0);
            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            if (disableTransferTo)
            {
                conf.SetBoolean("dfs.datanode.transferTo.allowed", false);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            FileSystem     fileSys = cluster.GetFileSystem();

            fileSys.SetVerifyChecksum(verifyChecksum);
            try
            {
                Path file1 = new Path("preadtest.dat");
                WriteFile(fileSys, file1);
                PReadFile(fileSys, file1);
                DatanodeRestartTest(cluster, fileSys, file1);
                CleanupFile(fileSys, file1);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
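        // Builds a fresh SimulatedFSDataset and registers the test block pool with it.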
        private SimulatedFSDataset GetSimulatedFSDataset()
        {
            SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf);

            fsdataset.AddBlockPool(bpid, conf);
            return(fsdataset);
        }
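        // Asserts that block b is invalid: IsValidBlock returns false and GetLength,
        // GetBlockInputStream and FinalizeBlock all throw IOException.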
        public virtual void CheckInvalidBlock(ExtendedBlock b)
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();

            NUnit.Framework.Assert.IsFalse(fsdataset.IsValidBlock(b));
            try
            {
                fsdataset.GetLength(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
            // ok - as expected
            try
            {
                fsdataset.GetBlockInputStream(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
            // ok - as expected
            try
            {
                fsdataset.FinalizeBlock(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
        }
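        // Injects a block report taken from one dataset into an empty dataset and
        // verifies that block count, per-block lengths and space accounting carry over.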
        public virtual void TestInjectionEmpty()
        {
            SimulatedFSDataset fsdataset   = GetSimulatedFSDataset();
            BlockListAsLongs   blockReport = fsdataset.GetBlockReport(bpid);

            NUnit.Framework.Assert.AreEqual(0, blockReport.GetNumberOfBlocks());
            int bytesAdded = AddSomeBlocks(fsdataset);

            blockReport = fsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks, blockReport.GetNumberOfBlocks());
            foreach (Block b in blockReport)
            {
                NUnit.Framework.Assert.IsNotNull(b);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b.GetBlockId()), b.GetNumBytes());
            }
            // Inject blocks into an empty fsdataset
            //  - injecting the blocks we got above.
            SimulatedFSDataset sfsdataset = GetSimulatedFSDataset();

            sfsdataset.InjectBlocks(bpid, blockReport);
            blockReport = sfsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks, blockReport.GetNumberOfBlocks());
            foreach (Block b_1 in blockReport)
            {
                NUnit.Framework.Assert.IsNotNull(b_1);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b_1.GetBlockId()), b_1.GetNumBytes());
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b_1.GetBlockId()), sfsdataset.GetLength(new ExtendedBlock(bpid, b_1)));
            }
            NUnit.Framework.Assert.AreEqual(bytesAdded, sfsdataset.GetDfsUsed());
            NUnit.Framework.Assert.AreEqual(sfsdataset.GetCapacity() - bytesAdded, sfsdataset.GetRemaining());
        }
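        // Exercises write/read on a single-node cluster with dfs.bytes-per-checksum set to 1.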
        public virtual void TestSmallBlock()
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsBytesPerChecksumKey, "1");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fileSys = cluster.GetFileSystem();

            try
            {
                Path file1 = new Path("smallblocktest.dat");
                WriteFile(fileSys, file1);
                CheckFile(fileSys, file1);
                CleanupFile(fileSys, file1);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
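        // Writes a file of int.MaxValue + 1 bytes and checks the datanode's BytesWritten
        // counter and that at least one incremental block report was sent.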
        public virtual void TestDataNodeMetrics()
        {
            Configuration conf = new HdfsConfiguration();

            SimulatedFSDataset.SetFactory(conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                FileSystem fs          = cluster.GetFileSystem();
                long       LongFileLen = int.MaxValue + 1L;
                DFSTestUtil.CreateFile(fs, new Path("/tmp.txt"), LongFileLen, (short)1, 1L);
                IList <DataNode> datanodes = cluster.GetDataNodes();
                NUnit.Framework.Assert.AreEqual(datanodes.Count, 1);
                DataNode             datanode = datanodes[0];
                MetricsRecordBuilder rb       = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
                MetricsAsserts.AssertCounter("BytesWritten", LongFileLen, rb);
                NUnit.Framework.Assert.IsTrue("Expected non-zero number of incremental block reports"
                                              , MetricsAsserts.GetLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
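        // Requesting metadata of a missing block must throw; once blocks exist, the
        // metadata stream starts with the header version and a NULL checksum of size 0.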
        public virtual void TestGetMetaData()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();
            ExtendedBlock      b         = new ExtendedBlock(bpid, 1, 5, 0);

            try
            {
                NUnit.Framework.Assert.IsTrue(fsdataset.GetMetaDataInputStream(b) == null);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
            // ok - as expected
            AddSomeBlocks(fsdataset);
            // Only need to add one, but AddSomeBlocks adds several
            b = new ExtendedBlock(bpid, 1, 0, 0);
            InputStream     metaInput     = fsdataset.GetMetaDataInputStream(b);
            DataInputStream metaDataInput = new DataInputStream(metaInput);
            short           version       = metaDataInput.ReadShort();

            NUnit.Framework.Assert.AreEqual(BlockMetadataHeader.Version, version);
            DataChecksum checksum = DataChecksum.NewDataChecksum(metaDataInput);

            NUnit.Framework.Assert.AreEqual(DataChecksum.Type.Null, checksum.GetChecksumType());
            NUnit.Framework.Assert.AreEqual(0, checksum.GetChecksumSize());
        }
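        // Checks invalid-block handling both on an empty dataset and after blocks exist.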
        public virtual void TestInValidBlocks()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();
            ExtendedBlock      b         = new ExtendedBlock(bpid, 1, 5, 0);

            CheckInvalidBlock(b);
            // Now check invalid blocks after adding some blocks
            AddSomeBlocks(fsdataset);
            b = new ExtendedBlock(bpid, Numblocks + 99, 5, 0);
            CheckInvalidBlock(b);
        }
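        // Injects a block report into a dataset that already holds non-conflicting blocks,
        // verifies counts, lengths and space accounting, and finally checks that injection
        // fails when the simulated capacity is too small.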
        public virtual void TestInjectionNonEmpty()
        {
            SimulatedFSDataset fsdataset   = GetSimulatedFSDataset();
            BlockListAsLongs   blockReport = fsdataset.GetBlockReport(bpid);

            NUnit.Framework.Assert.AreEqual(0, blockReport.GetNumberOfBlocks());
            int bytesAdded = AddSomeBlocks(fsdataset);

            blockReport = fsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks, blockReport.GetNumberOfBlocks());
            foreach (Block b in blockReport)
            {
                NUnit.Framework.Assert.IsNotNull(b);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b.GetBlockId()), b.GetNumBytes());
            }
            fsdataset = null;
            // Inject blocks into a non-empty fsdataset
            //  - injecting the blocks we got above.
            SimulatedFSDataset sfsdataset = GetSimulatedFSDataset();

            // Add some blocks whose block ids do not conflict with
            // the ones we are going to inject.
            bytesAdded += AddSomeBlocks(sfsdataset, Numblocks + 1);
            sfsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks, blockReport.GetNumberOfBlocks());
            sfsdataset.InjectBlocks(bpid, blockReport);
            blockReport = sfsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks * 2, blockReport.GetNumberOfBlocks());
            foreach (Block b_1 in blockReport)
            {
                NUnit.Framework.Assert.IsNotNull(b_1);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b_1.GetBlockId()), b_1.GetNumBytes());
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b_1.GetBlockId()), sfsdataset.GetLength(new ExtendedBlock(bpid, b_1)));
            }
            NUnit.Framework.Assert.AreEqual(bytesAdded, sfsdataset.GetDfsUsed());
            NUnit.Framework.Assert.AreEqual(sfsdataset.GetCapacity() - bytesAdded, sfsdataset.GetRemaining());
            // Now test that the dataset cannot be created if it does not have sufficient capacity
            conf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity, 10);
            try
            {
                sfsdataset = GetSimulatedFSDataset();
                sfsdataset.AddBlockPool(bpid, conf);
                sfsdataset.InjectBlocks(bpid, blockReport);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
        }
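        // The default factory must be the real FsDatasetFactory; after SetFactory the
        // configured factory must be the simulated one.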
        public virtual void TestFSDatasetFactory()
        {
            Configuration conf = new Configuration();

            FsDatasetSpi.Factory <object> f = FsDatasetSpi.Factory.GetFactory(conf);
            NUnit.Framework.Assert.AreEqual(typeof(FsDatasetFactory), f.GetType());
            NUnit.Framework.Assert.IsFalse(f.IsSimulated());
            SimulatedFSDataset.SetFactory(conf);
            FsDatasetSpi.Factory <object> s = FsDatasetSpi.Factory.GetFactory(conf);
            NUnit.Framework.Assert.AreEqual(typeof(SimulatedFSDataset.Factory), s.GetType());
            NUnit.Framework.Assert.IsTrue(s.IsSimulated());
        }
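        // Each added block must be valid, have the expected length, and read back the
        // default data byte for its full length.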
        public virtual void TestWriteRead()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();

            AddSomeBlocks(fsdataset);
            for (int i = 1; i <= Numblocks; ++i)
            {
                ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
                NUnit.Framework.Assert.IsTrue(fsdataset.IsValidBlock(b));
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(i), fsdataset.GetLength(b));
                CheckBlockDataAndSize(fsdataset, b, BlockIdToLen(i));
            }
        }
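        // DfsUsed starts at zero and grows by the bytes added; Remaining shrinks to match.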
        public virtual void TestStorageUsage()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();

            NUnit.Framework.Assert.AreEqual(fsdataset.GetDfsUsed(), 0);
            NUnit.Framework.Assert.AreEqual(fsdataset.GetRemaining(), fsdataset.GetCapacity());
            int bytesAdded = AddSomeBlocks(fsdataset);

            NUnit.Framework.Assert.AreEqual(bytesAdded, fsdataset.GetDfsUsed());
            NUnit.Framework.Assert.AreEqual(fsdataset.GetCapacity() - bytesAdded, fsdataset.GetRemaining());
        }
        /// <exception cref="System.IO.IOException"/>
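        /// <summary>Reads the block stream byte by byte, checking both content and total length.</summary>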
        internal virtual void CheckBlockDataAndSize(SimulatedFSDataset fsdataset, ExtendedBlock b, long expectedLen)
        {
            InputStream input      = fsdataset.GetBlockInputStream(b);
            long        lengthRead = 0;
            int         data;

            while ((data = input.Read()) != -1)
            {
                NUnit.Framework.Assert.AreEqual(SimulatedFSDataset.DefaultDatabyte, data);
                lengthRead++;
            }
            NUnit.Framework.Assert.AreEqual(expectedLen, lengthRead);
        }
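        // The block report is empty until blocks are added, then lists each block with
        // its expected length.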
        public virtual void TestGetBlockReport()
        {
            SimulatedFSDataset fsdataset   = GetSimulatedFSDataset();
            BlockListAsLongs   blockReport = fsdataset.GetBlockReport(bpid);

            NUnit.Framework.Assert.AreEqual(0, blockReport.GetNumberOfBlocks());
            AddSomeBlocks(fsdataset);
            blockReport = fsdataset.GetBlockReport(bpid);
            NUnit.Framework.Assert.AreEqual(Numblocks, blockReport.GetNumberOfBlocks());
            foreach (Block b in blockReport)
            {
                NUnit.Framework.Assert.IsNotNull(b);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(b.GetBlockId()), b.GetNumBytes());
            }
        }
        /// <summary>Tests replication in DFS.</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void RunReplication(bool simulated)
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, false);
            if (simulated)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Racks(racks).Build();

            cluster.WaitActive();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);

            DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
            NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
            FileSystem fileSys = cluster.GetFileSystem();

            try
            {
                Path file1 = new Path("/smallblocktest.dat");
                WriteFile(fileSys, file1, 3);
                CheckFile(fileSys, file1, 3);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file1, 10);
                CheckFile(fileSys, file1, 10);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file1, 4);
                CheckFile(fileSys, file1, 4);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file1, 1);
                CheckFile(fileSys, file1, 1);
                CleanupFile(fileSys, file1);
                WriteFile(fileSys, file1, 2);
                CheckFile(fileSys, file1, 2);
                CleanupFile(fileSys, file1);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
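        // Starts NumDatanodes simulated datanodes, one configured with READ_ONLY_SHARED
        // storage, creates a single-block file on a normal node, injects the block into
        // the read-only node, and waits until the NameNode reports both locations.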
        public virtual void Setup()
        {
            conf = new HdfsConfiguration();
            SimulatedFSDataset.SetFactory(conf);
            Configuration[] overlays = new Configuration[NumDatanodes];
            for (int i = 0; i < overlays.Length; i++)
            {
                overlays[i] = new Configuration();
                if (i == RoNodeIndex)
                {
                    overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, DatanodeStorage.State.ReadOnlyShared);
                }
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
            fs              = cluster.GetFileSystem();
            blockManager    = cluster.GetNameNode().GetNamesystem().GetBlockManager();
            datanodeManager = blockManager.GetDatanodeManager();
            client          = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
            for (int i_1 = 0; i_1 < NumDatanodes; i_1++)
            {
                DataNode dataNode = cluster.GetDataNodes()[i_1];
                ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())),
                                     i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
            }
            // Create a 1 block file
            DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
            LocatedBlock locatedBlock = GetLocatedBlock();

            extendedBlock = locatedBlock.GetBlock();
            block         = extendedBlock.GetLocalBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
            normalDataNode   = locatedBlock.GetLocations()[0];
            readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
            Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
            ValidateNumberReplicas(1);
            // Inject the block into the datanode with READ_ONLY_SHARED storage
            cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
            // There should now be 2 *locations* for the block
            // Must wait until the NameNode has processed the block report for the injected blocks
            WaitForLocations(2);
        }
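        // Writes and reads 50 files and asserts that the datanode's TotalWriteTime and
        // TotalReadTime counters both increased.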
        public virtual void TestDataNodeTimeSpend()
        {
            Configuration conf = new HdfsConfiguration();

            SimulatedFSDataset.SetFactory(conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                FileSystem       fs        = cluster.GetFileSystem();
                IList <DataNode> datanodes = cluster.GetDataNodes();
                NUnit.Framework.Assert.AreEqual(datanodes.Count, 1);
                DataNode             datanode = datanodes[0];
                MetricsRecordBuilder rb       = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
                long LongFileLen     = 1024 * 1024 * 10;
                long startWriteValue = MetricsAsserts.GetLongCounter("TotalWriteTime", rb);
                long startReadValue  = MetricsAsserts.GetLongCounter("TotalReadTime", rb);
                for (int x = 0; x < 50; x++)
                {
                    DFSTestUtil.CreateFile(fs, new Path("/time.txt." + x), LongFileLen, (short)1, Time
                                           .MonotonicNow());
                }
                for (int x_1 = 0; x_1 < 50; x_1++)
                {
                    string s = DFSTestUtil.ReadFile(fs, new Path("/time.txt." + x_1));
                }
                MetricsRecordBuilder rbNew = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
                long endWriteValue = MetricsAsserts.GetLongCounter("TotalWriteTime", rbNew);
                long endReadValue  = MetricsAsserts.GetLongCounter("TotalReadTime", rbNew);
                NUnit.Framework.Assert.IsTrue(endReadValue > startReadValue);
                NUnit.Framework.Assert.IsTrue(endWriteValue > startWriteValue);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
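        // Invalidates two blocks, checks that they become invalid and that space
        // accounting is adjusted, then confirms the remaining blocks are still valid.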
        public virtual void TestInvalidate()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();
            int bytesAdded = AddSomeBlocks(fsdataset);

            Block[] deleteBlocks = new Block[2];
            deleteBlocks[0] = new Block(1, 0, 0);
            deleteBlocks[1] = new Block(2, 0, 0);
            fsdataset.Invalidate(bpid, deleteBlocks);
            CheckInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
            CheckInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
            long sizeDeleted = BlockIdToLen(1) + BlockIdToLen(2);

            NUnit.Framework.Assert.AreEqual(bytesAdded - sizeDeleted, fsdataset.GetDfsUsed());
            NUnit.Framework.Assert.AreEqual(fsdataset.GetCapacity() - bytesAdded + sizeDeleted, fsdataset.GetRemaining());
            // Now make sure the rest of the blocks are valid
            for (int i = 3; i <= Numblocks; ++i)
            {
                Block b = new Block(i, 0, 0);
                NUnit.Framework.Assert.IsTrue(fsdataset.IsValidBlock(new ExtendedBlock(bpid, b)));
            }
        }
        /// <exception cref="System.IO.IOException"/>
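        /// <summary>
        /// Creates Numblocks RBW replicas starting at startingBlockId, writes
        /// BlockIdToLen(id) bytes into each, finalizes them, and returns the total
        /// number of bytes written.
        /// </summary>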
        internal virtual int AddSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
        {
            int bytesAdded = 0;

            for (int i = startingBlockId; i < startingBlockId + Numblocks; ++i)
            {
                ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
                // we pass the expected length as zero; fsdataset should use the size of
                // the actual data written
                ReplicaInPipelineInterface bInfo = fsdataset.CreateRbw(StorageType.Default, b, false).GetReplica();
                ReplicaOutputStreams @out = bInfo.CreateStreams(true, DataChecksum.NewDataChecksum(DataChecksum.Type.Crc32, 512));
                try
                {
                    OutputStream dataOut = @out.GetDataOut();
                    NUnit.Framework.Assert.AreEqual(0, fsdataset.GetLength(b));
                    for (int j = 1; j <= BlockIdToLen(i); ++j)
                    {
                        dataOut.Write(j);
                        NUnit.Framework.Assert.AreEqual(j, bInfo.GetBytesOnDisk());
                        // correct length even as we write
                        bytesAdded++;
                    }
                }
                finally
                {
                    @out.Close();
                }
                b.SetNumBytes(BlockIdToLen(i));
                fsdataset.FinalizeBlock(b);
                NUnit.Framework.Assert.AreEqual(BlockIdToLen(i), fsdataset.GetLength(b));
            }
            return(bytesAdded);
        }
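        // With the namenode object limit set to 5, verifies that file and directory
        // creation fails once the limit is hit and succeeds again after deletions
        // free inodes and blocks.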
        public virtual void TestFileLimit()
        {
            Configuration conf       = new HdfsConfiguration();
            int           maxObjects = 5;

            conf.SetLong(DFSConfigKeys.DfsNamenodeMaxObjectsKey, maxObjects);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            int currentNodes = 0;

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();
            FSNamesystem   namesys = cluster.GetNamesystem();

            try
            {
                //
                // check that / exists
                //
                Path path = new Path("/");
                NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory
                                                  ());
                currentNodes = 1; // root inode
                // verify that we can create the specified number of files. We leave
                // one for the "/". Each file takes an inode and a block.
                for (int i = 0; i < maxObjects / 2; i++)
                {
                    Path file = new Path("/filestatus" + i);
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                    currentNodes += 2; // two more objects for this creation.
                }
                // verify that creating another file fails
                bool hitException = false;
                try
                {
                    Path file = new Path("/filestatus");
                    CreateFile(fs, file);
                    System.Console.Out.WriteLine("Created file " + file);
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed file limit", hitException);
                // delete one file
                Path file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0);
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // now, we should be able to create a new file
                CreateFile(fs, file0);
                System.Console.Out.WriteLine("Created file " + file0 + " again.");
                currentNodes += 2;
                // delete the file again
                file0 = new Path("/filestatus0");
                fs.Delete(file0, true);
                System.Console.Out.WriteLine("Deleted file " + file0 + " again.");
                currentNodes -= 2;
                // wait for number of blocks to decrease
                WaitForLimit(namesys, currentNodes);
                // create two directories in place of the file that we deleted
                Path dir = new Path("/dir0/dir1");
                fs.Mkdirs(dir);
                System.Console.Out.WriteLine("Created directories " + dir);
                currentNodes += 2;
                WaitForLimit(namesys, currentNodes);
                // verify that creating another directory fails
                hitException = false;
                try
                {
                    fs.Mkdirs(new Path("dir.fail"));
                    System.Console.Out.WriteLine("Created directory should not have succeeded.");
                }
                catch (IOException)
                {
                    hitException = true;
                }
                NUnit.Framework.Assert.IsTrue("Was able to exceed dir limit", hitException);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
 public virtual void SetUp()
 {
     conf = new HdfsConfiguration();
     SimulatedFSDataset.SetFactory(conf);
 }
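        // Collects the block reports of a running cluster, restarts it with twice as
        // many datanodes, injects all unique blocks into the first datanode, and waits
        // for re-replication back to the original factor.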
        public virtual void TestInjection()
        {
            MiniDFSCluster cluster  = null;
            string         testFile = "/replication-test-file";
            Path           testPath = new Path(testFile);

            byte[] buffer = new byte[1024];
            for (int i = 0; i < buffer.Length; i++)
            {
                buffer[i] = (byte)('1');
            }
            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes));
                conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, checksumSize);
                SimulatedFSDataset.SetFactory(conf);
                //first time format
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                string    bpid      = cluster.GetNamesystem().GetBlockPoolId();
                DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
                WriteFile(cluster.GetFileSystem(), testPath, numDataNodes);
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, 20);
                IList <IDictionary <DatanodeStorage, BlockListAsLongs> > blocksList = cluster.GetAllBlockReports(bpid);
                cluster.Shutdown();
                cluster = null;

                /* Start the MiniDFSCluster with more datanodes since once a writeBlock
                 * to a datanode node fails, same block can not be written to it
                 * immediately. In our case some replication attempts will fail.
                 */
                Log.Info("Restarting minicluster");
                conf = new HdfsConfiguration();
                SimulatedFSDataset.SetFactory(conf);
                conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.0f");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes * 2).Format(false).Build();
                cluster.WaitActive();
                ICollection <Block> uniqueBlocks = new HashSet <Block>();
                foreach (IDictionary <DatanodeStorage, BlockListAsLongs> map in blocksList)
                {
                    foreach (BlockListAsLongs blockList in map.Values)
                    {
                        foreach (Block b in blockList)
                        {
                            uniqueBlocks.AddItem(new Block(b));
                        }
                    }
                }
                // Insert all the blocks in the first data node
                Log.Info("Inserting " + uniqueBlocks.Count + " blocks");
                cluster.InjectBlocks(0, uniqueBlocks, null);
                dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()),
                                          conf);
                WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        /// <exception cref="System.Exception"/>
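        /// <summary>
        /// Command-line entry point: parses options such as -n, -racks, -r, -d,
        /// -simulated, -bpid, -inject and -checkDataNodeAddrConfig, then starts the
        /// requested number of (optionally simulated) datanodes and, if asked, injects
        /// generated blocks into them.
        /// </summary>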
        public static void Main(string[] args)
        {
            int           numDataNodes            = 0;
            int           numRacks                = 0;
            bool          inject                  = false;
            long          startingBlockId         = 1;
            int           numBlocksPerDNtoInject  = 0;
            int           replication             = 1;
            bool          checkDataNodeAddrConfig = false;
            long          simulatedCapacityPerDn  = SimulatedFSDataset.DefaultCapacity;
            string        bpid = null;
            Configuration conf = new HdfsConfiguration();

            for (int i = 0; i < args.Length; i++)
            {
                // parse command line
                if (args[i].Equals("-n"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("missing number of nodes");
                    }
                    numDataNodes = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-racks"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing number of racks");
                    }
                    numRacks = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-r"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing replication factor");
                    }
                    replication = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-d"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing datanode dirs parameter");
                    }
                    dataNodeDirs = args[i];
                }
                else if (args[i].Equals("-simulated"))
                {
                    SimulatedFSDataset.SetFactory(conf);
                    if ((i + 1) < args.Length && !args[i + 1].StartsWith("-"))
                    {
                        simulatedCapacityPerDn = long.Parse(args[++i]);
                    }
                }
                else if (args[i].Equals("-bpid"))
                {
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing blockpoolid parameter");
                    }
                    bpid = args[i];
                }
                else if (args[i].Equals("-inject"))
                {
                    if (!FsDatasetSpi.Factory.GetFactory(conf).IsSimulated())
                    {
                        System.Console.Out.Write("-inject is valid only for simulated");
                        PrintUsageExit();
                    }
                    inject = true;
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing starting block and number of blocks per DN to inject");
                    }
                    startingBlockId = System.Convert.ToInt32(args[i]);
                    if (++i >= args.Length || args[i].StartsWith("-"))
                    {
                        PrintUsageExit("Missing number of blocks to inject");
                    }
                    numBlocksPerDNtoInject = System.Convert.ToInt32(args[i]);
                }
                else if (args[i].Equals("-checkDataNodeAddrConfig"))
                {
                    checkDataNodeAddrConfig = true;
                }
                else
                {
                    PrintUsageExit();
                }
            }
            if (numDataNodes <= 0 || replication <= 0)
            {
                PrintUsageExit("numDataNodes and replication have to be greater than zero");
            }
            if (replication > numDataNodes)
            {
                PrintUsageExit("Replication must be less than or equal to numDataNodes");
            }
            if (bpid == null)
            {
                PrintUsageExit("BlockPoolId must be provided");
            }
            string nameNodeAdr = FileSystem.GetDefaultUri(conf).GetAuthority();

            if (nameNodeAdr == null)
            {
                System.Console.Out.WriteLine("No name node address and port in config");
                System.Environment.Exit(-1);
            }
            bool simulated = FsDatasetSpi.Factory.GetFactory(conf).IsSimulated();

            System.Console.Out.WriteLine("Starting " + numDataNodes + (simulated ? " Simulated "
                                 : " ") + " Data Nodes that will connect to Name Node at " + nameNodeAdr);
            Runtime.SetProperty("test.build.data", dataNodeDirs);
            long[] simulatedCapacities = new long[numDataNodes];
            for (int i_1 = 0; i_1 < numDataNodes; ++i_1)
            {
                simulatedCapacities[i_1] = simulatedCapacityPerDn;
            }
            MiniDFSCluster mc = new MiniDFSCluster();

            try
            {
                mc.FormatDataNodeDirs();
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Error formating data node dirs:" + e);
            }
            string[] rack4DataNode = null;
            if (numRacks > 0)
            {
                System.Console.Out.WriteLine("Using " + numRacks + " racks: ");
                string rackPrefix = GetUniqueRackPrefix();
                rack4DataNode = new string[numDataNodes];
                for (int i_2 = 0; i_2 < numDataNodes; ++i_2)
                {
                    //rack4DataNode[i] = racks[i%numRacks];
                    rack4DataNode[i_2] = rackPrefix + "-" + i_2 % numRacks;
                    System.Console.Out.WriteLine("Data Node " + i_2 + " using " + rack4DataNode[i_2]);
                }
            }
            try
            {
                mc.StartDataNodes(conf, numDataNodes, true, HdfsServerConstants.StartupOption.Regular
                                  , rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
                // Give the DN some time to connect to the NN and init storage directories.
                Sharpen.Thread.Sleep(10 * 1000);
                if (inject)
                {
                    long blockSize = 10;
                    System.Console.Out.WriteLine("Injecting " + numBlocksPerDNtoInject + " blocks in each DN starting at blockId "
                                                 + startingBlockId + " with blocksize of " + blockSize);
                    Block[] blocks = new Block[numBlocksPerDNtoInject];
                    long    blkid  = startingBlockId;
                    for (int i_dn = 0; i_dn < numDataNodes; ++i_dn)
                    {
                        for (int i_2 = 0; i_2 < blocks.Length; ++i_2)
                        {
                            blocks[i_2] = new Block(blkid++, blockSize, CreateEditsLog.BlockGenerationStamp);
                        }
                        for (int i_3 = 1; i_3 <= replication; ++i_3)
                        {
                            // inject blocks for dn_i into dn_i and replica in dn_i's neighbors
                            mc.InjectBlocks((i_dn + i_3 - 1) % numDataNodes, Arrays.AsList(blocks), bpid);
                            System.Console.Out.WriteLine("Injecting blocks of dn " + i_dn + " into dn" + ((i_dn
                                                                                                           + i_3 - 1) % numDataNodes));
                        }
                    }
                    System.Console.Out.WriteLine("Created blocks from Bids " + startingBlockId + " to "
                                                 + (blkid - 1));
                }
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Error creating data node:" + e);
            }
        }
 /// <exception cref="System.IO.IOException"/>
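 /// <summary>
 /// Starts numDataNodes additional datanodes: validates the rack/host/capacity
 /// arrays, prepares per-node configuration (data directories, hostnames,
 /// simulated capacities), instantiates each DataNode, registers rack mappings,
 /// and waits for the cluster to become active.
 /// </summary>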
 public virtual void StartDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes,
                                    bool manageDfsDirs, HdfsServerConstants.StartupOption operation,
                                    string[] racks, string[] nodeGroups, string[] hosts,
                                    long[][] storageCapacities, long[] simulatedCapacities,
                                    bool setupHostsFile, bool checkDataNodeAddrConfig, bool checkDataNodeHostConfig)
 {
     lock (this)
     {
          System.Diagnostics.Debug.Assert(storageCapacities == null || simulatedCapacities == null);
          System.Diagnostics.Debug.Assert(storageTypes == null || storageTypes.Length == numDataNodes);
          System.Diagnostics.Debug.Assert(storageCapacities == null || storageCapacities.Length == numDataNodes);
         if (operation == HdfsServerConstants.StartupOption.Recover)
         {
             return;
         }
         if (checkDataNodeHostConfig)
         {
             conf.SetIfUnset(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         else
         {
             conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
         int curDatanodesNum = dataNodes.Count;
          // for the minicluster, the default initial delay for block reports is 0
         if (conf.Get(DFSConfigKeys.DfsBlockreportInitialDelayKey) == null)
         {
             conf.SetLong(DFSConfigKeys.DfsBlockreportInitialDelayKey, 0);
         }
         // If minicluster's name node is null assume that the conf has been
         // set with the right address:port of the name node.
         //
         if (racks != null && numDataNodes > racks.Length)
         {
             throw new ArgumentException("The length of racks [" + racks.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (nodeGroups != null && numDataNodes > nodeGroups.Length)
         {
             throw new ArgumentException("The length of nodeGroups [" + nodeGroups.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (hosts != null && numDataNodes > hosts.Length)
         {
             throw new ArgumentException("The length of hosts [" + hosts.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         //Generate some hostnames if required
         if (racks != null && hosts == null)
         {
             hosts = new string[numDataNodes];
             for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
             {
                 hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
             }
         }
         if (simulatedCapacities != null && numDataNodes > simulatedCapacities.Length)
         {
             throw new ArgumentException("The length of simulatedCapacities [" + simulatedCapacities
                                         .Length + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
          string[] dnArgs = (operation == null || operation != HdfsServerConstants.StartupOption.Rollback) ? null : new string[] { operation.GetName() };
         DataNode[] dns = new DataNode[numDataNodes];
         for (int i_1 = curDatanodesNum; i_1 < curDatanodesNum + numDataNodes; i_1++)
         {
             Configuration dnConf = new HdfsConfiguration(conf);
             // Set up datanode address
             SetupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
             if (manageDfsDirs)
             {
                  string dirs = MakeDataNodeDirs(i_1, storageTypes == null ? null : storageTypes[i_1]);
                 dnConf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
                 conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
             }
             if (simulatedCapacities != null)
             {
                 SimulatedFSDataset.SetFactory(dnConf);
                  dnConf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity, simulatedCapacities[i_1 - curDatanodesNum]);
             }
             Log.Info("Starting DataNode " + i_1 + " with " + DFSConfigKeys.DfsDatanodeDataDirKey
                      + ": " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey));
             if (hosts != null)
             {
                 dnConf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, hosts[i_1 - curDatanodesNum]);
                 Log.Info("Starting DataNode " + i_1 + " with hostname set to: " + dnConf.Get(DFSConfigKeys
                                                                                              .DfsDatanodeHostNameKey));
             }
             if (racks != null)
             {
                 string name = hosts[i_1 - curDatanodesNum];
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with hostname : " + name + " to rack " + racks[i_1 - curDatanodesNum
                              ]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[
                                  i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum] + nodeGroups[i_1 -
                                                                                                 curDatanodesNum]);
                 }
             }
             Configuration newconf = new HdfsConfiguration(dnConf);
             // save config
             if (hosts != null)
             {
                 NetUtils.AddStaticResolution(hosts[i_1 - curDatanodesNum], "localhost");
             }
             SecureDataNodeStarter.SecureResources secureResources = null;
             if (UserGroupInformation.IsSecurityEnabled())
             {
                 try
                 {
                     secureResources = SecureDataNodeStarter.GetSecureResources(dnConf);
                 }
                 catch (Exception ex)
                 {
                     Sharpen.Runtime.PrintStackTrace(ex);
                 }
             }
             DataNode dn = DataNode.InstantiateDataNode(dnArgs, dnConf, secureResources);
             if (dn == null)
             {
                 throw new IOException("Cannot start DataNode in " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey
                                                                                ));
             }
             //since the HDFS does things based on IP:port, we need to add the mapping
             //for IP:port to rackId
             string ipAddr = dn.GetXferAddress().Address.GetHostAddress();
             if (racks != null)
             {
                 int port = dn.GetXferAddress().Port;
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks
                              [i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
                              nodeGroups[i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]
                              );
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum] + nodeGroups
                                                 [i_1 - curDatanodesNum]);
                 }
             }
             dn.RunDatanodeDaemon();
          dataNodes.AddItem(new MiniDFSCluster.DataNodeProperties(this, dn, newconf, dnArgs, secureResources, dn.GetIpcPort()));
             dns[i_1 - curDatanodesNum] = dn;
         }
         curDatanodesNum   += numDataNodes;
         this.numDataNodes += numDataNodes;
         WaitActive();
         if (storageCapacities != null)
         {
              // dns holds only the newly started datanodes, so index them from zero
              for (int i = 0; i < numDataNodes; ++i)
              {
                  IList <FsVolumeSpi> volumes = dns[i].GetFSDataset().GetVolumes();
                  System.Diagnostics.Debug.Assert(volumes.Count == storagesPerDatanode);
                  for (int j = 0; j < volumes.Count; ++j)
                  {
                      FsVolumeImpl volume = (FsVolumeImpl)volumes[j];
                      volume.SetCapacityForTesting(storageCapacities[i][j]);
                 }
             }
         }
     }
 }
 /// <exception cref="System.IO.IOException"/>
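 /// <summary>Convenience overload: adds blocks starting at block id 1.</summary>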
 internal virtual int AddSomeBlocks(SimulatedFSDataset fsdataset)
 {
     return(AddSomeBlocks(fsdataset, 1));
 }