Example 1 (TestFilesExceedMaxLockedMemory): cache files beyond the configured capacity, wait for the failure to appear in the log, then uncache.
        /// <exception cref="System.Exception"/>
        public virtual void TestFilesExceedMaxLockedMemory()
        {
            Log.Info("beginning testFilesExceedMaxLockedMemory");
            // Create some test files that will exceed total cache capacity
            int  numFiles = 5;
            long fileSize = CacheCapacity / (numFiles - 1);

            Path[] testFiles = new Path[numFiles];
            HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
            long[] fileSizes = new long[numFiles];
            for (int i = 0; i < numFiles; i++)
            {
                testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
                DFSTestUtil.CreateFile(fs, testFiles[i], fileSize, (short)1, unchecked((long)0xDFA));
                fileLocs[i] = (HdfsBlockLocation[])fs.GetFileBlockLocations(testFiles[i], 0, fileSize);
                // Get the file size (sum of blocks)
                long[] sizes = GetBlockSizes(fileLocs[i]);
                for (int j = 0; j < sizes.Length; j++)
                {
                    fileSizes[i] += sizes[j];
                }
            }
            // Cache the first n-1 files
            long total = 0;

            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            for (int i_1 = 0; i_1 < numFiles - 1; i_1++)
            {
                SetHeartbeatResponse(CacheBlocks(fileLocs[i_1]));
                total = DFSTestUtil.VerifyExpectedCacheUsage(rounder.Round(total + fileSizes[i_1]), 4 * (i_1 + 1), fsd);
            }
            // nth file should hit a capacity exception
            LogVerificationAppender appender = new LogVerificationAppender();
            Logger logger = Logger.GetRootLogger();

            logger.AddAppender(appender);
            SetHeartbeatResponse(CacheBlocks(fileLocs[numFiles - 1]));
            GenericTestUtils.WaitFor(new _Supplier_351(appender), 500, 30000);
            // Also check the metrics for the failure
            NUnit.Framework.Assert.IsTrue("Expected more than 0 failed cache attempts", fsd.GetNumBlocksFailedToCache() > 0);
            // Uncache the n-1 files
            int curCachedBlocks = 16;

            for (int i_2 = 0; i_2 < numFiles - 1; i_2++)
            {
                SetHeartbeatResponse(UncacheBlocks(fileLocs[i_2]));
                long uncachedBytes = rounder.Round(fileSizes[i_2]);
                total           -= uncachedBytes;
                curCachedBlocks -= (int)(uncachedBytes / BlockSize);
                DFSTestUtil.VerifyExpectedCacheUsage(total, curCachedBlocks, fsd);
            }
            Log.Info("finishing testFilesExceedMaxLockedMemory");
        }
Example 2 (TestImageChecksum): corrupt the FSImage MD5 files, then verify that restarting the cluster fails and logs one checksum error per name directory.
        /// <exception cref="System.Exception"/>
        private void TestImageChecksum(bool compress)
        {
            MiniDFSCluster cluster = null;

            if (compress)
            {
                // Enable fsimage compression (dfs.image.compress); the codec key takes a class name, not a boolean
                config.SetBoolean(DFSConfigKeys.DfsImageCompressKey, true);
            }
            try
            {
                Log.Info("\n===========================================\n" + "Starting empty cluster"
                         );
                cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Format(true).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                fs.Mkdirs(new Path("/test"));
                Log.Info("Shutting down cluster #1");
                cluster.Shutdown();
                cluster = null;
                // Corrupt the md5 files in all the namedirs
                CorruptFSImageMD5(true);
                // Attach our own log appender so we can verify output
                LogVerificationAppender appender = new LogVerificationAppender();
                Logger logger = Logger.GetRootLogger();
                logger.AddAppender(appender);
                // Try to start a new cluster
                Log.Info("\n===========================================\n" + "Starting same cluster after simulated crash"
                         );
                try
                {
                    cluster = new MiniDFSCluster.Builder(config).NumDataNodes(0).Format(false).Build();
                    NUnit.Framework.Assert.Fail("Should not have successfully started with corrupt image");
                }
                catch (IOException ioe)
                {
                    GenericTestUtils.AssertExceptionContains("Failed to load an FSImage file!", ioe);
                    int md5failures = appender.CountExceptionsWithMessage(" is corrupt with MD5 checksum of ");
                    // Two namedirs, so should have seen two failures
                    NUnit.Framework.Assert.AreEqual(2, md5failures);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 3 (_Supplier_351 constructor): the Sharpen-generated supplier passed to GenericTestUtils.WaitFor in Example 1.
        // Constructor of the Sharpen-generated supplier used by GenericTestUtils.WaitFor in Example 1;
        // it captures the LogVerificationAppender so the supplier's callback can poll it.
        public _Supplier_351(LogVerificationAppender appender)
        {
            this.appender = appender;
        }
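
Example 3 shows only the constructor that Sharpen emits for the anonymous supplier used in Example 1. Below is a minimal sketch of what the complete class plausibly looks like; the interface name and the polled message fragment are assumptions for illustration, and only the constructor and CountLinesWithMessage are confirmed by the examples in this listing.

        // Hypothetical completion of the Sharpen-generated supplier (sketch, not the original source).
        internal sealed class _Supplier_351 : Supplier<bool>  // interface name is an assumption
        {
            private readonly LogVerificationAppender appender;

            public _Supplier_351(LogVerificationAppender appender)
            {
                this.appender = appender;
            }

            public bool Get()
            {
                // Poll the appender until the cache-capacity failure shows up in the log;
                // the message fragment below is a placeholder, not the exact text from the test.
                return this.appender.CountLinesWithMessage("more bytes in the cache") > 0;
            }
        }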
Example 4 (TestXattrConfiguration): reject negative xattr limits and verify the "unlimited xattr size" log message.
        /// <exception cref="System.Exception"/>
        public virtual void TestXattrConfiguration()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, -1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                NUnit.Framework.Assert.Fail("Expected exception with negative xattr size");
            }
            catch (ArgumentException e)
            {
                GenericTestUtils.AssertExceptionContains("Cannot set a negative value for the maximum size of an xattr"
                                                         , e);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, DFSConfigKeys.DfsNamenodeMaxXattrSizeDefault);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            try
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, -1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                NUnit.Framework.Assert.Fail("Expected exception with negative # xattrs per inode"
                                            );
            }
            catch (ArgumentException e)
            {
                GenericTestUtils.AssertExceptionContains("Cannot set a negative limit on the number of xattrs per inode"
                                                         , e);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeDefault);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            try
            {
                // Set up a logger to check log message
                LogVerificationAppender appender = new LogVerificationAppender();
                Logger logger = Logger.GetRootLogger();
                logger.AddAppender(appender);
                int count = appender.CountLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
                NUnit.Framework.Assert.AreEqual("Expected no messages about unlimited xattr size", 0, count);
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, 0);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(true).Build();
                count = appender.CountLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
                // happens twice because we format then run
                NUnit.Framework.Assert.AreEqual("Expected unlimited xattr size", 2, count);
            }
            finally
            {
                conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrSizeKey, DFSConfigKeys.DfsNamenodeMaxXattrSizeDefault);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
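
Taken together, the four examples share the same LogVerificationAppender pattern: attach the appender to the root logger before the operation under test, run the operation, then count the captured log lines or exceptions. A condensed sketch of that pattern follows; the operation and message fragment are placeholders, and only the appender and assertion calls already shown in the examples above are assumed to exist.

        // Minimal sketch of the shared verification pattern (placeholders marked below).
        LogVerificationAppender appender = new LogVerificationAppender();
        Logger logger = Logger.GetRootLogger();
        logger.AddAppender(appender);

        RunOperationUnderTest();  // placeholder for whatever should produce the expected log output

        // Count matching lines; CountExceptionsWithMessage works the same way for logged exceptions.
        int hits = appender.CountLinesWithMessage("expected message fragment");  // placeholder text
        NUnit.Framework.Assert.IsTrue("Expected the message to be logged at least once", hits > 0);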