Ejemplo n.º 1
0
        /// <summary>
        /// Regression test for HDFS-8147: the Mover must give up and exit with
        /// an I/O-error status after exhausting its configured retry attempts
        /// when a block move keeps failing.
        /// </summary>
        /// <remarks>
        /// The block file is deleted on the datanodes before the move, so every
        /// move attempt fails with FileNotFoundException; with the retry limit
        /// capped at 2 the Mover must terminate with ExitStatus.IoException.
        /// </remarks>
        public virtual void TestMoverFailedRetry()
        {
            // HDFS-8147
            Configuration conf = new HdfsConfiguration();

            // Cap retry attempts so the test terminates quickly.
            conf.Set(DFSConfigKeys.DfsMoverRetryMaxAttemptsKey, "2");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes
                                         (new StorageType[][] { new StorageType[] { StorageType.Disk, StorageType.Archive }, new StorageType[] { StorageType.Disk, StorageType.Archive }, new StorageType
                                                                [] { StorageType.Disk, StorageType.Archive } }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testMoverFailedRetry";
                // write to DISK
                FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
                @out.WriteChars("testMoverFailedRetry");
                @out.Close();
                // Delete block file so, block move will fail with FileNotFoundException
                LocatedBlock lb = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                cluster.CorruptBlockOnDataNodesByDeletingBlockFile(lb.GetBlock());
                // move to ARCHIVE
                dfs.SetStoragePolicy(new Path(file), "COLD");
                // 'file' is already a string; the redundant ToString() call was removed.
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
                NUnit.Framework.Assert.AreEqual("Movement should fail after some retry", ExitStatus
                                                .IoException.GetExitCode(), rc);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Ejemplo n.º 2
0
        /// <summary>
        /// Verifies that the Mover's Processor schedules a replica move for a
        /// given block/location pair only once: the first ScheduleMoveReplica
        /// call is accepted and an identical second call is rejected.
        /// </summary>
        public virtual void TestScheduleSameBlock()
        {
            Configuration conf = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                string file = "/testScheduleSameBlock/file";
                {
                    // Create a small file so one located block exists.
                    FSDataOutputStream stream = fs.Create(new Path(file));
                    stream.WriteChars("testScheduleSameBlock");
                    stream.Close();
                }
                Org.Apache.Hadoop.Hdfs.Server.Mover.Mover mover = NewMover(conf);
                mover.Init();
                Mover.Processor processor = new Mover.Processor(this);
                LocatedBlock located = fs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                IList<Mover.MLocation> locs = Mover.MLocation.ToLocations(located);
                Mover.MLocation target = locs[0];
                Dispatcher.DBlock block = mover.NewDBlock(located.GetBlock().GetLocalBlock(), locs);
                IList<StorageType> types = new AList<StorageType>(
                    Arrays.AsList(StorageType.Default, StorageType.Default));
                // The first scheduling attempt must be accepted...
                NUnit.Framework.Assert.IsTrue(processor.ScheduleMoveReplica(block, target, types));
                // ...and the identical second attempt must be refused.
                NUnit.Framework.Assert.IsFalse(processor.ScheduleMoveReplica(block, target, types));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Ejemplo n.º 3
0
        /// <summary>
        /// Regression test for HDFS-8147: even when the cluster's storage-type
        /// layout forces two replicas to compete for ARCHIVE space, applying
        /// the COLD policy and running the Mover must move both replicas of
        /// the file (replication factor 2) onto ARCHIVE storage.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestTwoReplicaSameStorageTypeShouldNotSelect()
        {
            // HDFS-8147
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes
                                         (new StorageType[][] { new StorageType[] { StorageType.Disk, StorageType.Archive }, new StorageType[] { StorageType.Disk, StorageType.Disk }, new StorageType[]
                                                                { StorageType.Disk, StorageType.Archive } }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
                // write to DISK
                FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
                @out.WriteChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
                @out.Close();
                // verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(new Path(file), "COLD");
                // 'file' is already a string; the redundant ToString() call was removed.
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successfull", 0, rc
                                                );
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                int archiveCount = 0;
                foreach (StorageType storageType_1 in storageTypes)
                {
                    if (StorageType.Archive == storageType_1)
                    {
                        archiveCount++;
                    }
                }
                // Expected value first (AreEqual(expected, actual)) so a failure
                // reports "expected 2" instead of the reversed message.
                NUnit.Framework.Assert.AreEqual(2, archiveCount);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Ejemplo n.º 4
0
        /// <summary>
        /// Exercises moving every replica of a file from DISK to ARCHIVE on
        /// the same set of datanodes: write under the HOT policy, switch the
        /// directory to COLD, run the Mover CLI, and verify all replicas end
        /// up on ARCHIVE storage.
        /// </summary>
        public virtual void TestScheduleBlockWithinSameNode()
        {
            Configuration conf = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes
                                         (new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                string file = "/testScheduleWithinSameNode/file";
                Path dir = new Path("/testScheduleWithinSameNode");
                fs.Mkdirs(dir);
                // write to DISK
                fs.SetStoragePolicy(dir, "HOT");
                {
                    FSDataOutputStream stream = fs.Create(new Path(file));
                    stream.WriteChars("testScheduleWithinSameNode");
                    stream.Close();
                }
                // Before the move every replica must live on DISK.
                LocatedBlock located = fs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                foreach (StorageType type in located.GetStorageTypes())
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == type);
                }
                // move to ARCHIVE
                fs.SetStoragePolicy(dir, "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", dir.ToString() });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successfull", 0, rc);
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                // After the move every replica must have landed on ARCHIVE.
                located = fs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                foreach (StorageType type in located.GetStorageTypes())
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Archive == type);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }