Example #1
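        /// <summary>Writes a two-replica file to DISK, switches its storage policy to COLD, runs the Mover, and expects exactly two replicas to end up on ARCHIVE.</summary>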
        /// <exception cref="System.Exception"/>
        public virtual void TestTwoReplicaSameStorageTypeShouldNotSelect()
        {
            // HDFS-8147
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
                .StorageTypes(new StorageType[][] {
                    new StorageType[] { StorageType.Disk, StorageType.Archive },
                    new StorageType[] { StorageType.Disk, StorageType.Disk },
                    new StorageType[] { StorageType.Disk, StorageType.Archive } })
                .Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
                // write to DISK
                FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
                @out.WriteChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
                @out.Close();
                // verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(new Path(file), "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file.ToString() });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successfull", 0, rc
                                                );
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                int archiveCount = 0;
                foreach (StorageType storageType_1 in storageTypes)
                {
                    if (StorageType.Archive == storageType_1)
                    {
                        archiveCount++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(2, archiveCount);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #2
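        /// <summary>Writes a file under a HOT directory, switches the directory policy to COLD, runs the Mover, and expects every replica to move from DISK to ARCHIVE on the same nodes.</summary>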
        public virtual void TestScheduleBlockWithinSameNode()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
                .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testScheduleWithinSameNode/file";
                Path   dir  = new Path("/testScheduleWithinSameNode");
                dfs.Mkdirs(dir);
                // write to DISK
                dfs.SetStoragePolicy(dir, "HOT");
                {
                    FSDataOutputStream @out = dfs.Create(new Path(file));
                    @out.WriteChars("testScheduleWithinSameNode");
                    @out.Close();
                }
                //verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(dir, "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", dir.ToString() });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successfull", 0, rc
                                                );
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType_1 in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Archive == storageType_1);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
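        /// <summary>Uses a single-datanode cluster to verify that ReplaceBlock moves a block from DISK to ARCHIVE within the same datanode.</summary>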
        public virtual void TestBlockMoveAcrossStorageInSameNode()
        {
            Configuration conf = new HdfsConfiguration();
            // create only one datanode in the cluster to verify movement within
            // datanode.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
                DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
                LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current block
                LocatedBlock   locatedBlock = locatedBlocks.Get(0);
                ExtendedBlock  block        = locatedBlock.GetBlock();
                DatanodeInfo[] locations    = locatedBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(1, locations.Length);
                StorageType[] storageTypes = locatedBlock.GetStorageTypes();
                // current block should be written to DISK
                NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
                DatanodeInfo source = locations[0];
                // move block to ARCHIVE by using same DataNodeInfo for source, proxy and
                // destination so that movement happens within datanode
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
                // wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current block
                locatedBlock = locatedBlocks.Get(0);
                NUnit.Framework.Assert.AreEqual("Storage should be only one", 1, locatedBlock.GetLocations
                                                    ().Length);
                NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE", locatedBlock.GetStorageTypes
                                                  ()[0] == StorageType.Archive);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
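            /// <summary>Converts the replica locations, storage types, and block size of a LocatedBlock into a list of Mover.MLocation entries.</summary>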
            internal static IList <Mover.MLocation> ToLocations(LocatedBlock lb)
            {
                DatanodeInfo[]          datanodeInfos = lb.GetLocations();
                StorageType[]           storageTypes  = lb.GetStorageTypes();
                long                    size          = lb.GetBlockSize();
                IList <Mover.MLocation> locations     = new List <Mover.MLocation>();

                for (int i = 0; i < datanodeInfos.Length; i++)
                {
                    locations.AddItem(new Mover.MLocation(datanodeInfos[i], storageTypes[i], size));
                }
                return(locations);
            }
Example #5
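            /// <summary>Compares each completed block's storage types against the types chosen by the file's storage policy and schedules moves for any mismatch.</summary>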
            /// <returns>true if it is necessary to run another round of migration</returns>
            private bool ProcessFile(string fullPath, HdfsLocatedFileStatus status)
            {
                byte policyId = status.GetStoragePolicy();

                // currently we ignore files with unspecified storage policy
                if (policyId == BlockStoragePolicySuite.IdUnspecified)
                {
                    return(false);
                }
                BlockStoragePolicy policy = this._enclosing.blockStoragePolicies[policyId];

                if (policy == null)
                {
                    Org.Apache.Hadoop.Hdfs.Server.Mover.Mover.Log.Warn("Failed to get the storage policy of file " + fullPath);
                    return(false);
                }
                IList <StorageType> types         = policy.ChooseStorageTypes(status.GetReplication());
                LocatedBlocks       locatedBlocks = status.GetBlockLocations();
                bool hasRemaining        = false;
                bool lastBlkComplete     = locatedBlocks.IsLastBlockComplete();
                IList <LocatedBlock> lbs = locatedBlocks.GetLocatedBlocks();

                for (int i = 0; i < lbs.Count; i++)
                {
                    if (i == lbs.Count - 1 && !lastBlkComplete)
                    {
                        // last block is incomplete, skip it
                        continue;
                    }
                    LocatedBlock          lb   = lbs[i];
                    Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types, lb.GetStorageTypes());
                    if (!diff.RemoveOverlap(true))
                    {
                        if (this.ScheduleMoves4Block(diff, lb))
                        {
                            hasRemaining |= (diff.existing.Count > 1 && diff.expected.Count > 1);
                        }
                    }
                }
                return(hasRemaining);
            }
Example #6
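            /// <summary>Counts the DISK and ARCHIVE replicas of the file's single block and, when an expected replication is given, asserts that it matches.</summary>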
            /// <exception cref="System.IO.IOException"/>
            private TestStorageMover.Replication GetOrVerifyReplication(Path file, TestStorageMover.Replication expected)
            {
                IList <LocatedBlock> lbs = this.dfs.GetClient().GetLocatedBlocks(file.ToString(), 0).GetLocatedBlocks();

                NUnit.Framework.Assert.AreEqual(1, lbs.Count);
                LocatedBlock  lb    = lbs[0];
                StringBuilder types = new StringBuilder();

                TestStorageMover.Replication r = new TestStorageMover.Replication();
                foreach (StorageType t in lb.GetStorageTypes())
                {
                    types.Append(t).Append(", ");
                    if (t == StorageType.Disk)
                    {
                        r.disk++;
                    }
                    else
                    {
                        if (t == StorageType.Archive)
                        {
                            r.archive++;
                        }
                        else
                        {
                            NUnit.Framework.Assert.Fail("Unexpected storage type " + t);
                        }
                    }
                }
                if (expected != null)
                {
                    string s = "file = " + file + "\n  types = [" + types + "]";
                    NUnit.Framework.Assert.AreEqual(s, expected, r);
                }
                return(r);
            }
Example #7
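        /// <summary>Registers five datanodes with distinct storage IDs and types, decommissions two of them, and verifies that DatanodeManager.SortLocatedBlocks keeps storage IDs and types aligned with the sorted locations, places the local node first, and moves the decommissioned nodes to the end.</summary>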
        public virtual void TestSortLocatedBlocks()
        {
            // create the DatanodeManager which will be tested
            FSNamesystem fsn = Org.Mockito.Mockito.Mock <FSNamesystem>();

            Org.Mockito.Mockito.When(fsn.HasWriteLock()).ThenReturn(true);
            DatanodeManager dm = new DatanodeManager(Org.Mockito.Mockito.Mock <BlockManager>(), fsn, new Configuration());

            // register 5 datanodes, each with different storage ID and type
            DatanodeInfo[] locs         = new DatanodeInfo[5];
            string[]       storageIDs   = new string[5];
            StorageType[]  storageTypes = new StorageType[] { StorageType.Archive, StorageType.Default, StorageType.Disk, StorageType.RamDisk, StorageType.Ssd };
            for (int i = 0; i < 5; i++)
            {
                // register new datanode
                string uuid             = "UUID-" + i;
                string ip               = "IP-" + i;
                DatanodeRegistration dr = Org.Mockito.Mockito.Mock <DatanodeRegistration>();
                Org.Mockito.Mockito.When(dr.GetDatanodeUuid()).ThenReturn(uuid);
                Org.Mockito.Mockito.When(dr.GetIpAddr()).ThenReturn(ip);
                Org.Mockito.Mockito.When(dr.GetXferAddr()).ThenReturn(ip + ":9000");
                Org.Mockito.Mockito.When(dr.GetXferPort()).ThenReturn(9000);
                Org.Mockito.Mockito.When(dr.GetSoftwareVersion()).ThenReturn("version1");
                dm.RegisterDatanode(dr);
                // get location and storage information
                locs[i]       = dm.GetDatanode(uuid);
                storageIDs[i] = "storageID-" + i;
            }
            // set first 2 locations as decommissioned
            locs[0].SetDecommissioned();
            locs[1].SetDecommissioned();
            // create LocatedBlock with above locations
            ExtendedBlock        b      = new ExtendedBlock("somePoolID", 1234);
            LocatedBlock         block  = new LocatedBlock(b, locs, storageIDs, storageTypes);
            IList <LocatedBlock> blocks = new AList <LocatedBlock>();

            blocks.AddItem(block);
            string targetIp = locs[4].GetIpAddr();

            // sort block locations
            dm.SortLocatedBlocks(targetIp, blocks);
            // check that storage IDs/types are aligned with datanode locs
            DatanodeInfo[] sortedLocs = block.GetLocations();
            storageIDs   = block.GetStorageIDs();
            storageTypes = block.GetStorageTypes();
            Assert.AssertThat(sortedLocs.Length, IS.Is(5));
            Assert.AssertThat(storageIDs.Length, IS.Is(5));
            Assert.AssertThat(storageTypes.Length, IS.Is(5));
            for (int i_1 = 0; i_1 < sortedLocs.Length; i_1++)
            {
                Assert.AssertThat(((DatanodeInfoWithStorage)sortedLocs[i_1]).GetStorageID(), IS.Is(storageIDs[i_1]));
                Assert.AssertThat(((DatanodeInfoWithStorage)sortedLocs[i_1]).GetStorageType(), IS.Is(storageTypes[i_1]));
            }
            // Ensure the local node is first.
            Assert.AssertThat(sortedLocs[0].GetIpAddr(), IS.Is(targetIp));
            // Ensure the two decommissioned DNs were moved to the end.
            Assert.AssertThat(sortedLocs[sortedLocs.Length - 1].GetAdminState(), IS.Is(DatanodeInfo.AdminStates.Decommissioned));
            Assert.AssertThat(sortedLocs[sortedLocs.Length - 2].GetAdminState(), IS.Is(DatanodeInfo.AdminStates.Decommissioned));
        }