/// <summary>
/// Runs one Mover pass per namenode in a loop until every namenode's
/// migration completes, or returns early on the first error status.
/// </summary>
/// <param name="namenodes">map from namenode URI to the list of target paths
/// to migrate (NOTE(review): a null list presumably means the whole
/// namespace — confirm against NewNameNodeConnectors)</param>
/// <param name="conf">configuration used for timing and connector creation</param>
/// <returns>the exit code of <c>ExitStatus.Success</c> when all namenodes
/// finish; otherwise the exit code of the first non-in-progress status</returns>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
internal static int Run(IDictionary<URI, IList<Path>> namenodes, Configuration conf)
{
    // Sleep between passes: twice the heartbeat interval (converted to ms via
    // *2000) plus one replication interval (*1000), so the namenode has time
    // to observe the block movements made in the previous pass.
    long sleeptime = conf.GetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DFSConfigKeys
        .DfsHeartbeatIntervalDefault) * 2000 + conf.GetLong(DFSConfigKeys.DfsNamenodeReplicationIntervalKey
        , DFSConfigKeys.DfsNamenodeReplicationIntervalDefault) * 1000;
    // Single counter shared by every Mover instance created in this loop.
    AtomicInteger retryCount = new AtomicInteger(0);
    Log.Info("namenodes = " + namenodes);
    IList<NameNodeConnector> connectors = Sharpen.Collections.EmptyList();
    try
    {
        connectors = NameNodeConnector.NewNameNodeConnectors(namenodes, typeof(Org.Apache.Hadoop.Hdfs.Server.Mover.Mover
            ).Name, MoverIdPath, conf, NameNodeConnector.DefaultMaxIdleIterations);
        // Loop until every namenode is finished; finished connectors are
        // removed from the list below, so an empty list means "all done".
        while (connectors.Count > 0)
        {
            // Shuffle so no single namenode is always processed first.
            Sharpen.Collections.Shuffle(connectors);
            IEnumerator<NameNodeConnector> iter = connectors.GetEnumerator();
            while (iter.HasNext())
            {
                NameNodeConnector nnc = iter.Next();
                Org.Apache.Hadoop.Hdfs.Server.Mover.Mover m = new Org.Apache.Hadoop.Hdfs.Server.Mover.Mover
                    (nnc, conf, retryCount);
                ExitStatus r = m.Run();
                if (r == ExitStatus.Success)
                {
                    // This namenode is done: close its connector now and drop
                    // it from the list so the finally block below does not
                    // clean it up a second time.
                    IOUtils.Cleanup(Log, nnc);
                    iter.Remove();
                }
                else
                {
                    if (r != ExitStatus.InProgress)
                    {
                        // must be an error status, return
                        return (r.GetExitCode());
                    }
                }
            }
            Sharpen.Thread.Sleep(sleeptime);
        }
        return (ExitStatus.Success.GetExitCode());
    }
    finally
    {
        // Close whatever connectors remain open — reached on the error-return
        // path and on any exception (including InterruptedException from Sleep).
        foreach (NameNodeConnector nnc in connectors)
        {
            IOUtils.Cleanup(Log, nnc);
        }
    }
}
/// <summary>
/// Builds a Mover wired to the single namenode found in the given
/// configuration. Asserts (via NUnit) that exactly one namenode service RPC
/// URI is configured, so this helper is intended for test setups.
/// </summary>
/// <param name="conf">configuration naming exactly one namenode</param>
/// <returns>a Mover bound to that namenode's connector with a fresh retry counter</returns>
/// <exception cref="System.IO.IOException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Mover.Mover NewMover(Configuration conf)
{
    ICollection<URI> serviceUris = DFSUtil.GetNsServiceRpcUris(conf);
    // This helper only supports a single-namenode configuration.
    NUnit.Framework.Assert.AreEqual(1, serviceUris.Count);
    IDictionary<URI, IList<Path>> pathsByUri = Maps.NewHashMap();
    foreach (URI serviceUri in serviceUris)
    {
        // NOTE(review): null path list presumably selects the whole namespace
        // — confirm against NameNodeConnector.NewNameNodeConnectors.
        pathsByUri[serviceUri] = null;
    }
    string moverName = typeof(Org.Apache.Hadoop.Hdfs.Server.Mover.Mover).Name;
    IList<NameNodeConnector> connectorList = NameNodeConnector.NewNameNodeConnectors(
        pathsByUri, moverName, Org.Apache.Hadoop.Hdfs.Server.Mover.Mover.MoverIdPath,
        conf, NameNodeConnector.DefaultMaxIdleIterations);
    return new Org.Apache.Hadoop.Hdfs.Server.Mover.Mover(
        connectorList[0], conf, new AtomicInteger(0));
}
/// <summary>
/// Constructs a Mover for one namenode: reads dispatcher tuning values from
/// the configuration and initializes the dispatcher, storage map, target
/// paths, and storage-policy cache.
/// </summary>
/// <param name="nnc">connector to the namenode this Mover operates on</param>
/// <param name="conf">configuration supplying mover/dispatcher settings</param>
/// <param name="retryCount">counter shared across Mover instances to track retries</param>
internal Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount)
{
    // Dispatcher tuning knobs, all taken from the configuration.
    long windowWidth = conf.GetLong(
        DFSConfigKeys.DfsMoverMovedwinwidthKey,
        DFSConfigKeys.DfsMoverMovedwinwidthDefault);
    int threadCount = conf.GetInt(
        DFSConfigKeys.DfsMoverMoverthreadsKey,
        DFSConfigKeys.DfsMoverMoverthreadsDefault);
    int maxMovesPerNode = conf.GetInt(
        DFSConfigKeys.DfsDatanodeBalanceMaxNumConcurrentMovesKey,
        DFSConfigKeys.DfsDatanodeBalanceMaxNumConcurrentMovesDefault);
    this.retryMaxAttempts = conf.GetInt(
        DFSConfigKeys.DfsMoverRetryMaxAttemptsKey,
        DFSConfigKeys.DfsMoverRetryMaxAttemptsDefault);
    this.retryCount = retryCount;
    // No datanodes are explicitly included or excluded, hence the empty sets;
    // the 0 argument is passed through to the Dispatcher unchanged.
    this.dispatcher = new Dispatcher(
        nnc,
        Sharpen.Collections.EmptySet<string>(),
        Sharpen.Collections.EmptySet<string>(),
        windowWidth,
        threadCount,
        0,
        maxMovesPerNode,
        conf);
    this.storages = new Mover.StorageMap();
    this.targetPaths = nnc.GetTargetPaths();
    // One slot for every possible policy id: ids are IdBitLength bits wide.
    this.blockStoragePolicies =
        new BlockStoragePolicy[1 << BlockStoragePolicySuite.IdBitLength];
}