コード例 #1
0
        /// <summary>
        /// Verifies that DatanodeDescriptor.NumBlocks() tracks adds and removes on
        /// a storage: duplicate adds and removes of unknown blocks leave the count
        /// unchanged.
        /// </summary>
        public virtual void TestBlocksCounter()
        {
            DatanodeDescriptor dd = BlockManagerTestUtil.GetLocalDatanodeDescriptor(true);
            NUnit.Framework.Assert.AreEqual(0, dd.NumBlocks());

            BlockInfoContiguous firstBlock  = new BlockInfoContiguous(new Block(1L), (short)1);
            BlockInfoContiguous secondBlock = new BlockInfoContiguous(new Block(2L), (short)2);
            DatanodeStorageInfo[] storages = dd.GetStorageInfos();
            NUnit.Framework.Assert.IsTrue(storages.Length > 0);

            // Adding the first block bumps the counter to 1.
            NUnit.Framework.Assert.IsTrue(DatanodeStorageInfo.AddBlockResult.Added == storages[0].AddBlock(firstBlock));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // Removing a block that was never added is a no-op.
            NUnit.Framework.Assert.IsFalse(dd.RemoveBlock(secondBlock));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // Re-adding an already-present block does not change the counter.
            NUnit.Framework.Assert.IsFalse(DatanodeStorageInfo.AddBlockResult.Added == storages[0].AddBlock(firstBlock));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            // Adding a second, distinct block bumps the counter to 2.
            NUnit.Framework.Assert.IsTrue(DatanodeStorageInfo.AddBlockResult.Added == storages[0].AddBlock(secondBlock));
            NUnit.Framework.Assert.AreEqual(2, dd.NumBlocks());
            // Each successful removal decrements the counter back toward zero.
            NUnit.Framework.Assert.IsTrue(dd.RemoveBlock(firstBlock));
            NUnit.Framework.Assert.AreEqual(1, dd.NumBlocks());
            NUnit.Framework.Assert.IsTrue(dd.RemoveBlock(secondBlock));
            NUnit.Framework.Assert.AreEqual(0, dd.NumBlocks());
        }
コード例 #2
0
        /// <summary>
        /// Exercises PendingDataNodeMessages: reports queued for a block are
        /// retrievable via a different Block instance for the same block, and the
        /// queue is drained once taken.
        /// </summary>
        public virtual void TestQueues()
        {
            DatanodeDescriptor  localDn     = DFSTestUtil.GetLocalDatanodeDescriptor();
            DatanodeStorage     storage     = new DatanodeStorage("STORAGE_ID");
            DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(localDn, storage);

            // Queue two reports for block 1 under two generation stamps.
            msgs.EnqueueReportedBlock(storageInfo, block1Gs1, HdfsServerConstants.ReplicaState.Finalized);
            msgs.EnqueueReportedBlock(storageInfo, block1Gs2, HdfsServerConstants.ReplicaState.Finalized);
            NUnit.Framework.Assert.AreEqual(2, msgs.Count());

            // Nothing queued yet for block 2; asking must not disturb block 1.
            NUnit.Framework.Assert.IsNull(msgs.TakeBlockQueue(block2Gs1));
            NUnit.Framework.Assert.AreEqual(2, msgs.Count());

            // Taking with a different Block instance for block 1 drains both
            // messages in enqueue order.
            Queue <PendingDataNodeMessages.ReportedBlockInfo> q = msgs.TakeBlockQueue(block1Gs2DifferentInstance);
            NUnit.Framework.Assert.AreEqual("ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED],"
                                            + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]"
                                            , Joiner.On(",").Join(q));
            NUnit.Framework.Assert.AreEqual(0, msgs.Count());

            // Should be null if we pull again
            NUnit.Framework.Assert.IsNull(msgs.TakeBlockQueue(block1Gs1));
            NUnit.Framework.Assert.AreEqual(0, msgs.Count());
        }
コード例 #3
0
ファイル: Host2NodesMap.cs プロジェクト: orf53975/hadoop.net
        /// <summary>Check if node is already in the map.</summary>
        internal virtual bool Contains(DatanodeDescriptor node)
        {
            if (node == null)
            {
                return(false);
            }
            string ipAddr = node.GetIpAddr();

            hostmapLock.ReadLock().Lock();
            try
            {
                DatanodeDescriptor[] nodes = map[ipAddr];
                if (nodes != null)
                {
                    foreach (DatanodeDescriptor containedNode in nodes)
                    {
                        if (node == containedNode)
                        {
                            return(true);
                        }
                    }
                }
            }
            finally
            {
                hostmapLock.ReadLock().Unlock();
            }
            return(false);
        }
コード例 #4
0
        /// <summary>
        /// Ensure that the given NameNode marks the specified DataNode as
        /// entirely dead/expired.
        /// </summary>
        /// <param name="nn">the NameNode to manipulate</param>
        /// <param name="dnName">the name of the DataNode</param>
        public static void NoticeDeadDatanode(NameNode nn, string dnName)
        {
            FSNamesystem namesystem = nn.GetNamesystem();

            namesystem.WriteLock();
            try
            {
                DatanodeManager      dnm    = namesystem.GetBlockManager().GetDatanodeManager();
                HeartbeatManager     hbm    = dnm.GetHeartbeatManager();
                DatanodeDescriptor[] dnds   = hbm.GetDatanodes();
                DatanodeDescriptor   theDND = null;
                foreach (DatanodeDescriptor dnd in dnds)
                {
                    if (dnd.GetXferAddr().Equals(dnName))
                    {
                        theDND = dnd;
                    }
                }
                NUnit.Framework.Assert.IsNotNull("Could not find DN with name: " + dnName, theDND
                                                 );
                lock (hbm)
                {
                    DFSTestUtil.SetDatanodeDead(theDND);
                    hbm.HeartbeatCheck();
                }
            }
            finally
            {
                namesystem.WriteUnlock();
            }
        }
コード例 #5
0
        /// <summary>
        /// Choose a single datanode from the provided list of possible
        /// targets, weighted by the percentage of free space remaining on the node.
        /// </summary>
        /// <returns>The chosen datanode</returns>
        private static DatanodeDescriptor ChooseRandomDatanodeByRemainingCapacity(IList <DatanodeDescriptor
                                                                                         > targets)
        {
            // Use a weighted probability to choose the target datanode
            float total = 0;

            foreach (DatanodeDescriptor d in targets)
            {
                total += d.GetCacheRemainingPercent();
            }
            // Give each datanode a portion of keyspace equal to its relative weight
            // [0, w1) selects d1, [w1, w2) selects d2, etc.
            SortedDictionary <int, DatanodeDescriptor> lottery = new SortedDictionary <int, DatanodeDescriptor
                                                                                       >();
            int offset = 0;

            foreach (DatanodeDescriptor d_1 in targets)
            {
                // Since we're using floats, be paranoid about negative values
                int weight = Math.Max(1, (int)((d_1.GetCacheRemainingPercent() / total) * 1000000
                                               ));
                offset         += weight;
                lottery[offset] = d_1;
            }
            // Choose a number from [0, offset), which is the total amount of weight,
            // to select the winner
            DatanodeDescriptor winner = lottery.HigherEntry(random.Next(offset)).Value;

            return(winner);
        }
コード例 #6
0
        /// <summary>
        /// Verify that when a file's replication factor is lowered, the excess
        /// replicas are all scheduled for deletion on the datanode that has stopped
        /// heartbeating, and that no replica is actually removed while that node
        /// stays silent.
        /// </summary>
        public virtual void TestChooseReplicaToDelete()
        {
            MiniDFSCluster cluster = null;
            FileSystem     fs      = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, SmallBlockSize);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                fs      = cluster.GetFileSystem();
                FSNamesystem namesystem = cluster.GetNamesystem();
                // Start a fourth datanode with a very long heartbeat interval so
                // it soon looks stale to the namenode.
                conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 300);
                cluster.StartDataNodes(conf, 1, true, null, null, null);
                DataNode             lastDN = cluster.GetDataNodes()[3];
                DatanodeRegistration dnReg  = DataNodeTestUtils.GetDNRegistrationForBP(lastDN, namesystem
                                                                                       .GetBlockPoolId());
                string lastDNid = dnReg.GetDatanodeUuid();
                Path   fileName = new Path("/foo2");
                // Replicate the file onto all four datanodes.
                DFSTestUtil.CreateFile(fs, fileName, SmallFileLength, (short)4, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)4);
                // Wait for tolerable number of heartbeats plus one
                DatanodeDescriptor nodeInfo = null;
                long lastHeartbeat          = 0;
                long waitTime = DFSConfigKeys.DfsHeartbeatIntervalDefault * 1000 * (DFSConfigKeys
                                                                                    .DfsNamenodeTolerateHeartbeatMultiplierDefault + 1);
                // Busy-wait until lastDN's most recent heartbeat is older than the
                // namenode's tolerated staleness window.
                do
                {
                    nodeInfo      = namesystem.GetBlockManager().GetDatanodeManager().GetDatanode(dnReg);
                    lastHeartbeat = nodeInfo.GetLastUpdateMonotonic();
                }while (Time.MonotonicNow() - lastHeartbeat < waitTime);
                // Dropping replication from 4 to 3 forces one replica per block to
                // be chosen for deletion.
                fs.SetReplication(fileName, (short)3);
                BlockLocation[] locs = fs.GetFileBlockLocations(fs.GetFileStatus(fileName), 0, long.MaxValue
                                                                );
                // All replicas for deletion should be scheduled on lastDN.
                // And should not actually be deleted, because lastDN does not heartbeat.
                namesystem.ReadLock();
                ICollection <Block> dnBlocks = namesystem.GetBlockManager().excessReplicateMap[lastDNid
                                               ];
                NUnit.Framework.Assert.AreEqual("Replicas on node " + lastDNid + " should have been deleted"
                                                , SmallFileLength / SmallBlockSize, dnBlocks.Count);
                namesystem.ReadUnlock();
                foreach (BlockLocation location in locs)
                {
                    NUnit.Framework.Assert.AreEqual("Block should still have 4 replicas", 4, location
                                                    .GetNames().Length);
                }
            }
            finally
            {
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
コード例 #7
0
 /// <summary>
 /// Verify replica placement when more replicas are requested than there are
 /// node groups: the normal 3-replica case succeeds on distinct node groups,
 /// and a 10-replica request is capped at the 6 placeable targets.
 /// </summary>
 public virtual void TestChooseMoreTargetsThanNodeGroups()
 {
     // Empty the topology of the default and boundary-case datanodes so only
     // the more-targets-case nodes remain.
     for (int i = 0; i < NumOfDatanodes; i++)
     {
         cluster.Remove(dataNodes[i]);
     }
     for (int i_1 = 0; i_1 < NumOfDatanodesBoundary; i_1++)
     {
         DatanodeDescriptor node = dataNodesInBoundaryCase[i_1];
         if (cluster.Contains(node))
         {
             cluster.Remove(node);
         }
     }
     for (int i_2 = 0; i_2 < NumOfDatanodesMoreTargets; i_2++)
     {
         cluster.Add(dataNodesInMoreTargetsCase[i_2]);
     }
     // Give every node enough free space (2 * MinBlocksForWrite blocks) to be
     // a valid replication target.
     for (int i_3 = 0; i_3 < NumOfDatanodesMoreTargets; i_3++)
     {
         UpdateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i_3], 2 * HdfsConstants.MinBlocksForWrite
                                  * BlockSize, 0L, 2 * HdfsConstants.MinBlocksForWrite * BlockSize, 0L, 0L, 0L, 0
                                  , 0);
     }
     DatanodeStorageInfo[] targets;
     // Test normal case -- 3 replicas
     targets = ChooseTarget(3, dataNodesInMoreTargetsCase[0]);
     // Fixed: NUnit's AreEqual takes (expected, actual); the original passed
     // them swapped, producing misleading failure messages.
     NUnit.Framework.Assert.AreEqual(3, targets.Length);
     NUnit.Framework.Assert.IsTrue(CheckTargetsOnDifferentNodeGroup(targets));
     // Test special case -- replica number over node groups.
     targets = ChooseTarget(10, dataNodesInMoreTargetsCase[0]);
     NUnit.Framework.Assert.IsTrue(CheckTargetsOnDifferentNodeGroup(targets));
     // Verify it only can find 6 targets for placing replicas.
     NUnit.Framework.Assert.AreEqual(6, targets.Length);
 }
コード例 #8
0
        /// <summary>
        /// Stress CachedBlocksList: add 8000 cached blocks to five lists spread
        /// over two datanodes, then remove them in random order.
        /// </summary>
        public virtual void TestMultipleLists()
        {
            DatanodeDescriptor[] datanodes = new DatanodeDescriptor[]
            {
                new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003)),
                new DatanodeDescriptor(new DatanodeID("127.0.1.1", "localhost", "efgh", 6000, 6001, 6002, 6003))
            };
            // Five lists total: two from datanodes[0], three from datanodes[1].
            DatanodeDescriptor.CachedBlocksList[] lists = new DatanodeDescriptor.CachedBlocksList[]
            {
                datanodes[0].GetPendingCached(),
                datanodes[0].GetCached(),
                datanodes[1].GetPendingCached(),
                datanodes[1].GetCached(),
                datanodes[1].GetPendingUncached()
            };
            const int NumBlocks = 8000;
            CachedBlock[] blocks = new CachedBlock[NumBlocks];
            for (int i = 0; i < NumBlocks; i++)
            {
                blocks[i] = new CachedBlock(i, (short)i, true);
            }
            // Fixed seed keeps the removal order deterministic across runs.
            Random r = new Random(654);
            foreach (DatanodeDescriptor.CachedBlocksList list in lists)
            {
                TestAddElementsToList(list, blocks);
            }
            foreach (DatanodeDescriptor.CachedBlocksList list in lists)
            {
                TestRemoveElementsFromList(r, list, blocks);
            }
        }
コード例 #9
0
        /// <summary>
        /// Remove a datanode from the corrupt-replica record for the given block.
        /// </summary>
        /// <returns>
        /// true if the datanode was recorded as corrupt for the block and was
        /// removed; false if there was no record or the stored reason conflicts
        /// with the requested one.
        /// </returns>
        internal virtual bool RemoveFromCorruptReplicasMap(Block blk, DatanodeDescriptor
                                                           datanode, CorruptReplicasMap.Reason reason)
        {
            IDictionary <DatanodeDescriptor, CorruptReplicasMap.Reason> corruptNodes = corruptReplicasMap[blk];
            if (corruptNodes == null)
            {
                return(false);
            }
            // if reasons can be compared but don't match, return false.
            CorruptReplicasMap.Reason recorded = corruptNodes[datanode];
            if (reason != CorruptReplicasMap.Reason.Any && recorded != null && reason != recorded)
            {
                return(false);
            }
            if (Sharpen.Collections.Remove(corruptNodes, datanode) == null)
            {
                return(false);
            }
            // remove the replicas; drop the block entirely once its last corrupt
            // replica is gone.
            if (corruptNodes.IsEmpty())
            {
                Sharpen.Collections.Remove(corruptReplicasMap, blk);
            }
            return(true);
        }
コード例 #10
0
 /// <summary>
 /// Pull up to blockInvalidateLimit blocks scheduled for invalidation on the
 /// given datanode and hand them to the node for deletion.
 /// </summary>
 /// <returns>
 /// the blocks handed out, or null when deletion is still delayed after
 /// NameNode startup or nothing is queued for this node.
 /// </returns>
 internal virtual IList <Block> InvalidateWork(DatanodeDescriptor dn)
 {
     lock (this)
     {
         // Deletions are postponed for a configurable window after startup.
         long delay = GetInvalidationDelay();
         if (delay > 0)
         {
             if (BlockManager.Log.IsDebugEnabled())
             {
                 BlockManager.Log.Debug("Block deletion is delayed during NameNode startup. " + "The deletion will start after "
                                        + delay + " ms.");
             }
             return(null);
         }
         LightWeightHashSet <Block> queued = node2blocks[dn];
         if (queued == null)
         {
             return(null);
         }
         // # blocks that can be sent in one message is limited
         IList <Block> toInvalidate = queued.PollN(blockInvalidateLimit);
         // If we drained the node's queue completely, drop its entry.
         if (queued.IsEmpty())
         {
             Remove(dn);
         }
         dn.AddBlocksToBeInvalidated(toInvalidate);
         numBlocks -= toInvalidate.Count;
         return(toInvalidate);
     }
 }
コード例 #11
0
        /// <summary>
        /// Verify Host2NodesMap.Remove: removing absent nodes or null returns
        /// false, and a removed host is no longer resolvable by GetDatanodeByHost.
        /// </summary>
        public virtual void TestRemove()
        {
            DatanodeDescriptor nodeNotInMap = DFSTestUtil.GetDatanodeDescriptor("3.3.3.3", "/d1/r4"
                                                                                );

            // Removing a descriptor that was never added fails.
            NUnit.Framework.Assert.IsFalse(map.Remove(nodeNotInMap));
            NUnit.Framework.Assert.IsTrue(map.Remove(dataNodes[0]));
            // Fixed: the original queried "1.1.1.1." (trailing dot), an address
            // that can never be a key in the map, so the null check was vacuous.
            // Query the actual removed host, as the later assertions do.
            NUnit.Framework.Assert.IsTrue(map.GetDatanodeByHost("1.1.1.1") == null);
            NUnit.Framework.Assert.IsTrue(map.GetDatanodeByHost("2.2.2.2") == dataNodes[1]);
            // Host 3.3.3.3 is shared by dataNodes[2] and dataNodes[3].
            DatanodeDescriptor node = map.GetDatanodeByHost("3.3.3.3");

            NUnit.Framework.Assert.IsTrue(node == dataNodes[2] || node == dataNodes[3]);
            NUnit.Framework.Assert.IsNull(map.GetDatanodeByHost("4.4.4.4"));
            // After removing dataNodes[2], the shared host resolves to dataNodes[3].
            NUnit.Framework.Assert.IsTrue(map.Remove(dataNodes[2]));
            NUnit.Framework.Assert.IsNull(map.GetDatanodeByHost("1.1.1.1"));
            NUnit.Framework.Assert.AreEqual(map.GetDatanodeByHost("2.2.2.2"), dataNodes[1]);
            NUnit.Framework.Assert.AreEqual(map.GetDatanodeByHost("3.3.3.3"), dataNodes[3]);
            NUnit.Framework.Assert.IsTrue(map.Remove(dataNodes[3]));
            NUnit.Framework.Assert.IsNull(map.GetDatanodeByHost("1.1.1.1"));
            NUnit.Framework.Assert.AreEqual(map.GetDatanodeByHost("2.2.2.2"), dataNodes[1]);
            NUnit.Framework.Assert.IsNull(map.GetDatanodeByHost("3.3.3.3"));
            // Null removal is rejected; double-removal fails the second time.
            NUnit.Framework.Assert.IsFalse(map.Remove(null));
            NUnit.Framework.Assert.IsTrue(map.Remove(dataNodes[1]));
            NUnit.Framework.Assert.IsFalse(map.Remove(dataNodes[1]));
        }
コード例 #12
0
        /// <exception cref="System.Exception"/>
        public virtual void TestSetrepIncWithUnderReplicatedBlocks()
        {
            // 1 min timeout
            Configuration  conf = new HdfsConfiguration();
            short          ReplicationFactor = 2;
            string         FileName          = "/testFile";
            Path           FilePath          = new Path(FileName);
            MiniDFSCluster cluster           = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplicationFactor
                                                                                             + 1).Build();

            try
            {
                // create a file with one block with a replication factor of 2
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, FilePath, 1L, ReplicationFactor, 1L);
                DFSTestUtil.WaitReplication(fs, FilePath, ReplicationFactor);
                // remove one replica from the blocksMap so block becomes under-replicated
                // but the block does not get put into the under-replicated blocks queue
                BlockManager       bm = cluster.GetNamesystem().GetBlockManager();
                ExtendedBlock      b  = DFSTestUtil.GetFirstBlock(fs, FilePath);
                DatanodeDescriptor dn = bm.blocksMap.GetStorages(b.GetLocalBlock()).GetEnumerator
                                            ().Next().GetDatanodeDescriptor();
                bm.AddToInvalidates(b.GetLocalBlock(), dn);
                Sharpen.Thread.Sleep(5000);
                bm.blocksMap.RemoveNode(b.GetLocalBlock(), dn);
                // increment this file's replication factor
                FsShell shell = new FsShell(conf);
                NUnit.Framework.Assert.AreEqual(0, shell.Run(new string[] { "-setrep", "-w", Sharpen.Extensions.ToString
                                                                                (1 + ReplicationFactor), FileName }));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
コード例 #13
0
        /// <summary>Add new entries to the PendingUncached list.</summary>
        /// <param name="neededUncached">The number of replicas that need to be uncached.</param>
        /// <param name="cachedBlock">The block which needs to be uncached.</param>
        /// <param name="cached">A list of DataNodes currently caching the block.</param>
        /// <param name="pendingUncached">
        /// A list of DataNodes that will soon uncache the
        /// block.
        /// </param>
        private void AddNewPendingUncached(int neededUncached, CachedBlock cachedBlock, IList
                                           <DatanodeDescriptor> cached, IList <DatanodeDescriptor> pendingUncached)
        {
            // Figure out which replicas can be uncached.
            List <DatanodeDescriptor> possibilities = new List <DatanodeDescriptor>();

            foreach (DatanodeDescriptor datanode in cached)
            {
                if (!pendingUncached.Contains(datanode))
                {
                    possibilities.AddItem(datanode);
                }
            }
            while (neededUncached > 0)
            {
                if (possibilities.IsEmpty())
                {
                    Log.Warn("Logic error: we're trying to uncache more replicas than " + "actually exist for "
                             + cachedBlock);
                    return;
                }
                DatanodeDescriptor datanode_1 = possibilities.Remove(random.Next(possibilities.Count
                                                                                 ));
                pendingUncached.AddItem(datanode_1);
                bool added = datanode_1.GetPendingUncached().AddItem(cachedBlock);
                System.Diagnostics.Debug.Assert(added);
                neededUncached--;
            }
        }
コード例 #14
0
        /// <summary>Add all nodes from a dependent nodes list to excludedNodes.</summary>
        /// <returns>number of new excluded nodes</returns>
        private int AddDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode, ICollection
                                                     <Node> excludedNodes)
        {
            if (this.host2datanodeMap == null)
            {
                return(0);
            }
            int countOfExcludedNodes = 0;

            foreach (string hostname in chosenNode.GetDependentHostNames())
            {
                DatanodeDescriptor node = this.host2datanodeMap.GetDataNodeByHostName(hostname);
                if (node != null)
                {
                    if (excludedNodes.AddItem(node))
                    {
                        countOfExcludedNodes++;
                    }
                }
                else
                {
                    Log.Warn("Not able to find datanode " + hostname + " which has dependency with datanode "
                             + chosenNode.GetHostName());
                }
            }
            return(countOfExcludedNodes);
        }
コード例 #15
0
ファイル: Host2NodesMap.cs プロジェクト: orf53975/hadoop.net
 /// <summary>
 /// add node to the map
 /// return true if the node is added; false otherwise.
 /// </summary>
 /// <summary>
 /// add node to the map
 /// return true if the node is added; false otherwise.
 /// </summary>
 internal virtual bool Add(DatanodeDescriptor node)
 {
     hostmapLock.WriteLock().Lock();
     try
     {
         if (node == null || Contains(node))
         {
             return(false);
         }
         string ipAddr   = node.GetIpAddr();
         string hostname = node.GetHostName();
         mapHost[hostname] = ipAddr;
         // Append the node to the (usually singleton) array for its IP.
         DatanodeDescriptor[] existing = map[ipAddr];
         DatanodeDescriptor[] updated;
         if (existing == null)
         {
             updated = new DatanodeDescriptor[] { node };
         }
         else
         {
             // rare case: more than one datanode on the host
             updated = new DatanodeDescriptor[existing.Length + 1];
             System.Array.Copy(existing, 0, updated, 0, existing.Length);
             updated[existing.Length] = node;
         }
         map[ipAddr] = updated;
         return(true);
     }
     finally
     {
         hostmapLock.WriteLock().Unlock();
     }
 }
コード例 #16
0
        /// <summary>
        /// Drop a dead datanode from the network topology and strip the block
        /// replicas it was hosting from the block manager.
        /// </summary>
        private void RemoveNode(DatanodeDescriptor deadNode)
        {
            NetworkTopology topology = bm.GetDatanodeManager().GetNetworkTopology();
            topology.Remove(deadNode);
            bm.RemoveBlocksAssociatedTo(deadNode);
        }
コード例 #17
0
        /// <exception cref="System.Exception"/>
        private void DoTestTwoOfThreeNodesDecommissioned(int testIndex)
        {
            // Block originally on A1, A2, B1
            IList <DatanodeStorageInfo> origStorages = GetStorages(0, 1, 3);
            IList <DatanodeDescriptor>  origNodes    = GetNodes(origStorages);
            BlockInfoContiguous         blockInfo    = AddBlockOnNodes(testIndex, origNodes);
            // Decommission two of the nodes (A1, A2)
            IList <DatanodeDescriptor> decomNodes = StartDecommission(0, 1);

            DatanodeStorageInfo[] pipeline = ScheduleSingleReplication(blockInfo);
            NUnit.Framework.Assert.IsTrue("Source of replication should be one of the nodes the block "
                                          + "was on. Was: " + pipeline[0], origStorages.Contains(pipeline[0]));
            NUnit.Framework.Assert.AreEqual("Should have three targets", 3, pipeline.Length);
            bool foundOneOnRackA = false;

            for (int i = 1; i < pipeline.Length; i++)
            {
                DatanodeDescriptor target = pipeline[i].GetDatanodeDescriptor();
                if (rackA.Contains(target))
                {
                    foundOneOnRackA = true;
                }
                NUnit.Framework.Assert.IsFalse(decomNodes.Contains(target));
                NUnit.Framework.Assert.IsFalse(origNodes.Contains(target));
            }
            NUnit.Framework.Assert.IsTrue("Should have at least one target on rack A. Pipeline: "
                                          + Joiner.On(",").Join(pipeline), foundOneOnRackA);
        }
コード例 #18
0
ファイル: Host2NodesMap.cs プロジェクト: orf53975/hadoop.net
        /// <summary>
        /// remove node from the map
        /// return true if the node is removed; false otherwise.
        /// </summary>
        /// <summary>
        /// remove node from the map
        /// return true if the node is removed; false otherwise.
        /// </summary>
        internal virtual bool Remove(DatanodeDescriptor node)
        {
            if (node == null)
            {
                return(false);
            }
            string ipAddr   = node.GetIpAddr();
            string hostname = node.GetHostName();
            hostmapLock.WriteLock().Lock();
            try
            {
                DatanodeDescriptor[] nodes = map[ipAddr];
                if (nodes == null)
                {
                    return(false);
                }
                if (nodes.Length == 1)
                {
                    // Common case: one datanode on this IP. Drop both the IP
                    // entry and the hostname entry together.
                    if (nodes[0] != node)
                    {
                        return(false);
                    }
                    Sharpen.Collections.Remove(map, ipAddr);
                    //remove hostname key since last datanode is removed
                    Sharpen.Collections.Remove(mapHost, hostname);
                    return(true);
                }
                // rare case: several datanodes share the IP. Locate the node by
                // reference identity ...
                int index = 0;
                while (index < nodes.Length && nodes[index] != node)
                {
                    index++;
                }
                if (index == nodes.Length)
                {
                    return(false);
                }
                // ... and rebuild the array without it.
                DatanodeDescriptor[] shrunk = new DatanodeDescriptor[nodes.Length - 1];
                System.Array.Copy(nodes, 0, shrunk, 0, index);
                System.Array.Copy(nodes, index + 1, shrunk, index, nodes.Length - index - 1);
                map[ipAddr] = shrunk;
                return(true);
            }
            finally
            {
                hostmapLock.WriteLock().Unlock();
            }
        }
コード例 #19
0
 /// <summary>
 /// Build the namenode-side view of one storage on the given datanode,
 /// copying the storage's id, type, and state from the DatanodeStorage record.
 /// </summary>
 internal DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s)
 {
     // NOTE(review): the comment below appears to describe a field declared
     // elsewhere in the class ("last full block report" id), not these
     // assignments — likely displaced by the Java-to-C# conversion; confirm.
     // The ID of the last full block report which updated this storage.
     this.dn          = dn;
     this.storageID   = s.GetStorageID();
     this.storageType = s.GetStorageType();
     this.state       = s.GetState();
 }
コード例 #20
0
 /// <summary>
 /// Verify that the in-service xceiver average is computed over in-service
 /// nodes only (so it rises when nodes decommission) and that chooseTarget
 /// still finds 3 targets among the remaining nodes after the first three
 /// datanodes are decommissioned.
 /// </summary>
 public virtual void TestChooseTargetWithDecomNodes()
 {
     namenode.GetNamesystem().WriteLock();
     try
     {
         string blockPoolId = namenode.GetNamesystem().GetBlockPoolId();
         // Heartbeats report 2, 4, and 4 active xceivers on nodes 3..5.
         dnManager.HandleHeartbeat(dnrList[3], BlockManagerTestUtil.GetStorageReportsForDatanode
                                       (dataNodes[3]), blockPoolId, dataNodes[3].GetCacheCapacity(), dataNodes[3].GetCacheRemaining
                                       (), 2, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[4], BlockManagerTestUtil.GetStorageReportsForDatanode
                                       (dataNodes[4]), blockPoolId, dataNodes[4].GetCacheCapacity(), dataNodes[4].GetCacheRemaining
                                       (), 4, 0, 0, null);
         dnManager.HandleHeartbeat(dnrList[5], BlockManagerTestUtil.GetStorageReportsForDatanode
                                       (dataNodes[5]), blockPoolId, dataNodes[5].GetCacheCapacity(), dataNodes[5].GetCacheRemaining
                                       (), 4, 0, 0, null);
         // value in the above heartbeats
         int          load = 2 + 4 + 4;
         FSNamesystem fsn  = namenode.GetNamesystem();
         // With all 6 nodes in service the average is load/6.
         NUnit.Framework.Assert.AreEqual((double)load / 6, dnManager.GetFSClusterStats().GetInServiceXceiverAverage
                                             (), Epsilon);
         // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
         // returns false
         for (int i = 0; i < 3; i++)
         {
             DatanodeDescriptor d = dnManager.GetDatanode(dnrList[i]);
             dnManager.GetDecomManager().StartDecommission(d);
             d.SetDecommissioned();
         }
         // Only 3 nodes remain in service, so the average doubles to load/3.
         NUnit.Framework.Assert.AreEqual((double)load / 3, dnManager.GetFSClusterStats().GetInServiceXceiverAverage
                                             (), Epsilon);
         // update references of writer DN to update the de-commissioned state
         IList <DatanodeDescriptor> liveNodes = new AList <DatanodeDescriptor>();
         dnManager.FetchDatanodes(liveNodes, null, false);
         DatanodeDescriptor writerDn = null;
         if (liveNodes.Contains(dataNodes[0]))
         {
             writerDn = liveNodes[liveNodes.IndexOf(dataNodes[0])];
         }
         // Call chooseTarget()
         DatanodeStorageInfo[] targets = namenode.GetNamesystem().GetBlockManager().GetBlockPlacementPolicy
                                             ().ChooseTarget("testFile.txt", 3, writerDn, new AList <DatanodeStorageInfo>(), false
                                                             , null, 1024, TestBlockStoragePolicy.DefaultStoragePolicy);
         NUnit.Framework.Assert.AreEqual(3, targets.Length);
         // All chosen targets must come from the non-decommissioned storages
         // (indices 3 and up).
         ICollection <DatanodeStorageInfo> targetSet = new HashSet <DatanodeStorageInfo>(Arrays
                                                                                         .AsList(targets));
         for (int i_1 = 3; i_1 < storages.Length; i_1++)
         {
             NUnit.Framework.Assert.IsTrue(targetSet.Contains(storages[i_1]));
         }
     }
     finally
     {
         // Restore the decommissioned nodes before releasing the namesystem
         // lock so later tests see a clean state.
         dataNodes[0].StopDecommission();
         dataNodes[1].StopDecommission();
         dataNodes[2].StopDecommission();
         namenode.GetNamesystem().WriteUnlock();
     }
 }
コード例 #21
0
        /// <summary>
        /// GetDatanodeByHost resolves registered IPs to their descriptors; an IP
        /// shared by two datanodes may return either, and unknown IPs yield null.
        /// </summary>
        public virtual void TestGetDatanodeByHost()
        {
            NUnit.Framework.Assert.AreEqual(map.GetDatanodeByHost("1.1.1.1"), dataNodes[0]);
            NUnit.Framework.Assert.AreEqual(map.GetDatanodeByHost("2.2.2.2"), dataNodes[1]);
            // Host 3.3.3.3 is registered for both dataNodes[2] and dataNodes[3].
            DatanodeDescriptor resolved = map.GetDatanodeByHost("3.3.3.3");
            NUnit.Framework.Assert.IsTrue(resolved == dataNodes[2] || resolved == dataNodes[3]);
            // An address that was never registered resolves to null.
            NUnit.Framework.Assert.IsNull(map.GetDatanodeByHost("4.4.4.4"));
        }
コード例 #22
0
 /// <summary>
 /// Injects the given utilization figures into the node's first storage and then
 /// delivers a synthetic heartbeat so the heartbeat bookkeeping picks them up.
 /// </summary>
 private static void UpdateHeartbeatWithUsage(DatanodeDescriptor dn, long capacity
                                              , long dfsUsed, long remaining, long blockPoolUsed, long dnCacheCapacity, long dnCacheUsed
                                              , int xceiverCount, int volFailures)
 {
     // Only the first storage gets the synthetic utilization numbers.
     DatanodeStorageInfo primaryStorage = dn.GetStorageInfos()[0];
     primaryStorage.SetUtilizationForTesting(capacity, dfsUsed, remaining, blockPoolUsed);
     // Report all of the node's storages back through the normal heartbeat path.
     StorageReport[] reports = BlockManagerTestUtil.GetStorageReportsForDatanode(dn);
     dn.UpdateHeartbeat(reports, dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
 }
コード例 #23
0
 /// <summary>
 /// Registers a datanode with this manager: folds it into the in-service
 /// statistics, appends it to the live-node list, and flags it alive. The whole
 /// sequence runs under lock(this), the same lock convention the other mutators
 /// in this class (e.g. UpdateHeartbeat) use.
 /// </summary>
 internal virtual void AddDatanode(DatanodeDescriptor d)
 {
     lock (this)
     {
         // update in-service node count
         stats.Add(d);
         datanodes.AddItem(d);
         d.isAlive = true;
     }
 }
コード例 #24
0
 /// <summary>
 /// Maps an array of storages to the datanodes that own them, preserving order
 /// (one descriptor per storage; duplicates are kept if storages share a node).
 /// </summary>
 internal static DatanodeDescriptor[] ToDatanodeDescriptors(Org.Apache.Hadoop.Hdfs.Server.Blockmanagement.DatanodeStorageInfo
                                                            [] storages)
 {
     int count = storages.Length;
     DatanodeDescriptor[] owners = new DatanodeDescriptor[count];
     for (int idx = 0; idx < count; idx++)
     {
         owners[idx] = storages[idx].GetDatanodeDescriptor();
     }
     return owners;
 }
コード例 #25
0
        /// <summary>
        /// Exercises CorruptReplicasMap: argument bounds on GetCorruptReplicaBlockIds,
        /// the size counter across add/remove, de-duplication when two datanodes
        /// report the same block, and paged retrieval of block ids.
        /// </summary>
        public virtual void TestCorruptReplicaInfo()
        {
            CorruptReplicasMap crm = new CorruptReplicasMap();

            // Make sure initial values are returned correctly
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks must initially be 0", 0
                                            , crm.Size());
            // n outside [0, 100] is rejected with null rather than an exception.
            NUnit.Framework.Assert.IsNull("Param n cannot be less than 0", crm.GetCorruptReplicaBlockIds
                                              (-1, null));
            NUnit.Framework.Assert.IsNull("Param n cannot be greater than 100", crm.GetCorruptReplicaBlockIds
                                              (101, null));
            // n == 0 is valid and must yield an empty, non-null array.
            long[] l = crm.GetCorruptReplicaBlockIds(0, null);
            NUnit.Framework.Assert.IsNotNull("n = 0 must return non-null", l);
            NUnit.Framework.Assert.AreEqual("n = 0 must return an empty list", 0, l.Length);
            // create a list of block_ids. A list is used to allow easy validation of the
            // output of getCorruptReplicaBlockIds
            int          NumBlockIds = 140;
            IList <long> block_ids   = new List <long>();

            for (int i = 0; i < NumBlockIds; i++)
            {
                block_ids.AddItem((long)i);
            }
            DatanodeDescriptor dn1 = DFSTestUtil.GetLocalDatanodeDescriptor();
            DatanodeDescriptor dn2 = DFSTestUtil.GetLocalDatanodeDescriptor();

            // Each distinct block bumps the counter once ...
            AddToCorruptReplicasMap(crm, GetBlock(0), dn1);
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , 1, crm.Size());
            AddToCorruptReplicasMap(crm, GetBlock(1), dn1);
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , 2, crm.Size());
            // ... but a second datanode reporting an already-known block does not.
            AddToCorruptReplicasMap(crm, GetBlock(1), dn2);
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , 2, crm.Size());
            // Removing a block drops it entirely, regardless of how many datanodes
            // had reported it.
            crm.RemoveFromCorruptReplicasMap(GetBlock(1));
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , 1, crm.Size());
            crm.RemoveFromCorruptReplicasMap(GetBlock(0));
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , 0, crm.Size());
            // Bulk-load corrupt blocks with ids 0..139 from a single datanode.
            foreach (long block_id in block_ids)
            {
                AddToCorruptReplicasMap(crm, GetBlock(block_id), dn1);
            }
            NUnit.Framework.Assert.AreEqual("Number of corrupt blocks not returning correctly"
                                            , NumBlockIds, crm.Size());
            // Paged retrieval: the first page of five ids, then ten ids after
            // cursor 7 (exclusive) — ids come back in ascending order.
            NUnit.Framework.Assert.IsTrue("First five block ids not returned correctly ", Arrays
                                          .Equals(new long[] { 0, 1, 2, 3, 4 }, crm.GetCorruptReplicaBlockIds(5, null)));
            Log.Info(crm.GetCorruptReplicaBlockIds(10, 7L));
            Log.Info(block_ids.SubList(7, 18));
            NUnit.Framework.Assert.IsTrue("10 blocks after 7 not returned correctly ", Arrays
                                          .Equals(new long[] { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 }, crm.GetCorruptReplicaBlockIds
                                                      (10, 7L)));
        }
コード例 #26
0
        /// <summary>
        /// Builds a DatanodeDescriptor for the local datanode ID, optionally
        /// registering a single storage with a freshly generated UUID.
        /// </summary>
        public static DatanodeDescriptor GetLocalDatanodeDescriptor(bool initializeStorage
                                                                    )
        {
            DatanodeDescriptor descriptor =
                new DatanodeDescriptor(DFSTestUtil.GetLocalDatanodeID());
            if (initializeStorage)
            {
                string storageUuid = DatanodeStorage.GenerateUuid();
                descriptor.UpdateStorage(new DatanodeStorage(storageUuid));
            }
            return descriptor;
        }
コード例 #27
0
        public virtual void TestContains()
        {
            // Every node that was registered in the map must be reported present.
            foreach (DatanodeDescriptor registered in dataNodes)
            {
                NUnit.Framework.Assert.IsTrue(map.Contains(registered));
            }
            // Null and a node that was never registered must both be absent.
            DatanodeDescriptor outsider = DFSTestUtil.GetDatanodeDescriptor("3.3.3.3", "/d1/r4"
                                                                            );
            NUnit.Framework.Assert.IsFalse(map.Contains(null));
            NUnit.Framework.Assert.IsFalse(map.Contains(outsider));
        }
コード例 #28
0
        /// <summary>
        /// Creates a datanode descriptor on the default datanode port and, when a
        /// storage is supplied, registers that storage on the new descriptor.
        /// </summary>
        public static DatanodeDescriptor GetDatanodeDescriptor(string ipAddr, string rackLocation
                                                               , DatanodeStorage storage, string hostname)
        {
            DatanodeDescriptor descriptor = DFSTestUtil.GetDatanodeDescriptor(
                ipAddr, DFSConfigKeys.DfsDatanodeDefaultPort, rackLocation, hostname);
            if (storage == null)
            {
                // No storage to attach — hand back the bare descriptor.
                return descriptor;
            }
            descriptor.UpdateStorage(storage);
            return descriptor;
        }
コード例 #29
0
 /// <summary>
 /// Applies a heartbeat's storage reports to a node while keeping the aggregate
 /// statistics consistent: the node's old contribution is subtracted, the node
 /// is updated in place, then its new contribution is added back. The whole
 /// sequence runs under lock(this), matching the other lock(this) mutators in
 /// this class, so the subtract/update/add triple is applied atomically with
 /// respect to them.
 /// </summary>
 internal virtual void UpdateHeartbeat(DatanodeDescriptor node, StorageReport[] reports
                                       , long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes, VolumeFailureSummary
                                       volumeFailureSummary)
 {
     lock (this)
     {
         stats.Subtract(node);
         node.UpdateHeartbeat(reports, cacheCapacity, cacheUsed, xceiverCount, failedVolumes
                              , volumeFailureSummary);
         stats.Add(node);
     }
 }
コード例 #30
0
        /// <summary>
        /// Verifies DatanodeManager.GetDatanodeListForReport against include/exclude
        /// host lists: included-but-unregistered hosts are reported dead, registering
        /// a matching node clears them, and excluding a dead node hides it entirely.
        /// </summary>
        public virtual void TestIncludeExcludeLists()
        {
            BlockManager    bm   = Org.Mockito.Mockito.Mock <BlockManager>();
            FSNamesystem    fsn  = Org.Mockito.Mockito.Mock <FSNamesystem>();
            Configuration   conf = new Configuration();
            HostFileManager hm   = new HostFileManager();

            HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
            HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
            // Four adds collapse to two entries (asserted below): the literal
            // duplicate of "127.0.0.1:12345", and "localhost:12345" — presumably
            // because localhost resolves to 127.0.0.1; verify against HostSet.
            includedNodes.Add(Entry("127.0.0.1:12345"));
            includedNodes.Add(Entry("localhost:12345"));
            includedNodes.Add(Entry("127.0.0.1:12345"));
            includedNodes.Add(Entry("127.0.0.2"));
            excludedNodes.Add(Entry("127.0.0.1:12346"));
            excludedNodes.Add(Entry("127.0.30.1:12346"));
            NUnit.Framework.Assert.AreEqual(2, includedNodes.Size());
            NUnit.Framework.Assert.AreEqual(2, excludedNodes.Size());
            hm.Refresh(includedNodes, excludedNodes);
            DatanodeManager dm = new DatanodeManager(bm, fsn, conf);

            // Swap the hand-built host lists into the manager and grab its internal
            // registration map so datanodes can be injected directly.
            Whitebox.SetInternalState(dm, "hostFileManager", hm);
            IDictionary <string, DatanodeDescriptor> dnMap = (IDictionary <string, DatanodeDescriptor
                                                                           >)Whitebox.GetInternalState(dm, "datanodeMap");

            // After the de-duplication, there should be only one DN from the included
            // nodes declared as dead.
            NUnit.Framework.Assert.AreEqual(2, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .All).Count);
            NUnit.Framework.Assert.AreEqual(2, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
            // Registering a live node for 127.0.0.1 removes it from the dead set ...
            dnMap["uuid-foo"] = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost"
                                                                      , "uuid-foo", 12345, 1020, 1021, 1022));
            NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
            // ... and registering 127.0.0.2 clears the remaining dead entry.
            dnMap["uuid-bar"] = new DatanodeDescriptor(new DatanodeID("127.0.0.2", "127.0.0.2"
                                                                      , "uuid-bar", 12345, 1020, 1021, 1022));
            NUnit.Framework.Assert.AreEqual(0, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
            // A registered node that has been marked dead, on the include list,
            // shows up as dead ...
            DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" + ".3",
                                                                            "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));

            DFSTestUtil.SetDatanodeDead(spam);
            includedNodes.Add(Entry("127.0.0.3:12345"));
            dnMap["uuid-spam"] = spam;
            NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
            // ... stays dead after deregistration, since it is still included ...
            Sharpen.Collections.Remove(dnMap, "uuid-spam");
            NUnit.Framework.Assert.AreEqual(1, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
            // ... and disappears from the report once it is also excluded.
            excludedNodes.Add(Entry("127.0.0.3"));
            NUnit.Framework.Assert.AreEqual(0, dm.GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                           .Dead).Count);
        }