/// <summary>
/// Builds a new distribution map when a node joins the cluster: balances the
/// existing buckets over the current members plus the joining node, records the
/// new member, and transfers ownership of the selected buckets to it.
/// </summary>
/// <param name="distInfoData">Describes the joining node.</param>
/// <returns>The new hashmap together with its buckets-ownership table.</returns>
protected DistributionMaps GetMapsOnNodeJoining(DistributionInfoData distInfoData)
{
    if (NCacheLog.IsInfoEnabled)
    {
        NCacheLog.Info("DistributionMgr.GetMapsOnNodeJoining()", "Total Data Size : " + TotalDataSize.ToString());
    }

    // Decide which buckets the joining node should take over, then register it
    // as an existing member.
    ArrayList newHashMap = DistributeHashMap.BalanceBuckets(distInfoData, _lastCreatedHashMap, _bucketsStats, _existingMembers, _cacheSizePerNode, NCacheLog);
    _existingMembers.Add(distInfoData.AffectedNode.NodeAddress);

    // Hand the selected buckets over to the new node and publish the result.
    ArrayList tmpMap = ChangeOwnerShip(newHashMap, distInfoData.AffectedNode.NodeAddress);
    _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
    Hashtable bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);

    if (NCacheLog.IsInfoEnabled)
    {
        NCacheLog.Info("DistributionMgr.GetMaps()", "Sending new map as a new node joined the cluster");
    }
    return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
}
/// <summary>
/// Builds a new distribution map when a node leaves the cluster by
/// redistributing the leaving node's (orphan) buckets among the remaining members.
/// </summary>
/// <param name="distInfoData">Describes the leaving node.</param>
/// <returns>The new hashmap and ownership table, or null when no
/// redistribution could be produced.</returns>
protected DistributionMaps GetMapsOnNodeLeaving(DistributionInfoData distInfoData)
{
    _existingMembers.Remove(distInfoData.AffectedNode.NodeAddress);

    if (NCacheLog.IsInfoEnabled)
    {
        // FIX: message previously read "Existing Members Count:0" + count,
        // which logged e.g. "Count:03" for a count of 3; dropped the stray '0'.
        NCacheLog.Info("DistributionMgr.GetMapsOnNodeLeaving()", "Before Calling DistributeOrphanBuckets() ---- Leaving Node:" + distInfoData.AffectedNode.NodeAddress.ToString() + " Existing Members Count:" + _existingMembers.Count);
    }

    ArrayList tmpMap = DistributeHashMap.DistributeOrphanBuckets(_lastCreatedHashMap, distInfoData.AffectedNode.NodeAddress, _existingMembers);
    if (tmpMap == null)
    {
        return null;
    }

    _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
    Hashtable bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
    return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
}
/// <summary>
/// Attempts to balance data load between the affected (primary) node and a set
/// of candidate nodes chosen from the cluster.
/// </summary>
/// <param name="distInfo">Identifies the node requesting balancing.</param>
/// <param name="hashMap">Current hashmap (list of HashMapBucket).</param>
/// <param name="bucketStats">Per-bucket statistics used to weigh the balance.</param>
/// <param name="members">Current cluster members.</param>
/// <returns>New maps on success; a result flagged AlreadyInBalancing or
/// NotRequired when balancing cannot or need not run.</returns>
public DistributionMaps BalanceNodes(DistributionInfoData distInfo, ArrayList hashMap, Hashtable bucketStats, ArrayList members)
{
    _hashMap = hashMap;
    _nodeBalData = new NodeBalanceData(hashMap, bucketStats, members);

    // Bail out if any other state transfer is in progress.
    bool bAllFunctional = this.SanityCheckForAllFunctional(hashMap);
    if (!bAllFunctional)
    {
        return new DistributionMaps(BalancingResult.AlreadyInBalancing);
    }

    // Check whether the node really needs balancing at all.
    bool bShouldBalance = this.SanityCheckForCandidateNode((Address)distInfo.AffectedNode.NodeAddress);
    if (!bShouldBalance)
    {
        return new DistributionMaps(BalancingResult.NotRequired);
    }

    // (Removed an unused local that read _nodeBalData.BalanceDataListForNodes
    // without ever using it.)
    // Shed the computed weight share from the primary node to each candidate.
    ArrayList candidates = FilterCandidateNodes();
    foreach (AddressWeightPair awPair in candidates)
    {
        BalanceDataForNode secNode = GetBalDataForNode(awPair.NodeAddress);
        BalanceTwoNodes(_primaryNode, secNode, awPair.WeightShare);
        ApplyChangesInHashMap(secNode);
    }
    ApplyChangesInHashMap(_primaryNode);
    return new DistributionMaps(_hashMap, null);
}
/// <summary>
/// A new map is required when a member leaves or joins the cluster.
/// This method returns a new map based on the input parameters.
/// </summary>
/// <param name="distInfoData">Describes the affected member and the cluster
/// activity (join / leave / none) that triggered the request.</param>
/// <returns>A new hashmap instance, or null when the activity produced no map
/// (e.g. an exception during join/leave handling).</returns>
public virtual DistributionMaps GetMaps(DistributionInfoData distInfoData)
{
    _sync.AcquireWriterLock(Timeout.Infinite);
    try
    {
        // Very first node in the cluster: build a fresh map assigning every
        // bucket to it.
        if (_installedHashMap == null)
        {
            ArrayList tmpMap = new ArrayList(TotalBuckets);
            for (int i = 0; i < TotalBuckets; i++)
            {
                HashMapBucket bucket = new HashMapBucket(distInfoData.AffectedNode.NodeAddress, i, BucketStatus.Functional);
                tmpMap.Add(bucket);
            }
            _existingMembers.Add(distInfoData.AffectedNode.NodeAddress);
            _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
            Hashtable bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
            return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
        }
        else if (_lastCreatedHashMap == null)
        {
            // Non-coordinator that has just become coordinator: start from the
            // installed map.
            _lastCreatedHashMap = _installedHashMap.Clone() as ArrayList;
        }

        switch (distInfoData.ClustActivity)
        {
            case ClusterActivity.NodeJoin:
                try
                {
                    return GetMapsOnNodeJoining(distInfoData);
                }
                catch (Exception e)
                {
                    if (NCacheLog.IsErrorEnabled)
                    {
                        NCacheLog.Error("DistributionMgr.GetMaps()", e.ToString());
                    }
                    break; // fall out of the switch; null is returned below
                }
            case ClusterActivity.NodeLeave:
                try
                {
                    return GetMapsOnNodeLeaving(distInfoData);
                }
                catch (Exception e)
                {
                    if (NCacheLog.IsErrorEnabled)
                    {
                        NCacheLog.Error("DistributionMgr.GetMaps()", e.ToString());
                    }
                    break;
                }
            case ClusterActivity.None:
                // No membership change: attempt data balancing across the
                // current members.
                BalanceNodeMgr bnMgr = new BalanceNodeMgr(null);
                DistributionMaps result = bnMgr.BalanceNodes(distInfoData, _lastCreatedHashMap, _bucketsStats, _existingMembers);
                if (result.Hashmap != null)
                {
                    _lastCreatedHashMap = result.Hashmap.Clone() as ArrayList;
                    result.BucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
                }
                return result;
            default:
                break;
        }
    }
    finally
    {
        _sync.ReleaseWriterLock();
    }
    return null;
}
/// <summary>
/// Selects the buckets to be handed over to the node described by
/// <paramref name="distInfo"/>, according to the requested distribution mode.
/// When the cluster carries no load, falls back to a shuffled distribution.
/// </summary>
/// <param name="distInfo">Request details; its DistribMode may be rewritten to
/// ShuffleBuckets when no weight balancing is needed.</param>
/// <param name="hashMap">Current hashmap.</param>
/// <param name="bucketStats">Per-bucket statistics.</param>
/// <param name="members">Current cluster members.</param>
/// <param name="cacheSizePerNode">Configured cache size per node.</param>
/// <param name="NCacheLog">Logger.</param>
/// <returns>List of selected bucket ids, or null for an unknown mode.</returns>
public static ArrayList BalanceBuckets(DistributionInfoData distInfo, ArrayList hashMap, Hashtable bucketStats, ArrayList members, long cacheSizePerNode, ILogger NCacheLog)
{
    DistributionData distData = new DistributionData(hashMap, bucketStats, members, NCacheLog, cacheSizePerNode);

    // If weight and time-to-move have to be averaged, cut the weight to half.
    if (distInfo.DistribMode == DistributionMode.AvgWeightTime)
    {
        if (NCacheLog.IsInfoEnabled)
        {
            NCacheLog.Info("DistributionImpl.BalanceBuckets()", "Request comes with DistributionMode.AvgWeightTime");
        }
        distData.WeightPerNode /= 2;
    }

    ArrayList distMatrix = distData.DistributionMatrixForNodes;
    ArrayList finalBuckets = new ArrayList();

    // When the cluster is only starting up there is no actual load on any node,
    // so no weight balancing is needed — only a shuffled distribution.
    bool bShouldBalanceWeight = false;
    foreach (DistributionMatrix dMatrix in distMatrix)
    {
        if (dMatrix.DoWeightBalance == true)
        {
            bShouldBalanceWeight = true;
            break;
        }
    }
    if (bShouldBalanceWeight == false)
    {
        if (NCacheLog.IsInfoEnabled)
        {
            NCacheLog.Info("DistributionImpl.BalanceBuckets()", "Cluster is not loaded only shuffled distribution is required. No need to balance any weight.");
        }
        distInfo.DistribMode = DistributionMode.ShuffleBuckets;
    }

    switch (distInfo.DistribMode)
    {
        case DistributionMode.OptimalTime:
            // Optimal time: always hand over the first row of each node's matrix.
            foreach (DistributionMatrix dMatrix in distMatrix)
            {
                int[,] IdMatrix = dMatrix.IdMatrix;
                for (int i = 0; i < dMatrix.MatrixDimension.Cols; i++)
                {
                    finalBuckets.Add(IdMatrix[0, i]);
                }
            }
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.OptimalTime", finalBuckets);
            return finalBuckets;

        case DistributionMode.ShuffleBuckets:
            foreach (DistributionMatrix dMatrix in distMatrix)
            {
                AddSelectedBuckets(dMatrix, DistributionCore.ShuffleSelect(dMatrix), finalBuckets);
            }
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.ShuffleBuckets", finalBuckets);
            return finalBuckets;

        case DistributionMode.OptimalWeight:
        // Intentional fall-through: the same selection works for both modes;
        // the only difference is the weight halved above for AvgWeightTime.
        case DistributionMode.AvgWeightTime:
            foreach (DistributionMatrix dMatrix in distMatrix)
            {
                AddSelectedBuckets(dMatrix, DistributionCore.CompareAndSelect(dMatrix), finalBuckets);
            }
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.AvgWeightTime/ DistributionMode.OptimalWeight", finalBuckets);
            return finalBuckets;

        default:
            break;
    } // end switch
    return null;
} // end func.

// Maps the flat result indices produced by the selection algorithm back to
// bucket ids and appends them to finalBuckets. (Extracted to remove the
// duplicated loops the original had in the ShuffleBuckets and weight cases.)
private static void AddSelectedBuckets(DistributionMatrix dMatrix, RowsBalanceResult rbResult, ArrayList finalBuckets)
{
    int[,] IdMatrix = dMatrix.IdMatrix;
    int[] resultIndices = rbResult.ResultIndicies;
    for (int i = 0, j = 0; i < resultIndices.Length; i++)
    {
        // Convert the one-dimensional index into row/col of the matrix.
        int index = resultIndices[i];
        int row = index / dMatrix.MatrixDimension.Cols;
        int col = index % dMatrix.MatrixDimension.Cols;
        if (IdMatrix[row, col] == -1)
        {
            // Exceptional case: the last row may contain filler (-1) entries;
            // substitute the lowest-most indices from the first row instead.
            finalBuckets.Add(IdMatrix[0, j]);
            j++;
        }
        else
        {
            finalBuckets.Add(IdMatrix[row, col]);
        }
    }
}

// Info-logs the request mode and the list of buckets that were selected.
private static void LogSelectedBuckets(ILogger NCacheLog, string mode, ArrayList finalBuckets)
{
    if (NCacheLog.IsInfoEnabled)
    {
        NCacheLog.Info("DistributionImpl.BalanceBuckets()", mode);
        NCacheLog.Info("Selected Buckets are: -");
        for (int i = 0; i < finalBuckets.Count; i++)
        {
            NCacheLog.Info(finalBuckets[i].ToString());
        }
    }
}
/// <summary>
/// Override of GetMaps for a topology with sub-group partitions (replicas):
/// keeps per-partition member lists (_partitionNodesInfo) and a node→sub-group
/// map (_subGroupMap) in sync while producing new distribution maps on
/// join/leave, and promotes a replica to partition coordinator when the
/// coordinator leaves.
/// NOTE(review): unlike the base GetMaps, this override does not acquire the
/// _sync writer lock — presumably the caller holds it; verify.
/// </summary>
public override DistributionMaps GetMaps(DistributionInfoData distInfoData)
{
    ArrayList tmpMap = null;
    Hashtable bucketsOwnershipMap = null;
    ArrayList partitionNodes = new ArrayList();
    // Very first node in the cluster: build a map assigning all buckets to it.
    if (_installedHashMap == null)
    {
        tmpMap = new ArrayList(TotalBuckets);
        for (int i = 0; i < TotalBuckets; i++)
        {
            HashMapBucket bucket = new HashMapBucket(distInfoData.AffectedNode.NodeAddress, i, BucketStatus.Functional);
            tmpMap.Add(bucket);
        }
        _existingMembers.Add(distInfoData.AffectedNode.NodeAddress);
        _subGroupMap[distInfoData.AffectedNode.NodeAddress] = distInfoData.AffectedNode.SubGroup;
        // For each new group we keep a list of members. For Partition it will be
        // one; for POR it can be greater than one. This is the first member, so
        // create the list here.
        distInfoData.AffectedNode.IsCoordinator = true;
        partitionNodes.Add(distInfoData.AffectedNode);
        // A hashtable keeping the list of addresses against each GROUP/partition.
        _partitionNodesInfo.Add(distInfoData.AffectedNode.SubGroup, partitionNodes);
        _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
        bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
        return (new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap));
    }
    // For a non-coordinator node that recently became coordinator: start from
    // the installed map.
    else if (_lastCreatedHashMap == null)
    {
        _lastCreatedHashMap = _installedHashMap.Clone() as ArrayList;
    }
    switch (distInfoData.ClustActivity)
    {
        case ClusterActivity.NodeJoin:
            try
            {
                // Assuming existing members do not contain the newly added member.
                if (!_partitionNodesInfo.ContainsKey(distInfoData.AffectedNode.SubGroup))
                {
                    // First node of a brand-new partition: it becomes that
                    // partition's coordinator and gets buckets via a real rebalance.
                    partitionNodes = new ArrayList();
                    distInfoData.AffectedNode.IsCoordinator = true;
                    partitionNodes.Add(distInfoData.AffectedNode);
                    _subGroupMap[distInfoData.AffectedNode.NodeAddress] = distInfoData.AffectedNode.SubGroup;
                    // A hashtable keeping the list of addresses against each GROUP/partition.
                    _partitionNodesInfo.Add(distInfoData.AffectedNode.SubGroup, partitionNodes);
                    if (NCacheLog.IsInfoEnabled)
                    {
                        NCacheLog.Info("DistributionMgr.GetMaps()", "Sending new map as a new node joined the cluster");
                    }
                    return (GetMapsOnNodeJoining(distInfoData));
                }
                else
                {
                    // Joining an existing partition (a replica): only membership
                    // bookkeeping changes; the last created map is re-sent.
                    partitionNodes = (ArrayList)_partitionNodesInfo[distInfoData.AffectedNode.SubGroup];
                    partitionNodes.Add(distInfoData.AffectedNode);
                    _subGroupMap[distInfoData.AffectedNode.NodeAddress] = distInfoData.AffectedNode.SubGroup;
                    return (new DistributionMaps(_lastCreatedHashMap, GetBucketsOwnershipMap(_lastCreatedHashMap)));
                }
            }
            catch (Exception e)
            {
                if (NCacheLog.IsErrorEnabled)
                {
                    NCacheLog.Error("DistributionMgr.GetMaps()", e.ToString());
                }
                break; // falls through to the final `return null`
            }
        case ClusterActivity.NodeLeave:
            // Assuming existing members do not contain the node to be removed/left.
            _existingMembers.Remove(distInfoData.AffectedNode.NodeAddress);
            _subGroupMap.Remove(distInfoData.AffectedNode.NodeAddress);
            // If this node was the only one in its partition, its buckets must be
            // redistributed over the remaining partitions.
            if (IsLastNodeInPartition(distInfoData.AffectedNode))
            {
                _partitionNodesInfo.Remove(distInfoData.AffectedNode.SubGroup);
                return (GetMapsOnNodeLeaving(distInfoData));
            }
            else // This means we still have nodes available for this partition.
            {
                ArrayList groupNodes = (ArrayList)_partitionNodesInfo[distInfoData.AffectedNode.SubGroup];
                if (IsCoordinatorNodeInPartition(distInfoData.AffectedNode))
                {
                    // The partition coordinator left: promote the next node in the
                    // partition and transfer the replica's bucket ownership to it.
                    groupNodes.Remove((object)distInfoData.AffectedNode);
                    ((PartNodeInfo)groupNodes[0]).IsCoordinator = true;
                    _partitionNodesInfo[distInfoData.AffectedNode.SubGroup] = groupNodes;
                    _existingMembers.Add(((PartNodeInfo)groupNodes[0]).NodeAddress);
                    tmpMap = UpgradeToCoordinatorOfReplica(distInfoData.AffectedNode.NodeAddress, ((PartNodeInfo)groupNodes[0]).NodeAddress);
                    _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
                    bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
                    return (new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap));
                }
                else
                {
                    // A non-coordinator replica left: simply remove the node and
                    // get a new bucket ownership map.
                    groupNodes.Remove(distInfoData.AffectedNode);
                    return (new DistributionMaps(_lastCreatedHashMap, GetBucketsOwnershipMap(_lastCreatedHashMap)));
                }
            }
        case ClusterActivity.None:
            // No membership change: attempt data balancing across current members.
            BalanceNodeMgr bnMgr = new BalanceNodeMgr(null);
            DistributionMaps result = bnMgr.BalanceNodes(distInfoData, _lastCreatedHashMap, _bucketsStats, _existingMembers);
            if (result.Hashmap != null)
            {
                _lastCreatedHashMap = result.Hashmap.Clone() as ArrayList;
                result.BucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
            }
            return (result);
        default:
            break;
    }
    return (null);
}