/// <summary>
/// Tries to even out bucket weight between the primary node and a set of
/// candidate secondary nodes, producing an updated hashmap.
/// </summary>
/// <param name="distInfo">Describes the affected (overloaded) node.</param>
/// <param name="hashMap">Current bucket-to-node hashmap.</param>
/// <param name="bucketStats">Per-bucket statistics used to weigh buckets.</param>
/// <param name="members">Current cluster members.</param>
/// <returns>
/// A <see cref="DistributionMaps"/> holding the rebalanced hashmap, or a
/// result-only instance when balancing is already running or not required.
/// </returns>
public DistributionMaps BalanceNodes(DistributionInfoData distInfo, ArrayList hashMap, Hashtable bucketStats, ArrayList members)
{
    _hashMap = hashMap;
    _nodeBalData = new NodeBalanceData(hashMap, bucketStats, members);

    // Refuse to start while another state transfer is still in progress.
    if (!SanityCheckForAllFunctional(hashMap))
        return new DistributionMaps(BalancingResult.AlreadyInBalancing);

    // Refuse if the affected node does not actually need any balancing.
    if (!SanityCheckForCandidateNode((Address)distInfo.AffectedNode.NodeAddress))
        return new DistributionMaps(BalancingResult.NotRequired);

    // NOTE(review): the result is not read afterwards, but the getter may
    // build state lazily, so the read is preserved as in the original.
    ArrayList perNodeBalanceData = _nodeBalData.BalanceDataListForNodes;

    // Shed the primary node's excess weight onto each chosen secondary.
    ArrayList shareTargets = FilterCandidateNodes();
    foreach (AddressWeightPair pair in shareTargets)
    {
        BalanceDataForNode secondary = GetBalDataForNode(pair.NodeAddress);
        BalanceTwoNodes(_primaryNode, secondary, pair.WeightShare);
        ApplyChangesInHashMap(secondary);
    }
    ApplyChangesInHashMap(_primaryNode);

    return new DistributionMaps(_hashMap, null);
}
/// <summary>
/// Computes the list of bucket ids that should be handed over to the affected
/// node, according to the requested distribution mode. If the cluster carries
/// no real load, the request is downgraded to a shuffled distribution.
/// </summary>
/// <param name="distInfo">Distribution request; its DistribMode may be rewritten to ShuffleBuckets.</param>
/// <param name="hashMap">Current bucket-to-node hashmap.</param>
/// <param name="bucketStats">Per-bucket statistics.</param>
/// <param name="members">Current cluster members.</param>
/// <param name="cacheSizePerNode">Cache size budget per node.</param>
/// <param name="NCacheLog">Logger.</param>
/// <returns>Selected bucket ids, or null for an unknown distribution mode.</returns>
public static ArrayList BalanceBuckets(DistributionInfoData distInfo, ArrayList hashMap, Hashtable bucketStats, ArrayList members, long cacheSizePerNode, ILogger NCacheLog)
{
    DistributionData distData = new DistributionData(hashMap, bucketStats, members, NCacheLog, cacheSizePerNode);

    // If weight and time-to-move are to be averaged, cut the weight in half.
    if (distInfo.DistribMode == DistributionMode.AvgWeightTime)
    {
        if (NCacheLog.IsInfoEnabled) NCacheLog.Info("DistributionImpl.BalanceBuckets()", "Request comes with DistributionMode.AvgWeightTime");
        distData.WeightPerNode /= 2;
    }

    ArrayList distMatrix = distData.DistributionMatrixForNodes;
    ArrayList finalBuckets = new ArrayList();

    // Cater for the case where the cluster itself is starting and no actual
    // load is present on any node: no weight balancing is required then.
    bool bShouldBalanceWeight = false;
    foreach (DistributionMatrix dMatrix in distMatrix)
    {
        if (dMatrix.DoWeightBalance)
        {
            bShouldBalanceWeight = true;
            break;
        }
    }
    if (!bShouldBalanceWeight)
    {
        if (NCacheLog.IsInfoEnabled) NCacheLog.Info("DistributionImpl.BalanceBuckets()", "Cluster is not loaded only shuffled disribution is required. No need to balance any weight.");
        distInfo.DistribMode = DistributionMode.ShuffleBuckets;
    }

    switch (distInfo.DistribMode)
    {
        case DistributionMode.OptimalTime:
            // Fastest hand-over: always give away the first row of each matrix.
            foreach (DistributionMatrix dMatrix in distMatrix)
            {
                int[,] IdMatrix = dMatrix.IdMatrix;
                for (int i = 0; i < dMatrix.MatrixDimension.Cols; i++)
                    finalBuckets.Add(IdMatrix[0, i]);
            }
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.OptimalTime", finalBuckets);
            return finalBuckets;

        case DistributionMode.ShuffleBuckets:
            foreach (DistributionMatrix dMatrix in distMatrix)
                AddSelectedBuckets(finalBuckets, dMatrix, DistributionCore.ShuffleSelect(dMatrix));
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.ShuffleBuckets", finalBuckets);
            return finalBuckets;

        case DistributionMode.OptimalWeight:
        // Deliberate fall-through: both modes share the same selection code;
        // only the weight (halved above for AvgWeightTime) differs.
        case DistributionMode.AvgWeightTime:
            foreach (DistributionMatrix dMatrix in distMatrix)
                AddSelectedBuckets(finalBuckets, dMatrix, DistributionCore.CompareAndSelect(dMatrix));
            LogSelectedBuckets(NCacheLog, "Request is DistributionMode.AvgWeightTime/ DistributionMode.OptimalWeight", finalBuckets);
            return finalBuckets;

        default:
            break;
    } //end switch
    return null;
} //end func.

/// <summary>
/// Translates the flat indices chosen by a selection pass into bucket ids and
/// appends them to <paramref name="finalBuckets"/>. A flat index maps to
/// (row, col) in the id matrix; -1 entries (padding in the last row) are
/// replaced with the lowest-most ids from the first row.
/// </summary>
private static void AddSelectedBuckets(ArrayList finalBuckets, DistributionMatrix dMatrix, RowsBalanceResult rbResult)
{
    int[,] IdMatrix = dMatrix.IdMatrix;
    int[] resultIndices = rbResult.ResultIndicies;
    for (int i = 0, j = 0; i < resultIndices.Length; i++)
    {
        int index = resultIndices[i];
        // Get row and col on the basis of matrix index (index of one-D array).
        int row = index / dMatrix.MatrixDimension.Cols;
        int col = index % dMatrix.MatrixDimension.Cols;
        if (IdMatrix[row, col] == -1)
        {
            // Exceptional case: the selected last row contains padding; use
            // the lowest-most indices in the matrix instead.
            finalBuckets.Add(IdMatrix[0, j]);
            j++;
        }
        else
        {
            finalBuckets.Add(IdMatrix[row, col]);
        }
    }
}

/// <summary>Logs the request mode and the chosen bucket ids at info level.</summary>
private static void LogSelectedBuckets(ILogger NCacheLog, string requestMode, ArrayList finalBuckets)
{
    if (NCacheLog.IsInfoEnabled)
    {
        NCacheLog.Info("DistributionImpl.BalanceBuckets()", requestMode);
        NCacheLog.Info("Selected Buckets are: -");
        for (int i = 0; i < finalBuckets.Count; i++)
            NCacheLog.Info(finalBuckets[i].ToString());
    }
}
/// <summary>
/// Extension point for building distribution maps; derived classes override
/// this to supply an actual map. The base implementation returns null.
/// </summary>
/// <param name="info">Describes the node and cluster activity the map is requested for.</param>
/// <returns>Always null in the base implementation.</returns>
protected virtual DistributionMaps GetMaps(DistributionInfoData info) { return null; }
/// <summary>
/// Handles a data-load-balance request from another node: computes new
/// distribution maps for the requesting node and publishes them if the
/// balancing actually produced a result.
/// </summary>
/// <param name="info">Expected to be the <see cref="Address"/> of the requesting node.</param>
private void handleBalanceDataLoad(object info)
{
    Address requestingNode = info as Address;
    PartNodeInfo partNode = new PartNodeInfo(requestingNode, null, false);
    DistributionInfoData distData = new DistributionInfoData(DistributionMode.Manual, ClusterActivity.None, partNode);
    DistributionMaps maps = GetMaps(distData);
    // FIX: GetMaps may return null (base implementation and error paths do),
    // which previously caused a NullReferenceException here.
    if (maps != null && maps.BalancingResult == BalancingResult.Default)
    {
        PublishMaps(maps);
    }
}
/// <summary>
/// Extension point for serving distribution-map requests; topology-specific
/// caches override this. The base implementation returns null.
/// </summary>
/// <param name="distInfo">Describes the node and cluster activity the map is requested for.</param>
/// <returns>Always null in the base implementation.</returns>
public virtual DistributionMaps GetDistributionMaps(DistributionInfoData distInfo) { return null; }
/// <summary>
/// Serves a remote request for the current distribution (hashmap) and mirror
/// maps. The request package is expected to be
/// [members, isJoining, subGroup, isStartedAsMirror].
/// </summary>
/// <param name="data">object[] carrying the request parameters.</param>
/// <returns>object[] of { DistributionMaps, CacheNode[] }.</returns>
public object GetDistributionAndMirrorMaps(object data)
{
    NCacheLog.Debug("MessageResponder.GetDistributionAndMirrorMaps()", "here comes the request for hashmap");

    // Unpack the request package.
    object[] package = data as object[];
    ArrayList members = package[0] as ArrayList;
    bool isJoining = (bool)package[1];
    string subGroup = (string)package[2];
    bool isStartedAsMirror = (bool)package[3];

    ClusterActivity activity = isJoining ? ClusterActivity.NodeJoin : ClusterActivity.NodeLeave;

    // The first member in the package is the node the maps are requested for.
    PartNodeInfo affectedNode = new PartNodeInfo((Address)members[0], subGroup, !isStartedAsMirror);
    DistributionInfoData info = new DistributionInfoData(DistributionMode.OptimalWeight, activity, affectedNode);
    if (NCacheLog.IsInfoEnabled)
        NCacheLog.Info("ClusterService.GetDistributionMaps", "NodeAddress: " + info.AffectedNode.NodeAddress.ToString() + " subGroup: " + subGroup + " isMirror: " + isStartedAsMirror.ToString() + " " + (isJoining ? "joining" : "leaving"));
    DistributionMaps maps = _distributionPolicyMbr.GetDistributionMaps(info);

    CacheNode[] mirrors = _distributionPolicyMbr.GetMirrorMap();
    NCacheLog.Debug("MessageResponder.GetDistributionAndMirrorMaps()", "sending hashmap response back...");
    return new object[] { maps, mirrors };
}
/// <summary>
/// Notify the target object of a change of membership: computes who joined
/// and who left relative to the current member list, installs or builds the
/// distribution/mirror maps carried by the view, updates the server list,
/// and fires the member-joined/member-left callbacks.
/// </summary>
/// <param name="newView">New view of group.</param>
void MembershipListener.viewAccepted(View newView)
{
    System.Collections.ArrayList joined_mbrs, left_mbrs, tmp;
    ArrayList joining_mbrs = new ArrayList();
    // Serialize view handling; membership state below is not otherwise guarded.
    lock (viewMutex)
    {
        object tmp_mbr;
        if (newView == null) return;
        NCacheLog.CriticalInfo("ClusterService.ViewAccepted", newView.ToString());
        tmp = newView.Members;
        if (newView.Vid != null)
        {
            this._lastViewId = newView.Vid.Id;
        }
        // get new members: anyone in the view not already in _members
        joined_mbrs = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
        for (int i = 0; i < tmp.Count; i++)
        {
            tmp_mbr = tmp[i];
            if (!_members.Contains(tmp_mbr)) joined_mbrs.Add(tmp_mbr);
        }
        // joining members: everyone from the local node's position onward in
        // the joined list (the whole joined list if we are not among them)
        int localIndex = 0;
        if (joined_mbrs.Contains(LocalAddress)) localIndex = joined_mbrs.IndexOf(LocalAddress);
        for (int i = localIndex; i < joined_mbrs.Count; i++)
        {
            joining_mbrs.Add(joined_mbrs[i]);
        }
        // get members that left: anyone in _members missing from the view
        left_mbrs = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
        for (int i = 0; i < _members.Count; i++)
        {
            tmp_mbr = _members[i];
            if (!tmp.Contains(tmp_mbr)) left_mbrs.Add(tmp_mbr);
        }
        // adjust our own membership
        _members.Clear();
        _members.AddRange(tmp);
        //muds:
        //pick the map from the view and send it to cache.
        //if i am the only member, i can build the map locally.
        if (newView.DistributionMaps == null && newView.Members.Count == 1)
        {
            if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterService.viewAccepted()", "I am the only member in the view so, building map myself");
            PartNodeInfo affectedNode = new PartNodeInfo(LocalAddress, _subgroupid, true);
            DistributionInfoData info = new DistributionInfoData(DistributionMode.OptimalWeight, ClusterActivity.NodeJoin, affectedNode);
            DistributionMaps maps = _distributionPolicyMbr.GetDistributionMaps(info);
            if (maps != null)
            {
                _distributionPolicyMbr.HashMap = maps.Hashmap;
                _distributionPolicyMbr.BucketsOwnershipMap = maps.BucketsOwnershipMap;
            }
        }
        else
        {
            // Install whatever maps the view carries.
            if (newView.MirrorMapping != null)
            {
                _distributionPolicyMbr.InstallMirrorMap(newView.MirrorMapping);
                NCacheLog.Info("ClusterService.viewAccepted()", "New MirrorMap installed.");
            }
            if (newView.DistributionMaps != null)
            {
                _distributionPolicyMbr.InstallHashMap(newView.DistributionMaps, left_mbrs);
                if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterService.viewAccepted()", "New hashmap installed");
            }
        }
        lock (_servers.SyncRoot)
        {
            // Departed members: remove from _servers and raise OnMemberLeft.
            if (left_mbrs.Count > 0)
            {
                for (int i = left_mbrs.Count - 1; i >= 0; i--)
                {
                    Address ipAddr = (Address)((Address)left_mbrs[i]);
                    // NOTE(review): AdditionalData is dereferenced without a null
                    // check here and below — presumably it is always set for
                    // cluster members; confirm against the joining code path.
                    if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterService.viewAccepted", ipAddr.AdditionalData.Length.ToString());
                    // Clone so clearing AdditionalData below does not mutate the view's copy.
                    ipAddr = (Address)ipAddr.Clone();
                    if (_servers.Contains(ipAddr)) _servers.Remove(ipAddr);
                    OnMemberLeft(ipAddr, CompactBinaryFormatter.FromByteBuffer(ipAddr.AdditionalData, _context.SerializationContext) as NodeIdentity);
                    ipAddr.AdditionalData = null;
                }
            }
            _validMembers = (ArrayList)_members.Clone();
            if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterService.viewAccepted", joining_mbrs.Count.ToString());
            // Joined members: raise OnMemberJoined and add accepted ones to _servers.
            if (joined_mbrs.Count > 0)
            {
                for (int i = 0; i < joined_mbrs.Count; i++)
                {
                    Address ipAddr = (Address)joined_mbrs[i];
                    if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterService.viewAccepted", ipAddr.AdditionalData.Length.ToString());
                    ipAddr = (Address)ipAddr.Clone();
                    if (OnMemberJoined(ipAddr, CompactBinaryFormatter.FromByteBuffer(ipAddr.AdditionalData, _context.SerializationContext) as NodeIdentity, joining_mbrs))
                    {
                        if (NCacheLog.IsInfoEnabled) NCacheLog.Info("ClusterServices.ViewAccepted", ipAddr.ToString() + " is added to _servers list.");
                        _servers.Add(ipAddr);
                    }
                    ipAddr.AdditionalData = null;
                }
            }
        }
        // Adopt the bridge source cache id from the first view that supplies one.
        if (String.IsNullOrEmpty(_bridgeSourceCacheId)) _bridgeSourceCacheId = newView.BridgeSourceCacheId;
        OnAfterMembershipChange();
    }
}
/// <summary>
/// Builds a new distribution map when a node leaves the cluster: removes the
/// leaving member and redistributes its orphaned buckets over the remaining
/// members.
/// </summary>
/// <param name="distInfoData">Identifies the leaving node.</param>
/// <returns>New maps, or null if the orphan-bucket distribution produced nothing.</returns>
protected DistributionMaps GetMapsOnNodeLeaving(DistributionInfoData distInfoData)
{
    ArrayList tmpMap = null;
    Hashtable bucketsOwnershipMap = null;
    _existingMembers.Remove(distInfoData.AffectedNode.NodeAddress);
    // FIX: the log message previously read "Existing Members Count:0" + count,
    // which rendered e.g. "Count:03" for a count of 3; the stray "0" is removed.
    if (NCacheLog.IsInfoEnabled) NCacheLog.Info("DistributionMgr.GetMapsOnNodeLeaving()", "Before Calling DistributeOrphanBuckets() ---- Leaving Node:" + distInfoData.AffectedNode.NodeAddress.ToString() + " Existing Members Count:" + _existingMembers.Count);
    tmpMap = DistributeHashMap.DistributeOrphanBuckets(_lastCreatedHashMap, distInfoData.AffectedNode.NodeAddress, _existingMembers);
    if (tmpMap == null) return null;
    _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
    bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
    return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
}
/// <summary>
/// Builds a new distribution map when a node joins the cluster: balances the
/// buckets over the new member set and transfers ownership of the selected
/// buckets to the joining node.
/// </summary>
/// <param name="distInfoData">Identifies the joining node.</param>
/// <returns>New maps reflecting the joined member.</returns>
protected DistributionMaps GetMapsOnNodeJoining(DistributionInfoData distInfoData)
{
    if (NCacheLog.IsInfoEnabled) NCacheLog.Info("DistributionMgr.GetMapsOnNodeJoining()", "Total Data Size : " + TotalDataSize.ToString());
    // FIX: removed the unused local 'partitionNodes'.
    ArrayList newHashMap = DistributeHashMap.BalanceBuckets(distInfoData, _lastCreatedHashMap, _bucketsStats, _existingMembers, _cacheSizePerNode, NCacheLog);
    _existingMembers.Add(distInfoData.AffectedNode.NodeAddress);
    ArrayList tmpMap = ChangeOwnerShip(newHashMap, distInfoData.AffectedNode.NodeAddress);
    _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
    Hashtable bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
    // FIX: log tag previously said "DistributionMgr.GetMaps()"; corrected to this method.
    if (NCacheLog.IsInfoEnabled) NCacheLog.Info("DistributionMgr.GetMapsOnNodeJoining()", "Sending new map as a new node joined the cluster");
    return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
}
/// <summary>
/// A new map is required when a member leaves or joins the cluster.
/// This method returns a new map based on the input parameters: it bootstraps
/// a map when none is installed yet, and otherwise delegates to the
/// join/leave/balance handlers under a writer lock.
/// </summary>
/// <param name="distInfoData">Describes the affected node and the cluster
/// activity (join, leave, or none for a manual balance).</param>
/// <returns>A new hashmap instance, or null if the activity failed or was unrecognized.</returns>
public virtual DistributionMaps GetMaps(DistributionInfoData distInfoData)
{
    ArrayList tmpMap = null;
    Hashtable bucketsOwnershipMap = null;
    // NOTE(review): partitionNodes is never used in this method.
    ArrayList partitionNodes = new ArrayList();
    // All map creation/mutation happens under the writer lock.
    _sync.AcquireWriterLock(Timeout.Infinite);
    try
    {
        if (_installedHashMap == null)
        {
            // First map ever: assign every bucket to the affected (first) node.
            tmpMap = new ArrayList(TotalBuckets);
            for (int i = 0; i < TotalBuckets; i++)
            {
                HashMapBucket bucket = new HashMapBucket(distInfoData.AffectedNode.NodeAddress, i, BucketStatus.Functional);
                tmpMap.Add(bucket);
            }
            _existingMembers.Add(distInfoData.AffectedNode.NodeAddress);
            _lastCreatedHashMap = tmpMap.Clone() as ArrayList;
            bucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
            return new DistributionMaps(_lastCreatedHashMap, bucketsOwnershipMap);
        }
        else if (_lastCreatedHashMap == null)
        {
            // Start from the installed map if no working copy exists yet.
            _lastCreatedHashMap = _installedHashMap.Clone() as ArrayList;
        }
        switch (distInfoData.ClustActivity)
        {
            case ClusterActivity.NodeJoin:
                try
                {
                    return GetMapsOnNodeJoining(distInfoData);
                }
                catch (Exception e)
                {
                    // On failure, fall out of the switch and return null below.
                    if (NCacheLog.IsErrorEnabled) NCacheLog.Error("DistributionMgr.GetMaps()", e.ToString());
                    break;
                }
            case ClusterActivity.NodeLeave:
                try
                {
                    return GetMapsOnNodeLeaving(distInfoData);
                }
                catch (Exception e)
                {
                    if (NCacheLog.IsErrorEnabled) NCacheLog.Error("DistributionMgr.GetMaps()", e.ToString());
                    break;
                }
            case ClusterActivity.None:
                // Manual/automatic load balance with an unchanged member set.
                BalanceNodeMgr bnMgr = new BalanceNodeMgr(null);
                DistributionMaps result = bnMgr.BalanceNodes(distInfoData, _lastCreatedHashMap, _bucketsStats, _existingMembers);
                if (result.Hashmap != null)
                {
                    _lastCreatedHashMap = result.Hashmap.Clone() as ArrayList;
                    result.BucketsOwnershipMap = GetBucketsOwnershipMap(_lastCreatedHashMap);
                }
                return result;
            default:
                break;
        }
    }
    finally
    {
        _sync.ReleaseWriterLock();
    }
    return null;
}
/// <summary>
/// Checks whether any node is carrying noticeably more than the average load
/// and, if so, asks the distribution manager for rebalanced maps for the
/// top candidate and publishes them.
/// </summary>
internal override void AutoLoadBalance()
{
    if (_distributionMgr.CandidateNodesForBalance.Count > 0)
    {
        DetermineClusterStatus();
        // Re-read the candidates: DetermineClusterStatus may have changed them.
        ArrayList candidateNodes = _distributionMgr.CandidateNodesForBalance;
        if (candidateNodes != null && candidateNodes.Count > 0)
        {
            DistributionManager.CandidateNodeForLoadBalance candidateNode = candidateNodes[0] as DistributionManager.CandidateNodeForLoadBalance;
            if (NCacheLog.IsInfoEnabled) NCacheLog.Info("PartitionedCache.AutoLoadBalance", "candidate node count: " + candidateNodes.Count + " candidate node :" + candidateNode.Node + " above avg(%) :" + candidateNode.PercentageAboveAverage);
            PartNodeInfo nodeInfo = new PartNodeInfo(candidateNode.Node as Address, null, false);
            DistributionInfoData distInfo = new DistributionInfoData(DistributionMode.Manual, ClusterActivity.None, nodeInfo);
            DistributionMaps maps = _distributionMgr.GetMaps(distInfo);
            // FIX: GetMaps can return null (e.g. when an internal handler fails);
            // previously maps.BalancingResult was dereferenced unconditionally.
            if (maps == null)
            {
                if (NCacheLog.IsInfoEnabled) NCacheLog.Info("PartitionedCache.AutoLoadBalance", "no maps returned; skipping load balance");
                return;
            }
            if (NCacheLog.IsInfoEnabled) NCacheLog.Info("PartitionedCache.AutoLoadBalance", "result :" + maps.BalancingResult);
            if (maps.BalancingResult == BalancingResult.Default)
            {
                PublishMaps(maps);
            }
        }
        else if (NCacheLog.IsInfoEnabled) NCacheLog.Info("PartitionedCache.AutoLoadBalance", "No need to load balance");
    }
}
/// <summary>
/// Delegates map creation to the distribution manager.
/// </summary>
/// <param name="info">Describes the node and cluster activity the map is requested for.</param>
/// <returns>Whatever the distribution manager produces (may be null).</returns>
protected override DistributionMaps GetMaps(DistributionInfoData info)
{
    DistributionMaps maps = _distributionMgr.GetMaps(info);
    return maps;
}
/// <summary>
/// Serves a hashmap request by clearing any group restriction and delegating
/// to the distribution manager.
/// </summary>
/// <param name="distInfo">Describes the node and cluster activity; its Group is reset to null.</param>
/// <returns>Whatever the distribution manager produces (may be null).</returns>
public override DistributionMaps GetDistributionMaps(DistributionInfoData distInfo)
{
    NCacheLog.Debug("PartitionedCache.GetHashMap()", "here comes the request for hashmap");
    // Group-based distribution is not used here.
    distInfo.Group = null;
    DistributionMaps maps = _distributionMgr.GetMaps(distInfo);
    return maps;
}