/// <summary>
/// Combines the collected statistics of the nodes, in a replicated environment.
/// </summary>
/// <returns>The combined statistics.</returns>
public static CacheStatistics CombineReplicatedStatistics(ClusterCacheStatistics s)
{
    CacheStatistics stats = new CacheStatistics();
    if (s.Nodes == null)
    {
        return stats;
    }

    // Hit and miss counters are accumulated across all nodes.
    for (int i = 0; i < s.Nodes.Count; i++)
    {
        NodeInfo info = s.Nodes[i] as NodeInfo;
        if (info == null || info.Statistics == null)
        {
            continue;
        }
        stats.HitCount += info.Statistics.HitCount;
        stats.MissCount += info.Statistics.MissCount;
    }

    // Every replica holds the complete data set, so the local node's
    // figures already describe the whole cache.
    stats.UpdateCount(s.LocalNode.Statistics.Count);
    stats.MaxCount = s.LocalNode.Statistics.MaxCount;
    stats.MaxSize = s.LocalNode.Statistics.MaxSize;
    stats.SessionCount = s.LocalNode.Statistics.SessionCount;
    return stats;
}
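The replicated combiner sums only the hit/miss counters and takes the item count from the local node, because every replica holds the full data set. Below is a minimal standalone sketch of that aggregation pattern; SimpleNodeStats and ReplicatedAggregationSketch are hypothetical stand-ins, not the NCache CacheStatistics/NodeInfo types used above.

// Illustrative only: stand-in types, not the NCache statistics classes.
using System;
using System.Collections.Generic;
using System.Linq;

class SimpleNodeStats
{
    public long HitCount, MissCount, Count;
}

class ReplicatedAggregationSketch
{
    static void Main()
    {
        var local = new SimpleNodeStats { HitCount = 10, MissCount = 2, Count = 500 };
        var nodes = new List<SimpleNodeStats>
        {
            local,
            new SimpleNodeStats { HitCount = 7, MissCount = 1, Count = 500 },
        };

        // Hits and misses are additive across replicas...
        long hits = nodes.Sum(n => n.HitCount);
        long misses = nodes.Sum(n => n.MissCount);

        // ...but the item count is not: each replica holds the same data,
        // so the local node's count already represents the whole cache.
        long count = local.Count;

        Console.WriteLine($"hits={hits}, misses={misses}, count={count}");
    }
}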
internal void ClientDisconnected(string client, DateTime dcTime)
{
    if (_stats == null)
    {
        return;
    }

    if (_stats is ClusterCacheStatistics && _stats.ClassName != "mirror-server")
    {
        bool clientExist = false;
        ClusterCacheStatistics clusterStats = _stats as ClusterCacheStatistics;

        foreach (NodeInfo node in clusterStats.Nodes)
        {
            if (node.ConnectedClients.Contains(client))
            {
                clientExist = true;
                break;
            }
        }

        if (!clientExist)
        {
            CreateNewDeadClientFinalizer(client, dcTime);
        }
    }
    else
    {
        CreateNewDeadClientFinalizer(client, dcTime);
    }
}
internal void ClientConnected(string client)
{
    if (_deadClients.ContainsKey(client))
    {
        lock (syncRoot)
        {
            _deadClients.Remove(client);
        }
    }
    else if (_suspectedClients.ContainsKey(client))
    {
        if (_stats is ClusterCacheStatistics)
        {
            bool clientExist = true;
            ClusterCacheStatistics clusterStats = _stats as ClusterCacheStatistics;

            foreach (NodeInfo node in clusterStats.Nodes)
            {
                if (!node.ConnectedClients.Contains(client))
                {
                    clientExist = false;
                    break;
                }
            }

            if (clientExist)
            {
                lock (syncRoot)
                {
                    _suspectedClients.Remove(client);
                }
            }
        }
    }
}
internal void ClientDisconnected(string client, DateTime dcTime)
{
    if (_stats == null)
    {
        return;
    }

    if (_stats is ClusterCacheStatistics && _stats.ClassName != "mirror-server")
    {
        bool clientExist = false;
        ClusterCacheStatistics clusterStats = _stats as ClusterCacheStatistics;

        // Check whether the client is still connected to any node in the cluster.
        foreach (NodeInfo node in clusterStats.Nodes)
        {
            if (node.ConnectedClients.Contains(client))
            {
                clientExist = true;
                break;
            }
        }

        lock (syncRoot)
        {
            if (clientExist)
            {
                // Still connected somewhere: only mark it as suspected
                // (the replicated topology skips the suspected list).
                if (_stats.ClassName != "replicated-server")
                {
                    _suspectedClients[client] = dcTime;
                }
            }
            else
            {
                // No node reports the client: move it to the dead list and
                // wake any thread waiting on syncRoot to process dead clients.
                if (_suspectedClients.ContainsKey(client))
                {
                    _suspectedClients.Remove(client);
                }
                _deadClients[client] = dcTime;
                Monitor.Pulse(syncRoot);
            }
        }
    }
    else
    {
        lock (syncRoot)
        {
            _deadClients[client] = dcTime;
            Monitor.Pulse(syncRoot);
        }
    }
}
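The Monitor.Pulse(syncRoot) calls above wake a thread that waits on the same lock to process the dead-client list; that consumer is not shown in this excerpt. The following is only a generic sketch of the wait/pulse hand-off pattern with hypothetical names (DeadClientQueueSketch, MarkDead, DrainLoop), not the actual finalizer logic from the source.

// Generic producer/consumer hand-off via Monitor.Wait/Pulse (illustrative only).
using System;
using System.Collections.Generic;
using System.Threading;

class DeadClientQueueSketch
{
    private readonly object syncRoot = new object();
    private readonly Dictionary<string, DateTime> _deadClients = new Dictionary<string, DateTime>();

    public void MarkDead(string client)
    {
        lock (syncRoot)
        {
            _deadClients[client] = DateTime.Now;
            Monitor.Pulse(syncRoot);   // wake the thread waiting in DrainLoop
        }
    }

    public void DrainLoop()
    {
        while (true)
        {
            string[] clients;
            lock (syncRoot)
            {
                // Guarded wait: re-check the condition so a missed pulse cannot stall us.
                while (_deadClients.Count == 0)
                {
                    Monitor.Wait(syncRoot);   // releases the lock until Pulse is called
                }
                clients = new string[_deadClients.Count];
                _deadClients.Keys.CopyTo(clients, 0);
                _deadClients.Clear();
            }
            foreach (string c in clients)
            {
                Console.WriteLine("finalizing dead client: " + c);
            }
        }
    }

    public static void Main()
    {
        var q = new DeadClientQueueSketch();
        new Thread(q.DrainLoop) { IsBackground = true }.Start();
        q.MarkDead("client-1");
        Thread.Sleep(100);   // give the drain thread time to run before exiting
    }
}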
/// <summary>
/// Return the next node in call balancing order that is fully functional.
/// </summary>
/// <returns>The selected node, or null if no running node is found.</returns>
NodeInfo IActivityDistributor.SelectNode(ClusterCacheStatistics clusterStats, object hint)
{
    ArrayList memberInfos = clusterStats.Nodes;
    lock (memberInfos.SyncRoot)
    {
        int maxtries = memberInfos.Count;
        NodeInfo info = null;

        // Walk the member list round-robin, starting where the last call left off,
        // and return the first node whose status has the Running bit set.
        do
        {
            info = (NodeInfo)memberInfos[_lastServ % memberInfos.Count];
            _lastServ = (_lastServ + 1) % memberInfos.Count;

            if (info.Status.IsAnyBitSet(NodeStatus.Running))
            {
                return info;
            }
            maxtries--;
        }
        while (maxtries > 0);
    }
    return null;
}
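The distributor above is a round-robin scan that skips nodes without the Running status bit and gives up after one full pass. A standalone sketch of that selection pattern follows; RoundRobinSketch and its members are illustrative names, not part of the NCache IActivityDistributor interface.

// Round-robin selection that skips unavailable members (illustrative types only).
using System;
using System.Collections.Generic;

class RoundRobinSketch
{
    private int _last;

    // Returns the next running member, or null when none is running.
    public string Select(IReadOnlyList<string> members, Func<string, bool> isRunning)
    {
        if (members.Count == 0) return null;

        for (int tries = 0; tries < members.Count; tries++)
        {
            string candidate = members[_last % members.Count];
            _last = (_last + 1) % members.Count;
            if (isRunning(candidate))
                return candidate;
        }
        return null;   // every member was skipped
    }

    static void Main()
    {
        var rr = new RoundRobinSketch();
        var servers = new List<string> { "10.0.0.1", "10.0.0.2", "10.0.0.3" };

        // Pretend 10.0.0.2 is down; selection alternates between .1 and .3.
        Func<string, bool> running = s => s != "10.0.0.2";

        for (int i = 0; i < 4; i++)
            Console.WriteLine(rr.Select(servers, running));
    }
}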
/// <summary>
/// Combines the collected statistics of the nodes, in a partitioned environment.
/// </summary>
/// <returns>The combined statistics.</returns>
public static CacheStatistics CombinePartitionReplicasStatistics(ClusterCacheStatistics s)
{
    CacheStatistics stats = new CacheStatistics();
    if (s.Nodes == null)
    {
        return stats;
    }

    bool zeroSeen = false;
    for (int i = 0; i < s.Nodes.Count; i++)
    {
        NodeInfo info = s.Nodes[i] as NodeInfo;

        // Only coordinator/sub-coordinator nodes contribute to the combined figures.
        if (info == null || info.Statistics == null ||
            !info.Status.IsAnyBitSet(NodeStatus.Coordinator | NodeStatus.SubCoordinator))
        {
            continue;
        }

        stats.HitCount += info.Statistics.HitCount;
        stats.MissCount += info.Statistics.MissCount;
        stats.UpdateCount(stats.Count + info.Statistics.Count);
        stats.MaxCount += info.Statistics.MaxCount;

        if (info.Statistics.MaxCount == 0)
        {
            zeroSeen = true;
        }
    }

    stats.MaxSize = s.LocalNode.Statistics.MaxSize;

    // If any partition reported MaxCount == 0, the combined MaxCount is reported as 0 as well.
    if (zeroSeen)
    {
        stats.MaxCount = 0;
    }
    return stats;
}
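In contrast to the replicated combiner, the partition combiner sums item counts and MaxCount across the coordinator nodes, and the zeroSeen flag propagates a zero MaxCount (which appears to stand for "no limit") to the combined result. A minimal standalone sketch of that summation, again with hypothetical stand-in types rather than the NCache classes:

// Illustrative only: stand-in types, not the NCache statistics classes.
using System;
using System.Collections.Generic;
using System.Linq;

class PartitionStats
{
    public long Count, MaxCount;
}

class PartitionedAggregationSketch
{
    static void Main()
    {
        var coordinators = new List<PartitionStats>
        {
            new PartitionStats { Count = 300, MaxCount = 1000 },
            new PartitionStats { Count = 450, MaxCount = 1000 },
            new PartitionStats { Count = 120, MaxCount = 0 },     // zero taken as "no limit"
        };

        // Each partition holds a disjoint slice of the data, so counts are additive.
        long totalCount = coordinators.Sum(p => p.Count);
        long totalMax = coordinators.Sum(p => p.MaxCount);

        // If any partition reports MaxCount == 0, report the combined limit as 0 too.
        if (coordinators.Any(p => p.MaxCount == 0))
            totalMax = 0;

        Console.WriteLine($"count={totalCount}, maxCount={totalMax}");
    }
}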
/// <summary>
/// Return the next node in call balancing order that is fully functional.
/// </summary>
/// <returns>The selected address, or null if no running node is found in the target group.</returns>
internal Address SelectNode(ClusterCacheStatistics clusterStats, SubCluster targetGroup, Address localAddress)
{
    ArrayList servers = targetGroup.Servers;
    ArrayList memberInfos = clusterStats.Nodes;
    //lock (servers.SyncRoot)
    {
        int maxtries = servers.Count;
        Address address = null;

        // If the local node participates as a backup in the
        // target group then return the local node.
        foreach (Address node in servers)
        {
            if (node.IpAddress.Equals(localAddress.IpAddress))
            {
                if (IsNodeRunning(node, memberInfos))
                {
                    return node;
                }
            }
        }

        // If the local node could not be selected then
        // do the call balancing.
        do
        {
            address = (Address)servers[_lastServ % servers.Count];
            _lastServ = (_lastServ + 1) % servers.Count;

            if (IsNodeRunning(address, memberInfos))
            {
                return address;
            }
            maxtries--;
        }
        while (maxtries > 0);
    }
    return null;
}
private void PrintDetailedCacheInfo(CacheStatistics s, string topology, string partId, bool isRunning, string cacheName, string configString, string pid)
{
    long MaxSize = 0;
    string schemeName = topology;
    bool running = isRunning;

    OutputProvider.WriteLine("Cache-Name:\t\t{0}", cacheName);
    OutputProvider.WriteLine("Topology:\t\t{0} ", schemeName);

    if (running)
    {
        OutputProvider.WriteLine("UpTime: " + s.UpTime);
        if (s.MaxSize != 0)
        {
            OutputProvider.WriteLine("Capacity: " + ((s.MaxSize / 1024) / 1024) + " MB");
        }
        else
        {
            OutputProvider.WriteLine("Capacity: " + MaxSize + "MB");
        }
        OutputProvider.WriteLine("Item Count: " + s.Count);
    }

    OutputProvider.WriteLine("Status:\t\t\t{0}", isRunning ? "Running" : "Stopped");

    if (running)
    {
        OutputProvider.WriteLine("Process-ID:\t\t{0}", pid);

        if (s is ClusterCacheStatistics)
        {
            System.Text.StringBuilder nodes = new System.Text.StringBuilder();
            ClusterCacheStatistics cs = s as ClusterCacheStatistics;

            OutputProvider.WriteLine("Cluster-size: " + cs.Nodes.Count);
            MaxSize = (cs.LocalNode.Statistics.MaxSize / 1024) / 1024;

            foreach (NodeInfo n in cs.Nodes)
            {
                nodes.Append(" ").Append(n.Address).Append("\n");
            }
            OutputProvider.WriteLine("{0}", nodes.ToString());

            if (partId != null && partId != string.Empty)
            {
                if (cs.SubgroupNodes != null && cs.SubgroupNodes.Contains(partId.ToLower()))
                {
                    nodes = new System.Text.StringBuilder();
                    ArrayList groupNodes = cs.SubgroupNodes[partId.ToLower()] as ArrayList;
                    OutputProvider.WriteLine("Partition-size: " + groupNodes.Count);

                    foreach (Address address in groupNodes)
                    {
                        nodes.Append(" ").Append(address).Append("\n");
                    }
                }
                OutputProvider.WriteLine("{0}", nodes.ToString());
            }
        }
        else
        {
            OutputProvider.WriteLine("");
        }
    }
    else
    {
        OutputProvider.WriteLine("");
    }
}
static private void PrintDetailedCacheInfo(Cache cache, string partId, bool printConf, bool xmlSyntax)
{
    CacheStatistics s = cache.Statistics;
    long MaxSize = 0;
    string schemeName = s.ClassName.ToLower(CultureInfo.CurrentCulture);
    bool running = cache.IsRunning;

    Console.WriteLine("Cache-ID: {0}", cache.Name);
    if (partId != null && partId != string.Empty)
    {
        Console.WriteLine("Partition-ID: {0}", partId);
    }
    Console.WriteLine("Scheme: {0}", schemeName);
    Console.WriteLine("Status: {0}", cache.IsRunning ? "Running" : "Stopped");

    if (running)
    {
        if (s is ClusterCacheStatistics)
        {
            System.Text.StringBuilder nodes = new System.Text.StringBuilder();
            ClusterCacheStatistics cs = s as ClusterCacheStatistics;

            Console.WriteLine("Cluster size: {0}", cs.Nodes.Count);
            MaxSize = (cs.LocalNode.Statistics.MaxSize / 1024) / 1024;

            foreach (NodeInfo n in cs.Nodes)
            {
                nodes.Append(" ").Append(n.Address).Append("\n");
            }
            Console.Write("{0}", nodes.ToString());

            if (partId != null && partId != string.Empty)
            {
                if (cs.SubgroupNodes != null && cs.SubgroupNodes.Contains(partId.ToLower()))
                {
                    nodes = new System.Text.StringBuilder();
                    ArrayList groupNodes = cs.SubgroupNodes[partId.ToLower()] as ArrayList;
                    Console.WriteLine("Partition size: {0}", groupNodes.Count);

                    foreach (Address address in groupNodes)
                    {
                        nodes.Append(" ").Append(address).Append("\n");
                    }
                }
                Console.Write("{0}", nodes.ToString());
            }
        }

        Console.WriteLine("UpTime: {0}", s.UpTime);
        if (s.MaxSize != 0)
        {
            Console.WriteLine("Capacity: {0} MB", ((s.MaxSize / 1024) / 1024));
        }
        else
        {
            Console.WriteLine("Capacity: {0} MB", MaxSize);
        }
        Console.WriteLine("Count: {0}", s.Count);
    }

    if (printConf)
    {
        try
        {
            if (xmlSyntax)
            {
                PropsConfigReader pr = new PropsConfigReader(cache.ConfigString);
                Console.WriteLine("Configuration:\n{0}", ConfigReader.ToPropertiesXml(pr.Properties, true));
            }
            else
            {
                Console.WriteLine("Configuration:\n{0}", cache.ConfigString);
            }
        }
        catch (ConfigurationException)
        {
        }
    }

    Console.WriteLine("");
}
/// <summary>
/// Selects the node that should service the next operation: the coordinator or
/// sub-coordinator with the smallest object count, honoring data affinity when a
/// group name is supplied.
/// </summary>
/// <param name="clusterStats">Collected information about all the server nodes.</param>
/// <param name="data">Optional group name used for data affinity.</param>
/// <returns>The selected node, or null if no suitable node is found.</returns>
NodeInfo IActivityDistributor.SelectNode(ClusterCacheStatistics clusterStats, object data)
{
    ArrayList memberInfos = clusterStats.Nodes;
    string group = data as string;
    bool gpAfStrict = false;

    NodeInfo min = null;   // least-loaded coordinator seen so far
    NodeInfo gMin = null;  // least-loaded coordinator whose affinity includes the requested group
    NodeInfo sMin = null;  // fallback candidate when no affinity match is found

    lock (memberInfos.SyncRoot)
    {
        if (group != null)
        {
            gpAfStrict = clusterStats.ClusterDataAffinity != null ? clusterStats.ClusterDataAffinity.Contains(group) : false;
        }

        for (int i = 0; i < memberInfos.Count; i++)
        {
            NodeInfo curr = (NodeInfo)memberInfos[i];
            if (curr.Status.IsAnyBitSet(NodeStatus.Coordinator | NodeStatus.SubCoordinator))
            {
                if (curr.Statistics == null)
                {
                    continue;
                }

                if (min == null || (curr.Statistics.Count < min.Statistics.Count))
                {
                    min = curr;
                }

                if (curr.DataAffinity != null)
                {
                    if (curr.DataAffinity.IsExists(group))
                    {
                        if (gMin == null || (curr.Statistics.Count < gMin.Statistics.Count))
                        {
                            gMin = (NodeInfo)memberInfos[i];
                        }
                    }
                    else if (curr.DataAffinity.Strict == false)
                    {
                        sMin = min;
                    }
                    else
                    {
                        min = sMin;
                    }
                }
                else
                {
                    sMin = min;
                }
            }
        }
    }

    if (gpAfStrict && gMin == null)
    {
        if (NCacheLog.IsInfoEnabled)
        {
            NCacheLog.Info("CoordinatorBiasedObjectCountBalancer.SelectNode",
                "strict group affinity, no node found to accommodate " + group + " data");
        }
        return null;
    }

    return (gMin == null) ? sMin : gMin;
}
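At its core this balancer is a "pick the coordinator with the fewest objects" policy, with group affinity taking precedence over the raw count when the group maps to specific nodes. Below is a standalone sketch of just the least-count selection; MemberSketch and LeastCountBalancerSketch are hypothetical types, and the affinity bookkeeping from the original is deliberately omitted.

// Least-object-count selection among eligible nodes (illustrative types only).
using System;
using System.Collections.Generic;
using System.Linq;

class MemberSketch
{
    public string Address;
    public long ObjectCount;
    public bool IsCoordinator;
}

class LeastCountBalancerSketch
{
    // Returns the coordinator with the fewest objects, or null if none is eligible.
    public static MemberSketch Select(IEnumerable<MemberSketch> members)
    {
        return members
            .Where(m => m.IsCoordinator)
            .OrderBy(m => m.ObjectCount)
            .FirstOrDefault();
    }

    static void Main()
    {
        var members = new List<MemberSketch>
        {
            new MemberSketch { Address = "10.0.0.1", ObjectCount = 900, IsCoordinator = true },
            new MemberSketch { Address = "10.0.0.2", ObjectCount = 150, IsCoordinator = true },
            new MemberSketch { Address = "10.0.0.3", ObjectCount = 10,  IsCoordinator = false },
        };

        // Prints 10.0.0.2: the least-loaded node that is a coordinator.
        Console.WriteLine(Select(members)?.Address);
    }
}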