/// <summary>
/// Broadcasts an expiration-hint add for the given key to the destination nodes.
/// </summary>
/// <param name="dests">Cluster members that should apply the hint.</param>
/// <param name="key">Key of the entry.</param>
/// <param name="eh">Expiration hint to attach to the entry.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>True if the hint was added on the destination nodes; otherwise false.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure raised during the broadcast.</exception>
protected bool Clustered_Add(ArrayList dests, object key, ExpirationHint eh, OperationContext operationContext)
{
    bool result = false;
    try
    {
        /// Ask the destination servers to attach the expiration hint.
        Function func = new Function((int)OpCodes.AddHint, new object[] { key, eh, operationContext }, false, key);
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, _asyncOperation);
        ClusterHelper.ValidateResponses(results, typeof(bool), Name);

        /// Collapse the per-node results into a single atomic status.
        result = ClusterHelper.FindAtomicAddHintReplicated(results);
    }
    catch (CacheException)
    {
        // Cache-level failures propagate unchanged; no variable needed (avoids CS0168).
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return result;
}
/// <summary>
/// Updates a batch of entries on the given destination nodes.
/// </summary>
/// <param name="dests">Cluster members that should apply the update.</param>
/// <param name="keys">Keys of the entries.</param>
/// <param name="cacheEntries">Entries to insert, parallel to <paramref name="keys"/>.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Per-key insert status merged across the responding nodes.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_Insert(ArrayList dests, object[] keys, CacheEntry[] cacheEntries, OperationContext operationContext)
{
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.InsertBlk", "enter");
        }

        /// Ask every destination server to update the objects.
        Function func = new Function((int)OpCodes.Insert, new object[] { keys, cacheEntries, operationContext }, false);
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL);
        ClusterHelper.ValidateResponses(results, typeof(Hashtable), Name);

        /// Collapse the per-node results into a single atomic status.
        return ClusterHelper.FindAtomicBulkInsertStatusReplicated(results);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.InsertBlk", "exit");
        }
    }
}
/// <summary>
/// Queries every server for the lock status of the given key.
/// </summary>
/// <param name="key">Key whose lock state is being queried.</param>
/// <param name="lockId">On return, the id of the current lock holder, if any.</param>
/// <param name="lockDate">On return, the time the current lock was taken, if any.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged lock status, or null when validation failed and the lock was released preemptively.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected LockOptions Clustered_IsLocked(object key, ref object lockId, ref DateTime lockDate, OperationContext operationContext)
{
    try
    {
        Function func = new Function((int)OpCodes.IsLocked, new object[] { key, lockId, lockDate, operationContext }, false);
        RspList results = Cluster.BroadcastToMultiple(Cluster.Servers, func, GroupRequest.GET_ALL);
        try
        {
            ClusterHelper.ValidateResponses(results, typeof(LockOptions), Name);
        }
        catch (LockingException)
        {
            // Nodes disagree on the lock state: release it preemptively rather
            // than leave a dangling lock behind.
            Clustered_UnLock(key, null, true, operationContext);
            return null;
        }
        return ClusterHelper.FindAtomicIsLockedStatusReplicated(results, ref lockId, ref lockDate);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
}
/// <summary>
/// Removes the specified keys from a single destination node.
/// </summary>
/// <param name="dest">Target node for the remove operation.</param>
/// <param name="keys">Keys of the entries to remove.</param>
/// <param name="ir">Reason the items are being removed.</param>
/// <param name="cbEntry">Callback entry to fire on removal, if any.</param>
/// <param name="notify">Whether removal notifications should be raised.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Table of the entries actually removed; empty when nothing was removed.</returns>
/// <remarks>
/// This method invokes <see cref="handleRemove"/> on the destination node.
/// </remarks>
/// <exception cref="Runtime.Exceptions.SuspectedException">Thrown when every member of the target group was suspected.</exception>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_Remove(Address dest, object[] keys, ItemRemoveReason ir, CallbackEntry cbEntry, bool notify, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity)
    {
        ServerMonitor.LogClientActivity("PartCacheBase.RemoveBlk", "");
    }
    Hashtable removedEntries = new Hashtable();
    ArrayList dests = new ArrayList();
    dests.Add(dest);
    try
    {
        Function func = new Function((int)OpCodes.Remove, new object[] { keys, ir, notify, cbEntry, operationContext }, false);
        RspList results = Cluster.Multicast(dests, func, GetFirstResponse, false);
        if (results == null)
        {
            return removedEntries;
        }

        if (results.SuspectedMembers.Count == dests.Count)
        {
            // All the members of this group have gone down;
            // the caller must retry this operation on some other group.
            throw new Runtime.Exceptions.SuspectedException("operation failed because the group member was suspected");
        }

        ClusterHelper.ValidateResponses(results, typeof(Hashtable), Name);
        ArrayList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(Hashtable));
        if (rspList.Count <= 0)
        {
            return removedEntries;
        }

        foreach (Rsp rsp in rspList)
        {
            Hashtable removed = (Hashtable)rsp.Value;
            IDictionaryEnumerator ide = removed.GetEnumerator();
            while (ide.MoveNext())
            {
                // Indexer instead of Add: if more than one response reports the same
                // key, Hashtable.Add would throw ArgumentException and abort the merge.
                removedEntries[ide.Key] = ide.Value;
            }
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return removedEntries;
}
/// <summary>
/// Returns a .NET IDictionaryEnumerator that iterates over the keys held by the
/// given destination nodes, optionally preceded by a local enumerator.
/// </summary>
/// <param name="dests">Cluster members to collect key lists from.</param>
/// <param name="local">Enumerator over the local store, or null.</param>
/// <returns>Aggregate enumerator over local and remote key sets; null when no responses arrived.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected IDictionaryEnumerator Clustered_GetEnumerator(ArrayList dests, IDictionaryEnumerator local)
{
    IDictionaryEnumerator retVal = null;
    try
    {
        Function func = new Function((int)OpCodes.KeyList, null);
        // Key lists can be large, so allow ten times the normal cluster timeout.
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, Cluster.Timeout * 10, false);
        if (results == null)
        {
            return retVal;
        }

        ClusterHelper.ValidateResponses(results, typeof(object[]), Name);

        // Keep only the responses that actually carried a key list.
        ArrayList validRsps = new ArrayList();
        for (int i = 0; i < results.size(); i++)
        {
            Rsp rsp = (Rsp)results.elementAt(i);
            if (rsp.Value != null)
            {
                validRsps.Add(rsp);
            }
        }

        // Slot 0 is reserved for the local enumerator when one was supplied.
        int index = (local == null ? 0 : 1);
        IDictionaryEnumerator[] enums = new IDictionaryEnumerator[validRsps.Count + index];
        if (local != null)
        {
            enums[0] = local;
        }
        for (int i = 0; i < validRsps.Count; i++)
        {
            Rsp valid = (Rsp)validRsps[i];
            enums[index++] = new LazyPartitionedKeysetEnumerator(this, valid.Value as object[], valid.Sender as Address, false);
        }
        retVal = new AggregateEnumerator(enums);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return retVal;
}
/// <summary>
/// Verifies that the joining node has no data integrity conflicts with other nodes of the
/// cluster.
/// </summary>
/// <returns>True if no data integrity conflicts were found; otherwise false.</returns>
/// <remarks>Each partitioned node can have its own data affinity. Data groups other than the
/// strongly affiliated groups can be load-balanced to any of the existing nodes. In such a
/// situation, if a new node joins and it has strong affinity with groups whose data
/// was previously distributed evenly, a data integrity conflict arises. To avoid such
/// conflicts each joining node first verifies that no other node in the cluster has data
/// of its groups. If another node does, the joining node has to leave the cluster.</remarks>
public bool VerifyDataIntegrity()
{
    bool integrityVarified = true;
    bool integrityIssue = false;
    try
    {
        // Only meaningful when other servers exist to conflict with.
        if (Cluster.Servers.Count > 1)
        {
            if (_stats != null && _stats.LocalNode.DataAffinity != null)
            {
                DataAffinity affinity = _stats.LocalNode.DataAffinity;
                if (affinity.Groups != null && affinity.Groups.Count > 0)
                {
                    // Ask every other server whether it already holds data of our affinity groups.
                    Function fun = new Function((int)OpCodes.VerifyDataIntegrity, (object)affinity.Groups, false);
                    RspList results = Cluster.BroadcastToServers(fun, GroupRequest.GET_ALL, false);
                    if (results != null)
                    {
                        ClusterHelper.ValidateResponses(results, typeof(bool), Name);
                        Rsp response;
                        for (int i = 0; i < results.size(); i++)
                        {
                            response = (Rsp)results.elementAt(i);
                            if (response.wasReceived())
                            {
                                // A 'true' reply means that node holds data of our groups.
                                integrityIssue = Convert.ToBoolean(response.Value);
                                if (integrityIssue)
                                {
                                    Context.NCacheLog.Error("PartitionedCacheBase.Verifydataintegrity()", "data integrity issue from " + response.Sender.ToString());
                                    integrityVarified = false;
                                }
                            }
                            else
                            {
                                // Missing reply: fail conservatively and stop polling further nodes.
                                Context.NCacheLog.Error("PartitionedCacheBase.Verifydataintegrity()", "data integrity varification not received from " + response.Sender.ToString());
                                integrityVarified = false;
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
    catch (Exception e)
    {
        if (Context != null)
        {
            Context.NCacheLog.Error("PartitionedCacheBase.Verifydataintegrity()",
                e.ToString());
        }
        // Any failure during verification is treated as a conflict.
        integrityVarified = false;
    }
    return (integrityVarified);
}
/// <summary>
/// Removes the specified keys from every server node in the cluster.
/// </summary>
/// <param name="keys">Keys of the entries to remove.</param>
/// <param name="ir">Reason the items are being removed.</param>
/// <param name="cbEntry">Callback entry to fire on removal, if any.</param>
/// <param name="notify">Whether removal notifications should be raised.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Table of removed entries, merged across all responders.</returns>
/// <remarks>
/// This method invokes <see cref="handleRemove"/> on every server node in the cluster.
/// </remarks>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_Remove(object[] keys, ItemRemoveReason ir, CallbackEntry cbEntry, bool notify, OperationContext operationContext)
{
    Hashtable removedEntries = new Hashtable();
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.RemoveBlk", "enter");
        }
        Function func = new Function((int)OpCodes.Remove, new object[] { keys, ir, notify, cbEntry, operationContext }, false);
        RspList results = Cluster.BroadcastToServers(func, GroupRequest.GET_ALL);
        if (results == null)
        {
            return removedEntries;
        }
        ClusterHelper.ValidateResponses(results, typeof(Hashtable), Name);
        ArrayList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(Hashtable));
        if (rspList.Count <= 0)
        {
            return removedEntries;
        }
        foreach (Rsp rsp in rspList)
        {
            Hashtable removed = (Hashtable)rsp.Value;
            IDictionaryEnumerator ide = removed.GetEnumerator();
            while (ide.MoveNext())
            {
                // Indexer instead of Add: every responder in a replicated cache removes
                // the same keys, so Hashtable.Add would throw ArgumentException on the
                // second node's response for any shared key.
                removedEntries[ide.Key] = ide.Value;
            }
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.RemoveBlk", "exit");
        }
    }
    return removedEntries;
}
/// <summary>
/// Removes a single entry from every server node in the cluster.
/// </summary>
/// <param name="key">Key of the entry to remove.</param>
/// <param name="ir">Reason the item is being removed.</param>
/// <param name="cbEntry">Callback entry to fire on removal, if any.</param>
/// <param name="notify">Whether removal notifications should be raised.</param>
/// <param name="lockId">Lock id the caller holds on the key, if any.</param>
/// <param name="accessType">How the key's lock should be honored.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>The removed entry, or null when nothing was removed.</returns>
/// <remarks>
/// This method invokes <see cref="handleRemove"/> on every server node in the cluster.
/// </remarks>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected CacheEntry Clustered_Remove(object key, ItemRemoveReason ir, CallbackEntry cbEntry, bool notify, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    CacheEntry retVal = null;
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Remove", "enter");
        }
        Function func = new Function((int)OpCodes.Remove, new object[] { key, ir, notify, cbEntry, lockId, accessType, operationContext }, false, key);
        RspList results = Cluster.BroadcastToServers(func, GroupRequest.GET_ALL, _asyncOperation);
        if (results == null)
        {
            return retVal;
        }
        ClusterHelper.ValidateResponses(results, typeof(OperationResponse), Name);
        Rsp rsp = ClusterHelper.FindAtomicRemoveStatusReplicated(results);
        if (rsp == null)
        {
            return retVal;
        }
        OperationResponse opRes = rsp.Value as OperationResponse;
        if (opRes != null)
        {
            CacheEntry entry = opRes.SerializablePayload as CacheEntry;
            if (entry != null)
            {
                // Re-attach the raw user payload that travels outside the serialized entry.
                entry.Value = opRes.UserPayload;
            }
            return entry;
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Remove", "exit");
        }
    }
    return retVal;
}
/// <summary>
/// Adds a single entry on the given destination nodes of the replicated cache.
/// </summary>
/// <param name="dests">Cluster members that should apply the add.</param>
/// <param name="key">Key of the entry.</param>
/// <param name="cacheEntry">Entry to add.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged add result across the responding nodes.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected CacheAddResult Clustered_Add(ArrayList dests, object key, CacheEntry cacheEntry, OperationContext operationContext)
{
    CacheAddResult result = CacheAddResult.Failure;
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Add", "enter");
        }
        /// Ask every destination server to add the object.
        Function func = new Function((int)OpCodes.Add, new object[] { key, cacheEntry, operationContext }, false, key);

        // The raw user payload travels separately from the serialized entry; when the
        // entry wraps a CallbackEntry, the payload lives on the callback wrapper.
        CallbackEntry cbEntry = cacheEntry.Value as CallbackEntry;
        Array userPayLoad = (cbEntry != null) ? cbEntry.UserData : cacheEntry.UserData;
        func.UserPayload = userPayLoad;

        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, _asyncOperation);
        ClusterHelper.ValidateResponses(results, typeof(CacheAddResult), Name);

        /// Collapse the per-node results into a single atomic status.
        result = ClusterHelper.FindAtomicAddStatusReplicated(results);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Add", "exit");
        }
    }
    return result;
}
/// <summary>
/// Retrieves the key/value data for the given group or sub-group from the destination nodes.
/// </summary>
/// <param name="dests">Cluster members to query.</param>
/// <param name="group">Group name.</param>
/// <param name="subGroup">Sub-group name.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged key/value table, or null when no node returned data.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_GetData(ArrayList dests, string group, string subGroup, OperationContext operationContext)
{
    Hashtable table = new Hashtable();
    try
    {
        Function func = new Function((int)OpCodes.GetData, new object[] { group, subGroup, operationContext }, true);
        func.Cancellable = true;
        RspList results = Cluster.Multicast(dests, func, GroupRequest.GET_ALL, false);
        if (results == null)
        {
            return null;
        }
        ClusterHelper.ValidateResponses(results, typeof(HashVector), Name);
        IList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(HashVector));
        if (rspList.Count <= 0)
        {
            return null;
        }
        foreach (Rsp rsp in rspList)
        {
            // Responses are validated as HashVector, so enumerate through IDictionary
            // (as Clustered_GetTag does) rather than a hard Hashtable cast that could
            // throw InvalidCastException.
            IDictionary cTable = rsp.Value as IDictionary;
            if (cTable != null)
            {
                IDictionaryEnumerator ide = cTable.GetEnumerator();
                while (ide.MoveNext())
                {
                    table[ide.Key] = ide.Value;
                }
            }
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return table;
}
/// <summary>
/// Collects from the destination nodes all keys matching the given tags.
/// </summary>
/// <param name="dests">Cluster members to query.</param>
/// <param name="tags">Tags to match.</param>
/// <param name="comparisonType">How the tags are combined (any/all/etc.).</param>
/// <param name="excludeSelf">Whether to skip the local node.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged key list, or null when no node returned keys.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected ArrayList Clustered_GetKeysByTag(ArrayList dests, string[] tags, TagComparisonType comparisonType, bool excludeSelf, OperationContext operationContext)
{
    ArrayList keys = new ArrayList();
    try
    {
        Function func = new Function((int)OpCodes.GetKeysByTag, new object[] { tags, comparisonType, operationContext }, excludeSelf);
        func.Cancellable = true;
        // Tag scans can be slow, so allow ten times the normal cluster timeout.
        RspList results = Cluster.Multicast(dests, func, GroupRequest.GET_ALL, false, Cluster.Timeout * 10);
        if (results == null)
        {
            return null;
        }
        // NOTE(review): responses are validated as ArrayList but filtered as
        // ClusteredArrayList — confirm the two types are compatible in ClusterHelper.
        ClusterHelper.ValidateResponses(results, typeof(ArrayList), Name);
        IList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(ClusteredArrayList));
        if (rspList.Count <= 0)
        {
            return null;
        }
        foreach (Rsp rsp in rspList)
        {
            ICollection entries = (ICollection)rsp.Value;
            if (entries != null)
            {
                keys.AddRange(entries);
            }
        }
        return keys;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
}
/// <summary>
/// Collects from the destination nodes all key/value pairs matching the given tags.
/// </summary>
/// <param name="dests">Cluster members to query.</param>
/// <param name="tags">Tags to match.</param>
/// <param name="comparisonType">How the tags are combined (any/all/etc.).</param>
/// <param name="excludeSelf">Whether to skip the local node.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged key/value table, or null when no node returned data.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_GetTag(ArrayList dests, string[] tags, TagComparisonType comparisonType, bool excludeSelf, OperationContext operationContext)
{
    Hashtable keyValues = new Hashtable();
    try
    {
        Function func = new Function((int)OpCodes.GetTag, new object[] { tags, comparisonType, operationContext }, excludeSelf);
        func.Cancellable = true;
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, false);
        if (results == null)
        {
            return null;
        }
        ClusterHelper.ValidateResponses(results, typeof(HashVector), Name);
        IList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(HashVector));
        if (rspList.Count <= 0)
        {
            return null;
        }
        foreach (Rsp rsp in rspList)
        {
            IDictionary entries = (IDictionary)rsp.Value;
            if (entries != null)
            {
                IDictionaryEnumerator ide = entries.GetEnumerator();
                while (ide.MoveNext())
                {
                    // Indexer merge: a later responder's value replaces an earlier one.
                    keyValues[ide.Key] = ide.Value;
                }
            }
        }
        return keyValues;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
}
/// <summary>
/// Removes all entries of the given group/sub-group from every server node in the cluster.
/// </summary>
/// <param name="group">Group name.</param>
/// <param name="subGroup">Sub-group name.</param>
/// <param name="notify">Whether removal notifications should be raised.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Table of removed entries, merged across all responders.</returns>
/// <remarks>
/// This method invokes <see cref="handleRemove"/> on every server node in the cluster.
/// </remarks>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected Hashtable Clustered_RemoveGroup(string group, string subGroup, bool notify, OperationContext operationContext)
{
    Hashtable removedEntries = new Hashtable();
    try
    {
        Function func = new Function((int)OpCodes.RemoveGroup, new object[] { group, subGroup, notify, operationContext }, false);
        func.Cancellable = true;
        RspList results = Cluster.BroadcastToServers(func, GroupRequest.GET_ALL, false);
        if (results == null)
        {
            return removedEntries;
        }
        ClusterHelper.ValidateResponses(results, typeof(Hashtable), Name);
        IList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(Hashtable));
        if (rspList.Count <= 0)
        {
            return removedEntries;
        }
        foreach (Rsp rsp in rspList)
        {
            Hashtable removed = (Hashtable)rsp.Value;
            IDictionaryEnumerator ide = removed.GetEnumerator();
            while (ide.MoveNext())
            {
                // Indexer instead of Add: multiple responders may report the same removed
                // key, and Hashtable.Add would throw ArgumentException on the duplicate.
                removedEntries[ide.Key] = ide.Value;
            }
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return removedEntries;
}
/// <summary>
/// Asks the destination nodes to advance the given enumeration pointer to its next chunk.
/// </summary>
/// <param name="dests">Cluster members that hold the enumeration state.</param>
/// <param name="pointer">Enumeration pointer to advance.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected void Clustered_GetNextChunk(ArrayList dests, EnumerationPointer pointer, OperationContext operationContext)
{
    try
    {
        Function func = new Function((int)OpCodes.GetNextChunk, new object[] { pointer, operationContext }, true);
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, false);
        // No payload type to check (null); only verify that no node reported a failure.
        ClusterHelper.ValidateResponses(results, null, Name);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
}
/// <summary>
/// Retrieves the list of keys for the given group or sub-group from the destination nodes.
/// </summary>
/// <param name="dests">Cluster members to query.</param>
/// <param name="group">Group name.</param>
/// <param name="subGroup">Sub-group name.</param>
/// <returns>Merged key list, or null when no node returned keys.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected ArrayList Clustered_GetKeys(ArrayList dests, string group, string subGroup)
{
    ArrayList list = new ArrayList();
    try
    {
        Function func = new Function((int)OpCodes.GetKeys, new object[] { group, subGroup }, true);
        func.Cancellable = true;
        RspList results = Cluster.Multicast(dests, func, GroupRequest.GET_ALL, false);
        if (results == null)
        {
            return null;
        }
        ClusterHelper.ValidateResponses(results, typeof(ArrayList), Name);
        IList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(ClusteredArrayList));
        if (rspList.Count <= 0)
        {
            return null;
        }
        foreach (Rsp rsp in rspList)
        {
            // Responses are filtered as ClusteredArrayList; copy through ICollection
            // (as Clustered_GetKeysByTag does) rather than a hard ArrayList cast that
            // could throw InvalidCastException.
            ICollection cList = rsp.Value as ICollection;
            if (cList != null)
            {
                list.AddRange(cList);
            }
        }
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return list;
}
/// <summary>
/// Runs a query on the destination nodes and compiles the partial results into one set.
/// </summary>
/// <param name="dests">Cluster members to query.</param>
/// <param name="queryText">Query text to execute.</param>
/// <param name="values">Named query parameter values.</param>
/// <param name="excludeSelf">Whether to skip the local node.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Compiled result set, or null when no node returned results.</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected QueryResultSet Clustered_Search(ArrayList dests, string queryText, IDictionary values, bool excludeSelf, OperationContext operationContext)
{
    QueryResultSet resultSet = new QueryResultSet();
    try
    {
        Function func = new Function((int)OpCodes.Search, new object[] { queryText, values, operationContext }, excludeSelf);
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, false);
        if (results == null)
        {
            return null;
        }
        ClusterHelper.ValidateResponses(results, typeof(QueryResultSet), Name);
        ArrayList rspList = ClusterHelper.GetAllNonNullRsp(results, typeof(QueryResultSet));
        if (rspList.Count <= 0)
        {
            return null;
        }
        foreach (Rsp rsp in rspList)
        {
            // Fold each node's partial result into the aggregate set.
            QueryResultSet partialResultSet = (QueryResultSet)rsp.Value;
            resultSet.Compile(partialResultSet);
        }
        return resultSet;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
}
/// <summary>
/// Attempts to acquire a lock for the given key on every server in the cluster.
/// </summary>
/// <param name="key">Key to lock.</param>
/// <param name="lockExpiration">Expiration policy for the lock.</param>
/// <param name="lockId">On return, the id of the granted (or existing) lock.</param>
/// <param name="lockDate">On return, the time the lock was taken.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>True if the lock was acquired on all nodes; false otherwise (including when validation forced a preemptive release).</returns>
/// <exception cref="GeneralFailureException">Wraps any non-cache failure.</exception>
protected bool Clustered_Lock(object key, LockExpiration lockExpiration, ref object lockId, ref DateTime lockDate, OperationContext operationContext)
{
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Lock", "enter");
        }
        Function func = new Function((int)OpCodes.LockKey, new object[] { key, lockId, lockDate, lockExpiration, operationContext }, false);
        RspList results = Cluster.BroadcastToMultiple(Cluster.Servers, func, GroupRequest.GET_ALL);
        try
        {
            ClusterHelper.ValidateResponses(results, typeof(LockOptions), Name);
        }
        catch (LockingException)
        {
            // Nodes disagree: release the partially acquired lock preemptively
            // so the key is not left locked on a subset of the cluster.
            Clustered_UnLock(key, null, true, operationContext);
            return false;
        }
        return ClusterHelper.FindAtomicLockStatusReplicated(results, ref lockId, ref lockDate);
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Lock", "exit");
        }
    }
}
/// <summary>
/// Inserts a single entry on the given destination nodes of the replicated cache.
/// </summary>
/// <param name="dests">Cluster members that should apply the insert.</param>
/// <param name="key">Key of the entry.</param>
/// <param name="cacheEntry">Entry to insert.</param>
/// <param name="lockId">Lock id the caller holds on the key, if any.</param>
/// <param name="accessType">How the key's lock should be honored.</param>
/// <param name="operationContext">Context of the current cache operation.</param>
/// <returns>Merged insert result; promoted to SuccessOverwrite when any node overwrote an existing entry.</returns>
protected CacheInsResultWithEntry Clustered_Insert(ArrayList dests, object key, CacheEntry cacheEntry, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Insert", "enter");
        }
        /// Ask every server to update the object, except myself. The Initializing status
        /// bit tells the remote side whether this node is still in state transfer.
        Function func = new Function((int)OpCodes.Insert, new object[] { key, cacheEntry, _statusLatch.IsAnyBitsSet(NodeStatus.Initializing), lockId, accessType, operationContext }, false, key);

        // The raw user payload travels separately from the serialized entry; when the
        // entry wraps a CallbackEntry, the payload lives on the callback wrapper.
        Array userPayLoad = null;
        if (cacheEntry.Value is CallbackEntry)
        {
            CallbackEntry cbEntry = ((CallbackEntry)cacheEntry.Value);
            userPayLoad = cbEntry.UserData;
        }
        else
        {
            userPayLoad = cacheEntry.UserData;
        }
        func.UserPayload = userPayLoad;

        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, _asyncOperation);
        ClusterHelper.ValidateResponses(results, typeof(OperationResponse), Name);

        // Bug fix: during state transfer of a replicated cache (a node joining an existing
        // one) a continuous stream of client inserts was incrementing the add/sec counter
        // instead of update/sec. If any node reports SuccessOverwrite, promote the merged
        // result to SuccessOverwrite so only update/sec is incremented.
        CacheInsResultWithEntry retVal = ClusterHelper.FindAtomicInsertStatusReplicated(results);
        if (retVal != null && retVal.Result == CacheInsResult.Success && results != null)
        {
            for (int i = 0; i < results.Results.Count; i++)
            {
                if (((CacheInsResultWithEntry)((OperationResponse)results.Results[i]).SerializablePayload).Result == CacheInsResult.SuccessOverwrite)
                {
                    retVal.Result = CacheInsResult.SuccessOverwrite;
                    break;
                }
            }
        }
        return (retVal);
    }
    catch (CacheException e)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Insert", "exit");
        }
    }
}