/// <summary>
/// Adds a pair of key and value to the cache. Throws an exception or reports error
/// if the specified key already exists in the cache. Verifies that this node still
/// owns the key's bucket, records the operation in the bucket log, and maintains
/// bucket statistics on success.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <param name="isUserOperation">true if this is a user-initiated operation.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>returns the result of operation.</returns>
internal override CacheAddResult AddInternal(object key, CacheEntry cacheEntry, bool isUserOperation, OperationContext operationContext)
{
    string keyString = key as string;
    int bucketId = GetBucketId(keyString);

    // Reject the operation if ownership of this bucket has moved to another node.
    if (IsBucketTransfered(bucketId))
    {
        throw new StateTransferException("I am no more the owner of this bucket");
    }

    // In log-before mode a user operation is only recorded in the operation log
    // and reported as successful without touching the local store.
    if (_logMgr.IsLoggingEnbaled(bucketId, LogMode.LogBeforeActualOperation) && isUserOperation)
    {
        _logMgr.LogOperation(bucketId, key, cacheEntry, OperationType.Add);
        return CacheAddResult.Success;
    }

    CacheAddResult addResult = base.AddInternal(key, cacheEntry, isUserOperation, operationContext);
    bool stored = addResult == CacheAddResult.Success || addResult == CacheAddResult.SuccessNearEviction;
    if (stored)
    {
        IncrementBucketStats(keyString, bucketId, cacheEntry.DataSize);
        if (isUserOperation)
        {
            _logMgr.LogOperation(bucketId, key, cacheEntry, OperationType.Add);
        }
    }
    return addResult;
}
/// <summary>
/// Adds a pair of key and value to the cache. Throws an exception or reports error
/// if the specified key already exists in the cache. On success the entry is also
/// registered with the group index and, when available, the query index; index
/// memory sizes are then published to the performance counters.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <param name="isUserOperation">true if this is a user-initiated operation.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>returns the result of operation.</returns>
internal override CacheAddResult AddInternal(object key, CacheEntry cacheEntry, bool isUserOperation, OperationContext operationContext)
{
    CacheAddResult outcome = base.AddInternal(key, cacheEntry, isUserOperation, operationContext);

    bool stored = outcome == CacheAddResult.Success || outcome == CacheAddResult.SuccessNearEviction;
    if (stored)
    {
        _grpIndexManager.AddToGroup(key, cacheEntry.GroupInfo);
        if (_queryIndexManager != null && cacheEntry.QueryInfo != null)
        {
            _queryIndexManager.AddToIndex(key, cacheEntry, operationContext);
        }
    }

    // Publish current index memory usage when stats collection is enabled.
    if (_context.PerfStatsColl != null)
    {
        if (_queryIndexManager != null)
        {
            _context.PerfStatsColl.SetQueryIndexSize(_queryIndexManager.IndexInMemorySize);
        }
        _context.PerfStatsColl.SetGroupIndexSize(_grpIndexManager.IndexInMemorySize);
    }

    return outcome;
}
/// <summary>
/// Adds a pair of key and value to the cache. If the specified key already exists
/// in the cache it is updated, otherwise a new item is added to the cache.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <param name="isUserOperation">true if this is a user-initiated operation.</param>
/// <param name="oldEntry">the previous entry value, if any.</param>
/// <param name="operationContext">context of the operation.</param>
/// <param name="updateIndex">whether the indexes should be updated.</param>
/// <returns>returns the result of operation.</returns>
internal override CacheInsResult InsertInternal(object key, CacheEntry cacheEntry, bool isUserOperation, CacheEntry oldEntry, OperationContext operationContext, bool updateIndex)
{
    if (_primary == null || _secondary == null)
    {
        throw new InvalidOperationException();
    }

    // An existing item must be updated on whichever store already holds it.
    if (_primary.ContainsInternal(key))
    {
        return _primary.InsertInternal(key, cacheEntry, false, oldEntry, operationContext, updateIndex);
    }
    if (_secondary.Contains(key, operationContext))
    {
        return _secondary.InsertInternal(key, cacheEntry, false, oldEntry, operationContext, updateIndex);
    }

    // The key is new: perform an add and translate its result to an insert result.
    switch (AddInternal(key, cacheEntry, false, operationContext))
    {
        case CacheAddResult.Success:
            return CacheInsResult.Success;
        case CacheAddResult.NeedsEviction:
            return CacheInsResult.NeedsEviction;
        default:
            return CacheInsResult.Failure;
    }
}
/// <summary>
/// Internal method used to insert a new entry into the cache. If either the
/// primary or the secondary store already holds the key, that store performs an
/// in-place update; otherwise the entry is added as a new item.
/// </summary>
/// <param name="key">Key that identifies the entry.</param>
/// <param name="cacheEntry">The entry instance.</param>
/// <param name="isUserOperation">True if this is a user-initiated operation.</param>
/// <param name="oldEntry">Value of the previous entry, if any.</param>
/// <param name="operationContext">Context of the operation.</param>
/// <returns>Result of the operation.</returns>
internal override CacheInsResult InsertInternal(object key, CacheEntry cacheEntry, bool isUserOperation, CacheEntry oldEntry, OperationContext operationContext)
{
    if (_primary == null || _secondary == null)
    {
        throw new InvalidOperationException();
    }

    // Whichever store already owns the key performs the update.
    if (_primary.ContainsInternal(key))
    {
        return _primary.InsertInternal(key, cacheEntry, false, oldEntry, operationContext);
    }
    if (_secondary.Contains(key, operationContext))
    {
        return _secondary.InsertInternal(key, cacheEntry, false, oldEntry, operationContext);
    }

    // The key is new: add it and map the add result onto an insert result.
    switch (this.AddInternal(key, cacheEntry, false))
    {
        case CacheAddResult.Success:
            return CacheInsResult.Success;
        case CacheAddResult.NeedsEviction:
            return CacheInsResult.NeedsEviction;
        default:
            return CacheInsResult.Failure;
    }
}
/// <summary>
/// Adds the object to the specified node in the cluster.
/// </summary>
/// <param name="dest">target node address.</param>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry to add.</param>
/// <param name="taskId">id of the task associated with the entry, if any.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>result of the add operation on the remote node.</returns>
/// <remarks>
/// This method invokes <see cref="handleAdd"/> on the destination server node.
/// </remarks>
protected CacheAddResult Clustered_Add(Address dest, object key, CacheEntry cacheEntry, string taskId, OperationContext operationContext)
{
    CacheAddResult retVal = CacheAddResult.Success;
    CacheEntry cloneValue = null;
    try
    {
        operationContext?.MarkInUse(NCModulesConstants.Topology);
        cacheEntry?.MarkInUse(NCModulesConstants.Topology);

        Array userPayLoad;
        long payLoadSize;
        // Clone the entry so the remote call does not share state with the local copy.
        _context.CachingSubSystemDataService.GetEntryClone(cacheEntry, out cloneValue, out userPayLoad, out payLoadSize);

        Function func = new Function((int)OpCodes.Add, new object[] { key, cloneValue, taskId, operationContext });
        func.UserPayload = userPayLoad;

        object result = Cluster.SendMessage(dest, func, GroupRequest.GET_FIRST);
        if (result == null)
        {
            return retVal;
        }
        if (result is CacheAddResult)
        {
            retVal = (CacheAddResult)result;
        }
        else if (result is System.Exception)
        {
            // The remote node reported a failure; surface it to the caller.
            throw (Exception)result;
        }
    }
    // Known cluster/cache exceptions propagate unchanged; only unexpected
    // failures are wrapped in a GeneralFailureException. (Unused catch
    // variables removed to avoid CS0168 warnings.)
    catch (Runtime.Exceptions.SuspectedException)
    {
        throw;
    }
    catch (Runtime.Exceptions.TimeoutException)
    {
        throw;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        operationContext?.MarkFree(NCModulesConstants.Topology);
        if (cloneValue != null)
        {
            cloneValue.MarkFree(NCModulesConstants.Global);
        }
        cacheEntry?.MarkFree(NCModulesConstants.Topology);
        // Cloned entries are pooled; return the clone once the call completes.
        MiscUtil.ReturnEntryToPool(cloneValue, Context.TransactionalPoolManager);
    }
    return retVal;
}
/// <summary>
/// Adds a pair of key and value to the cache. Throws an exception or reports error
/// if the specified key already exists in the cache. Delegates to the wrapped
/// internal cache when one is attached; otherwise reports failure.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <param name="notify">whether notifications should be raised.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>returns the result of operation.</returns>
public override CacheAddResult Add(object key, CacheEntry cacheEntry, bool notify, OperationContext operationContext)
{
    if (Internal == null)
    {
        return CacheAddResult.Failure;
    }
    return Internal.Add(key, cacheEntry, notify, operationContext);
}
/// <summary>
/// Adds the object to the specified node in the cluster.
/// </summary>
/// <param name="dest">target node address.</param>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry to add.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>result of the add operation on the remote node.</returns>
/// <remarks>
/// This method invokes <see cref="handleAdd"/> on the destination server node.
/// </remarks>
protected CacheAddResult Clustered_Add(Address dest, object key, CacheEntry cacheEntry, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity)
    {
        ServerMonitor.LogClientActivity("PartCacheBase.Add_1", "");
    }
    CacheAddResult retVal = CacheAddResult.Success;
    try
    {
        // The value travels separately as the function's user payload, so the
        // entry itself is sent without its value.
        Function func = new Function((int)OpCodes.Add, new object[] { key, cacheEntry.CloneWithoutValue(), operationContext });
        Array userPayLoad = null;
        if (cacheEntry.Value is CallbackEntry)
        {
            CallbackEntry cbEntry = ((CallbackEntry)cacheEntry.Value);
            userPayLoad = cbEntry.UserData;
        }
        else
        {
            userPayLoad = cacheEntry.UserData;
        }
        func.UserPayload = userPayLoad;
        object result = Cluster.SendMessage(dest, func, GroupRequest.GET_FIRST);
        if (result == null)
        {
            return retVal;
        }
        if (result is CacheAddResult)
        {
            retVal = (CacheAddResult)result;
        }
        else if (result is System.Exception)
        {
            // The remote node reported a failure; surface it to the caller.
            throw (Exception)result;
        }
    }
    // Known cluster/cache exceptions propagate unchanged; only unexpected
    // failures are wrapped in a GeneralFailureException. (Unused catch
    // variables removed to avoid CS0168 warnings.)
    catch (Runtime.Exceptions.SuspectedException)
    {
        throw;
    }
    catch (Runtime.Exceptions.TimeoutException)
    {
        throw;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return retVal;
}
/// <summary>
/// Adds the object to the cluster. Does load balancing as well.
/// </summary>
/// <param name="dests">list of destination node addresses.</param>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry to add.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>atomic result of the add across all destination nodes.</returns>
/// <remarks>
/// This method either invokes <see cref="handleAdd"/> on one of the server nodes
/// in the cluster, or invokes <see cref="Local_Add"/> locally.
/// </remarks>
protected CacheAddResult Clustered_Add(ArrayList dests, object key, CacheEntry cacheEntry, OperationContext operationContext)
{
    CacheAddResult result = CacheAddResult.Failure;
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Add", "enter");
        }
        // Ask every server to add the object, except myself.
        Function func = new Function((int)OpCodes.Add, new object[] { key, cacheEntry, operationContext }, false, key);
        Array userPayLoad = null;
        if (cacheEntry.Value is CallbackEntry)
        {
            CallbackEntry cbEntry = ((CallbackEntry)cacheEntry.Value);
            userPayLoad = cbEntry.UserData;
        }
        else
        {
            userPayLoad = cacheEntry.UserData;
        }
        func.UserPayload = userPayLoad;
        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, _asyncOperation);
        ClusterHelper.ValidateResponses(results, typeof(CacheAddResult), Name);
        // Check if the operation failed on any node.
        result = ClusterHelper.FindAtomicAddStatusReplicated(results);
    }
    // Cache exceptions propagate unchanged; only unexpected failures are wrapped.
    // (Unused catch variable removed to avoid a CS0168 warning.)
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Add", "exit");
        }
    }
    return result;
}
/// <summary>
/// Internal method invoked to add a new item. On success the entry is also
/// registered with the group index and, when available, the query index.
/// </summary>
/// <param name="key">Key of the item.</param>
/// <param name="cacheEntry">Instance of the entry being added.</param>
/// <param name="isUserOperation">True if this is a user-initiated operation.</param>
/// <returns>Result of the operation.</returns>
internal override CacheAddResult AddInternal(object key, CacheEntry cacheEntry, bool isUserOperation)
{
    CacheAddResult outcome = base.AddInternal(key, cacheEntry, isUserOperation);

    bool stored = outcome == CacheAddResult.Success || outcome == CacheAddResult.SuccessNearEviction;
    if (stored)
    {
        _grpIndexManager.AddToGroup(key, cacheEntry.GroupInfo);
        if (_queryIndexManager != null && cacheEntry.QueryInfo != null)
        {
            _queryIndexManager.AddToIndex(key, cacheEntry);
        }
    }
    return outcome;
}
/// <summary>
/// Adds a pair of key and value to the cache after verifying that every key this
/// entry depends on already exists. On a successful add, registers this key in the
/// dependency lists of each key it depends on.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <param name="notify">whether notifications should be raised.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>returns the result of operation.</returns>
/// <exception cref="OperationFailedException">thrown when a dependency key is missing.</exception>
public override CacheAddResult Add(object key, CacheEntry cacheEntry, bool notify, OperationContext operationContext)
{
    CacheAddResult result = CacheAddResult.Failure;
    if (this.Internal != null)
    {
        object[] dependingKeys = cacheEntry.KeysIAmDependingOn;
        if (dependingKeys != null)
        {
            // Every key this entry depends on must already exist in the cache.
            // (Collapses the original's three redundant null checks into one guard.)
            Hashtable found = this.Contains(dependingKeys, operationContext);
            ArrayList foundKeys = found["items-found"] as ArrayList;
            if (foundKeys == null || foundKeys.Count != dependingKeys.Length)
            {
                throw new OperationFailedException("One of the dependency keys does not exist.");
            }
        }

        result = this.Internal.Add(key, cacheEntry, notify, operationContext);

        if (result == CacheAddResult.Success && dependingKeys != null)
        {
            // Build the reverse map: dependency key -> list of keys depending on it.
            Hashtable dependencyTable = new Hashtable();
            for (int i = 0; i < dependingKeys.Length; i++)
            {
                if (dependencyTable[dependingKeys[i]] == null)
                {
                    dependencyTable.Add(dependingKeys[i], new ArrayList());
                }
                ((ArrayList)dependencyTable[dependingKeys[i]]).Add(key);
            }
            this.Internal.AddDependencyKeyList(dependencyTable, operationContext);
        }
    }
    return result;
}
/// <summary>
/// Returns the set of nodes where the addition was performed as an atomic operation.
/// </summary>
/// <param name="results">responses collected from all members of cluster.</param>
/// <returns>list of nodes where the operation succeeded</returns>
public static CacheAddResult FindAtomicAddStatusReplicated(RspList results)
{
    // 'res' carries the most recent member response; it is returned when no
    // timeout/suspect tally rule below overrides it.
    CacheAddResult res = CacheAddResult.Failure;
    if (results == null)
    {
        return(res);
    }
    int timeoutCount = 0;
    int suspectedCount = 0;
    int successCount = 0;
    for (int i = 0; i < results.size(); i++)
    {
        Rsp rsp = (Rsp)results.elementAt(i);
        // Suspected members are only tallied here; they may still count toward an
        // overall success below.
        if (rsp.wasSuspected())
        {
            //throw new Alachisoft.NGroups.SuspectedException(rsp.Sender);
            suspectedCount++;
            continue;
        }
        // No response and not suspected => the member timed out.
        if (!rsp.wasReceived() && !rsp.wasSuspected())
        {
            //throw new Alachisoft.NGroups.TimeoutException();
            timeoutCount++;
            continue;
        }
        res = (CacheAddResult)rsp.Value;
        if (res == CacheAddResult.Success)
        {
            successCount++;
        }
        // Anything other than Success/KeyExists is a hard failure on that member;
        // report it immediately without inspecting the remaining responses.
        if (res != CacheAddResult.Success && res != CacheAddResult.KeyExists)
        {
            return(res);
        }
    }
    if (suspectedCount > 0 && successCount > 0 && (suspectedCount + successCount == results.size()))
    {
        //as operation is successfull on all other nodes other than suspected node(s).
        return(CacheAddResult.Success);
    }
    if (timeoutCount > 0 && (timeoutCount + successCount == results.size()))
    {
        if (successCount > 0)
        {
            //operation is not succeeded on some of the nodes; therefore we throw timeout exception.
            return(CacheAddResult.PartialTimeout);
        }
        else
        {
            //operation timed out on all of the node; no need to rollback.
            return(CacheAddResult.FullTimeout);
        }
    }
    // Mixed timeouts and suspicions: treat as partial timeout if anyone succeeded,
    // otherwise as a full timeout.
    if (timeoutCount > 0 && suspectedCount > 0)
    {
        if (successCount > 0)
        {
            return(CacheAddResult.PartialTimeout);
        }
        else
        {
            return(CacheAddResult.FullTimeout);
        }
    }
    return(res);
}
/// <summary>
/// Bulk add: adds each key/entry pair whose dependency keys all exist, then
/// registers dependency lists for the successfully added entries. Entries with a
/// missing dependency key are reported as OperationFailedException in the result.
/// </summary>
/// <param name="keys">keys of the entries.</param>
/// <param name="cacheEntries">the cache entries, parallel to <paramref name="keys"/>.</param>
/// <param name="notify">whether notifications should be raised.</param>
/// <param name="operationContext">context of the operation.</param>
/// <returns>per-key results: a CacheAddResult or an exception for each key.</returns>
public override Hashtable Add(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    Hashtable hashtable = new Hashtable();
    // list  : keys whose dependencies are satisfied (or that have none)
    // list2 : keys rejected because a dependency key is missing
    // list3 : entries parallel to 'list'
    ArrayList list = new ArrayList();
    ArrayList list2 = new ArrayList();
    ArrayList list3 = new ArrayList();
    if (this.Internal != null)
    {
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            object[] keysIAmDependingOn = cacheEntries[i].KeysIAmDependingOn;
            if (keysIAmDependingOn != null)
            {
                // Accept the entry only if every dependency key was found.
                Hashtable hashtable2 = this.Contains(keysIAmDependingOn, operationContext);
                if ((hashtable2.ContainsKey("items-found") && (hashtable2["items-found"] != null)) && (keysIAmDependingOn.Length == ((ArrayList)hashtable2["items-found"]).Count))
                {
                    list.Add(keys[i]);
                    list3.Add(cacheEntries[i]);
                }
                else
                {
                    list2.Add(keys[i]);
                }
            }
            else
            {
                list.Add(keys[i]);
                list3.Add(cacheEntries[i]);
            }
        }
        CacheEntry[] array = new CacheEntry[list3.Count];
        list3.CopyTo(array);
        hashtable = this.Internal.Add(list.ToArray(), array, notify, operationContext);
        // For each successfully added entry, record this key in the dependency
        // list of every key it depends on. 'array[j]' is parallel to 'list[j]'.
        for (int j = 0; j < list.Count; j++)
        {
            if (!(hashtable[list[j]] is Exception))
            {
                CacheAddResult result = (CacheAddResult)hashtable[list[j]];
                object[] objArray2 = array[j].KeysIAmDependingOn;
                if ((result == CacheAddResult.Success) && (objArray2 != null))
                {
                    // Reverse map: dependency key -> list of keys depending on it.
                    Hashtable table = new Hashtable();
                    for (int m = 0; m < objArray2.Length; m++)
                    {
                        if (table[objArray2[m]] == null)
                        {
                            table.Add(objArray2[m], new ArrayList());
                        }
                        ((ArrayList)table[objArray2[m]]).Add(list[j]);
                    }
                    this.Internal.AddDependencyKeyList(table, operationContext);
                }
            }
        }
        // Report each rejected key with the dependency failure.
        for (int k = 0; k < list2.Count; k++)
        {
            hashtable.Add(list2[k], new OperationFailedException("One of the dependency keys does not exist."));
        }
    }
    return(hashtable);
}