/// <summary>
/// Resynchronizes the cache entry with the data source.
/// </summary>
/// <param name="key">Key of the item.</param>
/// <param name="entry">Entry instance.</param>
/// <param name="flag">Associated flag set.</param>
/// <param name="group">Group the entry belongs to.</param>
/// <param name="subGroup">Sub-group of the entry.</param>
/// <param name="providerName">Name of the provider.</param>
/// <param name="operationContext">Context of the operation.</param>
/// <returns>The value read through from the data source, or null if none was found.</returns>
public object ResyncCacheItem(string key, out CacheEntry entry, ref BitSet flag, string group, string subGroup, string providerName, OperationContext operationContext)
{
    ProviderCacheItem item = null;
    this.ReadThru(key, out item, providerName);

    UserBinaryObject obj2 = null;
    try
    {
        obj2 = this.GetCacheEntry(key, item, ref flag, group, subGroup, out entry);
        if (obj2 == null)
        {
            return obj2;
        }

        CacheInsResultWithEntry entry2 = _context.CacheImpl.Insert(key, entry, false, null, 0, LockAccessType.IGNORE_LOCK, operationContext);
        if (entry2.Result == CacheInsResult.Failure)
        {
            throw new OperationFailedException("Operation failed to synchronize with data source");
        }
        if (entry2.Result == CacheInsResult.NeedsEviction)
        {
            throw new OperationFailedException("The cache is full and not enough items could be evicted.");
        }
    }
    catch (Exception exception)
    {
        throw new OperationFailedException("Error occurred while synchronization with data source. Error: " + exception.Message, exception);
    }
    return obj2;
}
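// --- Illustrative sketch (not part of the original source) ---
// Minimal caller for the read-through resync above. The datasourceMgr instance,
// the provider name "myReadThruProvider", and the parameterless BitSet constructor
// are assumptions for illustration; only ResyncCacheItem's signature comes from the
// method above.
CacheEntry refreshedEntry;
BitSet flags = new BitSet();
OperationContext context = new OperationContext(OperationContextFieldName.OperationType,
    OperationContextOperationType.CacheOperation);

object value = datasourceMgr.ResyncCacheItem("customer:42", out refreshedEntry, ref flags,
    null, null, "myReadThruProvider", context);

if (value == null)
{
    // The backing store returned nothing for the key, so no entry was inserted.
}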
/// <summary>
/// Updates or Adds the object to the cluster.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <returns>cache entry.</returns>
/// <remarks>
/// This method invokes <see cref="handleInsert"/> on the specified node.
/// </remarks>
protected CacheInsResultWithEntry Clustered_Insert(Address dest, object key, CacheEntry cacheEntry, string taskId, object lockId, ulong version, LockAccessType accessType, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity)
    {
        ServerMonitor.LogClientActivity("PartCacheBase.Insert", "");
    }

    CacheInsResultWithEntry retVal = null;
    CacheEntry cloneValue = null;
    try
    {
        operationContext?.MarkInUse(NCModulesConstants.Topology);

        Array userPayLoad;
        long payLoadSize;
        _context.CachingSubSystemDataService.GetEntryClone(cacheEntry, out cloneValue, out userPayLoad, out payLoadSize);

        Function func = new Function((int)OpCodes.Insert, new object[] { key, cloneValue, taskId, lockId, accessType, version, operationContext });
        func.UserPayload = userPayLoad;
        func.ResponseExpected = true;

        object result = Cluster.SendMessage(dest, func, GroupRequest.GET_FIRST, false);
        if (result == null)
        {
            return retVal;
        }

        retVal = (CacheInsResultWithEntry)((OperationResponse)result).SerializablePayload;
        if (retVal.Entry != null && ((OperationResponse)result).UserPayload != null)
        {
            retVal.Entry.Value = ((OperationResponse)result).UserPayload;
        }
    }
    catch (Runtime.Exceptions.SuspectedException)
    {
        throw;
    }
    catch (Runtime.Exceptions.TimeoutException)
    {
        throw;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        operationContext?.MarkFree(NCModulesConstants.Topology);
        if (retVal == null)
        {
            retVal = CacheInsResultWithEntry.CreateCacheInsResultWithEntry(_context.TransactionalPoolManager);
        }
        MiscUtil.ReturnEntryToPool(cloneValue, Context.TransactionalPoolManager);
    }
    return retVal;
}
/// <summary>
/// Adds a pair of key and value to the cache. If the specified key already exists
/// in the cache, it is updated; otherwise a new item is added to the cache.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <returns>returns the result of operation.</returns>
public override CacheInsResultWithEntry Insert(object key, CacheEntry cacheEntry, bool notify, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    CacheInsResultWithEntry retVal = new CacheInsResultWithEntry();
    if (Internal != null)
    {
        retVal = Internal.Insert(key, cacheEntry, notify, lockId, accessType, operationContext);
    }
    return retVal;
}
/// <summary>
/// Updates or Adds the object to the cluster.
/// </summary>
/// <param name="dest"></param>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry"></param>
/// <param name="lockId"></param>
/// <param name="accessType"></param>
/// <param name="operationContext"></param>
/// <returns>cache entry.</returns>
/// <remarks>
/// This method invokes <see cref="handleInsert"/> on the specified node.
/// </remarks>
protected CacheInsResultWithEntry Clustered_Insert(Address dest, object key, CacheEntry cacheEntry, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity)
    {
        ServerMonitor.LogClientActivity("PartCacheBase.Insert", "");
    }

    CacheInsResultWithEntry retVal = new CacheInsResultWithEntry();
    try
    {
        Function func = new Function((int)OpCodes.Insert, new object[] { key, cacheEntry.CloneWithoutValue(), lockId, accessType, operationContext });

        Array userPayLoad = null;
        if (cacheEntry.Value is CallbackEntry)
        {
            CallbackEntry cbEntry = (CallbackEntry)cacheEntry.Value;
            userPayLoad = cbEntry.UserData;
        }
        else
        {
            userPayLoad = cacheEntry.UserData;
        }

        func.UserPayload = userPayLoad;
        func.ResponseExpected = true;

        object result = Cluster.SendMessage(dest, func, GroupRequest.GET_FIRST, false);
        if (result == null)
        {
            return retVal;
        }

        retVal = (CacheInsResultWithEntry)((OperationResponse)result).SerializablePayload;
        if (retVal.Entry != null)
        {
            retVal.Entry.Value = ((OperationResponse)result).UserPayload;
        }
    }
    catch (Runtime.Exceptions.SuspectedException)
    {
        throw;
    }
    catch (Runtime.Exceptions.TimeoutException)
    {
        throw;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    return retVal;
}
internal static bool ReturnCacheInsResultToPool(CacheInsResultWithEntry result, PoolManager poolManager)
{
    if (poolManager == null || result == null || !result.FromPool(poolManager))
    {
        return false;
    }

    result.ReturnLeasableToPool();
    poolManager.GetCacheInsResultWithEntryPool().Return(result);
    return true;
}
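// --- Illustrative sketch (not part of the original source) ---
// How a pooled result might round-trip through the helper above, assuming the helper
// lives on MiscUtil alongside ReturnEntryToPool and that _context exposes the same
// TransactionalPoolManager used elsewhere in this section.
CacheInsResultWithEntry pooledResult =
    CacheInsResultWithEntry.CreateCacheInsResultWithEntry(_context.TransactionalPoolManager);
try
{
    pooledResult.Result = CacheInsResult.Success;
    // ... consume the result ...
}
finally
{
    // Returns false (and does nothing) when the instance was not leased from this pool.
    MiscUtil.ReturnCacheInsResultToPool(pooledResult, _context.TransactionalPoolManager);
}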
/// <summary> Do write-thru now. </summary> public void Process() { lock (this) { try { if (_val == null) { ProviderCacheItem item = null; LanguageContext languageContext = LanguageContext.NONE; OperationContext operationContext = new OperationContext(); CacheEntry entry = null; object userBrinaryObject = null; try { _parent.ReadThru(_key, out item, _resyncProviderName, out languageContext, operationContext); userBrinaryObject = _parent.GetCacheEntry(_key, item, ref this._flag, _groupInfo != null ? _groupInfo.Group : null, _groupInfo != null ? _groupInfo.SubGroup : null, out entry, languageContext); } catch (Exception ex) { _val = ex; _parent.Context.NCacheLog.Error("DatasourceMgr.ResyncCacheItem", ex.Message + " " + ex.StackTrace); } if (!(_val is Exception) && userBrinaryObject != null) { operationContext.Add(OperationContextFieldName.OperationType, OperationContextOperationType.CacheOperation); CacheInsResultWithEntry result = _parent.Context.CacheImpl.Insert(_key, entry, true, null, 0, LockAccessType.IGNORE_LOCK, operationContext); if (result != null && result.Result == CacheInsResult.IncompatibleGroup) { _parent.Context.CacheImpl.Remove(_key, ItemRemoveReason.Removed, true, null, 0, LockAccessType.IGNORE_LOCK, new OperationContext(OperationContextFieldName.OperationType, OperationContextOperationType.CacheOperation)); } } else { _parent.Context.CacheImpl.Remove(_key, ItemRemoveReason.Removed, true, null, 0, LockAccessType.IGNORE_LOCK, new OperationContext(OperationContextFieldName.OperationType, OperationContextOperationType.CacheOperation)); } } } catch (Exception e) { _val = e; _parent.Context.NCacheLog.Error("DatasourceMgr.ResyncCacheItem", e.Message + " " + e.StackTrace); } finally { _parent.Context.PerfStatsColl.IncrementCountStats(_parent.Context.CacheInternal.Count); _parent.Queue.Remove(_key); } } }
/// <summary>
/// Adds a pair of key and value to the cache. If the specified key already exists
/// in the cache, it is updated; otherwise a new item is added to the cache.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <returns>returns the result of operation.</returns>
public sealed override CacheInsResultWithEntry Insert(object key, CacheEntry cacheEntry, bool notify, bool isUserOperation, object lockId, LockAccessType access, OperationContext operationContext)
{
    CacheInsResultWithEntry result = new CacheInsResultWithEntry();
    try
    {
        CacheEntry pe = null;
        CallbackEntry cbEntry = null;
        OperationID opId = operationContext.OperatoinID;
        EventId eventId = null;
        EventContext eventContext = null;

        pe = GetInternal(key, false, operationContext);
        result.Entry = pe;

        if (pe != null && access != LockAccessType.IGNORE_LOCK)
        {
            if (access == LockAccessType.RELEASE || access == LockAccessType.DONT_RELEASE)
            {
                if (pe.IsItemLocked() && !pe.CompareLock(lockId))
                {
                    result.Result = CacheInsResult.ItemLocked;
                    result.Entry = null;
                    return result;
                }
            }
            if (access == LockAccessType.DONT_RELEASE)
            {
                cacheEntry.CopyLock(pe.LockId, pe.LockDate, pe.LockExpiration);
            }
            else
            {
                cacheEntry.ReleaseLock();
            }
        }

        ExpirationHint peExh = pe == null ? null : pe.ExpirationHint;

        if (pe != null && pe.Value is CallbackEntry)
        {
            cbEntry = pe.Value as CallbackEntry;
            cacheEntry = CacheHelper.MergeEntries(pe, cacheEntry);
        }

        result.Result = InsertInternal(key, cacheEntry, isUserOperation, pe, operationContext);

        if ((result.Result == CacheInsResult.Success || result.Result == CacheInsResult.SuccessNearEvicition)
            && _stateTransferKeyList != null && _stateTransferKeyList.ContainsKey(key))
        {
            result.Result = result.Result == CacheInsResult.Success
                ? CacheInsResult.SuccessOverwrite
                : CacheInsResult.SuccessOverwriteNearEviction;
        }

        // Not enough space, evict and try again.
        if (result.Result == CacheInsResult.NeedsEviction || result.Result == CacheInsResult.SuccessNearEvicition
            || result.Result == CacheInsResult.SuccessOverwriteNearEviction)
        {
            Evict();
            if (result.Result == CacheInsResult.SuccessNearEvicition)
                result.Result = CacheInsResult.Success;
            if (result.Result == CacheInsResult.SuccessOverwriteNearEviction)
                result.Result = CacheInsResult.SuccessOverwrite;
        }

        // Operation completed!
        if (result.Result == CacheInsResult.Success || result.Result == CacheInsResult.SuccessOverwrite)
        {
            // Remove the old hint from the expiry index.
            if (peExh != null)
                _context.ExpiryMgr.RemoveFromIndex(key);

            if (cacheEntry.ExpirationHint != null)
            {
                cacheEntry.ExpirationHint.CacheKey = (string)key;
                if (isUserOperation)
                {
                    try
                    {
                        _context.ExpiryMgr.ResetHint(peExh, cacheEntry.ExpirationHint);
                    }
                    catch (Exception)
                    {
                        RemoveInternal(key, ItemRemoveReason.Removed, false, operationContext);
                        throw;
                    }
                }
                else
                {
                    cacheEntry.ExpirationHint.ReInitializeHint(Context);
                }
                _context.ExpiryMgr.UpdateIndex(key, cacheEntry);
            }

            if (IsSelfInternal)
            {
                _context.PerfStatsColl.IncrementCountStats((long)Count);
            }
        }

        _stats.UpdateCount(this.Count);

        switch (result.Result)
        {
            case CacheInsResult.Success:
                break;

            case CacheInsResult.SuccessOverwrite:
                if (notify)
                {
                    EventCacheEntry eventCacheEntry = CacheHelper.CreateCacheEventEntry(Runtime.Events.EventDataFilter.DataWithMetadata, cacheEntry);
                    EventCacheEntry oldEventCacheEntry = CacheHelper.CreateCacheEventEntry(Runtime.Events.EventDataFilter.DataWithMetadata, pe);

                    if (cbEntry != null)
                    {
                        if (cbEntry.ItemUpdateCallbackListener != null && cbEntry.ItemUpdateCallbackListener.Count > 0)
                        {
                            if (!operationContext.Contains(OperationContextFieldName.EventContext)) // for atomic operations
                            {
                                eventId = EventId.CreateEventId(opId);
                                eventContext = new EventContext();
                            }
                            else // for bulk operations
                            {
                                eventId = ((EventContext)operationContext.GetValueByField(OperationContextFieldName.EventContext)).EventID;
                            }

                            eventContext = new EventContext();
                            eventId.EventType = EventType.ITEM_UPDATED_CALLBACK;
                            eventContext.Add(EventContextFieldName.EventID, eventId);
                            eventContext.Item = eventCacheEntry;
                            eventContext.OldItem = oldEventCacheEntry;

                            NotifyCustomUpdateCallback(key, cbEntry.ItemUpdateCallbackListener, false, (OperationContext)operationContext.Clone(), eventContext);
                        }
                    }
                }
                break;
        }
    }
    finally
    {
    }

    if (_context.PerfStatsColl != null)
    {
        _context.PerfStatsColl.SetCacheSize(Size);
    }
    return result;
}
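// --- Illustrative sketch (not part of the original source) ---
// How the lock-aware branch of the Insert above behaves from a caller's point of view.
// The localCache instance, the newEntry value, and the way lockId was obtained are
// assumptions for illustration; the argument order matches the method's signature
// (key, cacheEntry, notify, isUserOperation, lockId, access, operationContext).
object lockId = null;          // handle assumed to come from a prior lock-acquiring operation
CacheEntry newEntry = null;    // placeholder entry for illustration

// DONT_RELEASE copies the existing lock onto the new entry, so the item stays locked
// by the same owner after the update.
CacheInsResultWithEntry updated = localCache.Insert("order:1", newEntry, true, true,
    lockId, LockAccessType.DONT_RELEASE, new OperationContext());

// If another owner holds the lock, the method returns ItemLocked instead of updating.
if (updated.Result == CacheInsResult.ItemLocked)
{
    // Retry later or surface a locking error to the caller.
}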
/// <summary>
/// Combines the insert responses collected from all members of the cluster into a single
/// result for the replicated (atomic) insert operation.
/// </summary>
/// <param name="results">responses collected from all members of cluster.</param>
/// <returns>the aggregated insert result.</returns>
public static CacheInsResultWithEntry FindAtomicInsertStatusReplicated(RspList results)
{
    int needEvictCount = 0;
    int timeoutCount = 0;
    int suspectedCount = 0;
    int successCount = 0;

    CacheInsResultWithEntry res = new CacheInsResultWithEntry();
    if (results == null)
    {
        return res;
    }

    for (int i = 0; i < results.size(); i++)
    {
        Rsp rsp = (Rsp)results.elementAt(i);

        if (!rsp.wasReceived() && !rsp.wasSuspected())
        {
            timeoutCount++;
            continue;
        }
        if (rsp.wasSuspected())
        {
            suspectedCount++;
            continue;
        }

        res = (CacheInsResultWithEntry)((OperationResponse)rsp.Value).SerializablePayload;
        if (res.Result == CacheInsResult.Success || res.Result == CacheInsResult.SuccessOverwrite)
        {
            successCount++;
        }

        /* If all the nodes in the cluster return a NeedsEviction response then we do not need to remove. */
        if (res.Result == CacheInsResult.NeedsEviction)
        {
            needEvictCount++;
        }
    }

    if (needEvictCount == results.size())
    {
        // Every node returned NeedsEviction, so we need not remove the item
        // as the data is not corrupted.
        res.Result = CacheInsResult.NeedsEvictionNotRemove;
    }

    if (suspectedCount > 0 && successCount > 0 && (suspectedCount + successCount == results.size()))
    {
        // The operation succeeded on every node other than the suspected node(s); nothing to do.
    }

    if (timeoutCount > 0 && (timeoutCount + successCount == results.size()))
    {
        if (successCount > 0)
        {
            // The operation did not succeed on some of the nodes; report a partial timeout.
            res.Result = CacheInsResult.PartialTimeout;
        }
        else
        {
            // The operation timed out on all of the nodes; no need to roll back.
            res.Result = CacheInsResult.FullTimeout;
        }
    }

    if (timeoutCount > 0 && suspectedCount > 0)
    {
        if (successCount > 0)
        {
            // The operation did not succeed on some of the nodes; report a partial timeout.
            res.Result = CacheInsResult.PartialTimeout;
        }
        else
        {
            // The operation timed out on all of the nodes; no need to roll back.
            res.Result = CacheInsResult.FullTimeout;
        }
    }

    return res;
}
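// --- Illustrative sketch (not part of the original source) ---
// Hypothetical caller-side handling of the aggregated result produced above; "results"
// is assumed to be the RspList gathered from a cluster broadcast, and the cleanup actions
// in the comments describe what a replicated topology would typically do rather than
// anything taken from this snippet.
CacheInsResultWithEntry aggregate = ClusterHelper.FindAtomicInsertStatusReplicated(results);
switch (aggregate.Result)
{
    case CacheInsResult.NeedsEvictionNotRemove:
        // Every node reported NeedsEviction: the item was never stored, so no rollback is needed.
        break;
    case CacheInsResult.PartialTimeout:
        // Some nodes stored the item while others timed out; the caller would usually
        // remove the key again and report a timeout.
        break;
    case CacheInsResult.FullTimeout:
        // The operation timed out on every node; nothing to roll back.
        break;
}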
/// <summary>
/// Inserts or updates the entry after verifying that all of its dependency keys exist,
/// then updates the dependency-key lists of the internal cache accordingly.
/// </summary>
public override CacheInsResultWithEntry Insert(object key, CacheEntry cacheEntry, bool notify, object lockId, ulong version, LockAccessType accessType, OperationContext operationContext)
{
    CacheInsResultWithEntry entry = new CacheInsResultWithEntry();
    if (this.Internal != null)
    {
        object[] keysIAmDependingOn = cacheEntry.KeysIAmDependingOn;
        if (keysIAmDependingOn != null)
        {
            Hashtable hashtable = this.Contains(keysIAmDependingOn, operationContext);
            if (!hashtable.ContainsKey("items-found"))
            {
                throw new OperationFailedException("One of the dependency keys does not exist.");
            }
            if (hashtable["items-found"] == null)
            {
                throw new OperationFailedException("One of the dependency keys does not exist.");
            }
            if (keysIAmDependingOn.Length != ((ArrayList)hashtable["items-found"]).Count)
            {
                throw new OperationFailedException("One of the dependency keys does not exist.");
            }
        }

        entry = this.Internal.Insert(key, cacheEntry, notify, lockId, version, accessType, operationContext);
        if ((entry.Result != CacheInsResult.Success) && (entry.Result != CacheInsResult.SuccessOverwrite))
        {
            return entry;
        }

        Hashtable finalKeysList = null;
        if ((entry.Entry != null) && (entry.Entry.KeysIAmDependingOn != null))
        {
            Hashtable table = null;
            finalKeysList = base.GetFinalKeysList(entry.Entry.KeysIAmDependingOn, cacheEntry.KeysIAmDependingOn);

            object[] oldKeys = (object[])finalKeysList["oldKeys"];
            if (oldKeys != null)
            {
                table = new Hashtable();
                for (int i = 0; i < oldKeys.Length; i++)
                {
                    if (!table.Contains(oldKeys[i]))
                    {
                        table.Add(oldKeys[i], new ArrayList());
                    }
                    ((ArrayList)table[oldKeys[i]]).Add(key);
                }
                this.Internal.RemoveDepKeyList(table, operationContext);
            }

            object[] newKeys = (object[])finalKeysList["newKeys"];
            if (newKeys != null)
            {
                table = new Hashtable();
                for (int j = 0; j < newKeys.Length; j++)
                {
                    if (!table.Contains(newKeys[j]))
                    {
                        table.Add(newKeys[j], new ArrayList());
                    }
                    ((ArrayList)table[newKeys[j]]).Add(key);
                }
                this.Internal.AddDependencyKeyList(table, operationContext);
            }
            return entry;
        }

        if (cacheEntry.KeysIAmDependingOn != null)
        {
            object[] dependencyKeys = cacheEntry.KeysIAmDependingOn;
            Hashtable table = new Hashtable();
            for (int k = 0; k < dependencyKeys.Length; k++)
            {
                if (!table.Contains(dependencyKeys[k]))
                {
                    table.Add(dependencyKeys[k], new ArrayList());
                }
                ((ArrayList)table[dependencyKeys[k]]).Add(key);
            }
            this.Internal.AddDependencyKeyList(table, operationContext);
        }
    }
    return entry;
}
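// --- Illustrative sketch (not part of the original source) ---
// Shape of the dependency table built in the method above, assuming a hypothetical key
// "order:1" that depends on "customer:42" and "product:7". Each dependency key maps to
// the list of keys that depend on it, which is the structure AddDependencyKeyList receives.
Hashtable dependencyTable = new Hashtable();
foreach (object depKey in new object[] { "customer:42", "product:7" })
{
    if (!dependencyTable.Contains(depKey))
    {
        dependencyTable.Add(depKey, new ArrayList());
    }
    ((ArrayList)dependencyTable[depKey]).Add("order:1");
}
// dependencyTable["customer:42"] and dependencyTable["product:7"] each now contain { "order:1" }.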
/// <summary>
/// Bulk insert. Entries whose dependency keys all exist are inserted into the internal
/// cache and their dependency-key lists are updated; entries with missing dependencies
/// are reported back as failures.
/// </summary>
public override Hashtable Insert(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    Hashtable insertResults = null;
    ArrayList validKeys = new ArrayList();
    ArrayList validEntries = new ArrayList();
    ArrayList failedKeys = new ArrayList();

    if (this.Internal != null)
    {
        // Separate entries whose dependency keys all exist from those with missing dependencies.
        for (int i = 0; i < cacheEntries.Length; i++)
        {
            object[] keysIAmDependingOn = cacheEntries[i].KeysIAmDependingOn;
            if (keysIAmDependingOn != null)
            {
                Hashtable containsResult = this.Contains(keysIAmDependingOn, operationContext);
                if (containsResult.ContainsKey("items-found") && containsResult["items-found"] != null
                    && keysIAmDependingOn.Length == ((ArrayList)containsResult["items-found"]).Count)
                {
                    validKeys.Add(keys[i]);
                    validEntries.Add(cacheEntries[i]);
                }
                else
                {
                    failedKeys.Add(keys[i]);
                }
            }
            else
            {
                validKeys.Add(keys[i]);
                validEntries.Add(cacheEntries[i]);
            }
        }

        CacheEntry[] entries = new CacheEntry[validEntries.Count];
        validEntries.CopyTo(entries);

        insertResults = this.Internal.Insert(validKeys.ToArray(), entries, notify, operationContext);

        for (int j = 0; j < validKeys.Count; j++)
        {
            CacheInsResultWithEntry insResult = insertResults[validKeys[j]] as CacheInsResultWithEntry;
            if (insResult != null && (insResult.Result == CacheInsResult.Success || insResult.Result == CacheInsResult.SuccessOverwrite))
            {
                Hashtable finalKeysList = null;
                if (insResult.Entry != null && insResult.Entry.KeysIAmDependingOn != null)
                {
                    finalKeysList = base.GetFinalKeysList(insResult.Entry.KeysIAmDependingOn, entries[j].KeysIAmDependingOn);

                    object[] oldKeys = (object[])finalKeysList["oldKeys"];
                    Hashtable table = new Hashtable();
                    for (int m = 0; m < oldKeys.Length; m++)
                    {
                        if (!table.Contains(oldKeys[m]))
                        {
                            table.Add(oldKeys[m], new ArrayList());
                        }
                        ((ArrayList)table[oldKeys[m]]).Add(validKeys[j]);
                    }
                    this.Internal.RemoveDepKeyList(table, operationContext);

                    object[] newKeys = (object[])finalKeysList["newKeys"];
                    table.Clear();
                    for (int n = 0; n < newKeys.Length; n++)
                    {
                        if (!table.Contains(newKeys[n]))
                        {
                            table.Add(newKeys[n], new ArrayList());
                        }
                        ((ArrayList)table[newKeys[n]]).Add(validKeys[j]);
                    }
                    this.Internal.AddDependencyKeyList(table, operationContext);
                }
                else if (entries[j].KeysIAmDependingOn != null)
                {
                    object[] dependencyKeys = entries[j].KeysIAmDependingOn;
                    Hashtable table = new Hashtable();
                    for (int p = 0; p < dependencyKeys.Length; p++)
                    {
                        if (!table.Contains(dependencyKeys[p]))
                        {
                            table.Add(dependencyKeys[p], new ArrayList());
                        }
                        ((ArrayList)table[dependencyKeys[p]]).Add(validKeys[j]);
                    }
                    this.Internal.AddDependencyKeyList(table, operationContext);
                }
            }
        }

        for (int k = 0; k < failedKeys.Count; k++)
        {
            insertResults.Add(failedKeys[k], new OperationFailedException("One of the dependency keys does not exist."));
        }
    }
    return insertResults;
}
/// <summary>
/// Resynchronizes the given keys with the data source.
/// </summary>
/// <param name="orginalTable">Table that receives the resynchronized values.</param>
/// <param name="keys">Keys to resynchronize.</param>
/// <param name="e">Cache entries corresponding to the keys.</param>
/// <param name="flag">Flag sets associated with the keys.</param>
/// <param name="providerName">Name of the read-through provider.</param>
/// <param name="operationContext">Context of the operation.</param>
public void ResyncCacheItem(Hashtable orginalTable, string[] keys, CacheEntry[] e, BitSet[] flag, string providerName, OperationContext operationContext)
{
    Dictionary<string, ProviderCacheItem> dictionary = this.ReadThru(keys, providerName);
    if (dictionary != null && dictionary.Count != 0)
    {
        object[] array = new object[dictionary.Count];
        CacheEntry[] entryArray = new CacheEntry[dictionary.Count];
        int index = 0;

        for (int i = 0; i < keys.Length; i++)
        {
            ProviderCacheItem item;
            if (dictionary.TryGetValue(keys[i], out item) && (item != null))
            {
                try
                {
                    CacheEntry entry;
                    if (this.GetCacheEntry(keys[i], item, ref flag[i], null, null, out entry) != null)
                    {
                        array[index] = keys[i];
                        entryArray[index++] = entry;
                    }
                }
                catch (Exception exception)
                {
                    _context.Logger.Error(("Error occurred while synchronization with data source; " + exception.Message).GetFormatter());
                }
            }
        }

        if (index != 0)
        {
            Cache.Resize(ref array, index);
            Cache.Resize(ref entryArray, index);

            Hashtable hashtable = null;
            try
            {
                hashtable = _context.CacheImpl.Insert(array, entryArray, false, operationContext);
            }
            catch (Exception exception2)
            {
                throw new OperationFailedException("error while trying to synchronize the cache with data source. Error: " + exception2.Message, exception2);
            }

            for (int j = 0; j < array.Length; j++)
            {
                if (hashtable.ContainsKey(array[j]))
                {
                    CacheInsResultWithEntry entry2 = hashtable[array[j]] as CacheInsResultWithEntry;
                    if ((entry2 != null) && ((entry2.Result == CacheInsResult.Success) || (entry2.Result == CacheInsResult.SuccessOverwrite)))
                    {
                        object obj3 = entryArray[j].Value;
                        if (obj3 is CallbackEntry)
                        {
                            obj3 = ((CallbackEntry)obj3).Value;
                        }
                        orginalTable.Add(array[j], new CompressedValueEntry(obj3, entryArray[j].Flag));
                    }
                }
                else
                {
                    object obj4 = entryArray[j].Value;
                    if (obj4 is CallbackEntry)
                    {
                        obj4 = ((CallbackEntry)obj4).Value;
                    }
                    orginalTable.Add(array[j], new CompressedValueEntry(obj4, entryArray[j].Flag));
                }
            }
        }
    }
}
/// <summary>
/// Updates or Adds the object on all the specified nodes of the replicated cluster.
/// </summary>
protected CacheInsResultWithEntry Clustered_Insert(ArrayList dests, object key, CacheEntry cacheEntry, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    try
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Insert", "enter");
        }

        // Ask every server to update the object, except myself.
        Function func = new Function((int)OpCodes.Insert,
            new object[] { key, cacheEntry, _statusLatch.IsAnyBitsSet(NodeStatus.Initializing), lockId, accessType, operationContext },
            false, key);

        Array userPayLoad = null;
        if (cacheEntry.Value is CallbackEntry)
        {
            CallbackEntry cbEntry = (CallbackEntry)cacheEntry.Value;
            userPayLoad = cbEntry.UserData;
        }
        else
        {
            userPayLoad = cacheEntry.UserData;
        }
        func.UserPayload = userPayLoad;

        RspList results = Cluster.BroadcastToMultiple(dests, func, GroupRequest.GET_ALL, _asyncOperation);
        ClusterHelper.ValidateResponses(results, typeof(OperationResponse), Name);

        // Bug fix: during state transfer of a replicated cache (a new node joining the existing
        // one), a client continuously performing inserts was incrementing the add/sec counter
        // even though only the update/sec counter should be incremented. To report the correct
        // counter, an insert that overwrote the item on any node is mapped to SuccessOverwrite.
        CacheInsResultWithEntry retVal = ClusterHelper.FindAtomicInsertStatusReplicated(results);
        if (retVal != null && retVal.Result == CacheInsResult.Success && results != null)
        {
            for (int i = 0; i < results.Results.Count; i++)
            {
                if (((CacheInsResultWithEntry)((OperationResponse)results.Results[i]).SerializablePayload).Result == CacheInsResult.SuccessOverwrite)
                {
                    retVal.Result = CacheInsResult.SuccessOverwrite;
                    break;
                }
            }
        }
        return retVal;
    }
    catch (CacheException)
    {
        throw;
    }
    catch (Exception e)
    {
        throw new GeneralFailureException(e.Message, e);
    }
    finally
    {
        if (ServerMonitor.MonitorActivity)
        {
            ServerMonitor.LogClientActivity("RepCacheBase.Insert", "exit");
        }
    }
}