/// <summary>
/// Adds multiple key/value pairs to the cache in one call. Each key is
/// attempted independently: a per-key failure is recorded in the returned
/// table instead of aborting the rest of the batch.
/// </summary>
/// <param name="keys">keys of the entries.</param>
/// <param name="cacheEntries">the cache entries, parallel to <paramref name="keys"/>.</param>
/// <param name="notify">whether to attach a per-key event context so item-add notifications can be raised.</param>
/// <param name="operationContext">context of the current operation; supplies the operation id.</param>
/// <returns>Table mapping each key to its add status (a CacheAddResult, or the exception it produced).</returns>
public sealed override Hashtable Add(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    Hashtable results = new Hashtable();
    OperationID operationId = operationContext.OperatoinID;

    for (int index = 0; index < keys.Length; index++)
    {
        object key = keys[index];
        try
        {
            // Clear any event context left over from the previous iteration.
            operationContext.RemoveValueByField(OperationContextFieldName.EventContext);

            if (notify)
            {
                // Build a unique event id for this key so the raised
                // notification can be correlated back to this operation.
                EventId eventId = new EventId();
                eventId.EventUniqueID = operationId.OperationId;
                eventId.OperationCounter = operationId.OpCounter;
                eventId.EventCounter = index;

                EventContext eventContext = new EventContext();
                eventContext.Add(EventContextFieldName.EventID, eventId);
                operationContext.Add(OperationContextFieldName.EventContext, eventContext);
            }

            results[key] = Add(key, cacheEntries[index], notify, operationContext);
        }
        catch (Exceptions.StateTransferException stateTransferEx)
        {
            // State-transfer conflicts are reported as-is so the caller can retry.
            results[key] = stateTransferEx;
        }
        catch (Exception ex)
        {
            results[key] = new OperationFailedException(ex.Message, ex);
        }
        finally
        {
            // Never leak this iteration's event context into the next key.
            operationContext.RemoveValueByField(OperationContextFieldName.EventContext);
        }
    }

    if (_context.PerfStatsColl != null)
    {
        _context.PerfStatsColl.SetCacheSize(Size);
    }

    return results;
}
/// <summary>
/// Adds a batch of key/value pairs to the cache. Keys that already exist in the
/// local cache fail immediately with an <see cref="OperationFailedException"/>;
/// the remaining keys are added cluster-wide (or locally when this is the only
/// server). Keys that fail during the clustered add are rolled back by removing
/// them again.
/// </summary>
/// <param name="keys">keys of the entries.</param>
/// <param name="cacheEntries">the cache entries, parallel to <paramref name="keys"/>.</param>
/// <param name="notify">whether item-add notifications should be raised.</param>
/// <param name="operationContext">context of the current operation.</param>
/// <returns>Table mapping each key to its add status (a CacheAddResult or an exception).</returns>
/// <remarks>
/// This method invokes <see cref="handleAdd"/> on every server-node in the cluster. If the operation
/// fails on any one node the whole operation is considered to have failed and is rolled-back.
/// Moreover the node initiating this request (this method) also triggers a cluster-wide item-add
/// notification.
/// </remarks>
public override Hashtable Add(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("RepCache.AddBlk", "");

    // Wait until the node enters a state where it can serve operations.
    _statusLatch.WaitForAny(NodeStatus.Initializing | NodeStatus.Running);

    if (_internalCache == null) throw new InvalidOperationException();

    Hashtable addResult = new Hashtable();
    Hashtable tmp = new Hashtable();

    // Pre-screen: keys already present locally fail up front.
    Hashtable existingKeys = Local_Contains(keys, operationContext);
    ArrayList list = new ArrayList();
    if (existingKeys != null && existingKeys.Count > 0)
    {
        // BUGFIX: guard against a missing or mistyped "items-found" entry; the
        // unguarded 'as' cast previously left 'list' null and crashed at list.Count.
        ArrayList found = existingKeys["items-found"] as ArrayList;
        if (found != null) list = found;
    }

    int failCount = list.Count;
    if (failCount > 0)
    {
        IEnumerator ie = list.GetEnumerator();
        while (ie.MoveNext())
        {
            addResult[ie.Current] = new OperationFailedException("The specified key already exists.");
        }

        // All keys already exist, so return.
        if (failCount == keys.Length)
        {
            return addResult;
        }

        // Compact the key/entry arrays down to only the keys not present yet.
        object[] newKeys = new object[keys.Length - failCount];
        CacheEntry[] entries = new CacheEntry[keys.Length - failCount];
        int i = 0;
        int j = 0;
        IEnumerator im = keys.GetEnumerator();
        while (im.MoveNext())
        {
            object key = im.Current;
            if (!list.Contains(key))
            {
                newKeys[j] = key;
                entries[j] = cacheEntries[i];
                j++;
            }
            i++;
        }
        keys = newKeys;
        cacheEntries = entries;
    }

    Exception thrown = null;
    try
    {
        if (Cluster.Servers.Count > 1)
        {
            // Try to add to the local node and the cluster.
            tmp = Clustered_Add(keys, cacheEntries, operationContext);
        }
        else
        {
            tmp = Local_Add(keys, cacheEntries, Cluster.LocalAddress, true, operationContext);
        }
    }
    catch (Exception inner)
    {
        Context.NCacheLog.Error("Replicated.Clustered_Add()", inner.ToString());
        for (int i = 0; i < keys.Length; i++)
        {
            tmp[keys[i]] = new OperationFailedException(inner.Message, inner);
        }
        thrown = inner;
    }

    if (thrown != null)
    {
        // The clustered operation failed as a whole: undo any partial adds.
        // NOTE(review): the per-key failures recorded in 'tmp' above are not
        // copied into 'addResult', so these keys are silently absent from the
        // returned table — confirm callers expect that.
        if (Cluster.Servers.Count > 1)
        {
            Clustered_Remove(keys, ItemRemoveReason.Removed, operationContext);
        }
        else
        {
            Local_Remove(keys, ItemRemoveReason.Removed, null, null, false, operationContext);
        }
    }
    else
    {
        // Classify per-key results; any key that did not succeed is rolled back.
        failCount = 0;
        ArrayList failKeys = new ArrayList();
        IDictionaryEnumerator ide = tmp.GetEnumerator();
        Hashtable writeBehindTable = new Hashtable();
        while (ide.MoveNext())
        {
            if (ide.Value is CacheAddResult)
            {
                CacheAddResult res = (CacheAddResult)ide.Value;
                switch (res)
                {
                    case CacheAddResult.Failure:
                    case CacheAddResult.KeyExists:
                    case CacheAddResult.NeedsEviction:
                        failCount++;
                        failKeys.Add(ide.Key);
                        addResult[ide.Key] = ide.Value;
                        break;
                    case CacheAddResult.Success:
                        addResult[ide.Key] = ide.Value;
                        writeBehindTable.Add(ide.Key, null);
                        break;
                }
            }
            else // it means the value is an exception
            {
                failCount++;
                failKeys.Add(ide.Key);
                addResult[ide.Key] = ide.Value;
            }
        }

        if (failCount > 0)
        {
            object[] keysToRemove = new object[failCount];
            failKeys.CopyTo(keysToRemove, 0);
            if (Cluster.Servers.Count > 1)
            {
                Clustered_Remove(keysToRemove, ItemRemoveReason.Removed, null, false, operationContext);
            }
            else
            {
                Local_Remove(keysToRemove, ItemRemoveReason.Removed, null, null, false, operationContext);
            }
        }
    }
    return addResult;
}
/// <summary>
/// Removes the given keys via the local node (the "optimized" path). If the
/// bulk local remove throws, the same failure is reported for every key in
/// the returned table.
/// </summary>
/// <param name="keys">keys to remove.</param>
/// <param name="ir">reason the items are being removed.</param>
/// <param name="cbEntry">optional callback entry forwarded to the local remove.</param>
/// <param name="notify">whether to raise custom remove callbacks for removed entries.</param>
/// <param name="operationContext">context of the current operation.</param>
/// <returns>Table mapping each key to its removed CacheEntry, an OperationFailedException,
/// or a StateTransferException reported by the local cache.</returns>
private Hashtable OptimizedRemove(IList keys, ItemRemoveReason ir, CallbackEntry cbEntry, bool notify, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("PoRCache.RemoveBlk", "");

    // CLEANUP: removed dead code — 'totalDepKeys' was never used, and the keys
    // collected into 'remainingKeys' (StateTransferException hits) were never
    // consumed by anything in this method.
    Hashtable result = new Hashtable();
    try
    {
        result = Local_Remove(keys, ir, Cluster.LocalAddress, cbEntry, notify, operationContext);
    }
    catch (Exception ex)
    {
        // The bulk remove failed as a whole: report the same failure for every key.
        for (int i = 0; i < keys.Count; i++)
        {
            result[keys[i]] = new OperationFailedException(ex.Message, ex);
        }
    }

    // BUGFIX: guard against Local_Remove returning null; the original checked
    // 'result != null' once but then dereferenced result.Count unconditionally.
    if (result != null && result.Count > 0)
    {
        // Raise custom remove callbacks for entries that were actually removed.
        IDictionaryEnumerator ide = result.GetEnumerator();
        while (ide.MoveNext())
        {
            object key = ide.Key;
            CacheEntry entry = ide.Value as CacheEntry;
            if (entry != null)
            {
                if (notify)
                {
                    object value = entry.Value;
                    if (value is CallbackEntry)
                    {
                        RaiseCustomRemoveCalbackNotifier(key, entry, ir);
                    }
                }
            }
        }
    }
    return result;
}
/// <summary>
/// Removes keys from a partitioned cache: keys are grouped by the node that
/// currently owns their bucket and each group is removed on its owner. Keys
/// whose owner left, timed out, or reported a state-transfer conflict are
/// retried on the next pass of the do/while loop.
/// NOTE(review): the retry loop has no upper bound; if a node keeps timing out
/// this does not terminate — confirm a cap is enforced elsewhere.
/// </summary>
/// <param name="keys">keys to remove.</param>
/// <param name="ir">reason the items are being removed.</param>
/// <param name="cbEntry">optional callback entry forwarded with the remove.</param>
/// <param name="notify">whether to raise custom remove callbacks for removed entries.</param>
/// <param name="operationContext">context of the current operation.</param>
/// <returns>Table mapping each removed key to its CacheEntry; on the no-target
/// path the table maps every pending key to an OperationFailedException instead.</returns>
private Hashtable ClusteredRemove(IList keys, ItemRemoveReason ir, CallbackEntry cbEntry, bool notify, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("PoRCache.RemoveBlk", "");
    Hashtable targetNodes = null;
    Hashtable result = new Hashtable();
    Hashtable tmpResult = null;
    ArrayList totalKeys = new ArrayList(keys);
    ArrayList totalRemainingKeys = new ArrayList();
    Hashtable totalDepKeys = new Hashtable(); // NOTE(review): never used in this method
    Address targetNode = null;
    do
    {
        // Map each still-pending key to the node that currently owns its bucket.
        targetNodes = GetTargetNodes(totalKeys);
        if (targetNodes != null && targetNodes.Count == 0)
        {
            // No owner available for any key: fail them all and stop retrying.
            foreach (object key in totalKeys)
            {
                result[key] = new OperationFailedException("No target node available to accommodate the data.");
            }
            return result;
        }
        IDictionaryEnumerator ide = targetNodes.GetEnumerator();
        Hashtable keyList = null;
        //We select one node at a time for Add operation.
        while (ide.MoveNext())
        {
            targetNode = ide.Key as Address;
            keyList = (Hashtable)ide.Value;
            if (targetNode != null && keyList != null)
            {
                object[] currentKeys = MiscUtil.GetArrayFromCollection(keyList.Keys);
                try
                {
                    if (targetNode.Equals(Cluster.LocalAddress))
                    {
                        // This node owns the bucket: remove locally.
                        tmpResult = Local_Remove(currentKeys, ir, Cluster.LocalAddress, cbEntry, notify, operationContext);
                    }
                    else
                    {
                        tmpResult = Clustered_Remove(targetNode, currentKeys, ir, cbEntry, notify, operationContext);
                    }
                }
                catch (Runtime.Exceptions.SuspectedException se)
                {
                    // Owner left the cluster mid-operation: retry these keys next pass.
                    if (Context.NCacheLog.IsInfoEnabled) Context.NCacheLog.Info("PartitionedServerCache.Remove()", targetNode + " left while addition");
                    totalRemainingKeys.AddRange(currentKeys);
                    continue;
                }
                catch (Runtime.Exceptions.TimeoutException te)
                {
                    // Owner did not answer in time: retry these keys next pass.
                    if (Context.NCacheLog.IsInfoEnabled) Context.NCacheLog.Info("PartitionedServerCache.Remove()", targetNode + " operation timed out");
                    totalRemainingKeys.AddRange(currentKeys);
                    continue;
                }
                if (tmpResult != null)
                {
                    IDictionaryEnumerator ie = tmpResult.GetEnumerator();
                    while (ie.MoveNext())
                    {
                        if (ie.Value is StateTransferException)
                            // Bucket moved while removing: retry this key next pass.
                            totalRemainingKeys.Add(ie.Key);
                        else
                        {
                            // Only removed entries are surfaced to the caller.
                            if (ie.Value is CacheEntry) result[ie.Key] = ie.Value;
                        }
                    }
                }
            }
        }
        totalKeys = new ArrayList(totalRemainingKeys);
        totalRemainingKeys.Clear();
    } while (totalKeys.Count > 0);
    if (result.Count > 0)
    {
        // Raise custom remove callbacks for the removed entries. The direct cast
        // is safe here: the exception-filling path above returns early, so only
        // CacheEntry values reach 'result' at this point.
        IDictionaryEnumerator ide = result.GetEnumerator();
        while (ide.MoveNext())
        {
            object key = ide.Key;
            CacheEntry entry = (CacheEntry) ide.Value;
            // Already fired from LocalCacheBase
            if (notify)
            {
                object value = entry.Value;
                if (value is CallbackEntry)
                {
                    RaiseCustomRemoveCalbackNotifier(key, entry, ir);
                }
            }
        }
    }
    return result;
}
/// <summary>
/// Inserts a batch of entries into a partitioned cache: keys are grouped by the
/// node that owns their bucket and each group is inserted on its owner. Keys hit
/// by a state transfer are retried on the next pass until every key is resolved.
/// NOTE(review): the do/while has no retry cap — confirm termination is
/// guaranteed when buckets keep moving.
/// </summary>
/// <param name="keys">keys of the entries. NOTE(review): cast to string below,
/// so keys are presumably strings — confirm against callers.</param>
/// <param name="cacheEntries">entries to insert, parallel to <paramref name="keys"/>.</param>
/// <param name="notify">whether to raise custom update callbacks for overwritten entries.</param>
/// <param name="operationContext">context of the current operation.</param>
/// <returns>Table mapping keys to exceptions or CacheInsResultWithEntry values;
/// keys inserted fresh (Success) are tracked internally but not returned.</returns>
private Hashtable ClusteredInsert(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    Hashtable targetNodes = null;
    Hashtable result = new Hashtable();
    Hashtable tmpResult = null;
    ArrayList totalKeys = new ArrayList(keys);
    ArrayList totalEntries = new ArrayList(cacheEntries);
    ArrayList keysToInsert = new ArrayList(keys);
    Address targetNode = null;
    object[] currentKeys = null;
    CacheEntry[] currentValues = null;
    // key -> entry map so the update-callback loop at the end can find the new entry.
    Dictionary<object, CacheEntry> fullEntrySet = new Dictionary<object, CacheEntry>();
    Hashtable totalAddedKeys = new Hashtable();     // keys inserted fresh (Success)
    Hashtable totalInsertedKeys = new Hashtable();  // keys that overwrote an entry (SuccessOverwrite)
    Hashtable totalRemainingKeys = new Hashtable(); // keys to retry after a state transfer
    if (_internalCache == null) throw new InvalidOperationException();
    do
    {
        // Map each pending key to the node that currently owns its bucket.
        targetNodes = GetTargetNodes(keysToInsert);
        if (targetNodes != null && targetNodes.Count == 0)
        {
            // No owner available for any key: fail them all and stop.
            foreach (object key in keysToInsert)
            {
                result[key] = new OperationFailedException("No target node available to accommodate the data.");
            }
            return result;
        }
        IDictionaryEnumerator ide = targetNodes.GetEnumerator();
        Hashtable keyList = null;
        //We select one node at a time for Add operation.
        while (ide.MoveNext())
        {
            targetNode = ide.Key as Address;
            keyList = (Hashtable) ide.Value;
            if (targetNode != null && keyList != null)
            {
                // Re-pair this node's keys with their original entries.
                currentKeys = new object[keyList.Count];
                currentValues = new CacheEntry[keyList.Count];
                int j = 0;
                foreach (object key in keyList.Keys)
                {
                    int index = totalKeys.IndexOf(key);
                    if (index != -1)
                    {
                        currentKeys[j] = totalKeys[index];
                        currentValues[j] = totalEntries[index] as CacheEntry;
                        if (!fullEntrySet.ContainsKey((string) totalKeys[index]))
                            fullEntrySet.Add((string) totalKeys[index], (CacheEntry) totalEntries[index]);
                        j++;
                    }
                }
                try
                {
                    if (targetNode.Equals(Cluster.LocalAddress))
                    {
                        // This node owns the bucket: insert locally.
                        tmpResult = Local_Insert(currentKeys, currentValues, Cluster.LocalAddress, notify, operationContext);
                    }
                    else
                    {
                        tmpResult = Clustered_Insert(targetNode, currentKeys, currentValues, operationContext);
                    }
                }
                catch (Runtime.Exceptions.SuspectedException se)
                {
                    //we redo the operation
                    // Owner left: mark every key of this group as failed.
                    if (Context.NCacheLog.IsInfoEnabled) Context.NCacheLog.Info("PartitionedServerCache.SafeAdd", targetNode + " left while addition");
                    tmpResult = new Hashtable();
                    for (int i = 0; i < currentKeys.Length; i++)
                    {
                        tmpResult[currentKeys[i]] = new GeneralFailureException(se.Message, se);
                    }
                }
                catch (Runtime.Exceptions.TimeoutException te)
                {
                    // Owner timed out: mark every key of this group as failed.
                    if (Context.NCacheLog.IsInfoEnabled) Context.NCacheLog.Info("PartitionedServerCache.SafeAdd", targetNode + " operation timed out");
                    tmpResult = new Hashtable();
                    for (int i = 0; i < currentKeys.Length; i++)
                    {
                        tmpResult[currentKeys[i]] = new GeneralFailureException(te.Message, te);
                    }
                }
                catch (BucketTransferredException ex)
                {
                    // Bucket ownership changed under us: report per-key failure.
                    tmpResult = new Hashtable();
                    for (int i = 0; i < currentKeys.Length; i++)
                    {
                        tmpResult[currentKeys[i]] = new OperationFailedException(ex.Message, ex);
                    }
                }
                if (tmpResult != null && tmpResult.Count > 0)
                {
                    // Classify this group's per-key outcomes.
                    IDictionaryEnumerator ie = tmpResult.GetEnumerator();
                    while (ie.MoveNext())
                    {
                        if (ie.Value is StateTransferException)
                        {
                            // Bucket moved mid-insert: retry this key next pass.
                            totalRemainingKeys[ie.Key] = null;
                        }
                        else
                        {
                            if (ie.Value is Exception)
                            {
                                result[ie.Key] = ie.Value;
                            }
                            else if (ie.Value is CacheInsResultWithEntry)
                            {
                                CacheInsResultWithEntry res = ie.Value as CacheInsResultWithEntry;
                                switch (res.Result)
                                {
                                    case CacheInsResult.Failure:
                                        result[ie.Key] = new OperationFailedException("Generic operation failure; not enough information is available.");
                                        break;
                                    case CacheInsResult.NeedsEviction:
                                        result[ie.Key] = new OperationFailedException("The cache is full and not enough items could be evicted.");
                                        break;
                                    case CacheInsResult.Success:
                                        totalAddedKeys[ie.Key] = null;
                                        break;
                                    case CacheInsResult.SuccessOverwrite:
                                        totalInsertedKeys[ie.Key] = ie.Value;
                                        result[ie.Key] = ie.Value;
                                        break;
                                }
                            }
                        }
                    }
                }
            }
        }
        keysToInsert = new ArrayList(totalRemainingKeys.Keys);
        totalRemainingKeys.Clear();
    } while (keysToInsert.Count > 0);
    // Temporarily force query-info generation for the callback phase if the
    // caller did not set it; restored (removed) again below.
    object generateQueryInfo = operationContext.GetValueByField(OperationContextFieldName.GenerateQueryInfo);
    if (generateQueryInfo == null)
    {
        operationContext.Add(OperationContextFieldName.GenerateQueryInfo, true);
    }
    if (totalInsertedKeys.Count > 0)
    {
        // Raise update callbacks for entries that overwrote an existing value.
        IDictionaryEnumerator ide = totalInsertedKeys.GetEnumerator();
        while (ide.MoveNext())
        {
            object key = ide.Key;
            CacheInsResultWithEntry insResult = ide.Value as CacheInsResultWithEntry;
            if (notify)
            {
                CacheEntry currentEntry = fullEntrySet[(string) ide.Key];
                object value = insResult.Entry.Value;
                if (value is CallbackEntry)
                {
                    RaiseCustomUpdateCalbackNotifier(ide.Key, currentEntry, insResult.Entry, operationContext);
                }
            }
        }
    }
    if (generateQueryInfo == null)
    {
        operationContext.RemoveValueByField(OperationContextFieldName.GenerateQueryInfo);
    }
    return result;
}
/// <summary>
/// Bulk-insert fast path: attempts the whole batch on the local node first and
/// falls back to <see cref="ClusteredInsert"/> only for keys whose bucket was
/// hit by a state transfer during the local attempt.
/// </summary>
/// <param name="keys">keys of the entries.</param>
/// <param name="cacheEntries">entries to insert, parallel to <paramref name="keys"/>.</param>
/// <param name="notify">whether update notifications should be raised by the insert.</param>
/// <param name="operationContext">context of the current operation.</param>
/// <returns>Table mapping keys to exceptions or insert results.</returns>
private Hashtable OptimizedInsert(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("PoRCache.InsertBlk", "");

    Hashtable result = new Hashtable();
    Hashtable addedKeys = new Hashtable();
    Hashtable insertedKeys = new Hashtable();
    ArrayList remainingKeys = new ArrayList();
    ArrayList allKeys = new ArrayList(keys);
    ArrayList allEntries = new ArrayList(cacheEntries);

    Hashtable localOutcome = new Hashtable();
    try
    {
        localOutcome = Local_Insert(keys, cacheEntries, Cluster.LocalAddress, notify, operationContext);
    }
    catch (BucketTransferredException ex)
    {
        // The local attempt landed on moved buckets as a whole; fail every key alike.
        localOutcome = new Hashtable();
        foreach (object key in keys)
        {
            localOutcome[key] = new OperationFailedException(ex.Message, ex);
        }
    }

    if (localOutcome != null && localOutcome.Count > 0)
    {
        // Classify each per-key outcome of the local attempt.
        foreach (DictionaryEntry outcome in localOutcome)
        {
            if (outcome.Value is StateTransferException)
            {
                // Bucket moved mid-insert; retry this key through the clustered path.
                remainingKeys.Add(outcome.Key);
                continue;
            }
            if (outcome.Value is Exception)
            {
                result[outcome.Key] = outcome.Value;
                continue;
            }
            CacheInsResultWithEntry insOutcome = outcome.Value as CacheInsResultWithEntry;
            if (insOutcome == null) continue;
            switch (insOutcome.Result)
            {
                case CacheInsResult.Failure:
                    result[outcome.Key] = new OperationFailedException("Generic operation failure; not enough information is available.");
                    break;
                case CacheInsResult.NeedsEviction:
                    result[outcome.Key] = new OperationFailedException("The cache is full and not enough items could be evicted.");
                    break;
                case CacheInsResult.Success:
                    addedKeys[outcome.Key] = null;
                    break;
                case CacheInsResult.SuccessOverwrite:
                    insertedKeys[outcome.Key] = outcome.Value;
                    result[outcome.Key] = outcome.Value;
                    break;
            }
        }
    }

    if (remainingKeys.Count > 0)
    {
        // Re-pair each retried key with its original entry before going clustered.
        object[] retryKeys = new object[remainingKeys.Count];
        CacheEntry[] retryValues = new CacheEntry[remainingKeys.Count];
        int next = 0;
        foreach (string key in remainingKeys)
        {
            int index = allKeys.IndexOf(key);
            if (index != -1)
            {
                retryKeys[next] = allKeys[index];
                retryValues[next] = allEntries[index] as CacheEntry;
                next++;
            }
        }

        Hashtable clusteredOutcome = ClusteredInsert(retryKeys, retryValues, notify, operationContext);
        foreach (DictionaryEntry outcome in clusteredOutcome)
        {
            result[outcome.Key] = outcome.Value;
        }
    }

    return result;
}