/// <summary>
/// Merges the metadata of the first entry (c1) into the second (c2): item
/// remove/update callback listeners and, when c2 lacks one, the eviction hint.
/// </summary>
/// <param name="c1">Source entry whose callbacks/eviction hint are copied; may be null.</param>
/// <param name="c2">Destination entry that receives the merged data.</param>
/// <returns>returns merged entry c2</returns>
public static CacheEntry MergeEntries(CacheEntry c1, CacheEntry c2)
{
    if (c1 != null)
    {
        CallbackEntry cbEntry = c1.Value as CallbackEntry;
        if (cbEntry != null)
        {
            // Transfer remove-callbacks, then update-callbacks, onto c2.
            if (cbEntry.ItemRemoveCallbackListener != null)
            {
                foreach (CallbackInfo cbInfo in cbEntry.ItemRemoveCallbackListener)
                    c2.AddCallbackInfo(null, cbInfo);
            }
            if (cbEntry.ItemUpdateCallbackListener != null)
            {
                foreach (CallbackInfo cbInfo in cbEntry.ItemUpdateCallbackListener)
                    c2.AddCallbackInfo(cbInfo, null);
            }
        }
        // c2 keeps its own eviction hint when present; otherwise inherit c1's.
        if (c1.EvictionHint != null && c2.EvictionHint == null)
            c2.EvictionHint = c1.EvictionHint;
    }
    return c2;
}
/// <summary>
/// Releases the locks held on both entries, provided the lock on the existing
/// entry is compatible with the lock carried by the new entry.
/// </summary>
/// <param name="existingEntry">Entry currently present in the cache.</param>
/// <param name="newEntry">Incoming entry whose lock id must match.</param>
/// <returns>true when both locks were released; false when the locks are incompatible.</returns>
public static bool ReleaseLock(CacheEntry existingEntry, CacheEntry newEntry)
{
    if (!CheckLockCompatibility(existingEntry, newEntry))
        return false;

    existingEntry.ReleaseLock();
    newEntry.ReleaseLock();
    return true;
}
/// <summary>
/// Polls the messaging subsystem for event messages assigned to the given client,
/// raises the corresponding item-updated / item-removed callbacks, and then
/// acknowledges the processed message ids per topic.
/// NOTE(review): relies on state declared outside this block ('result',
/// 'operationContext', 'itemLevelEventsMessageIds', 'topicWiseMessageIds') —
/// presumably instance fields; confirm against the enclosing class.
/// </summary>
/// <param name="clientId">Id of the client whose event subscription is polled.</param>
private void PollForEventMessage(string clientId)
{
    // Subscription describing the client's event queue; Expiration is effectively "never".
    SubscriptionInfo subscriptionInfo = new SubscriptionInfo()
    {
        SubscriptionId = SubscriptionInfo.EventsSubscriptionName,
        ClientId = clientId,
        Type = SubscriptionType.Subscriber,
        SubPolicyType = SubscriptionPolicyType.EventSubscription,
        Expiration = TimeSpan.MaxValue.Ticks
    };
    result = context.CacheImpl.GetAssignedMessage(subscriptionInfo, operationContext);
    foreach (var pair in result.AssignedMessages)
    {
        IList<object> eventMessages = pair.Value;
        // Hold the per-topic message list while it is enumerated.
        lock (eventMessages)
        {
            IEnumerator<object> enu = eventMessages.GetEnumerator();
            while (enu.MoveNext())
            {
                //Create Events here and fire
                var message = (EventMessage)enu.Current;
                EventContext eventContext = SetEventContext(message);
                switch (eventContext.EventID.EventType)
                {
                    case Persistence.EventType.ITEM_UPDATED_CALLBACK:
                        context.CacheImpl.RaiseOldCustomUpdateCalbackNotifier(message.Key, message.CallbackInfos, eventContext);
                        break;
                    case Persistence.EventType.ITEM_REMOVED_CALLBACK:
                        // A pooled placeholder entry carries the remove-callback
                        // listeners into the notifier call.
                        CacheEntry entry = CacheEntry.CreateCacheEntry(context.FakeObjectPool);
                        entry.Notifications = new Notifications();
                        entry.Notifications.ItemRemoveCallbackListener = new ArrayList();
                        entry.Notifications.ItemRemoveCallbackListener = message.CallbackInfos;
                        entry.MarkInUse(NCModulesConstants.Global);
                        context.CacheImpl.RaiseOldCustomRemoveCalbackNotifier(message.Key, entry, message.RemoveReason, operationContext, eventContext);
                        break;
                }
                // Record processed message ids per topic for acknowledgement below.
                switch (message.MessageMetaData.TopicName)
                {
                    case TopicConstant.ItemLevelEventsTopic:
                        itemLevelEventsMessageIds.Add(message.MessageId);
                        break;
                }
            }
        }
    }
    if (itemLevelEventsMessageIds.Count > 0)
    {
        topicWiseMessageIds.Add(TopicConstant.ItemLevelEventsTopic, itemLevelEventsMessageIds);
        // Fresh list so already-acknowledged ids are not re-reported on the next poll.
        itemLevelEventsMessageIds = new List<string>();
    }
    if (topicWiseMessageIds.Count > 0)
    {
        context.CacheImpl.AcknowledgeMessageReceipt(clientId, topicWiseMessageIds, operationContext);
        topicWiseMessageIds = new Dictionary<string, IList<string>>();
    }
}
/// <summary>
/// Copies into 'available'/'data' those keys (and their matching entries) that
/// are NOT already present in 'table', preserving their relative order.
/// </summary>
/// <param name="keys">Keys to examine.</param>
/// <param name="entries">Entries parallel to <paramref name="keys"/>.</param>
/// <param name="available">Receives keys missing from the table.</param>
/// <param name="data">Receives entries for the missing keys.</param>
/// <param name="table">Lookup of keys to exclude.</param>
public static void FillArrays(object[] keys, CacheEntry[] entries, object[] available, CacheEntry[] data, Hashtable table)
{
    int next = 0;
    for (int idx = 0; idx < keys.Length; idx++)
    {
        object key = keys[idx];
        if (!table.Contains(key))
        {
            available[next] = key;
            data[next] = entries[idx];
            next++;
        }
    }
}
/// <summary>
/// Creates a container pairing a cache entry with the attribute values used
/// during query evaluation.
/// </summary>
/// <param name="item">The cache entry under query.</param>
/// <param name="itemAttributes">Attribute name/value pairs for the entry.</param>
public QueryItemContainer(CacheEntry item, Hashtable itemAttributes)
{
    _itemArrtribs = itemAttributes;
    _item = item;
}
/// <summary>
/// Creates a copy of this entry that can be routed to another node. A non-routable
/// expiration hint (or a sync dependency) is replaced by a NodeExpiration bound to
/// the local node; a routable hint is preserved inside an aggregate hint.
/// The three duplicated clone branches were consolidated into CloneWithHint.
/// </summary>
/// <param name="localAddress">Address of the local node used for node expiration; may be null.</param>
/// <returns>A routable copy of this instance.</returns>
internal CacheEntry RoutableClone(Address localAddress)
{
    lock (this)
    {
        if (_syncDependency != null || _exh != null)
        {
            // If the expiration hint itself is non-routable we only need a node
            // expiration to handle both the sync dependency and the expiration;
            // otherwise we need a node expiration for the sync dependency and must
            // also keep the actual routable expiration hint.
            NodeExpiration expiry = null;
            if (localAddress != null)
            {
                expiry = new NodeExpiration(localAddress);
            }

            if (SyncDependency == null)
            {
                if (!_exh.IsRoutable)
                    return CloneWithHint(expiry);
                // Routable hint and no sync dependency: the plain clone below suffices.
            }
            else
            {
                if (_exh != null && _exh.IsRoutable)
                {
                    // Preserve the routable hint; wrap it so it can be aggregated.
                    AggregateExpirationHint aggHint = new AggregateExpirationHint();
                    aggHint.Add(_exh);
                    return CloneWithHint(aggHint);
                }
                return CloneWithHint(expiry);
            }
        }
    }
    return (CacheEntry)Clone();
}

/// <summary>
/// Copies this entry's metadata (group info, bitset, version, timestamps, lock
/// expiration, resync provider, priority) into a new entry using the given hint.
/// </summary>
private CacheEntry CloneWithHint(ExpirationHint hint)
{
    CacheEntry e = new CacheEntry(Value, hint, _evh);
    if (_grpInfo != null)
    {
        e._grpInfo = (GroupInfo)_grpInfo.Clone();
    }
    e._bitset = (BitSet)_bitset.Clone();
    e._version = this._version;
    e._creationTime = this._creationTime;
    e._lastModifiedTime = this._lastModifiedTime;
    if (this.LockMetaInfo != null)
    {
        e.LockExpiration = this.LockExpiration;
    }
    e._resyncProviderName = this._resyncProviderName;
    e.Priority = Priority;
    return e;
}
/// <summary>
/// Overload of Add operation for bulk additions. Uses EvictionHint and ExpirationHint arrays.
/// Successfully added keys are removed from the returned table; failures are replaced
/// with OperationFailedException values keyed by the failing key.
/// </summary>
/// <param name="keys">Keys to add.</param>
/// <param name="enteries">Entries corresponding to the keys.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <returns>Table of per-key failures; successfully added keys are absent.</returns>
public IDictionary Add(string[] keys, CacheEntry[] enteries, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity)
        ServerMonitor.LogClientActivity("Cache.AddBlk", "");   // was "Cache.InsertBlk": copy/paste slip in an Add method
    if (keys == null) throw new ArgumentNullException("keys");
    if (enteries == null) throw new ArgumentNullException("enteries");   // name the actual parameter, not "entries"
    // Removed a catch(Exception){throw;} wrapper that only obscured the flow.
    Hashtable result = _context.CacheImpl.Add(keys, enteries, true, operationContext);
    if (result != null)
    {
        // Enumerate a clone so 'result' itself can be mutated while walking.
        Hashtable tmp = (Hashtable)result.Clone();
        IDictionaryEnumerator ide = tmp.GetEnumerator();
        while (ide.MoveNext())
        {
            if (ide.Value is CacheAddResult)
            {
                CacheAddResult addResult = (CacheAddResult)ide.Value;
                switch (addResult)
                {
                    case CacheAddResult.Failure:
                        break;
                    case CacheAddResult.KeyExists:
                        result[ide.Key] = new OperationFailedException("The specified key already exists.");
                        break;
                    case CacheAddResult.NeedsEviction:
                        result[ide.Key] = new OperationFailedException("The cache is full and not enough items could be evicted.");
                        break;
                    case CacheAddResult.Success:
                        result.Remove(ide.Key);
                        break;
                }
            }
        }
    }
    return result;
}
/// <summary>
/// Adds a pair of key and value to the cache. Throws an exception or reports error
/// if the specified key already exists in the cache.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <returns>returns the result of operation.</returns>
public override CacheAddResult Add(object key, CacheEntry cacheEntry, bool notify, OperationContext operationContext)
{
    // Without an inner cache the add cannot be performed.
    if (Internal == null)
        return CacheAddResult.Failure;

    return Internal.Add(key, cacheEntry, notify, operationContext);
}
/// <summary>
/// Converts a CompactCacheEntry into a full CacheEntry, decoding the packed
/// options value (bit 0 = absolute-expiration flag; bits 1+ = priority offset by +2).
/// The CompactCacheEntry may have been serialized.
/// </summary>
/// <param name="cce">The compact entry to convert.</param>
/// <returns>The equivalent cache entry.</returns>
CacheEntry MakeCacheEntry(CompactCacheEntry cce)
{
    bool isAbsolute = false;
    int priority = (int)CacheItemPriority.Normal;
    int opt = (int)cce.Options;
    // 255 is the "no options" sentinel; otherwise unpack flag and priority.
    if (opt != 255)
    {
        isAbsolute = (opt & 1) == 1;
        priority = (opt >> 2) - 2;
    }
    ExpirationHint eh = ExpirationHelper.MakeExpirationHint(cce.Expiration, isAbsolute);
    CacheEntry entry = new CacheEntry(cce.Value, eh, new PriorityEvictionHint((CacheItemPriority)priority));
    entry.QueryInfo = cce.QueryInfo;
    entry.Flag = cce.Flag;
    entry.LockId = cce.LockId;
    entry.LockAccessType = cce.LockAccessType;
    return entry;
}
/// <summary>
/// Overload of Add operation. Uses an additional Flag parameter indicating e.g.
/// whether the value is compressed. Validates serializability of all inputs,
/// then times the add via the perf-stats collector.
/// </summary>
/// <param name="key">Key of the entry; must be serializable.</param>
/// <param name="value">Value of the entry; must be serializable.</param>
/// <param name="expiryHint">Optional expiration hint; must be serializable when present.</param>
/// <param name="evictionHint">Optional eviction hint; must be serializable when present.</param>
/// <param name="queryInfo">Query metadata attached to the entry.</param>
/// <param name="flag">Flag bits OR-ed into the entry's flag.</param>
/// <param name="operationContext">Context; may carry ValueDataSize for in-proc sizing.</param>
public void Add(object key, object value, ExpirationHint expiryHint, EvictionHint evictionHint, Hashtable queryInfo, BitSet flag, OperationContext operationContext)
{
    if (key == null) throw new ArgumentNullException("key");
    if (value == null) throw new ArgumentNullException("value");
    if (!key.GetType().IsSerializable) throw new ArgumentException("key is not serializable");
    if (!value.GetType().IsSerializable) throw new ArgumentException("value is not serializable");
    if ((expiryHint != null) && !expiryHint.GetType().IsSerializable) throw new ArgumentException("expiryHint is not serializable");
    if ((evictionHint != null) && !evictionHint.GetType().IsSerializable) throw new ArgumentException("evictionHint is not serializable");

    // Cache has possibly expired so do default.
    if (!IsRunning) return;

    CacheEntry e = new CacheEntry(value, expiryHint, evictionHint);

    // Object size for inproc, when supplied by the caller.
    object dataSize = operationContext.GetValueByField(OperationContextFieldName.ValueDataSize);
    if (dataSize != null) e.DataSize = Convert.ToInt64(dataSize);
    e.QueryInfo = queryInfo;
    e.Flag.Data |= flag.Data;

    // Removed a catch(Exception inner){throw;} wrapper: it rethrew unchanged and
    // left 'inner' unused.
    HPTimeStats addTime = new HPTimeStats();
    _context.PerfStatsColl.MsecPerAddBeginSample();
    addTime.BeginSample();
    Add(key, e, operationContext);
    addTime.EndSample();
    _context.PerfStatsColl.MsecPerAddEndSample();
}
/// <summary>
/// Adds key and value pairs to the cache. If any of the specified keys already
/// exists in the cache it is updated; otherwise a new item is added.
/// </summary>
/// <param name="keys">keys of the entries.</param>
/// <param name="cacheEntries">the cache entries.</param>
/// <returns>returns the results for inserted keys</returns>
public override Hashtable Insert(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    // No inner cache means nothing was inserted.
    if (Internal == null)
        return null;

    return Internal.Insert(keys, cacheEntries, notify, operationContext);
}
/// <summary>
/// Adds a pair of key and value to the cache. If the specified key already exists
/// in the cache it is updated; otherwise a new item is added.
/// </summary>
/// <param name="key">key of the entry.</param>
/// <param name="cacheEntry">the cache entry.</param>
/// <returns>returns the result of operation.</returns>
public override CacheInsResultWithEntry Insert(object key, CacheEntry cacheEntry, bool notify, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    // Default (empty) result when no inner cache is available.
    if (Internal == null)
        return new CacheInsResultWithEntry();

    return Internal.Insert(key, cacheEntry, notify, lockId, accessType, operationContext);
}
/// <summary>
/// Adds key and value pairs to the cache. Throws an exception or returns the
/// list of keys that already exist in the cache.
/// </summary>
/// <param name="keys">key of the entry.</param>
/// <param name="cacheEntries">the cache entry.</param>
/// <returns>List of keys that are added or that already exist in the cache and their status</returns>
public override Hashtable Add(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    // With no inner cache, report an empty result rather than null.
    if (Internal == null)
        return new Hashtable();

    return Internal.Add(keys, cacheEntries, notify, operationContext);
}
/// <summary>
/// Initializes the event entry from a cache entry, capturing its priority.
/// </summary>
/// <param name="cacheEntry">Entry whose priority is recorded.</param>
public EventCacheEntry(CacheEntry cacheEntry)
{
    this.Priority = (int)cacheEntry.Priority;
}
/// <summary>
/// Internal Add operation. Does write-through as well. Maps the implementation's
/// add result onto statistics updates or the appropriate exception.
/// </summary>
/// <param name="key">Key to add.</param>
/// <param name="e">Entry to store.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <exception cref="OperationFailedException">When the key already exists, eviction
/// could not free enough space, or the underlying store fails.</exception>
public void Add(object key, CacheEntry e, OperationContext operationContext)
{
    // Removed unused local 'value' that merely aliased e.Value.
    try
    {
        if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("Cache.Add", key as string);
        CacheAddResult result = _context.CacheImpl.Add(key, e, true, operationContext);
        switch (result)
        {
            case CacheAddResult.Failure:
                break;
            case CacheAddResult.NeedsEviction:
                throw new OperationFailedException("The cache is full and not enough items could be evicted.", false);
            case CacheAddResult.KeyExists:
                throw new OperationFailedException("The specified key already exists.", false);
            case CacheAddResult.Success:
                _context.PerfStatsColl.IncrementAddPerSecStats();
                break;
        }
    }
    catch (OperationFailedException inner)
    {
        if (inner.IsTracable) _context.NCacheLog.Error("Cache.Add():", inner.ToString());
        throw;
    }
    catch (Exception inner)
    {
        _context.NCacheLog.Error("Cache.Add():", inner.ToString());
        throw new OperationFailedException("Add operation failed. Error : " + inner.Message, inner);
    }
}
/// <summary>
/// Creates a copy of the current instance that carries all metadata but not the
/// value; only a callback wrapper (with its payload stripped) is transferred.
/// </summary>
/// <returns>A new object that is a copy of this instance without value.</returns>
public CacheEntry CloneWithoutValue()
{
    CacheEntry copy = new CacheEntry();
    lock (this)
    {
        copy._exh = _exh;
        copy._evh = _evh;
        copy._bitset = (BitSet)_bitset.Clone();
        copy._queryInfo = _queryInfo;
        copy._size = _size;
        copy._creationTime = this._creationTime;
        copy._lastModifiedTime = this._lastModifiedTime;
        copy._type = _type;

        // Lock bookkeeping is transferred only when lock metadata exists.
        if (this.LockMetaInfo != null)
        {
            copy.LockId = this.LockId;
            copy.LockDate = this.LockDate;
            copy.LockAge = this.LockAge;
            copy.LockExpiration = this.LockExpiration;
            copy.LockMetaInfo.LockManager = this.LockMetaInfo.LockManager;
        }

        // Keep the callback wrapper but drop the payload it carries.
        CallbackEntry cbEntry = this.Value as CallbackEntry;
        if (cbEntry != null)
        {
            CallbackEntry stripped = cbEntry.Clone() as CallbackEntry;
            stripped.Value = null;
            copy.Value = stripped;
        }
    }
    return copy;
}
/// <summary>
/// Internal Add operation for bulk additions. Does write-through as well.
/// Successful keys are removed from the returned table; per-key failures are
/// replaced with OperationFailedException instances describing the cause.
/// </summary>
/// <param name="keys">Keys to add.</param>
/// <param name="entries">Entries corresponding to the keys.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <returns>Table of per-key failures; successful keys are absent.</returns>
private Hashtable Add(object[] keys, CacheEntry[] entries, OperationContext operationContext)
{
    try
    {
        // Removed a redundant 'new Hashtable()' that was immediately overwritten.
        Hashtable result = _context.CacheImpl.Add(keys, entries, true, operationContext);
        if (result != null)
        {
            // Walk a clone so the live table can be mutated during enumeration.
            Hashtable tmp = (Hashtable)result.Clone();
            IDictionaryEnumerator ide = tmp.GetEnumerator();
            while (ide.MoveNext())
            {
                if (ide.Value is CacheAddResult)
                {
                    CacheAddResult addResult = (CacheAddResult)ide.Value;
                    switch (addResult)
                    {
                        case CacheAddResult.Failure:
                            break;
                        case CacheAddResult.KeyExists:
                            result[ide.Key] = new OperationFailedException("The specified key already exists.");
                            break;
                        case CacheAddResult.NeedsEviction:
                            result[ide.Key] = new OperationFailedException("The cache is full and not enough items could be evicted.");
                            break;
                        case CacheAddResult.Success:
                            result.Remove(ide.Key);
                            break;
                    }
                }
            }
        }
        return result;
    }
    catch (OperationFailedException inner)
    {
        if (inner.IsTracable) _context.NCacheLog.Error("Cache.Add():", inner.ToString());
        throw;
    }
    catch (Exception inner)
    {
        _context.NCacheLog.Error("Cache.Add():", inner.ToString());
        throw new OperationFailedException("Add operation failed. Error : " + inner.Message, inner);
    }
}
/// <summary>
/// Resizes the array to the given length, truncating or zero-padding as needed.
/// A null array, or one already at the requested length, is left untouched.
/// </summary>
/// <param name="array">Array to resize; the reference is replaced with the new array.</param>
/// <param name="newLength">Desired length.</param>
internal static void Resize(ref CacheEntry[] array, int newLength)
{
    if (array == null || array.Length == newLength)
        return;
    // Array.Resize allocates the target array and copies Min(old, new) elements,
    // replacing the original hand-rolled copy loop with identical semantics.
    Array.Resize(ref array, newLength);
}
/// <summary>
/// Inserts (add-or-update) a key/value pair with optional expiration/eviction
/// hints, query info and flags, ignoring any lock on the item.
/// </summary>
/// <param name="key">Key of the entry; must be serializable.</param>
/// <param name="value">Value of the entry; must be serializable.</param>
/// <param name="expiryHint">Optional expiration hint; must be serializable when present.</param>
/// <param name="evictionHint">Optional eviction hint; must be serializable when present.</param>
/// <param name="queryInfo">Query metadata attached to the entry.</param>
/// <param name="flag">Flag bits OR-ed into the entry's flag.</param>
/// <param name="operationContext">Context for the operation.</param>
public void Insert(object key, object value, ExpirationHint expiryHint, EvictionHint evictionHint, Hashtable queryInfo, BitSet flag, OperationContext operationContext)
{
    if (key == null) throw new ArgumentNullException("key");
    if (value == null) throw new ArgumentNullException("value");
    if (!key.GetType().IsSerializable) throw new ArgumentException("key is not serializable");
    if (!value.GetType().IsSerializable) throw new ArgumentException("value is not serializable");
    if ((expiryHint != null) && !expiryHint.GetType().IsSerializable) throw new ArgumentException("expiryHint is not serializable");   // fixed doubled "not not" in the message
    if ((evictionHint != null) && !evictionHint.GetType().IsSerializable) throw new ArgumentException("evictionHint is not serializable");

    // Cache has possibly expired so do default.
    if (!IsRunning) return;

    CacheEntry e = new CacheEntry(value, expiryHint, evictionHint);
    e.QueryInfo = queryInfo;
    e.Flag.Data |= flag.Data;

    // update the counters for various statistics
    try
    {
        // Removed dead locals ('CacheEntry clone; clone = e;') that were never read.
        _context.PerfStatsColl.MsecPerUpdBeginSample();
        Insert(key, e, null, LockAccessType.IGNORE_LOCK, operationContext);
        _context.PerfStatsColl.MsecPerUpdEndSample();
    }
    catch (Exception inner)
    {
        _context.NCacheLog.CriticalInfo("Cache.Insert():", inner.ToString());
        throw;
    }
}
/// <summary>
/// Wraps a cache entry's value, flags and type into a CompressedValueEntry that
/// also keeps a reference back to the originating entry.
/// </summary>
/// <param name="poolmanger">Pool manager used to allocate the wrapper.</param>
/// <param name="entry">Entry to wrap.</param>
/// <returns>The populated compressed-value wrapper.</returns>
public static CompressedValueEntry CreateCompressedCacheEntry(PoolManager poolmanger, CacheEntry entry)
{
    CompressedValueEntry wrapper = CreateCompressedCacheEntry(poolmanger, entry.Value, entry.Flag, entry.Type);
    wrapper.Entry = entry;
    return wrapper;
}
/// <summary>
/// Inserts (add-or-update) a key/value pair with hints, query info, flags and lock
/// information. Validates serializability of all inputs before touching the cache.
/// </summary>
/// <param name="key">Key of the entry; must be serializable.</param>
/// <param name="value">Value of the entry; must be serializable.</param>
/// <param name="expiryHint">Optional expiration hint; must be serializable when present.</param>
/// <param name="evictionHint">Optional eviction hint; must be serializable when present.</param>
/// <param name="queryInfo">Query metadata attached to the entry.</param>
/// <param name="flag">Flag bits OR-ed into the entry's flag.</param>
/// <param name="lockId">Lock id for lock-aware access.</param>
/// <param name="accessType">How the lock should be honored.</param>
/// <param name="operationContext">Context; may carry ValueDataSize for in-proc sizing.</param>
public void Insert(object key, object value, ExpirationHint expiryHint, EvictionHint evictionHint, Hashtable queryInfo, BitSet flag, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    try
    {
        if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("Cache.Insert", "");
        if (key == null) throw new ArgumentNullException("key");
        if (value == null) throw new ArgumentNullException("value");
        if (!key.GetType().IsSerializable) throw new ArgumentException("key is not serializable");
        if (!value.GetType().IsSerializable) throw new ArgumentException("value is not serializable");
        if ((expiryHint != null) && !expiryHint.GetType().IsSerializable) throw new ArgumentException("expiryHint is not serializable");   // fixed doubled "not not" in the message
        if ((evictionHint != null) && !evictionHint.GetType().IsSerializable) throw new ArgumentException("evictionHint is not serializable");

        // Cache has possibly expired so do default.
        if (!IsRunning) return;

        CacheEntry e = new CacheEntry(value, expiryHint, evictionHint);
        e.QueryInfo = queryInfo;
        e.Flag.Data |= flag.Data;

        // In-proc object size supplied by the caller, if any.
        object dataSize = operationContext.GetValueByField(OperationContextFieldName.ValueDataSize);
        if (dataSize != null) e.DataSize = Convert.ToInt64(dataSize);

        /// update the counters for various statistics
        _context.PerfStatsColl.MsecPerUpdBeginSample();
        Insert(key, e, lockId, accessType, operationContext);
        _context.PerfStatsColl.MsecPerUpdEndSample();
    }
    catch (Exception inner)
    {
        if (_context.NCacheLog.IsErrorEnabled) _context.NCacheLog.Error("Cache.Insert():", inner.ToString());
        throw;
    }
}
/// <summary>
/// Initializes the event entry from the given cache entry, recording its priority.
/// </summary>
/// <param name="cacheEntry">Entry whose priority is captured.</param>
public EventCacheEntry(CacheEntry cacheEntry)
{
    this.Priority = (int)cacheEntry.Priority;
}
/// <summary>
/// Internal Insert operation. Does a write thru as well. Maps the cascaded insert
/// result onto statistics updates or the appropriate exception.
/// </summary>
/// <param name="key">Key to insert.</param>
/// <param name="e">Entry to store.</param>
/// <param name="lockId">Lock id for lock-aware access.</param>
/// <param name="accessType">How the lock should be honored.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <exception cref="OperationFailedException">When eviction cannot free space or the store fails.</exception>
/// <exception cref="LockingException">When the item is locked by another owner.</exception>
private void Insert(object key, CacheEntry e, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    HPTimeStats insertTime = new HPTimeStats();
    insertTime.BeginSample();
    // Removed unused local 'value' that merely aliased e.Value.
    try
    {
        CacheInsResultWithEntry retVal = CascadedInsert(key, e, true, lockId, accessType, operationContext);
        insertTime.EndSample();
        switch (retVal.Result)
        {
            case CacheInsResult.Failure:
                break;
            case CacheInsResult.NeedsEviction:
            case CacheInsResult.NeedsEvictionNotRemove:
                throw new OperationFailedException("The cache is full and not enough items could be evicted.", false);
            case CacheInsResult.SuccessOverwrite:
                _context.PerfStatsColl.IncrementUpdPerSecStats();
                break;
            case CacheInsResult.Success:
                _context.PerfStatsColl.IncrementAddPerSecStats();
                break;
            case CacheInsResult.ItemLocked:
                throw new LockingException("Item is locked.");
        }
    }
    catch (OperationFailedException inner)
    {
        if (inner.IsTracable) _context.NCacheLog.Error("Cache.Insert():", inner.ToString());
        throw;
    }
    catch (Exception inner)
    {
        _context.NCacheLog.Error("Cache.Insert():", inner.ToString());
        _context.NCacheLog.CriticalInfo("Cache.Insert():", inner.ToString());
        throw new OperationFailedException("Insert operation failed. Error : " + inner.Message, inner);
    }
}
/// <summary>
/// Fill available keys and their relative data, treating 'list' as the set of
/// keys to exclude; delegates to the Hashtable-based overload.
/// </summary>
/// <param name="keys">Keys to examine.</param>
/// <param name="entries">Entries parallel to <paramref name="keys"/>.</param>
/// <param name="available">Receives keys missing from the list.</param>
/// <param name="data">Receives entries for the missing keys.</param>
/// <param name="list">Keys to exclude.</param>
public static void FillArrays(object[] keys, CacheEntry[] entries, object[] available, CacheEntry[] data, ArrayList list)
{
    // Convert the list into a lookup table for the main overload.
    Hashtable exclusions = new Hashtable(list.Count);
    foreach (object key in list)
        exclusions.Add(key, "");
    FillArrays(keys, entries, available, data, exclusions);
}
/// <summary>
/// Overload of Insert operation for bulk inserts. Uses a single EvictionHint and
/// ExpirationHint shared by all entries. Validates every key/value pair before
/// delegating to the bulk insert.
/// </summary>
/// <param name="keys">Keys to insert.</param>
/// <param name="values">Values parallel to <paramref name="keys"/>.</param>
/// <param name="expiryHint">Shared expiration hint; must be serializable when present.</param>
/// <param name="evictionHint">Shared eviction hint; must be serializable when present.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <returns>Per-key results of the bulk insert, or null when the cache is not running.</returns>
public IDictionary Insert(object[] keys, object[] values, ExpirationHint expiryHint, EvictionHint evictionHint, OperationContext operationContext)
{
    if (keys == null) throw new ArgumentNullException("keys");
    if (values == null) throw new ArgumentNullException("values");   // was "items": name the actual parameter
    if (keys.Length != values.Length) throw new ArgumentException("keys count is not equals to values count");

    CacheEntry[] ce = new CacheEntry[values.Length];
    for (int i = 0; i < values.Length; i++)
    {
        object key = keys[i];
        object value = values[i];
        if (key == null) throw new ArgumentNullException("key");
        if (value == null) throw new ArgumentNullException("value");
        if (!key.GetType().IsSerializable) throw new ArgumentException("key is not serializable");
        if (!value.GetType().IsSerializable) throw new ArgumentException("value is not serializable");
        if ((expiryHint != null) && !expiryHint.GetType().IsSerializable) throw new ArgumentException("expiryHint is not serializable");   // fixed doubled "not not"
        if ((evictionHint != null) && !evictionHint.GetType().IsSerializable) throw new ArgumentException("evictionHint is not serializable");

        // Cache has possibly expired so do default.
        if (!IsRunning) return null;
        ce[i] = new CacheEntry(value, expiryHint, evictionHint);
    }
    /// update the counters for various statistics
    // Removed a catch(Exception){throw;} wrapper that rethrew unchanged.
    return Insert(keys, ce, operationContext);
}
/// <summary>
/// Builds an event cache entry from a clone of the given entry, copying its flags
/// and — when the filter requests full data — its (unwrapped) value.
/// Returns null when the filter is None or the entry is missing.
/// </summary>
/// <param name="filter">Requested data filter; null is treated as non-None.</param>
/// <param name="cacheEntry">Entry the event refers to.</param>
/// <returns>The event entry, or null.</returns>
public static EventCacheEntry CreateCacheEventEntry(EventDataFilter? filter, CacheEntry cacheEntry)
{
    if (filter == EventDataFilter.None || cacheEntry == null)
        return null;

    // Work on a clone so the live entry is not shared with event consumers.
    cacheEntry = (CacheEntry)cacheEntry.Clone();
    EventCacheEntry entry = new EventCacheEntry(cacheEntry);
    entry.Flags = cacheEntry.Flag;

    if (filter == EventDataFilter.DataWithMetadata)
    {
        // Unwrap a callback entry so the raw value is exposed.
        CallbackEntry cbEntry = cacheEntry.Value as CallbackEntry;
        entry.Value = cbEntry != null ? cbEntry.Value : cacheEntry.Value;
    }
    return entry;
}
/// <summary>
/// Overload of Insert operation for bulk inserts. Uses per-item callback entries,
/// expiration/eviction hints, query info and flags. Validates every pair, builds
/// the entries (wrapping values in their callback entries when supplied), and
/// delegates to the bulk insert while timing it.
/// </summary>
/// <param name="keys">Keys to insert.</param>
/// <param name="values">Values parallel to <paramref name="keys"/>.</param>
/// <param name="callbackEnteries">Optional per-item callback wrappers.</param>
/// <param name="expirations">Per-item expiration hints.</param>
/// <param name="evictions">Per-item eviction hints.</param>
/// <param name="queryInfos">Per-item query metadata.</param>
/// <param name="flags">Per-item flag bits OR-ed into each entry.</param>
/// <param name="operationContext">Context; may carry ValueDataSize (long[]) for sizing.</param>
/// <returns>Per-key results of the bulk insert, or null when the cache is not running.</returns>
public IDictionary Insert(object[] keys, object[] values, CallbackEntry[] callbackEnteries, ExpirationHint[] expirations, EvictionHint[] evictions, Hashtable[] queryInfos, BitSet[] flags, OperationContext operationContext)
{
    if (ServerMonitor.MonitorActivity) ServerMonitor.LogClientActivity("Cache.InsertBlk", "");
    if (keys == null) throw new ArgumentNullException("keys");
    if (values == null) throw new ArgumentNullException("values");   // was "items": name the actual parameter
    if (keys.Length != values.Length) throw new ArgumentException("keys count is not equals to values count");

    CacheEntry[] ce = new CacheEntry[values.Length];
    long[] sizes = null;
    object dataSize = operationContext.GetValueByField(OperationContextFieldName.ValueDataSize);
    if (dataSize != null)
    {
        sizes = (long[])dataSize;
    }

    for (int i = 0; i < values.Length; i++)
    {
        if (keys[i] == null) throw new ArgumentNullException("key");
        if (values[i] == null) throw new ArgumentNullException("value");
        if (!keys[i].GetType().IsSerializable) throw new ArgumentException("key is not serializable");
        if (!values[i].GetType().IsSerializable) throw new ArgumentException("value is not serializable");
        if ((expirations[i] != null) && !expirations[i].GetType().IsSerializable) throw new ArgumentException("expiryHint is not serializable");   // fixed doubled "not not"
        if ((evictions[i] != null) && !evictions[i].GetType().IsSerializable) throw new ArgumentException("evictionHint is not serializable");

        // Cache has possibly expired so do default.
        if (!IsRunning) return null;

        ce[i] = new CacheEntry(values[i], expirations[i], evictions[i]);
        ce[i].QueryInfo = queryInfos[i];
        ce[i].Flag.Data |= flags[i].Data;
        if (sizes != null) ce[i].DataSize = sizes[i];

        // Wrap the value in its callback entry when one was supplied.
        if (callbackEnteries[i] != null)
        {
            CallbackEntry cloned = callbackEnteries[i].Clone() as CallbackEntry;
            cloned.Value = values[i];
            cloned.Flag = ce[i].Flag;
            ce[i].Value = cloned;
        }
    }
    /// update the counters for various statistics
    // Removed a catch(Exception){throw;} wrapper that rethrew unchanged.
    HPTimeStats insertTime = new HPTimeStats();
    insertTime.BeginSample();
    IDictionary result = Insert(keys, ce, operationContext);
    insertTime.EndSample();
    return result;
}
/// <summary>
/// Checks whether the lock (if any) held on the existing entry matches the lock
/// id carried by the new entry. An unlocked existing entry is always compatible.
/// </summary>
/// <param name="existingEntry">Entry currently present in the cache.</param>
/// <param name="newEntry">Incoming entry whose lock id is compared.</param>
/// <returns>true when unlocked or the lock ids are equal; otherwise false.</returns>
public static bool CheckLockCompatibility(CacheEntry existingEntry, CacheEntry newEntry)
{
    object lockId = null;
    DateTime lockDate = new DateTime();
    bool isLocked = existingEntry.IsLocked(ref lockId, ref lockDate);
    return !isLocked || existingEntry.LockId.Equals(newEntry.LockId);
}
/// <summary>
/// Internal Insert operation. Does a write thru as well. Keys inserted or
/// overwritten successfully are removed from the returned table; eviction
/// failures are reported as OperationFailedException values.
/// </summary>
/// <param name="keys">Keys to insert.</param>
/// <param name="entries">Entries corresponding to the keys.</param>
/// <param name="operationContext">Context for the operation.</param>
/// <returns>Table of per-key failures; successful keys are absent.</returns>
public Hashtable Insert(object[] keys, CacheEntry[] entries, OperationContext operationContext)
{
    try
    {
        Hashtable result = CascadedInsert(keys, entries, true, operationContext);
        if (result != null)
        {
            // Enumerate a snapshot so 'result' can be mutated as we go.
            IDictionaryEnumerator ide = ((Hashtable)result.Clone()).GetEnumerator();
            while (ide.MoveNext())
            {
                CacheInsResultWithEntry insResult = ide.Value as CacheInsResultWithEntry;
                if (insResult == null)
                    continue;
                switch (insResult.Result)
                {
                    case CacheInsResult.Failure:
                        break;
                    case CacheInsResult.NeedsEviction:
                        result[ide.Key] = new OperationFailedException("The cache is full and not enough items could be evicted.");
                        break;
                    case CacheInsResult.Success:
                    case CacheInsResult.SuccessOverwrite:
                        result.Remove(ide.Key);
                        break;
                }
            }
        }
        return result;
    }
    catch (OperationFailedException inner)
    {
        if (inner.IsTracable) _context.NCacheLog.Error("Cache.Insert()", inner.ToString());
        throw;
    }
    catch (Exception inner)
    {
        _context.NCacheLog.Error("Cache.Insert()", inner.ToString());
        throw new OperationFailedException("Insert operation failed. Error : " + inner.Message, inner);
    }
}
/// <summary>
/// Determines the widest data filter requested by the given callback listeners
/// and builds the corresponding event cache entry.
/// </summary>
/// <param name="listeners">Callback listeners (CallbackInfo items).</param>
/// <param name="cacheEntry">Entry the event refers to.</param>
/// <returns>The event entry for the widest requested filter, or null.</returns>
public static EventCacheEntry CreateCacheEventEntry(ArrayList listeners, CacheEntry cacheEntry)
{
    EventDataFilter widest = EventDataFilter.None;
    foreach (CallbackInfo cbInfo in listeners)
    {
        if (cbInfo.DataFilter > widest)
            widest = cbInfo.DataFilter;
        // DataWithMetadata is the maximum filter — no need to scan further.
        if (widest == EventDataFilter.DataWithMetadata)
            break;
    }
    return CreateCacheEventEntry(widest, cacheEntry);
}
/// <summary>
/// Performs a single-key insert through the underlying cache implementation
/// (the write-through chain), honoring the supplied lock information.
/// </summary>
internal CacheInsResultWithEntry CascadedInsert(object key, CacheEntry entry, bool notify, object lockId, LockAccessType accessType, OperationContext operationContext)
{
    return _context.CacheImpl.Insert(key, entry, notify, lockId, accessType, operationContext);
}
/// <summary>
/// Updates this entry's last-modified timestamp from the given entry.
/// </summary>
/// <param name="entry">Entry supplying the new last-modified time.</param>
public void UpdateLastModifiedTime(CacheEntry entry)
{
    lock (this)
    {
        // BUG FIX: previously copied entry.CreationTime into _creationTime, leaving
        // the last-modified time untouched despite the method's name and contract.
        this._lastModifiedTime = entry.LastModifiedTime;
    }
}
/// <summary>
/// Performs a bulk insert through the underlying cache implementation
/// (the write-through chain).
/// </summary>
internal Hashtable CascadedInsert(object[] keys, CacheEntry[] cacheEntries, bool notify, OperationContext operationContext)
{
    return _context.CacheImpl.Insert(keys, cacheEntries, notify, operationContext);
}
/// <summary>
/// Creates a copy of the current instance: the value and hints are shared, the
/// bit set is cloned, and lock/timestamp metadata is copied under the lock.
/// </summary>
/// <returns>A new object that is a copy of this instance.</returns>
public object Clone()
{
    CacheEntry copy = new CacheEntry(Value, _exh, _evh);
    lock (this)
    {
        copy._bitset = (BitSet)_bitset.Clone();
        copy.Priority = Priority;
        copy._queryInfo = _queryInfo;
        copy._size = _size;
        copy._creationTime = this._creationTime;
        copy._lastModifiedTime = this._lastModifiedTime;
        copy._type = this._type;

        // Lock bookkeeping is copied only when lock metadata exists.
        if (this.LockMetaInfo != null)
        {
            copy.LockId = this.LockId;
            copy.LockDate = this.LockDate;
            copy.LockAge = this.LockAge;
            copy.LockExpiration = this.LockExpiration;
            copy.LockMetaInfo.LockManager = this.LockMetaInfo.LockManager;
        }
    }
    return copy;
}
/// <summary>
/// Produces a clone of the given entry; the user payload and its size are not
/// extracted here and are always reported as null/zero.
/// </summary>
/// <param name="cacheEntry">Entry to clone.</param>
/// <param name="entry">Receives the cloned entry.</param>
/// <param name="userPayload">Always set to null by this implementation.</param>
/// <param name="payLoadSize">Always set to 0 by this implementation.</param>
public void GetEntryClone(CacheEntry cacheEntry, out CacheEntry entry, out Array userPayload, out long payLoadSize)
{
    userPayload = null;
    payLoadSize = 0;
    entry = cacheEntry.Clone() as CacheEntry;
}