/// <summary>
/// Optimized enqueue: adds <paramref name="operation"/> at the _tail index. If an
/// operation for the same key is already queued, it is replaced in place (keeping
/// its original position) and the replaced operation's pooled items are returned
/// to the transactional pool.
/// </summary>
/// <param name="cacheKey">Key the operation applies to.</param>
/// <param name="operation">The operation to enqueue.</param>
/// <returns>true if a new entry was added; false if an existing entry was replaced.</returns>
internal bool Enqueue(object cacheKey, IOptimizedQueueOperation operation)
{
    bool isNewItem = true;
    lock (_sync_mutex)
    {
        // Optimized queue: if this cache key is already mapped to a queue index,
        // just swap the operation in place without changing its order in the queue.
        if (_keyToIndexMap.ContainsKey(cacheKey))
        {
            int existingIndex = (int)_keyToIndexMap[cacheKey];
            IOptimizedQueueOperation oldOperation = _queue[existingIndex] as IOptimizedQueueOperation;
            _queue[existingIndex] = operation;
            isNewItem = false;
            _size -= oldOperation.Size; // subtract the replaced operation's size
            _size += operation.Size;
            oldOperation.ReturnPooledItemsToPool(_context?.TransactionalPoolManager);
            return isNewItem;
        }

        // _tail has reached int.MaxValue: reinitialize it and remember the wrap so
        // Dequeue's "_head < _tail" check remains valid until _head wraps as well.
        if (_tail == int.MaxValue)
        {
            _tail = -1;
            _tailMaxReached = true;
        }

        int index = ++_tail;
        _size += operation.Size;
        _queue.Add(index, operation);     // add the new operation at the tail of the queue
        _keyToIndexMap[cacheKey] = index; // update (cache key -> queue index) map
        _indexToKeyMap[index] = cacheKey; // update (queue index -> cache key) map
        if (isNewItem)
        {
            _count++;
        }
    }
    return isNewItem;
}
/// <summary>
/// Removes and returns the next valid operation from the head of the queue.
/// Indices whose operations were removed (stale slots) are skipped.
/// </summary>
/// <returns>The next queued operation, or null if the queue is empty.</returns>
internal IOptimizedQueueOperation Dequeue()
{
    IOptimizedQueueOperation operation = null;
    lock (_sync_mutex)
    {
        int index = 0;
        // Advance _head until a valid operation is found or the queue is exhausted.
        do
        {
            // _tailMaxReached covers the wrap-around case: _tail was reinitialized
            // after hitting int.MaxValue, so "_head < _tail" is temporarily false
            // even though items remain.
            if (_head < _tail || _tailMaxReached)
            {
                if (_head == int.MaxValue)
                {
                    // _head has wrapped as well; normal ordering is restored.
                    _head = -1;
                    _tailMaxReached = false;
                }

                index = ++_head;
                // Fetch the operation at the head of the queue; a null entry means
                // the slot is stale and the loop advances to the next index.
                operation = _queue[index] as IOptimizedQueueOperation;
                if (operation != null)
                {
                    string cacheKey = _indexToKeyMap[index] as string;
                    _keyToIndexMap.Remove(cacheKey); // update map
                    _indexToKeyMap.Remove(index);
                    _queue.Remove(index);            // update queue
                    _size -= operation.Size;
                    _count--;
                }
            }
            else
            {
                break; // queue exhausted
            }
        } while (operation == null);
    }
    return operation;
}
/// <summary>
/// Optimized enqueue: adds <paramref name="operation"/> at the _tail index. If an
/// operation for the same key is already queued, it is replaced in place, keeping
/// its original position in the queue.
/// </summary>
/// <param name="cacheKey">Key the operation applies to.</param>
/// <param name="operation">The operation to enqueue.</param>
/// <returns>true if a new entry was added; false if an existing entry was replaced.</returns>
internal bool Enqueue(object cacheKey, IOptimizedQueueOperation operation)
{
    bool isNewItem = true;
    lock (_sync_mutex)
    {
        // Optimized queue: if this cache key is already mapped to a queue index,
        // just swap the operation in place without changing its order in the queue.
        if (_keyToIndexMap.ContainsKey(cacheKey))
        {
            int existingIndex = (int)_keyToIndexMap[cacheKey];
            IOptimizedQueueOperation oldOperation = _queue[existingIndex] as IOptimizedQueueOperation;
            _queue[existingIndex] = operation;
            isNewItem = false;
            _size -= oldOperation.Size; // subtract the replaced operation's size
            _size += operation.Size;
            return isNewItem;
        }

        // _tail has reached int.MaxValue: reinitialize it and remember the wrap so
        // Dequeue's "_head < _tail" check remains valid until _head wraps as well.
        if (_tail == int.MaxValue)
        {
            _tail = -1;
            _tailMaxReached = true;
        }

        int index = ++_tail;
        _size += operation.Size;
        _queue.Add(index, operation);     // add the new operation at the tail of the queue
        _keyToIndexMap[cacheKey] = index; // update (cache key -> queue index) map
        _indexToKeyMap[index] = cacheKey; // update (queue index -> cache key) map
        if (isNewItem)
        {
            _count++;
        }
    }
    return isNewItem;
}
/// <summary>
/// Replication thread function: drains the mirror queue in bulk, replicates the
/// batched operations to the passive node, then sleeps for the configured interval.
/// note: While replicating operations, a dummy '0' sequence id is passed on retry.
/// This sequence id is totally ignored by asynchronous POR, but we are keeping it
/// to maintain the symmetry in API.
/// </summary>
public void Run()
{
    // Reload threshold value from service config; the user may have changed it.
    _bulkKeysToReplicate = ServiceConfiguration.BulkItemsToReplicate;

    IList opCodesToBeReplicated = new ClusteredArrayList(_bulkKeysToReplicate);
    IList infoToBeReplicated = new ClusteredArrayList(_bulkKeysToReplicate);
    IList compilationInfo = new ClusteredArrayList(_bulkKeysToReplicate);
    IList userPayLoad = new ClusteredArrayList();

    try
    {
        // Keep running while not stopped; once stopped, drain any remaining operations.
        while (!stopped || _queue.Count > 0)
        {
            DateTime startedAt = DateTime.Now;
            DateTime finishedAt = DateTime.Now;
            try
            {
                // Batch up to _bulkKeysToReplicate operations from the queue.
                for (int i = 0; _queue.Count > 0 && i < _bulkKeysToReplicate; i++)
                {
                    IOptimizedQueueOperation operation = _queue.Dequeue();
                    DictionaryEntry entry = (DictionaryEntry)operation.Data;
                    opCodesToBeReplicated.Add(entry.Key);
                    infoToBeReplicated.Add(entry.Value);
                    if (operation.UserPayLoad != null)
                    {
                        for (int j = 0; j < operation.UserPayLoad.Length; j++)
                        {
                            userPayLoad.Add(operation.UserPayLoad.GetValue(j));
                        }
                    }
                    compilationInfo.Add(operation.PayLoadSize);
                }

                object[] updateIndexKeys = GetIndexOperations();
                if (!stopped)
                {
                    if (opCodesToBeReplicated.Count > 0 || updateIndexKeys != null)
                    {
                        // Index updates ride along as one extra UpdateIndice operation.
                        if (updateIndexKeys != null)
                        {
                            opCodesToBeReplicated.Add((int)ClusterCacheBase.OpCodes.UpdateIndice);
                            infoToBeReplicated.Add(updateIndexKeys);
                        }
                        _context.CacheImpl.ReplicateOperations(opCodesToBeReplicated, infoToBeReplicated, userPayLoad, compilationInfo, _context.CacheImpl.OperationSequenceId, _context.CacheImpl.CurrentViewId);
                    }
                }

                if (!stopped && _context.PerfStatsColl != null)
                {
                    _context.PerfStatsColl.IncrementMirrorQueueSizeStats(_queue.Count);
                }
            }
            catch (Exception e)
            {
                // On an operation timeout (and no shutdown in progress), retry the
                // same batch once with a dummy '0' sequence/view id.
                if (e.Message.IndexOf("operation timeout", StringComparison.OrdinalIgnoreCase) >= 0 && !_shutdownStatusLatch.IsAnyBitsSet(ShutDownStatus.SHUTDOWN_INPROGRESS))
                {
                    _context.NCacheLog.CriticalInfo("AsyncReplicator.Run", "Bulk operation timeout. Retrying the operation.");
                    try
                    {
                        if (!stopped)
                        {
                            _context.CacheImpl.ReplicateOperations(opCodesToBeReplicated, infoToBeReplicated, userPayLoad, compilationInfo, 0, 0);
                            _context.NCacheLog.CriticalInfo("AsyncReplicator.Run", "RETRY is successfully.");
                        }
                    }
                    catch (Exception ex)
                    {
                        if (_context.NCacheLog.IsErrorEnabled)
                        {
                            _context.NCacheLog.Error("AsyncReplicator.RUN", "Error occurred while retrying operation. " + ex.ToString());
                        }
                    }
                }
                else if (_context.NCacheLog.IsErrorEnabled)
                {
                    _context.NCacheLog.Error("AsyncReplicator.RUN", e.ToString());
                }
            }
            finally
            {
                // Reset the batch buffers for the next iteration.
                opCodesToBeReplicated.Clear();
                infoToBeReplicated.Clear();
                compilationInfo.Clear();
                userPayLoad.Clear();
                finishedAt = DateTime.Now;
            }

            if (_queue.Count > 0)
            {
                continue; // more work pending: skip the sleep and drain immediately
            }
            else if (_queue.Count == 0 && _shutdownStatusLatch.IsAnyBitsSet(ShutDownStatus.SHUTDOWN_INPROGRESS))
            {
                // Queue fully drained during a graceful shutdown: signal completion.
                _shutdownStatusLatch.SetStatusBit(ShutDownStatus.SHUTDOWN_COMPLETED, ShutDownStatus.SHUTDOWN_INPROGRESS);
                return;
            }

            // Sleep out the remainder of the interval (or a full interval if the
            // batch took longer than the interval itself).
            if ((finishedAt.Ticks - startedAt.Ticks) < _interval.Ticks)
            {
                Thread.Sleep(_interval.Subtract(finishedAt.Subtract(startedAt)));
            }
            else
            {
                Thread.Sleep(_interval);
            }
        }
    }
    catch (ThreadAbortException)
    {
        // Replicator thread aborted: normal during forced shutdown.
    }
    catch (ThreadInterruptedException)
    {
        // Replicator thread interrupted: normal during shutdown.
    }
    catch (NullReferenceException)
    {
    }
    catch (Exception e)
    {
        if (!stopped)
        {
            _context.NCacheLog.Error("AsyncReplicator.RUN", "Async replicator stopped. " + e.ToString());
        }
    }
}
/// <summary>
/// Replication thread function: drains the mirror queue in bulk, replicates the
/// batched operations to the passive node, then sleeps for the configured interval.
/// note: While replicating operations, a dummy '0' sequence id is passed on retry.
/// This sequence id is totally ignored by asynchronous POR, but we are keeping it
/// to maintain the symmetry in API.
/// </summary>
public void Run()
{
    ArrayList opCodesToBeReplicated = new ArrayList(_bulkKeysToReplicate);
    ArrayList infoToBeReplicated = new ArrayList(_bulkKeysToReplicate);
    ArrayList compilationInfo = new ArrayList(_bulkKeysToReplicate);
    ArrayList userPayLoad = new ArrayList();

    try
    {
        // Keep running while not stopped; once stopped, drain any remaining operations.
        while (!stopped || _queue.Count > 0)
        {
            DateTime startedAt = DateTime.Now;
            DateTime finishedAt = DateTime.Now;
            try
            {
                // Batch up to _bulkKeysToReplicate operations from the queue.
                for (int i = 0; _queue.Count > 0 && i < _bulkKeysToReplicate; i++)
                {
                    IOptimizedQueueOperation operation = _queue.Dequeue();
                    DictionaryEntry entry = (DictionaryEntry)operation.Data;
                    opCodesToBeReplicated.Add(entry.Key);
                    infoToBeReplicated.Add(entry.Value);
                    if (operation.UserPayLoad != null)
                    {
                        for (int j = 0; j < operation.UserPayLoad.Length; j++)
                        {
                            userPayLoad.Add(operation.UserPayLoad.GetValue(j));
                        }
                    }
                    compilationInfo.Add(operation.PayLoadSize);
                }

                object[] updateIndexKeys = GetIndexOperations();
                if (!stopped)
                {
                    if (opCodesToBeReplicated.Count > 0 || updateIndexKeys != null)
                    {
                        // Index updates ride along as one extra UpdateIndice operation.
                        if (updateIndexKeys != null)
                        {
                            opCodesToBeReplicated.Add((int)ClusterCacheBase.OpCodes.UpdateIndice);
                            infoToBeReplicated.Add(updateIndexKeys);
                        }
                        _context.CacheImpl.ReplicateOperations(opCodesToBeReplicated.ToArray(), infoToBeReplicated.ToArray(), userPayLoad.ToArray(), compilationInfo, _context.CacheImpl.OperationSequenceId, _context.CacheImpl.CurrentViewId);
                    }
                }

                if (!stopped && _context.PerfStatsColl != null)
                {
                    _context.PerfStatsColl.IncrementMirrorQueueSizeStats(_queue.Count);
                }
            }
            catch (Exception e)
            {
                // On an operation timeout, retry the same batch once with a dummy
                // '0' sequence/view id.
                if (e.Message.IndexOf("operation timeout", StringComparison.OrdinalIgnoreCase) >= 0)
                {
                    _context.NCacheLog.CriticalInfo("AsyncReplicator.Run", "Bulk operation timedout. Retrying the operation.");
                    try
                    {
                        if (!stopped)
                        {
                            _context.CacheImpl.ReplicateOperations(opCodesToBeReplicated.ToArray(), infoToBeReplicated.ToArray(), userPayLoad.ToArray(), compilationInfo, 0, 0);
                            _context.NCacheLog.CriticalInfo("AsyncReplicator.Run", "RETRY is successfull.");
                        }
                    }
                    catch (Exception ex)
                    {
                        if (_context.NCacheLog.IsErrorEnabled)
                        {
                            _context.NCacheLog.Error("AsyncReplicator.RUN", "Error occurred while retrying operation. " + ex.ToString());
                        }
                    }
                }
                else if (_context.NCacheLog.IsErrorEnabled)
                {
                    _context.NCacheLog.Error("AsyncReplicator.RUN", e.ToString());
                }
            }
            finally
            {
                // Reset the batch buffers for the next iteration.
                opCodesToBeReplicated.Clear();
                infoToBeReplicated.Clear();
                compilationInfo.Clear();
                userPayLoad.Clear();
                finishedAt = DateTime.Now;
            }

            if (_queue.Count > 0)
            {
                continue; // more work pending: skip the sleep and drain immediately
            }

            // Sleep out the remainder of the interval (or a full interval if the
            // batch took longer than the interval itself).
            if ((finishedAt.Ticks - startedAt.Ticks) < _interval.Ticks)
            {
                Thread.Sleep(_interval.Subtract(finishedAt.Subtract(startedAt)));
            }
            else
            {
                Thread.Sleep(_interval);
            }
        }
    }
    catch (ThreadAbortException)
    {
        // Replicator thread aborted: normal during forced shutdown.
    }
    catch (ThreadInterruptedException)
    {
        // Replicator thread interrupted: normal during shutdown.
    }
    catch (NullReferenceException)
    {
    }
    catch (Exception e)
    {
        if (!stopped)
        {
            _context.NCacheLog.Error("AsyncReplicator.RUN", "Async replicator stopped. " + e.ToString());
        }
    }
}