/// <summary>
/// Creates a pulling agent for a single queue of the given persistent stream provider
/// (grain-based pub/sub variant, explicit timing parameters).
/// </summary>
internal PersistentStreamPullingAgent(
    GrainId id,
    string strProviderName,
    IStreamProviderRuntime runtime,
    QueueId queueId,
    TimeSpan queueGetPeriod,
    TimeSpan initQueueTimeout,
    TimeSpan maxDeliveryTime)
    : base(id, runtime.ExecutingSiloAddress, true)
{
    // NOTE(review): runtime is already dereferenced in the base(...) call above, so a
    // null runtime surfaces as NullReferenceException before this guard runs.
    if (runtime == null) throw new ArgumentNullException(nameof(runtime), "PersistentStreamPullingAgent: runtime reference should not be null");
    // Fix: this guard previously reported the wrong parameter name ("runtime").
    if (strProviderName == null) throw new ArgumentNullException(nameof(strProviderName), "PersistentStreamPullingAgent: strProviderName should not be null");

    QueueId = queueId;
    streamProviderName = strProviderName;
    providerRuntime = runtime;
    pubSub = runtime.PubSub(StreamPubSubType.GrainBased);
    pubSubCache = new Dictionary<StreamId, StreamConsumerCollection>();
    safeRandom = new SafeRandom();
    this.queueGetPeriod = queueGetPeriod;
    this.initQueueTimeout = initQueueTimeout;
    this.maxDeliveryTime = maxDeliveryTime;
    numMessages = 0;

    logger = providerRuntime.GetLogger(GrainId + "-" + streamProviderName);
    logger.Info((int)ErrorCode.PersistentStreamPullingAgent_01,
        "Created {0} {1} for Stream Provider {2} on silo {3} for Queue {4}.",
        GetType().Name, GrainId.ToDetailedString(), streamProviderName, Silo, QueueId.ToStringWithHashCode());

    numReadMessagesCounter = CounterStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_NUM_READ_MESSAGES, strProviderName));
    numSentMessagesCounter = CounterStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_NUM_SENT_MESSAGES, strProviderName));
}
/// <summary>
/// Creates a pulling agent for a single queue of the given persistent stream provider
/// (injected pub/sub, config-object variant). Statistics use a per-queue postfix.
/// </summary>
internal PersistentStreamPullingAgent(
    GrainId id,
    string strProviderName,
    IStreamProviderRuntime runtime,
    IStreamPubSub streamPubSub,
    QueueId queueId,
    PersistentStreamProviderConfig config)
    : base(id, runtime.ExecutingSiloAddress, true)
{
    // NOTE(review): runtime is already dereferenced in the base(...) call above, so a
    // null runtime surfaces as NullReferenceException before this guard runs.
    if (runtime == null) throw new ArgumentNullException(nameof(runtime), "PersistentStreamPullingAgent: runtime reference should not be null");
    // Fix: this guard previously reported the wrong parameter name ("runtime").
    if (strProviderName == null) throw new ArgumentNullException(nameof(strProviderName), "PersistentStreamPullingAgent: strProviderName should not be null");

    QueueId = queueId;
    streamProviderName = strProviderName;
    providerRuntime = runtime;
    pubSub = streamPubSub;
    pubSubCache = new Dictionary<StreamId, StreamConsumerCollection>();
    safeRandom = new SafeRandom();
    this.config = config;
    numMessages = 0;

    logger = providerRuntime.GetLogger(GrainId + "-" + streamProviderName);
    logger.Info((int)ErrorCode.PersistentStreamPullingAgent_01,
        "Created {0} {1} for Stream Provider {2} on silo {3} for Queue {4}.",
        GetType().Name, GrainId.ToDetailedString(), streamProviderName, Silo, QueueId.ToStringWithHashCode());

    // Unlike the older overload, stats are unique per provider AND queue.
    string statUniquePostfix = strProviderName + "." + QueueId;
    numReadMessagesCounter = CounterStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_NUM_READ_MESSAGES, statUniquePostfix));
    numSentMessagesCounter = CounterStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_NUM_SENT_MESSAGES, statUniquePostfix));
    IntValueStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_PUBSUB_CACHE_SIZE, statUniquePostfix), () => pubSubCache.Count);
    // TODO: move queue cache size statistics tracking into queue cache implementation once Telemetry APIs and LogStatistics have been reconciled.
    //IntValueStatistic.FindOrCreate(new StatisticName(StatisticNames.STREAMS_PERSISTENT_STREAM_QUEUE_CACHE_SIZE, statUniquePostfix), () => queueCache != null ? queueCache.Size : 0);
}
/// <summary>
/// Creates a receiver that reads messages for one stream queue from a Redis list.
/// </summary>
/// <param name="logger">Logger used for receiver diagnostics.</param>
/// <param name="queueid">Queue this receiver services.</param>
/// <param name="database">Redis database the list lives in.</param>
/// <param name="redisListName">Name of the backing Redis list.</param>
public RedisQueueAdapterReceiver(Logger logger, QueueId queueid, IDatabase database, string redisListName)
{
    // Guards added for consistency with the other receiver constructors in this codebase.
    if (queueid == null) throw new ArgumentNullException(nameof(queueid));
    if (database == null) throw new ArgumentNullException(nameof(database));
    if (redisListName == null) throw new ArgumentNullException(nameof(redisListName));

    _logger = logger;
    _database = database;
    _redisListName = redisListName;
    Id = queueid;
}
/// <summary>
/// Wraps a RabbitMQ data manager as the receiver for a single stream queue.
/// </summary>
private RabbitMessageQueueAdapterReceiver(QueueId queueId, RabbitMessageQueueDataManager queue)
{
    if (queueId == null)
    {
        throw new ArgumentNullException(nameof(queueId));
    }
    if (queue == null)
    {
        throw new ArgumentNullException(nameof(queue));
    }

    this._queue = queue;
    this.Id = queueId;
}
/// <summary>
/// Creates a receiver backed by a pluggable queue provider.
/// </summary>
/// <param name="logger">Logger for receiver diagnostics.</param>
/// <param name="queueid">Queue this receiver services.</param>
/// <param name="queueProvider">Provider the messages are pulled from.</param>
public GenericQueueAdapterReceiver(Logger logger, QueueId queueid, IProviderQueue queueProvider)
{
    // Guards added for consistency with the other receiver constructors in this codebase.
    if (queueid == null) throw new ArgumentNullException(nameof(queueid));
    if (queueProvider == null) throw new ArgumentNullException(nameof(queueProvider));

    _queueProvider = queueProvider;
    _logger = logger;
    Id = queueid;
}
/// <summary>
/// Creates a receiver bound to one Azure queue via its data manager.
/// </summary>
private AzureQueueAdapterReceiver(QueueId queueId, AzureQueueDataManager queue)
{
    // nameof keeps the reported parameter names refactor-safe (was string literals).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (queue == null) throw new ArgumentNullException(nameof(queue));

    Id = queueId;
    this.queue = queue;
}
/// <summary>
/// Shared fixtures for the timed-queue-cache tests: a stub logger plus default
/// queue id, cache size and bucket count.
/// </summary>
public TimedQueueCacheTests()
{
    var mockLogger = new Mock<Logger>();
    _logger = mockLogger.Object;
    _defaultCacheSize = 4096;
    _defaultCacheBucketNum = 10;
    _defaultId = QueueId.GetQueueId("defaultQueue");
}
/// <summary>
/// Creates a receiver bound to one Azure queue; acquires its own provider logger.
/// </summary>
private AzureQueueAdapterReceiver(QueueId queueId, AzureQueueDataManager queue)
{
    // nameof keeps the reported parameter names refactor-safe (was string literals).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (queue == null) throw new ArgumentNullException(nameof(queue));

    Id = queueId;
    this.queue = queue;
    logger = TraceLogger.GetLogger(GetType().Name, TraceLogger.LoggerType.Provider);
}
/// <summary>
/// Creates a receiver bound to one RabbitMQ-backed stream queue.
/// </summary>
private RabbitMessageQueueAdapterReceiver(QueueId queueId, RabbitMessageQueueDataManager queue)
{
    // nameof keeps the reported parameter names refactor-safe (was string literals).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (queue == null) throw new ArgumentNullException(nameof(queue));

    Id = queueId;
    _queue = queue;
}
/// <summary>
/// Factory: builds the RabbitMQ data manager for the queue and wraps it in a receiver.
/// </summary>
public static IQueueAdapterReceiver Create(QueueId queueId, string rabbitMqConnectionString, string deploymentId = "")
{
    if (queueId == null)
    {
        throw new ArgumentNullException(nameof(queueId));
    }
    if (String.IsNullOrEmpty(rabbitMqConnectionString))
    {
        throw new ArgumentNullException(nameof(rabbitMqConnectionString));
    }

    var dataManager = new RabbitMessageQueueDataManager(queueId.ToString(), deploymentId, rabbitMqConnectionString);
    return new RabbitMessageQueueAdapterReceiver(queueId, dataManager);
}
/// <summary>
/// Creates a receiver bound to one SQS-backed stream queue; acquires its own provider logger.
/// </summary>
private SQSAdapterReceiver(QueueId queueId, SQSStorage queue)
{
    // nameof keeps the reported parameter names refactor-safe (was string literals).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (queue == null) throw new ArgumentNullException(nameof(queue));

    Id = queueId;
    this.queue = queue;
    logger = LogManager.GetLogger(GetType().Name, LoggerType.Provider);
}
/// <summary>
/// Factory: builds the Azure queue data manager for the queue and wraps it in a receiver.
/// </summary>
/// <param name="queueId">Queue the receiver will service.</param>
/// <param name="dataConnectionString">Azure storage connection string.</param>
/// <param name="deploymentId">Deployment the queue name is scoped to.</param>
public static IQueueAdapterReceiver Create(QueueId queueId, string dataConnectionString, string deploymentId)
{
    // nameof keeps the reported parameter names refactor-safe (was string literals).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (String.IsNullOrEmpty(dataConnectionString)) throw new ArgumentNullException(nameof(dataConnectionString));
    if (String.IsNullOrEmpty(deploymentId)) throw new ArgumentNullException(nameof(deploymentId));

    var queue = new AzureQueueDataManager(queueId.ToString(), deploymentId, dataConnectionString);
    return new AzureQueueAdapterReceiver(queueId, queue);
}
/// <summary>
/// Returns the in-memory queue for the given id, creating it on first access.
/// </summary>
private Queue<byte[]> GetQueue(QueueId queueId)
{
    // GetOrAdd with a factory replaces the TryGetValue + eager-allocation dance;
    // same winner-takes-all semantics under concurrent first access.
    return _queues.GetOrAdd(queueId, _ => new Queue<byte[]>());
}
/// <summary>
/// Bounded in-memory cache of queue messages, with a histogram of cursor buckets.
/// </summary>
public SimpleQueueCache(QueueId queueId, int cacheSize, Logger logger)
{
    Id = queueId;
    this.logger = logger;
    maxCacheSize = cacheSize;
    cachedMessages = new LinkedList<SimpleQueueCacheItem>();
    cacheCursorHistogram = new List<CacheBucket>();
    // Bucket size is derived from the total cache size; clamp so it is never below one.
    CACHE_HISTOGRAM_MAX_BUCKET_SIZE = Math.Max(cacheSize / NUM_CACHE_HISTOGRAM_BUCKETS, 1); // we have 10 buckets
}
/// <summary>
/// Dequeues the next message for the queue, or a null payload when empty.
/// </summary>
public Task<byte[]> Dequeue(QueueId queueId)
{
    var queue = GetQueue(queueId);
    if (queue.Count == 0)
    {
        _logger.AutoWarn("Trying to dequeue when the queue is empty. This shouldn't happen. Returning null.");
        // Fix: return a completed task carrying null rather than a null Task,
        // which would throw NullReferenceException when the caller awaits it.
        return Task.FromResult<byte[]>(null);
    }
    return Task.FromResult<byte[]>(queue.Dequeue());
}
/// <summary>
/// Shared fixtures for the Kafka receiver tests: stub logger, provider options
/// pointing at a fixed broker/topic/consumer-group, and a test queue id.
/// </summary>
public KafkaQueueAdapterReceiverUnitTests()
{
    var mockLogger = new Mock<Logger>();
    _logger = mockLogger.Object;

    const string topicName = "TestTopic";
    const string consumerGroupName = "TestConsumerGroup";
    var connectionStrings = new List<Uri> { new Uri("http://192.168.10.27:9092") };
    _options = new KafkaStreamProviderOptions(connectionStrings.ToArray(), topicName, consumerGroupName);

    _id = QueueId.GetQueueId("test", 0, 0);
}
/// <summary>
/// Pushes a payload onto the head of the queue's Redis list; failures are logged
/// rather than propagated (best-effort write).
/// </summary>
public async Task Enqueue(QueueId queueId, byte[] bytes)
{
    var redisListName = GetRedisListName(queueId);
    try
    {
        // Fix: await inside the try. Previously the Task was returned unawaited, so
        // asynchronous failures of the push bypassed this catch and were never logged.
        await _database.ListLeftPushAsync(redisListName, bytes);
    }
    catch (Exception exception)
    {
        _logger.AutoError($"failed to write to Redis list.\n Queue Id: {queueId}\nList name: {redisListName}\nException: {exception}");
    }
}
/// <summary>
/// Maps a queue to its partition id; throws if the queue is unknown to the mapping.
/// </summary>
public string QueueToPartition(QueueId queue)
{
    if (queue == null)
    {
        throw new ArgumentNullException("queue");
    }

    string partitionId;
    if (partitionDictionary.TryGetValue(queue, out partitionId))
    {
        return partitionId;
    }

    throw new ArgumentOutOfRangeException(string.Format(CultureInfo.InvariantCulture, "queue {0}", queue.ToStringWithHashCode()));
}
/// <summary>
/// Receiver that pulls message batches for one queue from a Kafka consumer,
/// publishing the current offset as a metric.
/// </summary>
public KafkaQueueAdapterReceiver(QueueId queueId, IManualConsumer consumer, KafkaStreamProviderOptions options, IKafkaBatchFactory factory, Logger logger)
{
    // Validate all dependencies up front (order preserved).
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (consumer == null) throw new ArgumentNullException(nameof(consumer));
    if (factory == null) throw new ArgumentNullException(nameof(factory));
    if (options == null) throw new ArgumentNullException(nameof(options));
    if (logger == null) throw new ArgumentNullException(nameof(logger));

    Id = queueId;
    _consumer = consumer;
    _factory = factory;
    _logger = logger;
    _options = options;
    _counterCurrentOffset = Metric.Context("KafkaStreamProvider").Counter($"CurrentOffset queueId:({queueId.GetNumericId()})", unit: Unit.Custom("Log"));
}
/// <summary>
/// Returns the cache for the queue, creating it on first request.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId)
{
    // GetOrAdd states the intent directly; the previous AddOrUpdate's update delegate
    // simply returned the existing cache, i.e. get-or-add semantics.
    return _caches.GetOrAdd(queueId, id => new QueueCache(id, _timeToKeepMessages, _logger));
}
/// <summary>
/// Builds a new Azure queue receiver for the given queue using this adapter's settings.
/// </summary>
public IQueueAdapterReceiver CreateReceiver(QueueId queueId) =>
    AzureQueueAdapterReceiver.Create(this.serializationManager, this.loggerFactory, queueId, DataConnectionString, DeploymentId, this.dataAdapter, MessageVisibilityTimeout);
/// <summary>
/// Returns a logging failure handler for the queue (a fresh one per call).
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId) =>
    Task.FromResult<IStreamFailureHandler>(new LoggerStreamFailureHandler(_logger));
/// <summary>
/// Returns the cache for the queue, constructing it on first request.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId) => m_QueueCaches.GetOrAdd(queueId, ConstructQueueCache);
/// <summary>
/// Enumerates every queue id registered across all ring members of all queue maps.
/// </summary>
public IEnumerable<QueueId> GetAllQueues() =>
    from ring in _queueMap.Values
    from identifiers in ring.GetAllRingMembers()
    select QueueId.GetQueueId(identifiers.QueueNamePrefix, identifiers.QueueId, identifiers.UniformHashCache);
/// <summary>
/// Queue cache specialized for EventStore; remembers the receiver it was created for.
/// </summary>
public EventStoreQueueCache(int cacheSize, Logger logger, QueueId queueId, EventStoreAdapterReceiver receiver)
    : base(cacheSize, logger)
{
    m_Receiver = receiver;
    m_QueueId = queueId;
}
// Integration test (review note): end-to-end exercise of a queue adapter factory.
// Sends NumBatches of mixed int/string events alternating across two streams,
// drains them concurrently through per-queue receivers into per-queue caches,
// then verifies total counts, per-stream counts, token ordering, and that a
// cursor positioned at the 10th token sees the expected remainder.
// Code tokens left untouched; line breaks restored (the extraction had mashed
// the method onto single lines, letting inline // comments swallow code).
private async Task SendAndReceiveFromQueueAdapter(IQueueAdapterFactory adapterFactory, IProviderConfiguration config)
{
    IQueueAdapter adapter = await adapterFactory.CreateAdapter();
    IQueueAdapterCache cache = adapterFactory.GetQueueAdapterCache();

    // Create receiver per queue
    IStreamQueueMapper mapper = adapterFactory.GetStreamQueueMapper();
    Dictionary <QueueId, IQueueAdapterReceiver> receivers = mapper.GetAllQueues().ToDictionary(queueId => queueId, adapter.CreateReceiver);
    Dictionary <QueueId, IQueueCache> caches = mapper.GetAllQueues().ToDictionary(queueId => queueId, cache.CreateQueueCache);
    await Task.WhenAll(receivers.Values.Select(receiver => receiver.Initialize(TimeSpan.FromSeconds(5))));

    // test using 2 streams
    Guid streamId1 = Guid.NewGuid();
    Guid streamId2 = Guid.NewGuid();
    int receivedBatches = 0;
    var streamsPerQueue = new ConcurrentDictionary <QueueId, HashSet <IStreamIdentity> >();

    // reader threads (at most 2 active queues because only two streams)
    var work = new List <Task>();
    foreach (KeyValuePair <QueueId, IQueueAdapterReceiver> receiverKvp in receivers)
    {
        QueueId queueId = receiverKvp.Key;
        var receiver = receiverKvp.Value;
        var qCache = caches[queueId];
        Task task = Task.Factory.StartNew(() =>
        {
            while (receivedBatches < NumBatches)
            {
                var messages = receiver.GetQueueMessagesAsync(CloudQueueMessage.MaxNumberOfMessagesToPeek).Result.ToArray();
                if (!messages.Any())
                {
                    continue;
                }
                foreach (IBatchContainer message in messages)
                {
                    streamsPerQueue.AddOrUpdate(queueId,
                        id => new HashSet <IStreamIdentity> { new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()) },
                        (id, set) =>
                        {
                            set.Add(new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()));
                            return(set);
                        });
                    output.WriteLine("Queue {0} received message on stream {1}", queueId, message.StreamGuid);
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents <int>().Count()); // "Half the events were ints"
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents <string>().Count()); // "Half the events were strings"
                }
                Interlocked.Add(ref receivedBatches, messages.Length);
                qCache.AddToCache(messages);
            }
        });
        work.Add(task);
    }

    // send events
    List <object> events = CreateEvents(NumMessagesPerBatch);
    work.Add(Task.Factory.StartNew(() => Enumerable.Range(0, NumBatches)
        .Select(i => i % 2 == 0 ? streamId1 : streamId2)
        .ToList()
        .ForEach(streamId => adapter.QueueMessageBatchAsync(streamId, streamId.ToString(), events.Take(NumMessagesPerBatch).ToArray(), null, RequestContext.Export()).Wait())));
    await Task.WhenAll(work);

    // Make sure we got back everything we sent
    Assert.Equal(NumBatches, receivedBatches);

    // check to see if all the events are in the cache and we can enumerate through them
    StreamSequenceToken firstInCache = new EventSequenceTokenV2(0);
    foreach (KeyValuePair <QueueId, HashSet <IStreamIdentity> > kvp in streamsPerQueue)
    {
        var receiver = receivers[kvp.Key];
        var qCache = caches[kvp.Key];
        foreach (IStreamIdentity streamGuid in kvp.Value)
        {
            // read all messages in cache for stream
            IQueueCacheCursor cursor = qCache.GetCacheCursor(streamGuid, firstInCache);
            int messageCount = 0;
            StreamSequenceToken tenthInCache = null;
            StreamSequenceToken lastToken = firstInCache;
            while (cursor.MoveNext())
            {
                Exception ex;
                messageCount++;
                IBatchContainer batch = cursor.GetCurrent(out ex);
                output.WriteLine("Token: {0}", batch.SequenceToken);
                Assert.True(batch.SequenceToken.CompareTo(lastToken) >= 0, $"order check for event {messageCount}");
                lastToken = batch.SequenceToken;
                if (messageCount == 10)
                {
                    tenthInCache = batch.SequenceToken;
                }
            }
            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            Assert.Equal(NumBatches / 2, messageCount);
            Assert.NotNull(tenthInCache);

            // read all messages from the 10th
            cursor = qCache.GetCacheCursor(streamGuid, tenthInCache);
            messageCount = 0;
            while (cursor.MoveNext())
            {
                messageCount++;
            }
            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            const int expected = NumBatches / 2 - 10 + 1; // all except the first 10, including the 10th (10 + 1)
            Assert.Equal(expected, messageCount);
        }
    }
}
// Builds the Redis key for the given queue: "<base>-<queueId>".
private string GetRedisListName(QueueId queueId)
{
    return _redisListBaseName + "-" + queueId;
}
/// <summary>
/// Builds a new Azure queue receiver for the given queue using this adapter's settings.
/// </summary>
// NOTE(review): "cachSize" is the field's declared name elsewhere in this class;
// renaming it is outside the scope of this method.
public IQueueAdapterReceiver CreateReceiver(QueueId queueId) =>
    AzureQueueAdapterReceiver.Create(queueId, DataConnectionString, DeploymentId, cachSize);
/// <summary>
/// Creates a delivery failure handler for the specified queue.
/// </summary>
/// <param name="queueId">Queue whose failures the handler will service.</param>
/// <returns>A task resolving to the handler produced by the configured factory.</returns>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId) => StreamFailureHandlerFactory(queueId);
/// <summary>
/// Builds a Kafka receiver for the queue, sharing the provider-wide options and logger factory.
/// </summary>
public IQueueAdapterReceiver CreateReceiver(QueueId queueId)
{
    return new KafkaQueueAdapterReceiver(Name, queueId, _options, _loggerFactory);
}
/// <summary>
/// Builds a new SQS receiver for the given queue using this adapter's settings.
/// </summary>
public IQueueAdapterReceiver CreateReceiver(QueueId queueId) =>
    SQSAdapterReceiver.Create(this.serializationManager, queueId, DataConnectionString, DeploymentId);
/// <summary>
/// Enumerates the queue ids whose uniform hash falls inside the given ring range.
/// </summary>
public IEnumerable<QueueId> GetQueuesForRange(IRingRange range) =>
    _queueMap.Values
        .SelectMany(ring => ring.GetAllRingMembers())
        .Where(member => range.InRange(member.GetUniformHashCode()))
        .Select(member => QueueId.GetQueueId(member.QueueNamePrefix, member.QueueId, member.UniformHashCache));
/// <summary>
/// Delegates failure-handler creation to the configured factory.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId) => StreamFailureHandlerFactory(queueId);
/// <summary>
/// Returns the cache for the queue, creating a SimpleQueueCache on first request.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId)
{
    // GetOrAdd states the intent directly; the previous AddOrUpdate's update delegate
    // simply returned the existing cache, i.e. get-or-add semantics.
    return caches.GetOrAdd(queueId, id => new SimpleQueueCache(cacheSize, logger));
}
// Review note: this adapter is intentionally write-only, so receiver creation
// always fails; the exception message documents the limitation to callers.
public IQueueAdapterReceiver CreateReceiver(QueueId queueId) { throw new OrleansException("SimpleAzureQueueAdapter is a write-only adapter, it does not support reading from the queue."); }
/// <summary>
/// Returns the shared no-op failure handler, creating it lazily on first request.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    //TODO: Add a queue specific default failure handler with reasonable error reporting.
    //TODO: Try to get failure handler from service provider so users can inject their own.
    if (streamFailureHandler == null)
    {
        streamFailureHandler = new NoOpStreamDeliveryFailureHandler();
    }
    return Task.FromResult(streamFailureHandler);
}
/// <summary>
/// Creates a delivery failure handler for the specified queue.
/// </summary>
/// <param name="queueId">Queue the handler is requested for (a fresh no-op handler is returned regardless).</param>
/// <returns>A completed task holding a new non-faulting no-op failure handler.</returns>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    IStreamFailureHandler handler = new NoOpStreamDeliveryFailureHandler(false);
    return Task.FromResult(handler);
}
// Review note: test-data builder. Seeds a user, file, blog, channel and queue,
// inserts CollectionTotal "note" posts (not returned) plus CollectionTotal
// file/image posts that belong to the collection, and returns only the latter.
// Live dates are placed in the future or past per liveDateInFuture; QueueId is
// set only when scheduledByQueue. Code tokens left untouched; line breaks
// restored (the extraction had mashed the method onto one line, letting the
// inline // comments swallow code).
private async Task <IReadOnlyList <Post> > CreatePostsAsync(
    TestDatabaseContext testDatabase, UserId userId, QueueId queueId, bool liveDateInFuture, bool scheduledByQueue)
{
    using (var databaseContext = testDatabase.CreateContext())
    {
        var user = UserTests.UniqueEntity(Random);
        user.Id = userId.Value;
        await databaseContext.Database.Connection.InsertAsync(user);

        var file = FileTests.UniqueEntity(Random);
        file.Id = FileId.Value;
        file.UserId = user.Id;
        await databaseContext.Database.Connection.InsertAsync(file);

        var blog = BlogTests.UniqueEntity(Random);
        blog.CreatorId = user.Id;
        await databaseContext.Database.Connection.InsertAsync(blog);

        var channel = ChannelTests.UniqueEntity(Random);
        channel.BlogId = blog.Id;
        await databaseContext.Database.Connection.InsertAsync(channel);

        var queue = QueueTests.UniqueEntity(Random);
        queue.Id = queueId.Value;
        queue.BlogId = blog.Id;
        await databaseContext.Database.Connection.InsertAsync(queue);

        var notes = new List <Post>();
        for (var i = 0; i < CollectionTotal; i++)
        {
            // Notes are not covered by this feature as they do not belong in a collection, but we add them to create a more realistic test state.
            var post = PostTests.UniqueNote(Random);
            post.ChannelId = channel.Id;
            notes.Add(post);
        }

        var postsInCollection = new List <Post>();
        for (var i = 0; i < CollectionTotal; i++)
        {
            var post = PostTests.UniqueFileOrImage(Random);
            post.ChannelId = channel.Id;
            post.QueueId = scheduledByQueue ? queueId.Value : (Guid?)null;
            post.PreviewImageId = file.Id;
            post.LiveDate = Now.AddDays((1 + Random.Next(100)) * (liveDateInFuture ? 1 : -1));

            // Clip dates as we will be comparing from these entities.
            post.LiveDate = new SqlDateTime(post.LiveDate).Value;
            post.CreationDate = new SqlDateTime(post.CreationDate).Value;
            postsInCollection.Add(post);
        }

        await databaseContext.Database.Connection.InsertAsync(notes.Concat(postsInCollection));
        return(postsInCollection);
    }
}
/// <summary>
/// Factory: validates dependencies, builds the Azure queue data manager for the
/// queue, and wraps it in a receiver.
/// </summary>
public static IQueueAdapterReceiver Create(SerializationManager serializationManager, ILoggerFactory loggerFactory, QueueId queueId, string dataConnectionString, string deploymentId, IAzureQueueDataAdapter dataAdapter, TimeSpan? messageVisibilityTimeout = null)
{
    if (queueId == null) throw new ArgumentNullException(nameof(queueId));
    if (string.IsNullOrEmpty(dataConnectionString)) throw new ArgumentNullException(nameof(dataConnectionString));
    if (string.IsNullOrEmpty(deploymentId)) throw new ArgumentNullException(nameof(deploymentId));
    if (dataAdapter == null) throw new ArgumentNullException(nameof(dataAdapter));
    if (serializationManager == null) throw new ArgumentNullException(nameof(serializationManager));

    var dataManager = new AzureQueueDataManager(loggerFactory, queueId.ToString(), deploymentId, dataConnectionString, messageVisibilityTimeout);
    return new AzureQueueAdapterReceiver(serializationManager, loggerFactory, queueId, dataManager, dataAdapter);
}
/// <summary>
/// Returns the length of the queue's backing Redis list.
/// </summary>
public Task<long> Length(QueueId id)
{
    return _database.ListLengthAsync(GetRedisListName(id));
}
/// <summary>
/// Builds a new pooled pipe cache for the queue.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId) => new FastPipeQueueCache(bufferPool, queueId);
/// <summary>
/// Create a cache for a given queue id
/// </summary>
/// <param name="queueId">Queue whose cache is requested.</param>
/// <returns>The per-queue receiver, which also serves as the cache.</returns>
public IQueueCache CreateQueueCache(QueueId queueId) => GetOrCreateReceiver(queueId);
/// <summary>
/// Builds an EventStore queue cache bound to a freshly created receiver for the queue.
/// </summary>
private EventStoreQueueCache ConstructQueueCache(QueueId queueId)
{
    // Named constant replaces the magic number previously passed inline.
    const int DefaultCacheSize = 100;
    var receiver = (EventStoreAdapterReceiver)m_AdapterFactory.CreateReceiver(queueId);
    return new EventStoreQueueCache(DefaultCacheSize, m_Logger, queueId, receiver);
}
/// <summary>
/// Returns the receiver for the queue, creating it on first request.
/// </summary>
private EventHubAdapterReceiver GetOrCreateReceiver(QueueId queueId)
{
    // Method-group factory uses GetOrAdd's own key parameter instead of capturing
    // queueId in a closure (the lambda previously ignored its q parameter).
    return receivers.GetOrAdd(queueId, MakeReceiver);
}
/// <summary>
/// Returns the shared, pre-built failure-handler task for every queue.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    return _failureHandler;
}
/// <summary>
/// Builds a RabbitMQ consumer for the queue: a per-queue logger, a connector,
/// and the topology-resolved queue name.
/// </summary>
public IRabbitMqConsumer CreateConsumer(QueueId queueId)
{
    var consumerLogger = LoggerFactory.CreateLogger($"{typeof(RabbitMqConsumer).FullName}.{queueId}");
    var queueName = _topologyProvider.GetNameForQueue(queueId);
    return new RabbitMqConsumer(CreateConnector(consumerLogger), queueName, _topologyProvider);
}
/// <summary>
/// Create a cache for a given queue id
/// </summary>
/// <param name="queueId">Queue the cache is requested for (a fresh pooled cache is returned regardless).</param>
public IQueueCache CreateQueueCache(QueueId queueId) => new GeneratorPooledCache(bufferPool, logger);
/// <summary>
/// Get a MemoryStreamQueueGrain instance by queue Id.
/// </summary>
/// <param name="queueId">Queue whose backing grain is requested.</param>
/// <returns>The cached grain reference, keyed by a deterministic guid derived from the queue id.</returns>
private IMemoryStreamQueueGrain GetQueueGrain(QueueId queueId) =>
    queueGrains.GetOrAdd(queueId, id => grainFactory.GetGrain<IMemoryStreamQueueGrain>(GenerateDeterministicGuid(id)));
/// <summary>
/// This instance serves as its own delivery failure handler.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    return Task.FromResult<IStreamFailureHandler>(this);
}
/// <summary>
/// Acquire delivery failure handler for a queue
/// </summary>
/// <param name="queueId">Queue the handler is requested for.</param>
/// <returns>A completed task carrying the lazily created shared no-op handler.</returns>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    if (streamFailureHandler == null)
    {
        streamFailureHandler = new NoOpStreamDeliveryFailureHandler();
    }
    return Task.FromResult(streamFailureHandler);
}
/// <summary>
/// Pops the next payload from the tail of the queue's Redis list.
/// </summary>
public async Task<byte[]> Dequeue(QueueId queueId)
{
    return await _database.ListRightPopAsync(GetRedisListName(queueId));
}
/// <summary>
/// Delegates failure-handler creation to the configured factory.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId)
{
    return StreamFailureHandlerFactory(queueId);
}
/// <summary>
/// Returns the length of the queue's backing Redis list.
/// </summary>
public Task<long> Length(QueueId id) => _database.ListLengthAsync(GetRedisListName(id));
/// <summary>
/// Creates a receiver bound to one Azure queue, with its serializer, data adapter,
/// per-type logger, and an empty pending-delivery list.
/// </summary>
private AzureQueueAdapterReceiver(SerializationManager serializationManager, ILoggerFactory loggerFactory, QueueId queueId, AzureQueueDataManager queue, IAzureQueueDataAdapter dataAdapter)
{
    if (queueId == null)
    {
        throw new ArgumentNullException(nameof(queueId));
    }
    if (queue == null)
    {
        throw new ArgumentNullException(nameof(queue));
    }
    if (dataAdapter == null)
    {
        // Fix: previously reported nameof(queue) — the wrong parameter name.
        throw new ArgumentNullException(nameof(dataAdapter));
    }

    Id = queueId;
    this.serializationManager = serializationManager;
    this.queue = queue;
    this.dataAdapter = dataAdapter;
    this.logger = loggerFactory.CreateLogger <AzureQueueAdapterReceiver>();
    this.pending = new List <PendingDelivery>();
}
/// <summary>
/// Returns the cache for the queue, creating a SimpleQueueCache on first request.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId)
{
    // GetOrAdd states the intent directly; the previous AddOrUpdate's update delegate
    // simply returned the existing cache, i.e. get-or-add semantics.
    return caches.GetOrAdd(queueId, id => new SimpleQueueCache(cacheSize, logger));
}
/// <summary>
/// Returns the cache for the queue, creating a DictQueueCache on first request.
/// </summary>
public IQueueCache CreateQueueCache(QueueId queueId)
{
    // GetOrAdd states the intent directly; the previous AddOrUpdate's update delegate
    // simply returned the existing cache, i.e. get-or-add semantics.
    return _caches.GetOrAdd(queueId, id => new DictQueueCache(id, _logger));
}
/// <summary>
/// Acquire delivery failure handler for a queue
/// </summary>
/// <param name="queueId">Queue identifier; mapped to its partition before the factory is invoked.</param>
/// <returns>The handler produced by the configured factory for the queue's partition.</returns>
public Task <IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId) { return(StreamFailureHandlerFactory(streamQueueMapper.QueueToPartition(queueId))); }
/// <summary>
/// Returns a fresh non-faulting no-op failure handler for every request.
/// </summary>
public Task<IStreamFailureHandler> GetDeliveryFailureHandler(QueueId queueId) =>
    Task.FromResult<IStreamFailureHandler>(new NoOpStreamDeliveryFailureHandler(false));
/// <summary>
/// Time-bounded queue cache: retention is cacheTimespan split across numOfBuckets
/// histogram buckets, with an overall item cap of cacheSize.
/// </summary>
public TimedQueueCache(QueueId queueId, TimeSpan cacheTimespan, int cacheSize, int numOfBuckets, Logger logger)
{
    Id = queueId;
    _logger = logger;
    _cachedMessages = new LinkedList<TimedQueueCacheItem>();
    _cacheCursorHistogram = new List<TimedQueueCacheBucket>();

    // Size limits: overall cap, per-bucket cap (at least one item), and batch-add limit.
    _maxCacheSize = cacheSize;
    _cacheHistogramMaxBucketSize = Math.Max(_maxCacheSize / numOfBuckets, 1);
    _maxNumberToAdd = _cacheHistogramMaxBucketSize;

    // Time limits: total retention window and the slice each bucket covers.
    _cacheTimeSpan = cacheTimespan;
    _bucketTimeSpan = TimeSpan.FromMilliseconds(cacheTimespan.TotalMilliseconds / numOfBuckets);

    // Metrics registration, keyed by the queue's numeric id.
    _counterMessagesInCache = Metric.Context("KafkaStreamProvider").Counter($"Messages In Cache queueId:({queueId.GetNumericId()})", Unit.Items);
    _meterCacheEvacuationsPerSecond = Metric.Context("KafkaStreamProvider").Meter($"Cache Evacuations Per Second queueId:({queueId.GetNumericId()})", Unit.Items);
    _counterNumberOfCursorsCausingPressure = Metric.Context("KafkaStreamProvider").Counter($"Cursors causing pressure queueId:({queueId.GetNumericId()})", Unit.Items);
}
/// <summary>
/// Creates a queue receiver for the specified queueId
/// </summary>
/// <param name="queueId">Queue the receiver is requested for.</param>
/// <returns>The per-queue receiver, created on first request and reused thereafter.</returns>
public IQueueAdapterReceiver CreateReceiver(QueueId queueId) { return(GetOrCreateReceiver(queueId)); }