protected override IQueue<SimpleWorkItem> GetQueue(int retries = 1, TimeSpan? workItemTimeout = null, TimeSpan? retryDelay = null, int deadLetterMaxItems = 100, bool runQueueMaintenance = true)
{
    var queue = new RedisQueue<SimpleWorkItem>(SharedConnection.GetMuxer(), workItemTimeout: workItemTimeout, retries: retries, retryDelay: retryDelay, deadLetterMaxItems: deadLetterMaxItems, runMaintenanceTasks: runQueueMaintenance);
    Logger.Debug().Message($"Queue Id: {queue.QueueId}").Write();
    return queue;
}
private int CountAllKeys()
{
    var endpoints = SharedConnection.GetMuxer().GetEndPoints(true);
    if (endpoints.Length == 0)
        return 0;

    int count = 0;
    foreach (var endpoint in endpoints)
    {
        var server = SharedConnection.GetMuxer().GetServer(endpoint);
        try
        {
            var keys = server.Keys().ToArray();
            foreach (var key in keys)
                _logger.Info(key);

            count += keys.Length;
        }
        catch (Exception)
        {
            // Ignore endpoints where keys cannot be enumerated.
        }
    }

    return count;
}
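Several of the tests below (VerifyCacheKeysAreCorrectAfterAbandon, CanTrimDeadletterItems, VerifyCacheKeysAreCorrect) call a FlushAll() helper that is not included in this excerpt. The following is a minimal sketch, assuming it enumerates endpoints the same way CountAllKeys does and flushes each server's current database; the project's actual helper may differ.

private void FlushAll()
{
    // Hypothetical sketch: mirrors the SharedConnection endpoint enumeration used by CountAllKeys.
    var endpoints = SharedConnection.GetMuxer().GetEndPoints(true);
    if (endpoints.Length == 0)
        return;

    foreach (var endpoint in endpoints)
    {
        var server = SharedConnection.GetMuxer().GetServer(endpoint);
        try
        {
            // FLUSHDB removes all keys from the currently selected database on this server.
            server.FlushDatabase();
        }
        catch (Exception)
        {
            // Ignore endpoints that reject the command, matching the error handling in CountAllKeys.
        }
    }
}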
protected override IQueue<SimpleWorkItem> GetQueue(int retries = 1, TimeSpan? workItemTimeout = null, TimeSpan? retryDelay = null, int deadLetterMaxItems = 100)
{
    var queue = new RedisQueue<SimpleWorkItem>(SharedConnection.GetMuxer(), workItemTimeout: workItemTimeout, retries: retries, retryDelay: retryDelay, deadLetterMaxItems: deadLetterMaxItems);
    Debug.WriteLine(String.Format("Queue Id: {0}", queue.QueueId));
    return queue;
}
public RedisLockTests(ITestOutputHelper output) : base(output)
{
    var muxer = SharedConnection.GetMuxer();
    muxer.FlushAllAsync().GetAwaiter().GetResult();

    _cache = new RedisCacheClient(muxer, loggerFactory: Log);
    _messageBus = new RedisMessageBus(muxer.GetSubscriber(), loggerFactory: Log);
}
public async Task VerifyCacheKeysAreCorrect()
{
    var queue = GetQueue(retries: 3, workItemTimeout: TimeSpan.FromSeconds(2), retryDelay: TimeSpan.Zero, runQueueMaintenance: false);
    if (queue == null)
        return;

    using (queue)
    {
        var muxer = SharedConnection.GetMuxer();
        var db = muxer.GetDatabase();
        string listPrefix = muxer.IsCluster() ? "{q:SimpleWorkItem}" : "q:SimpleWorkItem";

        string id = await queue.EnqueueAsync(new SimpleWorkItem { Data = "blah", Id = 1 });
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.Equal(1, await db.ListLengthAsync($"{listPrefix}:in"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.Equal(3, await muxer.CountAllKeysAsync());

        _logger.LogInformation("-----");
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":renewed"));
        var workItem = await queue.DequeueAsync();
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.Equal(0, await db.ListLengthAsync($"{listPrefix}:in"));
        Assert.Equal(1, await db.ListLengthAsync($"{listPrefix}:work"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":renewed"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(5, await muxer.CountAllKeysAsync());

        await Task.Delay(TimeSpan.FromSeconds(4));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.Equal(0, await db.ListLengthAsync($"{listPrefix}:in"));
        Assert.Equal(1, await db.ListLengthAsync($"{listPrefix}:work"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":renewed"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(5, await muxer.CountAllKeysAsync());

        _logger.LogInformation("-----");
        await workItem.CompleteAsync();
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":renewed"));
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(0, await db.ListLengthAsync($"{listPrefix}:in"));
        Assert.Equal(0, await db.ListLengthAsync($"{listPrefix}:work"));
        Assert.Equal(0, await muxer.CountAllKeysAsync());
    }
}
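The test above relies on a CountAllKeysAsync() extension on the multiplexer that is not shown in this excerpt. A minimal sketch under the assumption that it sums DBSIZE across the configured endpoints; the names and implementation are illustrative, not the library's actual extension.

public static class ConnectionMultiplexerTestExtensions
{
    // Hypothetical sketch of the CountAllKeysAsync() helper used by the tests.
    public static async Task<long> CountAllKeysAsync(this IConnectionMultiplexer muxer)
    {
        long count = 0;
        foreach (var endpoint in muxer.GetEndPoints(true))
        {
            var server = muxer.GetServer(endpoint);
            if (server.IsReplica)
                continue; // skip replicas so keys are not counted twice

            count += await server.DatabaseSizeAsync();
        }

        return count;
    }
}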
protected override IQueue<SampleQueueWorkItem> GetSampleWorkItemQueue(int retries, TimeSpan retryDelay)
{
    return new RedisQueue<SampleQueueWorkItem>(new RedisQueueOptions<SampleQueueWorkItem>
    {
        ConnectionMultiplexer = SharedConnection.GetMuxer(),
        Retries = retries,
        RetryDelay = retryDelay,
        LoggerFactory = Log
    });
}
protected override IQueue<SampleQueueWorkItem> GetSampleWorkItemQueue(int retries, TimeSpan retryDelay)
{
    return new RedisQueue<SampleQueueWorkItem>(o => o
        .ConnectionMultiplexer(SharedConnection.GetMuxer())
        .Retries(retries)
        .RetryDelay(retryDelay)
        .LoggerFactory(Log));
}
public void VerifyCacheKeysAreCorrectAfterAbandon()
{
    var queue = GetQueue(retries: 2, workItemTimeout: TimeSpan.FromMilliseconds(100), retryDelay: TimeSpan.Zero);
    if (queue == null)
        return;

    FlushAll();
    Assert.Equal(0, CountAllKeys());

    using (queue)
    {
        var db = SharedConnection.GetMuxer().GetDatabase();

        var id = queue.Enqueue(new SimpleWorkItem { Data = "blah", Id = 1 });
        var workItem = queue.Dequeue();
        workItem.Abandon();
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id));
        Assert.Equal(1, db.ListLength("q:SimpleWorkItem:in"));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:work"));
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(1, db.StringGet("q:SimpleWorkItem:" + id + ":attempts"));
        Assert.Equal(4, CountAllKeys());

        workItem = queue.Dequeue();
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:in"));
        Assert.Equal(1, db.ListLength("q:SimpleWorkItem:work"));
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(1, db.StringGet("q:SimpleWorkItem:" + id + ":attempts"));
        Assert.Equal(4, CountAllKeys());

        // let the work item timeout
        Thread.Sleep(1000);
        Assert.Equal(1, queue.WorkItemTimeoutCount);
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id));
        Assert.Equal(1, db.ListLength("q:SimpleWorkItem:in"));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:work"));
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(2, db.StringGet("q:SimpleWorkItem:" + id + ":attempts"));
        Assert.Equal(4, CountAllKeys());

        // should go to deadletter now
        workItem = queue.Dequeue();
        workItem.Abandon();
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:in"));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:work"));
        Assert.Equal(1, db.ListLength("q:SimpleWorkItem:dead"));
        Assert.True(db.KeyExists("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(3, db.StringGet("q:SimpleWorkItem:" + id + ":attempts"));
        Assert.Equal(4, CountAllKeys());
    }
}
protected override ICacheClient GetCacheClient(bool shouldThrowOnSerializationError = true)
{
    return new RedisHybridCacheClient(o => o
        .ConnectionMultiplexer(SharedConnection.GetMuxer())
        .LoggerFactory(Log)
        .ShouldThrowOnSerializationError(shouldThrowOnSerializationError),
        localConfig => localConfig
            .CloneValues(true)
            .ShouldThrowOnSerializationError(shouldThrowOnSerializationError));
}
protected override IQueue<SimpleWorkItem> GetQueue(int retries = 1, TimeSpan? workItemTimeout = null, TimeSpan? retryDelay = null, int deadLetterMaxItems = 100, bool runQueueMaintenance = true)
{
    var muxer = SharedConnection.GetMuxer();
    var queue = new RedisQueue<SimpleWorkItem>(muxer, workItemTimeout: workItemTimeout, retries: retries, retryDelay: retryDelay, deadLetterMaxItems: deadLetterMaxItems, runMaintenanceTasks: runQueueMaintenance, loggerFactory: Log);
    _logger.Debug("Queue Id: {queueId}", queue.QueueId);
    return queue;
}
public override async Task CanHaveMultipleQueueInstancesWithLockingAsync()
{
    var muxer = SharedConnection.GetMuxer();
    using var cache = new RedisCacheClient(new RedisCacheClientOptions { ConnectionMultiplexer = muxer, LoggerFactory = Log });
    using var messageBus = new RedisMessageBus(new RedisMessageBusOptions { Subscriber = muxer.GetSubscriber(), Topic = "test-queue", LoggerFactory = Log });

    var distributedLock = new CacheLockProvider(cache, messageBus, Log);
    await CanHaveMultipleQueueInstancesWithLockingImplAsync(distributedLock);
}
public override async Task CanHaveMultipleQueueInstancesWithLockingAsync()
{
    var muxer = SharedConnection.GetMuxer();
    using (var cache = new RedisCacheClient(muxer, loggerFactory: Log))
    using (var messageBus = new RedisMessageBus(muxer.GetSubscriber(), "test", loggerFactory: Log))
    {
        var distributedLock = new CacheLockProvider(cache, messageBus, Log);
        await CanHaveMultipleQueueInstancesWithLockingImplAsync(distributedLock);
    }
}
public RedisLockTests(ITestOutputHelper output) : base(output)
{
    var muxer = SharedConnection.GetMuxer();
    muxer.FlushAllAsync().GetAwaiter().GetResult();

    _cache = new RedisCacheClient(new RedisCacheClientOptions { ConnectionMultiplexer = muxer, LoggerFactory = Log });
    _messageBus = new RedisMessageBus(new RedisMessageBusOptions { Subscriber = muxer.GetSubscriber(), Topic = "test-lock", LoggerFactory = Log });
}
public async Task MeasureThroughputWithRandomFailures()
{
    var queue = GetQueue(retries: 3, workItemTimeout: TimeSpan.FromSeconds(2), retryDelay: TimeSpan.Zero);
    if (queue == null)
        return;

    using (queue)
    {
        await queue.DeleteQueueAsync();

        const int workItemCount = 1000;
        for (int i = 0; i < workItemCount; i++)
            await queue.EnqueueAsync(new SimpleWorkItem { Data = "Hello" });
        Assert.Equal(workItemCount, (await queue.GetQueueStatsAsync()).Queued);

        var metrics = new InMemoryMetricsClient();
        var workItem = await queue.DequeueAsync(TimeSpan.Zero);
        while (workItem != null)
        {
            Assert.Equal("Hello", workItem.Value.Data);
            if (RandomData.GetBool(10))
                await workItem.AbandonAsync();
            else
                await workItem.CompleteAsync();

            await metrics.CounterAsync("work");
            workItem = await queue.DequeueAsync(TimeSpan.FromMilliseconds(100));
        }

        _logger.Trace((await metrics.GetCounterStatsAsync("work")).ToString());

        var stats = await queue.GetQueueStatsAsync();
        Assert.True(stats.Dequeued >= workItemCount);
        Assert.Equal(workItemCount, stats.Completed + stats.Deadletter);
        Assert.Equal(0, stats.Queued);

        var muxer = SharedConnection.GetMuxer();
        // Log the total number of Redis keys remaining after the run.
        _logger.Trace("# Keys: {0}", await muxer.CountAllKeysAsync());
    }
}
public async Task CanDisposeCacheAndQueueAndReceiveSubscribedMessages()
{
    var muxer = SharedConnection.GetMuxer();

    var messageBus1 = new RedisMessageBus(new RedisMessageBusOptions { Subscriber = muxer.GetSubscriber(), Topic = "test-messages", LoggerFactory = Log });
    var cache = new RedisCacheClient(new RedisCacheClientOptions { ConnectionMultiplexer = muxer });
    Assert.NotNull(cache);
    var queue = new RedisQueue<SimpleWorkItem>(new RedisQueueOptions<SimpleWorkItem> { ConnectionMultiplexer = muxer, LoggerFactory = Log });
    Assert.NotNull(queue);

    using (messageBus1)
    using (cache)
    using (queue)
    {
        await cache.SetAsync("test", "test", TimeSpan.FromSeconds(10));
        await queue.DequeueAsync(new CancellationToken(true));

        var countdown = new AsyncCountdownEvent(2);
        await messageBus1.SubscribeAsync<SimpleMessageA>(msg =>
        {
            Assert.Equal("Hello", msg.Data);
            countdown.Signal();
        });

        await messageBus1.PublishAsync(new SimpleMessageA { Data = "Hello" });
        await countdown.WaitAsync(TimeSpan.FromSeconds(2));
        Assert.Equal(1, countdown.CurrentCount);

        cache.Dispose();
        queue.Dispose();

        await messageBus1.PublishAsync(new SimpleMessageA { Data = "Hello" });
        await countdown.WaitAsync(TimeSpan.FromSeconds(2));
        Assert.Equal(0, countdown.CurrentCount);
    }
}
protected override IMessageBus GetMessageBus(Func<SharedMessageBusOptions, SharedMessageBusOptions> config = null)
{
    return new RedisMessageBus(o =>
    {
        o.Subscriber(SharedConnection.GetMuxer().GetSubscriber());
        o.Topic("test-messages");
        o.LoggerFactory(Log);
        if (config != null)
            config(o.Target);

        return o;
    });
}
public async Task CanTrimDeadletterItems()
{
    var queue = GetQueue(retries: 0, workItemTimeout: TimeSpan.FromMilliseconds(50), deadLetterMaxItems: 3, runQueueMaintenance: false) as RedisQueue<SimpleWorkItem>;
    if (queue == null)
        return;

    FlushAll();
    Assert.Equal(0, CountAllKeys());

    using (queue)
    {
        var db = SharedConnection.GetMuxer().GetDatabase();
        var workItemIds = new List<string>();

        for (int i = 0; i < 10; i++)
        {
            var id = await queue.EnqueueAsync(new SimpleWorkItem { Data = "blah", Id = i });
            Trace.WriteLine(id);
            workItemIds.Add(id);
        }

        for (int i = 0; i < 10; i++)
        {
            var workItem = await queue.DequeueAsync();
            await workItem.AbandonAsync();
            Trace.WriteLine("Abandoning: " + workItem.Id);
        }

        workItemIds.Reverse();
        await queue.DoMaintenanceWorkAsync();

        foreach (var id in workItemIds.Take(3))
        {
            Trace.WriteLine("Checking: " + id);
            Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        }

        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:in"));
        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:work"));
        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:wait"));
        Assert.Equal(3, await db.ListLengthAsync("q:SimpleWorkItem:dead"));
        Assert.InRange(CountAllKeys(), 13, 14);
    }
}
protected override IQueue<SimpleWorkItem> GetQueue(int retries = 1, TimeSpan? workItemTimeout = null, TimeSpan? retryDelay = null, int deadLetterMaxItems = 100, bool runQueueMaintenance = true)
{
    var queue = new RedisQueue<SimpleWorkItem>(o => o
        .ConnectionMultiplexer(SharedConnection.GetMuxer())
        .Retries(retries)
        .RetryDelay(retryDelay.GetValueOrDefault(TimeSpan.FromMinutes(1)))
        .DeadLetterMaxItems(deadLetterMaxItems)
        .WorkItemTimeout(workItemTimeout.GetValueOrDefault(TimeSpan.FromMinutes(5)))
        .RunMaintenanceTasks(runQueueMaintenance)
        .LoggerFactory(Log));
    _logger.LogDebug("Queue Id: {queueId}", queue.QueueId);
    return queue;
}
protected override IQueue<SimpleWorkItem> GetQueue(int retries = 1, TimeSpan? workItemTimeout = null, TimeSpan? retryDelay = null, int deadLetterMaxItems = 100, bool runQueueMaintenance = true)
{
    var queue = new RedisQueue<SimpleWorkItem>(new RedisQueueOptions<SimpleWorkItem>
    {
        ConnectionMultiplexer = SharedConnection.GetMuxer(),
        Retries = retries,
        RetryDelay = retryDelay.GetValueOrDefault(TimeSpan.FromMinutes(1)),
        DeadLetterMaxItems = deadLetterMaxItems,
        WorkItemTimeout = workItemTimeout.GetValueOrDefault(TimeSpan.FromMinutes(5)),
        RunMaintenanceTasks = runQueueMaintenance,
        LoggerFactory = Log
    });
    _logger.LogDebug("Queue Id: {queueId}", queue.QueueId);
    return queue;
}
public void CanTrimDeadletterItems()
{
    var queue = GetQueue(retries: 0, workItemTimeout: TimeSpan.FromMilliseconds(50), deadLetterMaxItems: 3);
    if (queue == null)
        return;

    FlushAll();
    Assert.Equal(0, CountAllKeys());

    using (queue)
    {
        var db = SharedConnection.GetMuxer().GetDatabase();
        var workItemIds = new List<string>();

        for (int i = 0; i < 10; i++)
        {
            var id = queue.Enqueue(new SimpleWorkItem { Data = "blah", Id = i });
            Trace.WriteLine(id);
            workItemIds.Add(id);
        }

        for (int i = 0; i < 10; i++)
        {
            var workItem = queue.Dequeue();
            workItem.Abandon();
            Trace.WriteLine("Abandoning: " + workItem.Id);
        }

        workItemIds.Reverse();
        Thread.Sleep(1000);

        foreach (var id in workItemIds.Take(3))
        {
            Trace.WriteLine("Checking: " + id);
            Assert.True(db.KeyExists("q:SimpleWorkItem:" + id));
        }

        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:in"));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:work"));
        Assert.Equal(0, db.ListLength("q:SimpleWorkItem:wait"));
        Assert.Equal(3, db.ListLength("q:SimpleWorkItem:dead"));
        Assert.Equal(10, CountAllKeys());
    }
}
public async Task VerifyCacheKeysAreCorrect()
{
    var queue = GetQueue(retries: 3, workItemTimeout: TimeSpan.FromSeconds(2), retryDelay: TimeSpan.Zero, runQueueMaintenance: false);
    if (queue == null)
        return;

    FlushAll();
    Assert.Equal(0, CountAllKeys());

    using (queue)
    {
        var db = SharedConnection.GetMuxer().GetDatabase();

        string id = await queue.EnqueueAsync(new SimpleWorkItem { Data = "blah", Id = 1 });
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.Equal(1, await db.ListLengthAsync("q:SimpleWorkItem:in"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.Equal(3, CountAllKeys());

        _logger.Info("-----");
        var workItem = await queue.DequeueAsync();
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:in"));
        Assert.Equal(1, await db.ListLengthAsync("q:SimpleWorkItem:work"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.True(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(4, CountAllKeys());

        _logger.Info("-----");
        await workItem.CompleteAsync();
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id));
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":enqueued"));
        Assert.False(await db.KeyExistsAsync("q:SimpleWorkItem:" + id + ":dequeued"));
        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:in"));
        Assert.Equal(0, await db.ListLengthAsync("q:SimpleWorkItem:work"));
        Assert.Equal(0, CountAllKeys());
    }
}