/// <summary>
/// A request configured with two SLAs must stay tracked until the LONGEST
/// SLA elapses; expiry of the shorter SLA alone must not evict it.
/// </summary>
public void ProcessSlaTimeouts_should_remove_requests_only_if_the_longest_sla_timed_out()
{
    const int shortSlaMs = 20;
    const int longSlaMs = 50;
    const int slaMultiplier = 2;
    var shortWaitMs = shortSlaMs * slaMultiplier + WaitDeltaMs;
    var longWaitMs = longSlaMs * slaMultiplier + WaitDeltaMs;

    // One request, two response SLAs: 20ms for Response, 50ms for Response2.
    SlaDefinitionBuilder
        .For<Request>(req => req.Id)
        .AddSla<Response>(TimeSpan.FromMilliseconds(shortSlaMs), rsp => rsp.Id)
        .AddSla<Response2>(TimeSpan.FromMilliseconds(longSlaMs), rsp => rsp.Id)
        .Configure(_slaProvider);
    _slaProcessor = new TestableSlaProcessor(_slaProvider);

    _slaProcessor.ProcessOutgoingMessage(new Request { Id = Guid.NewGuid() });

    // NOTE(review): wall-clock sleeps make this timing-sensitive. If
    // WaitDeltaMs >= 10ms this first wait already exceeds the 50ms SLA and
    // the "no timeout yet" assert becomes flaky — confirm WaitDeltaMs size.
    Thread.Sleep(shortWaitMs);
    _slaProcessor.ProcessSlaTimeouts();
    Assert.That(_slaProcessor.Timeouts.Count, Is.EqualTo(0), "None of messages should time out yet");

    // NOTE(review): the original subtracts the short SLA (not the short
    // wait) here; total elapsed time comfortably exceeds the long SLA
    // either way, so the asymmetry is harmless but looks accidental.
    Thread.Sleep(longWaitMs - shortSlaMs);
    _slaProcessor.ProcessSlaTimeouts();
    Assert.That(_slaProcessor.Timeouts.Count, Is.EqualTo(1), "Message should time out now");
}
/// <summary>
/// Hammers the processor from 64 parallel workers, interleaving message
/// registrations with timeout purges (1 purge per 100 actions), then checks
/// that no registration was lost: every added message eventually times out.
/// </summary>
public void ProcessSlaTimeouts_should_be_thread_safe()
{
    const int totalActions = 50000;
    const int groupSize = 100;
    const int addsPerGroup = groupSize - 1; // last slot in each group of 100 is a purge

    // A 1-tick SLA guarantees every registered message is overdue immediately.
    SlaDefinitionBuilder.AddSla<Request, Response>(TimeSpan.FromTicks(1), r => r.Id, r => r.Id, _slaProvider);
    _slaProcessor = new TestableSlaProcessor(_slaProvider, totalActions);

    Action purge = () => _slaProcessor.ProcessSlaTimeouts();
    Action add = () => _slaProcessor.ProcessOutgoingMessage(new Request { Id = Guid.NewGuid() });

    Enumerable.Range(0, totalActions)
        .Select(i => i % groupSize >= addsPerGroup ? purge : add)
        .AsParallel()
        .WithDegreeOfParallelism(64)
        .ForAll(action => action.Invoke());

    // Final sweep so every message added after the last concurrent purge is collected.
    _slaProcessor.ProcessSlaTimeouts();

    var expectedTimeouts = addsPerGroup * totalActions / groupSize;
    Assert.That(_slaProcessor.Timeouts.Count, Is.EqualTo(expectedTimeouts), "Processor should timeout all messages");
}
/// <summary>
/// With a 50ms SLA on Request and a 2-minute SLA on Request2, only the two
/// Request messages should be reported once the shorter SLA has elapsed.
/// </summary>
public void ProcessSlaTimeouts_should_remove_all_timedout_requests()
{
    const int shortSlaMs = 50;
    const int slaMultiplier = 2;

    SlaDefinitionBuilder.AddSla<Request, Response>(TimeSpan.FromMilliseconds(shortSlaMs), req => req.Id, rsp => rsp.Id, _slaProvider);
    SlaDefinitionBuilder.AddSla<Request2, Response2>(TimeSpan.FromMinutes(2), req => req.Id, rsp => rsp.Id, _slaProvider);
    _slaProcessor = new TestableSlaProcessor(_slaProvider);

    // Two messages of each type; only the Request pair has a short SLA.
    _slaProcessor.ProcessOutgoingMessage(new Request { Id = Guid.NewGuid() });
    _slaProcessor.ProcessOutgoingMessage(new Request { Id = Guid.NewGuid() });
    _slaProcessor.ProcessOutgoingMessage(new Request2 { Id = Guid.NewGuid() });
    _slaProcessor.ProcessOutgoingMessage(new Request2 { Id = Guid.NewGuid() });

    _slaProcessor.ProcessSlaTimeouts();
    Assert.That(_slaProcessor.Timeouts.Count, Is.EqualTo(0), "None of messages should time out yet");

    // Wait twice the short SLA (plus slack) so both Request messages are overdue.
    Thread.Sleep(TimeSpan.FromMilliseconds(shortSlaMs * slaMultiplier + WaitDeltaMs));
    _slaProcessor.ProcessSlaTimeouts();

    Assert.That(_slaProcessor.Timeouts.Count, Is.EqualTo(2), "Two messages should timeout");
    Assert.That(_slaProcessor.Timeouts.Count(m => m.Item1.Type == typeof(Request)), Is.EqualTo(2), "Messages of Request type should timeout");
}
/// <summary>
/// A processor constructed with a quota of 1000 must process roughly that
/// many messages per type and reject the rest. The assertions tolerate a
/// Within(64) margin because 64 parallel workers may race past the quota.
/// </summary>
public void Processor_should_limit_processed_messages()
{
    const int quota = 1000;
    const int parallelism = 64;

    _slaProcessor = new TestableSlaProcessor(_slaProvider, quota);
    ConfigureSla();

    var ids = Enumerable.Range(0, 50000).Select(i => Guid.NewGuid()).ToArray();

    // Register far more outgoing messages than the quota allows.
    ids.AsParallel().WithDegreeOfParallelism(parallelism).ForAll(id =>
    {
        _slaProcessor.ProcessOutgoingMessage(new Request { Id = id });
        _slaProcessor.ProcessOutgoingMessage(new Request2 { Id = id });
    });

    // Complete them all; only the ones within quota were actually tracked.
    ids.AsParallel().WithDegreeOfParallelism(parallelism).ForAll(id =>
    {
        _slaProcessor.ProcessIncomingMessage(new Response { Id = id });
        _slaProcessor.ProcessIncomingMessage(new ErrorResponse2 { Id = id });
    });

    Assert.That(CountProcessedMessages<Request>(), Is.EqualTo(quota).Within(parallelism), "Processed Request messages should be limited by quota");
    Assert.That(CountProcessedMessages<Request2>(), Is.EqualTo(quota).Within(parallelism), "Processed Request2 messages should be limited by quota");
    Assert.That(CountRejectedMessages<Request>(), Is.EqualTo(ids.Length - quota).Within(parallelism), "The Request messages exceeding the quota should be rejected");
    Assert.That(CountRejectedMessages<Request2>(), Is.EqualTo(ids.Length - quota).Within(parallelism), "The Request2 messages exceeding the quota should be rejected");
}
/// <summary>
/// With a quota equal to the message count, 50000 request/response pairs of
/// two types driven from 64 parallel workers must all be processed with
/// zero rejections.
/// </summary>
public void Processor_should_be_able_to_process_multiple_messages_in_parallel()
{
    _slaProcessor = new TestableSlaProcessor(_slaProvider, 50000);
    ConfigureSla();

    var ids = Enumerable.Range(0, 50000).Select(i => Guid.NewGuid()).ToArray();

    // Phase 1: register every outgoing message concurrently.
    ids.AsParallel().WithDegreeOfParallelism(64).ForAll(id =>
    {
        _slaProcessor.ProcessOutgoingMessage(new Request { Id = id });
        _slaProcessor.ProcessOutgoingMessage(new Request2 { Id = id });
    });

    // Phase 2: complete every registration concurrently (one via error response).
    ids.AsParallel().WithDegreeOfParallelism(64).ForAll(id =>
    {
        _slaProcessor.ProcessIncomingMessage(new Response { Id = id });
        _slaProcessor.ProcessIncomingMessage(new ErrorResponse2 { Id = id });
    });

    Assert.That(CountProcessedMessages<Request>(), Is.EqualTo(ids.Length), "Not all Request messages were processed");
    Assert.That(CountProcessedMessages<Request2>(), Is.EqualTo(ids.Length), "Not all Request2 messages were processed");
    Assert.That(CountRejectedMessages<Request>(), Is.EqualTo(0), "All Request messages should be successfully processed");
    Assert.That(CountRejectedMessages<Request2>(), Is.EqualTo(0), "All Request2 messages should be successfully processed");
}
/// <summary>
/// Disposing the processor twice must dispose the injected timeout
/// scheduler exactly once (idempotent Dispose).
/// </summary>
public void Dispose_should_dispose_timeout_scheduler_once()
{
    var schedulerMock = new Mock<IDisposable>();
    _slaProcessor = new TestableSlaProcessor(_slaProvider, p => schedulerMock.Object);

    _slaProcessor.Dispose();
    _slaProcessor.Dispose(); // second call must be a no-op

    schedulerMock.Verify(s => s.Dispose(), Times.Once);
}
/// <summary>
/// Per-test setup: fresh SLA provider and a processor wired to it, so tests
/// never share timeout state.
/// </summary>
public void SetUp()
{
    _slaProvider = new SlaProvider();
    _slaProcessor = new TestableSlaProcessor(_slaProvider);
}