public async Task Should_return_the_complete_list_of_timeouts()
{
    // Seed a batch of already-due timeouts, then verify a single chunk query returns all of them.
    const int numberOfTimeoutsToAdd = 10;
    var context = new ContextBag();

    for (var added = 0; added < numberOfTimeoutsToAdd; added++)
    {
        var timeout = new TimeoutData
        {
            Time = DateTime.UtcNow.AddHours(-1),
            Destination = "timeouts@" + RuntimeEnvironment.MachineName,
            SagaId = Guid.NewGuid(),
            State = new byte[] { 0, 0, 133 },
            Headers = new Dictionary<string, string>
            {
                { "Bar", "34234" },
                { "Foo", "aString1" },
                { "Super", "aString2" }
            },
            OwningTimeoutManager = "MyTestEndpoint"
        };
        await _persister.Add(timeout, context);
    }

    var chunk = await _query.GetNextChunk(DateTime.UtcNow.AddYears(-3));
    Assert.AreEqual(numberOfTimeoutsToAdd, chunk.DueTimeouts.Length);
}
public void Should_remove_timeouts_by_id()
{
    // Store two timeouts, drain them through the chunk query, and remove each by id;
    // afterwards neither document may still be loadable.
    new TimeoutsIndex().Execute(store);

    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
    };

    var first = new TimeoutData
    {
        Time = DateTime.Now.AddYears(-1),
        OwningTimeoutManager = "MyTestEndpoint",
        Headers = new Dictionary<string, string> { { "Header1", "Value1" } }
    };
    var second = new TimeoutData
    {
        Time = DateTime.Now.AddYears(-1),
        OwningTimeoutManager = "MyTestEndpoint",
        Headers = new Dictionary<string, string> { { "Header1", "Value1" } }
    };

    persister.Add(first);
    persister.Add(second);
    WaitForIndexing(store);

    DateTime nextTimeToRunQuery;
    foreach (var chunkEntry in persister.GetNextChunk(DateTime.UtcNow.AddYears(-3), out nextTimeToRunQuery))
    {
        TimeoutData removed;
        persister.TryRemove(chunkEntry.Item1, out removed);
    }

    using (var session = store.OpenSession())
    {
        Assert.Null(session.Load<Timeout>(new Guid(first.Id)));
        Assert.Null(session.Load<Timeout>(new Guid(second.Id)));
    }
}
public void Should_return_the_correct_headers()
{
    // A timeout's headers must round-trip unchanged through Add and TryRemove.
    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
    };

    var expectedHeaders = new Dictionary<string, string>
    {
        { "Bar", "34234" },
        { "Foo", "aString1" },
        { "Super", "aString2" }
    };

    var timeout = new TimeoutData
    {
        Time = DateTime.UtcNow.AddHours(-1),
        Destination = new Address("timeouts", RuntimeEnvironment.MachineName),
        SagaId = Guid.NewGuid(),
        State = new byte[] { 1, 1, 133, 200 },
        Headers = expectedHeaders,
        OwningTimeoutManager = "MyTestEndpoint",
    };
    persister.Add(timeout);

    TimeoutData removed;
    persister.TryRemove(timeout.Id, out removed);

    CollectionAssert.AreEqual(expectedHeaders, removed.Headers);
}
// Verifies that when two DTC transactions race to delete the same timeout document,
// exactly one TryRemove reports success.
public async Task Remove_WhenConcurrentDeletesUsingDtc_OnlyOneOperationShouldSucceed()
{
    var persister = new TimeoutPersister(store);
    var timeoutData = new TimeoutData();
    await persister.Add(timeoutData, new ContextBag());

    // Rendezvous point: both tasks must have issued their delete before either commits,
    // so the two transactions genuinely overlap instead of running back-to-back.
    var documentRemoved = new CountdownEvent(2);

    var t1 = Task.Run(async () =>
    {
        using (var tx = new TransactionScope(TransactionScopeOption.RequiresNew, TransactionScopeAsyncFlowOption.Enabled))
        {
            var result = await persister.TryRemove(timeoutData.Id, new ContextBag());
            documentRemoved.Signal(1);
            documentRemoved.Wait();
            tx.Complete();
            return result;
        }
    });
    var t2 = Task.Run(async () =>
    {
        using (var tx = new TransactionScope(TransactionScopeOption.RequiresNew, TransactionScopeAsyncFlowOption.Enabled))
        {
            var result = await persister.TryRemove(timeoutData.Id, new ContextBag());
            documentRemoved.Signal(1);
            documentRemoved.Wait();
            tx.Complete();
            return result;
        }
    });

    // Deliberately non-short-circuiting '|' so BOTH tasks are awaited (and their
    // exceptions observed) even when the first already returned true.
    Assert.IsTrue(await t1 | await t2, "the document should be deleted");
    Assert.IsFalse(t1.Result && t2.Result, "only one operation should complete successfully");
}
public void EnsureMappingDocumentIsUsed()
{
    var context = new ContextBag();
    using (var db = new ReusableDB())
    {
        for (var iteration = 0; iteration < 5; iteration++)
        {
            using (var store = db.NewStore())
            {
                ApplyTestConventions(store, ConventionType.RavenDefault);
                store.Initialize();

                if (iteration > 0)
                {
                    // From the second pass on, drop the index: if the mapping document
                    // were not being consulted, the Add below would throw.
                    store.DatabaseCommands.DeleteIndex("Raven/DocumentsByEntityName");
                }

                var persister = new TimeoutPersister(store);
                var timeout = new TimeoutData
                {
                    Destination = EndpointName,
                    Headers = new Dictionary<string, string>(),
                    OwningTimeoutManager = EndpointName,
                    SagaId = Guid.NewGuid(),
                    Time = DateTime.UtcNow
                };
                persister.Add(timeout, context).Wait();
            }
        }
    }
}
public void EnsureMappingFailsWithoutIndex()
{
    var context = new ContextBag();
    using (var db = new ReusableDB())
    using (var store = db.NewStore())
    {
        ApplyTestConventions(store, ConventionType.RavenDefault);
        store.Initialize();

        // With the stock index gone the conventions have nothing to fall back on,
        // so the Add below must fail.
        store.DatabaseCommands.DeleteIndex("Raven/DocumentsByEntityName");

        var persister = new TimeoutPersister(store);
        var exception = Assert.Throws<AggregateException>(() =>
        {
            var timeout = new TimeoutData
            {
                Destination = EndpointName,
                Headers = new Dictionary<string, string>(),
                OwningTimeoutManager = EndpointName,
                SagaId = Guid.NewGuid(),
                Time = DateTime.UtcNow
            };
            persister.Add(timeout, context).Wait();
        });

        Assert.IsInstanceOf<InvalidOperationException>(exception.GetBaseException());
        Console.WriteLine($"Got expected Exception: {exception.Message}");
    }
}
public void Should_return_the_complete_list_of_timeouts()
{
    // All ten already-due timeouts must come back from one chunk query.
    new TimeoutsIndex().Execute(store);

    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
    };

    const int numberOfTimeoutsToAdd = 10;
    for (var added = 0; added < numberOfTimeoutsToAdd; added++)
    {
        persister.Add(new TimeoutData
        {
            Time = DateTime.UtcNow.AddHours(-1),
            Destination = new Address("timeouts", RuntimeEnvironment.MachineName),
            SagaId = Guid.NewGuid(),
            State = new byte[] { 0, 0, 133 },
            Headers = new Dictionary<string, string>
            {
                { "Bar", "34234" },
                { "Foo", "aString1" },
                { "Super", "aString2" }
            },
            OwningTimeoutManager = "MyTestEndpoint",
        });
    }

    WaitForIndexing(store);

    DateTime nextTimeToRunQuery;
    var chunk = persister.GetNextChunk(DateTime.UtcNow.AddYears(-3), out nextTimeToRunQuery);
    Assert.AreEqual(numberOfTimeoutsToAdd, chunk.Count());
}
public void EnsureMappingFailsWithoutIndex()
{
    var context = new ContextBag();
    using (var db = new ReusableDB())
    {
        using (var store = db.NewStore())
        {
            ApplyTestConventions(store, ConventionType.RavenDefault);
            store.Initialize();

            // Deleting the built-in index guarantees the conventions will throw.
            store.DatabaseCommands.DeleteIndex("Raven/DocumentsByEntityName");

            var persister = new TimeoutPersister(store);

            TestDelegate addTimeout = () => persister.Add(new TimeoutData
            {
                Destination = EndpointName,
                Headers = new Dictionary<string, string>(),
                OwningTimeoutManager = EndpointName,
                SagaId = Guid.NewGuid(),
                Time = DateTime.UtcNow
            }, context).Wait();

            var exception = Assert.Throws<AggregateException>(addTimeout);

            // The synchronous .Wait() wraps the failure in an AggregateException;
            // the root cause must be an InvalidOperationException.
            Assert.IsInstanceOf<InvalidOperationException>(exception.GetBaseException());
            Console.WriteLine($"Got expected Exception: {exception.Message}");
        }
    }
}
public void Should_return_the_complete_list_of_timeouts()
{
    new TimeoutsIndex().Execute(store);
    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
    };

    // Insert ten timeouts that were all due an hour ago.
    const int numberOfTimeoutsToAdd = 10;
    var remaining = numberOfTimeoutsToAdd;
    while (remaining-- > 0)
    {
        var timeout = new TimeoutData
        {
            Time = DateTime.UtcNow.AddHours(-1),
            Destination = new Address("timeouts", RuntimeEnvironment.MachineName),
            SagaId = Guid.NewGuid(),
            State = new byte[] { 0, 0, 133 },
            Headers = new Dictionary<string, string>
            {
                { "Bar", "34234" },
                { "Foo", "aString1" },
                { "Super", "aString2" }
            },
            OwningTimeoutManager = "MyTestEndpoint",
        };
        persister.Add(timeout);
    }

    WaitForIndexing(store);

    DateTime nextTimeToRunQuery;
    Assert.AreEqual(
        numberOfTimeoutsToAdd,
        persister.GetNextChunk(DateTime.UtcNow.AddYears(-3), out nextTimeToRunQuery).Count());
}
public void Should_return_the_next_time_of_retrieval()
{
    // With one future timeout in the store, the chunk query must report a
    // next-run time within a second of that timeout's due time.
    new TimeoutsIndex().Execute(store);

    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
        CleanupGapFromTimeslice = TimeSpan.FromSeconds(1),
        TriggerCleanupEvery = TimeSpan.MinValue,
    };

    var expectedNextRun = DateTime.UtcNow.AddHours(1);
    persister.Add(new TimeoutData
    {
        Time = expectedNextRun,
        Destination = new Address("timeouts", RuntimeEnvironment.MachineName),
        SagaId = Guid.NewGuid(),
        State = new byte[] { 0, 0, 133 },
        Headers = new Dictionary<string, string>
        {
            { "Bar", "34234" },
            { "Foo", "aString1" },
            { "Super", "aString2" }
        },
        OwningTimeoutManager = "MyTestEndpoint",
    });

    WaitForIndexing(store);

    DateTime nextTimeToRunQuery;
    persister.GetNextChunk(DateTime.UtcNow.AddYears(-3), out nextTimeToRunQuery);

    Assert.IsTrue((expectedNextRun - nextTimeToRunQuery).TotalSeconds < 1);
}
public void EnsureMappingDocumentIsUsed()
{
    var context = new ContextBag();
    using (var db = new ReusableDB())
    {
        var iteration = 0;
        while (iteration < 5)
        {
            using (var store = db.NewStore())
            {
                ApplyTestConventions(store, ConventionType.RavenDefault);
                store.Initialize();

                if (iteration != 0)
                {
                    // On every iteration after the first, remove the index so that
                    // operations will throw if the mapping document does not exist.
                    store.DatabaseCommands.DeleteIndex("Raven/DocumentsByEntityName");
                }

                new TimeoutPersister(store).Add(new TimeoutData
                {
                    Destination = EndpointName,
                    Headers = new Dictionary<string, string>(),
                    OwningTimeoutManager = EndpointName,
                    SagaId = Guid.NewGuid(),
                    Time = DateTime.UtcNow
                }, context).Wait();
            }

            iteration++;
        }
    }
}
public void Should_return_the_next_time_of_retrieval()
{
    new TimeoutsIndex().Execute(store);

    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
        CleanupGapFromTimeslice = TimeSpan.FromSeconds(1),
        TriggerCleanupEvery = TimeSpan.MinValue,
    };

    // One timeout due an hour from now.
    var nextTime = DateTime.UtcNow.AddHours(1);
    var timeout = new TimeoutData
    {
        Time = nextTime,
        Destination = new Address("timeouts", RuntimeEnvironment.MachineName),
        SagaId = Guid.NewGuid(),
        State = new byte[] { 0, 0, 133 },
        Headers = new Dictionary<string, string>
        {
            { "Bar", "34234" },
            { "Foo", "aString1" },
            { "Super", "aString2" }
        },
        OwningTimeoutManager = "MyTestEndpoint",
    };
    persister.Add(timeout);

    WaitForIndexing(store);

    DateTime nextTimeToRunQuery;
    persister.GetNextChunk(DateTime.UtcNow.AddYears(-3), out nextTimeToRunQuery);

    // The reported next-run time should land within a second of the due time.
    var driftSeconds = (nextTime - nextTimeToRunQuery).TotalSeconds;
    Assert.IsTrue(driftSeconds < 1);
}
public void Should_remove_timeouts_by_sagaid()
{
    // Timeouts registered for a saga must disappear when removed by saga id.
    new TimeoutsIndex().Execute(store);

    var persister = new TimeoutPersister
    {
        DocumentStore = store,
        EndpointName = "MyTestEndpoint",
    };

    var firstSagaId = Guid.NewGuid();
    var secondSagaId = Guid.NewGuid();

    var firstTimeout = new TimeoutData
    {
        SagaId = firstSagaId,
        Time = DateTime.Now.AddYears(1),
        OwningTimeoutManager = "MyTestEndpoint",
        Headers = new Dictionary<string, string> { { "Header1", "Value1" } }
    };
    var secondTimeout = new TimeoutData
    {
        SagaId = secondSagaId,
        Time = DateTime.Now.AddYears(1),
        OwningTimeoutManager = "MyTestEndpoint",
        Headers = new Dictionary<string, string> { { "Header1", "Value1" } }
    };

    persister.Add(firstTimeout);
    persister.Add(secondTimeout);
    WaitForIndexing(store);

    persister.RemoveTimeoutBy(firstSagaId);
    persister.RemoveTimeoutBy(secondSagaId);

    using (var session = store.OpenSession())
    {
        Assert.Null(session.Load<Timeout>(new Guid(firstTimeout.Id)));
        Assert.Null(session.Load<Timeout>(new Guid(secondTimeout.Id)));
    }
}
public async Task Remove_WhenNoTimeoutRemoved_ShouldReturnFalse()
{
    // Removing an id that was never stored must report failure rather than throw.
    var persister = new TimeoutPersister(store);
    await persister.Add(new TimeoutData(), new ContextBag());

    var unknownId = Guid.NewGuid().ToString();
    var removed = await persister.TryRemove(unknownId, new ContextBag());

    Assert.IsFalse(removed);
}
public async Task Remove_WhenTimeoutRemoved_ShouldReturnTrue()
{
    // Removing a timeout that exists must report success.
    var persister = new TimeoutPersister(store);
    var timeoutData = new TimeoutData();
    await persister.Add(timeoutData, new ContextBag());

    var removed = await persister.TryRemove(timeoutData.Id, new ContextBag());

    Assert.IsTrue(removed);
}
public async Task Add_WhenNoIdProvided_ShouldSetDbGeneratedTimeoutId()
{
    var persister = new TimeoutPersister(store);
    var timeout = new TimeoutData { Id = null };

    await persister.Add(timeout, new ContextBag());

    // The store assigns an id during Add...
    Assert.IsNotNull(timeout.Id);

    // ...and that generated id resolves back to the stored document.
    var stored = await persister.Peek(timeout.Id, new ContextBag());
    Assert.IsNotNull(stored);
}
public async Task Add_WhenIdProvided_ShouldOverrideGivenId()
{
    var persister = new TimeoutPersister(store);
    var suppliedId = Guid.NewGuid().ToString();
    var timeout = new TimeoutData { Id = suppliedId };

    await persister.Add(timeout, new ContextBag());

    // The persister replaces any caller-supplied id with its own...
    Assert.AreNotEqual(suppliedId, timeout.Id);

    // ...so looking up the original id must find nothing.
    var lookedUp = await persister.Peek(suppliedId, new ContextBag());
    Assert.IsNull(lookedUp);
}
public void Add_AddsTimeout()
{
    // Arrange: a fully populated timeout (null Headers/State are allowed).
    var timeout = new TimeoutData
    {
        Destination = new Address("queue", "machine"),
        OwningTimeoutManager = "owner",
        Headers = null,
        Id = Guid.NewGuid().ToString(),
        SagaId = Guid.NewGuid(),
        State = null,
        Time = DateTime.UtcNow
    };

    // Act.
    _persister.Add(timeout);

    // Assert: the row is visible through the backing database context.
    var stored = _dbContext.Timeouts.FirstOrDefault(t => t.SagaId == timeout.SagaId);
    stored.Should().NotBeNull();
}
public async Task Add_ShouldStoreSchemaVersion()
{
    var persister = new TimeoutPersister(store);
    var timeoutId = Guid.NewGuid().ToString();
    var timeout = new CoreTimeoutData { Id = timeoutId };

    await persister.Add(timeout, new ContextBag());
    WaitForIndexing();

    using (var session = store.OpenAsyncSession())
    {
        var storedTimeout = await session
            .Query<RavenDBTimeoutData>()
            .SingleOrDefaultAsync();

        // The schema version lives in the document metadata, not the document body.
        var metadata = session.Advanced.GetMetadataFor(storedTimeout);
        Assert.AreEqual(
            RavenDBTimeoutData.SchemaVersion,
            metadata[SchemaVersionExtensions.TimeoutDataSchemaVersionMetadataKey]);
    }
}
// Stress test: a producer thread inserts 10,000 timeouts while this thread concurrently
// drains them via GetNextChunk/TryRemove, asserting no timeout is ever skipped.
public void Never_ever()
{
    var db = Guid.NewGuid().ToString();
    using (var documentStore = new DocumentStore
    {
        Url = "http://localhost:8081",
        DefaultDatabase = db,
    }.Initialize())
    {
        new TimeoutsIndex().Execute(documentStore);

        var persister = new TimeoutPersister
        {
            DocumentStore = documentStore,
            EndpointName = "foo",
            TriggerCleanupEvery = TimeSpan.FromHours(1), // Make sure cleanup doesn't run automatically
        };

        var startSlice = DateTime.UtcNow.AddYears(-10);
        // avoid cleanup from running during the test by making it register as being run
        Assert.AreEqual(0, persister.GetCleanupChunk(startSlice).Count());

        // Every (id, due-time) pair the producer adds; used for the final tally.
        var expected = new List<Tuple<string, DateTime>>();
        var lastTimeout = DateTime.UtcNow;
        var finishedAdding = false;

        // Producer: inserts timeouts due 1-20 seconds from now, all for one saga.
        // NOTE(review): lastTimeout, finishedAdding and the expected list are shared
        // with the draining thread without synchronization — tolerated as test-only code,
        // but confirm this is intentional before reusing the pattern.
        new Thread(() =>
        {
            var sagaId = Guid.NewGuid();
            for (var i = 0; i < 10000; i++)
            {
                var td = new TimeoutData
                {
                    SagaId = sagaId,
                    Destination = new Address("queue", "machine"),
                    Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                    OwningTimeoutManager = string.Empty,
                };
                persister.Add(td);
                expected.Add(new Tuple<string, DateTime>(td.Id, td.Time));
                lastTimeout = (td.Time > lastTimeout) ? td.Time : lastTimeout;
            }
            finishedAdding = true;
            Trace.WriteLine("*** Finished adding ***");
        }).Start();

        // Mimic the behavior of the TimeoutPersister coordinator: keep pulling chunks,
        // advance the slice to the latest due time seen, and remove each returned timeout.
        var found = 0;
        TimeoutData tmptd;
        while (!finishedAdding || startSlice < lastTimeout)
        {
            DateTime nextRetrieval;
            var timeoutDatas = persister.GetNextChunk(startSlice, out nextRetrieval);
            foreach (var timeoutData in timeoutDatas)
            {
                if (startSlice < timeoutData.Item2)
                {
                    startSlice = timeoutData.Item2;
                }
                // Item1 is the timeout id; every timeout a chunk returns must still be removable.
                Assert.IsTrue(persister.TryRemove(timeoutData.Item1, out tmptd));
                found++;
            }
        }

        WaitForIndexing(documentStore);

        // If the persister reports stale results have been seen at one point during its
        // normal operation, we need to perform manual cleanup.
        while (true)
        {
            var chunkToCleanup = persister.GetCleanupChunk(DateTime.UtcNow.AddDays(1)).ToArray();
            if (chunkToCleanup.Length == 0)
            {
                break;
            }

            found += chunkToCleanup.Length;
            foreach (var tuple in chunkToCleanup)
            {
                Assert.IsTrue(persister.TryRemove(tuple.Item1, out tmptd));
            }

            WaitForIndexing(documentStore);
        }

        // Final tally: the store must be empty and every inserted timeout accounted for.
        using (var session = documentStore.OpenSession())
        {
            var results = session.Query<TimeoutData>().ToList();
            Assert.AreEqual(0, results.Count);
        }
        Assert.AreEqual(expected.Count, found);
    }
}
// NOTE(review): this method appears to have been mangled, most likely by a
// credential-scrubbing pass (`Url = "http://*****:*****@machine"`): everything
// between the DocumentStore initializer's Url value and the middle of a
// TimeoutData initializer is missing. The identifiers td, persister, context,
// expected, lastTimeout, finishedAdding, startSlice and query are referenced but
// never declared, and the enclosing Thread lambda's opening is gone. This cannot
// compile as-is — restore the missing section from version history (compare the
// intact sibling Never_ever variants in this file) before editing further.
public async Task Never_ever() { var db = Guid.NewGuid().ToString(); using (var documentStore = new DocumentStore { Url = "http://*****:*****@machine", Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)), OwningTimeoutManager = string.Empty }; persister.Add(td, context).Wait(); expected.Add(new Tuple<string, DateTime>(td.Id, td.Time)); lastTimeout = (td.Time > lastTimeout) ? td.Time : lastTimeout; } finishedAdding = true; Trace.WriteLine("*** Finished adding ***"); }).Start(); // Mimic the behavior of the TimeoutPersister coordinator var found = 0; while (!finishedAdding || startSlice < lastTimeout) { var timeoutData = await query.GetNextChunk(startSlice); foreach (var timeout in timeoutData.DueTimeouts) { if (startSlice < timeout.DueTime) { startSlice = timeout.DueTime; } Assert.True(await persister.TryRemove(timeout.Id, context)); found++; } } WaitForIndexing(documentStore); // If the persister reports stale results have been seen at one point during its normal operation, // we need to perform manual cleanup. while (true) { var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray(); if (chunkToCleanup.Length == 0) { break; } found += chunkToCleanup.Length; foreach (var tuple in chunkToCleanup) { Assert.True(await persister.TryRemove(tuple.Id, context)); } WaitForIndexing(documentStore); } using (var session = documentStore.OpenAsyncSession()) { var results = await session.Query<TimeoutData>().ToListAsync(); Assert.AreEqual(0, results.Count); } Assert.AreEqual(expected.Count, found); } }
// Stress test: two producer threads — each with its own persister (the second on a
// separate client connection) — insert timeouts concurrently while this thread drains
// chunks; no timeout may ever be skipped.
public async Task Should_not_skip_timeouts_also_with_multiple_clients_adding_timeouts()
{
    var documentStore = store;
    var query = new QueryTimeouts(documentStore, "foo")
    {
        TriggerCleanupEvery = TimeSpan.FromDays(1) // Make sure cleanup doesn't run automatically
    };
    var persister = new TimeoutPersister(documentStore);
    var context = new ContextBag();

    var startSlice = DateTime.UtcNow.AddYears(-10);
    // avoid cleanup from running during the test by making it register as being run
    Assert.AreEqual(0, (await query.GetCleanupChunk(startSlice)).Count());

    const int insertsPerThread = 1000;
    var expected = 0;
    var lastExpectedTimeout = DateTime.UtcNow;
    var finishedAdding1 = false;
    var finishedAdding2 = false;

    // First producer, reusing the drain thread's persister instance.
    // NOTE(review): lastExpectedTimeout and the finished flags are written from both
    // producers without synchronization; only 'expected' is guarded via Interlocked.
    // Tolerated as test-only code — confirm before reusing the pattern.
    new Thread(() =>
    {
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < insertsPerThread; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister.Add(td, context).Wait();
            Interlocked.Increment(ref expected);
            lastExpectedTimeout = (td.Time > lastExpectedTimeout) ? td.Time : lastExpectedTimeout;
        }
        finishedAdding1 = true;
        Console.WriteLine("*** Finished adding ***");
    }).Start();

    // Second producer, deliberately creating its own TimeoutPersister (second client connection).
    new Thread(() =>
    {
        var persister2 = new TimeoutPersister(store);
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < insertsPerThread; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister2.Add(td, context).Wait();
            Interlocked.Increment(ref expected);
            lastExpectedTimeout = (td.Time > lastExpectedTimeout) ? td.Time : lastExpectedTimeout;
        }
        finishedAdding2 = true;
        Console.WriteLine("*** Finished adding via a second client connection ***");
    }).Start();

    // Mimic the behavior of the TimeoutPersister coordinator: keep pulling chunks,
    // advance the slice to the latest due time seen, and remove each returned timeout.
    var found = 0;
    while (!finishedAdding1 || !finishedAdding2 || startSlice < lastExpectedTimeout)
    {
        var timeoutDatas = await query.GetNextChunk(startSlice);
        foreach (var timeoutData in timeoutDatas.DueTimeouts)
        {
            if (startSlice < timeoutData.DueTime)
            {
                startSlice = timeoutData.DueTime;
            }
            Assert.True(await persister.TryRemove(timeoutData.Id, context));
            found++;
        }
    }

    // If the persister reports stale results have been seen at one point during its
    // normal operation, we need to perform manual cleanup.
    while (true)
    {
        var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray();
        Console.WriteLine("Cleanup: got a chunk of size " + chunkToCleanup.Length);
        if (chunkToCleanup.Length == 0)
        {
            break;
        }

        found += chunkToCleanup.Length;
        foreach (var tuple in chunkToCleanup)
        {
            Assert.True(await persister.TryRemove(tuple.Id, context));
        }
    }

    // Final tally: the store must be empty and every inserted timeout accounted for.
    using (var session = documentStore.OpenSession())
    {
        var results = session.Query<TimeoutDocument>().ToList();
        Assert.AreEqual(0, results.Count);
    }
    Assert.AreEqual(expected, found);
}
// Stress test (async QueryTimeouts variant): one producer thread inserts 10,000
// timeouts while this thread concurrently drains them; no timeout may ever be skipped.
public async Task Never_ever()
{
    var documentStore = store;
    var query = new QueryTimeouts(documentStore, "foo")
    {
        TriggerCleanupEvery = TimeSpan.FromHours(1) // Make sure cleanup doesn't run automatically
    };
    var persister = new TimeoutPersister(documentStore);
    var context = new ContextBag();

    var startSlice = DateTime.UtcNow.AddYears(-10);
    // avoid cleanup from running during the test by making it register as being run
    Assert.AreEqual(0, (await query.GetCleanupChunk(startSlice)).Count());

    // Every (id, due-time) pair the producer adds; used for the final tally.
    var expected = new List<Tuple<string, DateTime>>();
    var lastTimeout = DateTime.UtcNow;
    var finishedAdding = false;

    // Producer: inserts timeouts due 1-20 seconds from now, all for one saga.
    // NOTE(review): expected, lastTimeout and finishedAdding are shared with this
    // thread without synchronization — tolerated as test-only code.
    new Thread(() =>
    {
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < 10000; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister.Add(td, context).Wait();
            expected.Add(new Tuple<string, DateTime>(td.Id, td.Time));
            lastTimeout = (td.Time > lastTimeout) ? td.Time : lastTimeout;
        }
        finishedAdding = true;
        Trace.WriteLine("*** Finished adding ***");
    }).Start();

    // Mimic the behavior of the TimeoutPersister coordinator
    var found = 0;
    while (!finishedAdding || startSlice < lastTimeout)
    {
        var timeoutData = await query.GetNextChunk(startSlice);
        foreach (var timeout in timeoutData.DueTimeouts)
        {
            if (startSlice < timeout.DueTime)
            {
                startSlice = timeout.DueTime;
            }
            Assert.True(await persister.TryRemove(timeout.Id, context));
            found++;
        }

        //Todo: Investigate!
        //Without this, sometime it never exited the while loop, even though everything was correctly removed
        if (!timeoutData.DueTimeouts.Any())
        {
            startSlice = timeoutData.NextTimeToQuery;
        }
    }

    // If the persister reports stale results have been seen at one point during its
    // normal operation, we need to perform manual cleanup.
    while (true)
    {
        var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray();
        if (chunkToCleanup.Length == 0)
        {
            break;
        }

        found += chunkToCleanup.Length;
        foreach (var tuple in chunkToCleanup)
        {
            Assert.True(await persister.TryRemove(tuple.Id, context));
        }
    }

    // Final tally: the store must be empty and every inserted timeout accounted for.
    using (var session = documentStore.OpenSession())
    {
        var results = session.Query<TimeoutDocument>().ToList();
        Assert.AreEqual(0, results.Count);
    }
    Assert.AreEqual(expected.Count, found);
}
public async Task EnsureOldAndNewTimeoutsCanBeReceived(ConventionType seedType)
{
    using (var db = new ReusableDB())
    {
        // Seed "old" timeouts with the prefill (legacy) conventions.
        using (var store = db.NewStore())
        {
            ApplyPrefillConventions(store, seedType);
            store.Initialize();
            await Prefill(store, seedType);
        }

        // Re-open the same database with the current conventions and add 10 more.
        using (var store = db.NewStore())
        {
            ApplyTestConventions(store, seedType);
            store.Initialize();
            CreateTimeoutIndex(store);

            var persister = new TimeoutPersister(store);
            for (var added = 1; added <= 10; added++)
            {
                var timeout = new TimeoutData
                {
                    Destination = EndpointName,
                    Headers = new Dictionary<string, string>(),
                    OwningTimeoutManager = EndpointName,
                    SagaId = Guid.NewGuid(),
                    Time = dueTimeout
                };
                await persister.Add(timeout, new ContextBag());
            }

            db.WaitForIndexing(store);

            // Both generations of timeouts must come back from a single chunk query.
            var query = new QueryTimeouts(store, EndpointName);
            var chunkTuples = (await query.GetNextChunk(DateTime.UtcNow.AddYears(-10))).DueTimeouts.ToArray();

            Assert.AreEqual(20, chunkTuples.Length);
            foreach (var tuple in chunkTuples)
            {
                Console.WriteLine($"Received timeout {tuple.Id}");
                Assert.AreEqual(dueTimeout, tuple.DueTime);
            }
        }
    }
}