/// <summary>
/// Creates the persister and query under test against the fixture's shared document store.
/// </summary>
public override void SetUp()
{
    base.SetUp();

    var testStore = store;
    _persister = new TimeoutPersister(testStore);
    _query = new QueryTimeouts(testStore, "MyTestEndpoint");
}
/// <summary>
/// Deploys the timeouts index into the store, then wires up the persister and
/// query under test.
/// </summary>
public override void SetUp()
{
    base.SetUp();

    // The index must exist before any timeout queries are issued.
    new TimeoutsIndex().Execute(store);

    persister = new TimeoutPersister(store);
    query = new QueryTimeouts(store, "MyTestEndpoint");
}
/// <summary>
/// Registers the legacy CLR-type conversion listener, deploys the timeouts index,
/// and creates the query under test. (In the flattened source the inline comment
/// swallowed the rest of the statement list; it is restored to its own line here.)
/// </summary>
public override void SetUp()
{
    base.SetUp();

    // for querying we don't need TimeoutDataV1toV2Converter
    store.Listeners.RegisterListener(new FakeLegacyTimoutDataClrTypeConversionListener());

    new TimeoutsIndex().Execute(store);
    query = new QueryTimeouts(store, "MyTestEndpoint");
}
// Verifies that timeouts prefilled under one set of document conventions are still
// received after the DocumentStore is torn down and re-created (three times), so the
// stored conventions document does not break subsequent runs.
// NOTE(review): relies on fixture members defined elsewhere in the class
// (ApplyPrefillConventions, ApplyTestConventions, Prefill, CreateTimeoutIndex,
// EndpointName, dueTimeout) — their exact semantics are not visible here.
public async Task TestReceivingTimeouts(ConventionType seedType)
{
    using (var db = new ReusableDB())
    {
        string prefillIndex;
        using (var store = db.NewStore())
        {
            ApplyPrefillConventions(store, seedType);
            store.Initialize();
            await Prefill(store, seedType);
            // Remember the index definition created against the prefill conventions
            // so later runs can assert it has not drifted.
            prefillIndex = CreateTimeoutIndex(store);
        }

        // Need to ensure multiple runs will work, after conventions document is stored
        for (var i = 0; i < 3; i++)
        {
            using (var store = db.NewStore())
            {
                Console.WriteLine($"Testing receives with DocumentStore initially configured for {seedType} conventions.");
                ApplyTestConventions(store, seedType);
                store.Initialize();
                var index = CreateTimeoutIndex(store);
                db.WaitForIndexing(store);
                // If the index definition changed between runs, previously stored
                // timeouts would be invisible to the query below.
                Assert.AreEqual(index, prefillIndex, "Index definitions must match or previous timeouts will not be found.");
                var query = new QueryTimeouts(store, EndpointName);
                // Query from far in the past so every prefilled timeout is due.
                var chunkTuples = (await query.GetNextChunk(DateTime.UtcNow.AddYears(-10))).DueTimeouts.ToArray();
                // NOTE(review): the count of 10 presumably matches what Prefill inserts — confirm there.
                Assert.AreEqual(10, chunkTuples.Length);
                foreach (var tuple in chunkTuples)
                {
                    Console.WriteLine($"Received timeout {tuple.Id}");
                    Assert.AreEqual(dueTimeout, tuple.DueTime);
                }
            }
        }
    }
}
/// <summary>
/// Captures the <see cref="QueryTimeouts"/> instance this canceller operates on.
/// NOTE(review): wider purpose inferred from the type name — confirm against the
/// rest of the class, which is not visible here.
/// </summary>
public QueryCanceller(QueryTimeouts queryTimeouts) => this.queryTimeouts = queryTimeouts;
// Verifies that timeouts prefilled with the "seed" conventions and timeouts freshly
// added through the current persister are both returned by a single query chunk.
// NOTE(review): relies on fixture members (ApplyPrefillConventions, ApplyTestConventions,
// Prefill, CreateTimeoutIndex, EndpointName, dueTimeout) defined elsewhere in the class.
public async Task EnsureOldAndNewTimeoutsCanBeReceived(ConventionType seedType)
{
    using (var db = new ReusableDB())
    {
        // First store: seed the database with "old" timeouts under the prefill conventions.
        using (var store = db.NewStore())
        {
            ApplyPrefillConventions(store, seedType);
            store.Initialize();
            await Prefill(store, seedType);
        }

        // Second store: re-open with the test conventions and add "new" timeouts.
        using (var store = db.NewStore())
        {
            ApplyTestConventions(store, seedType);
            store.Initialize();
            CreateTimeoutIndex(store);
            var persister = new TimeoutPersister(store);
            for (var i = 1; i <= 10; i++)
            {
                await persister.Add(new TimeoutData
                {
                    Destination = EndpointName,
                    Headers = new Dictionary<string, string>(),
                    OwningTimeoutManager = EndpointName,
                    SagaId = Guid.NewGuid(),
                    Time = dueTimeout
                }, new ContextBag());
            }
            db.WaitForIndexing(store);
            var query = new QueryTimeouts(store, EndpointName);
            // Far-past slice so every timeout is due.
            // NOTE(review): the asserted total of 20 implies Prefill inserts 10 — confirm there.
            var chunkTuples = (await query.GetNextChunk(DateTime.UtcNow.AddYears(-10))).DueTimeouts.ToArray();
            Assert.AreEqual(20, chunkTuples.Length);
            foreach (var tuple in chunkTuples)
            {
                Console.WriteLine($"Received timeout {tuple.Id}");
                Assert.AreEqual(dueTimeout, tuple.DueTime);
            }
        }
    }
}
// Soak-style test: a producer thread inserts 10000 timeouts while the main loop
// mimics the TimeoutPersister coordinator, draining due timeouts until the producer
// finishes and the slice has passed the last inserted timeout, then performs manual
// cleanup and asserts nothing was skipped or left behind.
// NOTE(review): named "Never_ever" — presumably excluded from normal runs; confirm
// against the fixture's test attributes (not visible here).
public async Task Never_ever()
{
    var documentStore = store;
    var query = new QueryTimeouts(documentStore, "foo")
    {
        TriggerCleanupEvery = TimeSpan.FromHours(1) // Make sure cleanup doesn't run automatically
    };
    var persister = new TimeoutPersister(documentStore);
    var context = new ContextBag();
    var startSlice = DateTime.UtcNow.AddYears(-10);
    // avoid cleanup from running during the test by making it register as being run
    Assert.AreEqual(0, (await query.GetCleanupChunk(startSlice)).Count());

    var expected = new List<Tuple<string, DateTime>>();
    var lastTimeout = DateTime.UtcNow;
    var finishedAdding = false;

    // Producer: inserts 10000 timeouts due 1-20 seconds from now.
    // NOTE(review): expected / lastTimeout / finishedAdding are shared with the main
    // thread without explicit synchronization or memory barriers; the main thread only
    // reads expected.Count after the loop observes finishedAdding, but visibility is
    // not formally guaranteed — consider Volatile/lock if this test ever flakes.
    new Thread(() =>
    {
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < 10000; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister.Add(td, context).Wait();
            expected.Add(new Tuple<string, DateTime>(td.Id, td.Time));
            lastTimeout = (td.Time > lastTimeout) ? td.Time : lastTimeout;
        }
        finishedAdding = true;
        Trace.WriteLine("*** Finished adding ***");
    }).Start();

    // Mimic the behavior of the TimeoutPersister coordinator
    var found = 0;
    while (!finishedAdding || startSlice < lastTimeout)
    {
        var timeoutData = await query.GetNextChunk(startSlice);
        foreach (var timeout in timeoutData.DueTimeouts)
        {
            // Advance the slice past each removed timeout so it is not re-read.
            if (startSlice < timeout.DueTime)
            {
                startSlice = timeout.DueTime;
            }
            Assert.True(await persister.TryRemove(timeout.Id, context));
            found++;
        }
        //Todo: Investigate!
        //Without this, sometime it never exited the while loop, even though everything was correctly removed
        if (!timeoutData.DueTimeouts.Any())
        {
            startSlice = timeoutData.NextTimeToQuery;
        }
    }

    // If the persister reports stale results have been seen at one point during its normal operation,
    // we need to perform manual cleaup.
    while (true)
    {
        var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray();
        if (chunkToCleanup.Length == 0)
        {
            break;
        }
        found += chunkToCleanup.Length;
        foreach (var tuple in chunkToCleanup)
        {
            Assert.True(await persister.TryRemove(tuple.Id, context));
        }
    }

    // After draining plus cleanup, the store must hold no timeout documents and
    // every inserted timeout must have been seen exactly once.
    using (var session = documentStore.OpenSession())
    {
        var results = session.Query<TimeoutDocument>().ToList();
        Assert.AreEqual(0, results.Count);
    }
    Assert.AreEqual(expected.Count, found);
}
/// <summary>
/// Ensures no timeouts are skipped when two client connections insert timeouts
/// concurrently while the main loop mimics the TimeoutPersister coordinator,
/// then verifies manual cleanup leaves the store empty.
/// </summary>
public async Task Should_not_skip_timeouts_also_with_multiple_clients_adding_timeouts()
{
    var documentStore = store;
    var query = new QueryTimeouts(documentStore, "foo")
    {
        TriggerCleanupEvery = TimeSpan.FromDays(1) // Make sure cleanup doesn't run automatically
    };
    var persister = new TimeoutPersister(documentStore);
    var context = new ContextBag();
    var startSlice = DateTime.UtcNow.AddYears(-10);
    // avoid cleanup from running during the test by making it register as being run
    Assert.AreEqual(0, (await query.GetCleanupChunk(startSlice)).Count());

    const int insertsPerThread = 1000;
    var expected = 0;
    var lastExpectedTimeout = DateTime.UtcNow;
    var finishedAdding1 = false;
    var finishedAdding2 = false;

    // FIX: lastExpectedTimeout is a DateTime (writes not guaranteed atomic) that was
    // read-modify-written by BOTH producer threads and read by the main loop with no
    // synchronization, risking lost updates and torn reads. All access now goes
    // through a lock, and the completion flags are published with Volatile.Write /
    // observed with Volatile.Read so the polling loop is guaranteed to see them.
    var timeoutGate = new object();

    // Records a timeout's due time as the latest seen, under the gate.
    void RecordTimeout(DateTime time)
    {
        lock (timeoutGate)
        {
            if (time > lastExpectedTimeout)
            {
                lastExpectedTimeout = time;
            }
        }
    }

    // Reads the latest due time, under the gate.
    DateTime LastExpectedTimeout()
    {
        lock (timeoutGate)
        {
            return lastExpectedTimeout;
        }
    }

    // First producer: inserts via the shared persister.
    new Thread(() =>
    {
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < insertsPerThread; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister.Add(td, context).Wait();
            Interlocked.Increment(ref expected);
            RecordTimeout(td.Time);
        }
        Volatile.Write(ref finishedAdding1, true);
        Console.WriteLine("*** Finished adding ***");
    }).Start();

    // Second producer: uses its own persister to simulate a second client connection.
    new Thread(() =>
    {
        var persister2 = new TimeoutPersister(store);
        var sagaId = Guid.NewGuid();
        for (var i = 0; i < insertsPerThread; i++)
        {
            var td = new TimeoutData
            {
                SagaId = sagaId,
                Destination = "queue@machine",
                Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
                OwningTimeoutManager = string.Empty
            };
            persister2.Add(td, context).Wait();
            Interlocked.Increment(ref expected);
            RecordTimeout(td.Time);
        }
        Volatile.Write(ref finishedAdding2, true);
        Console.WriteLine("*** Finished adding via a second client connection ***");
    }).Start();

    // Mimic the behavior of the TimeoutPersister coordinator
    var found = 0;
    while (!Volatile.Read(ref finishedAdding1) || !Volatile.Read(ref finishedAdding2) || startSlice < LastExpectedTimeout())
    {
        var timeoutDatas = await query.GetNextChunk(startSlice);
        foreach (var timeoutData in timeoutDatas.DueTimeouts)
        {
            // Advance the slice past each removed timeout so it is not re-read.
            if (startSlice < timeoutData.DueTime)
            {
                startSlice = timeoutData.DueTime;
            }
            Assert.True(await persister.TryRemove(timeoutData.Id, context));
            found++;
        }
    }

    // If the persister reports stale results have been seen at one point during its normal operation,
    // we need to perform manual cleaup.
    while (true)
    {
        var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray();
        Console.WriteLine("Cleanup: got a chunk of size " + chunkToCleanup.Length);
        if (chunkToCleanup.Length == 0)
        {
            break;
        }
        found += chunkToCleanup.Length;
        foreach (var tuple in chunkToCleanup)
        {
            Assert.True(await persister.TryRemove(tuple.Id, context));
        }
    }

    // After draining plus cleanup, the store must hold no timeout documents and
    // every inserted timeout must have been seen exactly once.
    using (var session = documentStore.OpenSession())
    {
        var results = session.Query<TimeoutDocument>().ToList();
        Assert.AreEqual(0, results.Count);
    }
    Assert.AreEqual(expected, found);
}
// NOTE(review): this method body appears CORRUPTED. The DocumentStore initializer
// runs straight into what looks like a TimeoutData object initializer, and the URL
// literal contains masked credentials ("http://*****:*****@machine"). A span of the
// original source (store setup, query/persister creation, and the producer-thread
// preamble) seems to have been redacted away — the code will not compile as-is.
// Preserved verbatim below; restore the missing section from version control.
public async Task Never_ever()
{
    var db = Guid.NewGuid().ToString();
    using (var documentStore = new DocumentStore
    {
        // Redaction artifact: Url and what follows belong to two different initializers.
        Url = "http://*****:*****@machine",
        Time = DateTime.UtcNow.AddSeconds(RandomProvider.GetThreadRandom().Next(1, 20)),
        OwningTimeoutManager = string.Empty
    };
    persister.Add(td, context).Wait();
    expected.Add(new Tuple<string, DateTime>(td.Id, td.Time));
    lastTimeout = (td.Time > lastTimeout) ? td.Time : lastTimeout;
}
finishedAdding = true;
Trace.WriteLine("*** Finished adding ***");
}).Start();

// Mimic the behavior of the TimeoutPersister coordinator
var found = 0;
while (!finishedAdding || startSlice < lastTimeout)
{
    var timeoutData = await query.GetNextChunk(startSlice);
    foreach (var timeout in timeoutData.DueTimeouts)
    {
        // Advance the slice past each removed timeout so it is not re-read.
        if (startSlice < timeout.DueTime)
        {
            startSlice = timeout.DueTime;
        }
        Assert.True(await persister.TryRemove(timeout.Id, context));
        found++;
    }
}
WaitForIndexing(documentStore);

// If the persister reports stale results have been seen at one point during its normal operation,
// we need to perform manual cleaup.
while (true)
{
    var chunkToCleanup = (await query.GetCleanupChunk(DateTime.UtcNow.AddDays(1))).ToArray();
    if (chunkToCleanup.Length == 0)
    {
        break;
    }
    found += chunkToCleanup.Length;
    foreach (var tuple in chunkToCleanup)
    {
        Assert.True(await persister.TryRemove(tuple.Id, context));
    }
    WaitForIndexing(documentStore);
}

// After draining plus cleanup, the store must hold no timeout documents and
// every inserted timeout must have been seen exactly once.
using (var session = documentStore.OpenAsyncSession())
{
    var results = await session.Query<TimeoutData>().ToListAsync();
    Assert.AreEqual(0, results.Count);
}
Assert.AreEqual(expected.Count, found);
} }