public void AddCount_AfterSet_ThrowsException()
{
    var ce = new AsyncCountdownEvent(1);
    ce.Signal();
    AssertEx.ThrowsException<InvalidOperationException>(() => ce.AddCount());
}
public static async Task RateLimit(IEnumerable<Func<Task>> tasks, double rateLimit)
{
    var s = System.Diagnostics.Stopwatch.StartNew();
    var n = 0;
    // Start at 1 so the event cannot reach zero until the final Signal() below.
    var sem = new AsyncCountdownEvent(1);
    foreach (var taskFn in tasks)
    {
        Console.WriteLine("Starting " + n++);
        var time = s.Elapsed.TotalSeconds;
        var timeout = n / rateLimit;
        var delay = timeout - time;
        if (delay > 0)
        {
            Console.WriteLine("Pausing for " + delay + " seconds");
            await Task.Delay(TimeSpan.FromSeconds(delay));
        }
        sem.AddCount(1);
        // Fire-and-forget: each started task signals the countdown when it completes.
        _ = Task.Run(async () =>
        {
            await taskFn();
            sem.Signal();
        });
    }
    // Remove the initial count, then wait for all started tasks to finish.
    sem.Signal();
    await sem.WaitAsync();
    Console.WriteLine("Current count is " + sem.CurrentCount);
}
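A minimal usage sketch for the helper above; the work items and the rate of two per second are illustrative assumptions, not part of the original.

public static async Task RateLimitUsageExample()
{
    var work = new List<Func<Task>>();
    for (var i = 0; i < 10; i++)
    {
        var item = i; // capture a stable copy for the closure
        work.Add(async () =>
        {
            await Task.Delay(100); // stand-in for real work
            Console.WriteLine("Finished item " + item);
        });
    }

    // Roughly two items are started per second.
    await RateLimit(work, rateLimit: 2.0);
}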
public async Task Should_start_and_stop_client_multiple_times()
{
    var address = Guid.NewGuid().ToString();
    var asyncCountdownEvent = new AsyncCountdownEvent(2);
    var msgBag = new ConcurrentBag<Message>();

    async Task MessageHandler(Message msg, IConsumer consumer, IServiceProvider provider, CancellationToken cancellationToken)
    {
        msgBag.Add(msg);
        asyncCountdownEvent.Signal();
        await consumer.AcceptAsync(msg);
    }

    // STEP 1: Configure producers and consumers
    await using var testFixture = await TestFixture.CreateAsync(_testOutputHelper,
        addActiveMqHostedService: false,
        configureActiveMq: builder =>
        {
            builder.AddProducer<TestProducer>(address, RoutingType.Multicast);
            builder.AddAnonymousProducer<TestAnonymousProducer>();
            builder.AddConsumer(address, RoutingType.Multicast, MessageHandler);
        });

    // STEP 2: Start the client
    var activeMqClient = testFixture.Services.GetRequiredService<IActiveMqClient>();
    await activeMqClient.StartAsync(CancellationToken.None);

    // STEP 3: Send the first round of messages
    var testProducer = testFixture.Services.GetRequiredService<TestProducer>();
    var testAnonymousProducer = testFixture.Services.GetRequiredService<TestAnonymousProducer>();
    await testProducer.SendMessage("msg-1", CancellationToken.None);
    await testAnonymousProducer.SendMessage(address, RoutingType.Multicast, "msg-2", CancellationToken.None);
    await asyncCountdownEvent.WaitAsync();
    Assert.Equal(2, msgBag.Count);

    // STEP 4: Stop the client
    await activeMqClient.StopAsync(CancellationToken.None);

    // STEP 5: Make sure that the client was stopped
    await Assert.ThrowsAnyAsync<InvalidOperationException>(() => testProducer.SendMessage("msg-3", CancellationToken.None));
    await Assert.ThrowsAnyAsync<InvalidOperationException>(() => testAnonymousProducer.SendMessage(address, RoutingType.Multicast, "msg-4", CancellationToken.None));

    // STEP 6: Restart the client
    await activeMqClient.StartAsync(CancellationToken.None);
    asyncCountdownEvent.AddCount(2);

    // STEP 7: Send the second round of messages
    await testProducer.SendMessage("msg-5", CancellationToken.None);
    await testAnonymousProducer.SendMessage(address, RoutingType.Multicast, "msg-6", CancellationToken.None);
    await asyncCountdownEvent.WaitAsync();
    Assert.Equal(4, msgBag.Count);
}
public async Task AddCount_AfterSet_CountsPositiveAndResetsTask()
{
    var ce = new AsyncCountdownEvent(0);
    var originalTask = ce.WaitAsync();
    ce.AddCount();
    var newTask = ce.WaitAsync();
    Assert.Equal(1, ce.CurrentCount);
    Assert.NotSame(originalTask, newTask);
    ce.Signal();
    await newTask;
}
public async Task AddCount_PastZero_PulsesTask()
{
    var ce = new AsyncCountdownEvent(-1);
    var originalTask = ce.WaitAsync();
    ce.AddCount(2);
    await originalTask;
    var newTask = ce.WaitAsync();
    Assert.Equal(1, ce.CurrentCount);
    Assert.NotSame(originalTask, newTask);
    ce.Signal();
    await newTask;
}
public async Task Signal_AfterSet_CountsNegativeAndResetsTask()
{
    AsyncCountdownEvent ce = new AsyncCountdownEvent(0);
    Task originalTask = ce.WaitAsync();
    ce.Signal();
    Task newTask = ce.WaitAsync();
    Assert.Equal(-1, ce.CurrentCount);
    Assert.NotSame(originalTask, newTask);
    ce.AddCount();
    await newTask;
}
public async Task Signal_PastZero_PulsesTask()
{
    AsyncCountdownEvent ce = new AsyncCountdownEvent(1);
    Task originalTask = ce.WaitAsync();
    ce.Signal(2);
    await originalTask;
    Task newTask = ce.WaitAsync();
    Assert.Equal(-1, ce.CurrentCount);
    Assert.NotSame(originalTask, newTask);
    ce.AddCount();
    await newTask;
}
public async Task AddCount_IncrementsCount()
{
    var ce = new AsyncCountdownEvent(1);
    var task = ce.WaitAsync();
    Assert.Equal(1, ce.CurrentCount);
    Assert.False(task.IsCompleted);
    ce.AddCount();
    Assert.Equal(2, ce.CurrentCount);
    Assert.False(task.IsCompleted);
    ce.Signal(2);
    await task;
}
public void AddCount_IncrementsCount()
{
    var ce = new AsyncCountdownEvent(1);
    var task = ce.WaitAsync();
    Assert.AreEqual(1, ce.CurrentCount);
    Assert.IsFalse(task.IsCompleted);
    ce.AddCount();
    Assert.AreEqual(2, ce.CurrentCount);
    Assert.IsFalse(task.IsCompleted);
    ce.Signal();
    Assert.AreEqual(1, ce.CurrentCount);
    Assert.IsFalse(task.IsCompleted);
    ce.Signal();
    Assert.AreEqual(0, ce.CurrentCount);
    Assert.IsTrue(task.IsCompleted);
}
private async Task<ServerResponse<TContent>> SendAsyncInternal<TContent>(
    HttpMethod method,
    string url,
    object content = null,
    bool needsEncryption = true,
    bool needsThreadSafe = true,
    CancellationToken cancellationToken = default(CancellationToken))
{
    if (needsEncryption)
    {
        await this.EnsureKeysInitializationAsync();
    }

    if (needsThreadSafe)
    {
        await AsyncFactory.FromWaitHandle(ChangeKeySemaphore.AvailableWaitHandle, cancellationToken);
    }

    CountdownEvent.AddCount();
    RequestProcessor requestProcessor = this.RequestProcessorBuilder.UseEncryption(needsEncryption).Build(this.ClientState);
    using (var request = await requestProcessor.CreateRequestAsync(method, this.Host + url, content))
    {
        using (HttpResponseMessage response = await this.HttpClient.SendAsync(request, cancellationToken).ContinueWith(this.DecrementEvent))
        {
            if (response.StatusCode == (HttpStatusCode)424)
            {
                await this.ChangeRoundKey();
                return await this.SendAsyncInternal<TContent>(method, url, content, needsEncryption, needsThreadSafe, cancellationToken);
            }

            var result = await requestProcessor.ProcessResponseAsync<TContent>(response);
            return result;
        }
    }
}
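The continuation passed to ContinueWith is not shown above. Because its result is awaited as an HttpResponseMessage and the countdown was incremented before the call, a plausible shape for it might be the following; this is an assumption, not the original source.

// Hypothetical sketch of the DecrementEvent continuation: signal the countdown once
// the HTTP call has finished, then pass the response through to the awaiting caller.
private HttpResponseMessage DecrementEvent(Task<HttpResponseMessage> responseTask)
{
    this.CountdownEvent.Signal();
    return responseTask.Result; // the antecedent task has already completed here
}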
public void AddCount_Overflow_ThrowsException()
{
    var ce = new AsyncCountdownEvent(int.MaxValue);
    AssertEx.ThrowsException<InvalidOperationException>(() => ce.AddCount());
}
public async Task ProcessCrawlingQueueAsync(CrawlingQueue crawlingQueue)
{
    _crawlingParameters.CancellationTokenSource.Token.Register(() =>
        crawlingQueue.QueueCancellationTokenSource.Cancel()
    );

    var tasksLock = new System.Threading.ReaderWriterLockSlim();
    var tasks = new HashSet<Task>();

    var queueItemsProcessingSemaphore = new SemaphoreSlim(
        crawlingQueue.CrawlingConfiguration.MaxSimmultaneousQueueItemsProcessed / 2,
        crawlingQueue.CrawlingConfiguration.MaxSimmultaneousQueueItemsProcessed);

    while (await queueItemsProcessingSemaphore.WaitAsync(crawlingQueue.CrawlingConfiguration.MaxTimeToProcessOneQueueItem))
    {
        if (crawlingQueue.QueueCancellationTokenSource.IsCancellationRequested)
        {
            await Task.WhenAll(tasks.ToArray());

            // TODO: Move remaining items from local queue to the distributed queue
            // TODO: Figure out how to filter out duplicates from the queue? Or should we?
            // We will probably have to resort to known urls-based duplicates check
            // Because otherwise we will drown in failing sql queries on multiple machines
            Trace.TraceWarning("ProcessCrawlingQueueAsync: Queue cancellation requested. Preventing dequeing of new elements. Processing will be shut down after currently executing items are complete.");
            break;
        }

        var queueItem = await crawlingQueue.DequeueAsync();
        if (queueItem == null) // Both Local and Proxy queues are depleted
        {
            // NOTE: If Queue is depleted, we must wait until all running tasks are executed, because they might add new items to queue
            await Task.WhenAll(tasks.ToArray());

            // wait for all queue proxies to complete fetching items
            // TODO: consider locking (multithreading scenario)
            var queueProxiesPending = crawlingQueue.QueueProxies.Where(queueProxy => queueProxy.IsPending()).ToArray();
            if (queueProxiesPending.Length > 0)
            {
                continue;
            }

            if (crawlingQueue.LocalQueue.Count > 0)
            {
                continue;
            }

            break;
        }

        if (!await _crawlingEventInterceptorManager.OnAfterDequeueAsync(queueItem))
        {
            // If interceptor returns false, means it's an instruction to ignore this item;
            continue;
        }

        tasksLock.EnterWriteLock();
        queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.Downloading);
        tasks.Add(System.Threading.Tasks.TaskExtensions.Unwrap(
            CrawlAsync(queueItem.ResourceLink)
            .ContinueWith(async task =>
            {
                tasksLock.EnterWriteLock();
                tasks.Remove(task); // to avoid infinite bloating of the collection
                tasksLock.ExitWriteLock();

                try
                {
                    queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.Downloaded);

                    if (task.Status == TaskStatus.RanToCompletion)
                    {
                        var resourceContentUnits = task.Result;
                        var httpResultUnit = resourceContentUnits.OfType<HttpResultUnit>().Single();

                        queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.Processing);

                        var resourceContentUnitsProcessingCountdown = new AsyncCountdownEvent(resourceContentUnits.Count);

                        // Process resource content units extracted from Response
                        foreach (var resourceContentUnit in resourceContentUnits)
                        {
                            switch (resourceContentUnit)
                            {
                                case ExtractedLinksUnit extractedLinksUnit:
                                    if (extractedLinksUnit.ExtractedLinks.Count > 0)
                                    {
                                        var linksProcessingCountdown = new AsyncCountdownEvent(extractedLinksUnit.ExtractedLinks.Count);

                                        foreach (var extractedLink in extractedLinksUnit.ExtractedLinks)
                                        {
                                            var crawlingQueueItem = new CrawlingQueueItem(extractedLink);

                                            // Do not enqueue item if prevented by any interceptor
                                            if (!await _crawlingEventInterceptorManager.OnBeforeEnqueueAsync(crawlingQueueItem))
                                            {
                                                continue;
                                            }

                                            crawlingQueueItem.ProcessingCompleted += () => linksProcessingCountdown.AddCount(1);
                                            crawlingQueue.Enqueue(crawlingQueueItem);
                                        }

                                        // Wait while all links are processed before releasing the content units semaphore and set Status = Processed for parent
                                        linksProcessingCountdown.WaitAsync()
                                            .ContinueWith(linksProcessingTask => resourceContentUnitsProcessingCountdown.AddCount(1));
                                    }
                                    else
                                    {
                                        resourceContentUnitsProcessingCountdown.AddCount(1);
                                    }

                                    // Set Processed status when all extracted links are processed
                                    break;

                                case ExtractedDataUnit extractedDataUnit:
                                    if (!await _crawlingEventInterceptorManager.OnDataDocumentDownloadedAsync(
                                        queueItem.ResourceLink, // May be a DocumentLink, or a FrameLink. Not quite intuitive and probably requires redesign.
                                        extractedDataUnit,
                                        httpResultUnit
                                    ))
                                    {
                                        // If any of interceptors failed to process the download result,
                                        // AND failed to store download result for later processing
                                        // we must re-enqueue the item, in order to ensure the results are not lost for good
                                        // We ignore the item and log the error. Chances are we couldn't process the item for a reason. And repeating would just make it stuck infinitely (re-downloading and re-processing)
                                        // (WAS) we must re-enqueue the item, in order to ensure the results are not lost for good
                                        //crawlingQueue.EnqueueAsync(queueItem);
                                    }

                                    resourceContentUnitsProcessingCountdown.Signal();
                                    break;

                                case DownloadedFilesUnit downloadedFileUnit:
                                    // If download file is a result of redirection,
                                    // we must either explicitly declare that we're expecting a file, or throw a processing exception
                                    var fileLink = queueItem.ResourceLink as FileLink;
                                    if (fileLink == null)
                                    {
                                        Trace.TraceError($"ProcessCrawlingQueueAsync: Downloaded file unit. Resource link is of type {queueItem.ResourceLink.GetType().Name}, expecting FileLink. Preventing processing.");
                                        break;
                                    }

                                    if (!await _crawlingEventInterceptorManager.OnFileDownloadedAsync(
                                        fileLink,
                                        downloadedFileUnit,
                                        httpResultUnit
                                    ))
                                    {
                                        // If any of interceptors failed to process the download result,
                                        // AND failed to store download result for later processing....
                                        // We ignore the item and log the error. Chances are we couldn't process the item for a reason. And repeating would just make it stuck infinitely (re-downloading and re-processing)
                                        // (WAS) we must re-enqueue the item, in order to ensure the results are not lost for good
                                        //crawlingQueue.EnqueueAsync(queueItem);
                                    }

                                    resourceContentUnitsProcessingCountdown.Signal();
                                    break;

                                case HttpResultUnit httpResultUnitStub:
                                    // TODO: Determine what we should do if HTTP download failed. Either re-enqueue or ignore, or alert/do something else
                                    switch (httpResultUnitStub.HttpStatus)
                                    {
                                        //case HttpStatusCode.InternalServerError: // it's likely to repeat within the same run
                                        case HttpStatusCode.GatewayTimeout:
                                        case HttpStatusCode.RequestTimeout:
                                            queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.NotLinked);
                                            crawlingQueue.Enqueue(queueItem); // Trying to recrawl item if it failed for some intermittent reason
                                            break;
                                        default:
                                            // We need to invoke ProcessingCompleted only after Data and Links extracted are really processed.
                                            //queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.ProcessingCompleted);
                                            break;
                                    }

                                    resourceContentUnitsProcessingCountdown.Signal();
                                    break;

                                default:
                                    throw new NotSupportedException();
                            }
                        }

                        // Do not actually wait for related resources processing completion.
                        // Those might be extracted links or files. No need to hold queue resources while linked units are downloaded
                        // Set Processed status after all content units were registered and interceptors triggered
                        await resourceContentUnitsProcessingCountdown.WaitAsync()
                            .ContinueWith(resourceContentUnitsProcessingTask =>
                                queueItem.ChangeStatus(CrawlingQueueItem.CrawlingStatuses.Processed)
                            );
                    }
                    else
                    {
                        Trace.TraceError("CrawlAsync: Failed for queue item {0} with exception [{1}]", queueItem.ResourceLink, task.Exception);
                    }
                }
                finally
                {
                    queueItemsProcessingSemaphore.Release();
                }
            })
        ));
        tasksLock.ExitWriteLock();
    }

    await Task.WhenAll(tasks.ToArray());
}
/// <summary>
/// Registers a task with the ASP.NET runtime.
/// </summary>
/// <param name="task">The task to register.</param>
private void Register(Task task)
{
    count.AddCount();
    task.ContinueWith(_ => count.Signal(), TaskContinuationOptions.ExecuteSynchronously);
}
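For context, a minimal sketch of the kind of type this helper could live in; the class name, the Run wrapper, and ShutdownAsync are assumptions used only to show how the initial count of 1 is typically drained.

// Hypothetical surrounding type (a sketch, not the original source).
public sealed class BackgroundTaskTracker
{
    // Start at 1 so the event cannot complete until ShutdownAsync removes the initial count.
    private readonly AsyncCountdownEvent count = new AsyncCountdownEvent(1);

    public void Run(Func<Task> work) => Register(Task.Run(work));

    private void Register(Task task)
    {
        count.AddCount();
        task.ContinueWith(_ => count.Signal(), TaskContinuationOptions.ExecuteSynchronously);
    }

    // Removes the initial count and waits until every registered task has signalled.
    public Task ShutdownAsync()
    {
        count.Signal();
        return count.WaitAsync();
    }
}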
/// <summary>
/// Consume the tasks in parallel but with a rate limit. The results
/// are returned as an observable.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="tasks"></param>
/// <param name="rateLimit"></param>
/// <returns></returns>
public static IObservable<T> RateLimit<T>(IEnumerable<Func<Task<T>>> tasks, double rateLimit)
{
    var s = System.Diagnostics.Stopwatch.StartNew();
    var n = 0;
    var sem = new AsyncCountdownEvent(1);
    var errors = new ConcurrentBag<Exception>();

    return Observable.Create<T>(observer =>
    {
        var ctx = new CancellationTokenSource();

        Task.Run(async () =>
        {
            foreach (var taskFn in tasks)
            {
                n++;
                ctx.Token.ThrowIfCancellationRequested();

                var elapsedTotalSeconds = s.Elapsed.TotalSeconds;
                var delay = Delay(rateLimit, n, elapsedTotalSeconds);
                if (delay > 0)
                {
                    await Task.Delay(TimeSpan.FromSeconds(delay), ctx.Token);
                }

                sem.AddCount(1);
                Task.Run(async () =>
                {
                    try
                    {
                        observer.OnNext(await taskFn());
                    }
                    catch (Exception e)
                    {
                        errors.Add(e);
                    }
                    finally
                    {
                        sem.Signal();
                    }
                }, ctx.Token);
            }

            sem.Signal();
            await sem.WaitAsync(ctx.Token);

            if (errors.Count > 0)
            {
                observer.OnError(new AggregateException(errors));
            }
            else
            {
                observer.OnCompleted();
            }
        }, ctx.Token);

        return Disposable.Create(() => ctx.Cancel());
    });
}
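A usage sketch for the observable variant above; the task functions and the rate are illustrative assumptions, and it presumes the usual System.Reactive.Linq usings so the observable can be awaited.

public static async Task RateLimitObservableUsageExample()
{
    var taskFns = new List<Func<Task<int>>>();
    for (var i = 0; i < 5; i++)
    {
        var item = i; // capture a stable copy for the closure
        taskFns.Add(async () =>
        {
            await Task.Delay(50); // stand-in for real work
            return item * item;
        });
    }

    // Collect all results once the observable completes; roughly two tasks start per second.
    var results = await RateLimit(taskFns, rateLimit: 2.0).ToList();
    Console.WriteLine("Received " + results.Count + " results");
}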
public void AddCount_Overflow_ThrowsException()
{
    var ce = new AsyncCountdownEvent(long.MaxValue);
    AsyncAssert.Throws<OverflowException>(() => ce.AddCount());
}
public async Task CanPersistAndNotLoseMessages()
{
    Log.MinimumLevel = LogLevel.Trace;
    var messageBus1 = new RabbitMQMessageBus(o => o
        .ConnectionString("amqp://localhost:5673")
        .LoggerFactory(Log)
        .SubscriptionQueueName($"{_topic}-offline")
        .IsSubscriptionQueueExclusive(false)
        .SubscriptionQueueAutoDelete(false)
        .AcknowledgementStrategy(AcknowledgementStrategy.Automatic));

    var countdownEvent = new AsyncCountdownEvent(1);
    var cts = new CancellationTokenSource();
    await messageBus1.SubscribeAsync<SimpleMessageA>(msg =>
    {
        _logger.LogInformation("[Subscriber1] Got message: {Message}", msg.Data);
        countdownEvent.Signal();
    }, cts.Token);

    await messageBus1.PublishAsync(new SimpleMessageA { Data = "Audit message 1" });
    await countdownEvent.WaitAsync(TimeSpan.FromSeconds(5));
    Assert.Equal(0, countdownEvent.CurrentCount);
    cts.Cancel();

    await messageBus1.PublishAsync(new SimpleMessageA { Data = "Audit message 2" });
    cts = new CancellationTokenSource();
    countdownEvent.AddCount(1);
    await messageBus1.SubscribeAsync<SimpleMessageA>(msg =>
    {
        _logger.LogInformation("[Subscriber2] Got message: {Message}", msg.Data);
        countdownEvent.Signal();
    }, cts.Token);
    await countdownEvent.WaitAsync(TimeSpan.FromSeconds(5));
    Assert.Equal(0, countdownEvent.CurrentCount);
    cts.Cancel();

    await messageBus1.PublishAsync(new SimpleMessageA { Data = "Audit offline message 1" });
    await messageBus1.PublishAsync(new SimpleMessageA { Data = "Audit offline message 2" });
    await messageBus1.PublishAsync(new SimpleMessageA { Data = "Audit offline message 3" });
    messageBus1.Dispose();

    var messageBus2 = new RabbitMQMessageBus(o => o
        .ConnectionString("amqp://localhost:5673")
        .LoggerFactory(Log)
        .SubscriptionQueueName($"{_topic}-offline")
        .IsSubscriptionQueueExclusive(false)
        .SubscriptionQueueAutoDelete(false)
        .AcknowledgementStrategy(AcknowledgementStrategy.Automatic));

    cts = new CancellationTokenSource();
    countdownEvent.AddCount(4);
    await messageBus2.SubscribeAsync<SimpleMessageA>(msg =>
    {
        _logger.LogInformation("[Subscriber3] Got message: {Message}", msg.Data);
        countdownEvent.Signal();
    }, cts.Token);
    await messageBus2.PublishAsync(new SimpleMessageA { Data = "Another audit message 4" });
    await countdownEvent.WaitAsync(TimeSpan.FromSeconds(5));
    Assert.Equal(0, countdownEvent.CurrentCount);
    messageBus2.Dispose();
}