// Verifies that a task handed out by WaitAsync transitions to completed once Set is called.
public void ItCompletesTheTaskWhenSetIsCalled()
{
    var signal = new AsyncManualResetEvent();
    var waitTask = signal.WaitAsync();

    signal.Set();
    // Bounded wait so a broken implementation fails fast instead of hanging the test.
    waitTask.Wait(TimeSpan.FromSeconds(1));

    Assert.IsTrue(waitTask.IsCompleted);
}
// After Set followed by Reset, WaitAsync must hand out a fresh, not-yet-completed task
// rather than the task that was already satisfied.
public void ItReturnsNewUncompletedTaskIfResetIsCalled()
{
    var signal = new AsyncManualResetEvent();
    var firstTask = signal.WaitAsync();

    signal.Set();
    signal.Reset();
    var secondTask = signal.WaitAsync();

    Assert.IsTrue(secondTask != firstTask && !secondTask.IsCompleted);
}
// Resetting the event from a continuation of a completed wait must be observed by
// the next WaitAsync call: the new task may not come back already completed.
public void ItReturnsTaskWithNoInnerDependencies()
{
    var signal = new AsyncManualResetEvent();
    signal.Set();

    signal.WaitAsync().ContinueWith(_ => { signal.Reset(); }).Wait();
    var freshTask = signal.WaitAsync();

    Assert.IsFalse(freshTask.IsCompleted);
}
// Starting the distributor must cause the receive callback to fire at least once;
// the Timeout() on the wait is what fails the test if it never does.
public async Task When_the_distributor_is_started_then_notifications_begin()
{
    var received = new AsyncManualResetEvent();
    var distributor = CreateDistributor(async lease => { received.Set(); });

    await distributor.Start();
    await received.WaitAsync().Timeout();
    await distributor.Stop();
    // no TimeoutException, success!
}
// After Stop, no more leases may be received: snapshot the receive count at stop
// time, wait several lease durations, and assert the count has not moved.
// NOTE(review): the test name says Dispose but the body calls Stop — confirm intent.
public async Task No_further_acquisitions_occur_after_Dispose_is_called()
{
    var received = 0;
    var firstReceive = new AsyncManualResetEvent();
    var distributor = CreateDistributor(async lease =>
    {
        Interlocked.Increment(ref received);
        firstReceive.Set();
    });

    await distributor.Start();
    await firstReceive.WaitAsync().Timeout();
    await distributor.Stop();

    var receivedAsOfStop = received;

    // Three lease durations is enough time for spurious acquisitions to show up.
    await Task.Delay((int)DefaultLeaseDuration.TotalMilliseconds * 3);

    received.Should().Be(receivedAsOfStop);
}
// First subscriber attaches while the event is unset and must not complete until
// SetAsync releases the observable's creation coroutine; SpinWait bridges the gap
// between SetAsync returning and the OnCompleted callback running on another thread.
// The second subscriber attaches after the event is set, so its sequence completes
// synchronously (presumably because WaitAsync is already satisfied — confirm against
// the Observable.Create re-execution semantics for each subscription).
public async Task when_subscribing_observable_then_can_async_wait_on_value() { var e = new object(); var initializedEvent = new AsyncManualResetEvent(); var observable = Observable.Create<object>(async o => { await initializedEvent.WaitAsync(); o.OnNext(e); o.OnCompleted(); return Disposable.Empty; }); object obj1 = null; bool completed1 = false; var s1 = observable.Subscribe(o => obj1 = o, () => completed1 = true); Assert.False(completed1); await initializedEvent.SetAsync(); SpinWait.SpinUntil(() => completed1, 5000); Assert.True(completed1); Assert.Same(e, obj1); object obj2 = null; bool completed2 = false; var s2 = observable.Subscribe(o => obj2 = o, () => completed2 = true); Assert.True(completed2); Assert.Same(e, obj2); }
// Wires up all queues, lookup tables, locks and counters used by the puller, then
// captures the injected services. processQueuesSignal starts unset (false) so the
// processing loop blocks until work arrives. The peer requirement falls back from
// MinProtocolVersion to ProtocolVersion when the former is not configured.
// NOTE(review): new Random() here is fine for a singleton, but would be biased if
// multiple pullers were constructed in quick succession on older frameworks.
public BlockPuller(IChainState chainState, NodeSettings nodeSettings, IDateTimeProvider dateTimeProvider, INodeStats nodeStats, ILoggerFactory loggerFactory) { this.reassignedJobsQueue = new Queue <DownloadJob>(); this.downloadJobsQueue = new Queue <DownloadJob>(); this.assignedDownloadsByHash = new Dictionary <uint256, AssignedDownload>(); this.assignedDownloadsSorted = new LinkedList <AssignedDownload>(); this.assignedHeadersByPeerId = new Dictionary <int, List <ChainedHeader> >(); this.averageBlockSizeBytes = new AverageCalculator(AverageBlockSizeSamplesCount); this.pullerBehaviorsByPeerId = new Dictionary <int, IBlockPullerBehavior>(); this.processQueuesSignal = new AsyncManualResetEvent(false); this.queueLock = new object(); this.peerLock = new object(); this.assignedLock = new object(); this.nextJobId = 0; this.networkPeerRequirement = new NetworkPeerRequirement { MinVersion = nodeSettings.MinProtocolVersion ?? nodeSettings.ProtocolVersion, RequiredServices = NetworkPeerServices.Network }; this.cancellationSource = new CancellationTokenSource(); this.random = new Random(); this.maxBlocksBeingDownloaded = MinimalCountOfBlocksBeingDownloaded; this.chainState = chainState; this.dateTimeProvider = dateTimeProvider; this.logger = loggerFactory.CreateLogger(this.GetType().FullName); nodeStats.RegisterStats(this.AddComponentStats, StatsType.Component); }
/// <summary>
/// Bridges a <see cref="RemoteTask"/> callback into an awaitable <see cref="Task"/>.
/// </summary>
/// <param name="remoteTask">The remote task whose completion callback is adapted.</param>
/// <returns>
/// A task that completes when the remote callback reports success, or faults with the
/// reported exception.
/// </returns>
public static Task GetTask(this RemoteTask remoteTask)
{
    // A TaskCompletionSource propagates the remote outcome directly to awaiters.
    // The previous implementation logged and re-threw the exception inside the
    // callback (resetting its stack trace) and never signalled the reset event,
    // so on failure the returned task hung forever. RunContinuationsAsynchronously
    // keeps awaiter continuations off the remote callback thread.
    var completion = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);

    void Signal(Exception ex)
    {
        if (ex != null)
        {
            LogTo.Warning(ex, "Exception caught");
            completion.TrySetException(ex);
            return;
        }

        completion.TrySetResult(null);
    }

    remoteTask.SetCallback(new ActionProxy<Exception>(Signal));

    return completion.Task;
}
/// <summary>
/// Waits on the event with a timeout by converting the timeout into a
/// self-cancelling token and delegating to the token-based overload.
/// </summary>
public static Task WaitAsync(this AsyncManualResetEvent resetEvent, TimeSpan timeout)
{
    var timeoutToken = timeout.ToCancellationToken();
    return resetEvent.WaitAsync(timeoutToken);
}
// Each test starts with a fresh, initially-unset event instance.
public AsyncManualResetEventTests(ITestOutputHelper logger)
    : base(logger)
{
    this.evt = new AsyncManualResetEvent();
}
// Captures the awaiter being wrapped plus the two coordination signals.
// Only baseAwaiter is validated; the signals are stored as-is — presumably
// null signals are legal and simply mean "no notification" (confirm at call sites).
internal YieldAndNotifyAwaiter(INotifyCompletion baseAwaiter, AsyncManualResetEvent yieldingSignal, AsyncManualResetEvent resumingSignal) { Requires.NotNull(baseAwaiter, nameof(baseAwaiter)); this.baseAwaiter = baseAwaiter; this.yieldingSignal = yieldingSignal; this.resumingSignal = resumingSignal; }
// Streams the admin log over a websocket while an ETL script runs, waiting for the
// "skipping document" message. The mre is set on the FIRST received log line (under
// the sb lock) so the test only proceeds once the socket is demonstrably live; all
// sb access is locked because the reader task and the failure path both touch it.
// On timeout the accumulated log is dumped to a temp file referenced in the thrown
// exception message rather than inlined, to keep the failure readable.
public async Task WillLog() { using (var client = new ClientWebSocket()) using (var store = GetDocumentStore()) { CreateRdbmsSchema(store); using (var session = store.OpenAsyncSession()) { await session.StoreAsync(new Order()); await session.SaveChangesAsync(); } var str = string.Format("{0}/admin/logs/watch", store.Urls.First().Replace("http", "ws")); var sb = new StringBuilder(); var mre = new AsyncManualResetEvent(); await client.ConnectAsync(new Uri(str), CancellationToken.None); var task = Task.Run(async() => { ArraySegment <byte> buffer = new ArraySegment <byte>(new byte[1024]); while (client.State == WebSocketState.Open) { var value = await ReadFromWebSocket(buffer, client); lock (sb) { mre.Set(); sb.AppendLine(value); } const string expectedValue = "skipping document: orders/"; if (value.Contains(expectedValue) || sb.ToString().Contains(expectedValue)) { return; } } }); await mre.WaitAsync(TimeSpan.FromSeconds(60)); SetupSqlEtl(store, @"output ('Tralala'); undefined(); var nameArr = this.StepName.split('.'); loadToOrders({});"); using (var session = store.OpenAsyncSession()) { for (var i = 0; i < 100; i++) { await session.StoreAsync(new Order()); } await session.SaveChangesAsync(); } var condition = await task.WaitWithTimeout(TimeSpan.FromSeconds(60)); if (condition == false) { var msg = "Could not process SQL Replication script for OrdersAndLines, skipping document: orders/"; var tempFileName = Path.GetTempFileName(); lock (sb) { File.WriteAllText(tempFileName, sb.ToString()); } throw new InvalidOperationException($"{msg}. Full log is: \r\n{tempFileName}"); } } }
// End-to-end watch test: creates a never-exiting job, watches it with a 30s server
// timeout, and uses two events — 'started' set on the first watch event (proving the
// watch is live) and 'connectionClosed' set via onClosed. The 3-minute WhenAny gives
// the 30s server-side timeout ample room to close the connection; IsSet is then
// asserted rather than awaited so a hang converts into a clean failure. Cleanup
// deletes the job with Foreground propagation so the pod goes with it.
public async Task WatcherIntegrationTest() { var kubernetes = CreateClient(); var job = await kubernetes.CreateNamespacedJobAsync( new V1Job() { ApiVersion = "batch/v1", Kind = V1Job.KubeKind, Metadata = new V1ObjectMeta() { Name = nameof(WatcherIntegrationTest).ToLowerInvariant() }, Spec = new V1JobSpec() { Template = new V1PodTemplateSpec() { Spec = new V1PodSpec() { Containers = new List <V1Container>() { new V1Container() { Image = "ubuntu", Name = "runner", Command = new List <string>() { "/bin/bash", "-c", "--" }, Args = new List <string>() { "trap : TERM INT; sleep infinity & wait", }, }, }, RestartPolicy = "Never", }, }, }, }, "default").ConfigureAwait(false); var events = new Collection <Tuple <WatchEventType, V1Job> >(); var started = new AsyncManualResetEvent(); var connectionClosed = new AsyncManualResetEvent(); var watcher = await kubernetes.WatchNamespacedJobAsync( job.Metadata.Name, job.Metadata.NamespaceProperty, resourceVersion : job.Metadata.ResourceVersion, timeoutSeconds : 30, onEvent : (type, source) => { Debug.WriteLine($"Watcher 1: {type}, {source}"); events.Add(new Tuple <WatchEventType, V1Job>(type, source)); job = source; started.Set(); }, onClosed : connectionClosed.Set).ConfigureAwait(false); await started.WaitAsync().ConfigureAwait(false); await Task.WhenAny(connectionClosed.WaitAsync(), Task.Delay(TimeSpan.FromMinutes(3))).ConfigureAwait(false); Assert.True(connectionClosed.IsSet); await kubernetes.DeleteNamespacedJobAsync( job.Metadata.Name, job.Metadata.NamespaceProperty, new V1DeleteOptions() { PropagationPolicy = "Foreground" }).ConfigureAwait(false); }
// Subscribes to User revisions after writing 10 versions of users/1 (plus an
// unrelated dons/1 company per iteration). The handler records Current+Previous
// name pairs; the monotonic-age branch adds the same pair a second time, so the
// HashSet deduplicates and the event fires once 10 distinct pairs are seen —
// i.e. one pair per version. The test passes when that happens within
// _reasonableWaitTime.
public async Task PlainRevisionsSubscriptionsCompareDocs() { using (var store = GetDocumentStore()) { var subscriptionId = await store.Subscriptions.CreateAsync <Revision <User> >(); using (var context = JsonOperationContext.ShortTermSingleUse()) { var configuration = new RevisionsConfiguration { Default = new RevisionsCollectionConfiguration { Active = true, MinimumRevisionsToKeep = 5, }, Collections = new Dictionary <string, RevisionsCollectionConfiguration> { ["Users"] = new RevisionsCollectionConfiguration { Active = true }, ["Dons"] = new RevisionsCollectionConfiguration { Active = true, } } }; await Server.ServerStore.ModifyDatabaseRevisions(context, store.Database, EntityToBlittable.ConvertEntityToBlittable(configuration, new DocumentConventions(), context)); } for (var j = 0; j < 10; j++) { using (var session = store.OpenSession()) { session.Store(new User { Name = $"users1 ver {j}", Age = j }, "users/1"); session.Store(new Company() { Name = $"dons1 ver {j}" }, "dons/1"); session.SaveChanges(); } } using (var sub = store.Subscriptions.Open <Revision <User> >(new SubscriptionConnectionOptions(subscriptionId))) { var mre = new AsyncManualResetEvent(); var names = new HashSet <string>(); var maxAge = -1; GC.KeepAlive(sub.Run(a => { foreach (var item in a.Items) { var x = item.Result; if (x.Current.Age > maxAge && x.Current.Age > (x.Previous?.Age ?? -1)) { names.Add(x.Current?.Name + x.Previous?.Name); maxAge = x.Current.Age; } names.Add(x.Current?.Name + x.Previous?.Name); if (names.Count == 10) { mre.Set(); } } })); Assert.True(await mre.WaitAsync(_reasonableWaitTime)); } } }
// Creates a subscription (optionally pinned to a mentor node), waits for every
// relevant cluster node to observe its raft index, then starts the worker. The
// handler parses the numeric id out of each document id (dropping the trailing
// change-vector-style suffix of 2 chars) and asserts ids are non-decreasing;
// AfterAcknowledgment sets the provided event once 10 users have been collected.
// NOTE(review): "proggress"/"SubscriptionProggress" typos originate in the project
// type name; the empty catch in AfterAcknowledgment silently swallows everything.
private async Task <SubscriptionWorker <User> > CreateAndInitiateSubscription(IDocumentStore store, string defaultDatabase, List <User> usersCount, AsyncManualResetEvent reachedMaxDocCountMre, int batchSize, string mentor = null) { var proggress = new SubscriptionProggress() { MaxId = 0 }; var subscriptionName = await store.Subscriptions.CreateAsync <User>(options : new SubscriptionCreationOptions { MentorNode = mentor }).ConfigureAwait(false); var subscription = store.Subscriptions.GetSubscriptionWorker <User>(new SubscriptionWorkerOptions(subscriptionName) { TimeToWaitBeforeConnectionRetry = TimeSpan.FromMilliseconds(500), MaxDocsPerBatch = batchSize }); var subscripitonState = await store.Subscriptions.GetSubscriptionStateAsync(subscriptionName, store.Database); var getDatabaseTopologyCommand = new GetDatabaseRecordOperation(defaultDatabase); var record = await store.Maintenance.Server.SendAsync(getDatabaseTopologyCommand).ConfigureAwait(false); foreach (var server in Servers.Where(s => record.Topology.RelevantFor(s.ServerStore.NodeTag))) { await server.ServerStore.Cluster.WaitForIndexNotification(subscripitonState.SubscriptionId).ConfigureAwait(false); } if (mentor != null) { Assert.Equal(mentor, record.Topology.WhoseTaskIsIt(subscripitonState, RachisState.Follower)); } var task = subscription.Run(a => { foreach (var item in a.Items) { var x = item.Result; int curId = 0; var afterSlash = x.Id.Substring(x.Id.LastIndexOf("/", StringComparison.OrdinalIgnoreCase) + 1); curId = int.Parse(afterSlash.Substring(0, afterSlash.Length - 2)); Assert.True(curId >= proggress.MaxId);// todo: change back to '>' usersCount.Add(x); proggress.MaxId = curId; } }); subscription.AfterAcknowledgment += b => { try { if (usersCount.Count == 10) { reachedMaxDocCountMre.Set(); } } catch (Exception) { } return(Task.CompletedTask); }; await Task.WhenAny(task, Task.Delay(_reasonableWaitTime)).ConfigureAwait(false); return(subscription); }
// Regression test for SignalR/SignalR#3337: two farm nodes share one message bus,
// and the scheduler deliberately routes /send to node 0 and everything else to
// node 1 so group membership and message delivery cross nodes. The mre is set when
// the exact group message arrives; a 5-second wait bounds the assertion.
public async Task FarmGroupAddCompletesSuccessfully(TransportType transportType) { // https://github.com/SignalR/SignalR/issues/3337 // Each node shares the same bus but are independent servers const int nodeCount = 2; var counters = new PerformanceCounterManager(); var configurationManager = new DefaultConfigurationManager(); // Ensure /send and /connect requests get handled by different servers Func <string, int> scheduler = url => url.Contains("/send") ? 0 : 1; using (EnableTracing()) using (var bus = new MessageBus(new StringMinifier(), new TraceManager(), counters, configurationManager, 5000)) using (var loadBalancer = new LoadBalancer(nodeCount, scheduler)) { loadBalancer.Configure(app => { var resolver = new DefaultDependencyResolver(); resolver.Register(typeof(IMessageBus), () => bus); app.MapSignalR(new HubConfiguration { Resolver = resolver }); }); using (var connection = new HubConnection("http://goo/")) { var proxy = connection.CreateHubProxy("FarmGroupHub"); const string group = "group"; const string message = "message"; var mre = new AsyncManualResetEvent(); proxy.On <string>("message", m => { if (m == message) { mre.Set(); } }); Client.Transports.IClientTransport transport; switch (transportType) { case TransportType.LongPolling: transport = new Client.Transports.LongPollingTransport(loadBalancer); break; case TransportType.ServerSentEvents: transport = new Client.Transports.ServerSentEventsTransport(loadBalancer); break; default: throw new ArgumentException("transportType"); } await connection.Start(transport); await proxy.Invoke("JoinGroup", group); await proxy.Invoke("SendToGroup", group, message); Assert.True(await mre.WaitAsync(TimeSpan.FromSeconds(5))); } } }
// Verifies that deleting a database removes its subscription state from every
// cluster node's raft store. After the first acknowledged batch (signalled via the
// event) the subscription key must exist on all servers; after the database delete
// propagates (WaitForCommitIndexChange per server, plus a 2s grace sleep) the key
// must be gone everywhere. NOTE(review): method name typo "Subscripiton" is the
// test's public identifier and is left untouched; Thread.Sleep in an async test
// blocks a pool thread — presumably acceptable in this integration suite.
public async Task SubscripitonDeletionFromCluster() { const int nodesAmount = 5; var leader = await this.CreateRaftClusterAndGetLeader(nodesAmount); var defaultDatabase = "ContinueFromThePointIStopped"; await CreateDatabaseInCluster(defaultDatabase, nodesAmount, leader.WebUrl).ConfigureAwait(false); using (var store = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = defaultDatabase }.Initialize()) { var usersCount = new List <User>(); var reachedMaxDocCountMre = new AsyncManualResetEvent(); var subscriptionId = await store.Subscriptions.CreateAsync <User>(); using (var session = store.OpenAsyncSession()) { await session.StoreAsync(new User { Name = "Peter" }); await session.SaveChangesAsync(); } var subscription = store.Subscriptions.GetSubscriptionWorker <User>(new SubscriptionWorkerOptions(subscriptionId)); subscription.AfterAcknowledgment += b => { reachedMaxDocCountMre.Set(); return(Task.CompletedTask); }; GC.KeepAlive(subscription.Run(x => { })); await reachedMaxDocCountMre.WaitAsync(); foreach (var ravenServer in Servers) { using (ravenServer.ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context)) using (context.OpenReadTransaction()) { Assert.NotNull(ravenServer.ServerStore.Cluster.Read(context, SubscriptionState.GenerateSubscriptionItemKeyName(defaultDatabase, subscriptionId.ToString()))); } } await subscription.DisposeAsync(); var deleteResult = store.Maintenance.Server.Send(new DeleteDatabasesOperation(defaultDatabase, hardDelete: true)); foreach (var ravenServer in Servers) { await ravenServer.ServerStore.WaitForCommitIndexChange(RachisConsensus.CommitIndexModification.GreaterOrEqual, deleteResult.RaftCommandIndex + nodesAmount).WaitWithTimeout(TimeSpan.FromSeconds(60)); } Thread.Sleep(2000); foreach (var ravenServer in Servers) { using (ravenServer.ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context)) using (context.OpenReadTransaction()) { 
Assert.Null(ravenServer.ServerStore.Cluster.Read(context, SubscriptionState.GenerateSubscriptionItemKeyName(defaultDatabase, subscriptionId.ToString()))); } } } }
// Exercises SubscriptionOpeningStrategy.WaitForFree: a second worker opened with
// that strategy must stay idle while the first connection holds the subscription,
// then take over after the first worker is disposed. ackSentAmre marks the first
// acknowledgment (so we know the server is actively processing before disposing);
// the BlockingCollection TryTake calls double as "connection established" probes.
public async Task SubscriptionWaitStrategy() { using (var store = GetDocumentStore()) { await CreateDocuments(store, 1); var lastChangeVector = (await store.Maintenance.SendAsync(new GetStatisticsOperation())).DatabaseChangeVector; var subscriptionCreationParams = new SubscriptionCreationOptions() { Query = "from Things", ChangeVector = lastChangeVector }; await CreateDocuments(store, 5); var subsId = await store.Subscriptions.CreateAsync(subscriptionCreationParams); using ( var acceptedSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>(new SubscriptionWorkerOptions(subsId))) { var acceptedSusbscriptionList = new BlockingCollection <Thing>(); var waitingSubscriptionList = new BlockingCollection <Thing>(); var ackSentAmre = new AsyncManualResetEvent(); acceptedSubscription.AfterAcknowledgment += b => { ackSentAmre.Set(); return(Task.CompletedTask); }; GC.KeepAlive(acceptedSubscription.Run(x => { foreach (var item in x.Items) { acceptedSusbscriptionList.Add(item.Result); } Thread.Sleep(20); })); // wait until we know that connection was established Thing thing; // wait until we know that connection was established for (var i = 0; i < 5; i++) { Assert.True(acceptedSusbscriptionList.TryTake(out thing, 50000)); } Assert.False(acceptedSusbscriptionList.TryTake(out thing, 50)); // open second subscription using ( var waitingSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>(new SubscriptionWorkerOptions(subsId) { Strategy = SubscriptionOpeningStrategy.WaitForFree, TimeToWaitBeforeConnectionRetry = TimeSpan.FromMilliseconds(250) })) { GC.KeepAlive(waitingSubscription.Run(x => { foreach (var item in x.Items) { waitingSubscriptionList.Add(item.Result); } })); Assert.True(await ackSentAmre.WaitAsync(TimeSpan.FromSeconds(50))); acceptedSubscription.Dispose(); await CreateDocuments(store, 5); // wait until we know that connection was established for (var i = 0; i < 5; i++) { Assert.True(waitingSubscriptionList.TryTake(out thing, 3000)); } 
Assert.False(waitingSubscriptionList.TryTake(out thing, 50)); } } } }
// WaitAsync must always hand back a Task instance, even before Set is called.
public void ItReturnsTaskWhenWaitAsyncIsCalled()
{
    var signal = new AsyncManualResetEvent();

    Assert.IsInstanceOfType(signal.WaitAsync(), typeof(Task));
}
// The receiver deliberately outlives its lease (1.5x the default duration) before
// calling Extend; the thrown exception is captured rather than asserted inline so
// the event can still be set and the test can inspect type and message afterwards.
// The extra 1s delay after the wait gives any in-flight work time to settle before
// the assertions run.
public virtual async Task When_Extend_is_called_after_a_lease_has_expired_then_it_throws() { Exception exception = null; var distributor = CreateDistributor().Trace(); var mre = new AsyncManualResetEvent(); distributor.OnReceive(async lease => { // wait too long, until another receiver gets the lease await Task.Delay((int) (DefaultLeaseDuration.TotalMilliseconds*1.5)); // now try to extend the lease try { await lease.Extend(TimeSpan.FromMilliseconds(1)); } catch (Exception ex) { Console.WriteLine("CAUGHT " + ex); exception = ex; } mre.Set(); }); distributor.Distribute(1); await mre.WaitAsync().Timeout(); await Task.Delay(1000); exception.Should().BeOfType<InvalidOperationException>(); exception.Message.Should().Contain("lease cannot be extended"); }
// With a single leasable resource, the first receive blocks for 3x the lease
// duration so the lease expires mid-handling; the resource must then be handed out
// again. Only the non-blocked path increments receiveCount before signalling, so
// the final count of 1 shows exactly one post-expiry re-lease completed.
// NOTE(review): 'blocked' is read/written from the receive callback without
// synchronization — presumably safe because only one lease exists at a time.
public async Task When_a_lease_expires_because_the_recipient_took_too_long_then_it_is_leased_out_again() { var blocked = false; var receiveCount = 0; var mre = new AsyncManualResetEvent(); var distributor = CreateDistributor( leasableResources: DefaultLeasableResources.Take(1).ToArray()) .Trace(); distributor.OnReceive(async lease => { if (!blocked) { blocked = true; await Task.Delay((int) (DefaultLeaseDuration.TotalMilliseconds*3)); } Interlocked.Increment(ref receiveCount); mre.Set(); }); await distributor.Start(); await mre.WaitAsync().Timeout(); await distributor.Stop(); receiveCount.Should().Be(1); }
// Exercises SubscriptionOpeningStrategy.TakeOver: a second worker forcibly evicts
// the first, whose Run task must fault with SubscriptionInUseException. The event
// fires on the acknowledgment that follows the 5th processed item, proving the
// first connection completed a full batch before the takeover.
// NOTE(review): the trailing "// wait until we know that connection was" comment
// spills across the original line break, leaving "established for (...)" on the
// next line — this looks like source mangling in the snippet as provided.
public async Task SubscriptionSimpleTakeOverStrategy() { using (var store = GetDocumentStore()) { await CreateDocuments(store, 1); var lastChangeVector = (await store.Maintenance.SendAsync(new GetStatisticsOperation())).DatabaseChangeVector ?? null; await CreateDocuments(store, 5); var subscriptionCreationParams = new SubscriptionCreationOptions() { Query = "from Things", ChangeVector = lastChangeVector }; var subsId = await store.Subscriptions.CreateAsync(subscriptionCreationParams); using ( var acceptedSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>(new SubscriptionWorkerOptions(subsId))) { var acceptedSusbscriptionList = new BlockingCollection <Thing>(); var takingOverSubscriptionList = new BlockingCollection <Thing>(); long counter = 0; var batchProccessedByFirstSubscription = new AsyncManualResetEvent(); acceptedSubscription.AfterAcknowledgment += b => { if (Interlocked.Read(ref counter) == 5) { batchProccessedByFirstSubscription.Set(); } return(Task.CompletedTask); }; var firstRun = acceptedSubscription.Run(x => { foreach (var item in x.Items) { Interlocked.Increment(ref counter); acceptedSusbscriptionList.Add(item.Result); } }); Thing thing; // wait until we know that connection was established for (var i = 0; i < 5; i++) { Assert.True(acceptedSusbscriptionList.TryTake(out thing, 5000), "no doc"); } Assert.True(await batchProccessedByFirstSubscription.WaitAsync(TimeSpan.FromSeconds(15)), "no ack"); Assert.False(acceptedSusbscriptionList.TryTake(out thing)); // open second subscription using (var takingOverSubscription = store.Subscriptions.GetSubscriptionWorker <Thing>( new SubscriptionWorkerOptions(subsId) { Strategy = SubscriptionOpeningStrategy.TakeOver })) { GC.KeepAlive(takingOverSubscription.Run(x => { foreach (var item in x.Items) { takingOverSubscriptionList.Add(item.Result); } })); Assert.ThrowsAsync <SubscriptionInUseException>(() => firstRun).Wait(); await CreateDocuments(store, 5); // wait until we know that connection was 
established for (var i = 0; i < 5; i++) { Assert.True(takingOverSubscriptionList.TryTake(out thing, 5000), "no doc takeover"); } Assert.False(takingOverSubscriptionList.TryTake(out thing)); } } } }
// Mock-server watch test: the server streams Added/Deleted/Modified/Error lines and
// then parks on serverShutdown so the connection stays open. The countdown starts
// at 4 (the handler's comment notes the first response line is consumed by
// WatcherDelegatingHandler). After all events arrive, error callbacks must be zero
// because the Error line is surfaced as a typed event, not an exception. Setting
// serverShutdown ends the response, which must flip the watcher to closed/not
// watching within TestTimeout.
public async Task WatchAllEvents() { AsyncCountdownEvent eventsReceived = new AsyncCountdownEvent(4 /* first line of response is eaten by WatcherDelegatingHandler */); AsyncManualResetEvent serverShutdown = new AsyncManualResetEvent(); var waitForClosed = new AsyncManualResetEvent(false); using (var server = new MockKubeApiServer(testOutput, async httpContext => { await WriteStreamLine(httpContext, MockAddedEventStreamLine); await WriteStreamLine(httpContext, MockDeletedStreamLine); await WriteStreamLine(httpContext, MockModifiedStreamLine); await WriteStreamLine(httpContext, MockErrorStreamLine); // make server alive, cannot set to int.max as of it would block response await serverShutdown.WaitAsync(); return(false); })) { var client = new Kubernetes(new KubernetesClientConfiguration { Host = server.Uri.ToString() }); var listTask = await client.ListNamespacedPodWithHttpMessagesAsync("default", watch : true); var events = new HashSet <WatchEventType>(); var errors = 0; var watcher = listTask.Watch <V1Pod, V1PodList>( (type, item) => { testOutput.WriteLine($"Watcher received '{type}' event."); events.Add(type); eventsReceived.Signal(); }, error => { testOutput.WriteLine($"Watcher received '{error.GetType().FullName}' error."); errors += 1; eventsReceived.Signal(); }, onClosed: waitForClosed.Set ); // wait server yields all events await Task.WhenAny(eventsReceived.WaitAsync(), Task.Delay(TestTimeout)); Assert.True( eventsReceived.CurrentCount == 0, "Timed out waiting for all events / errors to be received." ); Assert.Contains(WatchEventType.Added, events); Assert.Contains(WatchEventType.Deleted, events); Assert.Contains(WatchEventType.Modified, events); Assert.Contains(WatchEventType.Error, events); Assert.Equal(0, errors); Assert.True(watcher.Watching); serverShutdown.Set(); await Task.WhenAny(waitForClosed.WaitAsync(), Task.Delay(TestTimeout)); Assert.True(waitForClosed.IsSet); Assert.False(watcher.Watching); } }
// Cluster failover test for revisions subscriptions. Three events coordinate the
// stages: continueMre gates AfterAcknowledgment so the test controls when
// processing may advance; ackSent fires when revisionsCount reaches the current
// expectedRevisionsCount milestone; reachedMaxDocCountMre fires on the final
// tally (nodesAmount docs, nodesAmount^2 revisions). Between milestones the test
// kills the server hosting the subscription and (for 5 nodes) a second one,
// asserting progress resumes each time within _reasonableWaitTime.
// NOTE(review): docsCount/revisionsCount are mutated from the Run callback and
// read from the acknowledgment handler without synchronization — presumably
// serialized by the subscription pipeline; confirm before relying on it.
public async Task DistributedRevisionsSubscription(int nodesAmount) { var leader = await CreateRaftClusterAndGetLeader(nodesAmount).ConfigureAwait(false); var defaultDatabase = "DistributedRevisionsSubscription"; await CreateDatabaseInCluster(defaultDatabase, nodesAmount, leader.WebUrl).ConfigureAwait(false); using (var store = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = defaultDatabase }.Initialize()) { await SetupRevisions(leader, defaultDatabase).ConfigureAwait(false); var reachedMaxDocCountMre = new AsyncManualResetEvent(); var ackSent = new AsyncManualResetEvent(); var continueMre = new AsyncManualResetEvent(); GenerateDistributedRevisionsData(defaultDatabase); var subscriptionId = await store.Subscriptions.CreateAsync <Revision <User> >().ConfigureAwait(false); var subscription = store.Subscriptions.GetSubscriptionWorker <Revision <User> >(new SubscriptionWorkerOptions(subscriptionId) { MaxDocsPerBatch = 1, TimeToWaitBeforeConnectionRetry = TimeSpan.FromMilliseconds(100) }); var docsCount = 0; var revisionsCount = 0; var expectedRevisionsCount = 0; subscription.AfterAcknowledgment += async b => { await continueMre.WaitAsync(); try { if (revisionsCount == expectedRevisionsCount) { continueMre.Reset(); ackSent.Set(); } await continueMre.WaitAsync(); } catch (Exception) { } }; var task = subscription.Run(b => { foreach (var item in b.Items) { var x = item.Result; try { if (x == null) { } else if (x.Previous == null) { docsCount++; revisionsCount++; } else if (x.Current == null) { } else { if (x.Current.Age > x.Previous.Age) { revisionsCount++; } } if (docsCount == nodesAmount && revisionsCount == Math.Pow(nodesAmount, 2)) { reachedMaxDocCountMre.Set(); } } catch (Exception) { } } }); expectedRevisionsCount = nodesAmount + 2; continueMre.Set(); //Assert.True(await task.WaitAsync(_reasonableWaitTime).ConfigureAwait(false), $"Doc count is {docsCount} with revesions {revisionsCount}/{expectedRevisionsCount}"); Assert.True(await 
ackSent.WaitAsync(_reasonableWaitTime).ConfigureAwait(false), $"Doc count is {docsCount} with revesions {revisionsCount}/{expectedRevisionsCount}"); ackSent.Reset(true); await KillServerWhereSubscriptionWorks(defaultDatabase, subscription.SubscriptionName).ConfigureAwait(false); continueMre.Set(); expectedRevisionsCount += 2; Assert.True(await ackSent.WaitAsync(_reasonableWaitTime).ConfigureAwait(false), $"Doc count is {docsCount} with revesions {revisionsCount}/{expectedRevisionsCount}"); ackSent.Reset(true); continueMre.Set(); if (nodesAmount == 5) { await KillServerWhereSubscriptionWorks(defaultDatabase, subscription.SubscriptionName); } Assert.True(await reachedMaxDocCountMre.WaitAsync(_reasonableWaitTime).ConfigureAwait(false), $"Doc count is {docsCount} with revesions {revisionsCount}/{expectedRevisionsCount}"); } }
// Same mock-server watch scenario as WatchAllEvents, but the server sleeps 120s
// before streaming anything — past the default 100s HttpClient timeout — to prove
// the direct watch API keeps the connection alive and still delivers all four
// events afterwards. No onClosed assertion here; the test ends by releasing the
// server via serverShutdown.Set().
public async Task DirectWatchEventsWithTimeout() { AsyncCountdownEvent eventsReceived = new AsyncCountdownEvent(4); AsyncManualResetEvent serverShutdown = new AsyncManualResetEvent(); using (var server = new MockKubeApiServer(testOutput, async httpContext => { await Task.Delay(TimeSpan.FromSeconds(120)); // The default timeout is 100 seconds await WriteStreamLine(httpContext, MockAddedEventStreamLine); await WriteStreamLine(httpContext, MockDeletedStreamLine); await WriteStreamLine(httpContext, MockModifiedStreamLine); await WriteStreamLine(httpContext, MockErrorStreamLine); // make server alive, cannot set to int.max as of it would block response await serverShutdown.WaitAsync(); return(false); })) { var client = new Kubernetes(new KubernetesClientConfiguration { Host = server.Uri.ToString() }); var events = new HashSet <WatchEventType>(); var errors = 0; var watcher = await client.WatchNamespacedPodAsync( name : "myPod", @namespace : "default", onEvent : (type, item) => { testOutput.WriteLine($"Watcher received '{type}' event."); events.Add(type); eventsReceived.Signal(); }, onError : error => { testOutput.WriteLine($"Watcher received '{error.GetType().FullName}' error."); errors += 1; eventsReceived.Signal(); } ); // wait server yields all events await Task.WhenAny(eventsReceived.WaitAsync(), Task.Delay(TestTimeout)); Assert.True( eventsReceived.CurrentCount == 0, "Timed out waiting for all events / errors to be received." ); Assert.Contains(WatchEventType.Added, events); Assert.Contains(WatchEventType.Deleted, events); Assert.Contains(WatchEventType.Modified, events); Assert.Contains(WatchEventType.Error, events); Assert.Equal(0, errors); Assert.True(watcher.Watching); serverShutdown.Set(); } }
// Pins the subscription to a non-leader mentor node, then takes the leader down
// from INSIDE the first batch handler (disposedOnce guards so it happens once).
// SetAndResetAtomically pulses batchProccessed / subscriptionRetryBegins so each
// WaitAsync observes a distinct occurrence; three consecutive retry waits confirm
// the worker keeps retrying while the leader is gone. The leader is then restarted
// on the same URLs and data directory, and a further processed + acknowledged
// batch proves the subscription recovered without failing (MaxErrorousPeriod 120s
// gives it the headroom the test name promises).
public async Task SubscriptionShouldNotFailIfLeaderIsDownButItStillHasEnoughTimeToRetry() { const int nodesAmount = 2; var leader = await CreateRaftClusterAndGetLeader(nodesAmount, shouldRunInMemory : false); var leaderDataDir = leader.Configuration.Core.DataDirectory.FullPath.Split('/').Last(); var leaderUrl = leader.WebUrl; var defaultDatabase = "SubscriptionShouldNotFailIfLeaderIsDownButItStillHasEnoughTimeToRetry"; await CreateDatabaseInCluster(defaultDatabase, nodesAmount, leader.WebUrl).ConfigureAwait(false); string mentor = Servers.First(x => x.ServerStore.NodeTag != x.ServerStore.LeaderTag).ServerStore.NodeTag; using (var store = new DocumentStore { Urls = new[] { leader.WebUrl }, Database = defaultDatabase }.Initialize()) { var subscriptionName = await store.Subscriptions.CreateAsync <User>(options : new SubscriptionCreationOptions { MentorNode = mentor }).ConfigureAwait(false); var subscripitonState = await store.Subscriptions.GetSubscriptionStateAsync(subscriptionName, store.Database); var getDatabaseTopologyCommand = new GetDatabaseRecordOperation(defaultDatabase); var record = await store.Maintenance.Server.SendAsync(getDatabaseTopologyCommand).ConfigureAwait(false); foreach (var server in Servers.Where(s => record.Topology.RelevantFor(s.ServerStore.NodeTag))) { await server.ServerStore.Cluster.WaitForIndexNotification(subscripitonState.SubscriptionId).ConfigureAwait(false); } if (mentor != null) { Assert.Equal(mentor, record.Topology.WhoseTaskIsIt(subscripitonState, RachisState.Follower)); } using (var subscription = store.Subscriptions.GetSubscriptionWorker <User>(new SubscriptionWorkerOptions(subscriptionName) { TimeToWaitBeforeConnectionRetry = TimeSpan.FromMilliseconds(500), MaxDocsPerBatch = 20, MaxErrorousPeriod = TimeSpan.FromSeconds(120) })) { var batchProccessed = new AsyncManualResetEvent(); var subscriptionRetryBegins = new AsyncManualResetEvent(); var batchedAcked = new AsyncManualResetEvent(); var disposedOnce = false; 
subscription.AfterAcknowledgment += x => { batchedAcked.Set(); return(Task.CompletedTask); }; var task = subscription.Run(async a => { if (disposedOnce == false) { subscription.OnSubscriptionConnectionRetry += x => { subscriptionRetryBegins.SetAndResetAtomically(); }; await DisposeServerAndWaitForFinishOfDisposalAsync(leader); disposedOnce = true; } batchProccessed.SetAndResetAtomically(); }); await GenerateDocuments(store); Assert.True(await batchProccessed.WaitAsync(_reasonableWaitTime)); Assert.True(await subscriptionRetryBegins.WaitAsync(TimeSpan.FromSeconds(30))); Assert.True(await subscriptionRetryBegins.WaitAsync(TimeSpan.FromSeconds(30))); Assert.True(await subscriptionRetryBegins.WaitAsync(TimeSpan.FromSeconds(30))); leader = Servers[0] = GetNewServer(new Dictionary <string, string> { { RavenConfiguration.GetKey(x => x.Core.PublicServerUrl), leaderUrl }, { RavenConfiguration.GetKey(x => x.Core.ServerUrls), leaderUrl } }, runInMemory: false, deletePrevious: false, partialPath: leaderDataDir); Assert.True(await batchProccessed.WaitAsync(TimeSpan.FromSeconds(120))); Assert.True(await batchedAcked.WaitAsync(TimeSpan.FromSeconds(120))); } } }
// Seeds the catch-up gate with the caller's live flag: when isLive is true the
// event starts already set. Presumably a live subscription has no history to
// replay so waiters pass through immediately — confirm against _catchup readers.
public Subscription(bool isLive = false) { _catchup = new AsyncManualResetEvent(isLive); }
// NOTE(review): this snippet appears truncated — the do/while and the method body
// are not closed, and a catch clause sits outside its try; left byte-identical.
// Visible behavior: retry loop bounded by _options.MaxRetryAttempts, optional
// retryDelay pause between attempts, a throttle.WaitAsync() gate before each
// DropBox call (presumably released when rate-limiting ends — confirm), and the
// thumbnail buffered via MemoryStream into change.Thumbnail. TaskCanceledException
// retries once then breaks; WebException retries. Method name typo "Singe" is the
// public identifier and is left as-is.
/// <summary> /// Creates a task for async download of thumbnail corresponding to the updated document. /// </summary> /// <param name="change">Change record that includes document state and new metadata.</param> /// <param name="dropbox">DropBox client</param> /// <param name="throttle"></param> /// <param name="options"></param> /// <returns>An awaitable task that yields status and associated change record.</returns> private async Task <ThumbTransferResult> DownloadSingeThumbnailAsync( PendingChange change, IClient dropbox, AsyncManualResetEvent throttle) { var thumbSize = 0; const int bufferSize = (int)(150 * Kbyte); // this should be sufficient for most thumbnails var attempt = 0; TimeSpan?retryDelay = null; do { try { if (attempt >= _options.MaxRetryAttempts) { break; } ++attempt; if (retryDelay.HasValue) { Debug.WriteLine("Pausing {0} seconds before retrying.", retryDelay.Value.TotalSeconds); await Task.Delay(retryDelay.Value); retryDelay = null; } // pause if we are being throttled by DropBox await throttle.WaitAsync(); var itemPath = change.Meta.path; using (var thumbStream = await dropbox.Core.Metadata.ThumbnailsAsync(itemPath, size: _options.ThumbSizeToRequest)) using (var buffer = new MemoryStream()) { await thumbStream.CopyToAsync(buffer, bufferSize); change.Thumbnail = buffer.ToArray(); thumbSize = change.Thumbnail.Length; } break; } // we need to eat this exception, to prevent terminating processing for other thumbnails catch (TaskCanceledException /*e*/) { if (attempt > 1) { // after first attempt we will retry, after second, break out to return failed result Debug.WriteLine("DownloadSingeThumbnailAsync task was canceled."); break; } } } catch (System.Net.WebException) { // just retry after failure Debug.WriteLine("Got WebException."); }
/// <summary>
/// Updating only a subscription's script must leave the subscription's change
/// vector untouched while dropping the active connection; a reconnecting
/// worker must then observe the new script's projection.
/// </summary>
public async Task UpdatingSubscriptionScriptShouldNotChangeVectorButShouldDropConnection()
{
    using (var store = GetDocumentStore())
    {
        // Subscription whose script projects every user as {Name:'David'}.
        var subscriptionName = store.Subscriptions.Create<User>(options: new SubscriptionCreationOptions()
        {
            Name = "Subs1",
            Query = "from Users as u select {Name:'David'}"
        });
        var subscription = store.Subscriptions.Open<User>(new SubscriptionConnectionOptions("Subs1"));
        var results = new List<User>();
        var mre = new AsyncManualResetEvent();
        using (var session = store.OpenSession())
        {
            session.Store(new User { });
            session.SaveChanges();
        }
        var subscriptionTask = subscription.Run(batch =>
        {
            results.AddRange(batch.Items.Select(i => i.Result).ToArray());
        });
        // Signal once the server acknowledges a processed batch.
        subscription.AfterAcknowledgment += x =>
        {
            mre.Set();
            return (Task.CompletedTask);
        };
        Assert.True(await mre.WaitAsync(_reasonableWaitTime));
        mre.Reset();
        Assert.Equal("David", results[0].Name);
        results.Clear();
        var currentDatabase = await Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store.Database);
        string changeVectorBeforeScriptUpdate = GetSubscriptionChangeVector(currentDatabase);
        var subscriptionState = currentDatabase.SubscriptionStorage.GetSubscriptionFromServerStore(subscriptionName);
        // Updating only the subscription script and making sure the connection drops.
        // DoNotChange requests that the change vector be preserved by this update.
        await currentDatabase.SubscriptionStorage.PutSubscription(new SubscriptionCreationOptions()
        {
            Name = "Subs1",
            ChangeVector = Raven.Client.Constants.Documents.SubscriptionChangeVectorSpecialStates.DoNotChange.ToString(),
            Query = "from Users as u select {Name:'Jorgen'}"
        }, subscriptionState.SubscriptionId);
        // The running subscription task should complete (connection dropped) rather than time out,
        // and completing means surfacing SubscriptionClosedException.
        Assert.Equal(subscriptionTask, await Task.WhenAny(subscriptionTask, Task.Delay(_reasonableWaitTime)));
        await Assert.ThrowsAsync(typeof(SubscriptionClosedException), () => subscriptionTask);
        var changeVectorAfterUpdatingScript = GetSubscriptionChangeVector(currentDatabase);
        // DoNotChange was requested, so the vector must be identical after the script update.
        Assert.Equal(changeVectorBeforeScriptUpdate, changeVectorAfterUpdatingScript);
        // Reconnecting and making sure that the new script is in power.
        subscription = store.Subscriptions.Open<User>(new SubscriptionConnectionOptions("Subs1"));
        subscriptionTask = subscription.Run(batch =>
        {
            results.AddRange(batch.Items.Select(i => i.Result).ToArray());
        });
        subscription.AfterAcknowledgment += x =>
        {
            mre.Set();
            return (Task.CompletedTask);
        };
        using (var session = store.OpenSession())
        {
            session.Store(new User { });
            session.SaveChanges();
        }
        Assert.True(await mre.WaitAsync(_reasonableWaitTime));
        // The new script projects 'Jorgen', proving the updated query took effect.
        Assert.Equal("Jorgen", results[0].Name);
    }
}
/// <summary>
/// Revisions subscription with a custom script: only revision pairs whose
/// current age exceeds the previous age should be delivered.
/// </summary>
public async Task RevisionsSubscriptionsWithCustomScriptCompareDocs()
{
    using (var store = GetDocumentStore())
    {
        var subscriptionId = await store.Subscriptions.CreateAsync(new SubscriptionCreationOptions
        {
            Query = @" declare function match(d){ return d.Current.Age > d.Previous.Age; } from Users (Revisions = true) as d where match(d) select { Id: id(d.Current), Age: d.Current.Age } "
        });
        using (var context = JsonOperationContext.ShortTermSingleUse())
        {
            // Enable revisions for all collections (Default) plus explicit
            // overrides for Users and Dons.
            // NOTE(review): the companies below are stored under "companies/" ids,
            // so the "Dons" override looks unused here — confirm intent.
            var configuration = new RevisionsConfiguration
            {
                Default = new RevisionsCollectionConfiguration
                {
                    Active = true,
                    MinimumRevisionsToKeep = 5,
                },
                Collections = new Dictionary<string, RevisionsCollectionConfiguration>
                {
                    ["Users"] = new RevisionsCollectionConfiguration
                    {
                        Active = true
                    },
                    ["Dons"] = new RevisionsCollectionConfiguration
                    {
                        Active = true,
                    }
                }
            };
            await Server.ServerStore.ModifyDatabaseRevisions(context, store.Database, EntityToBlittable.ConvertEntityToBlittable(configuration, new DocumentConventions(), context));
        }
        // 10 users x 10 versions each with ages 0..9; companies are stored too
        // but the subscription only targets Users revisions.
        for (int i = 0; i < 10; i++)
        {
            for (var j = 0; j < 10; j++)
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new User
                    {
                        Name = $"users{i} ver {j}",
                        Age = j
                    }, "users/" + i);
                    session.Store(new Company()
                    {
                        Name = $"dons{i} ver {j}"
                    }, "companies/" + i);
                    session.SaveChanges();
                }
            }
        }
        using (var sub = store.Subscriptions.Open<Result>(new SubscriptionConnectionOptions(subscriptionId)))
        {
            var mre = new AsyncManualResetEvent();
            var names = new HashSet<string>();
            var maxAge = -1;
            GC.KeepAlive(sub.Run(x =>
            {
                foreach (var item in x.Items)
                {
                    // maxAge is shared across all items, so only strictly
                    // increasing ages (1..9 here) contribute entries.
                    if (item.Result.Age > maxAge)
                    {
                        names.Add(item.Result.Id + item.Result.Age);
                        maxAge = item.Result.Age;
                    }
                    if (names.Count == 9)
                    {
                        mre.Set();
                    }
                }
            }));
            Assert.True(await mre.WaitAsync(_reasonableWaitTime));
        }
    }
}
/// <summary>
/// Plain revisions subscription (no script): every (current, previous)
/// revision pair for Users should be delivered to the worker.
/// </summary>
public async Task PlainRevisionsSubscriptions()
{
    using (var store = GetDocumentStore())
    {
        var subscriptionId = await store.Subscriptions.CreateAsync<Revision<User>>();
        using (var context = JsonOperationContext.ShortTermSingleUse())
        {
            // Enable revisions globally plus explicit Users/Dons overrides.
            var configuration = new RevisionsConfiguration
            {
                Default = new RevisionsCollectionConfiguration
                {
                    Disabled = false,
                    MinimumRevisionsToKeep = 5,
                },
                Collections = new Dictionary<string, RevisionsCollectionConfiguration>
                {
                    ["Users"] = new RevisionsCollectionConfiguration
                    {
                        Disabled = false
                    },
                    ["Dons"] = new RevisionsCollectionConfiguration
                    {
                        Disabled = false
                    }
                }
            };
            await Server.ServerStore.ModifyDatabaseRevisions(context, store.Database, DocumentConventions.Default.Serialization.DefaultConverter.ToBlittable(configuration, context), Guid.NewGuid().ToString());
        }
        // 10 users x 10 versions each; companies are stored alongside but the
        // subscription only targets Users revisions.
        for (int i = 0; i < 10; i++)
        {
            for (var j = 0; j < 10; j++)
            {
                using (var session = store.OpenSession())
                {
                    session.Store(new User
                    {
                        Name = $"users{i} ver {j}"
                    }, "users/" + i);
                    session.Store(new Company()
                    {
                        Name = $"dons{i} ver {j}"
                    }, "dons/" + i);
                    session.SaveChanges();
                }
            }
        }
        using (var sub = store.Subscriptions.GetSubscriptionWorker<Revision<User>>(new SubscriptionWorkerOptions(subscriptionId)
        {
            TimeToWaitBeforeConnectionRetry = TimeSpan.FromSeconds(5)
        }))
        {
            var mre = new AsyncManualResetEvent();
            var names = new HashSet<string>();
            GC.KeepAlive(sub.Run(x =>
            {
                foreach (var item in x.Items)
                {
                    // Concatenated current+previous names; 10 docs x 10
                    // revisions yield 100 distinct pairs.
                    names.Add(item.Result.Current?.Name + item.Result.Previous?.Name);
                    if (names.Count == 100)
                    {
                        mre.Set();
                    }
                }
            }));
            Assert.True(await mre.WaitAsync(_reasonableWaitTime));
        }
    }
}
/// <summary>
/// Forces an awaitable to yield, setting signals after the continuation has been pended
/// and when the continuation has begun execution.
/// </summary>
/// <param name="baseAwaiter">The awaiter to extend. Must not be null.</param>
/// <param name="yieldingSignal">Signal set after the continuation has been pended; may be null.</param>
/// <param name="resumingSignal">Signal set when the continuation has been invoked; may be null.</param>
/// <returns>A new awaitable that wraps <paramref name="baseAwaiter"/>.</returns>
internal static YieldAndNotifyAwaitable YieldAndNotify(this INotifyCompletion baseAwaiter, AsyncManualResetEvent yieldingSignal = null, AsyncManualResetEvent resumingSignal = null)
{
    Requires.NotNull(baseAwaiter, nameof(baseAwaiter));

    return new YieldAndNotifyAwaitable(baseAwaiter, yieldingSignal, resumingSignal);
}
/// <summary>A freshly constructed event reports a non-zero Id.</summary>
public void Id_IsNotZero()
{
    var resetEvent = new AsyncManualResetEvent();

    Assert.AreNotEqual(0, resetEvent.Id);
}
/// <summary>
/// Builds a <see cref="Scraper"/> wired to this factory's queue and logger.
/// </summary>
/// <param name="manualResetEvent">Gate handed through to the scraper; ownership stays with the caller.</param>
/// <param name="cancellationToken">Token used to stop the scraper.</param>
/// <returns>A new scraper instance.</returns>
public IScraper Create(AsyncManualResetEvent manualResetEvent, CancellationToken cancellationToken) =>
    new Scraper(_scraperQueue, _crawlLogger, manualResetEvent, cancellationToken);
/// <summary>
/// An event constructed in the signaled state lets Wait() return immediately.
/// </summary>
public void Wait_Set_IsCompleted()
{
    var signaledEvent = new AsyncManualResetEvent(true);

    signaledEvent.Wait();
}
/// <summary>
/// Verifies that a write-only stream passed over RPC is disposed
/// deterministically once the call completes, and that each Will* write
/// callback is matched by a Did* callback before the next write begins.
/// </summary>
public async Task StreamClosesDeterministically()
{
    Tuple<Nerdbank.FullDuplexStream, Nerdbank.FullDuplexStream> streams = Nerdbank.FullDuplexStream.CreateStreams();
    // Wrap one side so only writes are permitted, and monitor every operation.
    var monitoredStream = new MonitoringStream(new OneWayStreamWrapper(streams.Item1, canWrite: true));
    var disposedEvent = new AsyncManualResetEvent();
    monitoredStream.Disposed += (s, e) => disposedEvent.Set();
    // 'writing' enforces strict Will/Did pairing: no overlapped or re-entrant writes.
    bool writing = false;
    monitoredStream.WillWrite += (s, e) =>
    {
        Assert.False(writing);
        writing = true;
        this.Logger.WriteLine("Writing {0} bytes.", e.Count);
    };
    monitoredStream.WillWriteByte += (s, e) =>
    {
        Assert.False(writing);
        writing = true;
        this.Logger.WriteLine("Writing 1 byte.");
    };
    monitoredStream.WillWriteMemory += (s, e) =>
    {
        Assert.False(writing);
        writing = true;
        this.Logger.WriteLine("Writing {0} bytes.", e.Length);
    };
    monitoredStream.WillWriteSpan += (s, e) =>
    {
        Assert.False(writing);
        writing = true;
        this.Logger.WriteLine("Writing {0} bytes.", e.Length);
    };
    monitoredStream.DidWrite += (s, e) =>
    {
        Assert.True(writing);
        writing = false;
        this.Logger.WriteLine("Wrote {0} bytes.", e.Count);
    };
    monitoredStream.DidWriteByte += (s, e) =>
    {
        Assert.True(writing);
        writing = false;
        this.Logger.WriteLine("Wrote 1 byte.");
    };
    monitoredStream.DidWriteMemory += (s, e) =>
    {
        Assert.True(writing);
        writing = false;
        this.Logger.WriteLine("Wrote {0} bytes.", e.Length);
    };
    monitoredStream.DidWriteSpan += (s, e) =>
    {
        Assert.True(writing);
        writing = false;
        this.Logger.WriteLine("Wrote {0} bytes.", e.Length);
    };
    try
    {
        await this.clientRpc.InvokeWithCancellationAsync(
            nameof(Server.AcceptWritableStream),
            new object[] { monitoredStream, MemoryBuffer.Length },
            this.TimeoutToken);
        this.Logger.WriteLine("RPC call completed.");
    }
    catch (Exception ex) when (!(ex is RemoteInvocationException))
    {
        // The only failure case where the stream will be closed automatically
        // is if it came in as an error response from the server, so for any
        // other failure we must dispose it ourselves before rethrowing.
        monitoredStream.Dispose();
        throw;
    }
    // The server side is expected to trigger disposal; wait for it (bounded by TimeoutToken).
    await disposedEvent.WaitAsync(this.TimeoutToken);
    this.Logger.WriteLine("Stream disposed.");
}
/// <summary>
/// Verifies that the groups token is bound to the originating connection id:
/// a second connection ("hacker") replaying the token under a different
/// ConnectionId must not receive the group's messages.
/// </summary>
public async Task GroupsTokenIsPerConnectionId()
{
    using (var host = new MemoryHost())
    {
        IProtectedData protectedData = null;
        host.Configure(app =>
        {
            var config = new HubConfiguration
            {
                Resolver = new DefaultDependencyResolver()
            };
            app.MapSignalR<MyGroupConnection>("/echo", config);
            protectedData = config.Resolver.Resolve<IProtectedData>();
        });

        var connection = new Client.Connection("http://memoryhost/echo");
        using (connection)
        {
            var inGroup = new AsyncManualResetEvent();
            connection.Received += data =>
            {
                if (data == "group")
                {
                    inGroup.Set();
                }
            };

            await connection.Start(host);

            // FIX: the bool result of WaitAsync was previously discarded, so a
            // timeout here fell through silently and surfaced later as a
            // confusing GroupsToken assertion failure. Fail fast instead.
            Assert.True(await inGroup.WaitAsync(TimeSpan.FromSeconds(10)), "Timed out waiting to join the group");

            Assert.NotNull(connection.GroupsToken);

            // Set if the spying connection ever receives real group data.
            var spyWh = new AsyncManualResetEvent();
            var hackerConnection = new Client.Connection(connection.Url)
            {
                ConnectionId = "hacker"
            };

            var url = GetUrl(protectedData, connection, connection.GroupsToken);
            var response = await host.Get(url, r => { }, isLongRunning: true);
            var reader = new EventSourceStreamReader(hackerConnection, response.GetStream());

            reader.Message = sseEvent =>
            {
                // Anything other than protocol chatter means the hacker
                // connection received group data it should not have.
                if (sseEvent.EventType == EventType.Data &&
                    sseEvent.Data != "initialized" &&
                    sseEvent.Data != "{}")
                {
                    spyWh.Set();
                }
            };

            reader.Start();
            await connection.Send("random");

            // The spy must NOT observe the group broadcast within the window.
            Assert.False(await spyWh.WaitAsync(TimeSpan.FromSeconds(5)));
        }
    }
}
/// <summary>
/// Executes a task graph with bounded parallelism. Tasks whose deep
/// dependencies ended Cancelled or Error are cancelled; all outcomes are
/// collected into <see cref="GraphTaskResult"/> records and returned.
/// </summary>
/// <param name="tasks">The graph of tasks to run.</param>
/// <param name="parallel">Maximum number of tasks executing concurrently.</param>
/// <param name="log">Logger; a per-task context is attached while a task runs.</param>
/// <param name="cancel">Cancels pending/incomplete tasks when signaled.</param>
/// <returns>One result per executed task.</returns>
public static async Task<IReadOnlyCollection<GraphTaskResult>> Run(this TaskGraph tasks, int parallel, ILogger log, CancellationToken cancel)
{
    // Runs one graph task and converts its outcome (success / cancelled /
    // error) into a GraphTaskResult; exceptions are captured, never thrown.
    async Task<GraphTaskResult> RunTask(GraphTask task)
    {
        var sw = Stopwatch.StartNew();
        GraphTaskResult Result(Exception ex = null) => new()
        {
            Name = task.Name,
            FinalStatus = task.Status,
            Duration = sw.Elapsed,
            Exception = ex
        };
        try
        {
            // Cancel this task if overall cancellation was requested or any
            // deep dependency ended Cancelled/Error.
            if (cancel.IsCancellationRequested || tasks.DependenciesDeep(task).Any(d => d.Status.In(Cancelled, Error)))
            {
                task.Status = Cancelled;
                return (Result());
            }
            task.Status = Running;
            log = log.ForContext("Task", task.Name);
            await task.Run(log, cancel);
            if (cancel.IsCancellationRequested)
            {
                task.Status = Cancelled;
            }
            else
            {
                task.Status = Success;
            }
            return (Result());
        }
        catch (Exception ex)
        {
            task.Status = Error;
            log.Error(ex, "Task {Task} failed: {Message}", task.Name, ex.Message);
            return (Result(ex));
        }
    }

    var block = new TransformBlock<GraphTask, GraphTaskResult>(RunTask, new()
    {
        MaxDegreeOfParallelism = parallel
    });
    // Initially set so the producer's first pass runs without waiting; set
    // again by the consumer loop whenever a task completes.
    var newTaskSignal = new AsyncManualResetEvent(true);

    // Feeds ready tasks into the block until the whole graph is complete.
    async Task Producer()
    {
        while (!tasks.AllComplete)
        {
            if (cancel.IsCancellationRequested)
            {
                // Mark everything still incomplete as cancelled so AllComplete
                // can eventually become true.
                foreach (var t in tasks.All.Where(t => t.Status.IsIncomplete()))
                {
                    t.Status = Cancelled;
                }
            }
            var tasksToAdd = tasks.AvailableToRun().ToList();
            if (tasksToAdd.IsEmpty())
            {
                // If no tasks are ready to start, wait to either be signaled,
                // or log which tasks are still running.
                var logTimeTask = Task.Delay(1.Minutes(), cancel);
                await Task.WhenAny(logTimeTask, newTaskSignal.WaitAsync());
                // NOTE(review): IsSet-then-Reset is not atomic; a Set landing
                // in between could be lost — confirm this race is acceptable.
                if (newTaskSignal.IsSet)
                {
                    newTaskSignal.Reset();
                }
                if (logTimeTask.IsCompleted)
                {
                    log.Debug("Waiting for {TaskList} to complete", tasks.Running.Select(t => t.Name));
                }
            }
            foreach (var task in tasksToAdd)
            {
                task.Status = Queued;
                await block.SendAsync(task);
            }
        }
        block.Complete();
    }

    var producer = Producer();
    // NOTE(review): IsFaulted is checked immediately after starting, so only
    // synchronous faults surface here; later producer failures are observed
    // by the WhenAll below.
    if (producer.IsFaulted)
    {
        await producer;
    }
    var taskResults = new List<GraphTaskResult>();
    while (await block.OutputAvailableAsync())
    {
        var item = await block.ReceiveAsync();
        taskResults.Add(item);
        // Wake the producer: a completed task may have unblocked dependents.
        newTaskSignal.Set();
    }
    await Task.WhenAll(producer, block.Completion);
    return (taskResults);
}
}
/// <summary>
/// Captures the sibling's replication reply together with the event used to
/// signal interested waiters.
/// </summary>
/// <param name="replicationBatchReply">Reply received from the replication sibling.</param>
/// <param name="trigger">Event to signal; ownership stays with the caller.</param>
public UpdateSiblingCurrentEtag(ReplicationMessageReply replicationBatchReply, AsyncManualResetEvent trigger)
{
    _trigger = trigger;
    _replicationBatchReply = replicationBatchReply;
}