/// <summary>
/// Verifies that the PubSub rendezvous grain survives a storage write fault during
/// consumer registration: the faulted call throws an <see cref="OrleansException"/>,
/// but a subsequent registration succeeds and is counted.
/// </summary>
public async Task RegisterConsumerFaultTest()
{
    this.fixture.Logger.LogInformation("************************ RegisterConsumerFaultTest *********************************");

    var streamId = new InternalStreamId("ProviderName", StreamId.Create("StreamNamespace", Guid.NewGuid()));
    var rendezvous = this.fixture.GrainFactory.GetGrain<IPubSubRendezvousGrain>(streamId.ToString());
    var storageFaults = this.fixture.GrainFactory.GetGrain<IStorageFaultGrain>(typeof(PubSubRendezvousGrain).FullName);

    // Baseline: a clean registration so the grain is happy and has persisted state.
    await rendezvous.RegisterConsumer(GuidId.GetGuidId(Guid.NewGuid()), streamId, null, null);
    int consumers = await rendezvous.ConsumerCount(streamId);
    Assert.Equal(1, consumers);

    // Arrange for the grain's next storage write to fail.
    await storageFaults.AddFaultOnWrite(rendezvous as GrainReference, new ApplicationException("Write"));

    // The registration that hits the faulted write must surface an OrleansException.
    await Assert.ThrowsAsync<OrleansException>(
        () => rendezvous.RegisterConsumer(GuidId.GetGuidId(Guid.NewGuid()), streamId, null, null));

    // The grain must recover and still function: a later registration succeeds.
    await rendezvous.RegisterConsumer(GuidId.GetGuidId(Guid.NewGuid()), streamId, null, null);
    consumers = await rendezvous.ConsumerCount(streamId);
    Assert.Equal(2, consumers);
}
/// <summary>
/// Produces a stream position consisting of a freshly generated stream identity in the
/// "EmptySpace" namespace and the next sequence token from this adapter's counters.
/// </summary>
public override StreamPosition GetStreamPosition(string partition, EventData queueMessage)
{
    // Each call consumes one value from the running sequence counter.
    var token = new EventHubSequenceTokenV2(this.eventHubOffset, this.sequenceNumberCounter++, this.eventIndex);
    var streamIdentity = StreamId.Create("EmptySpace", Guid.NewGuid());
    return new StreamPosition(streamIdentity, token);
}
/// <summary>
/// Verifies that a cursor throws <see cref="QueueCacheMissException"/> when a message
/// destined for its stream is evicted from the cache before the cursor consumes it.
/// </summary>
public void SimpleCacheMiss()
{
    var bufferPool = new ObjectPool<FixedSizeBuffer>(() => new FixedSizeBuffer(PooledBufferSize));
    var dataAdapter = new TestCacheDataAdapter();
    var cache = new PooledQueueCache(dataAdapter, NullLogger.Instance, null, null, TimeSpan.FromSeconds(10));
    var evictionStrategy = new ChronologicalEvictionStrategy(NullLogger.Instance, new TimePurgePredicate(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(1)), null, null);
    evictionStrategy.PurgeObservable = cache;
    var converter = new CachedMessageConverter(bufferPool, evictionStrategy);

    var seqNumber = 123;
    var streamKey = Guid.NewGuid();
    var stream = StreamId.Create(TestStreamNamespace, streamKey);

    var cursor = cache.GetCursor(stream, new EventSequenceTokenV2(seqNumber));

    // Start by enqueuing a message for the stream, followed by another one destined for a different stream
    EnqueueMessage(streamKey);
    EnqueueMessage(Guid.NewGuid());

    // Consume the stream, should be fine
    Assert.True(cache.TryGetNextMessage(cursor, out _));
    Assert.False(cache.TryGetNextMessage(cursor, out _));

    // Enqueue a new batch
    // First and last messages destined for stream, following messages
    // destined for other streams
    EnqueueMessage(streamKey);
    for (var idx = 0; idx < 20; idx++)
    {
        EnqueueMessage(Guid.NewGuid());
    }

    // Remove first three messages from the cache
    cache.RemoveOldestMessage(); // Destined for stream, consumed
    cache.RemoveOldestMessage(); // Not destined for stream
    cache.RemoveOldestMessage(); // Destined for stream, not consumed

    // Enqueue a new message for stream
    EnqueueMessage(streamKey);

    // Should throw since we missed the second message destined for stream
    Assert.Throws<QueueCacheMissException>(() => cache.TryGetNextMessage(cursor, out _));

    // Local helper: wraps one test message for the given stream key, pushes it into the
    // cache via the converter, and returns the sequence number assigned to it.
    long EnqueueMessage(Guid streamId)
    {
        var now = DateTime.UtcNow;
        var msg = new TestQueueMessage
        {
            StreamId = StreamId.Create(TestStreamNamespace, streamId),
            SequenceNumber = seqNumber,
        };
        cache.Add(new List<CachedMessage>() { converter.ToCachedMessage(msg, now) }, now);
        seqNumber++;
        return (msg.SequenceNumber);
    }
}
/// <summary>
/// A filter whose predicate misbehaves must be ignored: every published event should
/// still reach the consumer, in order.
/// </summary>
public virtual async Task IgnoreBadFilter()
{
    EnsureStreamFilterIsRegistered();

    const int numberOfEvents = 10;
    var streamId = StreamId.Create("IgnoreBadFilter", "my-stream");
    var consumer = this.clusterClient.GetGrain<IStreamingHistoryGrain>("IgnoreBadFilter");
    try
    {
        // Filter data "throw" — presumably drives the registered test filter to throw; see EnsureStreamFilterIsRegistered.
        await consumer.BecomeConsumer(streamId, ProviderName, "throw");

        var stream = this.clusterClient.GetStreamProvider(ProviderName).GetStream<int>(streamId);
        for (var evt = 0; evt < numberOfEvents; evt++)
        {
            await stream.OnNextAsync(evt);
        }

        // Give asynchronous delivery some time to complete before inspecting the history.
        await Task.Delay(WaitTime);

        var history = await consumer.GetReceivedItems();
        Assert.Equal(numberOfEvents, history.Count);
        for (var evt = 0; evt < numberOfEvents; evt++)
        {
            Assert.Equal(evt, history[evt]);
        }
    }
    finally
    {
        await consumer.StopBeingConsumer();
    }
}
/// <summary>
/// Two subscriptions on the same stream, each with its own filter data, must each apply
/// their own filter: one subscription lets only 3 through, the other only 7.
/// </summary>
public virtual async Task MultipleSubscriptionsDifferentFilterData()
{
    EnsureStreamFilterIsRegistered();

    const int numberOfEvents = 10;
    var streamId = StreamId.Create("MultipleSubscriptionsDifferentFilterData", "my-stream");
    var consumer = this.clusterClient.GetGrain<IStreamingHistoryGrain>("MultipleSubscriptionsDifferentFilterData");
    try
    {
        // Same grain subscribes twice; each subscription carries different filter data.
        await consumer.BecomeConsumer(streamId, ProviderName, "only3");
        await consumer.BecomeConsumer(streamId, ProviderName, "only7");

        var stream = this.clusterClient.GetStreamProvider(ProviderName).GetStream<int>(streamId);
        for (var evt = 0; evt < numberOfEvents; evt++)
        {
            await stream.OnNextAsync(evt);
        }

        // Give asynchronous delivery some time to complete before asserting.
        await Task.Delay(WaitTime);

        var history = await consumer.GetReceivedItems();
        Assert.Equal(2, history.Count);
        Assert.Contains(3, history);
        Assert.Contains(7, history);
    }
    finally
    {
        await consumer.StopBeingConsumer();
    }
}
/// <summary>
/// With one extremely slow consumer among 30 healthy ones, the cache back-pressure
/// algorithm should trigger, and it should clear again once the slow consumer stops.
/// </summary>
public async Task EHSlowConsuming_ShouldFavorSlowConsumer()
{
    var streamGuid = Guid.NewGuid();
    var streamId = StreamId.Create(StreamNamespace, streamGuid);

    // set up one slow consumer grain
    var slowConsumer = this.fixture.GrainFactory.GetGrain<ISlowConsumingGrain>(Guid.NewGuid());
    await slowConsumer.BecomeConsumer(streamGuid, StreamNamespace, StreamProviderName);

    // set up 30 healthy consumer grains to show how much we favor the slow consumer
    int healthyConsumerCount = 30;
    var healthyConsumers = await SetUpHealthyConsumerGrain(this.fixture.GrainFactory, streamGuid, StreamNamespace, StreamProviderName, healthyConsumerCount);

    // configure the data generator for the stream and start producing
    var mgmtGrain = this.fixture.GrainFactory.GetGrain<IManagementGrain>(0);
    var randomStreamPlacementArg = new EventDataGeneratorAdapterFactory.StreamRandomPlacementArg(streamId, this.seed.Next(100));
    await mgmtGrain.SendControlCommandToProvider(typeof(PersistentStreamProvider).FullName, StreamProviderName,
        (int)EventDataGeneratorAdapterFactory.Commands.Randomly_Place_Stream_To_Queue, randomStreamPlacementArg);

    // since there's an extremely slow consumer, the back pressure algorithm should be triggered
    await TestingUtils.WaitUntilAsync(lastTry => AssertCacheBackPressureTriggered(true, lastTry), timeout);

    // make the slow consumer stop consuming
    await slowConsumer.StopConsuming();

    // slowConsumer stopped consuming; back pressure should be cleared within the next check period
    await Task.Delay(monitorPressureWindowSize);
    await TestingUtils.WaitUntilAsync(lastTry => AssertCacheBackPressureTriggered(false, lastTry), timeout);

    // clean up test
    await StopHealthyConsumerGrainComing(healthyConsumers);
    await mgmtGrain.SendControlCommandToProvider(typeof(PersistentStreamProvider).FullName, StreamProviderName,
        (int)EventDataGeneratorAdapterFactory.Commands.Stop_Producing_On_Stream, streamId);
}
/// <summary>
/// Get the <see cref="StreamId"/> for an event message: the EventHub partition key
/// becomes the stream key and the message's stream-namespace property becomes the
/// stream namespace.
/// </summary>
/// <param name="queueMessage">The event message.</param>
/// <returns>The stream identity.</returns>
public virtual StreamId GetStreamIdentity(EventData queueMessage)
{
    string streamKey = queueMessage.PartitionKey;
    string streamNamespace = queueMessage.GetStreamNamespaceProperty();
    return (StreamId.Create(streamNamespace, streamKey));
}
/// <summary>
/// Maps a test queue message to its stream position: a namespace-less stream id derived
/// from the message's guid, paired with the message's own sequence token.
/// </summary>
private StreamPosition GetStreamPosition(TestQueueMessage queueMessage)
{
    var position = new StreamPosition(
        StreamId.Create(null, queueMessage.StreamGuid),
        queueMessage.SequenceToken);
    return position;
}
/// <summary>
/// Interleaves two streams into a single <see cref="CachedMessageBlock"/> and verifies
/// that TryFindFirstMessage/TryFindNextMessage walk each stream's messages at the
/// expected indices with the expected sequence tokens.
/// </summary>
public void NextInStreamTest()
{
    IObjectPool<CachedMessageBlock> pool = new MyTestPooled();
    ICacheDataAdapter dataAdapter = new TestCacheDataAdapter();
    CachedMessageBlock block = pool.Allocate();
    int last = 0;
    int sequenceNumber = 0;

    // define 2 streams
    var streams = new[] { new StreamIdentity(Guid.NewGuid(), null), new StreamIdentity(Guid.NewGuid(), null) };

    // add both streams interleaved, until the block is full
    while (block.HasCapacity)
    {
        var stream = streams[last % 2];
        var message = new TestQueueMessage
        {
            StreamGuid = stream.Guid,
            SequenceToken = new EventSequenceTokenV2(sequenceNumber)
        };

        // add message to end of block
        AddAndCheck(block, dataAdapter, message, 0, last - 1);
        last++;
        sequenceNumber += 2;
    }

    // get index of first message of stream 0
    int streamIndex;
    Assert.True(block.TryFindFirstMessage(StreamId.Create(streams[0]), dataAdapter, out streamIndex));
    Assert.Equal(0, streamIndex);
    Assert.Equal(0, block.GetSequenceToken(streamIndex, dataAdapter).SequenceNumber);

    // find the remaining stream-0 messages: even indices, tokens stepping by 4
    int iteration = 1;
    while (block.TryFindNextMessage(streamIndex + 1, StreamId.Create(streams[0]), dataAdapter, out streamIndex))
    {
        Assert.Equal(iteration * 2, streamIndex);
        Assert.Equal(iteration * 4, block.GetSequenceToken(streamIndex, dataAdapter).SequenceNumber);
        iteration++;
    }
    // FIX: Assert.Equal takes (expected, actual); the original had the arguments
    // swapped, which produces misleading failure messages.
    Assert.Equal(TestBlockSize / 2, iteration);

    // get index of first message of stream 1
    Assert.True(block.TryFindFirstMessage(StreamId.Create(streams[1]), dataAdapter, out streamIndex));
    Assert.Equal(1, streamIndex);
    Assert.Equal(2, block.GetSequenceToken(streamIndex, dataAdapter).SequenceNumber);

    // find the remaining stream-1 messages: odd indices, tokens offset by 2
    iteration = 1;
    while (block.TryFindNextMessage(streamIndex + 1, StreamId.Create(streams[1]), dataAdapter, out streamIndex))
    {
        Assert.Equal(iteration * 2 + 1, streamIndex);
        Assert.Equal(iteration * 4 + 2, block.GetSequenceToken(streamIndex, dataAdapter).SequenceNumber);
        iteration++;
    }
    Assert.Equal(TestBlockSize / 2, iteration);
}
/// <summary>
/// Builds the stream position for an EventHub message: the stream id is derived
/// deterministically from the partition, the token from the message's offset and
/// sequence number.
/// </summary>
public override StreamPosition GetStreamPosition(string partition, EventData queueMessage)
{
    var identity = new StreamIdentity(GetPartitionGuid(partition), null);
    var token = new EventHubSequenceTokenV2(queueMessage.Offset.ToString(), queueMessage.SequenceNumber, 0);
    return new StreamPosition(StreamId.Create(identity), token);
}
/// <summary>
/// Churn stress test: creates <paramref name="numStreams"/> streams, subscribes
/// <paramref name="numConsumers"/> consumers and <paramref name="numProducers"/>
/// producers to each through the given pipeline, and reports the subscription throughput.
/// </summary>
private async Task Test_Stream_Churn_NumStreams(
    string streamProviderName,
    int pipelineSize,
    int numStreams,
    int numConsumers = 9,
    int numProducers = 1,
    bool warmUpPubSub = true,
    bool normalSubscribeCalls = true)
{
    output.WriteLine("Testing churn with {0} Streams with {1} Consumers and {2} Producers per Stream NormalSubscribe={3}",
        numStreams, numConsumers, numProducers, normalSubscribeCalls);

    AsyncPipeline pipeline = new AsyncPipeline(pipelineSize);
    var promises = new List<Task>();

    // Create streamId Guids
    StreamId[] streamIds = new StreamId[numStreams];
    for (int i = 0; i < numStreams; i++)
    {
        streamIds[i] = StreamId.Create(this.StreamNamespace, Guid.NewGuid());
    }

    if (warmUpPubSub)
    {
        WarmUpPubSub(streamProviderName, streamIds, pipeline);
        pipeline.Wait();

        int activePubSubGrains = ActiveGrainCount(typeof(PubSubRendezvousGrain).FullName);
        Assert.Equal(streamIds.Length, activePubSubGrains); // "Initial PubSub count -- should all be warmed up"
    }

    int activeConsumerGrains = ActiveGrainCount(typeof(StreamLifecycleConsumerGrain).FullName);
    Assert.Equal(0, activeConsumerGrains); // "Initial Consumer count should be zero"

    Stopwatch sw = Stopwatch.StartNew();

    for (int i = 0; i < numStreams; i++)
    {
        Task promise = SetupOneStream(streamIds[i], streamProviderName, pipeline, numConsumers, numProducers, normalSubscribeCalls);
        promises.Add(promise);
    }

    // FIX: await instead of blocking with Task.WhenAll(...).Wait() in a Task-returning
    // method — avoids tying up a thread-pool thread and surfaces the original exception
    // rather than an AggregateException.
    await Task.WhenAll(promises);
    sw.Stop();

    int consumerCount = ActiveGrainCount(typeof(StreamLifecycleConsumerGrain).FullName);
    Assert.Equal(activeConsumerGrains + (numStreams * numConsumers), consumerCount); // "The correct number of new Consumer grains are active"

    TimeSpan elapsed = sw.Elapsed;
    int totalSubscriptions = numStreams * numConsumers;
    double rps = totalSubscriptions / elapsed.TotalSeconds;
    output.WriteLine("Subscriptions-per-second = {0} during period {1}", rps, elapsed);
    Assert.NotEqual(0.0, rps); // "RPS greater than zero"
}
/// <summary>
/// Wraps a set of producer grains that all publish to the same stream.
/// </summary>
private ProducerProxy(IStreaming_ProducerGrain[] targets, Guid streamId, string providerName, ILogger logger)
{
    _cleanedUpFlag = new InterlockedFlag();
    _targets = targets;
    _streamId = streamId;
    _providerName = providerName;
    _logger = logger;
    // Namespace-less stream id built from the raw guid.
    StreamId = StreamId.Create(null, streamId);
}
/// <summary>
/// Verifies that a cursor does not report a cache miss while other streams are active:
/// even after the message it points at is evicted, it can still pick up a newer message
/// for its own stream.
/// </summary>
public void AvoidCacheMissMultipleStreamsActive()
{
    var bufferPool = new ObjectPool<FixedSizeBuffer>(() => new FixedSizeBuffer(PooledBufferSize));
    var dataAdapter = new TestCacheDataAdapter();
    var cache = new PooledQueueCache(dataAdapter, NullLogger.Instance, null, null, TimeSpan.FromSeconds(30));
    var evictionStrategy = new ChronologicalEvictionStrategy(NullLogger.Instance, new TimePurgePredicate(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(1)), null, null);
    evictionStrategy.PurgeObservable = cache;
    var converter = new CachedMessageConverter(bufferPool, evictionStrategy);

    var seqNumber = 123;
    var streamKey = Guid.NewGuid();
    var stream = StreamId.Create(TestStreamNamespace, streamKey);

    // Enqueue a message for our stream
    var firstSequenceNumber = EnqueueMessage(streamKey);

    // Enqueue a few other messages for other streams
    EnqueueMessage(Guid.NewGuid());
    EnqueueMessage(Guid.NewGuid());

    // Consume the first event and see that the cursor has moved to last seen event (not matching our streamIdentity)
    var cursor = cache.GetCursor(stream, new EventSequenceTokenV2(firstSequenceNumber));
    Assert.True(cache.TryGetNextMessage(cursor, out var firstContainer));
    Assert.False(cache.TryGetNextMessage(cursor, out _));

    // Remove multiple events, including the one that the cursor is currently pointing to
    cache.RemoveOldestMessage();
    cache.RemoveOldestMessage();
    cache.RemoveOldestMessage();

    // Enqueue another message for stream
    var lastSequenceNumber = EnqueueMessage(streamKey);

    // Should be able to consume the event just pushed
    Assert.True(cache.TryGetNextMessage(cursor, out var lastContainer));
    Assert.Equal(stream, lastContainer.StreamId);
    Assert.Equal(lastSequenceNumber, lastContainer.SequenceToken.SequenceNumber);

    // Local helper: adds one message for the given stream key to the cache and returns
    // the sequence number that was assigned to it.
    long EnqueueMessage(Guid streamId)
    {
        var now = DateTime.UtcNow;
        var msg = new TestQueueMessage
        {
            StreamId = StreamId.Create(TestStreamNamespace, streamId),
            SequenceNumber = seqNumber,
        };
        cache.Add(new List<CachedMessage>() { converter.ToCachedMessage(msg, now) }, now);
        seqNumber++;
        return (msg.SequenceNumber);
    }
}
/// <summary>
/// Asserts the producer count registered on the rendezvous grain for the given stream.
/// Only supported for the SMS provider; a no-op for any other provider name.
/// </summary>
private async Task AssertProducerCount(int expectedCount, string providerName, Guid streamIdGuid)
{
    // currently, we only support checking the producer count on the SMS rendezvous grain.
    if (providerName != SMS_STREAM_PROVIDER_NAME)
    {
        return;
    }

    var streamId = StreamId.Create(StreamTestsConstants.DefaultStreamNamespace, streamIdGuid);
    var internalStreamId = new InternalStreamId(providerName, streamId);
    var actualCount = await StreamTestUtils.GetStreamPubSub(this.client).ProducerCount(internalStreamId);
    logger.Info("StreamingTestRunner.AssertProducerCount: expected={0} actual (SMSStreamRendezvousGrain.ProducerCount)={1} streamId={2}",
        expectedCount, actualCount, streamId);
    Assert.Equal(expectedCount, actualCount);
}
/// <summary>
/// Verifies that a cursor created at a sequence token whose message has already been
/// evicted throws <see cref="QueueCacheMissException"/> on the next read.
/// </summary>
public void SimpleCacheMiss()
{
    var bufferPool = new ObjectPool<FixedSizeBuffer>(() => new FixedSizeBuffer(PooledBufferSize));
    var dataAdapter = new TestCacheDataAdapter();
    var cache = new PooledQueueCache(dataAdapter, NullLogger.Instance, null, null, TimeSpan.FromSeconds(10));
    var evictionStrategy = new ChronologicalEvictionStrategy(NullLogger.Instance, new TimePurgePredicate(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(1)), null, null);
    evictionStrategy.PurgeObservable = cache;
    var converter = new CachedMessageConverter(bufferPool, evictionStrategy);

    int idx;
    var seqNumber = 123;
    var stream = StreamId.Create(TestStreamNamespace, Guid.NewGuid());

    // First and last messages destined for stream, following messages
    // destined for other streams
    for (idx = 0; idx < 20; idx++)
    {
        var now = DateTime.UtcNow;
        var msg = new TestQueueMessage
        {
            StreamId = (idx == 0) ? stream : StreamId.Create(TestStreamNamespace, Guid.NewGuid()),
            SequenceNumber = seqNumber + idx,
        };
        cache.Add(new List<CachedMessage>() { converter.ToCachedMessage(msg, now) }, now);
    }

    // Cursor positioned at the very first message's token.
    var cursor = cache.GetCursor(stream, new EventSequenceTokenV2(seqNumber));

    // Remove first message
    cache.RemoveOldestMessage();

    // Enqueue a new message for stream
    {
        idx++;
        var now = DateTime.UtcNow;
        var msg = new TestQueueMessage
        {
            StreamId = stream,
            SequenceNumber = seqNumber + idx,
        };
        cache.Add(new List<CachedMessage>() { converter.ToCachedMessage(msg, now) }, now);
    }

    // Should throw since we missed the first message
    Assert.Throws<QueueCacheMissException>(() => cache.TryGetNextMessage(cursor, out _));
}
/// <summary>
/// Configures this generator from the supplied <see cref="IStreamGeneratorConfig"/>,
/// resetting the sequence counter and assigning a fresh random stream id.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">
/// Thrown when the config is not a <see cref="SimpleGeneratorOptions"/>.
/// </exception>
public void Configure(IServiceProvider serviceProvider, IStreamGeneratorConfig generatorConfig)
{
    if (!(generatorConfig is SimpleGeneratorOptions simpleOptions))
    {
        throw new ArgumentOutOfRangeException(nameof(generatorConfig));
    }

    options = simpleOptions;
    sequenceId = 0;
    streamId = StreamId.Create(options.StreamNamespace, Guid.NewGuid());
}
/// <summary>
/// Reads the rendezvous grain's consumer and producer counts for the given stream
/// and asserts each against the expected values.
/// </summary>
internal static async Task CheckPubSubCounts(IInternalClusterClient client, ITestOutputHelper output, string when,
    int expectedPublisherCount, int expectedConsumerCount, Guid streamIdGuid, string streamProviderName, string streamNamespace)
{
    var pubSub = GetStreamPubSub(client);
    var streamId = new InternalStreamId(streamProviderName, StreamId.Create(streamNamespace, streamIdGuid));

    int actualConsumers = await pubSub.ConsumerCount(streamId);
    Assert_AreEqual(output, expectedConsumerCount, actualConsumers,
        "{0} - ConsumerCount for stream {1} = {2}", when, streamId, actualConsumers);

    int actualPublishers = await pubSub.ProducerCount(streamId);
    Assert_AreEqual(output, expectedPublisherCount, actualPublishers,
        "{0} - PublisherCount for stream {1} = {2}", when, streamId, actualPublishers);
}
/// <summary>
/// Verifies that the PubSub rendezvous grain recovers from storage faults during
/// consumer unregistration: both a failed write (one consumer remaining) and a failed
/// clear (removing the last consumer) throw, but subsequent calls succeed.
/// </summary>
public async Task UnregisterConsumerFaultTest()
{
    this.fixture.Logger.LogInformation("************************ UnregisterConsumerFaultTest *********************************");
    var streamId = new InternalStreamId("ProviderName", StreamId.Create("StreamNamespace", Guid.NewGuid()));
    var pubSubGrain = this.fixture.GrainFactory.GetGrain<IPubSubRendezvousGrain>(streamId.ToString());
    var faultGrain = this.fixture.GrainFactory.GetGrain<IStorageFaultGrain>(typeof(PubSubRendezvousGrain).FullName);

    // Add two consumers so when we remove the first it does a storage write, not a storage clear.
    GuidId subscriptionId1 = GuidId.GetGuidId(Guid.NewGuid());
    GuidId subscriptionId2 = GuidId.GetGuidId(Guid.NewGuid());
    await pubSubGrain.RegisterConsumer(subscriptionId1, streamId, null, null);
    await pubSubGrain.RegisterConsumer(subscriptionId2, streamId, null, null);
    int consumers = await pubSubGrain.ConsumerCount(streamId);
    Assert.Equal(2, consumers);

    // inject write fault
    await faultGrain.AddFaultOnWrite(pubSubGrain as GrainReference, new ApplicationException("Write"));

    // expect exception when unregistering a consumer
    await Assert.ThrowsAsync<OrleansException>(
        () => pubSubGrain.UnregisterConsumer(subscriptionId1, streamId));

    // pubsub grain should recover and still function
    await pubSubGrain.UnregisterConsumer(subscriptionId1, streamId);
    consumers = await pubSubGrain.ConsumerCount(streamId);
    Assert.Equal(1, consumers);

    // inject clear fault, because removing the last consumer should trigger a clear storage call.
    // FIX: the fault message previously said "Write" — corrected to "Clear" so fault logs
    // identify the injected fault correctly.
    await faultGrain.AddFaultOnClear(pubSubGrain as GrainReference, new ApplicationException("Clear"));

    // expect exception when unregistering the last consumer
    await Assert.ThrowsAsync<OrleansException>(
        () => pubSubGrain.UnregisterConsumer(subscriptionId2, streamId));

    // pubsub grain should recover and still function
    await pubSubGrain.UnregisterConsumer(subscriptionId2, streamId);
    consumers = await pubSubGrain.ConsumerCount(streamId);
    Assert.Equal(0, consumers);
}
/// <summary>
/// Registers a consumer with the PubSub rendezvous grain directly (bypassing the usual
/// subscribe path) and binds a stream-consumer extension to observe the stream.
/// </summary>
public virtual async Task TestBecomeConsumerSlim(Guid streamIdGuid, string providerName)
{
    // TODO NOT SURE THIS FUNCTION MAKES ANY SENSE
    var streamId = StreamId.Create(null, streamIdGuid);
    InitStream(streamId, providerName);
    var observer = new MyStreamObserver<int>(logger);

    var (myExtension, myExtensionReference) = this.streamProviderRuntime.BindExtension<StreamConsumerExtension, IStreamConsumerExtension>(
        () => new StreamConsumerExtension(streamProviderRuntime));

    var id = new InternalStreamId(providerName, streamId);
    IPubSubRendezvousGrain pubsub = GrainFactory.GetGrain<IPubSubRendezvousGrain>(id.ToString());
    GuidId subscriptionId = GuidId.GetNewGuidId();
    // NOTE(review): registration uses the stream held in grain state, not the local
    // streamId built above — confirm this asymmetry is intentional.
    await pubsub.RegisterConsumer(subscriptionId, ((StreamImpl<int>)State.Stream).InternalStreamId, myExtensionReference, null);
    myExtension.SetObserver(subscriptionId, ((StreamImpl<int>)State.Stream), observer, null, null, null);
}
/// <summary>
/// Polls the rendezvous grain until its consumer and producer counts match expectations
/// (or roughly five seconds elapse), then asserts each count. The polling loop was
/// duplicated for consumers and producers; it is extracted into a local helper.
/// </summary>
internal static async Task CheckPubSubCounts(IInternalClusterClient client, ITestOutputHelper output, string when,
    int expectedPublisherCount, int expectedConsumerCount, Guid streamIdGuid, string streamProviderName, string streamNamespace)
{
    var pubSub = GetStreamPubSub(client);
    var streamId = new InternalStreamId(streamProviderName, StreamId.Create(streamNamespace, streamIdGuid));

    int consumerCount = await PollUntilAsync(() => pubSub.ConsumerCount(streamId), expectedConsumerCount);
    Assert_AreEqual(output, expectedConsumerCount, consumerCount,
        "{0} - ConsumerCount for stream {1} = {2}", when, streamId, consumerCount);

    int publisherCount = await PollUntilAsync(() => pubSub.ProducerCount(streamId), expectedPublisherCount);
    Assert_AreEqual(output, expectedPublisherCount, publisherCount,
        "{0} - PublisherCount for stream {1} = {2}", when, streamId, publisherCount);

    // Re-reads the count every second until it matches the expected value or more than
    // five seconds have been spent waiting; returns the last observed count either way.
    async Task<int> PollUntilAsync(Func<Task<int>> readCount, int expected)
    {
        var totalWait = TimeSpan.Zero;
        int count;
        while ((count = await readCount()) != expected)
        {
            await Task.Delay(1000);
            totalWait += TimeSpan.FromMilliseconds(1000);
            if (totalWait > TimeSpan.FromMilliseconds(5000))
            {
                break;
            }
        }
        return count;
    }
}
/// <summary>
/// With the "even" filter applied, only even-numbered events should reach the consumer,
/// in their original order.
/// </summary>
public virtual async Task OnlyEvenItems()
{
    EnsureStreamFilterIsRegistered();

    const int numberOfEvents = 10;
    var streamId = StreamId.Create("OnlyEvenItems", "my-stream");
    var consumer = this.clusterClient.GetGrain<IStreamingHistoryGrain>("OnlyEvenItems");
    try
    {
        await consumer.BecomeConsumer(streamId, ProviderName, "even");

        var stream = this.clusterClient.GetStreamProvider(ProviderName).GetStream<int>(streamId);
        for (var evt = 0; evt < numberOfEvents; evt++)
        {
            await stream.OnNextAsync(evt);
        }

        // Give asynchronous delivery some time to complete before asserting.
        await Task.Delay(WaitTime);

        var history = await consumer.GetReceivedItems();
        var historyIndex = 0;
        for (var evt = 0; evt < numberOfEvents; evt++)
        {
            if (evt % 2 == 0)
            {
                Assert.Equal(evt, history[historyIndex]);
                historyIndex++;
            }
        }
    }
    finally
    {
        await consumer.StopBeingConsumer();
    }
}
/// <summary>
/// Golden-path cache walk: fills the cache with messages interleaved across two streams,
/// verifies a cursor per stream sees exactly its own half of the messages, then keeps
/// adding blocks (enough to cycle the cache twice) while both cursors keep up.
/// Returns the next sequence number after all additions.
/// </summary>
private int RunGoldenPath(PooledQueueCache cache, CachedMessageConverter converter, int startOfCache)
{
    int sequenceNumber = startOfCache;
    IBatchContainer batch;

    var stream1 = StreamId.Create(TestStreamNamespace, Guid.NewGuid());
    var stream2 = StreamId.Create(TestStreamNamespace, Guid.NewGuid());

    // now add messages into cache newer than cursor
    // Adding enough to fill the pool
    List<TestQueueMessage> messages = Enumerable.Range(0, MessagesPerBuffer * PooledBufferCount)
        .Select(i => new TestQueueMessage
        {
            StreamId = i % 2 == 0 ? stream1 : stream2,
            SequenceNumber = sequenceNumber + i
        })
        .ToList();
    DateTime utcNow = DateTime.UtcNow;
    List<CachedMessage> cachedMessages = messages
        .Select(m => converter.ToCachedMessage(m, utcNow))
        .ToList();
    cache.Add(cachedMessages, utcNow);
    sequenceNumber += MessagesPerBuffer * PooledBufferCount;

    // get cursor for stream1, walk all the events in the stream using the cursor
    object stream1Cursor = cache.GetCursor(stream1, new EventSequenceTokenV2(startOfCache));
    int stream1EventCount = 0;
    while (cache.TryGetNextMessage(stream1Cursor, out batch))
    {
        Assert.NotNull(stream1Cursor);
        Assert.NotNull(batch);
        Assert.Equal(stream1, batch.StreamId);
        Assert.NotNull(batch.SequenceToken);
        stream1EventCount++;
    }
    // messages alternate between the two streams, so each stream owns exactly half
    Assert.Equal((sequenceNumber - startOfCache) / 2, stream1EventCount);

    // get cursor for stream2, walk all the events in the stream using the cursor
    object stream2Cursor = cache.GetCursor(stream2, new EventSequenceTokenV2(startOfCache));
    int stream2EventCount = 0;
    while (cache.TryGetNextMessage(stream2Cursor, out batch))
    {
        Assert.NotNull(stream2Cursor);
        Assert.NotNull(batch);
        Assert.Equal(stream2, batch.StreamId);
        Assert.NotNull(batch.SequenceToken);
        stream2EventCount++;
    }
    Assert.Equal((sequenceNumber - startOfCache) / 2, stream2EventCount);

    // Add a block's worth of events to the cache, then walk each cursor. Do this enough times to fill the cache twice.
    for (int j = 0; j < PooledBufferCount * 2; j++)
    {
        List<TestQueueMessage> moreMessages = Enumerable.Range(0, MessagesPerBuffer)
            .Select(i => new TestQueueMessage
            {
                StreamId = i % 2 == 0 ? stream1 : stream2,
                SequenceNumber = sequenceNumber + i
            })
            .ToList();
        utcNow = DateTime.UtcNow;
        List<CachedMessage> moreCachedMessages = moreMessages
            .Select(m => converter.ToCachedMessage(m, utcNow))
            .ToList();
        cache.Add(moreCachedMessages, utcNow);
        sequenceNumber += MessagesPerBuffer;

        // walk all the events in the stream using the cursor
        while (cache.TryGetNextMessage(stream1Cursor, out batch))
        {
            Assert.NotNull(stream1Cursor);
            Assert.NotNull(batch);
            Assert.Equal(stream1, batch.StreamId);
            Assert.NotNull(batch.SequenceToken);
            stream1EventCount++;
        }
        Assert.Equal((sequenceNumber - startOfCache) / 2, stream1EventCount);

        // walk all the events in the stream using the cursor
        while (cache.TryGetNextMessage(stream2Cursor, out batch))
        {
            Assert.NotNull(stream2Cursor);
            Assert.NotNull(batch);
            Assert.Equal(stream2, batch.StreamId);
            Assert.NotNull(batch.SequenceToken);
            stream2EventCount++;
        }
        Assert.Equal((sequenceNumber - startOfCache) / 2, stream2EventCount);
    }
    return (sequenceNumber);
}
/// <summary>
/// Convenience overload: builds the <see cref="StreamId"/> from a namespace/guid pair
/// before delegating to the grain's RemoveConsumer.
/// </summary>
public static Task RemoveConsumer(this IStreamLifecycleConsumerGrain grain, Guid streamIdGuid, string streamNamespace, string providerName, StreamSubscriptionHandle<int> consumerHandle)
    => grain.RemoveConsumer(StreamId.Create(streamNamespace, streamIdGuid), providerName, consumerHandle);
/// <summary>
/// End-to-end queue adapter exercise: sends batches alternating between two streams,
/// drains them back through per-queue receivers into the queue cache, then verifies the
/// cache cursors can replay every message in order (and can resume from a mid-cache token).
/// </summary>
private async Task SendAndReceiveFromQueueAdapter(IQueueAdapterFactory adapterFactory)
{
    IQueueAdapter adapter = await adapterFactory.CreateAdapter();
    IQueueAdapterCache cache = adapterFactory.GetQueueAdapterCache();

    // Create receiver per queue
    IStreamQueueMapper mapper = adapterFactory.GetStreamQueueMapper();
    Dictionary<QueueId, IQueueAdapterReceiver> receivers = mapper.GetAllQueues().ToDictionary(queueId => queueId, adapter.CreateReceiver);
    Dictionary<QueueId, IQueueCache> caches = mapper.GetAllQueues().ToDictionary(queueId => queueId, cache.CreateQueueCache);

    await Task.WhenAll(receivers.Values.Select(receiver => receiver.Initialize(TimeSpan.FromSeconds(5))));

    // test using 2 streams
    Guid streamId1 = Guid.NewGuid();
    Guid streamId2 = Guid.NewGuid();

    int receivedBatches = 0;
    var streamsPerQueue = new ConcurrentDictionary<QueueId, HashSet<StreamId>>();

    // reader threads (at most 2 active queues because only two streams)
    var work = new List<Task>();
    foreach (KeyValuePair<QueueId, IQueueAdapterReceiver> receiverKvp in receivers)
    {
        QueueId queueId = receiverKvp.Key;
        var receiver = receiverKvp.Value;
        var qCache = caches[queueId];
        Task task = Task.Factory.StartNew(() =>
        {
            // Poll the queue until all batches (across all readers) have been received.
            while (receivedBatches < NumBatches)
            {
                var messages = receiver.GetQueueMessagesAsync(SQSStorage.MAX_NUMBER_OF_MESSAGE_TO_PEAK).Result.ToArray();
                if (!messages.Any())
                {
                    continue;
                }

                foreach (var message in messages.Cast<SQSBatchContainer>())
                {
                    // Record which streams were observed on this queue.
                    streamsPerQueue.AddOrUpdate(queueId,
                        id => new HashSet<StreamId> { message.StreamId },
                        (id, set) =>
                        {
                            set.Add(message.StreamId);
                            return (set);
                        });
                    output.WriteLine("Queue {0} received message on stream {1}", queueId, message.StreamId);
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<int>().Count()); // "Half the events were ints"
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<string>().Count()); // "Half the events were strings"
                }
                Interlocked.Add(ref receivedBatches, messages.Length);
                qCache.AddToCache(messages);
            }
        });
        work.Add(task);
    }

    // send events
    List<object> events = CreateEvents(NumMessagesPerBatch);
    work.Add(Task.Factory.StartNew(() => Enumerable.Range(0, NumBatches)
        .Select(i => i % 2 == 0 ? streamId1 : streamId2)
        .ToList()
        .ForEach(streamId => adapter.QueueMessageBatchAsync(StreamId.Create(streamId.ToString(), streamId),
            events.Take(NumMessagesPerBatch).ToArray(), null,
            RequestContextExtensions.Export(this.fixture.SerializationManager)).Wait())));
    await Task.WhenAll(work);

    // Make sure we got back everything we sent
    Assert.Equal(NumBatches, receivedBatches);

    // check to see if all the events are in the cache and we can enumerate through them
    StreamSequenceToken firstInCache = new EventSequenceTokenV2(0);
    foreach (KeyValuePair<QueueId, HashSet<StreamId>> kvp in streamsPerQueue)
    {
        var receiver = receivers[kvp.Key];
        var qCache = caches[kvp.Key];

        foreach (StreamId streamGuid in kvp.Value)
        {
            // read all messages in cache for stream
            IQueueCacheCursor cursor = qCache.GetCacheCursor(streamGuid, firstInCache);
            int messageCount = 0;
            StreamSequenceToken tenthInCache = null;
            StreamSequenceToken lastToken = firstInCache;
            while (cursor.MoveNext())
            {
                Exception ex;
                messageCount++;
                IBatchContainer batch = cursor.GetCurrent(out ex);
                output.WriteLine("Token: {0}", batch.SequenceToken);
                // Tokens must be non-decreasing as we walk the cache.
                Assert.True(batch.SequenceToken.CompareTo(lastToken) >= 0, $"order check for event {messageCount}");
                lastToken = batch.SequenceToken;
                if (messageCount == 10)
                {
                    tenthInCache = batch.SequenceToken;
                }
            }
            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            Assert.Equal(NumBatches / 2, messageCount);
            Assert.NotNull(tenthInCache);

            // read all messages from the 10th
            cursor = qCache.GetCacheCursor(streamGuid, tenthInCache);
            messageCount = 0;
            while (cursor.MoveNext())
            {
                messageCount++;
            }
            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            const int expected = NumBatches / 2 - 10 + 1; // all except the first 10, including the 10th (10 + 1)
            Assert.Equal(expected, messageCount);
        }
    }
}
/// <summary>
/// Convenience overload: builds the <see cref="StreamId"/> from a namespace/guid pair
/// before delegating to the grain's BecomeConsumer.
/// </summary>
public static Task BecomeConsumer(this IFilteredStreamConsumerGrain grain, Guid streamIdGuid, string streamNamespace, string providerName, bool sendEvensOnly)
    => grain.BecomeConsumer(StreamId.Create(streamNamespace, streamIdGuid), providerName, sendEvensOnly);
/// <summary>
/// Convenience overload: builds the <see cref="StreamId"/> from a namespace/guid pair
/// before delegating to the grain's SubscribeWithBadFunc.
/// </summary>
public static Task SubscribeWithBadFunc(this IFilteredStreamConsumerGrain grain, Guid streamIdGuid, string streamNamespace, string providerName)
    => grain.SubscribeWithBadFunc(StreamId.Create(streamNamespace, streamIdGuid), providerName);
/// <summary>
/// Convenience overload: builds the <see cref="StreamId"/> from a namespace/guid pair
/// before delegating to the grain's BecomeProducer.
/// </summary>
public static Task BecomeProducer(this IStreamLifecycleProducerGrain grain, Guid streamIdGuid, string streamNamespace, string providerName)
    => grain.BecomeProducer(StreamId.Create(streamNamespace, streamIdGuid), providerName);
/// <summary>
/// Verifies that a cursor does not miss after the message it consumed is evicted:
/// consume one event, evict it, optionally pad the cache with unrelated streams,
/// then confirm the next event for the same stream is still delivered.
/// </summary>
/// <param name="emptyCache">When false, unrelated messages are enqueued so the
/// cache is non-empty between the two events for the stream under test.</param>
private void AvoidCacheMiss(bool emptyCache)
{
    var bufferPool = new ObjectPool<FixedSizeBuffer>(() => new FixedSizeBuffer(PooledBufferSize));
    var dataAdapter = new TestCacheDataAdapter();
    var cache = new PooledQueueCache(dataAdapter, NullLogger.Instance, null, null, TimeSpan.FromSeconds(30));
    var evictionStrategy = new ChronologicalEvictionStrategy(NullLogger.Instance, new TimePurgePredicate(TimeSpan.FromSeconds(1), TimeSpan.FromSeconds(1)), null, null);
    evictionStrategy.PurgeObservable = cache;
    var converter = new CachedMessageConverter(bufferPool, evictionStrategy);

    var seqNumber = 123;
    var stream = StreamId.Create(TestStreamNamespace, Guid.NewGuid());

    // Enqueue a message for stream
    var firstSequenceNumber = EnqueueMessage(stream);

    // Consume first event
    var cursor = cache.GetCursor(stream, new EventSequenceTokenV2(firstSequenceNumber));
    Assert.True(cache.TryGetNextMessage(cursor, out var firstContainer));
    Assert.Equal(stream, firstContainer.StreamId);
    Assert.Equal(firstSequenceNumber, firstContainer.SequenceToken.SequenceNumber);

    // Remove first message, that was consumed
    cache.RemoveOldestMessage();

    if (!emptyCache)
    {
        // Enqueue messages for other streams so the cache isn't empty.
        // (Was six copy-pasted calls; the loop preserves the same count.)
        for (var i = 0; i < 6; i++)
        {
            EnqueueMessage(StreamId.Create(TestStreamNamespace, Guid.NewGuid()));
        }
    }

    // Enqueue another message for stream
    var lastSequenceNumber = EnqueueMessage(stream);

    // Should be able to consume the event just pushed
    Assert.True(cache.TryGetNextMessage(cursor, out var lastContainer));
    Assert.Equal(stream, lastContainer.StreamId);
    Assert.Equal(lastSequenceNumber, lastContainer.SequenceToken.SequenceNumber);

    // Adds one message to the cache for the given stream and returns its
    // sequence number; increments the shared counter for the next message.
    long EnqueueMessage(StreamId streamId)
    {
        var now = DateTime.UtcNow;
        var msg = new TestQueueMessage
        {
            StreamId = streamId,
            SequenceNumber = seqNumber,
        };
        cache.Add(new List<CachedMessage>() { converter.ToCachedMessage(msg, now) }, now);
        seqNumber++;
        return msg.SequenceNumber;
    }
}
/// <summary>
/// End-to-end check of one stream: wires up <paramref name="numProducers"/> producer
/// grains and <paramref name="numConsumers"/> consumer grains, sends
/// <paramref name="numMessages"/> items per producer, then verifies pub/sub
/// producer/consumer registration counts and each consumer's received count.
/// </summary>
/// <param name="streamId">Guid identity of the stream under test.</param>
/// <param name="streamProviderName">Stream provider to use.</param>
/// <param name="useFanOut">When true, sends are pipelined concurrently through an
/// AsyncPipeline; when false, each send is awaited serially.</param>
/// <returns>Observed throughput in messages per second (includes setup time,
/// since the stopwatch starts before topology initialization).</returns>
private async Task <double> TestOneStream(Guid streamId, string streamProviderName, int numProducers, int numConsumers, int numMessages, bool useFanOut = true)
{
    output.WriteLine("Testing Stream {0} with Producers={1} Consumers={2} x {3} messages", streamId, numProducers, numConsumers, numMessages);
    Stopwatch sw = Stopwatch.StartNew();
    List <IStreamLifecycleConsumerGrain> consumers = new List <IStreamLifecycleConsumerGrain>();
    List <IStreamLifecycleProducerGrain> producers = new List <IStreamLifecycleProducerGrain>();
    // Populates the producers/consumers lists and subscribes them to the stream.
    await InitializeTopology(streamId, this.StreamNamespace, streamProviderName, numProducers, numConsumers, producers, consumers, useFanOut);
    var promises = new List <Task>();
    // Producers send M message each
    int item = 1;
    AsyncPipeline pipeline = new AsyncPipeline(MessagePipelineSize);
    foreach (var grain in producers)
    {
        for (int m = 0; m < numMessages; m++)
        {
            Task promise = grain.SendItem(item++);
            if (useFanOut)
            {
                // Pipeline throttles in-flight sends; promises collected for the final await.
                pipeline.Add(promise);
                promises.Add(promise);
            }
            else
            {
                await promise;
            }
        }
    }
    if (useFanOut)
    {
        //output.WriteLine("Test: Waiting for {0} producers to finish sending {1} messages", producers.Count, promises.Count);
        await Task.WhenAll(promises);
        promises.Clear();
    }
    var pubSub = StreamTestUtils.GetStreamPubSub(this.InternalClient);

    // Check Consumer counts
    var streamId1 = new InternalStreamId(streamProviderName, StreamId.Create(StreamNamespace, streamId));
    int consumerCount = await pubSub.ConsumerCount(streamId1);
    Assert.Equal(numConsumers, consumerCount); // "ConsumerCount for Stream {0}", streamId

    // Check Producer counts
    int producerCount = await pubSub.ProducerCount(streamId1);
    Assert.Equal(numProducers, producerCount); // "ProducerCount for Stream {0}", streamId

    // Check message counts received by consumers
    // NOTE(review): the "+ 1" presumably accounts for one extra message sent per
    // producer during topology initialization — confirm against InitializeTopology.
    int totalMessages = (numMessages + 1) * numProducers;
    foreach (var grain in consumers)
    {
        int count = await grain.GetReceivedCount();
        Assert.Equal(totalMessages, count); // "ReceivedCount for Consumer grain {0}", grain.GetPrimaryKey());
    }
    double rps = totalMessages / sw.Elapsed.TotalSeconds;
    //output.WriteLine("Sent {0} messages total from {1} Producers to {2} Consumers in {3} at {4} RPS",
    //    totalMessages, numProducers, numConsumers,
    //    sw.Elapsed, rps);
    return(rps);
}
/// <summary>
/// Churn test: creates <paramref name="numStreams"/> streams served by a small fixed
/// pool of <paramref name="numProducers"/> producer grains, subscribes
/// <paramref name="numConsumers"/> consumers per stream, and asserts active-grain
/// counts plus a non-zero subscriptions-per-second rate.
/// </summary>
/// <param name="warmUpPubSub">Pre-activate the PubSubRendezvous grains before timing.</param>
/// <param name="warmUpProducers">Pre-activate the producer grains before timing.</param>
/// <param name="normalSubscribeCalls">Passed through to SetupOneStream to select the subscribe path.</param>
private async Task Test_Stream_Churn_NumStreams_FewPublishers(
    string streamProviderName,
    int pipelineSize,
    int numStreams,
    int numConsumers = 9,
    int numProducers = 4,
    bool warmUpPubSub = true,
    bool warmUpProducers = false,
    bool normalSubscribeCalls = true)
{
    output.WriteLine("Testing churn with {0} Streams on {1} Producers with {2} Consumers per Stream", numStreams, numProducers, numConsumers);
    AsyncPipeline pipeline = new AsyncPipeline(pipelineSize);

    // Create streamId Guids
    StreamId[] streamIds = new StreamId[numStreams];
    for (int i = 0; i < numStreams; i++)
    {
        streamIds[i] = StreamId.Create(this.StreamNamespace, Guid.NewGuid());
    }

    int activeConsumerGrains = ActiveGrainCount(typeof(StreamLifecycleConsumerGrain).FullName);
    Assert.Equal(0, activeConsumerGrains); // "Initial Consumer count should be zero"
    int activeProducerGrains = ActiveGrainCount(typeof(StreamLifecycleProducerGrain).FullName);
    Assert.Equal(0, activeProducerGrains); // "Initial Producer count should be zero"

    if (warmUpPubSub)
    {
        WarmUpPubSub(streamProviderName, streamIds, pipeline);
        pipeline.Wait();
        int activePubSubGrains = ActiveGrainCount(typeof(PubSubRendezvousGrain).FullName);
        Assert.Equal(streamIds.Length, activePubSubGrains); // "Initial PubSub count -- should all be warmed up"
    }

    Guid[] producerIds = new Guid[numProducers];
    if (numProducers > 0 && warmUpProducers)
    {
        // Warm up Producers to pre-create grains
        for (int i = 0; i < numProducers; i++)
        {
            producerIds[i] = Guid.NewGuid();
            var grain = this.GrainFactory.GetGrain <IStreamLifecycleProducerGrain>(producerIds[i]);
            Task promise = grain.Ping();
            pipeline.Add(promise);
        }
        pipeline.Wait();
        int activePublisherGrains = this.ActiveGrainCount(typeof(StreamLifecycleProducerGrain).FullName);
        Assert.Equal(numProducers, activePublisherGrains); // "Initial Publisher count -- should all be warmed up"
    }

    var promises = new List <Task>();
    Stopwatch sw = Stopwatch.StartNew();

    if (numProducers > 0)
    {
        // Producers: round-robin the streams over the small producer pool.
        for (int i = 0; i < numStreams; i++)
        {
            StreamId streamId = streamIds[i];
            Guid producerId = producerIds[i % numProducers];
            var grain = this.GrainFactory.GetGrain <IStreamLifecycleProducerGrain>(producerId);
            Task promise = grain.BecomeProducer(streamId, streamProviderName);
            promises.Add(promise);
            pipeline.Add(promise);
        }
        pipeline.Wait();
        promises.Clear();
    }

    // Consumers
    for (int i = 0; i < numStreams; i++)
    {
        StreamId streamId = streamIds[i];
        Task promise = SetupOneStream(streamId, streamProviderName, pipeline, numConsumers, 0, normalSubscribeCalls);
        promises.Add(promise);
    }
    pipeline.Wait();
    // Was Task.WhenAll(promises).Wait() with a 'return Task.CompletedTask' tail:
    // sync-over-async blocks a thread-pool thread and risks deadlock/starvation.
    // The method is now async and awaits instead; the Task return type is unchanged.
    await Task.WhenAll(promises);
    sw.Stop();

    int consumerCount = ActiveGrainCount(typeof(StreamLifecycleConsumerGrain).FullName);
    Assert.Equal(activeConsumerGrains + (numStreams * numConsumers), consumerCount); // "The right number of Consumer grains are active"
    int producerCount = ActiveGrainCount(typeof(StreamLifecycleProducerGrain).FullName);
    Assert.Equal(activeProducerGrains + (numStreams * numProducers), producerCount); // "The right number of Producer grains are active"
    int pubSubCount = ActiveGrainCount(typeof(PubSubRendezvousGrain).FullName);
    Assert.Equal(streamIds.Length, pubSubCount); // "Final PubSub count -- no more started"

    TimeSpan elapsed = sw.Elapsed;
    int totalSubscriptions = numStreams * numConsumers;
    double rps = totalSubscriptions / elapsed.TotalSeconds;
    output.WriteLine("Subscriptions-per-second = {0} during period {1}", rps, elapsed);
    Assert.NotEqual(0.0, rps); // "RPS greater than zero"
}