/// <summary>
/// Drains and invokes all queued messages for a single local object, one message
/// at a time, until the object's queue is empty.
/// </summary>
/// <param name="objectData">State (message queue, running flag, invoker, target reference) for the local object being pumped.</param>
private async Task LocalObjectMessagePumpAsync(LocalObjectData objectData)
{
    while (true)
    {
        try
        {
            Message message;

            // Dequeue under the lock; when the queue is empty, clear Running
            // inside the same critical section so a concurrent enqueuer can
            // reliably decide whether it needs to restart the pump.
            lock (objectData.Messages)
            {
                if (objectData.Messages.Count == 0)
                {
                    objectData.Running = false;
                    break;
                }

                message = objectData.Messages.Dequeue();
            }

            if (message.IsExpired)
            {
                this.messagingTrace.OnDropExpiredMessage(message, MessagingStatisticsGroup.Phase.Invoke);
                continue;
            }

            // Restore the caller's request context before invoking user code.
            RequestContextExtensions.Import(message.RequestContextData);

            InvokeMethodRequest request = null;
            try
            {
                request = (InvokeMethodRequest)message.BodyObject;
            }
            catch (Exception deserializationException)
            {
                if (this.logger.IsEnabled(LogLevel.Warning))
                {
                    // Fix: pass the exception as the first argument so the logging
                    // infrastructure records it as the event's exception rather than
                    // a formatted message parameter (matches the newer pump's style).
                    this.logger.LogWarning(
                        deserializationException,
                        "Exception during message body deserialization in " + nameof(LocalObjectMessagePumpAsync) + " for message: {Message}",
                        message);
                }

                // Report the deserialization failure back to the caller and move on.
                this.runtimeClient.SendResponse(message, Response.ExceptionResponse(deserializationException));
                continue;
            }

            var targetOb = (IAddressable)objectData.LocalObject.Target;
            object resultObject = null;
            Exception caught = null;
            try
            {
                // This scope invokes user code, so exceptions caught here are
                // treated as application errors (reported/propagated below),
                // not runtime errors.
                var resultPromise = objectData.Invoker.Invoke(targetOb, request);
                if (resultPromise != null) // it will be null for one way messages
                {
                    resultObject = await resultPromise;
                }
            }
            catch (Exception exc)
            {
                // The exception needs to be reported in the log or propagated back to the caller.
                caught = exc;
            }

            if (caught != null)
            {
                this.ReportException(message, caught);
            }
            else if (message.Direction != Message.Directions.OneWay)
            {
                this.SendResponseAsync(message, resultObject);
            }
        }
        catch (Exception outerException)
        {
            // Ignore, keep looping: the pump must survive any per-message failure.
            // Fix: exception-first LogWarning overload instead of a format argument.
            this.logger.LogWarning(
                outerException,
                "Exception in " + nameof(LocalObjectMessagePumpAsync));
        }
        finally
        {
            // Always clear the imported request context so it cannot leak into
            // the next message's invocation.
            RequestContext.Clear();
        }
    }
}
/// <summary>
/// Forwards a batch of items to the backing queue adapter, carrying the current
/// request context data along with the batch.
/// </summary>
public Task OnNextBatchAsync(IEnumerable<T> batch, StreamSequenceToken token)
{
    var contextData = RequestContextExtensions.Export(this.serializationManager);
    var streamId = this.stream.StreamId;
    return this.queueAdapter.QueueMessageBatchAsync(streamId.Guid, streamId.Namespace, batch, token, contextData);
}
/// <summary>
/// Drains and invokes all queued messages for this local object, one message at a
/// time, until the queue is empty.
/// </summary>
private async Task LocalObjectMessagePumpAsync()
{
    while (true)
    {
        try
        {
            Message message;

            // Dequeue under the lock; when the queue is empty, clear Running
            // inside the same critical section so a concurrent enqueuer can
            // reliably decide whether it needs to restart the pump.
            lock (this.Messages)
            {
                if (this.Messages.Count == 0)
                {
                    this.Running = false;
                    break;
                }

                message = this.Messages.Dequeue();
            }

            if (message.IsExpired)
            {
                _manager.messagingTrace.OnDropExpiredMessage(message, MessagingStatisticsGroup.Phase.Invoke);
                continue;
            }

            // Restore the caller's request context before invoking user code.
            RequestContextExtensions.Import(message.RequestContextData);

            IInvokable request = null;
            try
            {
                request = (IInvokable)message.BodyObject;
            }
            catch (Exception deserializationException)
            {
                if (_manager.logger.IsEnabled(LogLevel.Warning))
                {
                    _manager.logger.LogWarning(
                        deserializationException,
                        "Exception during message body deserialization in " + nameof(LocalObjectMessagePumpAsync) + " for message: {Message}",
                        message);
                }

                // Report the deserialization failure back to the caller and move on.
                _manager.runtimeClient.SendResponse(message, Response.FromException(deserializationException));
                continue;
            }

            try
            {
                // Bind the request to this object and invoke it; the response is
                // only sent back for request/response (non-one-way) messages.
                request.SetTarget(this);
                var response = await request.Invoke();
                if (message.Direction != Message.Directions.OneWay)
                {
                    this.SendResponseAsync(message, response);
                }
            }
            catch (Exception exc)
            {
                // Invocation failures are handed to ReportException (logged
                // and/or propagated back to the caller).
                this.ReportException(message, exc);
            }
        }
        catch (Exception outerException)
        {
            // ignore, keep looping.
            _manager.logger.LogWarning(
                outerException,
                "Exception in " + nameof(LocalObjectMessagePumpAsync));
        }
        finally
        {
            // Always clear the imported request context so it cannot leak into
            // the next message's invocation.
            RequestContext.Clear();
        }
    }
}
/// <summary>
/// End-to-end exercise of an SQS queue adapter: sends NumBatches batches spread
/// across two streams while reader tasks drain every queue into its cache, then
/// verifies all batches arrived and that the caches can be enumerated both from
/// the start and from a mid-cache (10th) token.
/// </summary>
private async Task SendAndReceiveFromQueueAdapter(IQueueAdapterFactory adapterFactory, IProviderConfiguration config)
{
    IQueueAdapter adapter = await adapterFactory.CreateAdapter();
    IQueueAdapterCache cache = adapterFactory.GetQueueAdapterCache();

    // Create receiver per queue
    IStreamQueueMapper mapper = adapterFactory.GetStreamQueueMapper();
    Dictionary<QueueId, IQueueAdapterReceiver> receivers = mapper.GetAllQueues().ToDictionary(queueId => queueId, adapter.CreateReceiver);
    Dictionary<QueueId, IQueueCache> caches = mapper.GetAllQueues().ToDictionary(queueId => queueId, cache.CreateQueueCache);

    await Task.WhenAll(receivers.Values.Select(receiver => receiver.Initialize(TimeSpan.FromSeconds(5))));

    // test using 2 streams
    Guid streamId1 = Guid.NewGuid();
    Guid streamId2 = Guid.NewGuid();

    int receivedBatches = 0;
    var streamsPerQueue = new ConcurrentDictionary<QueueId, HashSet<IStreamIdentity>>();

    // reader threads (at most 2 active queues because only two streams)
    var work = new List<Task>();
    foreach (KeyValuePair<QueueId, IQueueAdapterReceiver> receiverKvp in receivers)
    {
        QueueId queueId = receiverKvp.Key;
        var receiver = receiverKvp.Value;
        var qCache = caches[queueId];
        Task task = Task.Factory.StartNew(() =>
        {
            // NOTE(review): receivedBatches is updated with Interlocked.Add below
            // but read here without a volatile/interlocked read; a worker may see
            // a stale count and spin one extra iteration — benign in this test.
            while (receivedBatches < NumBatches)
            {
                // NOTE(review): .Result blocks the worker thread; tolerable in
                // this test-only polling loop.
                var messages = receiver.GetQueueMessagesAsync(SQSStorage.MAX_NUMBER_OF_MESSAGE_TO_PEAK).Result.ToArray();
                if (!messages.Any())
                {
                    continue;
                }

                foreach (var message in messages.Cast<SQSBatchContainer>())
                {
                    // Record which streams were observed on this queue, for the
                    // cache-enumeration phase below.
                    streamsPerQueue.AddOrUpdate(queueId,
                        id => new HashSet<IStreamIdentity> { new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()) },
                        (id, set) =>
                        {
                            set.Add(new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()));
                            return(set);
                        });
                    output.WriteLine("Queue {0} received message on stream {1}", queueId, message.StreamGuid);
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<int>().Count()); // "Half the events were ints"
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<string>().Count()); // "Half the events were strings"
                }

                Interlocked.Add(ref receivedBatches, messages.Length);
                qCache.AddToCache(messages);
            }
        });
        work.Add(task);
    }

    // send events: alternate batches between the two streams.
    List<object> events = CreateEvents(NumMessagesPerBatch);
    work.Add(Task.Factory.StartNew(() => Enumerable.Range(0, NumBatches)
        .Select(i => i % 2 == 0 ? streamId1 : streamId2)
        .ToList()
        .ForEach(streamId => adapter.QueueMessageBatchAsync(streamId, streamId.ToString(),
            events.Take(NumMessagesPerBatch).ToArray(), null,
            RequestContextExtensions.Export(this.fixture.SerializationManager)).Wait())));
    await Task.WhenAll(work);

    // Make sure we got back everything we sent
    Assert.Equal(NumBatches, receivedBatches);

    // check to see if all the events are in the cache and we can enumerate through them
    StreamSequenceToken firstInCache = new EventSequenceTokenV2(0);
    foreach (KeyValuePair<QueueId, HashSet<IStreamIdentity>> kvp in streamsPerQueue)
    {
        var receiver = receivers[kvp.Key];
        var qCache = caches[kvp.Key];
        foreach (IStreamIdentity streamGuid in kvp.Value)
        {
            // read all messages in cache for stream
            IQueueCacheCursor cursor = qCache.GetCacheCursor(streamGuid, firstInCache);
            int messageCount = 0;
            StreamSequenceToken tenthInCache = null;
            StreamSequenceToken lastToken = firstInCache;
            while (cursor.MoveNext())
            {
                Exception ex;
                messageCount++;
                IBatchContainer batch = cursor.GetCurrent(out ex);
                output.WriteLine("Token: {0}", batch.SequenceToken);
                // Tokens must be non-decreasing: the cache preserves arrival order.
                Assert.True(batch.SequenceToken.CompareTo(lastToken) >= 0, $"order check for event {messageCount}");
                lastToken = batch.SequenceToken;
                if (messageCount == 10)
                {
                    // Remember the 10th token for the partial-enumeration check below.
                    tenthInCache = batch.SequenceToken;
                }
            }

            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            Assert.Equal(NumBatches / 2, messageCount);
            Assert.NotNull(tenthInCache);

            // read all messages from the 10th
            cursor = qCache.GetCacheCursor(streamGuid, tenthInCache);
            messageCount = 0;
            while (cursor.MoveNext())
            {
                messageCount++;
            }
            output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
            const int expected = NumBatches / 2 - 10 + 1; // all except the first 10, including the 10th (10 + 1)
            Assert.Equal(expected, messageCount);
        }
    }
}
/// <summary>
/// Forwards a single item to the backing queue adapter, carrying the current
/// request context data along with the item.
/// </summary>
public Task OnNextAsync(T item, StreamSequenceToken token)
{
    var contextData = RequestContextExtensions.Export(this.serializationManager);
    var streamId = this.stream.StreamId;
    return this.queueAdapter.QueueMessageAsync(streamId.Guid, streamId.Namespace, item, token, contextData);
}
/// <summary>
/// Verifies that the E2E tracing activity id held in the RequestContext is exported
/// into a message's RequestContextData: absent when no id (or Guid.Empty) is set,
/// present and round-tripped unchanged when a real Guid is set.
/// </summary>
public void RequestContext_ActivityId_ExportToMessage()
{
    Guid activityId = Guid.NewGuid();
    Guid activityId2 = Guid.NewGuid();
    Guid nullActivityId = Guid.Empty;

    // Exports the current RequestContext into a fresh message and copies any
    // resulting context entries into the shared 'headers' collection.
    // (Replaces four copy-pasted inline versions; the fourth copy was missing
    // the null guard the other three had — behavior is unchanged because a set
    // activity id always produces non-null context data.)
    void ExportContextToHeaders()
    {
        Message msg = new Message();
        msg.RequestContextData = RequestContextExtensions.Export(this.fixture.SerializationManager);
        if (msg.RequestContextData != null)
        {
            foreach (var kvp in msg.RequestContextData)
            {
                headers.Add(kvp.Key, kvp.Value);
            }
        }
    }

    // 1) No activity id set: the tracing header must not be exported.
    ExportContextToHeaders();
    Assert.False(headers.ContainsKey(RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER),
        "ActivityId should not be be present " + headers.ToStrings(separator: ","));
    TestCleanup();

    // 2) A real activity id is set: it must be exported and left unchanged locally.
    RequestContextTestUtils.SetActivityId(activityId);
    ExportContextToHeaders();
    Assert.True(headers.ContainsKey(RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER),
        "ActivityId #1 should be present " + headers.ToStrings(separator: ","));
    object result = headers[RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER];
    Assert.NotNull(result); // ActivityId #1 should not be null
    Assert.Equal(activityId, result); // "E2E ActivityId #1 not propagated correctly"
    Assert.Equal(activityId, RequestContextTestUtils.GetActivityId()); // "Original E2E ActivityId #1 should not have changed"
    TestCleanup();

    // 3) Guid.Empty counts as "no activity id": the header must not be exported.
    RequestContextTestUtils.SetActivityId(nullActivityId);
    ExportContextToHeaders();
    Assert.False(headers.ContainsKey(RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER),
        "Null ActivityId should not be present " + headers.ToStrings(separator: ","));
    TestCleanup();

    // 4) A second real activity id round-trips the same way.
    RequestContextTestUtils.SetActivityId(activityId2);
    ExportContextToHeaders();
    Assert.True(headers.ContainsKey(RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER),
        "ActivityId #2 should be present " + headers.ToStrings(separator: ","));
    result = headers[RequestContext.E2_E_TRACING_ACTIVITY_ID_HEADER];
    Assert.NotNull(result); // ActivityId #2 should not be null
    Assert.Equal(activityId2, result); // "E2E ActivityId #2 not propagated correctly"
    Assert.Equal(activityId2, RequestContextTestUtils.GetActivityId()); // "Original E2E ActivityId #2 should not have changed"
    TestCleanup();
}
/// <summary>
/// Forwards a batch of items to the backing queue adapter, attaching the current
/// request context data.
/// </summary>
public Task OnNextBatchAsync(IEnumerable<T> batch, StreamSequenceToken token) =>
    this.queueAdapter.QueueMessageBatchAsync(
        this.stream.StreamId,
        batch,
        token,
        RequestContextExtensions.Export(this.deepCopier));
/// <summary>
/// Forwards a single item to the backing queue adapter, attaching the current
/// request context data.
/// </summary>
public Task OnNextAsync(T item, StreamSequenceToken token) =>
    this.queueAdapter.QueueMessageAsync(
        this.stream.StreamId,
        item,
        token,
        RequestContextExtensions.Export(this.deepCopier));
/// <inheritdoc />
/// <remarks>
/// Generates up to <paramref name="maxCount"/> synthetic events for this stream,
/// stamping each EventData with partition key, offset, sequence number, and enqueue
/// time. Returns false (with null events) when production is disabled; otherwise
/// returns whether any events were generated.
/// </remarks>
public bool TryReadEvents(int maxCount, out IEnumerable<EventData> events)
{
    if (!this.ShouldProduce)
    {
        events = null;
        return false;
    }

    int count = maxCount;
    // Presize the list: when producing, exactly maxCount events are emitted.
    List<EventData> eventDataList = new List<EventData>(Math.Max(0, maxCount));
    while (count-- > 0)
    {
        this.SequenceNumberCounter.Increment();
        var eventData = EventHubBatchContainer.ToEventData<int>(
            this.serializationManager,
            this.StreamId.Guid,
            this.StreamId.Namespace,
            this.GenerateEvent(this.SequenceNumberCounter.Value),
            RequestContextExtensions.Export(this.serializationManager));

        //set partition key
        eventData.SetPartitionKey(this.StreamId.Guid.ToString());

        //set offset: stream guid + timestamp
        DateTime now = DateTime.UtcNow;
        var offSet = this.StreamId.Guid.ToString() + now.ToString();
        eventData.SetOffset(offSet);

        //set sequence number
        eventData.SetSequenceNumber(this.SequenceNumberCounter.Value);
        //set enqueue time
        eventData.SetEnqueuedTimeUtc(now);
        eventDataList.Add(eventData);

        // Fixed typo in log message: "SequemceNumber" -> "SequenceNumber".
        this.logger.Info($"Generate data of SequenceNumber {SequenceNumberCounter.Value} for stream {this.StreamId.Namespace}-{this.StreamId.Guid}");
    }

    events = eventDataList;
    return eventDataList.Count > 0;
}
/// <summary>
/// Core subscription logic shared by the observer and batch-observer overloads:
/// validates arguments, optimistically registers the handle locally, then registers
/// the consumer with the pub/sub system (undoing the local registration on failure).
/// </summary>
/// <param name="observer">Item-at-a-time observer; must be an object reference, not a grain reference.</param>
/// <param name="batchObserver">Batch observer; must be an object reference, not a grain reference.</param>
/// <param name="token">Optional token to subscribe from; only valid on rewindable streams.</param>
/// <param name="filterData">Optional filter data passed through to the pub/sub registration.</param>
private async Task<StreamSubscriptionHandle<T>> SubscribeAsyncImpl(
    IAsyncObserver<T> observer,
    IAsyncBatchObserver<T> batchObserver,
    StreamSequenceToken token,
    string filterData = null)
{
    // NOTE(review): this throws ArgumentNullException for a NON-null token;
    // ArgumentException would describe the failure better, but changing the
    // exception type could break callers that catch ArgumentNullException.
    if (token != null && !IsRewindable)
    {
        throw new ArgumentNullException("token", "Passing a non-null token to a non-rewindable IAsyncObservable.");
    }

    if (observer is GrainReference)
    {
        throw new ArgumentException("On-behalf subscription via grain references is not supported. Only passing of object references is allowed.", nameof(observer));
    }

    if (batchObserver is GrainReference)
    {
        throw new ArgumentException("On-behalf subscription via grain references is not supported. Only passing of object references is allowed.", nameof(batchObserver));
    }

    // Prevent the current call chain's request context from flowing into the
    // subscription calls below.
    _ = RequestContextExtensions.SuppressCurrentCallChainFlow();

    if (logger.IsEnabled(LogLevel.Debug))
    {
        logger.Debug("Subscribe Token={Token}", token);
    }

    await BindExtensionLazy();

    if (logger.IsEnabled(LogLevel.Debug))
    {
        logger.Debug("Subscribe - Connecting to Rendezvous {0} My GrainRef={1} Token={2}", pubSub, myGrainReference, token);
    }

    GuidId subscriptionId = pubSub.CreateSubscriptionId(stream.InternalStreamId, myGrainReference);

    // Optimistic Concurrency:
    // In general, we should first register the subscription with the pubsub (pubSub.RegisterConsumer)
    // and only if it succeeds store it locally (myExtension.SetObserver).
    // Basically, those 2 operations should be done as one atomic transaction - either both or none and isolated from concurrent reads.
    // BUT: there is a distributed race here: the first msg may arrive before the call is awaited
    // (since the pubsub notifies the producer that may immediately produce)
    // and will thus not find the subscription handle in the extension, basically violating "isolation".
    // Therefore, we employ Optimistic Concurrency Control here to guarantee isolation:
    // we optimistically store subscriptionId in the handle first before calling pubSub.RegisterConsumer
    // and undo it in the case of failure.
    // There is no problem with that we call myExtension.SetObserver too early before the handle is registered in pub sub,
    // since this subscriptionId is unique (random Guid) and no one knows it anyway, unless successfully subscribed in the pubsub.
    var subriptionHandle = myExtension.SetObserver(subscriptionId, stream, observer, batchObserver, token, filterData);
    try
    {
        await pubSub.RegisterConsumer(subscriptionId, stream.InternalStreamId, myGrainReference, filterData);
        return(subriptionHandle);
    }
    catch (Exception)
    {
        // Undo the previous call myExtension.SetObserver.
        myExtension.RemoveObserver(subscriptionId);
        throw;
    }
}