private async Task scheduleIncoming(Envelope[] envelopes)
{
    try
    {
        using (var session = _store.LightweightSession())
        {
            foreach (var envelope in envelopes)
            {
                session.ScheduleExecution(_tables.Incoming, envelope);
            }

            await session.SaveChangesAsync();
            Scheduled.Set();
        }
    }
    catch (Exception e)
    {
        _logger.LogException(e);
        foreach (var envelope in envelopes)
        {
            ScheduleExecution(envelope);
        }
    }
}
private async Task ConsumeAsync(IListeningWorkerQueue callback)
{
    await foreach (Message message in _consumer.Messages(_cancellation))
    {
        Envelope envelope;
        try
        {
            envelope = _protocol.ReadEnvelope(new DotPulsarMessage(message.Data, message.Properties));
        }
        catch (Exception ex)
        {
            _logger.LogException(ex, message: $"Error trying to map an incoming Pulsar {_endpoint.Topic} Topic message to an Envelope. See the Dead Letter Queue");
            continue;
        }

        try
        {
            await callback.Received(Address, envelope);
            await _consumer.Acknowledge(message, _cancellation);
        }
        catch (Exception e)
        {
            // DotPulsar doesn't currently support Nack, so this will likely just keep retrying the message for now.
            _logger.LogException(e, envelope.Id, "Error trying to receive a message from " + Address);
        }
    }
}
public Task SerializationFailure(OutgoingMessageBatch outgoing)
{
    _logger.OutgoingBatchFailed(outgoing);

    // Can't really happen now, but what the heck.
    _logger.LogException(new Exception("Serialization failure with outgoing envelopes " +
                                       outgoing.Messages.Select(x => x.ToString()).Join(", ")));

    return Task.CompletedTask;
}
private async Task<int> recoverFrom(Uri destination, IDocumentSession session)
{
    try
    {
        var channel = _channels.GetOrBuildChannel(destination);
        if (channel.Latched) return 0;

        var outgoing = await session.Connection.CreateCommand(_findOutgoingEnvelopesSql)
            .With("destination", destination.ToString(), NpgsqlDbType.Varchar)
            .ExecuteToEnvelopes();

        var filtered = filterExpired(session, outgoing);

        // The channel might easily have latched in the time between starting
        // and having the data fetched; that was reproducible in (contrived) testing.
        if (channel.Latched || !filtered.Any()) return 0;

        session.MarkOwnership(_marker.Outgoing, _marker.CurrentNodeId, filtered);
        await session.SaveChangesAsync();

        _logger.RecoveredOutgoing(filtered);

        foreach (var envelope in filtered)
        {
            try
            {
                await channel.QuickSend(envelope);
            }
            catch (Exception e)
            {
                _logger.LogException(e, message: $"Unable to enqueue {envelope} for sending");
            }
        }

        return outgoing.Count();
    }
    catch (UnknownTransportException e)
    {
        _logger.LogException(e, message: $"Could not resolve a channel for {destination}");

        await DeleteFromOutgoingEnvelopes(session, TransportConstants.AnyNode, destination);
        await session.SaveChangesAsync();

        return 0;
    }
}
public async Task MarkComplete()
{
    try
    {
        await _persistor.DeleteIncomingEnvelope(_envelope);
    }
    catch (Exception e)
    {
        _logger.LogException(e);
        _retries.DeleteIncoming(_envelope);
    }
}
public void Start(ISenderCallback callback)
{
    _callback = callback;

    _serialization = new ActionBlock<Envelope>(e =>
    {
        try
        {
            e.EnsureData();
            _sending.Post(e);
        }
        catch (Exception exception)
        {
            _logger.LogException(exception, e.Id, "Serialization Failure!");
        }
    });

    // The variance here should be in constructing the sending & buffer blocks
    if (_endpoint.Uri.TopicName.IsEmpty())
    {
        _sender = _endpoint.TokenProvider != null
            ? new MessageSender(_endpoint.ConnectionString, _endpoint.Uri.QueueName,
                _endpoint.TokenProvider, _endpoint.TransportType, _endpoint.RetryPolicy)
            : new MessageSender(_endpoint.ConnectionString, _endpoint.Uri.QueueName, _endpoint.RetryPolicy);

        _sending = new ActionBlock<Envelope>(sendBySession, new ExecutionDataflowBlockOptions
        {
            CancellationToken = _cancellation
        });
    }
    else if (_endpoint.Uri.IsMessageSpecificTopic())
    {
        _sending = new ActionBlock<Envelope>(sendByMessageTopicAndSession, new ExecutionDataflowBlockOptions
        {
            CancellationToken = _cancellation
        });
    }
    else
    {
        _sender = _endpoint.TokenProvider != null
            ? new TopicClient(_endpoint.ConnectionString, _endpoint.Uri.TopicName,
                _endpoint.TokenProvider, _endpoint.TransportType, _endpoint.RetryPolicy)
            : new TopicClient(_endpoint.ConnectionString, _endpoint.Uri.TopicName, _endpoint.RetryPolicy);

        _sending = new ActionBlock<Envelope>(sendBySession, new ExecutionDataflowBlockOptions
        {
            CancellationToken = _cancellation
        });
    }
}
public override async Task Successful(OutgoingMessageBatch outgoing)
{
    try
    {
        await _persistor.DeleteOutgoingEnvelopes(outgoing.Messages.ToArray());
    }
    catch (Exception e)
    {
        _logger.LogException(e, message: "Error trying to delete outgoing envelopes after a successful batch send");
        foreach (var envelope in outgoing.Messages)
        {
            _persistenceRetries.DeleteOutgoing(envelope);
        }
    }
}
public LightweightWorkerQueue(Endpoint endpoint, ITransportLogger logger, IHandlerPipeline pipeline,
    AdvancedSettings settings)
{
    _logger = logger;
    _settings = settings;
    Pipeline = pipeline;

    _scheduler = new InMemoryScheduledJobProcessor(this);

    endpoint.ExecutionOptions.CancellationToken = settings.Cancellation;

    _receiver = new ActionBlock<Envelope>(async envelope =>
    {
        try
        {
            if (envelope.ContentType.IsEmpty())
            {
                envelope.ContentType = "application/json";
            }

            await Pipeline.Invoke(envelope);
        }
        catch (Exception e)
        {
            // This *should* never happen, but of course it will
            logger.LogException(e);
        }
    }, endpoint.ExecutionOptions);
}
public async Task ConnectAndLockCurrentNode(ITransportLogger logger, int nodeId)
{
    if (Connection != null)
    {
        try
        {
            Connection.Close();
            Connection.Dispose();
            Connection = null;
        }
        catch (Exception e)
        {
            logger.LogException(e);
        }
    }

    try
    {
        Connection = _settings.CreateConnection();

        await Connection.OpenAsync(_cancellation);

        await _settings.GetGlobalLock(Connection, nodeId, _cancellation, Transaction);
    }
    catch (Exception)
    {
        Connection?.Dispose();
        Connection = null;

        throw;
    }
}
public DurableWorkerQueue(Endpoint endpoint, IHandlerPipeline pipeline, AdvancedSettings settings, IEnvelopePersistence persistence, ITransportLogger logger) { _settings = settings; _persistence = persistence; _logger = logger; endpoint.ExecutionOptions.CancellationToken = settings.Cancellation; _receiver = new ActionBlock <Envelope>(async envelope => { try { envelope.ContentType = envelope.ContentType ?? "application/json"; await pipeline.Invoke(envelope, this); } catch (Exception e) { // This *should* never happen, but of course it will logger.LogException(e); } }, endpoint.ExecutionOptions); _policy = Policy .Handle <Exception>() .WaitAndRetryForeverAsync(i => (i * 100).Milliseconds() , (e, timeSpan) => { _logger.LogException(e); }); }
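The same Polly policy is built in several of the durable components below. A minimal standalone sketch of that linear-backoff retry follows, assuming only the Polly package; TimeSpan.FromMilliseconds stands in for the (i * 100).Milliseconds() extension used above, and the wrapped persistence call is only an illustrative stand-in.

// Sketch of the linear-backoff, retry-forever policy used in these constructors (assumes Polly).
var policy = Policy
    .Handle<Exception>()
    .WaitAndRetryForeverAsync(
        attempt => TimeSpan.FromMilliseconds(attempt * 100),      // 100ms, 200ms, 300ms, ...
        (exception, delay) => logger.LogException(exception));    // log every failed attempt

// Hypothetical usage: wrap a persistence call so transient failures are retried indefinitely.
await policy.ExecuteAsync(() => persistence.DeleteIncomingEnvelope(envelope));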
public override void HandleBasicDeliver(string consumerTag, ulong deliveryTag, bool redelivered,
    string exchange, string routingKey, IBasicProperties properties, byte[] body)
{
    if (_latched)
    {
        _channel.BasicReject(deliveryTag, true);
        return;
    }

    Envelope envelope;
    try
    {
        envelope = Mapper.ReadEnvelope(body, properties);
    }
    catch (Exception e)
    {
        _logger.LogException(e, message: "Error trying to map an incoming RabbitMQ message to an Envelope");
        _channel.BasicAck(deliveryTag, false);

        return;
    }

    if (envelope.IsPing())
    {
        _channel.BasicAck(deliveryTag, false);
        return;
    }

    // THIS NEEDS TO BE VARIABLE
    executeEnvelope(deliveryTag, envelope);
}
private async Task sendBySession(Envelope envelope)
{
    try
    {
        var message = _protocol.WriteFromEnvelope(envelope);
        message.SessionId = Guid.NewGuid().ToString();

        if (envelope.IsDelayed(DateTime.UtcNow))
        {
            await _sender.ScheduleMessageAsync(message, envelope.ExecutionTime.Value);
        }
        else
        {
            await _sender.SendAsync(message);
        }

        await _callback.Successful(envelope);
    }
    catch (Exception e)
    {
        try
        {
            await _callback.ProcessingFailure(envelope, e);
        }
        catch (Exception exception)
        {
            _logger.LogException(exception);
        }
    }
}
private static async Task receive(ITransportLogger logger, Stream stream, IListeningWorkerQueue callback,
    Envelope[] messages, Uri uri)
{
    // Just a ping
    if (messages.Any() && messages.First().IsPing())
    {
        await stream.SendBuffer(ReceivedBuffer);

        // We aren't gonna use this in this case
        var ack = await stream.ReadExpectedBuffer(AcknowledgedBuffer);

        return;
    }

    try
    {
        await callback.Received(uri, messages);
        await stream.SendBuffer(ReceivedBuffer);
        await stream.ReadExpectedBuffer(AcknowledgedBuffer);
    }
    catch (Exception e)
    {
        logger.LogException(e);
        await stream.SendBuffer(ProcessingFailureBuffer);
    }
}
private async Task send(Envelope envelope)
{
    if (State == AgentState.Disconnected)
    {
        throw new InvalidOperationException($"The RabbitMQ agent for {Destination} is disconnected");
    }

    try
    {
        var props = Channel.CreateBasicProperties();
        props.Persistent = _isDurable;

        _protocol.WriteFromEnvelope(envelope, props);
        Channel.BasicPublish(_exchangeName, _key, props, envelope.Data);

        await _callback.Successful(envelope);
    }
    catch (Exception e)
    {
        try
        {
            await _callback.ProcessingFailure(envelope, e);
        }
        catch (Exception exception)
        {
            _logger.LogException(exception);
        }
    }
}
private async Task scheduleIncoming(Envelope[] envelopes)
{
    try
    {
        await _persistor.ScheduleExecution(envelopes);
        Scheduled.Set();
    }
    catch (Exception e)
    {
        _logger.LogException(e);
        foreach (var envelope in envelopes)
        {
            ScheduleExecution(envelope);
        }
    }
}
private async Task sendWithCallbackHandling(Envelope envelope)
{
    try
    {
        await _sender.Send(envelope);
    }
    catch (Exception e)
    {
        try
        {
            await ProcessingFailure(envelope, e);
        }
        catch (Exception exception)
        {
            _logger.LogException(exception);
        }
    }
}
private void batchSendFailed(OutgoingMessageBatch batch, Exception exception)
{
    try
    {
        _callback.ProcessingFailure(batch, exception);
    }
    catch (Exception e)
    {
        _logger.LogException(e);
    }
}
public static async Task Receive(ITransportLogger logger, Stream stream, IListeningWorkerQueue callback, Uri uri)
{
    Envelope[] messages = null;

    try
    {
        var lengthBytes = await stream.ReadBytesAsync(sizeof(int));
        var length = BitConverter.ToInt32(lengthBytes, 0);
        if (length == 0)
        {
            return;
        }

        var bytes = await stream.ReadBytesAsync(length);
        messages = Envelope.ReadMany(bytes);
    }
    catch (Exception e)
    {
        logger.LogException(new MessageFailureException(messages, e));
        await stream.SendBuffer(SerializationFailureBuffer);

        return;
    }

    try
    {
        await receive(logger, stream, callback, messages, uri);
    }
    catch (Exception ex)
    {
        logger.LogException(new MessageFailureException(messages, ex));
        await stream.SendBuffer(ProcessingFailureBuffer);
    }
}
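The receive method above assumes a very simple wire framing: a 4-byte length prefix followed by that many bytes of serialized envelopes. The next sketch reads one such frame using only plain Stream APIs; ReadExactlyAsync requires .NET 7+ (on older targets, loop over ReadAsync), and readFrame itself is a hypothetical helper, not part of the original code, where ReadBytesAsync is a Jasper extension method.

// Sketch only: reads one length-prefixed frame in the shape the Receive method above expects.
private static async Task<byte[]> readFrame(Stream stream)
{
    var lengthBytes = new byte[sizeof(int)];
    await stream.ReadExactlyAsync(lengthBytes);            // 4-byte little-endian length prefix

    var length = BitConverter.ToInt32(lengthBytes, 0);
    if (length == 0) return Array.Empty<byte>();           // an empty frame carries no envelopes

    var payload = new byte[length];
    await stream.ReadExactlyAsync(payload);                // the serialized envelope batch

    return payload;
}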
public DurableSendingAgent(ISender sender, AdvancedSettings settings, ITransportLogger logger,
    IMessageLogger messageLogger, IEnvelopePersistence persistence, Endpoint endpoint)
    : base(logger, messageLogger, sender, settings, endpoint)
{
    _logger = logger;
    _persistence = persistence;

    _policy = Policy
        .Handle<Exception>()
        .WaitAndRetryForeverAsync(i => (i * 100).Milliseconds(),
            (e, timeSpan) =>
            {
                _logger.LogException(e, message: "Failed while trying to enqueue a message batch for retries");
            });
}
public DurableCallback(Envelope envelope, IWorkerQueue queue, IEnvelopePersistence persistence,
    ITransportLogger logger)
{
    _envelope = envelope;
    _queue = queue;
    _persistence = persistence;
    _logger = logger;

    _policy = Policy
        .Handle<Exception>()
        .WaitAndRetryForeverAsync(i => (i * 100).Milliseconds(),
            (e, timeSpan) => { _logger.LogException(e); });
}
public void Start(ISenderCallback callback)
{
    _agent.Start();

    _callback = callback;

    _serialization = new ActionBlock<Envelope>(e =>
    {
        try
        {
            e.EnsureData();
            _sending.Post(e);
        }
        catch (Exception exception)
        {
            _logger.LogException(exception, e.Id, "Serialization Failure!");
        }
    });

    _sending = new ActionBlock<Envelope>(send, new ExecutionDataflowBlockOptions
    {
        CancellationToken = _cancellation
    });
}
public DurableRetryAgent(ISender sender, JasperOptions options, ITransportLogger logger,
    IEnvelopePersistence persistence) : base(sender, options.Retries)
{
    _options = options;
    _logger = logger;
    _persistence = persistence;

    _policy = Policy
        .Handle<Exception>()
        .WaitAndRetryForeverAsync(i => (i * 100).Milliseconds(),
            (e, timeSpan) =>
            {
                _logger.LogException(e, message: "Failed while trying to enqueue a message batch for retries");
            });
}
public DurableSendingAgent(Uri destination, ISender sender, ITransportLogger logger, JasperOptions options,
    IEnvelopePersistence persistence)
    : base(destination, sender, logger, options, new DurableRetryAgent(sender, options, logger, persistence))
{
    _logger = logger;
    _options = options;
    _persistence = persistence;

    _policy = Policy
        .Handle<Exception>()
        .WaitAndRetryForeverAsync(i => (i * 100).Milliseconds(),
            (e, timeSpan) => { _logger.LogException(e); });
}
private async Task ConsumeAsync(IListeningWorkerQueue callback)
{
    while (!_cancellation.IsCancellationRequested)
    {
        ConsumeResult<byte[], byte[]> message;
        try
        {
            message = await Task.Run(() => _consumer.Consume(), _cancellation);
        }
        catch (Confluent.Kafka.ConsumeException cex)
        {
            if (cex.Error.Code == ErrorCode.PolicyViolation)
            {
                throw;
            }

            continue;
        }
        catch (Exception ex)
        {
            _logger.LogException(ex, message: $"Error consuming message from Kafka topic {_endpoint.TopicName}");
            continue;
        }

        Envelope envelope;
        try
        {
            envelope = _protocol.ReadEnvelope(message.Message);
        }
        catch (Exception ex)
        {
            _logger.LogException(ex, message: $"Error trying to map an incoming Kafka {_endpoint.TopicName} Topic message to an Envelope. See the Dead Letter Queue");
            continue;
        }

        try
        {
            await callback.Received(Address, envelope);
            _consumer.Commit(message);
        }
        catch (Exception e)
        {
            // TODO -- Got to either discard this or defer it back to the queue
            _logger.LogException(e, envelope.Id, "Error trying to receive a message from " + Address);
        }
    }
}
public override async Task EnqueueForRetry(OutgoingMessageBatch batch)
{
    var expiredInQueue = Queued.Where(x => x.IsExpired()).ToArray();
    var expiredInBatch = batch.Messages.Where(x => x.IsExpired()).ToArray();

    var expired = expiredInBatch.Concat(expiredInQueue).ToArray();
    var all = Queued.Where(x => !expiredInQueue.Contains(x))
        .Concat(batch.Messages.Where(x => !expiredInBatch.Contains(x)))
        .ToList();

    var reassigned = new Envelope[0];
    if (all.Count > _settings.MaximumEnvelopeRetryStorage)
    {
        reassigned = all.Skip(_settings.MaximumEnvelopeRetryStorage).ToArray();
    }

    try
    {
        await _persistor.DiscardAndReassignOutgoing(expired, reassigned, TransportConstants.AnyNode);
        _logger.DiscardedExpired(expired);

        Queued = all.Take(_settings.MaximumEnvelopeRetryStorage).ToList();
    }
    catch (Exception e)
    {
        _logger.LogException(e, message: "Failed while trying to enqueue a message batch for retries");

#pragma warning disable 4014
        Task.Delay(100).ContinueWith(async _ => await EnqueueForRetry(batch));
#pragma warning restore 4014
    }
}
public DurableWorkerQueue(Endpoint endpoint, IHandlerPipeline pipeline, AdvancedSettings settings, IEnvelopePersistence persistence, ITransportLogger logger) { _settings = settings; _persistence = persistence; _logger = logger; endpoint.ExecutionOptions.CancellationToken = settings.Cancellation; _receiver = new ActionBlock <Envelope>(async envelope => { try { envelope.ContentType = envelope.ContentType ?? "application/json"; await pipeline.Invoke(envelope); } catch (Exception e) { // This *should* never happen, but of course it will logger.LogException(e); } }, endpoint.ExecutionOptions); }
public BatchedSender(Uri destination, ISenderProtocol protocol, CancellationToken cancellation,
    ITransportLogger logger)
{
    Destination = destination;
    _protocol = protocol;
    _cancellation = cancellation;
    _logger = logger;

    _sender = new ActionBlock<OutgoingMessageBatch>(SendBatch, new ExecutionDataflowBlockOptions
    {
        MaxDegreeOfParallelism = 1,
        CancellationToken = _cancellation,
        BoundedCapacity = DataflowBlockOptions.Unbounded
    });

    _sender.Completion.ContinueWith(x =>
    {
        if (x.IsFaulted)
        {
            _logger.LogException(x.Exception);
        }
    }, _cancellation);

    _serializing = new ActionBlock<Envelope>(async e =>
    {
        try
        {
            await _batching.SendAsync(e);
        }
        catch (Exception ex)
        {
            _logger.LogException(ex, message: $"Error while trying to serialize envelope {e}");
        }
    }, new ExecutionDataflowBlockOptions
    {
        CancellationToken = _cancellation,
        BoundedCapacity = DataflowBlockOptions.Unbounded
    });

    _serializing.Completion.ContinueWith(x =>
    {
        if (x.IsFaulted)
        {
            _logger.LogException(x.Exception);
        }
    }, _cancellation);

    _batchWriting = new TransformBlock<Envelope[], OutgoingMessageBatch>(
        envelopes =>
        {
            var batch = new OutgoingMessageBatch(Destination, envelopes);
            _queued += batch.Messages.Count;

            return batch;
        },
        new ExecutionDataflowBlockOptions
        {
            BoundedCapacity = DataflowBlockOptions.Unbounded,
            MaxDegreeOfParallelism = 10,
            CancellationToken = _cancellation
        });

    _batchWriting.Completion.ContinueWith(x =>
    {
        if (x.IsFaulted)
        {
            _logger.LogException(x.Exception);
        }
    }, _cancellation);

    _batchWriting.LinkTo(_sender);

    _batching = new BatchingBlock<Envelope>(200, _batchWriting, _cancellation);

    _batching.Completion.ContinueWith(x =>
    {
        if (x.IsFaulted)
        {
            _logger.LogException(x.Exception);
        }
    }, _cancellation);
}
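The constructor above wires envelopes through a serialization block, a custom BatchingBlock, a TransformBlock that builds OutgoingMessageBatch instances, and a single-threaded sending block. The sketch below reproduces the same overall shape with only stock TPL Dataflow blocks: BatchBlock<T> is a stand-in for Jasper's custom BatchingBlock, and the Envelope record is a placeholder type, so treat it as an illustration of the pattern rather than the library's actual pipeline.

// Standalone sketch of the batching pipeline using stock TPL Dataflow blocks only.
using System;
using System.Threading.Tasks;
using System.Threading.Tasks.Dataflow;

public record Envelope(Guid Id);   // placeholder for Jasper's Envelope

public static class BatchingPipelineSketch
{
    public static async Task Run()
    {
        // Single-threaded "sender", mirroring MaxDegreeOfParallelism = 1 above.
        var sender = new ActionBlock<Envelope[]>(
            batch => Console.WriteLine($"Sending a batch of {batch.Length} envelopes"),
            new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 1 });

        // BatchBlock groups envelopes into arrays of up to 200, like BatchingBlock(200, ...).
        var batching = new BatchBlock<Envelope>(200);
        batching.LinkTo(sender, new DataflowLinkOptions { PropagateCompletion = true });

        for (var i = 0; i < 500; i++)
        {
            batching.Post(new Envelope(Guid.NewGuid()));
        }

        batching.Complete();
        await sender.Completion;
    }
}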
private async Task<int> recoverFrom(Uri destination, IDurabilityAgentStorage storage)
{
    try
    {
        Envelope[] filtered = null;
        Envelope[] outgoing = null;

        if (_runtime.GetOrBuildSendingAgent(destination).Latched) return 0;

        await storage.Session.Begin();

        try
        {
            outgoing = await storage.Outgoing.Load(destination: destination);

            var expiredMessages = outgoing.Where(x => x.IsExpired()).ToArray();
            _logger.DiscardedExpired(expiredMessages);
            await storage.Outgoing.Delete(expiredMessages.ToArray());

            filtered = outgoing.Where(x => !expiredMessages.Contains(x)).ToArray();

            // The sending agent might easily have latched in the time between starting
            // and having the data fetched; that was reproducible in (contrived) testing.
            if (_runtime.GetOrBuildSendingAgent(destination).Latched || !filtered.Any())
            {
                await storage.Session.Rollback();
                return 0;
            }

            await storage.Outgoing.Reassign(_settings.UniqueNodeId, filtered);

            await storage.Session.Commit();
        }
        catch (Exception)
        {
            await storage.Session.Rollback();
            throw;
        }

        _logger.RecoveredOutgoing(filtered);

        foreach (var envelope in filtered)
        {
            try
            {
                await _runtime.GetOrBuildSendingAgent(destination).EnqueueOutgoing(envelope);
            }
            catch (Exception e)
            {
                _logger.LogException(e, message: $"Unable to enqueue {envelope} for sending");
            }
        }

        return outgoing.Count();
    }
    catch (UnknownTransportException e)
    {
        _logger.LogException(e, message: $"Could not resolve a channel for {destination}");

        await storage.Session.Begin();
        await storage.Outgoing.DeleteByDestination(destination);
        await storage.Session.Commit();

        return 0;
    }
}
private Task handleException(ExceptionReceivedEventArgs arg)
{
    _logger.LogException(arg.Exception, message: "Internal failure in the Azure Service Bus QueueClient for " + Address);
    return Task.CompletedTask;
}
private async Task<int> recoverFrom(Uri destination, SqlConnection conn)
{
    try
    {
        Envelope[] filtered = null;
        List<Envelope> outgoing = null;

        if (_subscribers.GetOrBuild(destination).Latched) return 0;

        var tx = conn.BeginTransaction();

        try
        {
            outgoing = await conn.CreateCommand(tx, _findOutgoingEnvelopesSql)
                .With("destination", destination.ToString(), SqlDbType.VarChar)
                .ExecuteToEnvelopes();

            filtered = await filterExpired(conn, tx, outgoing);

            // The subscriber might easily have latched in the time between starting
            // and having the data fetched; that was reproducible in (contrived) testing.
            if (_subscribers.GetOrBuild(destination).Latched || !filtered.Any())
            {
                tx.Rollback();
                return 0;
            }

            await markOwnership(conn, tx, filtered);

            tx.Commit();
        }
        catch (Exception)
        {
            tx.Rollback();
            throw;
        }

        _logger.RecoveredOutgoing(filtered);

        foreach (var envelope in filtered)
        {
            try
            {
                await _subscribers.GetOrBuild(destination).QuickSend(envelope);
            }
            catch (Exception e)
            {
                _logger.LogException(e, message: $"Unable to enqueue {envelope} for sending");
            }
        }

        return outgoing.Count();
    }
    catch (UnknownTransportException e)
    {
        _logger.LogException(e, message: $"Could not resolve a channel for {destination}");

        var tx = conn.BeginTransaction();
        await DeleteFromOutgoingEnvelopes(conn, TransportConstants.AnyNode, destination, tx);
        tx.Commit();

        return 0;
    }
}