Code example #1
        private async Task ProcessPartitionItemsAsync(int partition, IEnumerable<IKafkaEventData> events, CancellationToken cancellationToken)
        {
            TopicPartition topicPartition = null;

            foreach (var kafkaEventData in events)
            {
                var triggerInput = KafkaTriggerInput.New(kafkaEventData);
                var triggerData  = new TriggeredFunctionData
                {
                    TriggerValue = triggerInput,
                };

                await this.ExecuteFunctionAsync(triggerData, cancellationToken);

                if (topicPartition == null)
                {
                    topicPartition = new TopicPartition(kafkaEventData.Topic, partition);
                }

                // Committing after each function execution plays nicer with the function scaler.
                // When processing a large batch of events where each execution takes time,
                // it would otherwise take Events_In_Batch_For_Partition * Event_Processing_Time to update the current offset.
                // Committing after each event minimizes that delay.
                this.Commit(new[] { new TopicPartitionOffset(topicPartition, kafkaEventData.Offset + 1) });  // offset is inclusive when resuming
            }
        }
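The `kafkaEventData.Offset + 1` follows Kafka's convention that a committed offset is the offset of the next record to consume, not of the last record processed. A minimal illustration of the same convention with the Confluent.Kafka client (whose TopicPartition/TopicPartitionOffset types are used above); this is a sketch, not code from the extension:

using Confluent.Kafka;

static void CommitProcessedRecord(IConsumer<string, string> consumer, ConsumeResult<string, string> result)
{
    // Commit the position *after* the record that was just processed,
    // so consumption resumes at the next record rather than repeating this one.
    consumer.Commit(new[] { new TopicPartitionOffset(result.TopicPartition, result.Offset + 1) });
}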
Code example #2
 public IKafkaEventData[] BindMultiple(KafkaTriggerInput value, ValueBindingContext context)
 {
     if (value == null)
     {
         throw new ArgumentNullException(nameof(value));
     }
     return value.Events;
 }
Code example #3
 // Single instance: Core --> EventData
 public IKafkaEventData BindSingle(KafkaTriggerInput value, ValueBindingContext context)
 {
     if (value == null)
     {
         throw new ArgumentNullException(nameof(value));
     }
     return value.GetSingleEventData();
 }
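BindMultiple and BindSingle are the converters behind the two dispatch modes a function author sees: an array of events per invocation, or one event per invocation. A minimal sketch of both shapes, assuming the public KafkaTrigger attribute and KafkaEventData<T> type from Microsoft.Azure.WebJobs.Extensions.Kafka; the broker, topic, and consumer-group values are placeholders, and exact attribute members may vary by package version:

using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Kafka;
using Microsoft.Extensions.Logging;

public static class KafkaTriggerShapes
{
    // Single dispatch: one event per invocation (served by BindSingle).
    [FunctionName("SingleEvent")]
    public static void RunSingle(
        [KafkaTrigger("BrokerList", "my-topic", ConsumerGroup = "my-group")] KafkaEventData<string> ev,
        ILogger log)
    {
        log.LogInformation("Offset {offset}: {value}", ev.Offset, ev.Value);
    }

    // Batch dispatch: an array of events per invocation (served by BindMultiple).
    [FunctionName("EventBatch")]
    public static void RunBatch(
        [KafkaTrigger("BrokerList", "my-topic", ConsumerGroup = "my-group")] KafkaEventData<string>[] events,
        ILogger log)
    {
        log.LogInformation("Received a batch of {count} events", events.Length);
    }
}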
Code example #4
        protected override async Task ReaderAsync(ChannelReader<IKafkaEventData[]> reader, CancellationToken cancellationToken, ILogger logger)
        {
            var pendingTasks = new List<Task<FunctionResult>>();

            while (!cancellationToken.IsCancellationRequested && await reader.WaitToReadAsync(cancellationToken))
            {
                while (!cancellationToken.IsCancellationRequested && reader.TryRead(out var itemsToExecute))
                {
                    try
                    {
                        // Execute multiple partitions in parallel.
                        // Order within a partition must be preserved.
                        var partitionOffsets = new Dictionary<int, long>();
                        var itemsByPartition = itemsToExecute.GroupBy(x => x.Partition);

                        var i = 0;
                        do
                        {
                            pendingTasks.Clear();

                            foreach (var partition in itemsByPartition)
                            {
                                var kafkaEventData = partition.ElementAtOrDefault(i);
                                if (kafkaEventData != null)
                                {
                                    var triggerInput = KafkaTriggerInput.New(kafkaEventData);
                                    var triggerData  = new TriggeredFunctionData
                                    {
                                        TriggerValue = triggerInput,
                                    };

                                    partitionOffsets[partition.Key] = kafkaEventData.Offset + 1;  // offset is inclusive when resuming

                                    pendingTasks.Add(this.ExecuteFunctionAsync(triggerData, cancellationToken));
                                }
                            }

                            i++;
                            await Task.WhenAll(pendingTasks);
                        } while (!cancellationToken.IsCancellationRequested && pendingTasks.Count > 0);

                        if (!cancellationToken.IsCancellationRequested)
                        {
                            this.Commit(partitionOffsets.Select((kv) => new TopicPartitionOffset(new TopicPartition(itemsToExecute[0].Topic, kv.Key), kv.Value)));
                        }
                    }
                    catch (Exception ex)
                    {
                        logger.LogError(ex, "Error in executor reader");
                    }
                }
            }

            logger.LogInformation("Exiting reader {processName}", nameof(SingleItemFunctionExecutor<TKey, TValue>));
        }
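The do/while loop with ElementAtOrDefault(i) implements a round-robin schedule: each round takes at most one event from every partition, runs those in parallel with Task.WhenAll, and only then advances the index, so partitions progress concurrently while per-partition order is preserved. A generic sketch of the same pattern, with hypothetical names and no Kafka types:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;

static class OrderedGroupScheduler
{
    // Process groups concurrently while keeping strict order inside each group.
    public static async Task ProcessAsync<T>(
        IEnumerable<IGrouping<int, T>> groups,
        Func<T, Task> processAsync) where T : class
    {
        var round = 0;
        List<Task> pending;
        do
        {
            // Take the round-th item of every group (if it has one) and run them in parallel.
            pending = groups
                .Select(g => g.ElementAtOrDefault(round))
                .Where(item => item != null)
                .Select(processAsync)
                .ToList();

            await Task.WhenAll(pending);
            round++;
        } while (pending.Count > 0);
    }
}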
Code example #5
        public Dictionary<string, object> GetBindingData(KafkaTriggerInput value)
        {
            if (value == null)
            {
                throw new ArgumentNullException(nameof(value));
            }

            var bindingData = new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase);

            if (value.IsSingleDispatch)
            {
                AddBindingData(bindingData, value.GetSingleEventData());
            }
            else
            {
                AddBindingData(bindingData, value.Events);
            }

            return bindingData;
        }
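GetBindingData builds the case-insensitive dictionary that the WebJobs runtime exposes as trigger binding data for binding expressions and parameter binding. The AddBindingData helper itself is not shown above; the sketch below is only an assumption of the kind of entries such a helper typically adds for a single event, and the key names are illustrative rather than taken from the source:

// Hypothetical sketch of a single-event AddBindingData (key names are assumptions).
private static void AddBindingData(Dictionary<string, object> bindingData, IKafkaEventData eventData)
{
    bindingData["Topic"]     = eventData.Topic;
    bindingData["Partition"] = eventData.Partition;
    bindingData["Offset"]    = eventData.Offset;
    bindingData["Timestamp"] = eventData.Timestamp;  // assumes IKafkaEventData exposes Timestamp
    bindingData["Key"]       = eventData.Key;        // assumes IKafkaEventData exposes Key
}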
Code example #6
        protected override async Task ReaderAsync(ChannelReader<IKafkaEventData[]> reader, CancellationToken cancellationToken, ILogger logger)
        {
            while (!cancellationToken.IsCancellationRequested && await reader.WaitToReadAsync(cancellationToken))
            {
                while (!cancellationToken.IsCancellationRequested && reader.TryRead(out var itemsToExecute))
                {
                    try
                    {
                        // Execute the function once with the whole batch of events.
                        var triggerInput = KafkaTriggerInput.New(itemsToExecute);
                        var triggerData  = new TriggeredFunctionData
                        {
                            TriggerValue = triggerInput,
                        };

                        var functionResult = await this.ExecuteFunctionAsync(triggerData, cancellationToken);

                        var offsetsToCommit = new Dictionary<int, TopicPartitionOffset>();
                        for (var i = itemsToExecute.Length - 1; i >= 0; i--)
                        {
                            if (!offsetsToCommit.ContainsKey(itemsToExecute[i].Partition))
                            {
                                offsetsToCommit.Add(
                                    itemsToExecute[i].Partition,
                                    new TopicPartitionOffset(
                                        itemsToExecute[i].Topic,
                                        itemsToExecute[i].Partition,
                                        itemsToExecute[i].Offset + 1)); // offset is inclusive when resuming
                            }
                        }

                        if (!cancellationToken.IsCancellationRequested)
                        {
                            this.Commit(offsetsToCommit.Values);

                            if (functionResult.Succeeded)
                            {
                                if (logger.IsEnabled(LogLevel.Debug))
                                {
                                    logger.LogDebug("Function executed with {batchSize} items in {topic} / {partitions} / {offsets}",
                                                    itemsToExecute.Length,
                                                    itemsToExecute[0].Topic,
                                                    string.Join(",", offsetsToCommit.Keys),
                                                    string.Join(",", offsetsToCommit.Values.Select(x => x.Offset)));
                                }
                            }
                            else
                            {
                                logger.LogError(functionResult.Exception, "Failed to execute function with {batchSize} items in {topic} / {partitions} / {offsets}",
                                                itemsToExecute.Length,
                                                itemsToExecute[0].Topic,
                                                string.Join(",", offsetsToCommit.Keys),
                                                string.Join(",", offsetsToCommit.Values.Select(x => x.Offset)));
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        logger.LogError(ex, "Error in executor reader");
                    }
                }
            }

            logger.LogInformation("Exiting reader {processName}", nameof(MultipleItemFunctionExecutor<TKey, TValue>));
        }