/// <summary>
/// Producer throughput benchmark: sends <c>count</c> fixed-size payloads to a
/// Kafka topic and reports statistics via <see cref="Stats"/> every 5000 ms.
/// </summary>
static void Main(string[] args)
{
    var host = "192.168.33.12:9092";
    var topic = "test2";
    var count = 50000000;
    var size = 100;

    var prop = new Properties();
    // Fix: use the `host` local instead of repeating the broker literal
    // (previously `host` was declared but never used).
    prop.setProperty("bootstrap.servers", host);
    prop.setProperty("acks", "1");
    prop.setProperty("buffer.memory", "67108864");
    prop.setProperty("batch.size", "8196"); // NOTE(review): 8196 looks like a typo for 8192 — confirm intent
    var producer = new KafkaProducer(prop, new ByteArraySerializer(), new ByteArraySerializer());

    // Reuse one payload/record for every send so the loop measures producer
    // throughput rather than per-iteration allocation cost.
    var payload = new byte[size];
    for (int i = 0; i < size; i++)
    {
        payload[i] = (byte)'a';
    }
    var record = new ProducerRecord(topic, payload);

    var stats = new Stats(count, 5000, Console.WriteLine);
    for (int i = 0; i < count; i++)
    {
        var sendStart = DateTimeExtensions.CurrentTimeMillis();
        var cb = new StatsCallback { Action = stats.NextCompletion(sendStart, payload.Length, stats) };
        producer.send(record, cb);
    }

    producer.close();
    stats.PrintTotal();
}
/// <summary>
/// Sends a <see cref="ProducerRecord{K,V}"/> through the given
/// <see cref="IProducer{K,V}"/>: routes to the record's explicit partition
/// when one is set, otherwise lets the producer choose the partition.
/// </summary>
public static void Produce<K, V>(this IProducer<K, V> producer, ProducerRecord<K, V> message, Action<DeliveryReport<K, V>> reportHandler)
{
    if (!message.Partition.HasValue)
    {
        producer.Produce(message.Topic, message.Message, reportHandler);
        return;
    }

    var target = new TopicPartition(message.Topic, message.Partition.Value);
    producer.Produce(target, message.Message, reportHandler);
}
/// <summary>
/// Serializes and produces a single message to this producer's topic,
/// resolving the target partition and its current leader broker first.
/// </summary>
/// <param name="message">Message to send; may carry an explicit partition id.</param>
/// <returns>The broker's <see cref="ProduceResponse"/> for the single partition written.</returns>
/// <exception cref="InvalidOperationException">No partition id could be resolved.</exception>
/// <exception cref="KafkaException">Malformed response shape, or a leader-related broker error.</exception>
public async ValueTask<ProduceResponse> Produce(Message<TKey, TValue> message)
{
    var record = ProducerRecord.Create(message, keySerializer, valueSerializer);
    record.Topic = topic;

    var t = await cluster.GetTopic(record.Topic);

    // Partition selection: an explicit id wins; single-partition topics
    // short-circuit to 0; otherwise delegate to the partitioner.
    if (message.PartitionId.HasValue)
    {
        record.PartitionId = message.PartitionId.Value;
    }
    else if (t.Partitions.Count == 1)
    {
        record.PartitionId = 0;
    }
    else
    {
        record.PartitionId = await partitioner.GetPartition(t, record.KeyBytes);
    }

    if (!record.PartitionId.HasValue)
    {
        throw new InvalidOperationException("PartitionId not specified");
    }

    var partitionLeader = t.Partitions[record.PartitionId.Value].Leader;
    var b = cluster.GetBroker(partitionLeader);
    var response = await b.Produce(record);

    // We sent exactly one record, so the response must cover exactly one
    // topic/partition. Fix: the old guard (`Count > 1`) missed the empty
    // case and would have thrown IndexOutOfRangeException on Topics[0].
    if (response.Topics.Count != 1 || response.Topics[0].Partitions.Count != 1)
    {
        throw new KafkaException("Expected exactly one topic/partition in Produce response");
    }

    var partitionResponse = response.Topics[0].Partitions[0];
    switch (partitionResponse.ErrorCode)
    {
        case ResponseError.LEADER_NOT_AVAILABLE:
        case ResponseError.NOT_LEADER_FOR_PARTITION:
        case ResponseError.PREFERRED_LEADER_NOT_AVAILABLE:
            // Fix: replace the placeholder "oops" with a diagnosable message
            // carrying topic, partition and the broker error code.
            throw new KafkaException($"Produce failed for {record.Topic}/{record.PartitionId}: {partitionResponse.ErrorCode}");
    }

    return response;
}
/// <summary>
/// Kafka producer benchmark entry point: fires <c>count</c> identical
/// <paramref name="args"/>-independent 100-byte records at a topic and prints
/// running statistics (window: 5000 ms) plus a final summary.
/// </summary>
static void Main(string[] args)
{
    var host = "192.168.33.12:9092";
    var topic = "test2";
    var count = 50000000;
    var size = 100;

    var prop = new Properties();
    // Fix: `host` was dead — the broker address literal was repeated here.
    prop.setProperty("bootstrap.servers", host);
    prop.setProperty("acks", "1");
    prop.setProperty("buffer.memory", "67108864");
    prop.setProperty("batch.size", "8196"); // NOTE(review): likely meant 8192 — confirm
    var producer = new KafkaProducer(prop, new ByteArraySerializer(), new ByteArraySerializer());

    // A single shared payload/record keeps the hot loop allocation-free.
    var payload = new byte[size];
    for (int i = 0; i < size; i++)
    {
        payload[i] = (byte)'a';
    }
    var record = new ProducerRecord(topic, payload);

    var stats = new Stats(count, 5000, Console.WriteLine);
    for (int i = 0; i < count; i++)
    {
        var sendStart = DateTimeExtensions.CurrentTimeMillis();
        var cb = new StatsCallback { Action = stats.NextCompletion(sendStart, payload.Length, stats) };
        producer.send(record, cb);
    }

    producer.close();
    stats.PrintTotal();
}
/// <summary>
/// Issues a Produce request (api version 7) for the given record and reads
/// the broker's <see cref="ProduceResponse"/>.
/// </summary>
public ValueTask<ProduceResponse> Produce(ProducerRecord message)
{
    var writer = new ProducerRecordRequestWriter();
    var reader = new ProduceResponseReader();
    return SendRequest(ApiKey.Produce, 7, message, writer, reader);
}