原文:https://blog.csdn.net/weixin_44367006/article/details/103075173?utm_medium=distribute.pc_aggpage_search_result.none-task-blog-2~all~first_rank_v2~rank_v28-4-103075173.nonecase&utm_term=java%20kafka%E7%9B%91%E5%90%AC&spm=1000.2123.3001.4430
Kafka 再均衡监听器示例
依赖
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.3.0</version>
</dependency>
- 1
- 2
- 3
- 4
- 5
介绍
本示例中,生产者发送50条消息给有3个消费者的群组。消费者群组中,第三个线程会中途退出群组,借此,我们可以观察分区再均衡现象。
代码
生产者
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Demo producer: sends {@link #MSG_SIZE} messages to the three-partition topic
 * "rebalance-topic-three-part" so a consumer group's rebalancing can be observed.
 */
public class RebalanceProducer {
    /** Total number of messages to send. */
    private static final int MSG_SIZE = 50;

    /** Pool that runs the asynchronous send tasks. */
    private static final ExecutorService executorService
            = Executors.newFixedThreadPool(
                    Runtime.getRuntime().availableProcessors());

    /** Counted down once per message so main() can wait until every send task ran. */
    private static final CountDownLatch countDownLatch
            = new CountDownLatch(MSG_SIZE);

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.14:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Diamond operator: the original used raw KafkaProducer/ProducerRecord,
        // which compiles with unchecked warnings.
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < MSG_SIZE; i++) {
                // No key is supplied, so the partitioner spreads records across partitions.
                ProducerRecord<String, String> record
                        = new ProducerRecord<>(
                                "rebalance-topic-three-part",
                                "value" + i);
                executorService.submit(new ProduceWorker(record, producer, countDownLatch));
                Thread.sleep(600); // pace the sends so the rebalance is easy to watch
            }
            countDownLatch.await();
        } catch (InterruptedException e) {
            // Only sleep()/await() throw checked exceptions here; restore the
            // interrupt flag instead of swallowing it with a broad catch.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } finally {
            producer.close();
            executorService.shutdown();
        }
    }
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
生产任务
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.concurrent.CountDownLatch;
public class ProduceWorker implements Runnable{
private ProducerRecord<String,String> record;
private KafkaProducer<String,String> producer;
private CountDownLatch countDownLatch;
public ProduceWorker(ProducerRecord<String, String> record,
KafkaProducer<String, String> producer, CountDownLatch countDownLatch) {
this.record = record;
this.producer = producer;
this.countDownLatch = countDownLatch;
}
public void run() {
final String id = "" + Thread.currentThread().getId();
try {
producer.send(record, new Callback() {
public void onCompletion(RecordMetadata metadata,
Exception exception) {
if(null!=exception){
exception.printStackTrace();
}
if(null!=metadata){
System.out.println(id+"|"
+String.format("偏移量:%s,分区:%s",
metadata.offset(),metadata.partition()));
}
}
});
System.out.println(id+":数据["+record.key()+ "-" + record.value()+"]已发送。");
countDownLatch.countDown();
} catch (Exception e) {
e.printStackTrace();
}
}
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
消费者
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Demo consumer group: starts two long-lived workers, then (after a delay)
 * a third worker that is intended to be stopped mid-run so the resulting
 * partition rebalance can be observed on the survivors.
 */
public class RebalanceConsumer {
    /** Group id shared by all three consumer workers. */
    public static final String GROUP_ID = "RebalanceConsumer";

    /** Pool sized for the long-running consumer workers. */
    private static final ExecutorService executorService
            = Executors.newFixedThreadPool(3);

    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.100.14:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, RebalanceConsumer.GROUP_ID);
        // Offsets are committed manually (via the rebalance listener), not automatically.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        // Two workers that stay in the group for the whole run.
        int stayingWorkers = 2;
        while (stayingWorkers-- > 0) {
            executorService.submit(new ConsumerWorker(false, props));
        }

        Thread.sleep(5000);
        // Started late and meant to be stopped, so the remaining consumers'
        // rebalance behaviour can be observed.
        new Thread(new ConsumerWorker(true, props)).start();
    }
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
消费任务
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class ConsumerWorker implements Runnable{
private final KafkaConsumer<String,String> consumer;
/*用来保存每个消费者当前读取分区的偏移量*/
private final Map<TopicPartition, OffsetAndMetadata> currOffsets;
private final boolean isStop;
/**
 * Creates a consumer subscribed to "rebalance-topic-three-part", registering a
 * HandlerRebalance listener (defined elsewhere in this example — presumably it
 * commits {@code currOffsets} when partitions are revoked; verify there).
 *
 * @param isStop     whether this worker should leave the group mid-run
 * @param properties Kafka consumer configuration
 */
public ConsumerWorker(boolean isStop, Properties properties) {
    this.isStop = isStop;
    // Diamond operator: the original used raw KafkaConsumer/HashMap constructor
    // calls, which compile with unchecked warnings.
    this.consumer = new KafkaConsumer<>(properties);
    this.currOffsets = new HashMap<>();
    consumer.subscribe(Collections.singletonList("rebalance-topic-three-part"),
            new HandlerRebalance(currOffsets, consumer));
}
public void run() {
final String id = "" + Thread.currentThread().getId();
int count = 0;
TopicPartition topicPartition;
long offset;
try {
while(true){
ConsumerRecords<String, String> records
= consumer.poll(Duration.ofMillis(500));
//业务处理
//开始事务
for(ConsumerRecord<String, String> record:records){
System.out.println(id+"|"+String.format(
"处理主题:%s,分区:%d,偏移量:%d," +
"key:%s,value:%s",
record.topic(),record.partition(),
record.offset(),record.key(),record.value()));
topicPartition = new TopicPartition(record.topic(),
record.partition());
offset = record.offset()+1;
currOffsets.put(topicPartition,new OffsetAndMetadata(offset,
"no"));
count++