• Sending messages to Kafka from Java


    pom.xml:

            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka-clients</artifactId>
                <version>2.3.0</version>
            </dependency>

    Custom partitioner:

    import org.apache.kafka.clients.producer.Partitioner;
    import org.apache.kafka.common.Cluster;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicInteger;
    
    public class RoundRobinPartitioner implements Partitioner {
    
        /**
         * Counter, incremented once per record produced
         */
        private final AtomicInteger counter = new AtomicInteger();
    
        @Override
        public int partition(String topic, Object key, byte[] keyBytes,
                             Object value, byte[] valueBytes, Cluster cluster) {
            // Number of partitions for this topic
            int partitions = cluster.partitionCountForTopic(topic);
    
            int curPartition = counter.incrementAndGet() % partitions;
    
            // Reset periodically so the counter never overflows into a negative value
            if (counter.get() > 65536) {
                counter.set(0);
            }
    
            return curPartition;
        }
    
        @Override
        public void close() {
    
        }
    
        @Override
        public void configure(Map<String, ?> configs) {
    
        }
    }
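
    A quick way to confirm the partitioner actually cycles is to block on the Future<RecordMetadata> that send() returns and print the partition each record lands on. A minimal sketch; the broker address "master:9092" and topic "test" used by the producer below are assumptions here:

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;
    
    import java.util.Properties;
    
    public class PartitionerCheck {
    
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092"); // assumed broker address
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringSerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringSerializer");
            props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class);
    
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                for (int i = 0; i < 6; i++) {
                    // send() returns a Future<RecordMetadata>; get() blocks until the broker acks
                    RecordMetadata meta = producer.send(
                            new ProducerRecord<>("test", Integer.toString(i))).get();
                    System.out.println("record " + i + " -> partition " + meta.partition());
                }
            }
        }
    }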

    Producer:

    import org.apache.kafka.clients.producer.*;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    
    import java.util.Properties;
    
    public class Producer {
        private static final Logger logger = LoggerFactory.getLogger(Producer.class);
        // volatile is required for the double-checked locking below to be safe
        private static volatile KafkaProducer<String, String> kafkaProducer;
    
        public static void main(String[] args) {
            KafkaProducer<String, String> producer = getKafkaProducer();
            for (int i = 0; i < 100; i++) {
                // Fire-and-forget send; for a truly synchronous send, block on the
                // returned Future with producer.send(...).get()
                producer.send(new ProducerRecord<>("test", Integer.toString(i)));
    
                // Asynchronous send with a completion callback
                /*producer.send(new ProducerRecord<String, String>("test", Integer.toString(i + 100)), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace();
                        }
                    }
                });*/
            }
            producer.close();
        }
    
        /**
         * Get the Kafka producer client, lazily initialized with double-checked locking
         *
         * @return the shared KafkaProducer instance
         */
        public static KafkaProducer<String, String> getKafkaProducer() {
            if (kafkaProducer == null) {
                synchronized (Producer.class) {
                    try {
                        if (kafkaProducer == null) {
                            kafkaProducer = initKafkaProducer();
                        }
                    } catch (Exception e) {
                        logger.error("Failed to create the Kafka producer: " + kafkaProducer, e);
                    }
                }
            }
            return kafkaProducer;
        }
    
        /**
         * Initialize the Kafka producer client
         *
         * @return a configured KafkaProducer
         */
        private static KafkaProducer<String, String> initKafkaProducer() {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092");
            // acks: 0 = no broker ack; 1 = leader ack only; -1/all = leader plus all in-sync replicas
            props.put(ProducerConfig.ACKS_CONFIG, "1");
            // Number of retries allowed after a failed send
            props.put(ProducerConfig.RETRIES_CONFIG, 0);
            // Maximum size (in bytes) of each batch of records
            props.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
            // Wait up to 1 ms for more records to batch before sending
            props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
            // Total memory (in bytes) available for buffering unsent records
            props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
            // Custom partitioner
            props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class);
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    
            return new KafkaProducer<>(props);
        }
    
    }
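
    If records carry a key, a custom partitioner is often unnecessary: the default partitioner hashes the key (murmur2), so records with the same key always go to the same partition, which preserves per-key ordering. A minimal sketch of keyed sends, assuming the same broker address and topic as above; the key names are made up for illustration:

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    
    import java.util.Properties;
    
    public class KeyedProducer {
    
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "master:9092"); // assumed broker address
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringSerializer");
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringSerializer");
            // No PARTITIONER_CLASS_CONFIG: the default partitioner hashes the record key
    
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                for (int i = 0; i < 10; i++) {
                    String key = "user-" + (i % 3); // three hypothetical logical keys
                    // Records sharing a key land on the same partition
                    producer.send(new ProducerRecord<>("test", key, "event-" + i));
                }
            }
        }
    }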

    Consumer:

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;
    import java.time.Duration;
    import java.util.Arrays;
    import java.util.Properties;
    
    
    public class Consumer {
    
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "192.168.100.100:9092");
            // Consumer group id
            props.put("group.id", "test1");
            // Commit offsets automatically
            props.put("enable.auto.commit", "true");
            // earliest = start from the oldest offset; latest = start from the newest
            props.put("auto.offset.reset", "latest");
            // Auto-commit offsets every 1 s
            props.put("auto.commit.interval.ms", "1000");
            // If the broker receives no heartbeat from the consumer within this window, it
            // considers the consumer dead and reassigns its partitions to another member
            props.put("session.timeout.ms", "30000");
            // Deserializers
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            // Topics to subscribe to (several can be listed)
            consumer.subscribe(Arrays.asList("test"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s",
                            record.offset(), record.key(), record.value());
                    System.out.println();
                }
            }
        }
    
    }
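
    With enable.auto.commit=true, offsets can be committed for records that were polled but not yet fully processed, so a crash in between can lose work. A common variant is manual commits: disable auto-commit and call commitSync() only after a batch has been processed, which gives at-least-once delivery. A minimal sketch, assuming the same broker, group id, and topic as above:

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    
    import java.time.Duration;
    import java.util.Arrays;
    import java.util.Properties;
    
    public class ManualCommitConsumer {
    
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "192.168.100.100:9092"); // assumed broker address
            props.put("group.id", "test1");
            // Disable auto-commit; offsets are committed explicitly below
            props.put("enable.auto.commit", "false");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Arrays.asList("test"));
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
                    }
                    if (!records.isEmpty()) {
                        // Commit only after the whole batch has been processed (at-least-once)
                        consumer.commitSync();
                    }
                }
            }
        }
    }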
  • Original source: https://www.cnblogs.com/chong-zuo3322/p/13793635.html