• Kafka: Spring Boot Integration (Consumer)



    Whether to use auto-commit or manual commit depends on your business scenario; see the earlier posts in this series and choose based on how each mechanism works.
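
    As a point of reference, here is a minimal sketch of what a manual-commit setup can look like (assuming spring-kafka 2.3+, where AckMode lives on ContainerProperties). The factory name, group id and listener method are illustrative, and the bootstrapServers field is assumed to be injected as in the config classes later in this post:

    @Bean
    public KafkaListenerContainerFactory<?> manualAckFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "manual-group");
        // Disable auto-commit so the listener decides when an offset is committed
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(configProps));
        // Commit as soon as Acknowledgment.acknowledge() is called in the listener
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    // Listener side: acknowledge only after the business logic has finished
    @KafkaListener(topics = "hello", containerFactory = "manualAckFactory")
    public void receiveWithAck(String message, Acknowledgment ack) {
        // ... process the message ...
        ack.acknowledge(); // commits the offset for this record
    }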

    Single-threaded consumption

    pom

            <dependency>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-starter-web</artifactId>
            </dependency>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka-streams</artifactId>
            </dependency>
            <dependency>
                <groupId>org.springframework.kafka</groupId>
                <artifactId>spring-kafka</artifactId>
            </dependency>
    
            <dependency>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-starter-test</artifactId>
                <scope>test</scope>
                <exclusions>
                    <exclusion>
                        <groupId>org.junit.vintage</groupId>
                        <artifactId>junit-vintage-engine</artifactId>
                    </exclusion>
                </exclusions>
            </dependency>
            <dependency>
                <groupId>org.springframework.kafka</groupId>
                <artifactId>spring-kafka-test</artifactId>
                <scope>test</scope>
            </dependency>
    
            <dependency>
                <groupId>com.google.code.gson</groupId>
                <artifactId>gson</artifactId>
                <version>2.8.2</version>
            </dependency>
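
    Both config classes in this post read the broker list through @Value("${kafka.bootstrap-servers}"), so a matching entry is assumed in application.properties (host and port are placeholders):

    kafka.bootstrap-servers=localhost:9092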
    

    consumerConfig

    @Configuration
    @EnableKafka
    public class KafkaConsumerConfig {
    
        @Value("${kafka.bootstrap-servers}")
        private String bootstrapServers;
    
        /**
         * Single-threaded, one record per call
         */
        @Bean
        public KafkaListenerContainerFactory<?> stringKafkaListenerContainerFactory() {
            Map<String, Object> configProps = new HashMap<>();
            configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "topic");
    
            ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
            factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(configProps));
            return factory;
        }
    
    }
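
    To feed the listener some test data, the console producer shipped with Kafka is handy (broker address is a placeholder; on older Kafka versions use --broker-list instead of --bootstrap-server):

    bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic hello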
    

    consumer

    @Component
    public class KafkaReceiver {
    	
    	private static Logger logger = LoggerFactory.getLogger(KafkaReceiver.class);
    
    
    	//@KafkaListener(topics = "hello", containerFactory = "stringKafkaListenerContainerFactory")
    	public void receiveString(String message) {
    		logger.info("Message : %s" +message);
    	}
    
    
    
    	/**
    	 * Reading message headers and the payload via annotations
    	 *
    	 * @Payload: the message body, i.e. the content that was sent
    	 * @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY): the key the message was sent with
    	 * @Header(KafkaHeaders.RECEIVED_PARTITION_ID): the partition the message was received from
    	 * @Header(KafkaHeaders.RECEIVED_TOPIC): the name of the topic being listened to
    	 * @Header(KafkaHeaders.RECEIVED_TIMESTAMP): the message timestamp
    	 *
    	 */
    
    	//@KafkaListener(topics = "hello", containerFactory = "stringKafkaListenerContainerFactory")
    	public void receive(@Payload String message,
    						 //@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) Integer key,
    						@Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
    						@Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
    						@Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts) {
    		logger.info("topic : " +topic);
    		logger.info("partition : " +partition);
    		//logger.info("key : " +key.toString());
    		logger.info("TIMESTAMP : " +ts);
    		logger.info("message : " +message);
    	}
    
    
    
    	/**
    	 * Consuming from a given partition and initial offset
    	 *
    	 * @TopicPartition: topic -- the name of the Topic to listen to; partitions -- the ids of the partitions to listen to; partitionOffsets -- start listening from a given offset
    	 * @PartitionOffset: partition -- the partition id (not an array); initialOffset -- the initial offset
    	 *
    	 */
    
        @KafkaListener(containerFactory = "stringKafkaListenerContainerFactory",
                       topicPartitions = @TopicPartition(topic = "hello", partitionOffsets = @PartitionOffset(partition = "0" , initialOffset = "2")))
    	public void receiveFromBegin(@Payload String payload,
    								 @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
    		System.out.println(String.format("Read all message from partition %d : %s", partition, payload));
    	}
    
    
    	/**
    	 * Receiving the full ConsumerRecord
    	 *
    	 * @param record
    	 */
    
        //@KafkaListener(topics = "hello", containerFactory = "stringKafkaListenerContainerFactory")
    	public void receive(ConsumerRecord<?, ?> record) {
    		System.out.println("Message is :" + record.toString());
    	}
    }
    

    Batch consumption

    Enabling batch consumption takes three steps, all shown in the config and listener below:
    1. Set max.poll.records on the consumer
    2. Enable batch listening on the factory: factory.setBatchListener(true)
    3. Receive a batch in the listener: public void consumerBatch(List<ConsumerRecord<?, ?>> records)

    javaConfig

    @Configuration
    @EnableKafka
    public class BatchConsumerConfig {
        @Value("${kafka.bootstrap-servers}")
        private String bootstrapServers;
    
        /**
         * Multi-threaded batch consumption
         */
        @Bean
        public KafkaListenerContainerFactory<?> batchFactory(){
            ConcurrentKafkaListenerContainerFactory<String, String> factory =new ConcurrentKafkaListenerContainerFactory<>();
            factory.setConsumerFactory(consumerFactory());
            // Concurrency, i.e. the number of consumer threads. If the topic has 3 partitions, setting this to 3 consumes with 3 threads in parallel and speeds things up; without setConcurrency the container runs a single consumer thread.
            factory.setConcurrency(10);
            // poll timeout
            factory.getContainerProperties().setPollTimeout(1500);
            // Enable batch consumption; the size of each batch is capped by the
            // Kafka consumer property max.poll.records, set below
            factory.setBatchListener(true);
            return factory;
        }
    
        public ConsumerFactory<String, String> consumerFactory() {
            return new DefaultKafkaConsumerFactory<>(consumerConfigs());
        }
    
        /**
         * Consumer configuration
         */
        public Map<String, Object> consumerConfigs() {
            Map<String, Object> configProps = new HashMap<>();
            // No need to list every broker; the client discovers the rest of the cluster. Still, list more than one in case a server is down.
            configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            // key deserializer
            configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            // value deserializer
            configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            // group id
            configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
            // Maximum number of records returned per poll, i.e. the batch size
            configProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2);
            // Auto-commit of offsets:
            // if true, offsets are committed automatically at the interval set by auto.commit.interval.ms;
            // if false, you commit offsets yourself and can mark messages as consumed only after
            // processing them. This is useful when consuming involves business logic: a message
            // should not count as consumed until that processing has completed.
            configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
            // Auto-commit interval
            configProps.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
            // Session timeout
            configProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
            // What to do when the partition has no committed offset, or the offset is invalid:
            // latest (default): start from the newest records (those produced after the consumer started)
            // earliest: start from the beginning of the partition
            configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
            return configProps;
        }
    }
    

    Consumer: BatchConsumer

    @Component
    @Slf4j
    public class BatchConsumer {
        /**
         * Batch of messages
         * @param records
         */
        @KafkaListener(topics = "hello", containerFactory="batchFactory")
        public void consumerBatch(List<ConsumerRecord<?, ?>> records){
            log.info("接收到消息数量:{}",records.size());
            for(ConsumerRecord record: records) {
                Optional<?> kafkaMessage = Optional.ofNullable(record.value());
                log.info("Received: " + record);
                if (kafkaMessage.isPresent()) {
                    Object message = record.value();
                    String topic = record.topic();
                    System.out.println("接收到消息:" + message);
                }
            }
        }
        
    }
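
    If only the payloads are needed, spring-kafka also lets a batch listener receive them directly as a list of values; a small sketch (the method name is illustrative, reusing the batchFactory above):

    @KafkaListener(topics = "hello", containerFactory = "batchFactory")
    public void consumerBatchPayloads(List<String> messages) {
        // Each element is one record's value, already deserialized as a String
        for (String message : messages) {
            log.info("Received payload: {}", message);
        }
    }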
    

    Reference: https://blog.csdn.net/yy756127197/article/details/103895413
