• Kafka advanced usage with Spring Boot 2.1 (2): manually creating topics


    Dependencies:

            <dependency>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-starter-web</artifactId>
            </dependency>
    
            
    
            <dependency>
                <groupId>org.springframework.kafka</groupId>
                <artifactId>spring-kafka</artifactId>
            </dependency>

    application.properties:

    ### Kafka configuration
    spring.kafka.bootstrap-servers=10.160.3.70:9092
    spring.kafka.consumer.group-id=sea-test
    spring.kafka.consumer.enable-auto-commit=false
    spring.kafka.consumer.auto-offset-reset=earliest
    spring.kafka.consumer.max-poll-records=2000
    #spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
    #spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
    spring.kafka.producer.retries=3
    spring.kafka.producer.batch-size=16384
    spring.kafka.producer.buffer-memory=33554432
    spring.kafka.producer.linger=10
    #spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
    #spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer

    KafkaConfig:

    package com.icil.topic.config;
    
    import java.util.HashMap;
    import java.util.Map;
    
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.common.serialization.StringDeserializer;
    import org.apache.kafka.common.serialization.StringSerializer;
    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.kafka.annotation.EnableKafka;
    import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
    import org.springframework.kafka.config.KafkaListenerContainerFactory;
    import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
    import org.springframework.kafka.core.DefaultKafkaProducerFactory;
    import org.springframework.kafka.core.KafkaAdmin;
    import org.springframework.kafka.core.KafkaTemplate;
    import org.springframework.kafka.core.ProducerFactory;
    import org.springframework.kafka.listener.ContainerProperties;
    
    import com.google.common.collect.Maps;
    
    @Configuration
    @EnableKafka
    public class KafkaConfig {
    
        @Value("${spring.kafka.bootstrap-servers}")
        private String bootstrapServers;
    
        @Value("${spring.kafka.consumer.group-id}")
        private String groupId;
    
        @Value("${spring.kafka.consumer.enable-auto-commit}")
        private Boolean autoCommit;
    
        @Value("${spring.kafka.consumer.auto-offset-reset}")
        private String autoOffsetReset;
    
        @Value("${spring.kafka.consumer.max-poll-records}")
        private Integer maxPollRecords;
        
        @Value("${spring.kafka.producer.linger}")
        private int linger;
    
        @Value("${spring.kafka.producer.retries}")
        private Integer retries;
    
        @Value("${spring.kafka.producer.batch-size}")
        private Integer batchSize;
    
        @Value("${spring.kafka.producer.buffer-memory}")
        private Integer bufferMemory;
    
    
        // Reference: https://blog.csdn.net/tmeng521/article/details/90901925
        public Map<String, Object> producerConfigs() {
             
            Map<String, Object> props = new HashMap<>();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            // number of retries on transient send failures
            props.put(ProducerConfig.RETRIES_CONFIG, retries);
            // a batch is sent once it reaches batchSize bytes
            props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
            // linger time in ms; when it elapses the batch is sent even if batchSize has not been reached
            props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
            // total memory available for buffering unsent records
            props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
            // serializers
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            // producer acknowledgement: -1 and "all" both mean the message must be written to the leader and to its replicas
            props.put(ProducerConfig.ACKS_CONFIG, "-1"); // with a single broker, "1" is recommended
            // maximum size of a single request in bytes (default is 1048576)
            props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
            // broker response timeout: if the broker does not acknowledge within 60 seconds, the send is considered failed
            props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
            // interceptors (the value is the fully qualified class name)
            //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.te.handler.KafkaProducerInterceptor");
            // compression algorithm (no compression by default)
            props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
            return props;
        }
        
        
        
        @Bean // Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without it a custom AdminClient cannot be used to create topics
        public KafkaAdmin kafkaAdmin() {
            Map<String, Object> props = new HashMap<>();
            // Kafka broker address (not the ZooKeeper address)
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            KafkaAdmin admin = new KafkaAdmin(props);
            return admin;
        }
     
        @Bean // Kafka admin client; once this bean exists it can be injected to create topics, e.g. with multiple replicas in a cluster environment
        public AdminClient adminClient() {
            return AdminClient.create(kafkaAdmin().getConfig());
        }
    
        
    
        @Bean
        public ProducerFactory<String, String> producerFactory() {
            return new DefaultKafkaProducerFactory<>(producerConfigs());
        }
    
        @Bean
        public KafkaTemplate<String, String> kafkaTemplate() {
            return new KafkaTemplate<>(producerFactory());
        }
    
        
        
        
        
        @Bean
        public Map<String, Object> consumerConfigs() {
            Map<String, Object> props = Maps.newHashMap();
            props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
    //        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 180000);
    //        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 900000);
    //        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 900000);
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            return props;
        }
    
    
        @Bean
        public KafkaListenerContainerFactory<?> batchFactory() {
            ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
            factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
            // enable batch consumption; the number of records per batch is set via ConsumerConfig.MAX_POLL_RECORDS_CONFIG
            factory.setBatchListener(true);
            // set the retry template
    //        factory.setRetryTemplate(retryTemplate());
            factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
            return factory;
        }
    
    
    }
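
    With the batchFactory above using MANUAL ack mode, a listener can consume records in batches and commit offsets explicitly. The listener below is a minimal sketch rather than part of the original post; the topic name topic.manual.create is taken from the test further below.

    package com.icil.topic.listener;
    
    import java.util.List;
    
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.kafka.support.Acknowledgment;
    import org.springframework.stereotype.Component;
    
    @Component
    public class DemoBatchListener {
    
        // receives up to max-poll-records messages per poll and commits their offsets manually,
        // matching the MANUAL ack mode configured on batchFactory
        @KafkaListener(topics = "topic.manual.create", containerFactory = "batchFactory")
        public void onMessages(List<String> messages, Acknowledgment ack) {
            for (String message : messages) {
                System.out.println("received: " + message);
            }
            ack.acknowledge(); // commit the offsets for this batch
        }
    }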

    If topics need to be initialized at application startup, they can be declared as beans (reference: https://blog.csdn.net/tmeng521/article/details/90901925):

     
    import java.util.HashMap;
    import java.util.Map;
    
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.kafka.core.KafkaAdmin;
    
    @Configuration
    public class KafkaInitialConfiguration {
     
        // create a topic named topic.quick.initial with 8 partitions and 1 replica
        @Bean // declared as a bean (bean name: initialTopic)
        public NewTopic initialTopic() {
            return new NewTopic("topic.quick.initial",8, (short) 1 );
        }
        /**
         * With this @Bean approach, a NewTopic with the same name as an existing topic
         * overrides the previous definition.
         * @return
         */
        // after this change the partition count becomes 11; note that partitions can only be increased, never decreased
        @Bean
        public NewTopic initialTopic2() {
            return new NewTopic("topic.quick.initial",11, (short) 1 );
        }
        @Bean // Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without it a custom AdminClient cannot be used to create topics
        public KafkaAdmin kafkaAdmin() {
            Map<String, Object> props = new HashMap<>();
            // Kafka broker address (not the ZooKeeper address)
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
            KafkaAdmin admin = new KafkaAdmin(props);
            return admin;
        }
     
        @Bean // Kafka admin client; once this bean exists it can be injected to create topics
        public AdminClient adminClient() {
            return AdminClient.create(kafkaAdmin().getConfig());
        }
        
     
    }
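
    A NewTopic bean can also carry topic-level settings via configs(). The bean below is an illustrative sketch that could be added to the KafkaInitialConfiguration class above; the topic name topic.quick.retention and the 7-day retention value are assumptions, not part of the original post.

        @Bean
        public NewTopic retentionTopic() {
            Map<String, String> configs = new HashMap<>();
            configs.put("retention.ms", "604800000"); // keep messages for 7 days
            // 3 partitions, 1 replica, plus per-topic configuration
            return new NewTopic("topic.quick.retention", 3, (short) 1).configs(configs);
        }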

    Test: manually create a topic and list all topics

        @Autowired // the adminClient bean must be defined in your own configuration (see KafkaConfig above)
        private AdminClient adminClient;
        
        
        @Autowired
        private KafkaTemplate<String, String> kafkaTemplate;
     
        @Test // manually create a topic with custom partitions
        public void testCreateTopic() throws InterruptedException {
            // manual creation: 10 partitions, 1 replica
            // more partitions help handle higher concurrency, but the count should match the machine's resources
            NewTopic topic = new NewTopic("topic.manual.create", 10, (short) 1);
            adminClient.createTopics(Arrays.asList(topic));
            Thread.sleep(1000);
        }
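
        // A minimal sketch (not from the original post): the autowired kafkaTemplate can be used to
        // send a test message to the topic created above; the key and payload here are illustrative.
        @Test
        public void testSendMessage() throws Exception {
            kafkaTemplate.send("topic.manual.create", "key-1", "hello kafka")
                         .get(); // block until the broker acknowledges the send
        }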
        
        
        /**
         * List all topics
         * @throws Exception
         */
        @Test
        public void getAllTopic() throws Exception {
            ListTopicsResult listTopics = adminClient.listTopics();
             Set<String> topics = listTopics.names().get();
             
             for (String topic : topics) {
                 System.err.println(topic);
                
            }
        }
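
    Since Kafka only allows the partition count of a topic to grow, an existing topic can be expanded with AdminClient.createPartitions. The test below is an assumption-based sketch (target count of 12, verification via describeTopics); it additionally needs org.apache.kafka.clients.admin.NewPartitions and TopicDescription on the import list.

        /**
         * Increase the partition count of an existing topic and verify the result.
         */
        @Test
        public void testIncreasePartitions() throws Exception {
            Map<String, NewPartitions> request = new HashMap<>();
            // partitions can only be increased; Kafka does not support shrinking a topic
            request.put("topic.manual.create", NewPartitions.increaseTo(12));
            adminClient.createPartitions(request).all().get();

            // describe the topic to confirm the new partition count
            TopicDescription description = adminClient
                    .describeTopics(Arrays.asList("topic.manual.create"))
                    .all().get()
                    .get("topic.manual.create");
            System.err.println("partitions: " + description.partitions().size());
        }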
        
  • Original post: https://www.cnblogs.com/lshan/p/11544111.html