• Kafka with SASL_PLAINTEXT authentication


    Docker configuration

    docker run --name kafka \
    --restart=always \
    --net=host \
    --volume /data/kafka:/data \
    --volume /data/kafka_server_jaas.conf:/opt/kafka/config/kafka_server_jaas.conf \
    -e KAFKA_BROKER_ID=1 \
    -e KAFKA_LISTENERS=PLAINTEXT://kafka-1:9092 \
    -e KAFKA_LOG_DIRS=/data/kafka \
    -e KAFKA_ZOOKEEPER_CONNECT="127.0.0.1:2181" \
    -e KAFKA_NUM_PARTITIONS=10 \
    -e KAFKA_DEFAULT_REPLICATION_FACTOR=1 \
    -e KAFKA_LOG_RETENTION_HOURS=4 \
    -e KAFKA_LOG_RETENTION_BYTES=2073741824 \
    -e KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN \
    -e KAFKA_SASL_ENABLED_MECHANISMS=PLAIN \
    -d wurstmeister/kafka:2.12-2.1.1
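
    Note that with only the variables above the container still exposes a PLAINTEXT listener and the JVM is never told where the mounted JAAS file lives. A hedged sketch of the extra environment variables that would be needed for a SASL_PLAINTEXT listener follows; it assumes the wurstmeister/kafka image maps KAFKA_* variables onto server.properties and passes KAFKA_OPTS through to the broker JVM, which is not verified here (the JAAS path comes from the volume mount above).

    # assumed additions to the docker run above
    -e KAFKA_LISTENERS=SASL_PLAINTEXT://kafka-1:9092 \
    -e KAFKA_SECURITY_INTER_BROKER_PROTOCOL=SASL_PLAINTEXT \
    -e KAFKA_OPTS="-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" \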

    Kafka broker configuration

    # cat scripts/kafka_server_jaas.conf
    KafkaServer {
        org.apache.kafka.common.security.plain.PlainLoginModule required
        username="admin"
        password="admin-secret"
        user_admin="admin-secret"
        user_alice="alice-secret";
    };

    server.properties

    # Licensed to the Apache Software Foundation (ASF) under one or more
    # contributor license agreements.  See the NOTICE file distributed with
    # this work for additional information regarding copyright ownership.
    # The ASF licenses this file to You under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with
    # the License.  You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    # see kafka.server.KafkaConfig for additional details and defaults
    
    ############################# Server Basics #############################
    
    # The id of the broker. This must be set to a unique integer for each broker.
    broker.id=2
    
    ############################# Socket Server Settings #############################
    
    # The address the socket server listens on. It will get the value returned from 
    # java.net.InetAddress.getCanonicalHostName() if not configured.
    #   FORMAT:
    #     listeners = listener_name://host_name:port
    #   EXAMPLE:
    #     listeners = PLAINTEXT://your.host.name:9092
    listeners=SASL_PLAINTEXT://kafka-1:9092
    security.inter.broker.protocol=SASL_PLAINTEXT
    sasl.mechanism.inter.broker.protocol=PLAIN
    sasl.enabled.mechanisms=PLAIN
    
    # Hostname and port the broker will advertise to producers and consumers. If not set, 
    # it uses the value for "listeners" if configured.  Otherwise, it will use the value
    # returned from java.net.InetAddress.getCanonicalHostName().
    #advertised.listeners=PLAINTEXT://your.host.name:9092
    
    # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
    #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
    
    # The number of threads that the server uses for receiving requests from the network and sending responses to the network
    num.network.threads=3
    
    # The number of threads that the server uses for processing requests, which may include disk I/O
    num.io.threads=8
    
    # The send buffer (SO_SNDBUF) used by the socket server
    socket.send.buffer.bytes=102400
    
    # The receive buffer (SO_RCVBUF) used by the socket server
    socket.receive.buffer.bytes=102400
    
    # The maximum size of a request that the socket server will accept (protection against OOM)
    socket.request.max.bytes=104857600
    
    
    ############################# Log Basics #############################
    
    # A comma separated list of directories under which to store log files
    log.dirs=/tmp/kafka-logs
    
    # The default number of log partitions per topic. More partitions allow greater
    # parallelism for consumption, but this will also result in more files across
    # the brokers.
    num.partitions=10
    
    # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
    # This value is recommended to be increased for installations with data dirs located in RAID array.
    num.recovery.threads.per.data.dir=1
    
    ############################# Internal Topic Settings  #############################
    # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
    # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3.
    offsets.topic.replication.factor=1
    transaction.state.log.replication.factor=1
    transaction.state.log.min.isr=1
    
    ############################# Log Flush Policy #############################
    
    # Messages are immediately written to the filesystem but by default we only fsync() to sync
    # the OS cache lazily. The following configurations control the flush of data to disk.
    # There are a few important trade-offs here:
    #    1. Durability: Unflushed data may be lost if you are not using replication.
    #    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
    #    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
    # The settings below allow one to configure the flush policy to flush data after a period of time or
    # every N messages (or both). This can be done globally and overridden on a per-topic basis.
    
    # The number of messages to accept before forcing a flush of data to disk
    #log.flush.interval.messages=10000
    
    # The maximum amount of time a message can sit in a log before we force a flush
    #log.flush.interval.ms=1000
    
    ############################# Log Retention Policy #############################
    
    # The following configurations control the disposal of log segments. The policy can
    # be set to delete segments after a period of time, or after a given size has accumulated.
    # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
    # from the end of the log.
    
    # The minimum age of a log file to be eligible for deletion due to age
    log.retention.hours=168
    
    # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
    # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
    #log.retention.bytes=1073741824
    
    # The maximum size of a log segment file. When this size is reached a new log segment will be created.
    log.segment.bytes=1073741824
    
    # The interval at which log segments are checked to see if they can be deleted according
    # to the retention policies
    log.retention.check.interval.ms=300000
    
    ############################# Zookeeper #############################
    
    # Zookeeper connection string (see zookeeper docs for details).
    # This is a comma separated host:port pairs, each corresponding to a zk
    # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
    # You can also append an optional chroot string to the urls to specify the
    # root directory for all kafka znodes.
    zookeeper.connect=localhost:2181
    
    # Timeout in ms for connecting to zookeeper
    zookeeper.connection.timeout.ms=6000
    
    
    ############################# Group Coordinator Settings #############################
    
    # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
    # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
    # The default value for this is 3 seconds.
    # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
    # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
    group.initial.rebalance.delay.ms=0

    kafka-server-start.sh

    # cat kafka_2.12-2.1.1/bin/kafka-server-start.sh 
    #!/bin/bash
    # Licensed to the Apache Software Foundation (ASF) under one or more
    # contributor license agreements.  See the NOTICE file distributed with
    # this work for additional information regarding copyright ownership.
    # The ASF licenses this file to You under the Apache License, Version 2.0
    # (the "License"); you may not use this file except in compliance with
    # the License.  You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    if [ $# -lt 1 ];
    then
            echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
            exit 1
    fi
    base_dir=$(dirname $0)
    
    if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
        export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
    fi
    
    if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
        export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    fi
    
    EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
    
    COMMAND=$1
    case $COMMAND in
      -daemon)
        EXTRA_ARGS="-daemon "$EXTRA_ARGS
        shift
        ;;
      *)
        ;;
    esac
    
    export KAFKA_OPTS="-Djava.security.auth.login.config=/root/scripts/kafka_server_jaas.conf"
    #export KAFKA_OPTS="/root/scripts/kafka_server_jaas.conf"
    exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"

    /etc/profile

     cat /etc/profile
    # /etc/profile
    
    # System wide environment and startup programs, for login setup
    # Functions and aliases go in /etc/bashrc
    
    # It's NOT a good idea to change this file unless you know what you
    # are doing. It's much better to create a custom.sh shell script in
    # /etc/profile.d/ to make custom changes to your environment, as this
    # will prevent the need for merging in future updates.
    
    pathmunge () {
        case ":${PATH}:" in
            *:"$1":*)
                ;;
            *)
                if [ "$2" = "after" ] ; then
                    PATH=$PATH:$1
                else
                    PATH=$1:$PATH
                fi
        esac
    }
    
    
    if [ -x /usr/bin/id ]; then
        if [ -z "$EUID" ]; then
            # ksh workaround
            EUID=`/usr/bin/id -u`
            UID=`/usr/bin/id -ru`
        fi
        USER="`/usr/bin/id -un`"
        LOGNAME=$USER
        MAIL="/var/spool/mail/$USER"
    fi
    
    # Path manipulation
    if [ "$EUID" = "0" ]; then
        pathmunge /usr/sbin
        pathmunge /usr/local/sbin
    else
        pathmunge /usr/local/sbin after
        pathmunge /usr/sbin after
    fi
    
    HOSTNAME=`/usr/bin/hostname 2>/dev/null`
    if [ "$HISTCONTROL" = "ignorespace" ] ; then
        export HISTCONTROL=ignoreboth
    else
        export HISTCONTROL=ignoredups
    fi
    
    export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL
    
    GOPATH=/root/go
    export GOPATH
    
    # By default, we want umask to get set. This sets it for login shell
    # Current threshold for system reserved uid/gids is 200
    # You could check uidgid reservation validity in
    # /usr/share/doc/setup-*/uidgid file
    if [ $UID -gt 199 ] && [ "`/usr/bin/id -gn`" = "`/usr/bin/id -un`" ]; then
        umask 002
    else
        umask 022
    fi
    
    for i in /etc/profile.d/*.sh /etc/profile.d/sh.local ; do
        if [ -r "$i" ]; then
            if [ "${-#*i}" != "$-" ]; then 
                . "$i"
            else
                . "$i" >/dev/null
            fi
        fi
    done
    
    unset i
    unset -f pathmunge
    
    KAFKA_OPTS="-Djava.security.auth.login.config=/root/scripts/kafka_server_jaas.conf"
    export KAFKA_OPTS

    Run source /etc/profile so the shell picks up KAFKA_OPTS, then start the broker:

    bin/kafka-server-start.sh config/server.properties

    Kafka commands

    Create a topic

     bin/kafka-topics.sh --create --zookeeper 127.0.0.1:2181 --topic test --partitions 10 --replication-factor 1
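
    The topic and its partition assignment can be verified with a describe call against the same ZooKeeper:

     bin/kafka-topics.sh --describe --zookeeper 127.0.0.1:2181 --topic test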

    Produce messages to the topic

    bin/kafka-console-producer.sh  --broker-list kafka-1:9092 --topic test

    Grant the reader user Read permission on the topic (so it can consume), using kafka-acls.sh:

     bin/kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=127.0.0.1:2181 --add --allow-principal User:reader --operation Read --topic test         

    OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
    [2020-01-25 21:31:30,022] WARN SASL configuration failed: javax.security.auth.login.LoginException: No JAAS configuration section named 'Client' was found in specified JAAS configuration file: '/root/scripts/kafka_server_jaas.conf'. Will continue connection to Zookeeper server without SASL authentication, if Zookeeper server allows it. (org.apache.zookeeper.ClientCnxn)
    [2020-01-25 21:31:30,033] ERROR [ZooKeeperClient] Auth failed. (kafka.zookeeper.ZooKeeperClient)
    Adding ACLs for resource `Topic:LITERAL:test`:
    User:reader has Allow permission for operations: Read from hosts: *

    [2020-01-25 21:31:30,437] WARN SASL configuration failed: javax.security.auth.login.LoginException: No JAAS configuration section named 'Client' was found in specified JAAS configuration file: '/root/scripts/kafka_server_jaas.conf'. Will continue connection to Zookeeper server without SASL authentication, if Zookeeper server allows it. (org.apache.zookeeper.ClientCnxn)
    [2020-01-25 21:31:30,438] ERROR [ZooKeeperClient] Auth failed. (kafka.zookeeper.ZooKeeperClient)
    Current ACLs for resource `Topic:LITERAL:test`:
    User:reader has Allow permission for operations: Read from hosts: *
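
    The warnings above come from the ZooKeeper client inside kafka-acls.sh: it looks for a "Client" login section in the JAAS file and, not finding one, falls back to an unauthenticated ZooKeeper connection. With an unsecured ZooKeeper the warning can be ignored. If ZooKeeper were also secured with SASL, a sketch of the extra section in the same kafka_server_jaas.conf might look like the following; the zkclient credentials are hypothetical and must match a user defined on the ZooKeeper side, and the exact login module depends on how ZooKeeper authentication is configured (see the Red Hat guide in the references).

    Client {
        org.apache.kafka.common.security.plain.PlainLoginModule required
        username="zkclient"
        password="zkclient-secret";
    };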

    Producer configuration:

    producer.conf

    cat /root/scripts/producer.conf
    security.protocol=SASL_PLAINTEXT
    sasl.mechanism=PLAIN
    sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";

    Producer command:

    bin/kafka-console-producer.sh  --broker-list kafka-1:9092 --topic test --producer.config /root/scripts/producer.conf

    Consumer configuration (consumer.conf):

    security.protocol=SASL_PLAINTEXT
    sasl.mechanism=PLAIN
    sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="reader" password="reader-secret";

    Consuming with the reader account fails with an authentication error:

    bin/kafka-console-consumer.sh --bootstrap-server kafka-1:9092 --topic test --from-beginning --consumer.config /root/scripts/consumer.conf --group test-group

    OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
    [2020-01-25 21:35:03,072] ERROR [Consumer clientId=consumer-1, groupId=test-group] Connection to node -1 (kafka-1/172.21.0.17:9092) failed authentication due to: Authentication failed: Invalid username or password (org.apache.kafka.clients.NetworkClient)
    [2020-01-25 21:35:03,074] ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$)
    org.apache.kafka.common.errors.SaslAuthenticationException: Authentication failed: Invalid username or password
    Processed a total of 0 messages
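
    The reader account is rejected because the broker's KafkaServer JAAS section only defines user_admin and user_alice. One fix, used below, is to switch the consumer to the admin account; the other would be to add a reader entry to the broker JAAS file (hypothetical secret shown) and restart the broker:

    KafkaServer {
        org.apache.kafka.common.security.plain.PlainLoginModule required
        username="admin"
        password="admin-secret"
        user_admin="admin-secret"
        user_alice="alice-secret"
        user_reader="reader-secret";
    };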

    Change the username in consumer.conf to admin:

    security.protocol=SASL_PLAINTEXT
    sasl.mechanism=PLAIN
    sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";

    Consumer command:

    bin/kafka-console-consumer.sh --bootstrap-server kafka-1:9092 --topic test --from-beginning --consumer.config /root/scripts/consumer.conf --group test-group 

    Java code

     Producer

    import com.alibaba.fastjson.JSON;
    import com.alibaba.fastjson.JSONObject;
    import org.apache.kafka.clients.producer.*;
    
    import java.util.Properties;
    
    /**
     * Created by matt on 16/7/26.
     */
    public class KafkaProducerDemo {
        private static String topicName;
        private static int msgNum;
        private static int key;
    
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "152.136.200.213:9092");
    //        props.put("bootstrap.servers", "10.155.200.214:9092");
    //        props.put("bootstrap.servers", "kafka-1:9092");
    //        props.put("bootstrap.servers", "172.31.62.250:9092");
            props.put("batch.size", 16384);// 16KB
            props.put("linger.ms", 5);
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("buffer.memory", 335544320);
            props.put("security.protocol", "SASL_PLAINTEXT");
    
            props.put("sasl.mechanism", "PLAIN");
        System.setProperty("java.security.auth.login.config", "F:\\conf\\prod_client_jaas.conf");
//        props.put("compression.type","snappy");
    
    
    //        topicName = "MAIN_PACKET_HTTP_REQUEST";
            topicName = "test";
            msgNum = 10; // 发送的消息数
    
    //        String msg = "{"logdata":[{"RecTimeReq":1571993306357,"DSReq":"20191025","HHReq":"16","LogIDReq":245393518,"DeviceNameReq":"Undefine","LocalTimeZoneReq":"As","LogTypeReq":"http_request","CurrentIDReq":245,"SrcIPReq":"10.160.1.177","SrcPortReq":56126,"DstIPReq":"10.159.129.198","DstPortReq":8015,"sessionIDReq":"10_1571993236_3883992","TTLReq":128,"LenReq":414,"TransProtoReq":"TCP","AppProtoReq":"HTTP","MethodReq":"GET","UriReq":"/openApi/user/getUserNodes?Authorization=appId%3Dcbbapp%2Ctime%3D1571993359374%2Csign%3Dowa29hDQWjP2w9Hj9se5BCEJDv96fBt7LixbXPCNHnI%3D&userId=409737736626897896","AuthorizationReq":"appId=cbbapp,time=1571993359374","HostReq":"10.159.129.198:8015","UserAgentReq":"Jak"},{"RecTime":1571993306361,"DS":"20191025","HH":"16","LogID":230832590,"DeviceName":"Undefine","LocalTimeZone":"Asia/)","LogType":"http_response","CurrentID":247,"RelationID":246,"SrcIP":"10.159.129.198","SrcPort":8015,"DstIP":"10.160.1.177","DstPort":56126,"sessionID":"10_","TTL":62,"Len":263,"TransProto":"TCP","AppProto":"HTTP","ResCode":200,"VersionStr":"HTTP/1.1","ResPhrase":"OK","ContentType":"application/json","Date":"Fri, 25 Oct 2019 08:36:46 GMT","Server":"Apache-Coyote/1.1","TransferEncoding":"chunked","PayloadType":"Get","HttpBody":{"code":20000,"msg":"success","subCode":null,"subMsg":null,"data":[{"id":-2,"type":3,"category":"15","code":"002","pid":0,"level":0,"pathTrace":"/","name":"机构列表","status":1,"orderNo":0,"version":0,"isHide":true,"createdAt":1563950548000,"updatedAt":1563950548000}]}}]}";
    
    //        String msg = "{"logdata":[{"RecTimeReq":1571993306357,"DSReq":"20191025","HHReq":"16","LogIDReq":245393518,"DeviceNameReq":"Undefine","LocalTimeZoneReq":"As","LogTypeReq":"http_request","CurrentIDReq":245,"SrcIPReq":"10.160.1.177","SrcPortReq":56126,"DstIPReq":"10.159.129.198","DstPortReq":8015,"sessionIDReq":"10_1571993236_3883992","TTLReq":128,"LenReq":414,"TransProtoReq":"TCP","AppProtoReq":"HTTP","MethodReq":"GET","UriReq":"/openApi/user/getUserNodes?Authorization=appId%3Dcbbapp%2Ctime%3D1571993359374%2Csign%3Dowa29hDQWjP2w9Hj9se5BCEJDv96fBt7LixbXPCNHnI%3D&userId=409737736626897896","AuthorizationReq":"appId=cbbapp,time=1571993359374","HostReq":"10.159.129.198:8015","UserAgentReq":"Jak"},{"RecTime":1571993306361,"DS":"20191025","HH":"16","LogID":230832590,"DeviceName":"Undefine","LocalTimeZone":"Asia/)","LogType":"http_response","CurrentID":247,"RelationID":246,"SrcIP":"10.159.129.198","SrcPort":8015,"DstIP":"10.160.1.177","DstPort":56126,"sessionID":"10_","TTL":62,"Len":263,"TransProto":"TCP","AppProto":"HTTP","ResCode":200,"VersionStr":"HTTP/1.1","ResPhrase":"OK","ContentType":"application/json","Date":"Fri, 25 Oct 2019 08:36:46 GMT","Server":"Apache-Coyote/1.1","TransferEncoding":"chunked","PayloadType":"Get","HttpBody":{"code":20000,"msg":"success","subCode":null,"subMsg":null,"data":"abcd"}}]}";
        String msg = "{\"logdata\":[{\"RecTimeReq\":1571993306357,\"DSReq\":\"20191025\",\"HHReq\":\"16\"}]}";
            JSONObject jsonObj = JSON.parseObject(msg);
            System.out.println(jsonObj.toJSONString());
            Long startTime = System.currentTimeMillis();
            Producer<String, String> producer = null;
            try {
                producer = new KafkaProducer<>(props);
                for (int i = 0; i < msgNum; i++) {
    //                producer.send(new ProducerRecord<String, String>(topicName, msg), new DemoCallback(startTime, i, msg));
                    producer.send(new ProducerRecord<String, String>(topicName, msg));
                }
            }catch (Exception ex){
                ex.printStackTrace();
            }finally {
                if (producer != null){
                    producer.close();
                }
            }
    //
    //        try {
    //            Thread.sleep(10);
    //        } catch (InterruptedException e) {
    //            e.printStackTrace();
    //        }
    
            Long endTime = System.currentTimeMillis();
            System.out.println("Kafka producer send, topic:" + topicName + ", startTime: " + startTime + ", endTime: " + endTime + ", duration: " + (endTime-startTime));
    
    
    
        }
    
    
    }
    class DemoCallback implements Callback {
        private final long startTime;
        private final int key;
        private final String message;
    
        public DemoCallback(long startTime, int key, String message) {
            this.startTime = startTime;
            this.key = key;
            this.message = message;
        }
    
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            long elapsedTime = System.currentTimeMillis() - startTime;
            if (metadata != null) {
                System.out.println(
                    "message(" + key + ", " + ") sent to partition(" + metadata.partition() +
                        "), " +
                        "offset(" + metadata.offset() + ") in " + elapsedTime + " ms");
            } else {
                exception.printStackTrace();
            }
        }
    }

    Client JAAS configuration file prod_client_jaas.conf

    KafkaClient {
        org.apache.kafka.common.security.plain.PlainLoginModule required
        username="admin"
        password="admin-secret";
    };
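
    Instead of pointing the java.security.auth.login.config system property at a file, the same credentials can be supplied through the client's sasl.jaas.config property, just as producer.conf does above. A minimal sketch (the class name SaslClientProps is only for illustration):

    import java.util.Properties;

    // Builds SASL client properties without a separate JAAS file; an alternative to
    // the java.security.auth.login.config system property used in the demo code.
    public class SaslClientProps {
        public static Properties saslProps() {
            Properties props = new Properties();
            props.put("security.protocol", "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
            // Same content as producer.conf above, inlined as a client property.
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"admin\" password=\"admin-secret\";");
            return props;
        }
    }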



    Consumer

    System.setProperty("java.security.auth.login.config", "F:\\conf\\prod_client_jaas.conf");
    ConsumerGroup consumerGroup = new ConsumerGroup(brokers, groupId, topic, consumerNumber);
    consumerGroup.start();
    
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Properties;
    
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    
    public class ConsumerThread implements Runnable {
        // One KafkaConsumer per thread (KafkaConsumer is not thread-safe, so the field must not be static).
        private final KafkaConsumer<String, String> kafkaConsumer;
        private final String topic;
    //    private List<Map<String, Object>> ruleListAndValue;
        private final List<String> topic1 = new ArrayList<>();
    
        private Logger logger = LoggerFactory.getLogger(this.getClass());
    
        public ConsumerThread(String brokers, String groupId, String topic) {
            Properties properties = buildKafkaProperty(brokers, groupId);
            this.topic = topic;
            this.kafkaConsumer = new KafkaConsumer<String, String>(properties);
    //        this.kafkaConsumer.subscribe(Arrays.asList(this.topic, "PACKET_DNS_RESPONSE", "PACKET_DNS_REQUEST", "STATS_TCP"));
            this.kafkaConsumer.subscribe(Arrays.asList(this.topic));
        }
    
        private static Properties buildKafkaProperty(String brokers, String groupId) {
            Properties properties = new Properties();
            properties.put("bootstrap.servers", brokers);
            properties.put("group.id", groupId);
            properties.put("enable.auto.commit", "true");
            properties.put("auto.commit.interval.ms", "1000");
            properties.put("session.timeout.ms", "30000");
            properties.put("auto.offset.reset", "latest");
    //        properties.put("auto.offset.reset", "earliest");
            properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put("security.protocol", "SASL_PLAINTEXT");
            properties.put("sasl.mechanism", "PLAIN");
            return properties;
        }
    
        @Override
        public void run() {
            while (true) {
                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(100);
                for (ConsumerRecord<String, String> item : consumerRecords) {
                    System.out.println("Consumer Message:" + item.value() + ",Partition:" + item.partition() + "Offset:" + item.offset());
                    logger.info("Consumer Message:" + item.value() + ",Partition:" + item.partition() + "Offset:" + item.offset());
                    try {
                        Thread.sleep(50);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    }
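
    The snippet above calls a ConsumerGroup class that is not shown. A minimal sketch of what it might look like, assuming it simply starts consumerNumber ConsumerThread instances on a thread pool, is below; the structure is a guess, not the original code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    
    // Hypothetical ConsumerGroup matching the call above: runs N ConsumerThread runnables,
    // each owning its own KafkaConsumer instance.
    public class ConsumerGroup {
        private final ExecutorService pool;
        private final String brokers;
        private final String groupId;
        private final String topic;
        private final int consumerNumber;
    
        public ConsumerGroup(String brokers, String groupId, String topic, int consumerNumber) {
            this.brokers = brokers;
            this.groupId = groupId;
            this.topic = topic;
            this.consumerNumber = consumerNumber;
            this.pool = Executors.newFixedThreadPool(consumerNumber);
        }
    
        public void start() {
            for (int i = 0; i < consumerNumber; i++) {
                pool.submit(new ConsumerThread(brokers, groupId, topic));
            }
        }
    }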

    References: https://www.cnblogs.com/smartloli/p/9191929.html

    https://access.redhat.com/documentation/en-us/red_hat_amq/7.2/html/using_amq_streams_on_red_hat_enterprise_linux_rhel/configuring_zookeeper#assembly-configuring-zookeeper-authentication-str

    https://studygolang.com/articles/17216

  • Original article: https://www.cnblogs.com/beilong/p/12232531.html