For the ZooKeeper setup, see https://www.cnblogs.com/wintersoft/p/11128484.html
mkdir /opt/kafka -p
vim /opt/kafka/Dockerfile
FROM wurstmeister/kafka:2.12-2.3.0
EXPOSE 9092
sudo mkdir -p /var/log/kafka;sudo chmod -R 777 /var/log/kafka
vim /opt/kafka/docker-compose.yml
version: '2'
services:
  kafka:
    image: v-kafka
    container_name: kafka
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: "zookeeperserver:2181"
    volumes:
      - /var/log/kafka/:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver:192.168.0.101"
      - "zookeeperserver:192.168.0.101"
Build and start:
cd /opt/kafka/
docker-compose build
docker-compose up -d --force-recreate
Stop and remove:
docker-compose down
Restart:
docker-compose restart
Check that the port is listening:
netstat -anltp|grep 9092
View logs:
docker logs --tail="500" kafka
docker logs -f kafka
Enter the container:
docker exec -it kafka /bin/bash
Pseudo-cluster (three brokers on one host)
sudo mkdir -p /var/log/kafka/node1;sudo chmod -R 777 /var/log/kafka/node1
sudo mkdir -p /var/log/kafka/node2;sudo chmod -R 777 /var/log/kafka/node2
sudo mkdir -p /var/log/kafka/node3;sudo chmod -R 777 /var/log/kafka/node3
vim /opt/kafka/docker-compose.yml
version: '2'
services:
  kafka1:
    image: v-kafka1
    container_name: kafka1
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9011:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver1:9011
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver1
      KAFKA_ADVERTISED_PORT: 9011
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node1:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
  kafka2:
    image: v-kafka2
    container_name: kafka2
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9012:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver2:9012
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver2
      KAFKA_ADVERTISED_PORT: 9012
      KAFKA_BROKER_ID: 2
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node2:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
  kafka3:
    image: v-kafka3
    container_name: kafka3
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9013:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver3:9013
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver3
      KAFKA_ADVERTISED_PORT: 9013
      KAFKA_BROKER_ID: 3
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node3:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
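Once the cluster is up, a quick sanity check is to confirm that all three brokers registered in ZooKeeper. A sketch; the container name zookeeper1 and its internal client port 2181 are assumptions carried over from the ZooKeeper setup linked above:
docker exec -it zookeeper1 zkCli.sh -server localhost:2181 ls /brokers/ids
# the output should end with the three broker ids: [1, 2, 3]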
Rule for passing broker configs as environment variables: prefix the key with KAFKA_, uppercase everything, and replace "." with "_".
For example:
To increase the Kafka heap size: KAFKA_HEAP_OPTS=-Xmx4G -Xms4G
When KAFKA_LOG_DIRS=/kafka/logs is set, mount a matching volume: volumes: - "./kafka3/logs:/kafka/logs"
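A minimal sketch of the naming rule applied inside a compose environment block; the broker keys shown are standard Kafka settings chosen only for illustration:
environment:
  KAFKA_LOG_RETENTION_HOURS: 72              # log.retention.hours=72
  KAFKA_NUM_PARTITIONS: 3                    # num.partitions=3
  KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"   # auto.create.topics.enable=false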
For kafka-manager, APPLICATION_SECRET: "xxx" can be set in its environment.
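A hedged sketch of a kafka-manager service for the same compose file; the sheepkiller/kafka-manager image and its ZK_HOSTS variable are assumptions, so adjust them to whichever kafka-manager image you actually use:
  kafka-manager:
    image: sheepkiller/kafka-manager   # assumed image
    container_name: kafka-manager
    restart: always
    ports:
      - 9000:9000
    environment:
      ZK_HOSTS: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      APPLICATION_SECRET: "xxx"
    extra_hosts:
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"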
The value of KAFKA_LISTENERS is the internal (bind) address; KAFKA_ADVERTISED_LISTENERS is the address published to clients.
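A sketch of the listener pair for broker 1, consistent with the compose file above (PLAINTEXT://0.0.0.0:9092 binds all interfaces inside the container):
environment:
  KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092                    # internal bind address
  KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver1:9011    # address clients connect to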
Without delete.topic.enable=true, deleting a topic is only a soft delete (the topic is merely marked for deletion).
If a topic has been soft-deleted, the Java client will report:
WARN Error while fetching metadata with correlation id 0 : {test=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient)
The error org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-3] 1 partitions have leader brokers without a matching listener, including [log-0]
is usually caused by a wrong ZooKeeper ip:port in Kafka's configuration; after correcting it, ZooKeeper's data must be cleared before the cluster works normally again.
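A hedged sketch of wiping Kafka's stale registrations with the ZooKeeper CLI (deleteall needs ZooKeeper 3.5+, older versions use rmr; the znode paths are Kafka's defaults; this destroys cluster metadata, so only do it on a disposable setup):
docker-compose stop                    # stop the brokers first so they do not re-register
docker exec -it zookeeper1 /bin/bash   # container name is an assumption
zkCli.sh -server localhost:2181
deleteall /brokers
deleteall /admin
deleteall /config
deleteall /controller_epoch
quit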
Copy the config out of a container:
docker cp kafka1:/opt/kafka/config/ /opt/kafka/kafka1_config_bak/
kafka-manager only displays a cluster after you add its configuration manually in the web UI.
Test Kafka
Enter the container:
docker exec -it kafka1 /bin/bash
Create a topic:
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic --partitions 3 --replication-factor 3
Note: the replication-factor cannot exceed the number of brokers.
List current topics:
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013
Run a console producer against the just-created topic myTestTopic:
/opt/kafka/bin/kafka-console-producer.sh --broker-list 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
Type some text, then press Ctrl+C to exit.
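Messages can also be piped in non-interactively, using the same broker list:
echo "hello kafka" | /opt/kafka/bin/kafka-console-producer.sh --broker-list 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic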
Describe the topic:
/opt/kafka/bin/kafka-topics.sh --describe --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
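The output is shaped roughly like the following (leader and replica assignments will vary):
Topic:myTestTopic   PartitionCount:3   ReplicationFactor:3   Configs:
    Topic: myTestTopic   Partition: 0   Leader: 1   Replicas: 1,2,3   Isr: 1,2,3
    Topic: myTestTopic   Partition: 1   Leader: 2   Replicas: 2,3,1   Isr: 2,3,1
    Topic: myTestTopic   Partition: 2   Leader: 3   Replicas: 3,1,2   Isr: 3,1,2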
Consume messages:
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic --from-beginning
Press Ctrl+C to exit.
Delete the topic:
/opt/kafka/bin/kafka-topics.sh --delete --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
If the topic cannot be deleted, set KAFKA_DELETE_TOPIC_ENABLE: "true" in the environment when starting the Docker container.
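If a topic stays stuck in the soft-deleted state, it can be removed by hand through ZooKeeper (a hedged sketch; the znode paths are Kafka's defaults, and the on-disk partition directories must also be removed on every broker):
# inside the zookeeper container
zkCli.sh -server localhost:2181
deleteall /brokers/topics/myTestTopic
deleteall /admin/delete_topics/myTestTopic
quit
# then on each broker, remove the partition directories, e.g.:
rm -rf /kafka/kafka-logs-*/myTestTopic-*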