用到的源码包
openjdk-11.0.1_linux-x64_bin.tar.gz
zookeeper-3.4.11.tar.gz
kafka_2.11-1.0.0.tgz
zookeeper相关配置
[root@node-1 soft]# grep "^[a-zA-Z]" /data/soft/zookeeper1/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
clientPort=2181
dataDir=/data/soft/zookeeper1/data
server.1=10.104.41.91:2888:3888
server.2=10.104.41.91:4888:5888
server.3=10.104.41.91:6888:7888
[root@node-1 soft]# grep "^[a-zA-Z]" /data/soft/zookeeper2/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
clientPort=2182
dataDir=/data/soft/zookeeper2/data
server.1=10.104.41.91:2888:3888
server.2=10.104.41.91:4888:5888
server.3=10.104.41.91:6888:7888
[root@node-1 soft]# grep "^[a-zA-Z]" /data/soft/zookeeper3/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
clientPort=2183
dataDir=/data/soft/zookeeper3/data
server.1=10.104.41.91:2888:3888
server.2=10.104.41.91:4888:5888
server.3=10.104.41.91:6888:7888
创建zookeeper节点ID(myid文件,与zoo.cfg中server.N的编号对应)
echo "1" > /data/soft/zookeeper1/data/myid
echo "2" > /data/soft/zookeeper2/data/myid
echo "3" > /data/soft/zookeeper3/data/myid
kafka相关配置
[root@node-1 soft]# grep "^[a-zA-Z]" /data/soft/kafka1/config/server.properties
broker.id=1
listeners=PLAINTEXT://10.104.41.91:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/soft/kafka1/logs/
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.104.41.91:2181,10.104.41.91:2182,10.104.41.91:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
broker.id=2
listeners=PLAINTEXT://10.104.41.91:9093
advertised.listeners=PLAINTEXT://10.104.41.91:9093
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/soft/kafka2/logs/
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.104.41.91:2181,10.104.41.91:2182,10.104.41.91:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
broker.id=3
listeners=PLAINTEXT://10.104.41.91:9094
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/soft/kafka3/logs/
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=24
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.104.41.91:2181,10.104.41.91:2182,10.104.41.91:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
试运行kafka,相关报错1
[root@node-1 soft]# /data/soft/kafka3/bin/kafka-server-start.sh -daemon /data/soft/kafka3/config/server.properties
/data/soft/kafka3/bin/kafka-run-class.sh: line 252: [[: 10 2018-04-17: syntax error in expression (error token is "2018-04-17")
[0.000s][warning][gc] -Xloggc is deprecated. Will use -Xlog:gc:/root/kafka_2.11-1.1.0/bin/../logs/zookeeper-gc.log instead.
Unrecognized VM option 'PrintGCDateStamps'
Error: Could not create the Java Virtual Machine.
Error: A fatal exception has occurred. Program will exit.
原因分析
kafka-run-class.sh文件中获取JDK版本出错。
解决:
方法1:修改kafka-run-class.sh文件,将PrintGCDateStamps相关的JVM参数行注释掉(JDK 9+已移除该选项)
https://stackoverflow.com/questions/36970622/kafka-unrecognized-vm-option-printgcdatestamps
方法2:
将 JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([^.-]*).*"/\1/p')
修改为 JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([^.-]*).*/\1/p')
试运行kafka,相关报错2
Kafka:Configured broker.id 2 doesn't match stored broker.id 0 in meta.properties.
原因分析
1、Kafka配置目录下的server.properties文件中有一个broker.id
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=2
# A comma seperated list of directories under which to store log files
log.dirs=/tmp/kafka-logs
另外还有一个属性log.dirs,这是kafka存放log的目录,log目录下有meta.properties文件,而meta.properties文件中也写有broker.id,这是在运行时产生的。
[root@node-1 soft]# cat /tmp/kafka-logs/meta.properties
#
#Tue Jun 29 10:42:22 CST 2021
broker.id=2
version=0
2、实际操作的因素
我是先搭建了单台的kafka,在上面试验了一把(注意:这个时候已经在kafka的日志记录下产生了相应的日志文件和meta.properties文件)。然后开始搭建kafka集群,以前面的一台虚拟机为副本,直接克隆了另外2台kafka主机。
虽然改了另外两台的server.properties中的broker.id,但第1台产生的日志记录(含meta.properties)在另外两台上仍然存在,其中的broker.id没有同步修改成与server.properties中的broker.id一致,所以造成了这个问题。
参考文档
https://www.cnblogs.com/gudi/p/7847100.html
http://blog.csdn.net/shandadadada/article/details/50615866
解决方案
在每个kafka日志目录下创建(或修改)meta.properties文件,并将其中的broker.id指定为与server.properties文件一致的值
[root@node-1 soft]# cat /data/soft/kafka3/logs/meta.properties
#
#Tue Jun 29 10:42:22 CST 2021
broker.id=3
version=0
[root@node-1 soft]# cat /data/soft/kafka1/logs/meta.properties
#
#Tue Jun 29 10:42:22 CST 2021
broker.id=1
version=0
[root@node-1 soft]# cat /data/soft/kafka2/logs/meta.properties
#
#Tue Jun 29 10:42:22 CST 2021
broker.id=2
version=0