1. Kafka Installation
Kafka does not depend on Hadoop; it only depends on ZooKeeper.
Install and start ZooKeeper before installing Kafka.
http://kafka.apache.org/downloads
tar -zxvf kafka_2.11-2.4.1.tgz -C /opt/module/
cd /opt/module/
mv kafka_2.11-2.4.1/ kafka
cd kafka
mkdir logs
# environment variables
sudo vim /etc/profile.d/my_env.sh
#KAFKA_HOME
export KAFKA_HOME=/opt/module/kafka
export PATH=$PATH:$KAFKA_HOME/bin
source /etc/profile.d/my_env.sh
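A quick sanity check to confirm the environment variable took effect (a minimal sketch; the --version flag is available in recent Kafka releases, including 2.4.x):
echo $KAFKA_HOME           # expected: /opt/module/kafka
kafka-topics.sh --version  # should print the Kafka version, e.g. 2.4.1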
Configure Kafka
vim /opt/module/kafka/config/server.properties
# Globally unique broker ID; must not be duplicated across brokers
broker.id=0
# Enable topic deletion
delete.topic.enable=true
# Number of threads handling network requests
num.network.threads=3
# Number of threads handling disk I/O
num.io.threads=8
# Send socket buffer size
socket.send.buffer.bytes=102400
# Receive socket buffer size
socket.receive.buffer.bytes=102400
# Maximum size of a socket request
socket.request.max.bytes=104857600
# Path where Kafka data (log) files are stored
log.dirs=/opt/module/kafka/logs
# Default number of partitions per topic on this broker
num.partitions=1
# Number of threads used to recover and clean up data under each data dir
num.recovery.threads.per.data.dir=1
# Maximum time a segment file is retained; older segments are deleted
log.retention.hours=168
# ZooKeeper cluster connection string
zookeeper.connect=hadoop102:2181,hadoop103:2181,hadoop104:2181/kafka
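To quickly review the values that matter most after editing (a simple optional check, not required by the installation):
grep -E '^(broker.id|delete.topic.enable|log.dirs|zookeeper.connect)' /opt/module/kafka/config/server.properties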
Distribute the environment variable file and the Kafka directory to the other nodes
sudo xsync /opt/module/kafka
sudo xsync /etc/profile.d/my_env.sh
Modify broker.id on each Kafka node
vim /opt/module/kafka/config/server.properties
# hadoop102: 2, hadoop103: 3, hadoop104: 4
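Instead of editing the file by hand on every node, the value can be changed with sed (a sketch; run the matching command on each host):
sed -i 's/^broker.id=.*/broker.id=2/' /opt/module/kafka/config/server.properties   # on hadoop102
sed -i 's/^broker.id=.*/broker.id=3/' /opt/module/kafka/config/server.properties   # on hadoop103
sed -i 's/^broker.id=.*/broker.id=4/' /opt/module/kafka/config/server.properties   # on hadoop104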
Start Kafka
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties  # start each broker on its own machine
kafka-server-stop.sh  # stop
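Once the brokers are running, a small smoke test confirms the cluster works (a sketch; the topic name "first" and the partition/replication counts are just example values):
kafka-topics.sh --bootstrap-server hadoop102:9092 --create --topic first --partitions 3 --replication-factor 3
kafka-topics.sh --bootstrap-server hadoop102:9092 --list
kafka-console-producer.sh --broker-list hadoop102:9092 --topic first                        # type a few messages
kafka-console-consumer.sh --bootstrap-server hadoop102:9092 --topic first --from-beginning  # should print them back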
Cluster start script (remember to start ZooKeeper first)
sudo vim /bin/kafkalist.sh
#!/bin/bash
# Start Kafka on every host listed in the Hadoop workers file
for i in `cat /opt/module/hadoop-3.1.3/etc/hadoop/workers`
do
        echo "========== $i =========="
        # Non-login ssh shells may not load /etc/profile.d, so source the env file first
        ssh $i "source /etc/profile.d/my_env.sh; kafka-server-start.sh -daemon \$KAFKA_HOME/config/server.properties"
        echo $?
done
sudo chmod +x /bin/kafkalist.sh
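A matching cluster stop script can be written the same way (a sketch, assuming the same workers file and env file as above; the file name /bin/kafkastop.sh is only an example, remember to chmod +x it):
#!/bin/bash
for i in `cat /opt/module/hadoop-3.1.3/etc/hadoop/workers`
do
        echo "========== $i =========="
        ssh $i "source /etc/profile.d/my_env.sh; kafka-server-stop.sh"
done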
If Kafka exits immediately after starting, try deleting the contents of the logs directory and starting again.
Check whether the Kafka broker IDs have been registered in ZooKeeper
zkCli.sh
ls /kafka/brokers/ids
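With the broker.id values configured above, the expected output once all three brokers are up would be roughly:
[2, 3, 4]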
