
Installation

Download the installation package

Link: https://pan.baidu.com/s/1tURBIZCZEh6Bp_H8r_iGsQ
Extraction code: yyds

Install

Extract the archive

tar -zxvf kafka_2.12-3.6.0.tgz -C ../module/
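If ../module resolves to /home/bigdata/module, as the absolute paths used later in this guide suggest, a quick listing confirms the extraction landed where expected:

# Verify the extracted layout (absolute path is an assumption based on the rest of this guide)
ls /home/bigdata/module/kafka_2.12-3.6.0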

Create the data directory

mkdir -p /datadrive/kafka/log
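The broker writes all of its data here, so the account that runs Kafka must be able to write to it. A minimal sketch, assuming that account is bigdata (inferred from the /home/bigdata paths) and that these commands run as root or via sudo:

# Hand the data directory to the Kafka runtime user (user name is an assumption)
chown -R bigdata:bigdata /datadrive/kafka/log
ls -ld /datadrive/kafka/log

The same directory must also exist and be writable on every other broker before they are started.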

Edit the configuration file

server.properties

vi /home/bigdata/module/kafka_2.12-3.6.0/config/server.properties
broker.id=0

num.network.threads=9

num.io.threads=16

socket.send.buffer.bytes=204800

socket.receive.buffer.bytes=204800

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600

num.replica.fetchers=3

message.max.bytes=5242880

replica.fetch.min.bytes=1

replica.fetch.max.bytes=5242880

auto.create.topics.enable=true

default.replication.factor=3


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/datadrive/kafka/log

num.partitions=5

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=3

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=3

############################# Log Flush Policy #############################
log.flush.interval.messages=10000
log.flush.interval.ms=1000

############################# Log Retention Policy #############################

log.retention.hours=168

log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

zookeeper.connect=master1:2181,master2:2181,node1:2181/kafka
zookeeper.connection.timeout.ms=18000

############################# Group Coordinator Settings #############################
group.initial.rebalance.delay.ms=3
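After saving, a quick grep is enough to spot-check that the values edited above are in place (path as used in this guide):

# Spot-check the keys changed from their defaults
grep -E '^(broker.id|log.dirs|num.partitions|default.replication.factor|zookeeper.connect)=' /home/bigdata/module/kafka_2.12-3.6.0/config/server.properties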

Start

Update the other cluster machines before starting

Distribute Kafka to every machine in the cluster

./xsync.sh /home/bigdata/module/kafka_2.12-3.6.0
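xsync.sh is a site-specific distribution script. If it is not available, a plain rsync loop over the remaining hosts does the same job (hostnames taken from the cluster used in this guide):

# Copy the installation to the other brokers
for host in master2 node1
do
    rsync -av /home/bigdata/module/kafka_2.12-3.6.0 $host:/home/bigdata/module/
done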

Then edit server.properties on each machine separately:

vi /home/bigdata/module/kafka_2.12-3.6.0/config/server.properties

# broker.id must be unique on every machine
broker.id=1
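Editing each file by hand works, but the per-host change can also be scripted. A minimal sketch, assuming master1 keeps broker.id=0 and passwordless ssh is already set up:

# Give each remaining host a distinct broker.id
id=1
for host in master2 node1
do
    ssh $host "sed -i 's/^broker.id=.*/broker.id=$id/' /home/bigdata/module/kafka_2.12-3.6.0/config/server.properties"
    id=$((id + 1))
done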

Cluster start/stop script

vi kafka_cluster.sh
#!/bin/bash

case $1 in
"start")
    for i in master1 master2 node1
    do
        echo "-------- starting Kafka on $i --------"
        ssh $i "/home/bigdata/module/kafka_2.12-3.6.0/bin/kafka-server-start.sh -daemon /home/bigdata/module/kafka_2.12-3.6.0/config/server.properties"
    done
;;
"stop")
    for i in master1 master2 node1
    do
        echo "-------- stopping Kafka on $i --------"
        ssh $i "/home/bigdata/module/kafka_2.12-3.6.0/bin/kafka-server-stop.sh"
    done
;;
*)
    echo "Usage: $0 {start|stop}"
;;
esac

Make the script executable, then start the cluster:
chmod 744 kafka_cluster.sh 
./kafka_cluster.sh start
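To confirm the cluster is actually up, check for the Kafka process on every node and do a small round trip through the brokers (the topic name smoke-test is just an example):

# A Kafka process should show up on every broker
for host in master1 master2 node1
do
    ssh $host "jps | grep -i kafka"
done

# Create and list a test topic against the new cluster
/home/bigdata/module/kafka_2.12-3.6.0/bin/kafka-topics.sh --bootstrap-server master1:9092 --create --topic smoke-test --partitions 3 --replication-factor 3
/home/bigdata/module/kafka_2.12-3.6.0/bin/kafka-topics.sh --bootstrap-server master1:9092 --list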