配置文件明细
01 connect-console-sink.properties
02 connect-console-source.properties
03 connect-distributed.properties
04 connect-file-sink.properties
05 connect-file-source.properties
06 connect-log4j.properties
07 connect-standalone.properties
08 consumer.properties #
09 log4j.properties
10 producer.properties #
11 server.properties #
12 tools-log4j.properties
13 trogdor.conf
14 zookeeper.properties #
zookeeper.properties
[zookeeper.properties]
:dataDir=/tmp/zookeeper # 快照存放地址
:clientPort=2181 # 客户端连接zookeeper服务的端口
:maxClientCnxns=0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
producer.properties
[producer.properties]
:bootstrap.servers=localhost:9092
:compression.type=none
:#partitioner.class=
:#request.timeout.ms=
:#max.block.ms=
:#linger.ms=
:#max.request.size=
:#batch.size=
:#buffer.memory=
consumer.properties
[consumer.properties]
:bootstrap.servers=localhost:9092
:group.id=test-consumer-group
:#auto.offset.reset=
server.properties
[server.properties]
:broker.id=0
:#listeners=PLAINTEXT://:9092
:#advertised.listeners=PLAINTEXT://your.host.name:9092
:num.network.threads=3
:num.io.threads=8
:socket.send.buffer.bytes=102400
:socket.receive.buffer.bytes=102400
:socket.request.max.bytes=104857600
:log.dirs=/tmp/kafka-logs # 日志存放文件夹
:num.partitions=1
:num.recovery.threads.per.data.dir=1
:offsets.topic.replication.factor=1
:transaction.state.log.replication.factor=1
:transaction.state.log.min.isr=1
:#log.flush.interval.messages=10000
:#log.flush.interval.ms=1000
:log.retention.hours=168
:#log.retention.bytes=1073741824
:log.segment.bytes=1073741824
:log.retention.check.interval.ms=300000
:zookeeper.connect=localhost:2181 # zookeeper 连接地址
:zookeeper.connection.timeout.ms=6000
脚本命令明细
[run]
01 connect-distributed.sh
02 connect-standalone.sh
03 kafka-acls.sh
04 kafka-broker-api-versions.sh
05 kafka-configs.sh
06 kafka-console-consumer.sh $
07 kafka-console-producer.sh $
08 kafka-consumer-groups.sh
09 kafka-consumer-perf-test.sh
10 kafka-delegation-tokens.sh
11 kafka-delete-records.sh
12 kafka-dump-log.sh
13 kafka-log-dirs.sh
14 kafka-mirror-maker.sh
15 kafka-preferred-replica-election.sh
16 kafka-producer-perf-test.sh
17 kafka-reassign-partitions.sh
18 kafka-replica-verification.sh
19 kafka-run-class.sh $
20 kafka-server-start.sh #
21 kafka-server-stop.sh #
22 kafka-streams-application-reset.sh
23 kafka-topics.sh $
24 kafka-verifiable-consumer.sh
25 kafka-verifiable-producer.sh
26 trogdor.sh
27 zookeeper-security-migration.sh
28 zookeeper-server-start.sh #
29 zookeeper-server-stop.sh #
30 zookeeper-shell.sh