拉取镜像

  1. $ docker pull mirrorgooglecontainers/kubernetes-kafka:1.0-10.2.1

创建pv

  1. $ vi kfkpv

将下列代码复制进去

  1. kind: PersistentVolume
  2. apiVersion: v1
  3. metadata:
  4. name: pv-kafka1
  5. namespace: bigdata
  6. annotations:
  7. volume.beta.kubernetes.io/storage-class: "anything"
  8. labels:
  9. type: local
  10. spec:
  11. capacity:
  12. storage: 5Gi
  13. accessModes:
  14. - ReadWriteOnce
  15. hostPath:
  16. path: "/data/kafka1"
  17. persistentVolumeReclaimPolicy: Recycle
  18. ---
  19. kind: PersistentVolume
  20. apiVersion: v1
  21. metadata:
  22. name: pv-kafka2
  23. namespace: bigdata
  24. annotations:
  25. volume.beta.kubernetes.io/storage-class: "anything"
  26. labels:
  27. type: local
  28. spec:
  29. capacity:
  30. storage: 5Gi
  31. accessModes:
  32. - ReadWriteOnce
  33. hostPath:
  34. path: "/data/kafka2"
  35. persistentVolumeReclaimPolicy: Recycle
  36. ---
  37. kind: PersistentVolume
  38. apiVersion: v1
  39. metadata:
  40. name: pv-kafka3
  41. namespace: bigdata
  42. annotations:
  43. volume.beta.kubernetes.io/storage-class: "anything"
  44. labels:
  45. type: local
  46. spec:
  47. capacity:
  48. storage: 5Gi
  49. accessModes:
  50. - ReadWriteOnce
  51. hostPath:
  52. path: "/data/kafka3"
  53. persistentVolumeReclaimPolicy: Recycle

重命名文件（添加 .yaml 扩展名）

  1. $ mv kfkpv kfkpv.yaml

运行kfkpv.yaml文件

  1. $ kubectl create -f kfkpv.yaml

查看pv

  1. $ kubectl get pv

创建kafka statefulset

  1. $ vi kfksts

将下列代码复制进去

  1. apiVersion: v1
  2. kind: Service
  3. metadata:
  4. name: kafka-hs
  5. namespace: bigdata
  6. labels:
  7. app: kafka
  8. spec:
  9. ports:
  10. - port: 9093
  11. name: server
  12. clusterIP: None
  13. selector:
  14. app: kafka
  15. ---
  16. apiVersion: policy/v1beta1
  17. kind: PodDisruptionBudget
  18. metadata:
  19. name: kafka-pdb
  20. namespace: bigdata
  21. spec:
  22. selector:
  23. matchLabels:
  24. app: kafka
  25. maxUnavailable: 1
  26. ---
  27. apiVersion: apps/v1
  28. kind: StatefulSet
  29. metadata:
  30. name: kafka
  31. namespace: bigdata
  32. spec:
  33. selector:
  34. matchLabels:
  35. app: kafka
  36. serviceName: kafka-hs
  37. replicas: 3
  38. podManagementPolicy: Parallel
  39. updateStrategy:
  40. type: RollingUpdate
  41. template:
  42. metadata:
  43. labels:
  44. app: kafka
  45. spec:
  46. terminationGracePeriodSeconds: 300
  47. containers:
  48. - name: k8skafka
  49. imagePullPolicy: IfNotPresent
  50. image: mirrorgooglecontainers/kubernetes-kafka:1.0-10.2.1
  51. resources:
  52. requests:
  53. memory: "256Mi"
  54. cpu: "0.1"
  55. ports:
  56. - containerPort: 9093
  57. name: server
  58. command:
  59. - sh
  60. - -c
  61. - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
  62. --override listeners=PLAINTEXT://:9093 \
  63. --override zookeeper.connect=zk-cs.bigdata.svc.cluster.local:2181 \
  64. --override log.dir=/var/lib/kafka \
  65. --override auto.create.topics.enable=true \
  66. --override auto.leader.rebalance.enable=true \
  67. --override background.threads=10 \
  68. --override compression.type=producer \
  69. --override delete.topic.enable=false \
  70. --override leader.imbalance.check.interval.seconds=300 \
  71. --override leader.imbalance.per.broker.percentage=10 \
  72. --override log.flush.interval.messages=9223372036854775807 \
  73. --override log.flush.offset.checkpoint.interval.ms=60000 \
  74. --override log.flush.scheduler.interval.ms=9223372036854775807 \
  75. --override log.retention.bytes=-1 \
  76. --override log.retention.hours=168 \
  77. --override log.roll.hours=168 \
  78. --override log.roll.jitter.hours=0 \
  79. --override log.segment.bytes=1073741824 \
  80. --override log.segment.delete.delay.ms=60000 \
  81. --override message.max.bytes=1000012 \
  82. --override min.insync.replicas=1 \
  83. --override num.io.threads=8 \
  84. --override num.network.threads=3 \
  85. --override num.recovery.threads.per.data.dir=1 \
  86. --override num.replica.fetchers=1 \
  87. --override offset.metadata.max.bytes=4096 \
  88. --override offsets.commit.required.acks=-1 \
  89. --override offsets.commit.timeout.ms=5000 \
  90. --override offsets.load.buffer.size=5242880 \
  91. --override offsets.retention.check.interval.ms=600000 \
  92. --override offsets.retention.minutes=1440 \
  93. --override offsets.topic.compression.codec=0 \
  94. --override offsets.topic.num.partitions=50 \
  95. --override offsets.topic.replication.factor=3 \
  96. --override offsets.topic.segment.bytes=104857600 \
  97. --override queued.max.requests=500 \
  98. --override quota.consumer.default=9223372036854775807 \
  99. --override quota.producer.default=9223372036854775807 \
  100. --override replica.fetch.min.bytes=1 \
  101. --override replica.fetch.wait.max.ms=500 \
  102. --override replica.high.watermark.checkpoint.interval.ms=5000 \
  103. --override replica.lag.time.max.ms=10000 \
  104. --override replica.socket.receive.buffer.bytes=65536 \
  105. --override replica.socket.timeout.ms=30000 \
  106. --override request.timeout.ms=30000 \
  107. --override socket.receive.buffer.bytes=102400 \
  108. --override socket.request.max.bytes=104857600 \
  109. --override socket.send.buffer.bytes=102400 \
  110. --override unclean.leader.election.enable=true \
  111. --override zookeeper.session.timeout.ms=6000 \
  112. --override zookeeper.set.acl=false \
  113. --override broker.id.generation.enable=true \
  114. --override connections.max.idle.ms=600000 \
  115. --override controlled.shutdown.enable=true \
  116. --override controlled.shutdown.max.retries=3 \
  117. --override controlled.shutdown.retry.backoff.ms=5000 \
  118. --override controller.socket.timeout.ms=30000 \
  119. --override default.replication.factor=1 \
  120. --override fetch.purgatory.purge.interval.requests=1000 \
  121. --override group.max.session.timeout.ms=300000 \
  122. --override group.min.session.timeout.ms=6000 \
  123. --override inter.broker.protocol.version=0.10.2-IV0 \
  124. --override log.cleaner.backoff.ms=15000 \
  125. --override log.cleaner.dedupe.buffer.size=134217728 \
  126. --override log.cleaner.delete.retention.ms=86400000 \
  127. --override log.cleaner.enable=true \
  128. --override log.cleaner.io.buffer.load.factor=0.9 \
  129. --override log.cleaner.io.buffer.size=524288 \
  130. --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
  131. --override log.cleaner.min.cleanable.ratio=0.5 \
  132. --override log.cleaner.min.compaction.lag.ms=0 \
  133. --override log.cleaner.threads=1 \
  134. --override log.cleanup.policy=delete \
  135. --override log.index.interval.bytes=4096 \
  136. --override log.index.size.max.bytes=10485760 \
  137. --override log.message.timestamp.difference.max.ms=9223372036854775807 \
  138. --override log.message.timestamp.type=CreateTime \
  139. --override log.preallocate=false \
  140. --override log.retention.check.interval.ms=300000 \
  141. --override max.connections.per.ip=2147483647 \
  142. --override num.partitions=3 \
  143. --override producer.purgatory.purge.interval.requests=1000 \
  144. --override replica.fetch.backoff.ms=1000 \
  145. --override replica.fetch.max.bytes=1048576 \
  146. --override replica.fetch.response.max.bytes=10485760 \
  147. --override reserved.broker.max.id=1000 "
  148. env:
  149. - name: KAFKA_HEAP_OPTS
  150. value : "-Xmx256M -Xms256M"
  151. - name: KAFKA_OPTS
  152. value: "-Dlogging.level=INFO"
  153. volumeMounts:
  154. - name: datadir
  155. mountPath: /var/lib/kafka
  156. readinessProbe:
  157. exec:
  158. command:
  159. - sh
  160. - -c
  161. - "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=localhost:9093"
  162. volumeClaimTemplates:
  163. - metadata:
  164. name: datadir
  165. annotations:
  166. volume.beta.kubernetes.io/storage-class: "anything"
  167. spec:
  168. accessModes: [ "ReadWriteOnce" ]
  169. resources:
  170. requests:
  171. storage: 5Gi

重命名文件（添加 .yaml 扩展名）

  1. $ mv kfksts kfksts.yaml

运行kfksts.yaml文件

  1. $ kubectl create -f kfksts.yaml

查看pod

  1. $ kubectl get pods -n bigdata
  2. NAME READY STATUS RESTARTS AGE
  3. kafka-0 1/1 Running 0 88m
  4. kafka-1 1/1 Running 0 88m
  5. kafka-2 1/1 Running 0 88m

三个 Pod 全部处于 Running 状态，说明 Kafka 集群启动成功了