$ cat <<EOF >./zookeeper-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster-service-1
  name: zookeeper-cluster1
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-cluster-service-1
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster-service-2
  name: zookeeper-cluster2
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-cluster-service-2
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster-service-3
  name: zookeeper-cluster3
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-cluster-service-3
EOF
$ kubectl apply -f zookeeper-svc.yaml
service/zookeeper-cluster1 created
service/zookeeper-cluster2 created
service/zookeeper-cluster3 created
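A quick sanity check before moving on: the three Services should exist and list the 2181/2888/3888 ports (the cluster IPs will differ in your cluster).

$ kubectl get svc zookeeper-cluster1 zookeeper-cluster2 zookeeper-cluster3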
$ cat <<EOF >./zookeeper-deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: zookeeper-cluster-service-1
  name: zookeeper-cluster-1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: zookeeper-cluster-service-1
      name: zookeeper-cluster-1
    spec:
      containers:
      - image: zookeeper:3.4.12
        imagePullPolicy: IfNotPresent
        name: zookeeper-cluster-1
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "1"
        - name: ZOO_SERVERS
          value: "server.1=0.0.0.0:2888:3888 server.2=zookeeper-cluster2:2888:3888 server.3=zookeeper-cluster3:2888:3888"
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: zookeeper-cluster-service-2
  name: zookeeper-cluster-2
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: zookeeper-cluster-service-2
      name: zookeeper-cluster-2
    spec:
      containers:
      - image: zookeeper:3.4.12
        imagePullPolicy: IfNotPresent
        name: zookeeper-cluster-2
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "2"
        - name: ZOO_SERVERS
          value: "server.1=zookeeper-cluster1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zookeeper-cluster3:2888:3888"
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: zookeeper-cluster-service-3
  name: zookeeper-cluster-3
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: zookeeper-cluster-service-3
      name: zookeeper-cluster-3
    spec:
      containers:
      - image: zookeeper:3.4.12
        imagePullPolicy: IfNotPresent
        name: zookeeper-cluster-3
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "3"
        - name: ZOO_SERVERS
          value: "server.1=zookeeper-cluster1:2888:3888 server.2=zookeeper-cluster2:2888:3888 server.3=0.0.0.0:2888:3888"
EOF
$ kubectl apply -f zookeeper-deployment.yaml
deployment.extensions/zookeeper-cluster-1 created
deployment.extensions/zookeeper-cluster-2 created
deployment.extensions/zookeeper-cluster-3 created
$ kubectl logs zookeeper-cluster-1-7cbccbdd6b-f7lql
$ kubectl exec -it zookeeper-cluster-1-7cbccbdd6b-f7lql /bin/bash
$ zkCli.sh
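Before deploying Kafka it is worth confirming that the three servers actually formed an ensemble. A minimal sketch, assuming zkServer.sh and zkCli.sh are on the PATH inside the zookeeper:3.4.12 image and using a hypothetical znode name /k8s-test:

# run inside each ZooKeeper pod: one should report "leader", the other two "follower"
$ zkServer.sh status
# in zkCli.sh on zookeeper-cluster-1: create a test znode
create /k8s-test hello
# in zkCli.sh on zookeeper-cluster-2 or -3: the znode should be visible
get /k8s-test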
$ cat <<EOF >./kafka-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-cluster1
  labels:
    app: kafka-cluster-1
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-cluster-1
    targetPort: 9092
    nodePort: 30091
    protocol: TCP
  selector:
    app: kafka-cluster-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-cluster2
  labels:
    app: kafka-cluster-2
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-cluster-2
    targetPort: 9092
    nodePort: 30092
    protocol: TCP
  selector:
    app: kafka-cluster-2
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-cluster3
  labels:
    app: kafka-cluster-3
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-cluster-3
    targetPort: 9092
    nodePort: 30093
    protocol: TCP
  selector:
    app: kafka-cluster-3
EOF
$ kubectl apply -f kafka-svc.yaml
service/kafka-cluster1 created
service/kafka-cluster2 created
service/kafka-cluster3 created
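The NodePort mappings (30091-30093) can be verified the same way; the assigned node ports show up in the PORT(S) column.

$ kubectl get svc kafka-cluster1 kafka-cluster2 kafka-cluster3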
$ cat <<EOF >./kafka-deployment.yaml
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: kafka-cluster-1
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-cluster-1
  template:
    metadata:
      labels:
        name: kafka-cluster-1
        app: kafka-cluster-1
    spec:
      containers:
      - name: kafka-cluster-1
        image: wurstmeister/kafka
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: "kafka-cluster1"
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zookeeper-cluster1:2181,zookeeper-cluster2:2181,zookeeper-cluster3:2181
        - name: KAFKA_BROKER_ID
          value: "1"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: kafka-cluster-2
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-cluster-2
  template:
    metadata:
      labels:
        name: kafka-cluster-2
        app: kafka-cluster-2
    spec:
      containers:
      - name: kafka-cluster-2
        image: wurstmeister/kafka
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: "kafka-cluster2"
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zookeeper-cluster1:2181,zookeeper-cluster2:2181,zookeeper-cluster3:2181
        - name: KAFKA_BROKER_ID
          value: "2"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: kafka-cluster-3
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-cluster-3
  template:
    metadata:
      labels:
        name: kafka-cluster-3
        app: kafka-cluster-3
    spec:
      containers:
      - name: kafka-cluster-3
        image: wurstmeister/kafka
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: "kafka-cluster3"
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zookeeper-cluster1:2181,zookeeper-cluster2:2181,zookeeper-cluster3:2181
        - name: KAFKA_BROKER_ID
          value: "3"
EOF
$ kubectl apply -f kafka-deployment.yaml
deployment.extensions/kafka-cluster-1 created
deployment.extensions/kafka-cluster-2 created
deployment.extensions/kafka-cluster-3 created
# View the pod logs
$ kubectl logs kafka-cluster-1-5f8b447bc9-wb79q -f
# Start a producer for a produce test
$ kubectl exec -it kafka-cluster-1-5f8b447bc9-ftcd5 /bin/bash
$ kafka-console-producer.sh --broker-list kafka-cluster1:9092 --topic test
# Start a consumer for a consume test
$ kubectl exec -it kafka-cluster-2-6ddcd7c7cb-r4sbp /bin/bash
$ kafka-console-consumer.sh --bootstrap-server kafka-cluster2:9092 --topic test --from-beginning
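Note that the console producer auto-creates the topic with the broker's default replication factor (1 unless configured otherwise), so those messages live on a single broker. To exercise replication across all three brokers, a topic can be created explicitly first. A sketch, assuming the Kafka version in the wurstmeister/kafka image still accepts the --zookeeper flag (newer releases use --bootstrap-server instead) and using a hypothetical topic name test-replicated:

$ kafka-topics.sh --create --zookeeper zookeeper-cluster1:2181 --replication-factor 3 --partitions 3 --topic test-replicated
$ kafka-topics.sh --describe --zookeeper zookeeper-cluster1:2181 --topic test-replicated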

Conclusion from the pod-deletion tests: if all Kafka pods are deleted at once, the data is lost; as long as at least one pod is kept, the recreated pods automatically resynchronize the data from it after they come back up.
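A sketch of that failure test, using label selectors instead of concrete pod names (which change on every restart) and assuming the app=kafka-cluster-3 label from the manifests above:

# delete one broker; its Deployment recreates the pod
$ kubectl delete pod -l app=kafka-cluster-3
$ kubectl get pod -l app=kafka-cluster-3 -w
# once the new pod is Running, re-read the topic from the beginning to check which messages survived
$ kubectl exec -it $(kubectl get pod -l app=kafka-cluster-3 -o jsonpath='{.items[0].metadata.name}') /bin/bash
$ kafka-console-consumer.sh --bootstrap-server kafka-cluster3:9092 --topic test --from-beginning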