1. Pull the image

    docker pull leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
    docker pull k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
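The StatefulSet below references the leolee32 mirror image directly. If you prefer the upstream k8s.gcr.io name but that registry is unreachable from your nodes, a possible workaround (an assumption, not required by the manifests in this guide) is to retag the mirror locally:

    # Retag the mirror so it can be referenced under the upstream name
    docker tag leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10 k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10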

2. Create the namespace

    # Create the namespace
    kubectl create ns bigdata
    # List namespaces
    kubectl get ns
    kubectl get namespace
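Optionally, you can make bigdata the default namespace of the current kubectl context, so that the -n bigdata flag can be omitted from later commands:

    kubectl config set-context --current --namespace=bigdata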

3. Create the PVs

    cd /home/bigdata/k8s
    vi zkpv.yaml

PV manifest (zkpv.yaml):

    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv-zk1
      namespace: bigdata
      annotations:
        volume.beta.kubernetes.io/storage-class: "anything"
      labels:
        type: local
    spec:
      capacity:
        storage: 3Gi
      accessModes:
        - ReadWriteOnce
      hostPath:
        path: "/data/zookeeper1"
      persistentVolumeReclaimPolicy: Recycle
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv-zk2
      namespace: bigdata
      annotations:
        volume.beta.kubernetes.io/storage-class: "anything"
      labels:
        type: local
    spec:
      capacity:
        storage: 3Gi
      accessModes:
        - ReadWriteOnce
      hostPath:
        path: "/data/zookeeper2"
      persistentVolumeReclaimPolicy: Recycle
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv-zk3
      namespace: bigdata
      annotations:
        volume.beta.kubernetes.io/storage-class: "anything"
      labels:
        type: local
    spec:
      capacity:
        storage: 3Gi
      accessModes:
        - ReadWriteOnce
      hostPath:
        path: "/data/zookeeper3"
      persistentVolumeReclaimPolicy: Recycle
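These PersistentVolumes use hostPath storage, so the backing directories should exist on the node(s) where the ZooKeeper pods can be scheduled. A minimal sketch, assuming you create them on every worker node (paths taken from the manifest above):

    mkdir -p /data/zookeeper1 /data/zookeeper2 /data/zookeeper3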

Create the PVs:

    kubectl create -f zkpv.yaml

View the PVs:

    kubectl get pv
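All three volumes should show an Available status until they are claimed. Illustrative output (columns abbreviated; exact formatting varies by kubectl version):

    NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM
    pv-zk1   3Gi        RWO            Recycle          Available
    pv-zk2   3Gi        RWO            Recycle          Available
    pv-zk3   3Gi        RWO            Recycle          Available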

4. Deploy ZooKeeper

    cd /home/bigdata/k8s
    vi zksts.yaml

Services, PodDisruptionBudget, and StatefulSet manifest (zksts.yaml):

    apiVersion: v1
    kind: Service
    metadata:
      name: zk-hs
      namespace: bigdata
      labels:
        app: zk
    spec:
      ports:
        - port: 2888
          name: server
        - port: 3888
          name: leader-election
      clusterIP: None
      selector:
        app: zk
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: zk-cs
      namespace: bigdata
      labels:
        app: zk
    spec:
      type: NodePort
      ports:
        - port: 2181
          nodePort: 32181
          name: client
      selector:
        app: zk
    ---
    apiVersion: policy/v1beta1
    kind: PodDisruptionBudget
    metadata:
      name: zk-pdb
      namespace: bigdata
    spec:
      selector:
        matchLabels:
          app: zk
      maxUnavailable: 1
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: zk
      namespace: bigdata
    spec:
      selector:
        matchLabels:
          app: zk
      serviceName: zk-hs
      replicas: 3
      podManagementPolicy: Parallel
      updateStrategy:
        type: RollingUpdate
      template:
        metadata:
          labels:
            app: zk
        spec:
          containers:
            - name: kubernetes-zookeeper
              imagePullPolicy: IfNotPresent
              image: "leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10"
              resources:
                requests:
                  memory: "128Mi"
                  cpu: "0.1"
              ports:
                - containerPort: 2181
                  name: client
                - containerPort: 2888
                  name: server
                - containerPort: 3888
                  name: leader-election
              command:
                - sh
                - -c
                - "start-zookeeper \
                  --servers=3 \
                  --data_dir=/var/lib/zookeeper/data \
                  --data_log_dir=/var/lib/zookeeper/data/log \
                  --conf_dir=/opt/zookeeper/conf \
                  --client_port=2181 \
                  --election_port=3888 \
                  --server_port=2888 \
                  --tick_time=2000 \
                  --init_limit=10 \
                  --sync_limit=5 \
                  --heap=512M \
                  --max_client_cnxns=60 \
                  --snap_retain_count=3 \
                  --purge_interval=12 \
                  --max_session_timeout=40000 \
                  --min_session_timeout=4000 \
                  --log_level=INFO"
              readinessProbe:
                exec:
                  command:
                    - sh
                    - -c
                    - "zookeeper-ready 2181"
                initialDelaySeconds: 10
                timeoutSeconds: 5
              livenessProbe:
                exec:
                  command:
                    - sh
                    - -c
                    - "zookeeper-ready 2181"
                initialDelaySeconds: 10
                timeoutSeconds: 5
              volumeMounts:
                - name: datadir
                  mountPath: /var/lib/zookeeper
      volumeClaimTemplates:
        - metadata:
            name: datadir
            annotations:
              volume.beta.kubernetes.io/storage-class: "anything"
          spec:
            accessModes: [ "ReadWriteOnce" ]
            resources:
              requests:
                storage: 3Gi
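Note that the policy/v1beta1 API for PodDisruptionBudget was removed in Kubernetes 1.25. On clusters running 1.21 or later, the same budget can be declared with the GA API; only the apiVersion changes:

    apiVersion: policy/v1
    kind: PodDisruptionBudget
    metadata:
      name: zk-pdb
      namespace: bigdata
    spec:
      selector:
        matchLabels:
          app: zk
      maxUnavailable: 1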

Note: the exposed NodePort must fall within the valid range of 30000-32767.
Create the StatefulSet:

    kubectl create -f zksts.yaml
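You can watch the rollout until all three replicas are up:

    kubectl rollout status statefulset/zk -n bigdata
    kubectl get statefulset zk -n bigdata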

5. Verify

View the pods:

    kubectl get pods -n bigdata
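During startup it can be convenient to watch the pods by label until they settle:

    kubectl get pods -n bigdata -l app=zk -w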

If all three pods are in the Running state, the ensemble started successfully.
Check the hostnames (the StatefulSet names them zk-0, zk-1, and zk-2):

    for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- hostname; done

Check each server's myid:

    for i in 0 1 2; do echo "myid zk-$i"; kubectl exec zk-$i -n bigdata -- cat /var/lib/zookeeper/data/myid; done
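The start-zookeeper script derives each myid from the pod ordinal, so the expected output (assuming the standard kubernetes-zookeeper image behaviour) is:

    myid zk-0
    1
    myid zk-1
    2
    myid zk-2
    3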

Check the fully qualified domain names:

    for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- hostname -f; done
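Each pod's FQDN follows the StatefulSet pattern <pod>.<headless-service>.<namespace>.svc.<cluster-domain>, so with the default cluster.local domain the expected names are:

    zk-0.zk-hs.bigdata.svc.cluster.local
    zk-1.zk-hs.bigdata.svc.cluster.local
    zk-2.zk-hs.bigdata.svc.cluster.local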

Check the ZooKeeper server status:

    for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- zkServer.sh status; done
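A healthy ensemble has exactly one leader and two followers. Output trimmed to the Mode lines (which pod becomes leader will vary):

    Mode: follower
    Mode: leader
    Mode: follower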

Client access (ZooInspector):
Any Kubernetes node address plus the exposed NodePort (LTSR003:32181 / LTSR005:32181 / LTSR006:32181) can be used to connect to ZooKeeper.
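Besides ZooInspector, you can smoke-test the NodePort from any machine that can reach the cluster nodes; a minimal sketch, assuming LTSR003 resolves to one of the node IPs and that nc and a local ZooKeeper installation are available:

    # Four-letter-word health check; a healthy server answers "imok"
    echo ruok | nc LTSR003 32181
    # Or connect with the ZooKeeper CLI
    zkCli.sh -server LTSR003:32181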