1. 拉取镜像
docker pull leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
docker pull k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
2. 创建命名空间
# 创建命名空间
kubectl create ns bigdata
# 查看命名空间
kubectl get ns
kubectl get namespace
3. 创建pv
cd /home/bigdata/k8s
vi zkpv.yaml
编排pv:
---
# Three hostPath PersistentVolumes (3Gi each), one per ZooKeeper replica.
# NOTE(review): PersistentVolume is cluster-scoped — a metadata.namespace field
# is ignored by the API server, so it has been dropped here (the original set
# "namespace: bigdata" on each PV).
# NOTE(review): reclaimPolicy "Recycle" is deprecated; prefer dynamic
# provisioning with "Delete" or manual cleanup with "Retain" on newer clusters.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-zk1
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
  labels:
    type: local
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data/zookeeper1"
  persistentVolumeReclaimPolicy: Recycle
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-zk2
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
  labels:
    type: local
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data/zookeeper2"
  persistentVolumeReclaimPolicy: Recycle
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-zk3
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
  labels:
    type: local
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/data/zookeeper3"
  persistentVolumeReclaimPolicy: Recycle
创建pv:
kubectl create -f zkpv.yaml
查看pv:
kubectl get pv
4. 编排ZooKeeper
cd /home/bigdata/k8s
vi zksts.yaml
服务发现实例编排:
---
# Headless service: clusterIP None gives each pod a stable DNS name
# (zk-0.zk-hs.bigdata.svc...) for peer quorum and leader-election traffic.
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: bigdata
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
# Client-facing service; NodePort must stay within the valid 30000-32767 range.
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: bigdata
  labels:
    app: zk
spec:
  type: NodePort
  ports:
    - port: 2181
      nodePort: 32181
      name: client
  selector:
    app: zk
---
# Allow at most one ZooKeeper pod to be voluntarily disrupted at a time,
# preserving quorum (2 of 3) during node drains.
# NOTE(review): policy/v1beta1 was removed in Kubernetes 1.25 — change to
# apiVersion: policy/v1 on clusters >= 1.21.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  # Fixed: the original declared updateStrategy twice (a duplicate mapping
  # key — invalid YAML 1.2, silently last-wins on most parsers); kept once.
  updateStrategy:
    type: RollingUpdate
  # Parallel lets all pods start/terminate together; ZooKeeper's own startup
  # script handles membership, so ordered startup is not required.
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      containers:
        - name: kubernetes-zookeeper
          imagePullPolicy: IfNotPresent
          image: "leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10"
          resources:
            requests:
              memory: "128Mi"
              cpu: "0.1"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
              --servers=3 \
              --data_dir=/var/lib/zookeeper/data \
              --data_log_dir=/var/lib/zookeeper/data/log \
              --conf_dir=/opt/zookeeper/conf \
              --client_port=2181 \
              --election_port=3888 \
              --server_port=2888 \
              --tick_time=2000 \
              --init_limit=10 \
              --sync_limit=5 \
              --heap=512M \
              --max_client_cnxns=60 \
              --snap_retain_count=3 \
              --purge_interval=12 \
              --max_session_timeout=40000 \
              --min_session_timeout=4000 \
              --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
  # One 3Gi PVC per replica; bound to the pre-created pv-zk1/2/3 via the
  # matching storage-class annotation.
  volumeClaimTemplates:
    - metadata:
        name: datadir
        annotations:
          volume.beta.kubernetes.io/storage-class: "anything"
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 3Gi
注意:暴露端口,需要遵守“The range of valid ports is 30000-32767”。
创建sts:
kubectl create -f zksts.yaml
5. 验证
查看pods:
kubectl get pods -n bigdata
如果三个pod都处于“running”状态则说明启动成功了。
查看主机名:
for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- hostname; done
查看myid:
for i in 0 1 2; do echo "myid zk-$i";kubectl exec zk-$i -n bigdata -- cat /var/lib/zookeeper/data/myid; done
查看完整域名:
for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- hostname -f; done
查看ZooKeeper状态:
for i in 0 1 2; do kubectl exec zk-$i -n bigdata -- zkServer.sh status; done
客户端查看(ZooInspector):
使用K8s的任意节点加上暴露端口号(LTSR003:32181/LTSR005:32181/LTSR006:32181)均可连接ZooKeeper。