Prepare NFS
Note: the following steps must be performed on the NFS server.
- Create the shared directories Nacos needs
mkdir -p /data/nfs-share
mkdir -p /data/mysql-master
mkdir -p /data/mysql-slave
- Grant read/write permissions on the directories
chmod a+rw /data/nfs-share
chmod a+rw /data/mysql-master
chmod a+rw /data/mysql-slave
- Configure the NFS export directories
vi /etc/exports
# Append at the bottom
/data/nfs-share *(rw,sync,no_subtree_check,no_root_squash)
/data/mysql-master *(rw,sync,no_subtree_check,no_root_squash)
/data/mysql-slave *(rw,sync,no_subtree_check,no_root_squash)
- Restart the service so the configuration takes effect
/etc/init.d/nfs-kernel-server restart
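To confirm the exports took effect, list them on the server, or query them from any client that has showmount installed (the IP below is this guide's example NFS server address):
# On the NFS server: show active exports
exportfs -v
# From a client: list what the server exports
showmount -e 192.168.1.220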
Clone Nacos
git clone https://github.com/nacos-group/nacos-k8s.git
cd nacos-k8s
# all paths below are relative to the repository root
Deploy NFS
- Create the RBAC roles
kubectl create -f deploy/nfs/rbac.yaml
- Create the ServiceAccount and deploy the NFS-Client Provisioner
vi deploy/nfs/deployment.yaml
# Make the following changes
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccount: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              # change to your NFS server IP
              value: 192.168.1.220
            - name: NFS_PATH
              # change to your NFS shared directory
              value: /data/nfs-share
      volumes:
        - name: nfs-client-root
          nfs:
            # change to your NFS server IP
            server: 192.168.1.220
            # change to your NFS shared directory
            path: /data/nfs-share
kubectl create -f deploy/nfs/deployment.yaml
- Create the NFS StorageClass
kubectl create -f deploy/nfs/class.yaml
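For reference, the class.yaml in the repository looks roughly like the following; the provisioner field must match the PROVISIONER_NAME environment variable set above, and managed-nfs-storage is the repository's default class name:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs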
- Verify that NFS was deployed successfully
kubectl get pod -l app=nfs-client-provisioner
# Expected output
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-5f9f96c4dc-mrzr6 1/1 Running 0 177m
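Optionally, smoke-test dynamic provisioning with a throwaway PVC. The test-claim name is made up for this check, and managed-nfs-storage assumes the class name from the repository's class.yaml:
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Mi
EOF
# the claim should become Bound within a few seconds
kubectl get pvc test-claim
kubectl delete pvc test-claim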
Deploy the Databases
- Deploy the master
vi deploy/mysql/mysql-master-nfs.yaml
# Make the following changes
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-master
  labels:
    name: mysql-master
spec:
  replicas: 1
  selector:
    name: mysql-master
  template:
    metadata:
      labels:
        name: mysql-master
    spec:
      containers:
        - name: master
          image: nacos/nacos-mysql-master:latest
          ports:
            - containerPort: 3306
          volumeMounts:
            - name: mysql-master-data
              mountPath: /var/lib/mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "root"
            - name: MYSQL_DATABASE
              value: "nacos_devtest"
            - name: MYSQL_USER
              value: "nacos"
            - name: MYSQL_PASSWORD
              value: "nacos"
            - name: MYSQL_REPLICATION_USER
              value: "nacos_ru"
            - name: MYSQL_REPLICATION_PASSWORD
              value: "nacos_ru"
      volumes:
        - name: mysql-master-data
          nfs:
            # change to your NFS server IP
            server: 192.168.1.220
            # change to your NFS shared directory
            path: /data/mysql-master
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  labels:
    name: mysql-master
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    name: mysql-master
kubectl create -f deploy/mysql/mysql-master-nfs.yaml
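Once the master pod is Running, you can sanity-check the schema. The root password comes from MYSQL_ROOT_PASSWORD above; the pod name suffix differs per cluster, so it is looked up by label here:
MASTER_POD=$(kubectl get pod -l name=mysql-master -o jsonpath='{.items[0].metadata.name}')
kubectl exec $MASTER_POD -- mysql -uroot -proot -e "SHOW DATABASES;"
# nacos_devtest, created via MYSQL_DATABASE, should appear in the list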
- Deploy the slave
vi deploy/mysql/mysql-slave-nfs.yaml
# Make the following changes
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-slave
  labels:
    name: mysql-slave
spec:
  replicas: 1
  selector:
    name: mysql-slave
  template:
    metadata:
      labels:
        name: mysql-slave
    spec:
      containers:
        - name: slave
          image: nacos/nacos-mysql-slave:latest
          ports:
            - containerPort: 3306
          volumeMounts:
            - name: mysql-slave-data
              mountPath: /var/lib/mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "root"
            - name: MYSQL_REPLICATION_USER
              value: "nacos_ru"
            - name: MYSQL_REPLICATION_PASSWORD
              value: "nacos_ru"
      volumes:
        - name: mysql-slave-data
          nfs:
            # change to your NFS server IP
            server: 192.168.1.220
            # change to your NFS shared directory
            path: /data/mysql-slave
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  labels:
    name: mysql-slave
spec:
  ports:
    - port: 3306
      targetPort: 3306
  selector:
    name: mysql-slave
kubectl create -f deploy/mysql/mysql-slave-nfs.yaml
- Verify that the databases are working
kubectl get pod
# Expected output
NAME READY STATUS RESTARTS AGE
mysql-master-82tk4 1/1 Running 1 176m
mysql-slave-2pbkk 1/1 Running 0 174m
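To confirm that master-slave replication is actually running, and not just that the pods are up, check the slave's replication threads. The pod name is taken from the output above and will differ in your cluster:
kubectl exec mysql-slave-2pbkk -- mysql -uroot -proot -e "SHOW SLAVE STATUS\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"
# both should report Yes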
Deploy Nacos
- Create Nacos
kubectl create -f deploy/nacos/nacos-pvc-nfs.yaml
- Verify that the Nacos nodes started successfully
kubectl get pod -l app=nacos
# Expected output
NAME READY STATUS RESTARTS AGE
nacos-0 1/1 Running 0 174m
nacos-1 1/1 Running 0 171m
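Each Nacos replica should also have had a PersistentVolumeClaim dynamically provisioned for it by the NFS-Client Provisioner; you can confirm this with:
kubectl get pvc | grep nacos
# each claim should be Bound to an automatically created PV backed by the NFS share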
Scale Out to Elect a Leader
Note: the StatefulSet controller gives each Pod a unique hostname based on its ordinal index, in the form <statefulset name>-<ordinal>. Because the nacos StatefulSet's replicas field is set to 2, the cluster file currently holds only two Nacos node addresses, so a Leader cannot be elected; we therefore need to scale out manually.
- Before scaling, use kubectl exec to read the Nacos cluster configuration file inside each pod
for i in 0 1; do echo nacos-$i; kubectl exec nacos-$i -- cat conf/cluster.conf; done
- Use kubectl scale to scale Nacos out dynamically
kubectl scale sts nacos --replicas=3
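You can wait for the new replica to come up before re-checking the cluster file:
kubectl rollout status sts nacos
kubectl get pod -l app=nacos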
- After scaling, use kubectl exec again to read the cluster configuration file inside each pod, now including the new node
for i in 0 1 2; do echo nacos-$i; kubectl exec nacos-$i -- cat conf/cluster.conf; done
- Use kubectl exec to call the Nacos API on every node and check that they all report the same Leader
for i in 0 1 2; do echo nacos-$i; kubectl exec nacos-$i -- curl -s -X GET "http://localhost:8848/nacos/v1/ns/raft/state"; done
Note: a Leader may still fail to be elected after scaling out. In that case, delete the Pods manually with kubectl delete pods nacos-$i, let Kubernetes recreate them, and then watch the naming-raft.log log.
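A quick way to watch that log is to tail it inside a pod. The logs/ path is relative to the container's working directory, /home/nacos in the official image; verify the path in your image if this fails:
kubectl exec nacos-0 -- tail -n 50 logs/naming-raft.log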
Route to Nacos
- Create an Ingress configuration file named nacos.yml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nacos-web
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-body-size: "10m"
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - host: nacos.hzlim.cn
      http:
        paths:
          - path:
            backend:
              serviceName: nacos-headless
              servicePort: 8848
- Deploy the Ingress
kubectl apply -f nacos.yml
- Verify that it works (don't forget to add the host to your hosts file)
Open a browser and visit http://nacos.hzlim.cn/nacos/
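From the command line, you can first confirm that the Ingress resource exists and that the route responds; a 200, or a 302 redirect to the console login page depending on the Nacos version, means the Ingress is forwarding correctly:
kubectl get ingress nacos-web
curl -I http://nacos.hzlim.cn/nacos/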
