首先安装好GlusterFS集群
K8S 的存储卷使用稍有点古怪,Gluster FS 的使用,需要首先定义一个 Endpoint + Service 形式的代理,来定义 Gluster FS 集群,然后就可以通过持久卷或者用 Pod 直接加载了。
准备
(1)、在各节点安装客户端
# yum install -y glusterfs glusterfs-fuse
(2)、配置endpoints
# curl -O https://raw.githubusercontent.com/kubernetes/examples/master/volumes/glusterfs/glusterfs-endpoints.json
修改glusterfs-endpoints.json,配置GlusterFS集群信息
{
"kind": "Endpoints",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs-cluster"
},
"subsets": [
{
"addresses": [
{
"ip": "10.1.10.128"
}
],
"ports": [
{
"port": 2020
}
]
}
]
}
port可以随意写,ip为GlusterFS的IP地址
创建配置文件
# kubectl apply -f glusterfs-endpoints.json
# kubectl get ep
NAME ENDPOINTS AGE
glusterfs-cluster 10.1.10.128:2020 7m26s
kubernetes 10.1.10.128:6443 27d
(3)、创建Service
curl -O https://raw.githubusercontent.com/kubernetes/examples/master/volumes/glusterfs/glusterfs-service.json
修改配置文件,我这里仅修改了端口
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs-cluster"
},
"spec": {
"ports": [
{"port": 2020}
]
}
}
创建配置文件对象
# kubectl apply -f glusterfs-service.json
# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
glusterfs-cluster ClusterIP 10.254.44.189 <none> 2020/TCP 10m
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 27d
测试
创建测试Pod
(1)、下载测试pod文件
curl -O https://raw.githubusercontent.com/kubernetes/examples/master/volumes/glusterfs/glusterfs-pod.json
(2)、修改配置文件,修改volumes下的path为我们上面创建的volume名
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "glusterfs"
},
"spec": {
"containers": [
{
"name": "glusterfs",
"image": "nginx",
"volumeMounts": [
{
"mountPath": "/mnt/glusterfs",
"name": "glusterfsvol"
}
]
}
],
"volumes": [
{
"name": "glusterfsvol",
"glusterfs": {
"endpoints": "glusterfs-cluster",
"path": "models",
"readOnly": true
}
}
]
}
}
(3)、创建Pod
# kubectl apply -f glusterfs-pod.json
# kubectl get pod
NAME READY STATUS RESTARTS AGE
glusterfs 1/1 Running 0 51s
pod-demo 1/1 Running 8 25h
# kubectl exec -it glusterfs -- df -h
Filesystem Size Used Avail Use% Mounted on
overlay 17G 2.5G 15G 15% /
tmpfs 64M 0 64M 0% /dev
tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/mapper/centos-root 17G 2.5G 15G 15% /etc/hosts
10.1.10.128:gluster_volume 17G 5.3G 12G 31% /mnt/glusterfs
shm 64M 0 64M 0% /dev/shm
tmpfs 910M 12K 910M 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs 910M 0 910M 0% /proc/acpi
tmpfs 910M 0 910M 0% /proc/scsi
tmpfs 910M 0 910M 0% /sys/firmware
我们可以看到挂载成功。
用PVC挂载
1、直接创建PV和PVC进行关联
(1)、创建PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: glusterfs-pv
spec:
capacity:
storage: 5Mi
accessModes:
- ReadWriteMany
glusterfs:
endpoints: glusterfs-cluster
path: gluster_volume
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: glusterfs-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Mi
(2)、创建存储
# kubectl apply -f glusterfs-pv.yaml
# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
glusterfs-pv 5Mi RWX Retain Bound default/glusterfs-pvc 15s
# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-pvc Bound glusterfs-pv 5Mi RWX 18s
2、用SC动态创建PVC
2.1、安装Heketi
Heketi提供了一个RESTful管理界面,可以用来管理GlusterFS卷的生命周期。Heketi会动态在集群内选择bricks构建所需的volumes,从而确保数据的副本会分散到集群不同的故障域内。同时Heketi还支持任意数量的GlusterFS集群。
(1)、安装
# yum -y install heketi heketi-client
(2)、配置Heketi
/etc/heketi/heketi.json(注意:下面 # 后的中文注释仅作说明,JSON 文件本身不支持注释,实际配置时需删除)
{
"_port_comment": "Heketi Server Port Number",
"port": "48080", # 请求端口,默认是8080
"_use_auth": "Enable JWT authorization. Please enable for deployment",
"use_auth": false,
"_jwt": "Private keys for access",
"jwt": {
"_admin": "Admin has access to all APIs",
"admin": {
"key": "admin@P@ssW0rd" # 管理员密码
},
"_user": "User only has access to /volumes endpoint",
"user": {
"key": "user@P@ssW0rd" # 普通用户密码
}
},
"_glusterfs_comment": "GlusterFS Configuration",
"glusterfs": {
"_executor_comment": [
"Execute plugin. Possible choices: mock, ssh",
"mock: This setting is used for testing and development.",
" It will not send commands to any node.",
"ssh: This setting will notify Heketi to ssh to the nodes.",
" It will need the values in sshexec to be configured.",
"kubernetes: Communicate with GlusterFS containers over",
" Kubernetes exec api."
],
"executor": "ssh",
"_sshexec_comment": "SSH username and private key file information",
"sshexec": {
"keyfile": "/etc/hekeit/private_key", # ssh私钥目录
"user": "root", # ssh用户
"port": "22", # ssh端口
"fstab": "/etc/fstab"
},
"_kubeexec_comment": "Kubernetes configuration",
"kubeexec": {
"host" :"https://kubernetes.host:8443",
"cert" : "/path/to/crt.file",
"insecure": false,
"user": "kubernetes username",
"password": "password for kubernetes user",
"namespace": "OpenShift project or Kubernetes namespace",
"fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
},
"_db_comment": "Database file name",
"db": "/var/lib/heketi/heketi.db",
"_loglevel_comment": [
"Set log level. Choices are:",
" none, critical, error, warning, info, debug",
"Default is warning"
],
"loglevel" : "debug"
}
}
(3)、配置免密
# ssh-keygen -t rsa -q -f /etc/heketi/private_key -N ""
# ssh-copy-id -i /etc/heketi/private_key.pub root@10.1.10.128
# ssh-copy-id -i /etc/heketi/private_key.pub root@10.1.10.129
# ssh-copy-id -i /etc/heketi/private_key.pub root@10.1.10.130
(4)、启动Heketi
# systemctl enable heketi.service && systemctl start heketi.service
# 测试
# curl http://10.1.10.128:48080/hello
Hello from Heketi
(5)、配置Topology
{
"clusters": [
{
"nodes": [
{
"node": {
"hostnames": {
"manage": [
"glusterfs-master"
],
"storage": [
"10.1.10.128"
]
},
"zone": 1
},
"devices": [
"/dev/sdb1" # 必须是未创建文件系统的裸磁盘
]
},
{
"node": {
"hostnames": {
"manage": [
"glusterfs-node01"
],
"storage": [
"10.1.10.129"
]
},
"zone": 1
},
"devices": [
"/dev/sdb1"
]
},
{
"node": {
"hostnames": {
"manage": [
"glusterfs-node02"
],
"storage": [
"10.1.10.130"
]
},
"zone": 1
},
"devices": [
"/dev/sdb1"
]
}
]
}
]
}
# echo "export HEKETI_CLI_SERVER=http://10.1.10.128:48080" >> /etc/profile.d/heketi.sh
# echo "alias heketi-cli='heketi-cli --user admin --secret admin@P@ssW0rd'" >> ~/.bashrc
# source /etc/profile.d/heketi.sh
# source ~/.bashrc
# echo $HEKETI_CLI_SERVER
http://10.1.10.128:48080
添加Cluster
# heketi-cli --server $HEKETI_CLI_SERVER --user admin --secret admin@P@ssW0rd topology load --json=/etc/heketi/topology.json
Creating cluster ... ID: 48a31ff76104514d187f7e7ef8c2e056
Allowing file volumes on cluster.
Allowing block volumes on cluster.
Creating node glusterfs-master ... ID: e374f9d94f47e0fd3223732a383b7d03
Adding device /dev/sdb1 ... OK
Creating node glusterfs-node01 ... ID: 68f16b2d54acf1c18e354ec46aa736ad
Adding device /dev/sdb1 ... OK
Creating node glusterfs-node02 ... ID: 86f5a124f8c4c296042d6207a641cf81
Adding device /dev/sdb1 ... OK
查看集群信息
# heketi-cli cluster list
Clusters:
Id:48a31ff76104514d187f7e7ef8c2e056 [file][block]
# 查看详细信息
# heketi-cli cluster info 48a31ff76104514d187f7e7ef8c2e056
Cluster id: 48a31ff76104514d187f7e7ef8c2e056
Nodes:
68f16b2d54acf1c18e354ec46aa736ad
86f5a124f8c4c296042d6207a641cf81
e374f9d94f47e0fd3223732a383b7d03
Volumes:
Block: true
File: true
# 查看节点信息
# heketi-cli node list
Id:68f16b2d54acf1c18e354ec46aa736ad Cluster:48a31ff76104514d187f7e7ef8c2e056
Id:86f5a124f8c4c296042d6207a641cf81 Cluster:48a31ff76104514d187f7e7ef8c2e056
Id:e374f9d94f47e0fd3223732a383b7d03 Cluster:48a31ff76104514d187f7e7ef8c2e056
# 查看节点详细信息
# heketi-cli node info 68f16b2d54acf1c18e354ec46aa736ad
Node Id: 68f16b2d54acf1c18e354ec46aa736ad
State: online
Cluster Id: 48a31ff76104514d187f7e7ef8c2e056
Zone: 1
Management Hostname: glusterfs-node01
Storage Hostname: 10.1.10.129
Devices:
Id:dacaffdbd8a2a00bb76acdc7b2420fbd Name:/dev/sdb1 State:online Size (GiB):500 Used (GiB):0 Free (GiB):500 Bricks:0
创建volume
# heketi-cli volume create --size=2 --replica=2
Name: vol_4f1a171ab06adf80460c84f2132e96e0
Size: 2
Volume Id: 4f1a171ab06adf80460c84f2132e96e0
Cluster Id: 48a31ff76104514d187f7e7ef8c2e056
Mount: 10.1.10.129:vol_4f1a171ab06adf80460c84f2132e96e0
Mount Options: backup-volfile-servers=10.1.10.130,10.1.10.128
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 2
# heketi-cli volume list
Id:4f1a171ab06adf80460c84f2132e96e0 Cluster:48a31ff76104514d187f7e7ef8c2e056 Name:vol_4f1a171ab06adf80460c84f2132e96e0
# heketi-cli volume info 4f1a171ab06adf80460c84f2132e96e0
Name: vol_4f1a171ab06adf80460c84f2132e96e0
Size: 2
Volume Id: 4f1a171ab06adf80460c84f2132e96e0
Cluster Id: 48a31ff76104514d187f7e7ef8c2e056
Mount: 10.1.10.129:vol_4f1a171ab06adf80460c84f2132e96e0
Mount Options: backup-volfile-servers=10.1.10.130,10.1.10.128
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 2
# 挂载
# mount -t glusterfs 10.1.10.129:vol_4f1a171ab06adf80460c84f2132e96e0 /mnt
# 删除
# heketi-cli volume delete 4f1a171ab06adf80460c84f2132e96e0
2.2、在k8s中测试
(1)、创建连接使用的secret(heketi-secret.yaml)
apiVersion: v1
kind: Secret
metadata:
name: heketi-secret
data:
key: YWRtaW5AUEBzc1cwcmQ=
type: kubernetes.io/glusterfs
(2)、创建sc(heketi-storageclass.yaml)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: heketi-storageclass
parameters:
resturl: "http://10.1.10.128:48080"
clusterid: "cca360f44db482f03297a151886eea19"
restauthenabled: "true" #若heketi开启认证此处也必须开启auth认证
restuser: "admin"
secretName: "heketi-secret" #name/namespace与secret资源中定义一致
secretNamespace: "default"
volumetype: "replicate:3"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
(3)、创建pvc(heketi-pvc.yaml)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: heketi-pvc
annotations:
volume.beta.kubernetes.io/storage-class: heketi-storageclass
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
(4)、查看PVC情况
# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
heketi-storageclass kubernetes.io/glusterfs Delete Immediate false 6m53s
# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
glusterfs-pvc Bound glusterfs-pv 5Mi RWX 26h
heketi-pvc Bound pvc-0feb8666-6e7f-451d-ae6f-7f205206b225 1Gi RWO heketi-storageclass 82s
(5)、创建Pod测试挂载
heketi-pod.yaml
kind: Pod
apiVersion: v1
metadata:
name: heketi-pod
spec:
containers:
- name: heketi-container
image: busybox
command:
- sleep
- "3600"
volumeMounts:
- name: heketi-volume
mountPath: "/pv-data"
readOnly: false
volumes:
- name: heketi-volume
persistentVolumeClaim:
claimName: heketi-pvc
创建Pod并查看结果
# kubectl get pod
NAME READY STATUS RESTARTS AGE
glusterfs 1/1 Running 0 26h
heketi-pod 1/1 Running 0 2m55s
创建文件测试
# kubectl exec -it heketi-pod -- /bin/sh
/ # cd /pv-data/
/pv-data # echo "text" > 1111.txt
/pv-data # ls
1111.txt
然后在本地查看
# cd /var/lib/heketi/mounts/vg_bffb11849513dded78f671f64e76750c/brick_6ff640a2d45a7f146a296473e7145ee7
[root@k8s-master brick_6ff640a2d45a7f146a296473e7145ee7]# ll
total 0
drwxrwsr-x 3 root 2000 40 Feb 7 14:27 brick
[root@k8s-master brick_6ff640a2d45a7f146a296473e7145ee7]# cd brick/
[root@k8s-master brick]# ll
total 4
-rw-r--r-- 2 root 2000 5 Feb 7 14:27 1111.txt