k8s v1.25.0 persistent storage with ceph-csi (Ceph v17.2.3, RBD block storage)
Connecting Ceph v17.2.3 to k8s v1.25.0 (dynamic provisioning):
1. Deploy a Ceph v17.2.3 cluster with cephadm
2. Deploy ceph-csi v3.7.0 on k8s v1.25.0
Environment: Ceph cluster + k8s cluster (Ubuntu 22.04)
ceph-v17.2.3
k8s-v1.25.0 (containerd-1.6.8)
ceph-csi-v3.7.0
Dynamic PVC provisioning
Ceph cluster operations
ceph osd pool create kubernetes 8 8
ceph osd pool application enable kubernetes rbd
rbd pool init -p kubernetes
ceph osd pool ls
ceph mon dump
ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3}'
ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kubernetes'
ceph auth get-key client.kube
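Optional sanity check on the Ceph side before moving on (the names match the pool and client created above; the output will differ per cluster):
# confirm the pool exists and is tagged for rbd, and that client.kube has the expected caps
ceph osd pool ls detail | grep kubernetes
ceph auth get client.kube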
k8s cluster operations
apt update
apt install ceph-common=17.2.0-0ubuntu0.22.04.1 -y
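A 17.x client matches the v17.2.3 cluster; a quick check:
# confirm the installed ceph client version
ceph -v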
mkdir ~/{ceph-csi,000}
git clone https://github.com/ceph/ceph-csi.git
# alternatively, clone via the fastgit mirror if github.com is unreachable
cd ~/000 && git clone https://hub.fastgit.xyz/ceph/ceph-csi.git
kubectl create ns ceph-csi
kubectl describe node master |grep Taint
# on k8s v1.25 the control-plane node carries the node-role.kubernetes.io/control-plane taint; remove it so the csi pods can also schedule there
kubectl taint node master node-role.kubernetes.io/control-plane-
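Untainting matters because csi-rbdplugin-provisioner (defined below) runs 3 replicas with hostname anti-affinity, so on a small cluster the control-plane node has to be schedulable. A quick re-check (assuming the node is named master):
kubectl describe node master |grep Taint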
1. csi-config-map.yaml
# 1、csi-config-map.yaml
cat > ~/ceph-csi/csi-config-map.yaml << 'EOF'
---
apiVersion: v1
kind: ConfigMap
data:
config.json: |-
[
{
"clusterID": "4c4d9c82-267d-11ed-888e-000c29db6f93",
"monitors": [
"192.168.1.204:6789",
"192.168.1.205:6789",
"192.168.1.206:6789",
"192.168.1.207:6789"
]
}
]
metadata:
name: ceph-csi-config
namespace: ceph-csi
EOF
clusterID is the Ceph cluster fsid; look it up on the Ceph cluster with:
ceph mon dump
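For scripting, the bare fsid value can be extracted like this (a minimal sketch; the awk pattern is just one way to do it):
# print only the fsid value
ceph fsid
# or pull it out of the mon dump
ceph mon dump 2>/dev/null | awk '/^fsid/{print $2}'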
2. csi-nodeplugin-rbac.yaml
3. csi-provisioner-rbac.yaml
4. csidriver.yaml
5. csi-rbdplugin-provisioner.yaml
6. csi-rbdplugin.yaml
# 2、csi-nodeplugin-rbac.yaml
cat > ~/ceph-csi/csi-nodeplugin-rbac.yaml << 'EOF'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-nodeplugin
namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["list", "get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-nodeplugin
subjects:
- kind: ServiceAccount
name: rbd-csi-nodeplugin
namespace: ceph-csi
roleRef:
kind: ClusterRole
name: rbd-csi-nodeplugin
apiGroup: rbac.authorization.k8s.io
EOF
# 3、csi-provisioner-rbac.yaml
cat > ~/ceph-csi/csi-provisioner-rbac.yaml << 'EOF'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rbd-csi-provisioner
namespace: ceph-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["get", "list", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get"]
- apiGroups: [""]
resources: ["serviceaccounts/token"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
namespace: ceph-csi
roleRef:
kind: ClusterRole
name: rbd-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: ceph-csi
name: rbd-external-provisioner-cfg
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rbd-csi-provisioner-role-cfg
namespace: ceph-csi
subjects:
- kind: ServiceAccount
name: rbd-csi-provisioner
namespace: ceph-csi
roleRef:
kind: Role
name: rbd-external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
EOF
# 4、csidriver.yaml
cat > ~/ceph-csi/csidriver.yaml << 'EOF'
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: "rbd.csi.ceph.com"
namespace: ceph-csi
spec:
attachRequired: true
podInfoOnMount: false
EOF
# 5、csi-rbdplugin-provisioner.yaml
cat > ~/ceph-csi/csi-rbdplugin-provisioner.yaml << 'EOF'
---
kind: Service
apiVersion: v1
metadata:
name: csi-rbdplugin-provisioner
namespace: ceph-csi
labels:
app: csi-metrics
spec:
selector:
app: csi-rbdplugin-provisioner
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8680
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-rbdplugin-provisioner
namespace: ceph-csi
spec:
replicas: 3
selector:
matchLabels:
app: csi-rbdplugin-provisioner
template:
metadata:
labels:
app: csi-rbdplugin-provisioner
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-rbdplugin-provisioner
topologyKey: "kubernetes.io/hostname"
serviceAccountName: rbd-csi-provisioner
priorityClassName: system-cluster-critical
containers:
- name: csi-provisioner
image: dyrnq/csi-provisioner:v3.2.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--retry-interval-start=500ms"
- "--leader-election=true"
- "--feature-gates=Topology=false"
- "--feature-gates=HonorPVReclaimPolicy=true"
- "--prevent-volume-mode-conversion=true"
- "--default-fstype=ext4"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: dyrnq/csi-snapshotter:v6.0.1
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election=true"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: dyrnq/csi-attacher:v3.5.0
args:
- "--v=1"
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
- "--retry-interval-start=500ms"
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: dyrnq/csi-resizer:v1.5.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
- "--timeout=150s"
- "--leader-election"
- "--retry-interval-start=500ms"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-rbdplugin
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--type=rbd"
- "--controllerserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--pidlimit=-1"
- "--rbdhardmaxclonedepth=8"
- "--rbdsoftmaxclonedepth=4"
- "--enableprofiling=false"
- "--setmetadata=true"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
- name: csi-rbdplugin-controller
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=controller"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--drivernamespace=$(DRIVER_NAMESPACE)"
- "--setmetadata=true"
env:
- name: DRIVER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: liveness-prometheus
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8680"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi-provisioner.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: socket-dir
emptyDir: {
medium: "Memory"
}
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms
EOF
# 6、csi-rbdplugin.yaml
cat > ~/ceph-csi/csi-rbdplugin.yaml << 'EOF'
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-rbdplugin
namespace: ceph-csi
spec:
selector:
matchLabels:
app: csi-rbdplugin
template:
metadata:
labels:
app: csi-rbdplugin
spec:
serviceAccountName: rbd-csi-nodeplugin
hostNetwork: true
hostPID: true
priorityClassName: system-node-critical
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: driver-registrar
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: dyrnq/csi-node-driver-registrar:v2.5.1
args:
- "--v=1"
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-rbdplugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:canary
args:
- "--nodeid=$(NODE_ID)"
- "--pluginpath=/var/lib/kubelet/plugins"
- "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/"
- "--type=rbd"
- "--nodeserver=true"
- "--endpoint=$(CSI_ENDPOINT)"
- "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--enableprofiling=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: CSI_ADDONS_ENDPOINT
value: unix:///csi/csi-addons.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- mountPath: /dev
name: host-dev
- mountPath: /sys
name: host-sys
- mountPath: /run/mount
name: host-mount
- mountPath: /etc/selinux
name: etc-selinux
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- name: ceph-csi-config
mountPath: /etc/ceph-csi-config/
- name: plugin-dir
mountPath: /var/lib/kubelet/plugins
mountPropagation: "Bidirectional"
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: keys-tmp-dir
mountPath: /tmp/csi/keys
- name: ceph-logdir
mountPath: /var/log/ceph
- name: oidc-token
mountPath: /run/secrets/tokens
readOnly: true
- name: liveness-prometheus
securityContext:
privileged: true
allowPrivilegeEscalation: true
image: quay.io/cephcsi/cephcsi:canary
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metricsport=8680"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: socket-dir
mountPath: /csi
imagePullPolicy: "IfNotPresent"
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
type: DirectoryOrCreate
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins
type: Directory
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: ceph-logdir
hostPath:
path: /var/log/ceph
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: host-dev
hostPath:
path: /dev
- name: host-sys
hostPath:
path: /sys
- name: etc-selinux
hostPath:
path: /etc/selinux
- name: host-mount
hostPath:
path: /run/mount
- name: lib-modules
hostPath:
path: /lib/modules
- name: ceph-csi-config
configMap:
name: ceph-csi-config
- name: keys-tmp-dir
emptyDir: {
medium: "Memory"
}
- name: oidc-token
projected:
sources:
- serviceAccountToken:
path: oidc-token
expirationSeconds: 3600
audience: ceph-csi-kms
---
apiVersion: v1
kind: Service
metadata:
name: csi-metrics-rbdplugin
namespace: ceph-csi
labels:
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8680
selector:
app: csi-rbdplugin
EOF
7. secret.yaml
# 7、secret.yaml
cat > ~/ceph-csi/secret.yaml << 'EOF'
---
apiVersion: v1
kind: Secret
metadata:
name: csi-rbd-secret
namespace: ceph-csi
stringData:
userID: kube
userKey: AQD/4gpjDnt0KhAANfaKRnFlXIXVqPdf1IHOUA==
EOF
userID is the client name created earlier with ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kubernetes', i.e.
kube
userKey is read on the Ceph cluster with:
ceph auth get-key client.kube
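Once secret.yaml has been applied (in the kubectl apply step further below), the stored key can be read back to confirm it matches; note the key shown above is from this lab environment and must be replaced with your own:
# decode the userKey stored in the Secret and compare with the output of ceph auth get-key client.kube
kubectl -n ceph-csi get secret csi-rbd-secret -o jsonpath='{.data.userKey}' | base64 -d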
8. storageclass.yaml
# 8、storageclass.yaml
cat > ~/ceph-csi/storageclass.yaml << 'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
clusterID: 4c4d9c82-267d-11ed-888e-000c29db6f93
pool: kubernetes
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi
csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
- discard
EOF
clusterID is again the fsid shown on the Ceph cluster by
ceph mon dump
pool is the kubernetes pool created earlier on the Ceph cluster with
ceph osd pool create kubernetes 8 8
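A quick Ceph-side check that the pool referenced by the StorageClass is ready for rbd (run on a Ceph node):
# should print the enabled application metadata for rbd on the kubernetes pool
ceph osd pool application get kubernetes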
Pull the images
# 1. on the master node
# list the images referenced by the manifests
find ./ -type f |xargs grep 'image: '|sort|uniq|awk '{print $3}'|grep ^[a-zA-Z]|grep -Evw 'error|kubeRbacProxy'|sort -rn|uniq |grep -n ".*"
1:quay.io/cephcsi/cephcsi:canary
2:dyrnq/csi-snapshotter:v6.0.1
3:dyrnq/csi-resizer:v1.5.0
4:dyrnq/csi-provisioner:v3.2.1
5:dyrnq/csi-node-driver-registrar:v2.5.1
6:dyrnq/csi-attacher:v3.5.0
# pre-pull the images online (use whichever command matches your container runtime)
find ./ -type f |xargs grep 'image: '|sort|uniq|awk '{print $3}'|grep ^[a-zA-Z]|grep -Evw 'error|kubeRbacProxy'|sort -rn|uniq |xargs -i docker pull {}
find ./ -type f |xargs grep 'image: '|sort|uniq|awk '{print $3}'|grep ^[a-zA-Z]|grep -Evw 'error|kubeRbacProxy'|sort -rn|uniq |xargs -i ctr -n k8s.io i pull {}
# 2. on the worker nodes
cat > pull.sh << 'EOF'
quay.io/cephcsi/cephcsi:canary
dyrnq/csi-snapshotter:v6.0.1
dyrnq/csi-resizer:v1.5.0
dyrnq/csi-provisioner:v3.2.1
dyrnq/csi-node-driver-registrar:v2.5.1
dyrnq/csi-attacher:v3.5.0
EOF
cat pull.sh |xargs -i docker pull {}
cat pull.sh |xargs -i crictl pull {}
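Optionally confirm the images landed in the runtime before applying the manifests (the grep pattern below is just an example):
# containerd / crictl
crictl images | grep -E 'cephcsi|csi-'
# or, if docker is the runtime
docker images | grep -E 'cephcsi|csi-'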
kubectl apply \
-f csi-config-map.yaml \
-f csi-nodeplugin-rbac.yaml \
-f csi-provisioner-rbac.yaml \
-f csidriver.yaml
kubectl apply \
-f csi-rbdplugin-provisioner.yaml \
-f csi-rbdplugin.yaml
kubectl apply \
-f secret.yaml \
-f storageclass.yaml
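Before testing, check that the provisioner Deployment, the rbdplugin DaemonSet and the StorageClass are all up:
kubectl -n ceph-csi get deploy,ds,pods -o wide
kubectl get sc csi-rbd-sc
The kubectl delete commands that follow are the matching cleanup, run in reverse order, and are only needed for teardown.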
kubectl delete \
-f storageclass.yaml \
-f secret.yaml
kubectl delete \
-f csi-rbdplugin.yaml \
-f csi-rbdplugin-provisioner.yaml
kubectl delete \
-f csidriver.yaml \
-f csi-provisioner-rbac.yaml \
-f csi-nodeplugin-rbac.yaml \
-f csi-config-map.yaml
Test dynamic PVC provisioning
9. pvc.yaml
cat > ~/000/pvc.yaml << 'EOF'
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: csi-rbd-sc
EOF
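Apply the claim and check that it binds; a new image should then show up in the kubernetes pool on the Ceph side (the image name is generated by the csi driver):
kubectl apply -f ~/000/pvc.yaml
kubectl get pvc rbd-pvc
# on a Ceph node
rbd ls -p kubernetes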
10. pod.yaml
cat > ~/000/pod.yaml << 'EOF'
---
apiVersion: v1
kind: Pod
metadata:
name: csi-rbd-demo-pod
spec:
containers:
- name: web-server
image: nginx:alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- name: mypvc
mountPath: /var/lib/www/html
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: rbd-pvc
readOnly: false
EOF
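Finally, apply the pod and write through the RBD-backed mount to confirm the whole path works (paths as defined in pod.yaml above):
kubectl apply -f ~/000/pod.yaml
kubectl get pod csi-rbd-demo-pod
kubectl exec csi-rbd-demo-pod -- sh -c 'echo hello-rbd > /var/lib/www/html/test.txt && cat /var/lib/www/html/test.txt'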