System environment:
root@node01:~# lsb_release -a
LSB Version: core-9.20160110ubuntu0.2-amd64:core-9.20160110ubuntu0.2-noarch:security-9.20160110ubuntu0.2-amd64:security-9.20160110ubuntu0.2-noarch
Distributor ID: Ubuntu
Description: Ubuntu 16.04.6 LTS
Release: 16.04
Codename: xenial

root@node01:~# uname -r
4.4.0-142-generic

/etc/hosts
# Ceph RBD
172.31.53.95 node01
172.31.53.94 node02
172.31.53.93 node03

Role assignment:
node01: admin-node, mon, mgr, osd
node02: osd
node03: osd

On all nodes, create a user with sudo privileges (note: the username must not be "ceph"):
sudo useradd -d /home/deploy -m deploy

sudo passwd deploy
echo "deploy ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/deploy
sudo chmod 0440 /etc/sudoers.d/deploy

Set up passwordless SSH login from node01 to all three nodes (as the deploy user). Run on node01:
su - deploy
ssh-keygen -t rsa
ssh-copy-id node01
ssh-copy-id node02
ssh-copy-id node03
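Optionally verify the key-based login before continuing (a quick sanity check, not part of the original steps; each command should print the remote hostname without asking for a password):
for h in node01 node02 node03; do ssh $h hostname; done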

Install the ceph-deploy tool on node01
# Add the release key
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -

Add the apt repository
echo deb http://download.ceph.com/debian-luminous/ xenial main | sudo tee /etc/apt/sources.list.d/ceph.list

Update the package index and install ceph-deploy
sudo apt update
sudo apt -y install ceph-deploy
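The node:device syntax used for OSD creation later in this guide is the ceph-deploy 1.x form (the 2.x series switched to a --data flag), so it is worth confirming which version was actually installed:
ceph-deploy --version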

Edit ~/.ssh/config on node01 (the deploy node)
root@node01:~# vim ~/.ssh/config
Host node01
    Hostname node01
    User deploy
Host node02
    Hostname node02
    User deploy
Host node03
    Hostname node03
    User deploy

Deploy the Ceph cluster from node01 (the ceph-deploy node)
su - deploy
$ mkdir ~/ceph-cluster && cd ~/ceph-cluster

Optional: clean up a previous deployment (only needed if Ceph was deployed here before)
# Start over from scratch (not a first-time deployment)
ceph-deploy purge {ceph-node} [{ceph-node} ...]
ceph-deploy purgedata {ceph-node} [{ceph-node} ...]
ceph-deploy forgetkeys
rm ceph.*

Create the cluster
### Create the monitor node
$ ceph-deploy new node01

Install the Ceph packages on all nodes
$ ceph-deploy install node01 node02 node03

Initialize the monitor node and gather the keys
ceph-deploy mon create-initial
Result: on success, the following keyring files are generated:
ceph.client.admin.keyring
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring
ceph.bootstrap-mds.keyring
ceph.bootstrap-rgw.keyring
ceph.bootstrap-rbd.keyring

Distribute the configuration and keyring files to all nodes
ceph-deploy admin node01 node02 node03

Create directories for the OSD daemons (these directories are only used by directory-backed OSDs; the deployment step below uses the raw /dev/vdb devices)
mkdir /var/local/osd1 && chmod 777 /var/local/osd1/
ssh node02 "mkdir /var/local/osd2 && chmod 777 /var/local/osd2/"
ssh node03 "mkdir /var/local/osd3 && chmod 777 /var/local/osd3/"

Deploy the OSD nodes
ceph-deploy osd create node01:/dev/vdb node02:/dev/vdb node03:/dev/vdb

Check that Ceph is healthy
ceph health
ceph -s
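A couple of additional read-only checks (standard Ceph CLI commands) help confirm that all three OSDs registered and that capacity is visible:
ceph osd tree   # all three OSDs should report "up"
ceph df         # overall and per-pool capacity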

Ceph operations
### Create a storage pool for Kubernetes
ceph osd pool create k8s 128
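The value 128 follows the usual rule of thumb of (OSDs x 100) / replica count, rounded up to a power of two: (3 x 100) / 3 = 100, rounded up to 128. A small sketch of how to confirm the setting; the application-enable line is optional and applies because this is a Luminous cluster:
ceph osd pool get k8s pg_num                 # should print: pg_num: 128
ceph osd pool application enable k8s rbd     # tag the pool for RBD use (Luminous and later warn about untagged pools)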

Create the kube user
ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=k8s' -o ceph.client.k8s.keyring

Get the key for client.kube and base64-encode it
echo -n "$(ceph auth get-key client.kube)" | base64
QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ==

Get the key for client.admin and base64-encode it
echo -n "$(ceph auth get-key client.admin)" | base64
QVFDRUlQSmR4djUxSmhBQWh2VGFWNkdDaDJTbzAzenhsVjFVK3c9PQ==
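To double-check an encoded value, decode it again; it should print exactly the key that ceph auth get-key returned (shown here with the client.kube value from above):
echo QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ== | base64 -d; echo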

Operations on the Kubernetes master

Because kubelet itself does not support the rbd provisioning commands, an external provisioner add-on is needed:
https://github.com/kubernetes-incubator/external-storage
git clone https://github.com/kubernetes-incubator/external-storage.git

cd /root/external-storage/ceph/rbd/deploy/rbac && kubectl apply -f .
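Those manifests include, besides the RBAC objects, a Deployment for the rbd-provisioner itself (assuming the repository layout at the time of writing); confirm the pod is running before creating the StorageClass:
kubectl get pods --all-namespaces | grep rbd-provisioner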

Create Secrets from the client.admin and client.kube keys

cat << EOF > ceph-secret.yaml
apiVersion: v1
data:
  key: QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ==
kind: Secret
metadata:
  name: ceph-secret-user
  namespace: default
type: kubernetes.io/rbd
---
apiVersion: v1
data:
  key: QVFDRUlQSmR4djUxSmhBQWh2VGFWNkdDaDJTbzAzenhsVjFVK3c9PQ==
kind: Secret
metadata:
  name: ceph-secret-admin
  namespace: default
type: kubernetes.io/rbd
EOF

kubectl create -f ceph-secret.yaml
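A quick check that both Secrets exist with the expected type:
kubectl get secret ceph-secret-user ceph-secret-admin
# TYPE should show kubernetes.io/rbd for both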

Create the StorageClass (it is set as the default StorageClass later via an annotation). Note: fsType must not be xfs, and both Secrets must be in the same namespace as referenced here.

cat << EOF > ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 172.31.53.95:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: k8s
  userId: kube
  userSecretName: ceph-secret-user
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF

kubectl apply -f ceph-storage-class.yaml
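Confirm the StorageClass exists and points at the external provisioner:
kubectl get storageclass rbd
kubectl describe storageclass rbd   # should show provisioner ceph.com/rbd and the parameters above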
### Test 1: create a PVC; a PV is provisioned and bound automatically

cat << EOF > pvc-ceph.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-test
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rbd
  resources:
    requests:
      storage: 1Gi
EOF

kubectl apply -f pvc-ceph.yaml
The result (kubectl get pv) looks like this:
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-2f4ecccc-0d28-419d-b54a-5f03ecaad75e 10Gi RWO Delete Bound default/nginx-test rbd 86s
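The claim itself should also show Bound, and the provisioned volume is backed by an RBD image in the k8s pool, which can be confirmed from the Ceph side:
kubectl get pvc nginx-test
# On the Ceph admin node:
rbd ls -p k8s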

Test 2: create a Pod that uses a PVC; the PVC and PV are created automatically

cat << EOF > statefulset-pvc.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "rbd"
      resources:
        requests:
          storage: 1Gi
EOF

kubectl apply -f statefulset-pvc.yaml
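With three replicas and one volumeClaimTemplate, three PVCs named www-web-0 through www-web-2 should be created and bound as the pods come up:
kubectl get pods -l app=nginx
kubectl get pvc | grep www-web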

If an error is reported when mounting the volumes, the fix is to install ceph-common on the Kubernetes nodes: apt-get install ceph-common
On the Ceph admin node, run rbd ls -p k8s to list the block images that were created.

Set the rbd StorageClass as the default
kubectl patch storageclass rbd -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
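Verify that the annotation took effect; the default class is marked in the listing:
kubectl get storageclass   # rbd should now appear as "rbd (default)"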

Test: write a manifest that does not specify a storageClassName

cat << EOF > statefulset2.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 1 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: html
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
EOF

kubectl apply -f statefulset2.yaml
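Note that this StatefulSet reuses the name web, so remove the one from Test 2 first (kubectl delete statefulset web) if it is still present. Because no storageClassName is set, the DefaultStorageClass admission plugin should fill in rbd on the generated claim; a sketch of how to check (html-web-0 is the claim name derived from the template name and the pod ordinal):
kubectl get pvc html-web-0 -o jsonpath='{.spec.storageClassName}'; echo
# expected output: rbd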