System environment:
root@node01:~# lsb_release -a
LSB Version: core-9.20160110ubuntu0.2-amd64:core-9.20160110ubuntu0.2-noarch:security-9.20160110ubuntu0.2-amd64:security-9.20160110ubuntu0.2-noarch
Distributor ID: Ubuntu
Description: Ubuntu 16.04.6 LTS
Release: 16.04
Codename: xenial
root@node01:~# uname -r
4.4.0-142-generic
/etc/hosts
# Ceph RBD
172.31.53.95 node01
172.31.53.94 node02
172.31.53.93 node03
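A quick sanity check that the /etc/hosts entries resolve (run from node01; any node works):
ping -c 1 node02
ping -c 1 node03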
Role assignment:
node01: admin-node, mon, mgr, osd
node02: osd
node03: osd
Create a user with sudo privileges on all nodes (note: the username must not be ceph)
sudo useradd -d /home/deploy -m deploy
sudo passwd deploy
echo "deploy ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/deploy
sudo chmod 0440 /etc/sudoers.d/deploy
Set up passwordless SSH login across the three machines (as the deploy user). On node01:
su - deploy
ssh-keygen -t rsa
ssh-copy-id node01
ssh-copy-id node02
ssh-copy-id node03
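Verify passwordless login before continuing (each command should print the remote hostname without asking for a password):
ssh node02 hostname
ssh node03 hostname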
Install the ceph-deploy tool on node01
# Add the release key
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
Add the apt repository (the codename should match your release; these nodes run xenial)
echo deb http://download.ceph.com/debian-luminous/ xenial main | sudo tee /etc/apt/sources.list.d/ceph.list
Update your package index and install ceph-deploy
sudo apt update
sudo apt -y install ceph-deploy
Edit ~/.ssh/config on node01 (the deploy node)
root@node01:~# vim ~/.ssh/config
Host node01
  Hostname node01
  User deploy
Host node02
  Hostname node02
  User deploy
Host node03
  Hostname node03
  User deploy
Deploy the Ceph cluster from node01 (the ceph-deploy node)
su - deploy
mkdir ~/ceph-cluster && cd ~/ceph-cluster
Optional: clean up a previous environment (only needed if this is not the first deployment)
# Start over (purge an existing Ceph deployment)
ceph-deploy purge {ceph-node} [{ceph-node} ...]
ceph-deploy purgedata {ceph-node} [{ceph-node} ...]
ceph-deploy forgetkeys
rm ceph.*
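For the three nodes in this guide, that cleanup would look like:
ceph-deploy purge node01 node02 node03
ceph-deploy purgedata node01 node02 node03
ceph-deploy forgetkeys
rm ceph.*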
Create the cluster
### Create the monitor node
$ ceph-deploy new node01
Install Ceph packages on all nodes
$ ceph-deploy install node01 node02 node03
Initialize the monitor node and gather the keys
ceph-deploy mon create-initial
Result: after a successful run, the following keyring files are generated
ceph.client.admin.keyring
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring
ceph.bootstrap-mds.keyring
ceph.bootstrap-rgw.keyring
ceph.bootstrap-rbd.keyring
Distribute the keyring files to each node
ceph-deploy admin node01 node02 node03
Create directories for the OSD daemons (on node01, node02, and node03 respectively)
mkdir /var/local/osd1 && chmod 777 /var/local/osd1/
ssh node02
mkdir /var/local/osd2 && chmod 777 /var/local/osd2/
ssh node03
mkdir /var/local/osd3 && chmod 777 /var/local/osd3/
Deploy the OSD nodes
ceph-deploy osd create node01:/dev/vdb node02:/dev/vdb node03:/dev/vdb
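Note: the host:device form above is the old ceph-deploy 1.5.x syntax. If your ceph-deploy is 2.0 or newer, the equivalent commands take one host per invocation:
ceph-deploy osd create --data /dev/vdb node01
ceph-deploy osd create --data /dev/vdb node02
ceph-deploy osd create --data /dev/vdb node03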
Check that Ceph is healthy
ceph health
ceph -s
Ceph operations
### Create a Ceph storage pool for Kubernetes (128 is the number of placement groups)
ceph osd pool create k8s 128
Create the kube user
ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=k8s' -o ceph.client.k8s.keyring
Get the key for client.kube and base64-encode it
echo -n "$(ceph auth get-key client.kube)" | base64
QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ==
Get the key for client.admin and base64-encode it (use -n so the trailing newline is not encoded as well)
echo -n "$(ceph auth get-key client.admin)" | base64
QVFDRUlQSmR4djUxSmhBQWh2VGFWNkdDaDJTbzAzenhsVjFVK3c9PQo=
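To sanity-check an encoded value, decode it back and compare it with the output of ceph auth get-key (shown here with the client.kube value from above):
echo "QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ==" | base64 -d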
Operations on the Kubernetes master
Since kubelet itself does not support the rbd commands, a kube system plugin (the external-storage RBD provisioner) needs to be added:
https://github.com/kubernetes-incubator/external-storage
git clone https://github.com/kubernetes-incubator/external-storage.git
cd /root/external-storage/ceph/rbd/deploy/rbac && kubectl apply -f .
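Check that the provisioner pod starts (the rbd-provisioner name comes from the manifests in that rbac directory):
kubectl get pods | grep rbd-provisioner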
Create Secrets from the client.admin and client.kube keys
cat << EOF > ceph-secret.yaml
apiVersion: v1
data:
  key: QVFCV05QSmRwY24zSEJBQVV6SUtMMGlIY1BrNWZ1NElmcjZIL0E9PQ==
kind: Secret
metadata:
  name: ceph-secret-user
  namespace: default
type: kubernetes.io/rbd
---
apiVersion: v1
data:
  key: QVFDRUlQSmR4djUxSmhBQWh2VGFWNkdDaDJTbzAzenhsVjFVK3c9PQo=
kind: Secret
metadata:
  name: ceph-secret-admin
  namespace: default
type: kubernetes.io/rbd
EOF
kubectl create -f ceph-secret.yaml
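Confirm that both secrets exist in the default namespace:
kubectl get secret ceph-secret-user ceph-secret-admin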
Create the storage class (it is marked as the default StorageClass later via an annotation). Note: fsType cannot be xfs, and the two secrets must live in the same namespace.
cat << EOF > ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 172.31.53.95:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: k8s
  userId: kube
  userSecretName: ceph-secret-user
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF
kubectl apply -f ceph-storage-class.yaml
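Verify that the class was created:
kubectl get storageclass rbd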
### Test 1: create a PVC and have a PV created and bound automatically
cat << EOF > pvc-ceph.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-test
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rbd
  resources:
    requests:
      storage: 1Gi
EOF
kubectl apply -f pvc-ceph.yaml
The result (output of kubectl get pv):
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-2f4ecccc-0d28-419d-b54a-5f03ecaad75e 10Gi RWO Delete Bound default/nginx-test rbd 86s
Test 2: create a pod that uses a PVC; the PVC and PV are created automatically
cat << EOF > statefulset-pvc.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 3 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "rbd"
      resources:
        requests:
          storage: 1Gi
EOF
kubectl apply -f statefulset-pvc.yaml
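Each replica gets its own PVC from the volumeClaimTemplates (www-web-0, www-web-1, www-web-2); check with:
kubectl get pods -l app=nginx
kubectl get pvc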
If mounting fails with an rbd-related error on a node, install the client tools there: apt-get install ceph-common
On the Ceph admin node, run rbd ls -p k8s to list the block images that were created.
Set the rbd StorageClass as the default
kubectl patch storageclass rbd -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
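The class should now show (default) next to its name:
kubectl get storageclass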
Test: a YAML file that does not specify a storageClassName. (Note: this StatefulSet reuses the name web, so delete the one from Test 2 first: kubectl delete statefulset web.)
cat << EOF > statefulset2.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 1 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: html
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
EOF
kubectl apply -f statefulset2.yaml
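Even without an explicit storageClassName, the claim should be provisioned by the default rbd class. The first replica's PVC is named html-web-0 (volumeClaimTemplate name plus pod name):
kubectl get pvc html-web-0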