# Disable the firewall and SELinux (Ceph preflight requirement on CentOS 7)
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
# Persistently disable SELinux. Only SELINUX= may be set to "disabled";
# SELINUXTYPE selects a policy (targeted/mls) and must NOT be changed —
# the original also rewrote SELINUXTYPE=disabled, which can make the
# system unbootable, so that sed has been removed.
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
# Drop to permissive mode immediately (no reboot needed), then confirm
setenforce 0
getenforce
# 创建 ceph 管理用户（ceph-admin 部署用户，并授予免密 sudo）
[liu@sv2 桌面]$ sudo useradd -d /home/ceph-admin -m ceph-admin
[sudo] password for liu:
[liu@sv2 桌面]$ sudo passwd ceph-admin
更改用户 ceph-admin 的密码 。
新的 密码:
无效的密码: 密码是一个回文
重新输入新的 密码:
passwd:所有的身份验证令牌已经成功更新。
[liu@sv2 桌面]$ echo "ceph-admin ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph-admin
ceph-admin ALL = (root) NOPASSWD:ALL
[liu@sv2 桌面]$ sudo chmod 0440 /etc/sudoers.d/ceph-admin
[liu@sv2 桌面]$ su ceph-admin
# Install NTP — Ceph monitors require synchronized clocks across all nodes
yum install -y ntp ntpdate
# Append the Aliyun NTP servers non-interactively (the original asked the
# reader to add these lines by hand with vi)
sudo tee -a /etc/ntp.conf <<'EOF'
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
server ntp4.aliyun.com iburst
EOF
# Start now and enable at boot; use systemctl consistently instead of the
# legacy `service` wrapper used elsewhere in the original
sudo systemctl start ntpd
sudo systemctl enable ntpd
#在管理节点设置允许无密码 SSH 登录
[ceph-admin@nimbus ~]$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/ceph-admin/.ssh/id_rsa):
Created directory '/home/ceph-admin/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/ceph-admin/.ssh/id_rsa.
Your public key has been saved in /home/ceph-admin/.ssh/id_rsa.pub.
The key fingerprint is:
1c:82:1a:19:38:b3:d0:9f:79:d7:4b:56:08:2e:7a:04 ceph-admin@nimbus
The key's randomart image is:
# Create ~/.ssh/config so ceph-deploy can reach each node as the deploy
# user without passing --username every time. Replace {username} with the
# actual deploy user (ceph-admin in this guide).
cat >> ~/.ssh/config <<'EOF'
Host node1
Hostname node1
User {username}
Host node2
Hostname node2
User {username}
Host node3
Hostname node3
User {username}
EOF
# ssh refuses a world-readable config. Fixed: the original ran
# `sudo chmod 600 config` from $HOME, which targets the wrong file;
# sudo is also unnecessary on a file we own.
chmod 600 ~/.ssh/config
# Push the public key to the other nodes
ssh-copy-id {username}@node2
ssh-copy-id {username}@node3
# Allow sudo without a TTY (ceph-deploy runs sudo over non-interactive ssh).
# Fixed: the sudoers keyword is "Defaults requiretty" (plural); the original
# pattern 'Default requiretty' never matched, so the option stayed active.
sed -i 's/^Defaults[[:space:]]\+requiretty/#Defaults requiretty/' /etc/sudoers
# Always validate sudoers after a non-visudo edit — a syntax error here
# locks out sudo entirely
visudo -c
或者在某些发行版(如 CentOS )上,执行 ceph-deploy 命令时,如果你的 Ceph 节点默认设置了
requiretty 那就会遇到报错。可以这样禁用此功能:执行 sudo visudo ,找到 Defaults requiretty 选项,
把它改为 Defaults:ceph-admin !requiretty (本文的部署用户是 ceph-admin ),这样 ceph-deploy 就能用 ceph-admin 用户登录并使用 sudo 了。
# Configure the Aliyun mirror of the Ceph Jewel repository, written
# non-interactively (the original asked the reader to create the file
# with vi). gpgcheck=0 on the binary repos is kept from the original,
# but note it skips package signature verification.
sudo tee /etc/yum.repos.d/ceph.repo <<'EOF'
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
EOF
# Fixed: [ceph-source] originally pointed at rpm-luminous while the binary
# repos use rpm-jewel — all three sections must track the same release.
# Run on every node in the cluster: install Ceph, then confirm the release
yum -y install ceph
ceph --version
# On the admin (deploy) node only: install ceph-deploy as the deploy user
yum install -y ceph-deploy
su ceph-admin
# Keep all generated configs and keyrings in a dedicated working directory;
# subsequent ceph-deploy commands must run from here
mkdir ceph-cluster
cd ceph-cluster
# Bootstrap the cluster with the initial monitor.
# NOTE(review): the original used "docker-node1" here, but every other
# command in this guide targets hosts nimbus/sv1/sv2 — using nimbus for
# consistency; confirm the real monitor hostname.
ceph-deploy new nimbus
# With only three OSDs, allow HEALTH_OK with 2 replicas. Append the option
# instead of editing ceph.conf interactively with vi; it belongs in [global].
echo "osd pool default size = 2" >> ceph.conf
# Create the initial monitor(s) and gather the bootstrap keyrings
ceph-deploy --overwrite-conf mon create-initial
ls
# Expected files in the working directory after this step:
#   ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring
#   ceph.client.admin.keyring   ceph-deploy-ceph.log
# Push ceph.conf and the admin keyring to every node
# (fixed typo: the original said "nimbux")
ceph-deploy admin nimbus sv1 sv2
# On every node: make the admin keyring readable so `ceph` commands work
# without sudo
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
# Deploy the manager daemons.
# NOTE(review): the original listed docker-node1..3, which do not match the
# nimbus/sv1/sv2 hosts used by every other command in this guide — confirm
# the real hostnames. Also note ceph-mgr only exists in Luminous (12.x) and
# later; with the Jewel repo configured above this step must be skipped.
ceph-deploy mgr create nimbus sv1 sv2
# Add directory-backed OSDs, one per node. Since Jewel the OSD daemons run
# as user "ceph", so each data directory must be chowned to ceph:ceph or
# `osd activate` fails with a permission error — the original omitted this.
sudo mkdir /var/local/osd0
sudo chown ceph:ceph /var/local/osd0
ssh sv1
sudo mkdir /var/local/osd1
sudo chown ceph:ceph /var/local/osd1
exit
ssh sv2
sudo mkdir /var/local/osd2
sudo chown ceph:ceph /var/local/osd2
exit
# Prepare the OSDs from the admin node (fixed typo: "eph-deploy")
ceph-deploy osd prepare nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2
# Activate the OSDs
ceph-deploy osd activate nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2
"access_key": "LY8T3X06P1OB70VFBLGH",
"secret_key": "Jr3QuQ34xgR7LMc0z9f6zI5GDG1imtTC6ygNwLWQ"