# Disable SELinux and the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0
getenforce

# Create the Ceph admin user
[liu@sv2 Desktop]$ sudo useradd -d /home/ceph-admin -m ceph-admin
[sudo] password for liu:
[liu@sv2 Desktop]$ sudo passwd ceph-admin
Changing password for user ceph-admin.
New password:
BAD PASSWORD: the password is a palindrome
Retype new password:
passwd: all authentication tokens updated successfully.
[liu@sv2 Desktop]$ echo "ceph-admin ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph-admin
ceph-admin ALL = (root) NOPASSWD:ALL
[liu@sv2 Desktop]$ sudo chmod 0440 /etc/sudoers.d/ceph-admin
[liu@sv2 Desktop]$ su ceph-admin

# Install NTP
yum install -y ntp ntpdate
sudo vi /etc/ntp.conf
# Add these servers:
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
server ntp4.aliyun.com iburst
# Start now and enable at boot
sudo service ntpd start
sudo systemctl enable ntpd

# On the admin node, enable passwordless SSH login
[ceph-admin@nimbus ~]$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/home/ceph-admin/.ssh/id_rsa):
Created directory '/home/ceph-admin/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/ceph-admin/.ssh/id_rsa.
Your public key has been saved in /home/ceph-admin/.ssh/id_rsa.pub.
The key fingerprint is:
1c:82:1a:19:38:b3:d0:9f:79:d7:4b:56:08:2e:7a:04 ceph-admin@nimbus
The key's randomart image is:

# Edit the SSH client config
vi .ssh/config
Host node1
    Hostname node1
    User {username}
Host node2
    Hostname node2
    User {username}
Host node3
    Hostname node3
    User {username}
# Fix the permissions, then push the key to the other nodes
chmod 600 .ssh/config
ssh-copy-id {username}@node2
ssh-copy-id {username}@node3

# Configure sudo to not require a tty
sed -i 's/Defaults requiretty/#Defaults requiretty/' /etc/sudoers
Alternatively, on some distributions (such as CentOS), running ceph-deploy will fail if your Ceph nodes have requiretty set by default. To disable it, run sudo visudo, locate the Defaults requiretty option, and change it to Defaults:ceph !requiretty, so that ceph-deploy can log in as the ceph user and use sudo.

# Set up the yum repo
vi /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

# On all nodes
yum install -y ceph
ceph -v
# On the admin node
yum install -y ceph-deploy
su ceph-admin
mkdir ceph-cluster
cd ceph-cluster
ceph-deploy new docker-node1
vi ceph.conf
# Add this line:
osd pool default size = 2

# Create the keyrings
ceph-deploy --overwrite-conf mon create-initial
ls
# ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log
ceph-deploy admin nimbus sv1 sv2
# On every node
sudo chmod +r /etc/ceph/ceph.client.admin.keyring

# Install mgr (the mgr daemon is required from Luminous on; Jewel does not have it)
ceph-deploy mgr create docker-node1 docker-node2 docker-node3

# Add OSDs
sudo mkdir /var/local/osd0
ssh sv1
sudo mkdir /var/local/osd1
exit
ssh sv2
sudo mkdir /var/local/osd2
exit
# From the admin node, run ceph-deploy to prepare the OSDs
ceph-deploy osd prepare nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2
# Activate the OSDs
ceph-deploy osd activate nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2

"access_key": "LY8T3X06P1OB70VFBLGH",
"secret_key": "Jr3QuQ34xgR7LMc0z9f6zI5GDG1imtTC6ygNwLWQ"
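
The trailing access_key/secret_key pair is the kind of credential radosgw-admin prints when an object-gateway user is created; these notes never show that step, so here is a minimal sketch with a made-up uid and display name:

# Hypothetical example; it requires a running radosgw instance, which
# these notes do not set up. The uid and display-name are placeholders.
radosgw-admin user create --uid=demo-user --display-name="Demo User"
# The JSON output contains a "keys" array with an access_key/secret_key
# pair like the one quoted above. To print it again later:
radosgw-admin user info --uid=demo-user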
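
If osd activate fails with a permission error: on Jewel the OSD daemons run as the ceph user, so directory-backed OSD paths must be writable by it. A commonly needed fix, run on each node before activating:

sudo chown ceph:ceph /var/local/osd0   # likewise osd1 on sv1 and osd2 on sv2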
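
Once the OSDs are activated, a few standard status commands show whether the cluster actually converged; with three OSDs and osd pool default size = 2 it should reach HEALTH_OK:

ceph -s          # monitor quorum, OSD count, and PG states at a glance
ceph osd tree    # osd.0, osd.1 and osd.2 should all be up and in
ceph df          # raw and per-pool capacity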
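
To confirm that the osd pool default size = 2 setting from ceph.conf really took effect, create a throwaway pool and query its replication size; the pool name and PG count below are arbitrary:

ceph osd pool create test-pool 64   # 64 placement groups is plenty for a test
ceph osd pool get test-pool size    # should answer: size: 2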
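
A rados put/get round trip makes a simple end-to-end smoke test; the object and file names here are made up:

echo "hello ceph" > hello.txt
rados -p test-pool put hello-object hello.txt    # write the file as an object
rados -p test-pool ls                            # should list hello-object
rados -p test-pool get hello-object check.txt    # read it back out
ceph osd map test-pool hello-object              # which PG and OSDs hold it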
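
Clock skew between monitors is a frequent cause of HEALTH_WARN, which is why the NTP step above matters; it is worth confirming on every node that ntpd actually synchronized:

ntpq -p    # the peer prefixed with '*' is the selected time source
ntpstat    # reports "synchronised" once the clock has converged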
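
Finally, everything above assumes the short hostnames (nimbus, sv1, sv2, node1..node3) resolve on every machine; if DNS does not provide that, /etc/hosts does. The addresses below are placeholders, not values taken from these notes:

# Hypothetical /etc/hosts entries; substitute the real cluster IPs.
192.168.1.10  nimbus
192.168.1.11  sv1
192.168.1.12  sv2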