1. #关闭selinux、防火墙
    2. systemctl stop firewalld.service
    3. systemctl disable firewalld.service
    4. firewall-cmd --state
    5. sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
    6. #注意:SELINUXTYPE 应保持默认值 targeted,不要改成 disabled(无效的 SELINUXTYPE 可能导致系统无法启动);禁用 SELinux 只需第 5 步的 SELINUX=disabled
    7. grep --color=auto '^SELINUX' /etc/selinux/config
    8. setenforce 0
    9. getenforce
    10. 创建ceph管理用户
    11. [liu@sv2 桌面]$ sudo useradd -d /home/ceph-admin -m ceph-admin
    12. [sudo] password for liu:
    13. [liu@sv2 桌面]$ sudo passwd ceph-admin
    14. 更改用户 ceph-admin 的密码
    15. 新的 密码:
    16. 无效的密码: 密码是一个回文
    17. 重新输入新的 密码:
    18. passwd:所有的身份验证令牌已经成功更新。
    19. [liu@sv2 桌面]$ echo "ceph-admin ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph-admin
    20. ceph-admin ALL = (root) NOPASSWD:ALL
    21. [liu@sv2 桌面]$ sudo chmod 0440 /etc/sudoers.d/ceph-admin
    22. [liu@sv2 桌面]$ su ceph-admin
    23. #安装NTP
    24. yum install -y ntp ntpdate
    25. sudo vi /etc/ntp.conf
    26. #加入这些
    27. server ntp2.aliyun.com iburst
    28. server ntp3.aliyun.com iburst
    29. server ntp4.aliyun.com iburst
    30. #启动 ntpd 并设置开机自启动
    31. sudo service ntpd start
    32. sudo systemctl enable ntpd
    33. #在管理节点设置允许无密码 SSH 登录
    34. [ceph-admin@nimbus ~]$ ssh-keygen
    35. Generating public/private rsa key pair.
    36. Enter file in which to save the key (/home/ceph-admin/.ssh/id_rsa):
    37. Created directory '/home/ceph-admin/.ssh'.
    38. Enter passphrase (empty for no passphrase):
    39. Enter same passphrase again:
    40. Your identification has been saved in /home/ceph-admin/.ssh/id_rsa.
    41. Your public key has been saved in /home/ceph-admin/.ssh/id_rsa.pub.
    42. The key fingerprint is:
    43. 1c:82:1a:19:38:b3:d0:9f:79:d7:4b:56:08:2e:7a:04 ceph-admin@nimbus
    44. The key's randomart image is:
    45. #修改文件
    46. vi .ssh/config
    47. Host node1
    48. Hostname node1
    49. User {username}
    50. Host node2
    51. Hostname node2
    52. User {username}
    53. Host node3
    54. Hostname node3
    55. User {username}
    56. #修改权限,发送过去
    57. sudo chmod 600 .ssh/config
    58. ssh-copy-id {username}@node2
    59. ssh-copy-id {username}@node3
    60. # 配置sudo不需要tty
    61. sed -i 's/Defaults requiretty/#Defaults requiretty/' /etc/sudoers
    62. 或者在某些发行版(如 CentOS )上,执行 ceph-deploy 命令时,如果你的 Ceph 节点默认设置了
    63. requiretty 那就会遇到报错。可以这样禁用此功能:执行 sudo visudo ,找到 Defaults requiretty 选项,
    64. 把它改为 Defaults:ceph !requiretty ,这样 ceph-deploy 就能用 ceph 用户登录并使用 sudo 了。
    65. #设置yum
    66. vi /etc/yum.repos.d/ceph.repo
    67. [ceph]
    68. name=ceph
    69. baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
    70. gpgcheck=0
    71. priority=1
    72. [ceph-noarch]
    73. name=cephnoarch
    74. baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
    75. gpgcheck=0
    76. priority=1
    77. [ceph-source]
    78. name=Ceph source packages
    79. baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
    80. enabled=0
    81. gpgcheck=1
    82. type=rpm-md
    83. gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    84. priority=1
    85. #所有节点
    86. yum install -y ceph
    87. ceph -v
    88. #管理节点
    89. yum install -y ceph-deploy
    90. su ceph-admin
    91. mkdir ceph-cluster
    92. cd ceph-cluster
    93. ceph-deploy new nimbus
    94. vi ceph.conf
    95. osd pool default size = 2
    96. #创建keyring
    97. ceph-deploy --overwrite-conf mon create-initial
    98. ls
    99. #ceph.bootstrap-mds.keyring
    100. ceph.bootstrap-osd.keyring ceph.client.admin.keyring ceph-deploy-ceph.log
    101. ceph-deploy admin nimbus sv1 sv2
    102. #在每个节点
    103. sudo chmod +r /etc/ceph/ceph.client.admin.keyring
    104. #安装mgr
    105. ceph-deploy mgr create nimbus sv1 sv2
    106. #添加OSD
    107. sudo mkdir /var/local/osd0
    108. ssh sv1
    109. sudo mkdir /var/local/osd1
    110. exit
    111. ssh sv2
    112. sudo mkdir /var/local/osd2
    113. exit
    114. #从管理节点执行 ceph-deploy 来准备 OSD 。
    115. ceph-deploy osd prepare nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2
    116. #激活OSD
    117. ceph-deploy osd activate nimbus:/var/local/osd0 sv1:/var/local/osd1 sv2:/var/local/osd2
    #以下为 radosgw-admin 创建用户时的示例输出(样例密钥;真实的 access/secret key 不要写入文档或提交到版本库):
    118. "access_key": "LY8T3X06P1OB70VFBLGH",
    119. "secret_key": "Jr3QuQ34xgR7LMc0z9f6zI5GDG1imtTC6ygNwLWQ"