mkdir backup && cd backup && wget http://192.168.0.1/cephadm
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
wget https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
chmod +x cephadm
./cephadm add-repo --release pacific
./cephadm install
which cephadm
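# Optional sanity check (not part of the original flow) that the installed cephadm
# reports the expected Pacific container/version:
cephadm version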
snap install docker # version 20.10.11, or
apt install docker.io # version 20.10.7-0ubuntu5~20.04.2
docker pull quay.io/ceph/ceph:v16
cephadm bootstrap --mon-ip 192.168.0.11
vim /etc/hosts
# Add the following entries to /etc/hosts
192.168.0.11 ceph1
192.168.0.21 ceph2
192.168.0.31 ceph3
192.168.0.41 ceph4
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph1
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph3
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph4
cephadm shell ceph orch host add ceph1 192.168.0.11
cephadm shell ceph orch host add ceph2 192.168.0.21
cephadm shell ceph orch host add ceph3 192.168.0.31
cephadm shell ceph orch host add ceph4 192.168.0.41
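# Optional sketch: label another host with the built-in "_admin" label so cephadm
# distributes ceph.conf and the admin keyring there too (matches the
# 'Enabling client.admin keyring and conf on hosts with "admin" label' line in the bootstrap output):
cephadm shell ceph orch host label add ceph2 _admin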
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G count=1
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G seek=97 count=0
losetup -f /plot1/ceph/osd-1.img
losetup /dev/loop16 /plot1/ceph/osd-1.img # attach the image to a specific loop device (no -f when naming the device)
losetup -d /dev/loop16
fallocate -l 97G /plot1/ceph/osd-1.img
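# A minimal sketch of attaching the image file and confirming it is visible as a
# block device (the loop device name is whatever losetup picks):
losetup -f --show /plot1/ceph/osd-1.img   # prints the chosen device, e.g. /dev/loop16
lsblk | grep loop                         # verify the size before handing it to the orchestrator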
cephadm shell ceph -s
cephadm shell ceph orch ps
cephadm shell ceph orch ls
cephadm shell ceph orch device ls
cephadm shell ceph orch apply osd --all-available-devices
cephadm shell ceph orch apply osd --all-available-devices --unmanaged=true
cephadm shell ceph orch daemon add osd ceph1:/dev/vdb
cephadm shell ceph orch daemon add osd ceph2:/dev/vdb
cephadm shell ceph orch daemon add osd ceph3:/dev/vdb
cephadm shell ceph orch daemon add osd ceph4:/dev/vdb
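# A sketch of the declarative alternative to adding each device by hand: write an
# OSD service spec and apply it from inside an interactive "cephadm shell"
# (the service_id, file path and host_pattern are examples):
cat > /tmp/osd_spec.yaml <<EOF
service_type: osd
service_id: all_hosts_all_devices
placement:
  host_pattern: 'ceph*'
data_devices:
  all: true
EOF
ceph orch apply -i /tmp/osd_spec.yaml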
cephadm shell ceph orch osd rm 0
cephadm shell ceph orch osd rm status
cephadm shell ceph osd out 0
# Wipe the data left on the removed OSD's disk
wipefs -af /dev/vdb
cephadm shell ceph-volume lvm zap /dev/vdb
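# The orchestrator can also zap a device on a remote host (hostname and path are examples):
cephadm shell ceph orch device zap ceph1 /dev/vdb --force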
radosgw-admin user list
ceph dashboard set-rgw-credentials
ceph dashboard feature status
ceph mgr services
ceph orch host ls
ceph osd pool ls
ceph osd tree
ceph orch apply rgw --help
ceph orch daemon add rgw --help
ceph orch apply rgw default --placement=4
radosgw-admin realm create --rgw-realm=default-realm --default
radosgw-admin zonegroup create --rgw-zonegroup=default-zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=default-zonegroup --rgw-zone=default-zone --master --default
radosgw-admin period update --rgw-realm=default-realm --commit
radosgw-admin zone get --rgw-zone=default
radosgw-admin user list
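# A sketch of creating an S3 user so that "radosgw-admin user list" has an entry
# (uid and display name are examples):
radosgw-admin user create --uid=test-user --display-name="Test User"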
# The attempts below had problems and did not succeed
ceph orch apply rgw default-realm default-zone --unmanaged
ceph orch daemon add rgw default-realm default-zone --placement="ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --placement="4 ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --port=7480 --placement="3 ceph01 ceph02 ceph03"
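# Likely cause: Pacific dropped the positional realm/zone arguments that Octopus
# accepted. A sketch of the Pacific-style form (service name and placement are examples):
ceph orch apply rgw default --realm=default-realm --zone=default-zone --port=7480 --placement="4 ceph1 ceph2 ceph3 ceph4"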
systemctl | grep rgw
ceph orch daemon rm rgw.rgw.ceph1.fdhraw
ceph orch daemon rm rgw.rgw.ceph2.ddkwry
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.fdhraw.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.ddkwry.service'
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.ddkwry.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.fdhraw.service'
ceph osd pool rm .rgw.root .rgw.root --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.log default.rgw.log --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.control default.rgw.control --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.meta default.rgw.meta --yes-i-really-really-mean-it
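# Pool deletion is refused unless the monitors allow it; enable it first if the
# commands above complain:
ceph config set mon mon_allow_pool_delete true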
# base directory for daemon data (default: /var/lib/ceph)
# base directory for daemon logs (default: /var/log/ceph)
# location of logrotate configuration files (default: /etc/logrotate.d)
# location of sysctl configuration files (default: /usr/lib/sysctl.d)
# base directory for systemd units (default: /etc/systemd/system)
# LATEST_STABLE_RELEASE = 'pacific'
# DATA_DIR = '/var/lib/ceph'
# LOG_DIR = '/var/log/ceph'
# LOCK_DIR = '/run/cephadm'
# LOGROTATE_DIR = '/etc/logrotate.d'
# SYSCTL_DIR = '/usr/lib/sysctl.d'
# UNIT_DIR = '/etc/systemd/system'
# SHELL_DEFAULT_CONF = '/etc/ceph/ceph.conf'
# SHELL_DEFAULT_KEYRING = '/etc/ceph/ceph.client.admin.keyring'
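# The defaults above can be re-checked against the downloaded cephadm script
# (assuming the constants sit at the top of the file, as in the Pacific script):
grep -E "^[A-Z_]+_DIR = " ./cephadm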
docker stop $(docker ps -a -q)
rm -rf /etc/ceph
rm -rf /var/lib/ceph
rm -rf /var/log/ceph
rm -rf /etc/logrotate.d/ceph*
rm -rf /etc/systemd/system/ceph*
docker ps -a
ll /etc/ceph
ll /var/lib/ceph
ll /etc/logrotate.d/ceph*
ll /etc/systemd/system/ceph*
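# cephadm also ships a built-in teardown for a given cluster fsid, which should
# cover most of the manual cleanup above (fsid is the one printed at bootstrap):
cephadm rm-cluster --force --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f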
$ cephadm bootstrap --mon-ip 192.168.0.11
Creating directory /etc/ceph for ceph.conf
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit systemd-timesyncd.service is enabled and running
Repeating the final host check...
docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit systemd-timesyncd.service is enabled and running
Host looks OK
Cluster fsid: c109aaae-9387-11ec-b2d2-61a194f6b77f
Verifying IP 192.168.0.11 port 3300 ...
Verifying IP 192.168.0.11 port 6789 ...
Mon IP `192.168.0.11` is in CIDR network `192.168.0.0/24`
- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
Pulling container image quay.io/ceph/ceph:v16...
Ceph version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network to 192.168.0.0/24
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Creating mgr...
Verifying port 9283 ...
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/15)...
mgr not available, waiting (2/15)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for mgr epoch 4...
mgr epoch 4 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to /etc/ceph/ceph.pub
Adding key to root@localhost authorized_keys...
Adding host ceph1...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for mgr epoch 8...
mgr epoch 8 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:
             URL: https://ceph1:8443/
            User: admin
        Password: 0qn42wpnjs
Enabling client.admin keyring and conf on hosts with "admin" label
You can access the Ceph CLI with:
        sudo /usr/sbin/cephadm shell --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
        ceph telemetry on
For more information see:
        https://docs.ceph.com/docs/pacific/mgr/telemetry/
Bootstrap complete.
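# If the generated dashboard password above is lost, it can be reset from inside
# "cephadm shell" (password file path and new password are examples):
echo 'NewDashboardPass1!' > /tmp/dashboard-pass
ceph dashboard ac-user-set-password admin -i /tmp/dashboard-pass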