mkdir backup && cd backup && wget http://192.168.0.1/cephadm
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
wget https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
chmod +x cephadm
./cephadm add-repo --release pacific
./cephadm install
which cephadm
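# Optional check that cephadm is installed and which release it targets
# (this may pull the default container image the first time it runs):
cephadm version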
snap install docker # version 20.10.11, or
apt install docker.io # version 20.10.7-0ubuntu5~20.04.2
docker pull quay.io/ceph/ceph:v16
cephadm bootstrap --mon-ip 192.168.0.11
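# An alternative bootstrap invocation that sets the dashboard login up front and
# skips the monitoring stack (flag values below are assumptions, not what was used here):
cephadm bootstrap --mon-ip 192.168.0.11 --initial-dashboard-user admin --initial-dashboard-password 'changeme' --skip-monitoring-stack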
vim /etc/hosts
# add the following entries to /etc/hosts
192.168.0.11 ceph1
192.168.0.21 ceph2
192.168.0.31 ceph3
192.168.0.41 ceph4
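# A quick way to push the same hosts file to the other nodes (a sketch; it
# assumes root SSH access to each node and the hostnames above):
for h in ceph2 ceph3 ceph4; do scp /etc/hosts root@$h:/etc/hosts; done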
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph1
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph3
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph4
cephadm shell ceph orch host add ceph1 192.168.0.11
cephadm shell ceph orch host add ceph2 192.168.0.21
cephadm shell ceph orch host add ceph3 192.168.0.31
cephadm shell ceph orch host add ceph4 192.168.0.41
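# Confirm the hosts joined; optionally give them the _admin label so cephadm
# keeps ceph.conf and the admin keyring on them (the label step is an optional
# addition, not part of the original flow):
cephadm shell ceph orch host ls
cephadm shell ceph orch host label add ceph2 _admin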
# create a 1 GiB image file filled from /dev/zero
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G count=1
# or extend it sparsely to 97 GiB (seek past the end, write nothing)
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G seek=97 count=0
# attach the image to the first free loop device
losetup -f /plot1/ceph/osd-1.img
# attach the image to a specific loop device (no -f when the device is named explicitly)
losetup /dev/loop16 /plot1/ceph/osd-1.img
# detach the loop device
losetup -d /dev/loop16
# alternatively, allocate the 97 GiB image in one step
fallocate -l 97G /plot1/ceph/osd-1.img
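# The attach step can also print the loop device it picked, which is handy in
# scripts (a sketch using the same image path as above):
LOOPDEV=$(losetup -f --show /plot1/ceph/osd-1.img)
echo "$LOOPDEV"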
cephadm shell ceph -s
cephadm shell ceph orch ps
cephadm shell ceph orch ls
cephadm shell ceph orch device ls
cephadm shell ceph orch apply osd --all-available-devices
cephadm shell ceph orch apply osd --all-available-devices --unmanaged=true
cephadm shell ceph orch daemon add osd ceph1:/dev/vdb
cephadm shell ceph orch daemon add osd ceph2:/dev/vdb
cephadm shell ceph orch daemon add osd ceph3:/dev/vdb
cephadm shell ceph orch daemon add osd ceph4:/dev/vdb
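# The per-host daemon add commands above can also be written as a declarative OSD
# spec (a sketch: service_id, host pattern, and file path are assumptions, and it
# assumes cephadm shell's --mount option accepts a src:dst mapping):
cat > /root/osd-spec.yaml <<'EOF'
service_type: osd
service_id: osd_vdb
placement:
  host_pattern: 'ceph*'
data_devices:
  paths:
    - /dev/vdb
EOF
cephadm shell --mount /root/osd-spec.yaml:/mnt/osd-spec.yaml -- ceph orch apply -i /mnt/osd-spec.yaml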
cephadm shell ceph orch osd rm 0
cephadm shell ceph orch osd rm status
cephadm shell ceph osd out 0
# wipe the data left on the removed disk
wipefs -af /dev/vdb
cephadm shell ceph-volume lvm zap /dev/vdb
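# The same wipe can be driven through the orchestrator on any managed host
# (host and device are the ones used above):
cephadm shell ceph orch device zap ceph1 /dev/vdb --force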
radosgw-admin user list
ceph dashboard set-rgw-credentials
ceph dashboard feature status
ceph mgr services
ceph orch host ls
ceph osd pool ls
ceph osd tree
ceph orch apply rgw --help
ceph orch daemon add rgw --help
ceph orch apply rgw default --placement=4
radosgw-admin realm create --rgw-realm=default-realm --default
radosgw-admin zonegroup create --rgw-zonegroup=default-zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=default-zonegroup --rgw-zone=default-zone --master --default
radosgw-admin period update --rgw-realm=default-realm --commit
radosgw-admin zone get --rgw-zone=default
radosgw-admin user list
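# If no RGW user exists yet, one can be created for S3/dashboard access
# (the uid and display name below are assumptions):
radosgw-admin user create --uid=testuser --display-name="Test User"
radosgw-admin user info --uid=testuser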
# note: the following attempts had problems and did not succeed
ceph orch apply rgw default-realm default-zone --unmanaged
ceph orch daemon add rgw default-realm default-zone --placement="ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --placement="4 ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --port=7480 --placement="3 ceph01 ceph02 ceph03"
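# On Pacific the realm and zone are usually passed as flags rather than
# positionally; a possible equivalent of the apply above (not verified here):
ceph orch apply rgw default --realm=default-realm --zone=default-zone --port=7480 --placement="4 ceph1 ceph2 ceph3 ceph4"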
systemctl | grep rgw
ceph orch daemon rm rgw.rgw.ceph1.fdhraw
ceph orch daemon rm rgw.rgw.ceph2.ddkwry
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.fdhraw.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.fdhraw.service'
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.ddkwry.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.ddkwry.service'
ceph osd pool rm .rgw.root .rgw.root --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.log default.rgw.log --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.control default.rgw.control --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.meta default.rgw.meta --yes-i-really-really-mean-it
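# Pool deletion is refused unless the monitors allow it; if the commands above
# error out, enable the flag first:
ceph config set mon mon_allow_pool_delete true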
# base directory for daemon data (default: /var/lib/ceph)
# base directory for daemon logs (default: /var/log/ceph)
# location of logrotate configuration files (default: /etc/logrotate.d)
# location of sysctl configuration files (default: /usr/lib/sysctl.d)
# base directory for systemd units (default: /etc/systemd/system)
# LATEST_STABLE_RELEASE = 'pacific'
# DATA_DIR = '/var/lib/ceph'
# LOG_DIR = '/var/log/ceph'
# LOCK_DIR = '/run/cephadm'
# LOGROTATE_DIR = '/etc/logrotate.d'
# SYSCTL_DIR = '/usr/lib/sysctl.d'
# UNIT_DIR = '/etc/systemd/system'
# SHELL_DEFAULT_CONF = '/etc/ceph/ceph.conf'
# SHELL_DEFAULT_KEYRING = '/etc/ceph/ceph.client.admin.keyring'
docker stop $(docker ps -a -q)
rm -rf /etc/ceph
rm -rf /var/lib/ceph
rm -rf /var/log/ceph
rm -rf /etc/logrotate.d/ceph*
rm -rf /etc/systemd/system/ceph*
docker ps -a
ll /etc/ceph
ll /var/lib/ceph
ll /etc/logrotate.d/ceph*
ll /etc/systemd/system/ceph*
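# cephadm also ships a purge command that performs this cleanup in one step;
# the fsid is the one printed at bootstrap (the value below is the one from the
# log that follows, substitute your own):
cephadm rm-cluster --force --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f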
$ cephadm bootstrap --mon-ip 192.168.0.11
Creating directory /etc/ceph for ceph.conf
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit systemd-timesyncd.service is enabled and running
Repeating the final host check...
docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit systemd-timesyncd.service is enabled and running
Host looks OK
Cluster fsid: c109aaae-9387-11ec-b2d2-61a194f6b77f
Verifying IP 192.168.0.11 port 3300 ...
Verifying IP 192.168.0.11 port 6789 ...
Mon IP `192.168.0.11` is in CIDR network `192.168.0.0/24`
- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
Pulling container image quay.io/ceph/ceph:v16...
Ceph version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network to 192.168.0.0/24
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Creating mgr...
Verifying port 9283 ...
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/15)...
mgr not available, waiting (2/15)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for mgr epoch 4...
mgr epoch 4 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to /etc/ceph/ceph.pub
Adding key to root@localhost authorized_keys...
Adding host ceph1...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for mgr epoch 8...
mgr epoch 8 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:
URL: https://ceph1:8443/
User: admin
Password: 0qn42wpnjs
Enabling client.admin keyring and conf on hosts with "admin" label
You can access the Ceph CLI with:
sudo /usr/sbin/cephadm shell --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
ceph telemetry on
For more information see:
https://docs.ceph.com/docs/pacific/mgr/telemetry/
Bootstrap complete.