# Download cephadm (pick one source: a local mirror, curl, or wget)
mkdir backup && cd backup && wget http://192.168.0.1/cephadm
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
wget https://github.com/ceph/ceph/raw/pacific/src/cephadm/cephadm
chmod +x cephadm
./cephadm add-repo --release pacific
./cephadm install
which cephadm

# Install a container runtime and pre-pull the Ceph image
snap install docker      # version 20.10.11, or:
apt install docker.io    # version 20.10.7-0ubuntu5~20.04.2
docker pull quay.io/ceph/ceph:v16

# Bootstrap the cluster on the first node
cephadm bootstrap --mon-ip 192.168.0.11

# Add the following entries to /etc/hosts on every node
vim /etc/hosts
192.168.0.11 ceph1
192.168.0.21 ceph2
192.168.0.31 ceph3
192.168.0.41 ceph4

# Distribute the cluster SSH key and add the hosts to the orchestrator
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph1
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph3
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph4
cephadm shell ceph orch host add ceph1 192.168.0.11
cephadm shell ceph orch host add ceph2 192.168.0.21
cephadm shell ceph orch host add ceph3 192.168.0.31
cephadm shell ceph orch host add ceph4 192.168.0.41

# Scratch notes for a file-backed OSD image (a combined sketch follows further below)
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G count=1          # write a 1 GiB file of zeros
dd if=/dev/zero of=/plot1/ceph/osd-1.img bs=1G seek=97 count=0  # extend it sparsely to 97 GiB
losetup -f /plot1/ceph/osd-1.img                                # attach to the first free loop device
losetup /dev/loop16 /plot1/ceph/osd-1.img                       # or attach to a specific loop device
losetup -d /dev/loop16                                          # detach
fallocate -l 97G /plot1/ceph/osd-1.img                          # or allocate the image in one step

# Cluster and orchestrator status
cephadm shell ceph -s
cephadm shell ceph orch ps
cephadm shell ceph orch ls
cephadm shell ceph orch device ls

# Deploy OSDs: either consume every eligible device, or add them one by one
cephadm shell ceph orch apply osd --all-available-devices
cephadm shell ceph orch apply osd --all-available-devices --unmanaged=true   # stop consuming new devices automatically
cephadm shell ceph orch daemon add osd ceph1:/dev/vdb
cephadm shell ceph orch daemon add osd ceph2:/dev/vdb
cephadm shell ceph orch daemon add osd ceph3:/dev/vdb
cephadm shell ceph orch daemon add osd ceph4:/dev/vdb

# Remove an OSD
cephadm shell ceph orch osd rm 0
cephadm shell ceph orch osd rm status
cephadm shell ceph osd out 0
# Wipe the contents of the removed disk
wipefs -af /dev/vdb
cephadm shell ceph-volume lvm zap /dev/vdb

# RGW and dashboard checks
radosgw-admin user list
ceph dashboard set-rgw-credentials
ceph dashboard feature status
ceph mgr services
ceph orch host ls
ceph osd pool ls
ceph osd tree

# Deploy RGW
ceph orch apply rgw --help
ceph orch daemon add rgw --help
ceph orch apply rgw default --placement=4
radosgw-admin realm create --rgw-realm=default-realm --default
radosgw-admin zonegroup create --rgw-zonegroup=default-zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=default-zonegroup --rgw-zone=default-zone --master --default
radosgw-admin period update --rgw-realm=default-realm --commit
radosgw-admin zone get --rgw-zone=default
radosgw-admin user list
# Problematic, did not succeed (a service-spec alternative is sketched after this block)
ceph orch apply rgw default-realm default-zone --unmanaged
ceph orch daemon add rgw default-realm default-zone --placement="ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --placement="4 ceph1 ceph2 ceph3 ceph4"
ceph orch daemon add rgw default default --port=7480 --placement="3 ceph01 ceph02 ceph03"

# Tear down the failed RGW daemons and their pools
systemctl | grep rgw
ceph orch daemon rm rgw.rgw.ceph1.fdhraw
ceph orch daemon rm rgw.rgw.ceph2.ddkwry
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.fdhraw.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph1.ddkwry.service'
systemctl stop ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.ddkwry.service
rm -f /etc/systemd/system/ceph-64871c1a-92e9-11ec-ac27-b1478704adb0.target.wants/'ceph-64871c1a-92e9-11ec-ac27-b1478704adb0@rgw.rgw.ceph2.fdhraw.service'
ceph osd pool rm .rgw.root .rgw.root --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.log default.rgw.log --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.control default.rgw.control --yes-i-really-really-mean-it
ceph osd pool rm default.rgw.meta default.rgw.meta --yes-i-really-really-mean-it
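The dd/losetup/fallocate lines above are scratch notes for backing an OSD with a file. A minimal sketch of how they could fit together is below, run on the host that owns the image (assumed to be ceph1 here); the path and size are the ones used above, and whether ceph-volume in Pacific actually accepts a bare loop device as an OSD target was not verified in these notes.

# Sketch only: file-backed OSD via a loop device (not verified here)
IMG=/plot1/ceph/osd-1.img
fallocate -l 97G "$IMG"                 # sparse 97 GiB backing file
LOOPDEV=$(losetup -f --show "$IMG")     # attach and print the loop device name
echo "attached $IMG as $LOOPDEV"
cephadm shell ceph orch daemon add osd "ceph1:$LOOPDEV"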
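As an alternative to the one-off `ceph orch apply rgw` / `ceph orch daemon add rgw` attempts marked as unsuccessful above, the RGW service can be described in a service specification and applied with `ceph orch apply -i`. The spec below is a sketch assuming the default-realm/default-zone created above and port 7480; the file name rgw.yaml is arbitrary.

# Sketch: declare RGW as a managed service instead of adding daemons by hand
cat > rgw.yaml <<'EOF'
service_type: rgw
service_id: default
placement:
  hosts:
    - ceph1
    - ceph2
    - ceph3
    - ceph4
spec:
  rgw_realm: default-realm
  rgw_zone: default-zone
  rgw_frontend_port: 7480
EOF
ceph orch apply -i rgw.yaml
ceph orch ls rgw        # confirm the service and its placement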
# cephadm defaults (constants from the cephadm script)
# base directory for daemon data (default: /var/lib/ceph)
# base directory for daemon logs (default: /var/log/ceph)
# location of logrotate configuration files (default: /etc/logrotate.d)
# location of sysctl configuration files (default: /usr/lib/sysctl.d)
# base directory for systemd units (default: /etc/systemd/system)
# LATEST_STABLE_RELEASE = 'pacific'
# DATA_DIR = '/var/lib/ceph'
# LOG_DIR = '/var/log/ceph'
# LOCK_DIR = '/run/cephadm'
# LOGROTATE_DIR = '/etc/logrotate.d'
# SYSCTL_DIR = '/usr/lib/sysctl.d'
# UNIT_DIR = '/etc/systemd/system'
# SHELL_DEFAULT_CONF = '/etc/ceph/ceph.conf'
# SHELL_DEFAULT_KEYRING = '/etc/ceph/ceph.client.admin.keyring'

# Manual cluster teardown: stop all containers and remove the directories above
docker stop $(docker ps -a -q)
rm -rf /etc/ceph
rm -rf /var/lib/ceph
rm -rf /var/log/ceph
rm -rf /etc/logrotate.d/ceph*
rm -rf /etc/systemd/system/ceph*

# Verify nothing is left behind
docker ps -a
ll /etc/ceph
ll /var/lib/ceph
ll /etc/logrotate.d/ceph*
ll /etc/systemd/system/ceph*
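The manual removal above can usually be replaced by cephadm's own teardown command, which stops and removes every daemon and data directory belonging to one cluster. A short sketch; the fsid shown is the one from the RGW unit names above and must be replaced with your own cluster's fsid.

# Sketch: let cephadm tear the cluster down by fsid instead of removing paths by hand
FSID=64871c1a-92e9-11ec-ac27-b1478704adb0   # substitute your cluster fsid
cephadm rm-cluster --fsid "$FSID" --force
systemctl daemon-reload
docker ps -a                                # should show no remaining ceph containers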
$ cephadm bootstrap --mon-ip 192.168.0.11
Creating directory /etc/ceph for ceph.conf
Verifying podman|docker is present...
Verifying lvm2 is present...
Verifying time synchronization is in place...
Unit systemd-timesyncd.service is enabled and running
Repeating the final host check...
docker (/usr/bin/docker) is present
systemctl is present
lvcreate is present
Unit systemd-timesyncd.service is enabled and running
Host looks OK
Cluster fsid: c109aaae-9387-11ec-b2d2-61a194f6b77f
Verifying IP 192.168.0.11 port 3300 ...
Verifying IP 192.168.0.11 port 6789 ...
Mon IP `192.168.0.11` is in CIDR network `192.168.0.0/24`
- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
Pulling container image quay.io/ceph/ceph:v16...
Ceph version: ceph version 16.2.7 (dd0603118f56ab514f133c8d2e3adfc983942503) pacific (stable)
Extracting ceph user uid/gid from container image...
Creating initial keys...
Creating initial monmap...
Creating mon...
Waiting for mon to start...
Waiting for mon...
mon is available
Assimilating anything we can from ceph.conf...
Generating new minimal ceph.conf...
Restarting the monitor...
Setting mon public_network to 192.168.0.0/24
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
Creating mgr...
Verifying port 9283 ...
Waiting for mgr to start...
Waiting for mgr...
mgr not available, waiting (1/15)...
mgr not available, waiting (2/15)...
mgr is available
Enabling cephadm module...
Waiting for the mgr to restart...
Waiting for mgr epoch 4...
mgr epoch 4 is available
Setting orchestrator backend to cephadm...
Generating ssh key...
Wrote public SSH key to /etc/ceph/ceph.pub
Adding key to root@localhost authorized_keys...
Adding host ceph1...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
Enabling the dashboard module...
Waiting for the mgr to restart...
Waiting for mgr epoch 8...
mgr epoch 8 is available
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:
             URL: https://ceph1:8443/
            User: admin
        Password: 0qn42wpnjs
Enabling client.admin keyring and conf on hosts with "admin" label
You can access the Ceph CLI with:
        sudo /usr/sbin/cephadm shell --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
        ceph telemetry on
For more information see:
        https://docs.ceph.com/docs/pacific/mgr/telemetry/
Bootstrap complete.
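After a bootstrap like the one above, the CLI access line from the output is a quick way to sanity-check the new cluster. A brief example using the fsid and paths printed above; expect a health warning until OSDs are added and only ceph1 listed until the other hosts are joined.

sudo /usr/sbin/cephadm shell --fsid c109aaae-9387-11ec-b2d2-61a194f6b77f \
    -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
# inside the shell:
ceph -s                # overall health and mon/mgr status
ceph orch host ls      # hosts known to the orchestrator
ceph mgr services      # dashboard and prometheus endpoints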