[root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c4
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c4
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0611e11908>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] journal : None
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] host : c4
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] func : <function osd at 0x7f061225ad70>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] data : /dev/sdc
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[c4][DEBUG ] connected to host: c4
[c4][DEBUG ] detect platform information from remote host
[c4][DEBUG ] detect machine type
[c4][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to c4
[c4][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[c4][DEBUG ] find the location of an executable
[c4][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[c4][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c4][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 0b4c05f0-56b1-4063-afdb-46246cd3a10f
[c4][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-5075a290-e557-4847-890f-9196fbe44ddd /dev/sdc
[c4][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
[c4][DEBUG ] stdout: Volume group "ceph-5075a290-e557-4847-890f-9196fbe44ddd" successfully created
[c4][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f ceph-5075a290-e557-4847-890f-9196fbe44ddd
[c4][DEBUG ] stdout: Logical volume "osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f" created.
[c4][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c4][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
[c4][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3
[c4][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c4][DEBUG ] Running command: /bin/ln -s /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f /var/lib/ceph/osd/ceph-3/block
[c4][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
[c4][DEBUG ] stderr: got monmap epoch 1
[c4][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQA/19Nc09fnCBAA19NSlNAxs8YRyYz86hALyg==
[c4][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-3/keyring
[c4][DEBUG ] added entity osd.3 auth auth(auid = 18446744073709551615 key=AQA/19Nc09fnCBAA19NSlNAxs8YRyYz86hALyg== with 0 caps)
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
[c4][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 0b4c05f0-56b1-4063-afdb-46246cd3a10f --setuser ceph --setgroup ceph
[c4][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[c4][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f --path /var/lib/ceph/osd/ceph-3 --no-mon-config
[c4][DEBUG ] Running command: /bin/ln -snf /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f /var/lib/ceph/osd/ceph-3/block
[c4][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
[c4][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-3-0b4c05f0-56b1-4063-afdb-46246cd3a10f
[c4][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-0b4c05f0-56b1-4063-afdb-46246cd3a10f.service to /usr/lib/systemd/system/ceph-volume@.service.
[c4][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@3
[c4][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service to /usr/lib/systemd/system/ceph-osd@.service.
[c4][DEBUG ] Running command: /bin/systemctl start ceph-osd@3
[c4][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 3
[c4][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
[c4][INFO ] checking OSD status...
[c4][DEBUG ] find the location of an executable
[c4][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host c4 is now ready for osd use.
[root@c4 ceph]#
[root@c4 ceph]#
[root@c4 ceph]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME STATUS REWEIGHT PRI-AFF
-1       0.07794 root default
-3       0.03897     host c4
 0   hdd 0.01949         osd.0      up  1.00000 1.00000
 3   hdd 0.01949         osd.3      up  1.00000 1.00000
-5       0.01949     host c5
 1   hdd 0.01949         osd.1      up  1.00000 1.00000
-7       0.01949     host c6
 2   hdd 0.01949         osd.2      up  1.00000 1.00000
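Note: WEIGHT is the CRUSH weight, which ceph-volume derives from the device size in TiB (0.01949 TiB ≈ 20 GiB here). If a weight ever needs adjusting by hand, a command along these lines should work (the value shown is only an illustration):

    ceph osd crush reweight osd.3 0.01949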
[root@c4 ceph]# ceph df
GLOBAL:
    SIZE       AVAIL      RAW USED     %RAW USED
    80 GiB     53 GiB       27 GiB         34.30
POOLS:
    NAME                          ID     USED        %USED     MAX AVAIL     OBJECTS
    .rgw.root                     1      3.5 KiB         0        20 GiB           9
    default.rgw.control           2          0 B         0        20 GiB           8
    default.rgw.meta              3      1.2 KiB         0        20 GiB           7
    default.rgw.log               4          0 B         0        20 GiB         175
    .test.data                    5          0 B         0        20 GiB           0
    default.rgw.buckets.index     6          0 B         0        20 GiB         110
    default.rgw.buckets.data      7       77 MiB      0.39        20 GiB      191057
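Note: ceph df also accepts a format flag for scripting, mirroring the --format=json call ceph-deploy itself issues after each OSD create; for example:

    ceph df detail --format=json-pretty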
[root@c4 ceph]# ceph-deploy disk zap c5 /dev/sdc
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap c5 /dev/sdc
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : zap
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f13b7d8bc68>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : c5
[ceph_deploy.cli][INFO ] func : <function disk at 0x7f13b81dbde8>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] disk : ['/dev/sdc']
[ceph_deploy.osd][DEBUG ] zapping /dev/sdc on c5
[c5][DEBUG ] connected to host: c5
[c5][DEBUG ] detect platform information from remote host
[c5][DEBUG ] detect machine type
[c5][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
[c5][DEBUG ] zeroing last few blocks of device
[c5][DEBUG ] find the location of an executable
[c5][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdc
[c5][DEBUG ] --> Zapping: /dev/sdc
[c5][DEBUG ] --> --destroy was not specified, but zapping a whole device will remove the partition table
[c5][DEBUG ] Running command: /usr/sbin/wipefs --all /dev/sdc
[c5][DEBUG ] Running command: /bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10
[c5][DEBUG ] stderr: 10+0 records in
[c5][DEBUG ] 10+0 records out
[c5][DEBUG ] 10485760 bytes (10 MB) copied
[c5][DEBUG ] stderr: , 0.277108 s, 37.8 MB/s
[c5][DEBUG ] --> Zapping successful for: <Raw Device: /dev/sdc>
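Note: as the output warns, disk zap runs ceph-volume without --destroy, so only filesystem signatures, the partition table, and the first and last blocks of the device are wiped. If the device still carries LVM metadata from a previous OSD, it may be necessary to zap with --destroy directly on the OSD host; a sketch, assuming the same host and device:

    ssh c5 ceph-volume lvm zap --destroy /dev/sdc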
[root@c4 ceph]#
[root@c4 ceph]#
[root@c4 ceph]# ceph-deploy disk zap c6 /dev/sdc
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap c6 /dev/sdc
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : zap
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fd6e987bc68>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] host : c6
[ceph_deploy.cli][INFO ] func : <function disk at 0x7fd6e9ccbde8>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] disk : ['/dev/sdc']
[ceph_deploy.osd][DEBUG ] zapping /dev/sdc on c6
[c6][DEBUG ] connected to host: c6
[c6][DEBUG ] detect platform information from remote host
[c6][DEBUG ] detect machine type
[c6][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
[c6][DEBUG ] zeroing last few blocks of device
[c6][DEBUG ] find the location of an executable
[c6][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdc
[c6][DEBUG ] --> Zapping: /dev/sdc
[c6][DEBUG ] --> --destroy was not specified, but zapping a whole device will remove the partition table
[c6][DEBUG ] Running command: /usr/sbin/wipefs --all /dev/sdc
[c6][DEBUG ] Running command: /bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10
[c6][DEBUG ] stderr: 10+0 records in
[c6][DEBUG ] 10+0 records out
[c6][DEBUG ] 10485760 bytes (10 MB) copied
[c6][DEBUG ] stderr: , 6.61296 s, 1.6 MB/s
[c6][DEBUG ] --> Zapping successful for: <Raw Device: /dev/sdc>
[root@c4 ceph]#
[root@c4 ceph]#
[root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c5
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c5
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fcc66c92908>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] journal : None
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] host : c5
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] func : <function osd at 0x7fcc670dbd70>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] data : /dev/sdc
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[c5][DEBUG ] connected to host: c5
[c5][DEBUG ] detect platform information from remote host
[c5][DEBUG ] detect machine type
[c5][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to c5
[c5][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[c5][DEBUG ] find the location of an executable
[c5][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4ade4076-2981-4adf-a90b-221541b1314c
[c5][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad /dev/sdc
[c5][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
[c5][DEBUG ] stdout: Volume group "ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad" successfully created
[c5][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-4ade4076-2981-4adf-a90b-221541b1314c ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad
[c5][DEBUG ] stdout: Logical volume "osd-block-4ade4076-2981-4adf-a90b-221541b1314c" created.
[c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c5][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4
[c5][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-4
[c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c5][DEBUG ] Running command: /bin/ln -s /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c /var/lib/ceph/osd/ceph-4/block
[c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap
[c5][DEBUG ] stderr: got monmap epoch 1
[c5][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQC619Ncgk+gBhAA19h+Ij0G6+LUcuxzob/FkA==
[c5][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-4/keyring
[c5][DEBUG ] added entity osd.4 auth auth(auid = 18446744073709551615 key=AQC619Ncgk+gBhAA19h+Ij0G6+LUcuxzob/FkA== with 0 caps)
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/
[c5][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 4ade4076-2981-4adf-a90b-221541b1314c --setuser ceph --setgroup ceph
[c5][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[c5][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c --path /var/lib/ceph/osd/ceph-4 --no-mon-config
[c5][DEBUG ] Running command: /bin/ln -snf /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c /var/lib/ceph/osd/ceph-4/block
[c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
[c5][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-4-4ade4076-2981-4adf-a90b-221541b1314c
[c5][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-4ade4076-2981-4adf-a90b-221541b1314c.service to /usr/lib/systemd/system/ceph-volume@.service.
[c5][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@4
[c5][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service to /usr/lib/systemd/system/ceph-osd@.service.
[c5][DEBUG ] Running command: /bin/systemctl start ceph-osd@4
[c5][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 4
[c5][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
[c5][INFO ] checking OSD status...
[c5][DEBUG ] find the location of an executable
[c5][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host c5 is now ready for osd use.
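To inspect what ceph-volume just created (the VG/LV backing osd.4), its metadata can be listed on the OSD host; a hypothetical session:

    ssh c5 ceph-volume lvm list /dev/sdc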
[root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c6
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c6
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f56649c8908>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] journal : None
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] host : c6
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] func : <function osd at 0x7f5664e11d70>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] data : /dev/sdc
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
[c6][DEBUG ] connected to host: c6
[c6][DEBUG ] detect platform information from remote host
[c6][DEBUG ] detect machine type
[c6][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to c6
[c6][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[c6][DEBUG ] find the location of an executable
[c6][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
[c6][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c6][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 254f856a-c8f2-4c89-8676-d3b3468f5744
[c6][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2 /dev/sdc
[c6][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
[c6][DEBUG ] stdout: Volume group "ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2" successfully created
[c6][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2
[c6][DEBUG ] stdout: Logical volume "osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744" created.
[c6][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c6][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-5
[c6][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-5
[c6][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c6][DEBUG ] Running command: /bin/ln -s /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 /var/lib/ceph/osd/ceph-5/block
[c6][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-5/activate.monmap
[c6][DEBUG ] stderr: got monmap epoch 1
[c6][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-5/keyring --create-keyring --name osd.5 --add-key AQAG2NNcpFQCKxAA/BQVlRj/LbLFSkfZSyikMA==
[c6][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-5/keyring
[c6][DEBUG ] added entity osd.5 auth auth(auid = 18446744073709551615 key=AQAG2NNcpFQCKxAA/BQVlRj/LbLFSkfZSyikMA== with 0 caps)
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/keyring
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/
[c6][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 5 --monmap /var/lib/ceph/osd/ceph-5/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-5/ --osd-uuid 254f856a-c8f2-4c89-8676-d3b3468f5744 --setuser ceph --setgroup ceph
[c6][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[c6][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
[c6][DEBUG ] Running command: /bin/ln -snf /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 /var/lib/ceph/osd/ceph-5/block
[c6][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
[c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
[c6][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-5-254f856a-c8f2-4c89-8676-d3b3468f5744
[c6][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-5-254f856a-c8f2-4c89-8676-d3b3468f5744.service to /usr/lib/systemd/system/ceph-volume@.service.
[c6][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@5
[c6][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@5.service to /usr/lib/systemd/system/ceph-osd@.service.
[c6][DEBUG ] Running command: /bin/systemctl start ceph-osd@5
[c6][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 5
[c6][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
[c6][INFO ] checking OSD status...
[c6][DEBUG ] find the location of an executable
[c6][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
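Once the create finishes, overall OSD state can be confirmed from the admin node with the same status commands used elsewhere in this session, e.g.:

    ceph osd stat
    ceph osd tree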
data + journal

Note: the --journal option only takes effect with --filestore. These OSDs use the default BlueStore backend, so the ceph-volume invocation below receives only --data /dev/sdb and the journal device is silently ignored; see the note after this block for the BlueStore equivalent.
[root@c4 ceph-cluster]# ceph-deploy osd create --data /dev/sdb --journal /dev/sdc c5
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdb --journal /dev/sdc c5
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] bluestore : None
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f4200cb77e8>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] fs_type : xfs
[ceph_deploy.cli][INFO ] block_wal : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] journal : /dev/sdc
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] host : c5
[ceph_deploy.cli][INFO ] filestore : None
[ceph_deploy.cli][INFO ] func : <function osd at 0x7f42010fdd70>
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] zap_disk : False
[ceph_deploy.cli][INFO ] data : /dev/sdb
[ceph_deploy.cli][INFO ] block_db : None
[ceph_deploy.cli][INFO ] dmcrypt : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] debug : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[c5][DEBUG ] connected to host: c5
[c5][DEBUG ] detect platform information from remote host
[c5][DEBUG ] detect machine type
[c5][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to c5
[c5][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[c5][WARNIN] osd keyring does not exist yet, creating one
[c5][DEBUG ] create a keyring file
[c5][DEBUG ] find the location of an executable
[c5][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4ae6fb71-39ee-468a-9c8b-45de6379e535
[c5][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7 /dev/sdb
[c5][DEBUG ] stdout: Physical volume "/dev/sdb" successfully created.
[c5][DEBUG ] stdout: Volume group "ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7" successfully created
[c5][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7
[c5][DEBUG ] stdout: Logical volume "osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535" created.
[c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
[c5][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
[c5][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
[c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[c5][DEBUG ] Running command: /bin/ln -s /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 /var/lib/ceph/osd/ceph-0/block
[c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
[c5][DEBUG ] stderr: got monmap epoch 1
[c5][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQB3LNlc+/f5KxAAWQHaWpsmBDOmVhx0lAeTOQ==
[c5][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-0/keyring
[c5][DEBUG ] added entity osd.0 auth auth(auid = 18446744073709551615 key=AQB3LNlc+/f5KxAAWQHaWpsmBDOmVhx0lAeTOQ== with 0 caps)
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
[c5][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4ae6fb71-39ee-468a-9c8b-45de6379e535 --setuser ceph --setgroup ceph
[c5][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdb
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[c5][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
[c5][DEBUG ] Running command: /bin/ln -snf /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 /var/lib/ceph/osd/ceph-0/block
[c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-0
[c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[c5][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-0-4ae6fb71-39ee-468a-9c8b-45de6379e535
[c5][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-4ae6fb71-39ee-468a-9c8b-45de6379e535.service to /usr/lib/systemd/system/ceph-volume@.service.
[c5][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@0
[c5][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[c5][DEBUG ] Running command: /bin/systemctl start ceph-osd@0
[c5][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 0
[c5][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdb
[c5][INFO ] checking OSD status...
[c5][DEBUG ] find the location of an executable
[c5][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host c5 is now ready for osd use.
[root@c4 ceph-cluster]#
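Note: because these OSDs are BlueStore, the --journal device above was dropped (the ceph-volume invocation only received --data /dev/sdb). To place BlueStore's RocksDB and/or WAL on a separate fast device, the --block-db / --block-wal options visible in the option dump would be used instead; a sketch, assuming /dev/sdc is the intended DB device:

    ceph-deploy osd create --data /dev/sdb --block-db /dev/sdc c5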