    [root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c4
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c4
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] bluestore : None
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f0611e11908>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] fs_type : xfs
    [ceph_deploy.cli][INFO ] block_wal : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] journal : None
    [ceph_deploy.cli][INFO ] subcommand : create
    [ceph_deploy.cli][INFO ] host : c4
    [ceph_deploy.cli][INFO ] filestore : None
    [ceph_deploy.cli][INFO ] func : <function osd at 0x7f061225ad70>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] zap_disk : False
    [ceph_deploy.cli][INFO ] data : /dev/sdc
    [ceph_deploy.cli][INFO ] block_db : None
    [ceph_deploy.cli][INFO ] dmcrypt : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
    [c4][DEBUG ] connected to host: c4
    [c4][DEBUG ] detect platform information from remote host
    [c4][DEBUG ] detect machine type
    [c4][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
    [ceph_deploy.osd][DEBUG ] Deploying osd to c4
    [c4][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
    [c4][DEBUG ] find the location of an executable
    [c4][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
    [c4][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c4][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 0b4c05f0-56b1-4063-afdb-46246cd3a10f
    [c4][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-5075a290-e557-4847-890f-9196fbe44ddd /dev/sdc
    [c4][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
    [c4][DEBUG ] stdout: Volume group "ceph-5075a290-e557-4847-890f-9196fbe44ddd" successfully created
    [c4][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f ceph-5075a290-e557-4847-890f-9196fbe44ddd
    [c4][DEBUG ] stdout: Logical volume "osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f" created.
    [c4][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c4][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
    [c4][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3
    [c4][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c4][DEBUG ] Running command: /bin/ln -s /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f /var/lib/ceph/osd/ceph-3/block
    [c4][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
    [c4][DEBUG ] stderr: got monmap epoch 1
    [c4][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQA/19Nc09fnCBAA19NSlNAxs8YRyYz86hALyg==
    [c4][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-3/keyring
    [c4][DEBUG ] added entity osd.3 auth auth(auid = 18446744073709551615 key=AQA/19Nc09fnCBAA19NSlNAxs8YRyYz86hALyg== with 0 caps)
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
    [c4][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 0b4c05f0-56b1-4063-afdb-46246cd3a10f --setuser ceph --setgroup ceph
    [c4][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
    [c4][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f --path /var/lib/ceph/osd/ceph-3 --no-mon-config
    [c4][DEBUG ] Running command: /bin/ln -snf /dev/ceph-5075a290-e557-4847-890f-9196fbe44ddd/osd-block-0b4c05f0-56b1-4063-afdb-46246cd3a10f /var/lib/ceph/osd/ceph-3/block
    [c4][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c4][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
    [c4][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-3-0b4c05f0-56b1-4063-afdb-46246cd3a10f
    [c4][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-0b4c05f0-56b1-4063-afdb-46246cd3a10f.service to /usr/lib/systemd/system/ceph-volume@.service.
    [c4][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@3
    [c4][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service to /usr/lib/systemd/system/ceph-osd@.service.
    [c4][DEBUG ] Running command: /bin/systemctl start ceph-osd@3
    [c4][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 3
    [c4][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
    [c4][INFO ] checking OSD status...
    [c4][DEBUG ] find the location of an executable
    [c4][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
    [ceph_deploy.osd][DEBUG ] Host c4 is now ready for osd use.
    [root@c4 ceph]#
    [root@c4 ceph]# ceph osd tree
    ID CLASS WEIGHT  TYPE NAME   STATUS REWEIGHT PRI-AFF
    -1       0.07794 root default
    -3       0.03897     host c4
     0   hdd 0.01949         osd.0   up  1.00000 1.00000
     3   hdd 0.01949         osd.3   up  1.00000 1.00000
    -5       0.01949     host c5
     1   hdd 0.01949         osd.1   up  1.00000 1.00000
    -7       0.01949     host c6
     2   hdd 0.01949         osd.2   up  1.00000 1.00000
    88. [root@c4 ceph]# ceph df
    89. GLOBAL:
    90. SIZE AVAIL RAW USED %RAW USED
    91. 80 GiB 53 GiB 27 GiB 34.30
    92. POOLS:
    93. NAME ID USED %USED MAX AVAIL OBJECTS
    94. .rgw.root 1 3.5 KiB 0 20 GiB 9
    95. default.rgw.control 2 0 B 0 20 GiB 8
    96. default.rgw.meta 3 1.2 KiB 0 20 GiB 7
    97. default.rgw.log 4 0 B 0 20 GiB 175
    98. .test.data 5 0 B 0 20 GiB 0
    99. default.rgw.buckets.index 6 0 B 0 20 GiB 110
    100. default.rgw.buckets.data 7 77 MiB 0.39 20 GiB 191057
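
    Both OSDs on c4 report up, and the pools show their expected capacity. Beyond the tree and pool views, two quick checks from any admin node confirm overall cluster state (assuming the admin keyring is present in /etc/ceph):

    [root@c4 ceph]# ceph -s          # cluster-wide status: health, monitor quorum, OSD up/in counts, PG states
    [root@c4 ceph]# ceph osd stat    # one-line OSD summary, e.g. how many OSDs are up and in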
    [root@c4 ceph]# ceph-deploy disk zap c5 /dev/sdc
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap c5 /dev/sdc
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] subcommand : zap
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f13b7d8bc68>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] host : c5
    [ceph_deploy.cli][INFO ] func : <function disk at 0x7f13b81dbde8>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] disk : ['/dev/sdc']
    [ceph_deploy.osd][DEBUG ] zapping /dev/sdc on c5
    [c5][DEBUG ] connected to host: c5
    [c5][DEBUG ] detect platform information from remote host
    [c5][DEBUG ] detect machine type
    [c5][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
    [c5][DEBUG ] zeroing last few blocks of device
    [c5][DEBUG ] find the location of an executable
    [c5][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdc
    [c5][DEBUG ] --> Zapping: /dev/sdc
    [c5][DEBUG ] --> --destroy was not specified, but zapping a whole device will remove the partition table
    [c5][DEBUG ] Running command: /usr/sbin/wipefs --all /dev/sdc
    [c5][DEBUG ] Running command: /bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10
    [c5][DEBUG ] stderr: 10+0 records in
    [c5][DEBUG ] 10+0 records out
    [c5][DEBUG ] 10485760 bytes (10 MB) copied
    [c5][DEBUG ] stderr: , 0.277108 s, 37.8 MB/s
    [c5][DEBUG ] --> Zapping successful for: <Raw Device: /dev/sdc>
    [root@c4 ceph]#
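
    Under the hood, ceph-volume lvm zap is exactly the two commands visible in the log above: wipefs clears filesystem and partition-table signatures, and dd zeroes the first 10 MiB of the device. The same cleanup can be done by hand on the OSD node if ceph-deploy is unavailable (destructive, so double-check the device name first):

    [root@c5 ~]# wipefs --all /dev/sdc
    [root@c5 ~]# dd if=/dev/zero of=/dev/sdc bs=1M count=10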
    [root@c4 ceph]# ceph-deploy disk zap c6 /dev/sdc
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap c6 /dev/sdc
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] subcommand : zap
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fd6e987bc68>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] host : c6
    [ceph_deploy.cli][INFO ] func : <function disk at 0x7fd6e9ccbde8>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] disk : ['/dev/sdc']
    [ceph_deploy.osd][DEBUG ] zapping /dev/sdc on c6
    [c6][DEBUG ] connected to host: c6
    [c6][DEBUG ] detect platform information from remote host
    [c6][DEBUG ] detect machine type
    [c6][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
    [c6][DEBUG ] zeroing last few blocks of device
    [c6][DEBUG ] find the location of an executable
    [c6][INFO ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdc
    [c6][DEBUG ] --> Zapping: /dev/sdc
    [c6][DEBUG ] --> --destroy was not specified, but zapping a whole device will remove the partition table
    [c6][DEBUG ] Running command: /usr/sbin/wipefs --all /dev/sdc
    [c6][DEBUG ] Running command: /bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10
    [c6][DEBUG ] stderr: 10+0 records in
    [c6][DEBUG ] 10+0 records out
    [c6][DEBUG ] 10485760 bytes (10 MB) copied
    [c6][DEBUG ] stderr: , 6.61296 s, 1.6 MB/s
    [c6][DEBUG ] --> Zapping successful for: <Raw Device: /dev/sdc>
    [root@c4 ceph]#
    [root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c5
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c5
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] bluestore : None
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fcc66c92908>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] fs_type : xfs
    [ceph_deploy.cli][INFO ] block_wal : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] journal : None
    [ceph_deploy.cli][INFO ] subcommand : create
    [ceph_deploy.cli][INFO ] host : c5
    [ceph_deploy.cli][INFO ] filestore : None
    [ceph_deploy.cli][INFO ] func : <function osd at 0x7fcc670dbd70>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] zap_disk : False
    [ceph_deploy.cli][INFO ] data : /dev/sdc
    [ceph_deploy.cli][INFO ] block_db : None
    [ceph_deploy.cli][INFO ] dmcrypt : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
    [c5][DEBUG ] connected to host: c5
    [c5][DEBUG ] detect platform information from remote host
    [c5][DEBUG ] detect machine type
    [c5][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
    [ceph_deploy.osd][DEBUG ] Deploying osd to c5
    [c5][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
    [c5][DEBUG ] find the location of an executable
    [c5][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
    [c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4ade4076-2981-4adf-a90b-221541b1314c
    [c5][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad /dev/sdc
    [c5][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
    [c5][DEBUG ] stdout: Volume group "ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad" successfully created
    [c5][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-4ade4076-2981-4adf-a90b-221541b1314c ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad
    [c5][DEBUG ] stdout: Logical volume "osd-block-4ade4076-2981-4adf-a90b-221541b1314c" created.
    [c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c5][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-4
    [c5][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-4
    [c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c5][DEBUG ] Running command: /bin/ln -s /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c /var/lib/ceph/osd/ceph-4/block
    [c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-4/activate.monmap
    [c5][DEBUG ] stderr: got monmap epoch 1
    [c5][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-4/keyring --create-keyring --name osd.4 --add-key AQC619Ncgk+gBhAA19h+Ij0G6+LUcuxzob/FkA==
    [c5][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-4/keyring
    [c5][DEBUG ] added entity osd.4 auth auth(auid = 18446744073709551615 key=AQC619Ncgk+gBhAA19h+Ij0G6+LUcuxzob/FkA== with 0 caps)
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/keyring
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4/
    [c5][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 4 --monmap /var/lib/ceph/osd/ceph-4/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-4/ --osd-uuid 4ade4076-2981-4adf-a90b-221541b1314c --setuser ceph --setgroup ceph
    [c5][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
    [c5][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c --path /var/lib/ceph/osd/ceph-4 --no-mon-config
    [c5][DEBUG ] Running command: /bin/ln -snf /dev/ceph-42549ad6-85d8-43d3-bab0-cdafcdd492ad/osd-block-4ade4076-2981-4adf-a90b-221541b1314c /var/lib/ceph/osd/ceph-4/block
    [c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4
    [c5][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-4-4ade4076-2981-4adf-a90b-221541b1314c
    [c5][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-4-4ade4076-2981-4adf-a90b-221541b1314c.service to /usr/lib/systemd/system/ceph-volume@.service.
    [c5][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@4
    [c5][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@4.service to /usr/lib/systemd/system/ceph-osd@.service.
    [c5][DEBUG ] Running command: /bin/systemctl start ceph-osd@4
    [c5][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 4
    [c5][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
    [c5][INFO ] checking OSD status...
    [c5][DEBUG ] find the location of an executable
    [c5][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
    [ceph_deploy.osd][DEBUG ] Host c5 is now ready for osd use.
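
    ceph-volume records everything it just built for osd.4: the volume group, the block logical volume, the OSD fsid, and the cluster name. A quick way to inspect that metadata on the OSD node (a verification step, not part of the ceph-deploy run above):

    [root@c5 ~]# ceph-volume lvm list /dev/sdc    # shows osd.4 with its block LV, osd fsid, and cluster fsid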
    [root@c4 ceph]# ceph-deploy osd create --data /dev/sdc c6
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdc c6
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] bluestore : None
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f56649c8908>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] fs_type : xfs
    [ceph_deploy.cli][INFO ] block_wal : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] journal : None
    [ceph_deploy.cli][INFO ] subcommand : create
    [ceph_deploy.cli][INFO ] host : c6
    [ceph_deploy.cli][INFO ] filestore : None
    [ceph_deploy.cli][INFO ] func : <function osd at 0x7f5664e11d70>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] zap_disk : False
    [ceph_deploy.cli][INFO ] data : /dev/sdc
    [ceph_deploy.cli][INFO ] block_db : None
    [ceph_deploy.cli][INFO ] dmcrypt : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdc
    [c6][DEBUG ] connected to host: c6
    [c6][DEBUG ] detect platform information from remote host
    [c6][DEBUG ] detect machine type
    [c6][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.6.1810 Core
    [ceph_deploy.osd][DEBUG ] Deploying osd to c6
    [c6][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
    [c6][DEBUG ] find the location of an executable
    [c6][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdc
    [c6][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c6][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 254f856a-c8f2-4c89-8676-d3b3468f5744
    [c6][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2 /dev/sdc
    [c6][DEBUG ] stdout: Physical volume "/dev/sdc" successfully created.
    [c6][DEBUG ] stdout: Volume group "ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2" successfully created
    [c6][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2
    [c6][DEBUG ] stdout: Logical volume "osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744" created.
    [c6][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c6][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-5
    [c6][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-5
    [c6][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c6][DEBUG ] Running command: /bin/ln -s /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 /var/lib/ceph/osd/ceph-5/block
    [c6][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-5/activate.monmap
    [c6][DEBUG ] stderr: got monmap epoch 1
    [c6][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-5/keyring --create-keyring --name osd.5 --add-key AQAG2NNcpFQCKxAA/BQVlRj/LbLFSkfZSyikMA==
    [c6][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-5/keyring
    [c6][DEBUG ] added entity osd.5 auth auth(auid = 18446744073709551615 key=AQAG2NNcpFQCKxAA/BQVlRj/LbLFSkfZSyikMA== with 0 caps)
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/keyring
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5/
    [c6][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 5 --monmap /var/lib/ceph/osd/ceph-5/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-5/ --osd-uuid 254f856a-c8f2-4c89-8676-d3b3468f5744 --setuser ceph --setgroup ceph
    [c6][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdc
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
    [c6][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 --path /var/lib/ceph/osd/ceph-5 --no-mon-config
    [c6][DEBUG ] Running command: /bin/ln -snf /dev/ceph-ddf09fb9-4150-48fa-9337-569912e8ffb2/osd-block-254f856a-c8f2-4c89-8676-d3b3468f5744 /var/lib/ceph/osd/ceph-5/block
    [c6][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-1
    [c6][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5
    [c6][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-5-254f856a-c8f2-4c89-8676-d3b3468f5744
    [c6][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-5-254f856a-c8f2-4c89-8676-d3b3468f5744.service to /usr/lib/systemd/system/ceph-volume@.service.
    [c6][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@5
    [c6][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@5.service to /usr/lib/systemd/system/ceph-osd@.service.
    [c6][DEBUG ] Running command: /bin/systemctl start ceph-osd@5
    [c6][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 5
    [c6][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdc
    [c6][INFO ] checking OSD status...
    [c6][DEBUG ] find the location of an executable
    [c6][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
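
    The zap-then-create sequence is identical for every node, so growing the cluster is easy to script from the admin node. A minimal sketch using only the commands shown above (adjust the host list and device name as needed):

    for host in c5 c6; do
        ceph-deploy disk zap "$host" /dev/sdc
        ceph-deploy osd create --data /dev/sdc "$host"
    done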

    data + journal

    Note that ceph-deploy 2.0.1 defaults to BlueStore, which does not use a journal: the --journal argument below is parsed (it shows up in the options dump) but ceph-volume is still invoked with --bluestore and only the --data device, so /dev/sdc is left untouched. Journals apply only to FileStore OSDs.

    [root@c4 ceph-cluster]# ceph-deploy osd create --data /dev/sdb --journal /dev/sdc c5
    [ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
    [ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create --data /dev/sdb --journal /dev/sdc c5
    [ceph_deploy.cli][INFO ] ceph-deploy options:
    [ceph_deploy.cli][INFO ] verbose : False
    [ceph_deploy.cli][INFO ] bluestore : None
    [ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f4200cb77e8>
    [ceph_deploy.cli][INFO ] cluster : ceph
    [ceph_deploy.cli][INFO ] fs_type : xfs
    [ceph_deploy.cli][INFO ] block_wal : None
    [ceph_deploy.cli][INFO ] default_release : False
    [ceph_deploy.cli][INFO ] username : None
    [ceph_deploy.cli][INFO ] journal : /dev/sdc
    [ceph_deploy.cli][INFO ] subcommand : create
    [ceph_deploy.cli][INFO ] host : c5
    [ceph_deploy.cli][INFO ] filestore : None
    [ceph_deploy.cli][INFO ] func : <function osd at 0x7f42010fdd70>
    [ceph_deploy.cli][INFO ] ceph_conf : None
    [ceph_deploy.cli][INFO ] zap_disk : False
    [ceph_deploy.cli][INFO ] data : /dev/sdb
    [ceph_deploy.cli][INFO ] block_db : None
    [ceph_deploy.cli][INFO ] dmcrypt : False
    [ceph_deploy.cli][INFO ] overwrite_conf : False
    [ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
    [ceph_deploy.cli][INFO ] quiet : False
    [ceph_deploy.cli][INFO ] debug : False
    [ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
    [c5][DEBUG ] connected to host: c5
    [c5][DEBUG ] detect platform information from remote host
    [c5][DEBUG ] detect machine type
    [c5][DEBUG ] find the location of an executable
    [ceph_deploy.osd][INFO ] Distro info: CentOS Linux 7.3.1611 Core
    [ceph_deploy.osd][DEBUG ] Deploying osd to c5
    [c5][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
    [c5][WARNIN] osd keyring does not exist yet, creating one
    [c5][DEBUG ] create a keyring file
    [c5][DEBUG ] find the location of an executable
    [c5][INFO ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
    [c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 4ae6fb71-39ee-468a-9c8b-45de6379e535
    [c5][DEBUG ] Running command: /usr/sbin/vgcreate --force --yes ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7 /dev/sdb
    [c5][DEBUG ] stdout: Physical volume "/dev/sdb" successfully created.
    [c5][DEBUG ] stdout: Volume group "ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7" successfully created
    [c5][DEBUG ] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7
    [c5][DEBUG ] stdout: Logical volume "osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535" created.
    [c5][DEBUG ] Running command: /bin/ceph-authtool --gen-print-key
    [c5][DEBUG ] Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
    [c5][DEBUG ] Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0
    [c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-0
    [c5][DEBUG ] Running command: /bin/ln -s /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 /var/lib/ceph/osd/ceph-0/block
    [c5][DEBUG ] Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
    [c5][DEBUG ] stderr: got monmap epoch 1
    [c5][DEBUG ] Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQB3LNlc+/f5KxAAWQHaWpsmBDOmVhx0lAeTOQ==
    [c5][DEBUG ] stdout: creating /var/lib/ceph/osd/ceph-0/keyring
    [c5][DEBUG ] added entity osd.0 auth auth(auid = 18446744073709551615 key=AQB3LNlc+/f5KxAAWQHaWpsmBDOmVhx0lAeTOQ== with 0 caps)
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
    [c5][DEBUG ] Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 4ae6fb71-39ee-468a-9c8b-45de6379e535 --setuser ceph --setgroup ceph
    [c5][DEBUG ] --> ceph-volume lvm prepare successful for: /dev/sdb
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
    [c5][DEBUG ] Running command: /bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 --path /var/lib/ceph/osd/ceph-0 --no-mon-config
    [c5][DEBUG ] Running command: /bin/ln -snf /dev/ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535 /var/lib/ceph/osd/ceph-0/block
    [c5][DEBUG ] Running command: /bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /dev/dm-0
    [c5][DEBUG ] Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
    [c5][DEBUG ] Running command: /bin/systemctl enable ceph-volume@lvm-0-4ae6fb71-39ee-468a-9c8b-45de6379e535
    [c5][DEBUG ] stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-4ae6fb71-39ee-468a-9c8b-45de6379e535.service to /usr/lib/systemd/system/ceph-volume@.service.
    [c5][DEBUG ] Running command: /bin/systemctl enable --runtime ceph-osd@0
    [c5][DEBUG ] stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
    [c5][DEBUG ] Running command: /bin/systemctl start ceph-osd@0
    [c5][DEBUG ] --> ceph-volume lvm activate successful for osd ID: 0
    [c5][DEBUG ] --> ceph-volume lvm create successful for: /dev/sdb
    [c5][INFO ] checking OSD status...
    [c5][DEBUG ] find the location of an executable
    [c5][INFO ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
    [ceph_deploy.osd][DEBUG ] Host c5 is now ready for osd use.
    [root@c4 ceph-cluster]#
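
    As the log confirms, the OSD was created as BlueStore on /dev/sdb alone and the --journal device was never consumed. To actually place a journal on /dev/sdc, the OSD would have to be created as FileStore instead. A hypothetical invocation based on the --filestore option visible in the ceph-deploy options dump above (zap both devices first; fs_type already defaults to xfs):

    [root@c4 ceph-cluster]# ceph-deploy osd create --filestore --data /dev/sdb --journal /dev/sdc c5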