Check OSD status

    root@ceph1:~# ceph osd tree
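
For a one-line count of how many OSDs exist and how many are up and in, ceph osd stat is a lighter-weight companion check:

    ceph osd stat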

List buckets

    radosgw-admin bucket list
    [
        "rgw",
        "my-new-bucket",
        "ptmind-test-bucket"
    ]

List pools

    $ rados lspools
    rbd
    libvirt-pool

Create a pool

    ceph osd pool create {pool-name} {pg-num}

pool-name: name of the pool; it must be unique.
pg-num: total number of placement groups in the pool.
With fewer than 5 OSDs, set pg_num to 128.
With 5 to 10 OSDs, set pg_num to 512.
With 10 to 50 OSDs, set pg_num to 4096.
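
For example, on the three-OSD cluster shown above (fewer than 5 OSDs) a pool would get 128 placement groups; the pool name my-test-pool is only a placeholder:

    # 128 pg_num, with pgp_num set to the same value
    ceph osd pool create my-test-pool 128 128

The optional third argument sets pgp_num, which is normally kept equal to pg_num.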

Check cluster health

    ceph health detail
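
For a broader one-screen summary of the cluster (monitors, OSD counts, PG states, and client I/O), ceph -s is the usual companion command:

    ceph -s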

Check network statistics.

    netstat -s

View the OSD map

    [root@c4 dev]# ceph osd dump

    epoch 24
    fsid 41d4b8c7-5599-4cd6-bae5-0fbf23b13f82
    created 2019-05-05 10:11:37.453100
    modified 2019-05-05 10:24:43.576555
    flags sortbitwise,recovery_deletes,purged_snapdirs
    crush_version 7
    full_ratio 0.95
    backfillfull_ratio 0.9
    nearfull_ratio 0.85
    require_min_compat_client jewel
    min_compat_client jewel
    require_osd_release mimic
    pool 1 '.rgw.root' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 15 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
    pool 2 'default.rgw.meta' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 18 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
    pool 3 'default.rgw.log' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 20 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
    pool 4 'default.rgw.control' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 23 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
    max_osd 3
    osd.0 up in weight 1 up_from 5 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.34:6800/6420 192.168.1.34:6801/6420 192.168.1.34:6802/6420 192.168.1.34:6803/6420 exists,up b698e4cc-496d-44e6-9a97-7d37105ecf8b
    osd.1 up in weight 1 up_from 9 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.35:6800/5892 192.168.1.35:6801/5892 192.168.1.35:6802/5892 192.168.1.35:6803/5892 exists,up 7f300e51-9052-4ef0-ae7f-6351874e735b
    osd.2 up in weight 1 up_from 13 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.36:6800/5922 192.168.1.36:6801/5922 192.168.1.36:6802/5922 192.168.1.36:6803/5922 exists,up e5e2793e-0d74-451d-b620-9ed69e458c97

View the maximum OSD count

    [root@c4 dev]# ceph osd getmaxosd
    max_osd = 3 in epoch 24

Set the maximum OSD count

    # Set the maximum number of OSDs; this value must be raised when adding OSD nodes
    $ ceph osd setmaxosd 60

Check the existing PG and PGP counts of the rbd pool:

    $ ceph osd pool get rbd pg_num
    pg_num: 128
    $ ceph osd pool get rbd pgp_num
    pgp_num: 128
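
If these need to grow after adding OSDs, both values can be raised with ceph osd pool set; 256 here is only an illustrative target. Since pgp_num may not exceed pg_num, raise pg_num first:

    ceph osd pool set rbd pg_num 256
    ceph osd pool set rbd pgp_num 256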

Check cluster usage

    ceph df

The GLOBAL section of the output gives an overview of the amount of cluster storage occupied by data.

    [root@c4 ~]# ceph df
    GLOBAL:
        SIZE    AVAIL   RAW USED   %RAW USED
        60 GiB  56 GiB  3.5 GiB    5.82
    POOLS:
        NAME                       ID  USED     %USED  MAX AVAIL  OBJECTS
        .rgw.root                  1   3.2 KiB  0      27 GiB     8
        default.rgw.control        2   0 B      0      27 GiB     8
        default.rgw.meta           3   788 B    0      27 GiB     5
        default.rgw.log            4   0 B      0      27 GiB     175
        .test.data                 5   0 B      0      27 GiB     0
        default.rgw.buckets.index  6   0 B      0      27 GiB     1
        default.rgw.buckets.data   7   1.5 MiB  0      27 GiB     3792

SIZE: total capacity of the cluster.
AVAIL: total free space in the cluster.
RAW USED: total raw storage used.
%RAW USED: percentage of raw storage used. Check this value against the full ratio and near full ratio to make sure the cluster does not run out of space. See Storage Capacity for details.

The POOLS section lists the pools and the notional usage of each one; it does not reflect replicas, clones, or snapshots. For example, if you store 1 MB of data as an object, the notional usage is 1 MB, but the actual usage may be 2 MB or more once replicas, clones, and snapshots are counted.

NAME: pool name.
ID: unique pool identifier.
USED: notional amount of data stored, in KB, MB, or GB.
%USED: notional utilization of each pool.
OBJECTS: notional number of objects in each pool.

    ceph osd df

    [root@c4 ~]# ceph osd df
    ID  CLASS  WEIGHT   REWEIGHT  SIZE    USE      AVAIL   %USE  VAR   PGS
     0  hdd    0.01949  1.00000   20 GiB  1.2 GiB  19 GiB  6.16  0.99  177
     1  hdd    0.01949  1.00000   20 GiB  1.3 GiB  19 GiB  6.36  1.02  188
     2  hdd    0.01949  1.00000   20 GiB  1.2 GiB  19 GiB  6.14  0.99  179
                        TOTAL     60 GiB  3.7 GiB  56 GiB  6.22
    MIN/MAX VAR: 0.99/1.02  STDDEV: 0.10

VAR is each OSD's utilization relative to the cluster average (1.00 means exactly average), and PGS is the number of placement groups hosted on the OSD; a wide MIN/MAX VAR spread indicates uneven data distribution.

List all users

    [root@c4 ~]# radosgw-admin metadata list user
    [
        "admin"
    ]

View user info

    [root@c4 ~]# radosgw-admin user info --uid=admin
    {
        "user_id": "admin",
        "display_name": "admin",
        "email": "",
        "suspended": 0,
        "max_buckets": 1000,
        "auid": 0,
        "subusers": [],
        "keys": [
            {
                "user": "admin",
                "access_key": "DQ2P7AIUZB956AMAE94Z",
                "secret_key": "4CacXZRCKxjp3fssyIIX51Ai4xeQrQW7EpWwIuIs"
            }
        ],
        "swift_keys": [],
        "caps": [],
        "op_mask": "read, write, delete",
        "default_placement": "",
        "placement_tags": [],
        "bucket_quota": {
            "enabled": false,
            "check_on_raw": false,
            "max_size": -1,
            "max_size_kb": 0,
            "max_objects": -1
        },
        "user_quota": {
            "enabled": false,
            "check_on_raw": false,
            "max_size": -1,
            "max_size_kb": 0,
            "max_objects": -1
        },
        "temp_url_keys": [],
        "type": "rgw",
        "mfa_ids": []
    }

List all buckets

    [root@c4 ~]# radosgw-admin bucket list
    [
        "bucket1",
        "bucket"
    ]

List objects in a bucket

    [root@c4 ~]# radosgw-admin bucket list --bucket=bucket

View bucket stats

    [root@c4 ~]# radosgw-admin bucket stats --bucket=bucket
    {
        "bucket": "bucket",
        "zonegroup": "e305419c-f0c0-4d3b-aeec-13490e3f4f36",
        "placement_rule": "default-placement",
        "explicit_placement": {
            "data_pool": "",
            "data_extra_pool": "",
            "index_pool": ""
        },
        "id": "4bc62fac-bd14-44d5-9f0a-d91496f2edfd.4332.1",
        "marker": "4bc62fac-bd14-44d5-9f0a-d91496f2edfd.4332.1",
        "index_type": "Normal",
        "owner": "admin",
        "ver": "0#34015",
        "master_ver": "0#0",
        "mtime": "2019-05-06 11:27:34.513449",
        "max_marker": "0#",
        "usage": {
            "rgw.main": {
                "size": 14455950,
                "size_actual": 139321344,
                "size_utilized": 14455950,
                "size_kb": 14118,
                "size_kb_actual": 136056,
                "size_kb_utilized": 14118,
                "num_objects": 34014
            }
        },
        "bucket_quota": {
            "enabled": false,
            "check_on_raw": false,
            "max_size": -1,
            "max_size_kb": 0,
            "max_objects": -1
        }
    }

Query the current journal location

    ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok config show | grep journal

Reshard a bucket

    ./bin/radosgw-admin bucket reshard --num-shards=10 --bucket=xxx

Remove leftover LVM volumes from a previously used OSD disk

    vgs
    lvs
    # lvremove takes the target as volume-group/logical-volume
    lvremove ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535
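
Alternatively (assuming the disk is /dev/sdb; substitute your device), ceph-volume can tear down the LVM metadata and wipe the device in one step:

    ceph-volume lvm zap /dev/sdb --destroy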

View a daemon's runtime configuration

    ceph daemon osd.0 config show | less

Adjust debug log levels
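
One way to do this, with debug_osd and the 0/5 (log level / in-memory level) values chosen only as an example, is to change the setting on a running daemon:

    # via the local admin socket
    ceph daemon osd.0 config set debug_osd 0/5
    # or remotely through the monitors
    ceph tell osd.0 injectargs --debug-osd 0/5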

List all objects in a pool and delete them

    # caution: removes every object in the rbd pool
    rados -p rbd ls | xargs rados -p rbd rm

Adjust an OSD's CRUSH weight

    ceph osd crush reweight osd.134 1.5
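
This changes the CRUSH weight, which is normally sized to the disk's capacity. For a temporary 0-1 override on top of the CRUSH weight (for example, to drain some data off an over-full OSD), ceph osd reweight is used instead; 0.8 here is only illustrative:

    ceph osd reweight 134 0.8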