Check OSD status
root@ceph1:~# ceph osd tree
List all buckets
radosgw-admin bucket list
["rgw","my-new-bucket","ptmind-test-bucket"]
List pools
$ rados lspools
rbd
libvirt-pool
Create a pool
ceph osd pool create {pool-name} {pg-num}
pool-name: name of the pool; must be unique.
pg-num: total number of placement groups in the pool. With fewer than 5 OSDs, set pg_num to 128; with 5 to 10 OSDs, set it to 512; with 10 to 50 OSDs, set it to 4096.
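For example, assuming a small cluster with fewer than 5 OSDs and reusing the libvirt-pool name from above, creation plus tagging the pool for its application might look like:
$ ceph osd pool create libvirt-pool 128
$ ceph osd pool application enable libvirt-pool rbd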
Check cluster health
ceph health detail
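For a one-shot summary of overall cluster state, or a continuous live watch, the standard companion commands are:
ceph -s
ceph -w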
Check network statistics.
netstat -s
View the OSD map
[root@c4 dev]# ceph osd dump
epoch 24
fsid 41d4b8c7-5599-4cd6-bae5-0fbf23b13f82
created 2019-05-05 10:11:37.453100
modified 2019-05-05 10:24:43.576555
flags sortbitwise,recovery_deletes,purged_snapdirs
crush_version 7
full_ratio 0.95
backfillfull_ratio 0.9
nearfull_ratio 0.85
require_min_compat_client jewel
min_compat_client jewel
require_osd_release mimic
pool 1 '.rgw.root' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 15 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 2 'default.rgw.meta' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 18 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.log' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 20 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.control' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 23 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
max_osd 3
osd.0 up in weight 1 up_from 5 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.34:6800/6420 192.168.1.34:6801/6420 192.168.1.34:6802/6420 192.168.1.34:6803/6420 exists,up b698e4cc-496d-44e6-9a97-7d37105ecf8b
osd.1 up in weight 1 up_from 9 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.35:6800/5892 192.168.1.35:6801/5892 192.168.1.35:6802/5892 192.168.1.35:6803/5892 exists,up 7f300e51-9052-4ef0-ae7f-6351874e735b
osd.2 up in weight 1 up_from 13 up_thru 22 down_at 0 last_clean_interval [0,0) 192.168.1.36:6800/5922 192.168.1.36:6801/5922 192.168.1.36:6802/5922 192.168.1.36:6803/5922 exists,up e5e2793e-0d74-451d-b620-9ed69e458c97
View the maximum number of OSDs
[root@c4 dev]# ceph osd getmaxosd
max_osd = 3 in epoch 24
Set the maximum number of OSDs
# Set the maximum number of OSDs; this value must be raised when adding OSD nodes.
$ ceph osd setmaxosd 60
Check the existing PG and PGP counts of the rbd pool:
$ ceph osd pool get rbd pg_num
pg_num: 128
$ ceph osd pool get rbd pgp_num
pgp_num: 128
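To grow a pool's placement groups, set pg_num first and then bring pgp_num up to match; the value 256 here is only an illustration:
$ ceph osd pool set rbd pg_num 256
$ ceph osd pool set rbd pgp_num 256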
Check cluster usage
ceph df
The GLOBAL section of the output gives an overview of the storage space the data consumes in the cluster.
[root@c4 ~]# ceph df
GLOBAL:
    SIZE     AVAIL    RAW USED    %RAW USED
    60 GiB   56 GiB   3.5 GiB     5.82
POOLS:
    NAME                         ID   USED      %USED   MAX AVAIL   OBJECTS
    .rgw.root                    1    3.2 KiB   0       27 GiB      8
    default.rgw.control          2    0 B       0       27 GiB      8
    default.rgw.meta             3    788 B     0       27 GiB      5
    default.rgw.log              4    0 B       0       27 GiB      175
    .test.data                   5    0 B       0       27 GiB      0
    default.rgw.buckets.index    6    0 B       0       27 GiB      1
    default.rgw.buckets.data     7    1.5 MiB   0       27 GiB      3792
SIZE: total capacity of the cluster; AVAIL: total free space in the cluster; RAW USED: total raw storage used; %RAW USED: percentage of raw storage used. Check this value against full ratio and nearfull ratio to make sure you do not run out of cluster capacity. See Storage Capacity for details.
The POOLS section lists the pools and the notional usage of each one. The numbers do not account for replicas, clones or snapshots: if you store 1MB of data as an object, the notional usage is 1MB, but the actual usage may be 2MB or more once replicas, clones and snapshots are counted.
NAME: pool name; ID: pool ID; USED: notional amount of data stored, in KB, MB or GB; %USED: notional usage percentage of each pool; OBJECTS: notional number of objects in each pool.
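The 27 GiB MAX AVAIL per pool against 56 GiB AVAIL globally reflects the replica count: these pools have size 2 (see the osd dump above), so each pool can store roughly AVAIL divided by 2. A pool's replica count can be confirmed with:
ceph osd pool get .rgw.root size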
Check per-OSD usage
[root@c4 ~]# ceph osd df
ID CLASS WEIGHT  REWEIGHT SIZE   USE     AVAIL  %USE VAR  PGS
 0   hdd 0.01949  1.00000 20 GiB 1.2 GiB 19 GiB 6.16 0.99 177
 1   hdd 0.01949  1.00000 20 GiB 1.3 GiB 19 GiB 6.36 1.02 188
 2   hdd 0.01949  1.00000 20 GiB 1.2 GiB 19 GiB 6.14 0.99 179
                    TOTAL 60 GiB 3.7 GiB 56 GiB 6.22
MIN/MAX VAR: 0.99/1.02  STDDEV: 0.10
List all users
[root@c4 ~]# radosgw-admin metadata list user
["admin"]
View user info
[root@c4 ~]# radosgw-admin user info --uid=admin
{
    "user_id": "admin",
    "display_name": "admin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [],
    "keys": [
        {
            "user": "admin",
            "access_key": "DQ2P7AIUZB956AMAE94Z",
            "secret_key": "4CacXZRCKxjp3fssyIIX51Ai4xeQrQW7EpWwIuIs"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
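To create a new RGW user, the standard command is shown below; the uid and display name are placeholders:
radosgw-admin user create --uid=testuser --display-name="Test User"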
List all buckets
[root@c4 ~]# radosgw-admin bucket list
["bucket1","bucket"]
List objects in a bucket
[root@c4 ~]# radosgw-admin bucket list --bucket=bucket
View bucket stats
[root@c4 ~]# radosgw-admin bucket stats --bucket=bucket
{
    "bucket": "bucket",
    "zonegroup": "e305419c-f0c0-4d3b-aeec-13490e3f4f36",
    "placement_rule": "default-placement",
    "explicit_placement": {
        "data_pool": "",
        "data_extra_pool": "",
        "index_pool": ""
    },
    "id": "4bc62fac-bd14-44d5-9f0a-d91496f2edfd.4332.1",
    "marker": "4bc62fac-bd14-44d5-9f0a-d91496f2edfd.4332.1",
    "index_type": "Normal",
    "owner": "admin",
    "ver": "0#34015",
    "master_ver": "0#0",
    "mtime": "2019-05-06 11:27:34.513449",
    "max_marker": "0#",
    "usage": {
        "rgw.main": {
            "size": 14455950,
            "size_actual": 139321344,
            "size_utilized": 14455950,
            "size_kb": 14118,
            "size_kb_actual": 136056,
            "size_kb_utilized": 14118,
            "num_objects": 34014
        }
    },
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    }
}
How to find the current journal location
ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok config show | grep journal
Reshard a bucket index
./bin/radosgw-admin bucket reshard --num-shards=10 --bucket=xxx
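Progress of an ongoing reshard can be checked with the companion subcommand; the bucket name xxx remains a placeholder:
./bin/radosgw-admin reshard status --bucket=xxx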
Remove leftover LVM volumes from a previously used disk
vgs
lvs
lvremove ceph-5d110da8-9f10-4b85-a34c-aeaa3600d2c7/osd-block-4ae6fb71-39ee-468a-9c8b-45de6379e535
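If the whole device is being retired, ceph-volume can wipe the LVM metadata and partition labels in one step; the device path below is a placeholder:
ceph-volume lvm zap --destroy /dev/sdb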
View a daemon's runtime configuration
ceph daemon osd.0 config show | less
Adjust debug log levels
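No command was recorded for this step; a minimal sketch using the standard runtime-injection mechanisms, where osd.0 and the 5/5 log/memory level are placeholders:
ceph tell osd.0 injectargs '--debug-osd 5/5'
# or through the admin socket:
ceph daemon osd.0 config set debug_osd 5/5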
List all objects in a pool and delete them (here the rbd pool, not an RGW bucket)
rados -p rbd ls | xargs rados -p rbd rm
Adjust an OSD's CRUSH weight
ceph osd crush reweight osd.134 1.5
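ceph osd crush reweight changes the weight stored in the CRUSH map itself. There is also a temporary override that takes a value between 0 and 1; the 0.8 below is only an example:
ceph osd reweight 134 0.8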
