ceph 基本查询命令


查看 pool（列出集群中所有存储池及其编号）

[root@node1 ~]# ceph osd lspools
1 .rgw.root
2 default.rgw.control
3 default.rgw.meta
4 default.rgw.log
5 default.rgw.buckets.index
6 default.rgw.buckets.data
7 default.rgw.buckets.non-ec

统计存储使用情况（按 pool 显示对象数与读写量）

[root@node1 ~]# rados df
POOL_NAME                     USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS      RD  WR_OPS      WR
.rgw.root                  1.1 KiB       4      0      8                  0       0        0      0     0 B       4   4 KiB
default.rgw.buckets.data   404 GiB  209634      0 419268                  0       0        0  99404 3.9 GiB 2715217 461 GiB
default.rgw.buckets.index      0 B       3      0      9                  0       0        0 797162 816 MiB  497071 350 MiB
default.rgw.buckets.non-ec     0 B       3      0      9                  0       0        0   1479 701 KiB     977 378 KiB
default.rgw.control            0 B       8      0     24                  0       0        0      0     0 B       0     0 B
default.rgw.log                0 B     207      0    414                  0       0        0 206448 216 MiB  166841  24 MiB
default.rgw.meta           1.3 KiB       7      0     21                  0       0        0   3360 2.9 MiB    1239 599 KiB

total_objects    209866
total_used       825 GiB
total_avail      1.2 TiB
total_space      2.1 TiB

导出 OSD map 详细信息（dump OSD map info）

[root@node1 ~]# ceph osd dump
epoch 73
fsid 14b3f84e-0163-4fe6-9c29-685843a2d100
created 2019-04-23 03:01:37.822156
modified 2019-04-25 10:48:25.055609
flags sortbitwise,recovery_deletes,purged_snapdirs
crush_version 7
full_ratio 0.95
backfillfull_ratio 0.9
nearfull_ratio 0.85
require_min_compat_client jewel
min_compat_client jewel
require_osd_release mimic
pool 1 '.rgw.root' replicated size 2 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 68 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 2 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 18 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 20 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.log' replicated size 2 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 70 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 5 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 37 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 6 'default.rgw.buckets.data' replicated size 2 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 72 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
pool 7 'default.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 43 owner 18446744073709551615 flags hashpspool stripe_width 0 application rgw
max_osd 3
osd.0 up   in  weight 1 up_from 58 up_thru 72 down_at 49 last_clean_interval [30,48) 192.168.8.197:6800/3515 192.168.8.197:6801/3515 192.168.8.197:6802/3515 192.168.8.197:6803/3515 exists,up befd6b52-bb7e-4eb4-a47d-996eef0a5898
osd.1 up   in  weight 1 up_from 62 up_thru 72 down_at 57 last_clean_interval [53,56) 192.168.8.198:6800/3444 192.168.8.198:6801/3444 192.168.8.198:6802/3444 192.168.8.198:6803/3444 exists,up c48a302b-bcb1-4e4a-b4a0-a4160262f026
osd.2 up   in  weight 1 up_from 66 up_thru 72 down_at 64 last_clean_interval [55,63) 192.168.8.199:6800/3400 192.168.8.199:6801/3400 192.168.8.199:6802/3400 192.168.8.199:6803/3400 exists,up 9ba386bc-1627-45cc-a62a-5fb3dd43e9de

查询认证用户（列出所有 auth entries 及其 key 与 caps 权限）

[root@node1 ~]# ceph auth list
installed auth entries:

osd.0
    key: AQCguL5c7lQEIBAAXNRWkShiruwPQgBGUzde5w==
    caps: [mgr] allow profile osd
    caps: [mon] allow profile osd
    caps: [osd] allow *
osd.1
    key: AQC6uL5cokFGIRAAYTxDxGUX+AdUzliBjDH85w==
    caps: [mgr] allow profile osd
    caps: [mon] allow profile osd
    caps: [osd] allow *
osd.2
    key: AQDRuL5cew8EGBAAiYs067hd2DkP4fuJEPYpLA==
    caps: [mgr] allow profile osd
    caps: [mon] allow profile osd
    caps: [osd] allow *
client.admin
    key: AQBRuL5ccXcGMRAAEML4MeCLxz8ZdJVUtrHbxw==
    caps: [mds] allow *
    caps: [mgr] allow *
    caps: [mon] allow *
    caps: [osd] allow *
client.bootstrap-mds
    key: AQBRuL5cMJsGMRAA5shArDczUkbFzVew+7QdHA==
    caps: [mon] allow profile bootstrap-mds
client.bootstrap-mgr
    key: AQBRuL5cT7EGMRAAQ6z+kNHC1v0ai/J4peIRHg==
    caps: [mon] allow profile bootstrap-mgr
client.bootstrap-osd
    key: AQBRuL5c4cQGMRAAIYRYPuLex5JcLVlFWh1aPQ==
    caps: [mon] allow profile bootstrap-osd
client.bootstrap-rbd
    key: AQBRuL5cjNoGMRAASWGT1RyCt7vP4Zv0C7qvyg==
    caps: [mon] allow profile bootstrap-rbd
client.bootstrap-rgw
    key: AQBRuL5cau0GMRAATmZNjZa0QcOAwR5tAlU0Ww==
    caps: [mon] allow profile bootstrap-rgw
client.rgw.node1
    key: AQBEur5c4bZQORAAXREXM96NMqjtgViicJMSQA==
    caps: [mon] allow rwx
    caps: [osd] allow rwx
mgr.node2
    key: AQC1wb5c+Bo+LBAAvO6qWg7fOsoQuhhZXr/62A==
    caps: [mds] allow *
    caps: [mon] allow profile mgr
    caps: [osd] allow *

打印 Ceph 运行时配置（通过各守护进程的 admin socket 查询）

ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok config show | less
ceph --admin-daemon /var/run/ceph/ceph-client.rgw.node1.5041.94749140287488.asok config show 
ceph --admin-daemon /var/run/ceph/ceph-mon.node1.asok config show 

Leave a Reply

Your email address will not be published.

This site uses Akismet to reduce spam. Learn how your comment data is processed.

%d bloggers like this: