empty-rgw

remove osd

 ceph-deploy disk list node1 node2 node3

 # mark the OSDs out first so the cluster stops mapping data to them
 ceph osd out 0
 ceph osd out 1
 ceph osd out 2

 # ssh to each OSD host and stop its daemon
 systemctl stop ceph-osd@0

 # purge removes each OSD from the CRUSH map, the OSD map and the auth keys
 ceph osd purge 0 --yes-i-really-mean-it
 ceph osd purge 1 --yes-i-really-mean-it
 ceph osd purge 2 --yes-i-really-mean-it
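
The same teardown as a loop, a sketch assuming osd.0/1/2 live on node1/node2/node3 and passwordless ssh from the admin node:

 for i in 0 1 2; do
   node="node$((i+1))"   # assumption: osd.$i runs on node$((i+1))
   ceph osd out "$i"
   ssh "$node" systemctl stop "ceph-osd@$i"
   ceph osd purge "$i" --yes-i-really-mean-it
 done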

zap lvm

 # ssh to each OSD host, then wipe the old LVM metadata and data on the OSD device
 ceph-volume lvm zap /dev/vdb --destroy
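
Run from the admin node, the same zap for all three hosts (a sketch, assuming every node uses /dev/vdb as its OSD device):

 for node in node1 node2 node3; do
   ssh "$node" ceph-volume lvm zap /dev/vdb --destroy
 done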

reinstall osd

 rados df
 ceph-deploy disk zap node1 /dev/vdb
 ceph-deploy disk zap node2 /dev/vdb
 ceph-deploy disk zap node3 /dev/vdb
 ceph-deploy osd create node1 --data /dev/vdb
 ceph-deploy osd create node2 --data /dev/vdb
 ceph-deploy osd create node3 --data /dev/vdb
 lsblk     # the new ceph LVM volumes should now sit on /dev/vdb
 ceph -s   # the recreated OSDs should come back up and in
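
As with the teardown, the zap/create pair can be looped over the hosts (a sketch, again assuming /dev/vdb on every node):

 for node in node1 node2 node3; do
   ceph-deploy disk zap "$node" /dev/vdb
   ceph-deploy osd create "$node" --data /dev/vdb
 done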

remove pool

 rados df
 # this first delete fails until mon_allow_pool_delete is enabled on the monitor
 ceph osd pool delete '.rgw.root' '.rgw.root' --yes-i-really-really-mean-it
 # confirm the monitor admin socket exists, then allow pool deletion through it
 ls -l /var/run/ceph/ceph-mon.node1.asok
 ceph daemon mon.node1 config set mon_allow_pool_delete true
 # retry, then drop the remaining rgw pools
 ceph osd pool delete '.rgw.root' '.rgw.root' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.buckets.data' 'default.rgw.buckets.data' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.buckets.index' 'default.rgw.buckets.index' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.buckets.non-ec' 'default.rgw.buckets.non-ec' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.control' 'default.rgw.control' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.log' 'default.rgw.log' --yes-i-really-really-mean-it
 ceph osd pool delete 'default.rgw.meta' 'default.rgw.meta' --yes-i-really-really-mean-it
 ceph -s
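
The pool names vary slightly between RGW versions, so a loop over the pool listing keeps the cleanup in sync with what actually exists (a sketch; it deletes every pool whose name starts with .rgw or default.rgw, so review the listing first):

 # enable pool deletion on all monitors at once (runtime setting only)
 ceph tell 'mon.*' injectargs '--mon_allow_pool_delete=true'
 for pool in $(ceph osd pool ls | grep -E '^(\.rgw|default\.rgw)'); do
   ceph osd pool delete "$pool" "$pool" --yes-i-really-really-mean-it
 done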

reinstall rgw

 ceph-deploy install --rgw node1
 ceph-deploy --overwrite-conf config push node1 node2 node3 
 systemctl | grep radosgw   # find the exact rgw unit name
 systemctl status ceph-radosgw@rgw.node1.service
 systemctl restart ceph-radosgw@rgw.node1.service
 systemctl status ceph-radosgw@rgw.node1.service
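
Once the unit is active, the gateway answers plain HTTP (an assumption here that it listens on the default port 7480 and that node1 resolves from where curl runs):

 curl -s http://node1:7480/ ; echo
 # a healthy gateway returns an anonymous ListAllMyBuckets XML document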

create rgw user

 # create an S3 user with a fixed access/secret key pair so tests are reproducible
 radosgw-admin user create --uid="testuser" --display-name="First User"  --access-key="DH8FY413RG632DQOXWZS" --secret-key="rnIJ2qlgKLcCvxuitZrMjuDOXBOoBjPFeWmxWiOy"
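
A quick end-to-end check of the new credentials with the AWS CLI (assumptions: awscli is installed and the gateway listens on node1:7480):

 export AWS_ACCESS_KEY_ID=DH8FY413RG632DQOXWZS
 export AWS_SECRET_ACCESS_KEY=rnIJ2qlgKLcCvxuitZrMjuDOXBOoBjPFeWmxWiOy
 aws --endpoint-url http://node1:7480 s3 mb s3://testbucket
 aws --endpoint-url http://node1:7480 s3 ls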

reduce replication size

 ceph osd dump | grep 'replicated size'
 ceph osd pool set default.rgw.buckets.data size 2
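
To apply the same replica count to every rgw pool instead of only the data pool (a sketch; whether size 2 gives enough durability is a per-cluster decision):

 for pool in $(ceph osd pool ls | grep -E '^(\.rgw|default\.rgw)'); do
   ceph osd pool set "$pool" size 2
 done
 ceph osd dump | grep 'replicated size'   # confirm the change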

check status

 rados df
 ceph health
 ceph -s
 ceph auth list
