# ceph --version
ceph version 12.2.13 luminous (stable)
# ceph osd tree
0 hdd 7.27739 osd.0 up 1.00000 1.00000
1. Mark the OSD out
# ceph osd out 0
marked out osd.0.
# ceph osd tree
0 hdd 7.27739 osd.0 up 0 1.00000
- Wait for the data migration (rebalancing) to finish
# ceph -w
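`ceph -w` streams cluster events; as a simpler check (a sketch, not part of the original notes) you can also poll the status summary and only continue once no placement groups are still recovering or backfilling and health is back to HEALTH_OK:
# ceph health
# watch -n 30 ceph -s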
2. Stop the OSD
- Log in to the host where the OSD runs
ssh {osd-host}
systemctl stop ceph-osd@0.service
# ceph osd tree
0 hdd 7.27739 osd.0 down 0 1.00000
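Optionally (an extra step, not in the original notes), disable the systemd unit as well so the OSD daemon cannot be started again by accident, e.g. after a reboot:
# systemctl disable ceph-osd@0.service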
3. Remove the OSD
# ceph osd purge 0 --yes-i-really-mean-it
purged osd.0
- If the OSD has an entry in the configuration file, delete it
# cat /etc/ceph/ceph.conf
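A per-OSD section only exists in old-style, manually maintained configuration files; if one is present it might look roughly like the hypothetical entry below (the host name is a placeholder), and the whole section should be deleted:
[osd.0]
    host = {osd-host}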
- Remove the corresponding OSD from the CRUSH map. Because `ceph osd purge` above already removed the CRUSH entry, the auth key and the OSD itself, the following commands only confirm that osd.0 is gone:
# ceph osd crush remove osd.0
device 'osd.0' does not appear in the crush map
# ceph auth del osd.0
entity osd.0 does not exist
# ceph osd rm 0
osd.0 does not exist.
- Verify that osd.0 is no longer present
# ceph osd tree
4. Wipe the disk
# ls -lh /var/lib/ceph/osd/ceph-0
lrwxrwxrwx 1 ceph ceph 93 Aug 3 15:50 block -> /dev/ceph-03bdcf51-248a-4f09-a131-fcdc4a9a7398/osd-block-dde657b7-88c6-4cc6-8e04-fae98b1f5472
# lsblk
sda 8:0 0 7.3T 0 disk
└─ceph--03bdcf51--248a--4f09--a131--fcdc4a9a7398-osd--block--dde657b7--88c6--4cc6--8e04--fae98b1f5472 253:7 0 7.3T 0 lvm
# umount /var/lib/ceph/osd/ceph-0
# lvmdiskscan | grep LVM
# vgs
  VG                                         #PV #LV #SN Attr   VSize  VFree
  ceph-03bdcf51-248a-4f09-a131-fcdc4a9a7398    1   1   0 wz--n- <7.28t     0
# lvdisplay
  --- Logical volume ---
  LV Path                /dev/ceph-03bdcf51-248a-4f09-a131-fcdc4a9a7398/osd-block-dde657b7-88c6-4cc6-8e04-fae98b1f5472
  LV Name                osd-block-dde657b7-88c6-4cc6-8e04-fae98b1f5472
  VG Name                ceph-03bdcf51-248a-4f09-a131-fcdc4a9a7398
  LV UUID                Inu45D-a9be-kFBk-mIQ9-sHLW-VQKP-MJRVOg
  LV Write Access        read/write
  LV Creation host, time zj, 2020-07-16 09:02:55 +0800
  LV Status              available
  # open                 0
  LV Size                <7.28 TiB
  Current LE             1907721
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           253:7
- Remove the LVM logical volume and volume group (one possible sequence is sketched below)
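The original note only says to remove the LVM volumes, without commands. One possible way to do it, assuming the VG name shown by vgs above and /dev/sda as the underlying disk (adjust both to your environment): recent Luminous releases of ceph-volume can zap and destroy the volumes in one step:
# ceph-volume lvm zap --destroy /dev/sda
Or with the LVM tools directly, followed by wiping the remaining signatures:
# vgremove ceph-03bdcf51-248a-4f09-a131-fcdc4a9a7398
# pvremove /dev/sda
# wipefs -a /dev/sda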
References:
- Removing an OSD
- Manually removing an OSD