# Auto-detect available devices (it takes some time for each device's status to sync, one by one).
# NOTE: Strangely enough, the command automatically recognizes all devices, including the ZRAM! QAQ.....
[ceph: root@ceph01 /]$ ceph orch apply osd --all-available-devices
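# (Optional, not run here) Adding --dry-run should preview which OSDs the orchestrator would create without deploying anything:
[ceph: root@ceph01 /]$ ceph orch apply osd --all-available-devices --dry-run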
# list devices
[ceph: root@ceph01 /]$ ceph orch device ls
Hostname             Path        Type  Serial  Size   Health   Ident  Fault  Available
ceph01.liarlee.site  /dev/vdb    hdd           21.4G  Unknown  N/A    N/A    Yes
ceph01.liarlee.site  /dev/vdc    hdd           21.4G  Unknown  N/A    N/A    Yes
ceph01.liarlee.site  /dev/vdd    hdd           21.4G  Unknown  N/A    N/A    Yes
ceph01.liarlee.site  /dev/zram0  ssd           2071M  Unknown  N/A    N/A    No
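# (Optional) Instead of --all-available-devices, an OSD service spec can target only certain devices.
# A minimal sketch -- the file name, service_id, and size filter below are made-up examples:
[ceph: root@ceph01 /]$ cat /root/osd_spec.yml
service_type: osd
service_id: lab_drive_group
placement:
  host_pattern: 'ceph*'
spec:
  data_devices:
    size: '20G:'    # only consume devices of 20 GB or larger
[ceph: root@ceph01 /]$ ceph orch apply -i /root/osd_spec.yml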
# MANUAL ADD OSD (not necessary if --all-available-devices is already applied)
# INIT DISK from the MON node
[ceph: root@ceph01 /]$ ceph orch daemon add osd ceph01.liarlee.site:/dev/vdb
Created osd(s) 0 on host 'ceph01.liarlee.site'
[ceph: root@ceph01 /]$ ceph orch daemon add osd ceph01.liarlee.site:/dev/vdc
Created osd(s) 1 on host 'ceph01.liarlee.site'
[ceph: root@ceph01 /]$ ceph orch daemon add osd ceph01.liarlee.site:/dev/vdd
Created osd(s) 2 on host 'ceph01.liarlee.site'
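# (Optional) Confirm the new OSDs are up and placed in the CRUSH tree:
[ceph: root@ceph01 /]$ ceph osd tree
[ceph: root@ceph01 /]$ ceph -s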
# MANUAL DELETE OSD (not necessary either)
[ceph: root@ceph01 /]$ ceph orch osd rm 0
Scheduled OSD(s) for removal
[ceph: root@ceph01 /]$ ceph orch osd rm 1
Scheduled OSD(s) for removal
[ceph: root@ceph01 /]$ ceph orch osd rm 2
Scheduled OSD(s) for removal
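# (Optional, not run here) A removed device keeps its LVM metadata; zapping it makes it available for reuse
# (host and path below are just the ones from this lab):
[ceph: root@ceph01 /]$ ceph orch device zap ceph01.liarlee.site /dev/vdb --force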
# Enable the enhanced device scan, which fills in the Health, Ident, and Fault information.
ceph config set mgr mgr/cephadm/device_enhanced_scan true
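# (Optional) Read the setting back to confirm it took effect:
ceph config get mgr mgr/cephadm/device_enhanced_scan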
# Enable OSD memory auto-tuning, possibly for better performance.
ceph config set osd osd_memory_target_autotune true
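# (Assumption) With autotuning on, cephadm derives osd_memory_target from a fraction of host RAM;
# the ratio below is the related knob, and 0.2 is only an example value for a hyperconverged host:
ceph config set mgr mgr/cephadm/autotune_memory_target_ratio 0.2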
# Set the OSD service to unmanaged, so cephadm stops consuming new available devices automatically.
[ceph: root@ceph01 /]$ ceph orch apply osd --all-available-devices --unmanaged=true
[ceph: root@ceph01 /]$ ceph orch device ls --wide
[ceph: root@ceph01 /]$ ceph osd status
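# (Optional) To let cephadm consume new devices again, flip the flag back:
[ceph: root@ceph01 /]$ ceph orch apply osd --all-available-devices --unmanaged=false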
# Check the OSD removal status
[ceph: root@ceph01 /]$ ceph orch osd rm status
OSD_ID  HOST                 STATE                    PG_COUNT  REPLACE  FORCE  DRAIN_STARTED_AT
0       ceph01.liarlee.site  done, waiting for purge  0         False    False  None
1       ceph01.liarlee.site  started                  0         False    False  None
2       ceph01.liarlee.site  started                  0         False    False  None
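# (Optional, not run here) A removal that has not finished yet can still be cancelled:
[ceph: root@ceph01 /]$ ceph orch osd rm stop 2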
# The mon service shows 4/3 daemons (4 running vs. the expected placement of count:3).
[ceph: root@ceph01 /]$ ceph orch ls mon
mon  4/3  4m ago  39s  count:3
[ceph: root@ceph01 /]$ ceph orch redeploy mon
Scheduled to redeploy mon.ceph01.liarlee.site on host 'ceph01.liarlee.site'
Scheduled to redeploy mon.ceph02 on host 'ceph02.liarlee.site'
Scheduled to redeploy mon.ceph03 on host 'ceph03.liarlee.site'
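# (Optional) Check the mon daemons again once the redeploy has gone through:
[ceph: root@ceph01 /]$ ceph orch ls mon
[ceph: root@ceph01 /]$ ceph mon stat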