Grow a Cached LV
Table of Contents
Just the steps needed to grow a cached LV.
Again?
Yeah, this was already covered, but I renamed some parts of my VG and I’d like to have a post to refer to the next time I grow it.
Current State
root@epyc ~ # vgs
VG #PV #LV #SN Attr VSize VFree
VG_epyc 3 11 0 wz--n- 16,39t 7,00t
root@epyc ~ # df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VG_epyc-LV_root 25G 2,3G 23G 9% /
devtmpfs 63G 0 63G 0% /dev
tmpfs 63G 0 63G 0% /dev/shm
tmpfs 63G 2,4M 63G 1% /run
tmpfs 63G 0 63G 0% /sys/fs/cgroup
/dev/mapper/VG_epyc-LV_var 16G 605M 16G 4% /var
/dev/nvme0n1p2 1014M 403M 612M 40% /boot
/dev/mapper/VG_epyc-LV_var_log 5,0G 818M 4,2G 16% /var/log
/dev/mapper/VG_epyc-LV_home 25G 39M 25G 1% /home
/dev/nvme0n1p1 200M 12M 189M 6% /boot/efi
/dev/mapper/VG_epyc-LV_var_lib_libvirt_images_SSD 500G 400G 100G 81% /var/lib/libvirt/images/on_SSD
/dev/mapper/VG_epyc-LV_var_lib_libvirt_images_HDD 7,5T 6,5T 1,1T 87% /var/lib/libvirt/images/on_HDD
/dev/mapper/VG_epyc-LV_nfs_openshift 100G 4,0G 96G 4% /opt/nfs4osp
/dev/mapper/VG_epyc-LV_ISO_images 50G 30G 21G 60% /var/lib/libvirt/images/ISOs
[...]
root@epyc ~ # pvs -o+tags
PV VG Fmt Attr PSize PFree PV Tags
/dev/md127 VG_epyc lvm2 a-- 14,55t 6,04t hdd
/dev/nvme0n1p3 VG_epyc lvm2 a-- 952,67g 253,67g 970pro,ssd
/dev/nvme1n1p1 VG_epyc lvm2 a-- <931,51g 731,31g 960evo,ssd
root@epyc ~ # lvextend -L+25G --resizefs /dev/VG_epyc/LV_var_lib_libvirt_images_HDD @hdd
Unable to resize logical volumes of cache type.
root@epyc ~ # lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
LV_ISO_images VG_epyc -wi-ao---- 50,00g
LV_Janine_backup_tmp VG_epyc -wi-a----- 150,00g
LV_home VG_epyc -wi-ao---- 25,00g
LV_nfs_openshift VG_epyc -wi-ao---- 100,00g
LV_root VG_epyc -wi-ao---- 25,00g
LV_swap VG_epyc -wi-ao---- 128,00g
LV_var VG_epyc -wi-ao---- 16,00g
LV_var_lib_libvirt_images_HDD VG_epyc Cwi-aoC--- <7,49t [LV_cache] [LV_var_lib_libvirt_images_HDD_corig] 99,99 5,51 0,00
LV_var_lib_libvirt_images_SSD VG_epyc -wi-ao---- 500,00g
LV_var_lib_libvirt_images_size_fuckup VG_epyc -wi-a----- 750,00g
LV_var_log VG_epyc -wi-ao---- 5,00g
Remove Cache LV
root@epyc ~ # lvremove VG_epyc/LV_cache
Flushing 0 blocks for cache VG_epyc/LV_var_lib_libvirt_images_HDD.
Logical volume "LV_cache" successfully removed
Grow LV
root@epyc ~ # lvextend -L+25G --resizefs /dev/VG_epyc/LV_var_lib_libvirt_images_HDD @hdd
Size of logical volume VG_epyc/LV_var_lib_libvirt_images_HDD changed from <7,49 TiB (1963008 extents) to 7,51 TiB (1969408 extents).
Logical volume VG_epyc/LV_var_lib_libvirt_images_HDD successfully resized.
meta-data=/dev/mapper/VG_epyc-LV_var_lib_libvirt_images_HDD isize=512 agcount=240, agsize=8388592 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=2010120192, imaxpct=25
= sunit=16 swidth=32 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=131071, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 2010120192 to 2016673792
root@epyc ~ # df -h
Filesystem Size Used Avail Use% Mounted on
[...]
/dev/mapper/VG_epyc-LV_var_lib_libvirt_images_HDD 7,6T 6,5T 1,1T 87% /var/lib/libvirt/images/on_HDD
[...]
Create Cache LV
root@epyc ~ # lvcreate -L 1G -n LV_cache_metadata VG_epyc @960evo
Logical volume "LV_cache_metadata" created.
root@epyc ~ # lvcreate -l 100%FREE -n LV_cache VG_epyc @960evo
Logical volume "LV_cache" created.
root@epyc ~ # pvs -o+tags
PV VG Fmt Attr PSize PFree PV Tags
/dev/md127 VG_epyc lvm2 a-- 14,55t <6,02t hdd
/dev/nvme0n1p3 VG_epyc lvm2 a-- 952,67g 253,67g 970pro,ssd
/dev/nvme1n1p1 VG_epyc lvm2 a-- <931,51g 0 960evo,ssd
Check Location of the LVs
root@epyc ~ # lvdisplay --maps VG_epyc/LV_cache_metadata
--- Logical volume ---
LV Path /dev/VG_epyc/LV_cache_metadata
LV Name LV_cache_metadata
VG Name VG_epyc
LV UUID dSvbuB-WDrr-RRr3-Isys-lsHa-85Xc-fAyJxp
LV Write Access read/write
LV Creation host, time epyc.internal.pcfe.net, 2019-04-28 11:12:49 +0200
LV Status available
# open 0
LV Size 1,00 GiB
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:6
--- Segments ---
Logical extents 0 to 255:
Type linear
Physical volume /dev/nvme1n1p1
Physical extents 0 to 255
root@epyc ~ # lvdisplay --maps VG_epyc/LV_cache
--- Logical volume ---
LV Path /dev/VG_epyc/LV_cache
LV Name LV_cache
VG Name VG_epyc
LV UUID Vf9UF5-wmJs-8Qa0-FfCd-L5qr-TaZb-wiaegw
LV Write Access read/write
LV Creation host, time epyc.internal.pcfe.net, 2019-04-28 11:13:08 +0200
LV Status available
# open 0
LV Size <930,51 GiB
Current LE 238210
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:7
--- Segments ---
Logical extents 0 to 238209:
Type linear
Physical volume /dev/nvme1n1p1
Physical extents 256 to 238465
Create Cache Pool
root@epyc ~ # lvconvert --type cache-pool --poolmetadata VG_epyc/LV_cache_metadata VG_epyc/LV_cache
Using 992,00 KiB chunk size instead of default 64,00 KiB, so cache pool has less than 1000000 chunks.
WARNING: Converting VG_epyc/LV_cache and VG_epyc/LV_cache_metadata to cache pool's data and metadata volumes with metadata wiping.
THIS WILL DESTROY CONTENT OF LOGICAL VOLUME (filesystem etc.)
Do you really want to convert VG_epyc/LV_cache and VG_epyc/LV_cache_metadata? [y/n]: y
Converted VG_epyc/LV_cache and VG_epyc/LV_cache_metadata to cache pool.
root@epyc ~ # lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
LV_ISO_images VG_epyc -wi-ao---- 50,00g
LV_Janine_backup_tmp VG_epyc -wi-a----- 150,00g
LV_cache VG_epyc Cwi---C--- <930,51g
LV_home VG_epyc -wi-ao---- 25,00g
LV_nfs_openshift VG_epyc -wi-ao---- 100,00g
LV_root VG_epyc -wi-ao---- 25,00g
LV_swap VG_epyc -wi-ao---- 128,00g
LV_var VG_epyc -wi-ao---- 16,00g
LV_var_lib_libvirt_images_HDD VG_epyc -wi-ao---- 7,51t
LV_var_lib_libvirt_images_SSD VG_epyc -wi-ao---- 500,00g
LV_var_lib_libvirt_images_size_fuckup VG_epyc -wi-a----- 750,00g
LV_var_log VG_epyc -wi-ao---- 5,00g
Use Cache Pool for the LV
root@epyc ~ # lvconvert --type cache --cachemode writeback --cachepool VG_epyc/LV_cache VG_epyc/LV_var_lib_libvirt_images_HDD
Do you want wipe existing metadata of cache pool VG_epyc/LV_cache? [y/n]: y
Logical volume VG_epyc/LV_var_lib_libvirt_images_HDD is now cached.
Check Cache Usage
Checked again some time later:
root@epyc ~ # lvdisplay VG_epyc/LV_var_lib_libvirt_images_HDD
--- Logical volume ---
LV Path /dev/VG_epyc/LV_var_lib_libvirt_images_HDD
LV Name LV_var_lib_libvirt_images_HDD
VG Name VG_epyc
LV UUID AbOUd3-Dw2u-jdyL-D4Ff-MjrM-IEy1-qcfycf
LV Write Access read/write
LV Creation host, time epyc.internal.pcfe.net, 2018-08-31 09:44:31 +0200
LV Cache pool name LV_cache
LV Cache origin name LV_var_lib_libvirt_images_HDD_corig
LV Status available
# open 1
LV Size 7,51 TiB
Cache used blocks 2,81%
Cache metadata blocks 0,76%
Cache dirty blocks 0,00%
Cache read hits/misses 2637 / 25220
Cache wrt hits/misses 93896 / 765381
Cache demotions 0
Cache promotions 27597
Current LE 1969408
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:9