[PVE-User] LVM autoactivation failed with multipath over iSCSI

nada nada at verdnatura.es
Tue Jan 14 13:47:11 CET 2020


Good day,

A week ago I upgraded our Proxmox 'cluster' to Buster
(it has just 2 nodes, and both are upgraded).
MANY thanks for your guidelines on the Proxmox wiki!!! It works great :-)

Now I am testing multipath over iSCSI to SAN storage.
It works well: snapshots, volume resizing, and migrations are all functional.
But after a reboot, the LVM-thin VG and its LVs are NOT autoactivated.
What am I missing?

As a temporary workaround I created a simple rc-local service/script to activate the VG.
Please, can anybody tell me how this should be done properly?
Some details follow.
Thank you,
Nada

root at mox:~# pveversion
pve-manager/6.1-5/9bf06119 (running kernel: 5.3.13-1-pve)

root at mox:~# grep pve /etc/lvm/lvm.conf
	# Also do not scan LVM disks from guests on both VGs named & not named 'pve'
	global_filter = [ "r|/dev/zd.*|", "r|/dev/mapper/pve-.*|", "r|/dev/mapper/pve-(vm|base)--[0-9]+--disk--[0-9]+|", "a|/dev/mapper/3600.*|", "a|/dev/mapper/san.*|" ]

root at mox:~# multipath -ll
3600c0ff000195f8e2172de5d01000000 dm-8 HP,P2000 G3 iSCSI
size=23G features='1 queue_if_no_path' hwhandler='1 alua' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 9:0:0:5  sdn 8:208 active ready running
| |- 8:0:0:5  sdh 8:112 active ready running
| |- 11:0:0:5 sdo 8:224 active ready running
| `- 7:0:0:5  sdm 8:192 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
   |- 10:0:0:5 sde 8:64  active ready running
   |- 5:0:0:5  sdj 8:144 active ready running
   `- 6:0:0:5  sdk 8:160 active ready running

root at mox:~# la /dev/mapper/
total 0
drwxr-xr-x  2 root root     340 Jan 13 19:09 .
drwxr-xr-x 20 root root    5820 Jan 13 19:31 ..
lrwxrwxrwx  1 root root       7 Jan 13 19:09 3600c0ff000195f8e2172de5d01000000 -> ../dm-8
crw-------  1 root root 10, 236 Jan 13 19:08 control
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-data -> ../dm-5
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-data_tdata -> ../dm-3
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-data_tmeta -> ../dm-2
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-data-tpool -> ../dm-4
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-root -> ../dm-1
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-swap -> ../dm-0
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-vm--104--disk--1 -> ../dm-7
lrwxrwxrwx  1 root root       7 Jan 13 19:08 pve-zfs -> ../dm-6
lrwxrwxrwx  1 root root       8 Jan 13 19:09 santest-santestpool -> ../dm-12
lrwxrwxrwx  1 root root       8 Jan 13 19:09 santest-santestpool_tdata -> ../dm-10
lrwxrwxrwx  1 root root       7 Jan 13 19:09 santest-santestpool_tmeta -> ../dm-9
lrwxrwxrwx  1 root root       8 Jan 13 19:09 santest-santestpool-tpool -> ../dm-11
lrwxrwxrwx  1 root root       8 Jan 13 19:09 santest-vm--901--disk--0 -> ../dm-13

root at mox:~# pvs -a
   PV                                            VG      Fmt  Attr PSize    PFree
   /dev/mapper/3600c0ff000195f8e2172de5d01000000 santest lvm2 a--    23.28g  3.24g
   /dev/mapper/santest-vm--901--disk--0                       ---        0      0
   /dev/sda2                                                  ---        0      0
   /dev/sda3                                     pve     lvm2 a--  <232.57g <1.86g
   /dev/sdb1                                                  ---        0      0
   /dev/sdb9                                                  ---        0      0
   /dev/sdc1                                                  ---        0      0
   /dev/sdc9                                                  ---        0      0
   /dev/sdd1                                                  ---        0      0
   /dev/sdd9                                                  ---        0      0
   /dev/sdf1                                                  ---        0      0
   /dev/sdf9                                                  ---        0      0
   /dev/sdg1                                                  ---        0      0
   /dev/sdg9                                                  ---        0      0
   /dev/sdi1                                                  ---        0      0
   /dev/sdi9                                                  ---        0      0
   /dev/sdl1                                                  ---        0      0
   /dev/sdl9                                                  ---        0      0

root at mox:~# lvs -a
   LV                  VG      Attr       LSize   Pool        Origin Data%  Meta%  Move Log Cpy%Sync Convert
   data                pve     twi-aotz-- 138.57g                    0.59   10.69
   [data_tdata]        pve     Twi-ao---- 138.57g
   [data_tmeta]        pve     ewi-ao----  72.00m
   [lvol0_pmspare]     pve     ewi-------  72.00m
   root                pve     -wi-ao----  58.00g
   swap                pve     -wi-ao----   4.00g
   vm-104-disk-1       pve     Vwi-a-tz--  50.00g data               1.62
   zfs                 pve     -wi-ao----  30.00g
   [lvol0_pmspare]     santest ewi-------  20.00m
   santestpool         santest twi-aotz--  20.00g                    3.40   12.21
   [santestpool_tdata] santest Twi-ao----  20.00g
   [santestpool_tmeta] santest ewi-ao----  20.00m
   vm-901-disk-0       santest Vwi-aotz--   2.50g santestpool        27.23

root at mox:~# grep santest /var/log/syslog.1 | tail
Jan 13 19:02:26 mox lvm[2005]:   santest: autoactivation failed.
Jan 13 19:04:43 mox lvm[441]: Monitoring thin pool santest-santestpool-tpool.
Jan 13 19:06:14 mox lvm[441]: No longer monitoring thin pool santest-santestpool-tpool.
Jan 13 19:06:14 mox blkdeactivate[12609]:   [LVM]: deactivating Volume Group santest... done
Jan 13 19:09:02 mox lvm[2003]:   Cannot activate LVs in VG santest while PVs appear on duplicate devices.
Jan 13 19:09:02 mox lvm[2003]:   Cannot activate LVs in VG santest while PVs appear on duplicate devices.
Jan 13 19:09:02 mox lvm[2003]:   0 logical volume(s) in volume group "santest" now active
Jan 13 19:09:02 mox lvm[2003]:   santest: autoactivation failed.
Jan 13 19:09:11 mox lvm[442]: Monitoring thin pool santest-santestpool-tpool.
Jan 13 19:09:12 mox rc.local[1767]:   2 logical volume(s) in volume group "santest" now active

root at mox:~# pvesm status
Name               Type     Status           Total            Used       Available        %
backup              dir     active        59600812        28874144        27669416   48.45%
local               dir     active        59600812        28874144        27669416   48.45%
local-lvm       lvmthin     active       145301504          857278       144444225    0.59%
santestpool     lvmthin     active        20971520          713031        20258488    3.40%
zfs             zfspool     active        30219964        13394036        16825928   44.32%

root at mox:~# cat /lib/systemd/system/rc-local.service
[Unit]
Description=/etc/rc.local Compatibility
Documentation=man:systemd-rc-local-generator(8)
ConditionFileIsExecutable=/etc/rc.local
After=network.target iscsid.service multipathd.service open-iscsi.service

[Service]
Type=forking
ExecStart=/etc/rc.local
TimeoutSec=0
RemainAfterExit=yes
GuessMainPID=no

[Install]
WantedBy=multi-user.target


root at mox:~# cat /etc/rc.local
#!/bin/bash
# just to activate VGs from SAN
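# crude wait for iSCSI login and multipath maps to settle before activating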
/bin/sleep 10
/sbin/vgchange -aly santest
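
I suppose the cleaner fix would be a small dedicated unit instead of rc.local, ordered after multipathd, something like the sketch below (untested; the unit name activate-san-vgs.service is made up). It would be enabled with 'systemctl enable activate-san-vgs.service', but I would still prefer to understand why autoactivation itself fails.

# /etc/systemd/system/activate-san-vgs.service (hypothetical, untested)
[Unit]
Description=Activate LVM VGs on SAN after multipath is up
After=multipathd.service iscsid.service open-iscsi.service
Wants=multipathd.service

[Service]
Type=oneshot
RemainAfterExit=yes
# -aay honours auto_activation_volume_list, like normal autoactivation
ExecStart=/sbin/vgchange -aay santest

[Install]
WantedBy=multi-user.target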


