[pve-devel] [PATCH] Use block storage migration for migration of KVM machines with local based storages

Alexandre DERUMIER aderumier at odiso.com
Mon Nov 3 13:24:10 CET 2014


Hi,

I didn't know about the blk=> option in qmp migrate.

Seems to be a lot easier than before.


I'll try to test it this week.


>>- enable migration caps: xbzrle and zero_blocks 

Why do you need xbzrle? (I'm not sure it's 100% stable yet)





----- Mail original ----- 

De: "Kamil Trzcinski" <ayufan at ayufan.eu> 
À: pve-devel at pve.proxmox.com 
Envoyé: Lundi 3 Novembre 2014 12:12:57 
Objet: [pve-devel] [PATCH] Use block storage migration for migration of KVM machines with local based storages 

- allow to migrate only VMs with either local or shared storage 
- for stopped VM start it for migration 
- allocate remote storage using ssh 
- enable migration caps: xbzrle and zero_blocks 

Signed-off-by: Kamil Trzcinski <ayufan at ayufan.eu> 
--- 
PVE/QemuMigrate.pm | 77 ++++++++++++++++++++++++++++++++++++++++-------------- 
PVE/QemuServer.pm | 4 +-- 
2 files changed, 60 insertions(+), 21 deletions(-) 

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm 
index a49cdcc..8dcc8ab 100644 
--- a/PVE/QemuMigrate.pm 
+++ b/PVE/QemuMigrate.pm 
@@ -3,6 +3,7 @@ package PVE::QemuMigrate; 
use strict; 
use warnings; 
use PVE::AbstractMigrate; 
+use JSON; 
use IO::File; 
use IPC::Open2; 
use PVE::INotify; 
@@ -141,17 +142,14 @@ sub prepare { 
if (my $pid = PVE::QemuServer::check_running($vmid)) { 
die "cant migrate running VM without --online\n" if !$online; 
$running = $pid; 
+ $self->{livemigration} = 1; 
$self->{forcemachine} = PVE::QemuServer::get_current_qemu_machine($vmid); 
- } 
- 
- if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) { 
- if ($self->{running} || !$self->{opts}->{force}) { 
- die "can't migrate VM which uses local devices\n"; 
} else { 
- $self->log('info', "migrating VM which uses local devices"); 
- } 
+ $self->log('info', "starting VM $vmid on local node to perform migration"); 
+ PVE::QemuServer::vm_start($self->{storecfg}, $vmid, undef, 1, undef, 1); 
} 

+ # do we need it? if we already do vm_start? 
# activate volumes 
my $vollist = PVE::QemuServer::get_vm_volumes($conf); 
PVE::Storage::activate_volumes($self->{storecfg}, $vollist); 
@@ -163,13 +161,14 @@ sub prepare { 
eval { $self->cmd_quiet($cmd); }; 
die "Can't connect to destination address using public key\n" if $@; 

- return $running; 
+ # always perform online migration 
+ return 1; 
} 

sub sync_disks { 
my ($self, $vmid) = @_; 

- $self->log('info', "copying disk images"); 
+ $self->log('info', "allocating disk images"); 

my $conf = $self->{vmconf}; 

@@ -238,8 +237,8 @@ sub sync_disks { 
$volhash->{$volid} = 1; 
}); 

- if ($self->{running} && !$sharedvm) { 
- die "can't do online migration - VM uses local disks\n"; 
+ if (%$volhash) { 
+ die "can't do migration - VM uses shared and local storage\n" if $sharedvm; 
} 

# do some checks first 
@@ -247,9 +246,6 @@ sub sync_disks { 
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); 
my $scfg = PVE::Storage::storage_config($self->{storecfg}, $sid); 

- die "can't migrate '$volid' - storagy type '$scfg->{type}' not supported\n" 
- if $scfg->{type} ne 'dir'; 
- 
# if file, check if a backing file exist 
if (($scfg->{type} eq 'dir') && (!$sharedvm)) { 
my (undef, undef, undef, $parent) = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 1); 
@@ -259,8 +255,8 @@ sub sync_disks { 

foreach my $volid (keys %$volhash) { 
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); 
+ PVE::Storage::storage_migrate_alloc($self, $self->{storecfg}, $volid, $self->{nodeip}, $sid); 
push @{$self->{volumes}}, $volid; 
- PVE::Storage::storage_migrate($self->{storecfg}, $volid, $self->{nodeip}, $sid); 
} 
}; 
die "Failed to sync data - $@" if $@; 
@@ -286,6 +282,15 @@ sub phase1_cleanup { 

$self->log('info', "aborting phase 1 - cleanup resources"); 

+ # always stop local VM if not doing livemigration 
+ if(!$self->{livemigration}) { 
+ eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); }; 
+ if (my $err = $@) { 
+ $self->log('err', "stopping vm failed - $err"); 
+ $self->{errors} = 1; 
+ } 
+ } 
+ 
my $conf = $self->{vmconf}; 
delete $conf->{lock}; 
eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) }; 
@@ -296,7 +301,8 @@ sub phase1_cleanup { 
if ($self->{volumes}) { 
foreach my $volid (@{$self->{volumes}}) { 
$self->log('err', "found stale volume copy '$volid' on node '$self->{node}'"); 
- # fixme: try to remove ? 
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); 
+ PVE::Storage::storage_migrate_free($self, $self->{storecfg}, $volid, $self->{nodeip}, $sid); 
} 
} 
} 
@@ -359,7 +365,6 @@ sub phase2 { 

my $start = time(); 
$self->log('info', "starting online/live migration on $raddr:$rport"); 
- $self->{livemigration} = 1; 

# load_defaults 
my $defaults = PVE::QemuServer::load_defaults(); 
@@ -417,7 +422,8 @@ sub phase2 { 
} 

eval { 
- PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport"); 
+ my $blk = $self->{volumes} ? JSON::true : JSON::false; 
+ PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport", blk => $blk); 
}; 
my $merr = $@; 
$self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr; 
@@ -533,6 +539,11 @@ sub phase2_cleanup { 
}; 
$self->log('info', "migrate_cancel error: $@") if $@; 

+ # stop started VM 
+ if (!$self->{livemigration}) { 
+ eval{ PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1); } 
+ } 
+ 
my $conf = $self->{vmconf}; 
delete $conf->{lock}; 
eval { PVE::QemuServer::update_config_nolock($vmid, $conf, 1) }; 
@@ -549,6 +560,14 @@ sub phase2_cleanup { 
$self->log('err', $err); 
$self->{errors} = 1; 
} 
+ 
+ if ($self->{volumes}) { 
+ foreach my $volid (@{$self->{volumes}}) { 
+ $self->log('err', "found stale volume copy '$volid' on node '$self->{node}'"); 
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); 
+ PVE::Storage::storage_migrate_free($self, $self->{storecfg}, $volid, $self->{nodeip}, $sid); 
+ } 
+ } 
} 

sub phase3 { 
@@ -557,6 +576,13 @@ sub phase3 { 
my $volids = $self->{volumes}; 
return if $self->{phase2errors}; 

+ # stop local VM so we can destroy local copies 
+ eval { PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1, 1); }; 
+ if (my $err = $@) { 
+ $self->log('err', "stopping vm failed - $err"); 
+ $self->{errors} = 1; 
+ } 
+ 
# destroy local copies 
foreach my $volid (@$volids) { 
eval { PVE::Storage::vdisk_free($self->{storecfg}, $volid); }; 
@@ -594,12 +620,25 @@ sub phase3_cleanup { 
$self->log('err', $err); 
$self->{errors} = 1; 
} 
+ } else { 
+ # now that config file is move, we can stop vm on target if not doing livemigrate 
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock']; 
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, 
+ errfunc => sub { 
+ my $line = shift; 
+ $self->log('err', $line); 
+ }); 
+ }; 
+ if (my $err = $@) { 
+ $self->log('err', $err); 
+ $self->{errors} = 1; 
+ } 
} 

eval { 

my $timer = 0; 
- if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{running}) { 
+ if (PVE::QemuServer::vga_conf_has_spice($conf->{vga}) && $self->{livemigration}) { 
$self->log('info', "Waiting for spice server migration"); 
while (1) { 
my $res = PVE::QemuServer::vm_mon_cmd_nocheck($vmid, 'query-spice'); 
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm 
index 98264d1..da4c2b7 100644 
--- a/PVE/QemuServer.pm 
+++ b/PVE/QemuServer.pm 
@@ -3330,9 +3330,9 @@ sub set_migration_caps { 

my $enabled_cap = { 
"auto-converge" => 1, 
- "xbzrle" => 0, 
+ "xbzrle" => 1, 
"x-rdma-pin-all" => 0, 
- "zero-blocks" => 0, 
+ "zero-blocks" => 1, 
}; 

my $supported_capabilities = vm_mon_cmd_nocheck($vmid, "query-migrate-capabilities"); 
-- 
1.9.3 (Apple Git-50) 

_______________________________________________ 
pve-devel mailing list 
pve-devel at pve.proxmox.com 
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel 



More information about the pve-devel mailing list