[pve-devel] [PATCH ha-manager v3 2/3] Manager: record tried node on relocation policy

Thomas Lamprecht t.lamprecht at proxmox.com
Fri Jun 17 17:11:15 CEST 2016


Instead of incrementing an integer on each failed start attempt, record
the nodes that were already tried. We can then use the size of this
record array as the 'try count' and thus get the same behaviour as with
the earlier 'relocate_trial' hash (a minimal sketch of the mechanism
follows the v2 changelog below).

Log the tried nodes once the service has started, or when it could not
be started at all, so that an admin can follow the behaviour and
investigate the reason for the failure on a specific node.

This also prepares us for a more intelligent recovery node selection,
as we can then skip nodes already tried in the current recovery cycle.

Signed-off-by: Thomas Lamprecht <t.lamprecht at proxmox.com>
---

changes since v2:
* ensure that the tried_nodes hash also gets cleaned up when the service's
  next state is error; here we wait until the user disables the service, so
  the information stays in the manager status until then
* no extra deletion loop for stale services; just delete the tried node entry
  of a service at the time it gets deleted from the service status hash. That
  no stale hash entry remains in $ms is now ensured by the new cleanup patch
* ensure that we always write to the tried nodes hash saved in $ms
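
To illustrate the mechanism outside the manager code, here is a minimal
self-contained sketch (hypothetical helper and variable names, simplified
log output; the real logic lives in next_state_started below):

    #!/usr/bin/perl
    use strict;
    use warnings;

    # per-service record of already tried nodes, keyed by service ID
    my $tried_nodes_by_sid = {};

    # handle the result of one start attempt and return the next action
    sub handle_start_result {
        my ($sid, $node, $success, $max_relocate) = @_;

        my $tried_nodes = $tried_nodes_by_sid->{$sid} //= [];
        push @$tried_nodes, $node; # always record the node we just tried

        if ($success) {
            # the array size doubles as the try count the old integer held
            print "relocation policy successful for '$sid', tried nodes: " .
                join(', ', @$tried_nodes) . "\n" if scalar(@$tried_nodes) > 1;
            delete $tried_nodes_by_sid->{$sid};
            return 'started';
        } elsif (scalar(@$tried_nodes) <= $max_relocate) {
            return 'relocate'; # below the relocate limit, try another node
        } else {
            print "recovery policy for '$sid' failed, tried nodes: " .
                join(', ', @$tried_nodes) . "\n";
            return 'error';
        }
    }

    # with max_relocate = 1: fail on node2, relocate, succeed on node1
    handle_start_result('fa:130', 'node2', 0, 1); # -> 'relocate'
    handle_start_result('fa:130', 'node1', 1, 1); # -> 'started', logs both

With max_relocate = 1 this reproduces the shape of the two log lines
adjusted in the test expectations below.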

 src/PVE/HA/Manager.pm                      | 36 +++++++++++++++++++-----------
 src/test/test-resource-failure2/log.expect |  1 +
 src/test/test-resource-failure5/log.expect |  2 +-
 3 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/src/PVE/HA/Manager.pm b/src/PVE/HA/Manager.pm
index 017e4b2..c9e53a0 100644
--- a/src/PVE/HA/Manager.pm
+++ b/src/PVE/HA/Manager.pm
@@ -364,12 +364,9 @@ sub manage {
     foreach my $sid (keys %$ss) {
 	next if $sc->{$sid};
 	$haenv->log('info', "removing stale service '$sid' (no config)");
+	# remove all service related state information
 	delete $ss->{$sid};
-    }
-
-    # remove stale relocation try entries
-    foreach my $sid (keys %{$ms->{relocate_trial}}) {
-	delete $ms->{relocate_trial}->{$sid} if !$ss->{$sid};
+	delete $ms->{relocate_tried_nodes}->{$sid};
     }
 
     $self->update_crm_commands();
@@ -608,31 +605,40 @@ sub next_state_started {
 	} else {
 
 	    my $try_next = 0;
+
+	    $master_status->{relocate_tried_nodes}->{$sid} = $master_status->{relocate_tried_nodes}->{$sid} || [];
+	    my $tried_nodes = $master_status->{relocate_tried_nodes}->{$sid};
+
 	    if ($lrm_res) {
+		# always add the current node to the tried nodes list
+		push @$tried_nodes, $sd->{node};
+
 		my $ec = $lrm_res->{exit_code};
 		if ($ec == SUCCESS) {
 
-		    $master_status->{relocate_trial}->{$sid} = 0;
+		    if (scalar(@$tried_nodes) > 1) {
+			$haenv->log('info', "relocation policy successful for '$sid'," .
+				    " tried nodes: " . join(', ', @$tried_nodes) );
+		    }
+
+		    delete $master_status->{relocate_tried_nodes}->{$sid};
 
 		} elsif ($ec == ERROR) {
 		    # apply our relocate policy if we got ERROR from the LRM
 
-		    my $try = $master_status->{relocate_trial}->{$sid} || 0;
-
-		    if ($try < $cd->{max_relocate}) {
+		    if (scalar(@$tried_nodes) <= $cd->{max_relocate}) {
 
-			$try++;
 			# tell select_service_node to relocate if possible
 			$try_next = 1;
 
 			$haenv->log('warning', "starting service $sid on node".
 				   " '$sd->{node}' failed, relocating service.");
-			$master_status->{relocate_trial}->{$sid} = $try;
 
 		    } else {
 
-			$haenv->log('err', "recovery policy for service".
-				   " $sid failed, entering error state!");
+			$haenv->log('err', "recovery policy for service $sid " .
+			            "failed, entering error state. Tried nodes: ".
+			            join(', ', @$tried_nodes));
 			&$change_service_state($self, $sid, 'error');
 			return;
 
@@ -673,8 +679,12 @@ sub next_state_error {
     my ($self, $sid, $cd, $sd, $lrm_res) = @_;
 
     my $ns = $self->{ns};
+    my $ms = $self->{ms};
 
     if ($cd->{state} eq 'disabled') {
+	# clean up on error recovery
+	delete $ms->{relocate_tried_nodes}->{$sid};
+
 	&$change_service_state($self, $sid, 'stopped');
 	return;
     }
diff --git a/src/test/test-resource-failure2/log.expect b/src/test/test-resource-failure2/log.expect
index 604ad95..aa34e35 100644
--- a/src/test/test-resource-failure2/log.expect
+++ b/src/test/test-resource-failure2/log.expect
@@ -41,4 +41,5 @@ info    201    node1/lrm: got lock 'ha_agent_node1_lock'
 info    201    node1/lrm: status change wait_for_agent_lock => active
 info    201    node1/lrm: starting service fa:130
 info    201    node1/lrm: service status fa:130 started
+info    220    node1/crm: relocation policy successful for 'fa:130', tried nodes: node2, node1
 info    720     hardware: exit simulation - done
diff --git a/src/test/test-resource-failure5/log.expect b/src/test/test-resource-failure5/log.expect
index eb87f9f..a15603e 100644
--- a/src/test/test-resource-failure5/log.expect
+++ b/src/test/test-resource-failure5/log.expect
@@ -28,7 +28,7 @@ warn    123    node2/lrm: restart policy: retry number 1 for service 'fa:130'
 info    143    node2/lrm: starting service fa:130
 warn    143    node2/lrm: unable to start service fa:130
 err     143    node2/lrm: unable to start service fa:130 on local node after 1 retries
-err     160    node1/crm: recovery policy for service fa:130 failed, entering error state!
+err     160    node1/crm: recovery policy for service fa:130 failed, entering error state. Tried nodes: node2
 info    160    node1/crm: service 'fa:130': state changed from 'started' to 'error'
 err     163    node2/lrm: service fa:130 is in an error state and needs manual intervention. Look up 'ERROR RECOVERY' in the documentation.
 info    220      cmdlist: execute service fa:130 disabled
-- 
2.1.4