[pve-devel] [RFC v2 pve-ha-manager 3/3] fixing typos, also whitespace cleanup in PVE2 env class

Thomas Lamprecht <t.lamprecht at proxmox.com>
Tue Sep 15 14:18:52 CEST 2015


Signed-off-by: Thomas Lamprecht <t.lamprecht at proxmox.com>
---
 README                     |  6 +++---
 src/PVE/HA/Env.pm          |  4 ++--
 src/PVE/HA/Env/PVE2.pm     | 49 +++++++++++++++++++++++-----------------------
 src/PVE/HA/Manager.pm      |  2 +-
 src/PVE/HA/Sim/Hardware.pm |  2 +-
 src/PVE/HA/Tools.pm        |  2 +-
 6 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/README b/README
index 93e67cb..5f1a1d5 100644
--- a/README
+++ b/README
@@ -41,7 +41,7 @@ The Proxmox 'pmxcfs' implements this on top of corosync.
 === Watchdog ===
 
 We need a reliable watchdog mechanism, which is able to provide hard
-timeouts. It must be guaranteed that the node reboot withing specified
+timeouts. It must be guaranteed that the node reboots within the specified
 timeout if we do not update the watchdog. For me it looks that neither
 systemd nor the standard watchdog(8) daemon provides such guarantees.
 
@@ -52,7 +52,7 @@ provides that service to several other daemons using a local socket.
 
 == Self fencing ==
 
-A node needs to aquire a special 'ha_agent_${node}_lock' (one separate
+A node needs to acquire a special 'ha_agent_${node}_lock' (one separate
 lock for each node) before starting HA resources, and the node updates
 the watchdog device once it get that lock. If the node loose quorum,
 or is unable to get the 'ha_agent_${node}_lock', the watchdog is no
@@ -63,7 +63,7 @@ This makes sure that the node holds the 'ha_agent_${node}_lock' as
 long as there are running services on that node.
 
 The HA manger can assume that the watchdog triggered a reboot when he
-is able to aquire the 'ha_agent_${node}_lock' for that node.
+is able to acquire the 'ha_agent_${node}_lock' for that node.
 
 === Problems with "two_node" Clusters ===
 
diff --git a/src/PVE/HA/Env.pm b/src/PVE/HA/Env.pm
index d05044d..5c7a544 100644
--- a/src/PVE/HA/Env.pm
+++ b/src/PVE/HA/Env.pm
@@ -105,14 +105,14 @@ sub log {
     return $self->{plug}->log($level, @args);
 }
 
-# aquire a cluster wide manager lock 
+# acquire a cluster wide manager lock
 sub get_ha_manager_lock {
     my ($self) = @_;
 
     return $self->{plug}->get_ha_manager_lock();
 }
 
-# aquire a cluster wide node agent lock 
+# acquire a cluster wide node agent lock
 sub get_ha_agent_lock {
     my ($self, $node) = @_;
 
diff --git a/src/PVE/HA/Env/PVE2.pm b/src/PVE/HA/Env/PVE2.pm
index 22cedca..d053dcc 100644
--- a/src/PVE/HA/Env/PVE2.pm
+++ b/src/PVE/HA/Env/PVE2.pm
@@ -47,7 +47,7 @@ sub read_manager_status {
 
 sub write_manager_status {
     my ($self, $status_obj) = @_;
-    
+
     PVE::HA::Config::write_manager_status($status_obj);
 }
 
@@ -63,7 +63,7 @@ sub write_lrm_status {
     my ($self, $status_obj) = @_;
 
     my $node = $self->{nodename};
-    
+
     PVE::HA::Config::write_lrm_status($node, $status_obj);
 }
 
@@ -81,7 +81,7 @@ sub read_crm_commands {
 
 sub service_config_exists {
     my ($self) = @_;
-    
+
     return PVE::HA::Config::resources_config_exists();
 }
 
@@ -89,7 +89,7 @@ sub read_service_config {
     my ($self) = @_;
 
     my $res = PVE::HA::Config::read_resources_config();
-    
+
     my $vmlist = PVE::Cluster::get_vmlist();
     my $conf = {};
 
@@ -116,7 +116,7 @@ sub read_service_config {
 	    }
 	}
     }
-    
+
     return $conf;
 }
 
@@ -147,7 +147,7 @@ sub get_node_info {
     my ($self) = @_;
 
     my ($node_info, $quorate) = ({}, 0);
-   
+
     my $nodename = $self->{nodename};
 
     $quorate = PVE::Cluster::check_cfs_quorum(1) || 0;
@@ -156,11 +156,11 @@ sub get_node_info {
 
     foreach my $node (keys %$members) {
 	my $d = $members->{$node};
-	$node_info->{$node}->{online} = $d->{online}; 
+	$node_info->{$node}->{online} = $d->{online};
     }
-	
+
     $node_info->{$nodename}->{online} = 1; # local node is always up
-    
+
     return ($node_info, $quorate);
 }
 
@@ -187,7 +187,7 @@ sub get_pve_lock {
 
     my $retry = 0;
     my $retry_timeout = 100; # fixme: what timeout
-    
+
     eval {
 
 	mkdir $lockdir;
@@ -219,15 +219,15 @@ sub get_pve_lock {
 	# $self->log('err', $err) if $err; # for debugging
 	return 0;
     }
-    
+
     $last_lock_status->{$lockid} = $got_lock ? $ctime : 0;
 
     if (!!$got_lock != !!$last) {
 	if ($got_lock) {
-	    $self->log('info', "successfully aquired lock '$lockid'");
+	    $self->log('info', "successfully acquired lock '$lockid'");
 	} else {
 	    my $msg = "lost lock '$lockid";
-	    $msg .= " - $err" if $err; 
+	    $msg .= " - $err" if $err;
 	    $self->log('err', $msg);
 	}
     } else {
@@ -245,7 +245,7 @@ sub get_ha_manager_lock {
 
 sub get_ha_agent_lock {
     my ($self, $node) = @_;
-    
+
     $node = $self->nodename() if !defined($node);
 
     return $self->get_pve_lock("ha_agent_${node}_lock");
@@ -255,10 +255,10 @@ sub quorate {
     my ($self) = @_;
 
     my $quorate = 0;
-    eval { 
-	$quorate = PVE::Cluster::check_cfs_quorum(); 
+    eval {
+	$quorate = PVE::Cluster::check_cfs_quorum();
     };
-   
+
     return $quorate;
 }
 
@@ -290,7 +290,7 @@ sub loop_start_hook {
     my ($self) = @_;
 
     PVE::Cluster::cfs_update();
-    
+
     $self->{loop_start} = $self->get_time();
 }
 
@@ -298,7 +298,7 @@ sub loop_end_hook {
     my ($self) = @_;
 
     my $delay = $self->get_time() - $self->{loop_start};
- 
+
     warn "loop take too long ($delay seconds)\n" if $delay > 30;
 }
 
@@ -313,7 +313,7 @@ sub watchdog_open {
 	Type => SOCK_STREAM(),
 	Peer => "/run/watchdog-mux.sock") ||
 	die "unable to open watchdog socket - $!\n";
-      
+
     $self->log('info', "watchdog active");
 }
 
@@ -367,15 +367,15 @@ sub exec_resource_agent {
     my ($self, $sid, $service_config, $cmd, @params) = @_;
 
     # setup execution environment
-    
+
     $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
 
     PVE::INotify::inotify_close();
-    
+
     PVE::INotify::inotify_init();
 
     PVE::Cluster::cfs_update();
- 
+
     my $nodename = $self->{nodename};
 
     # fixme: return valid_exit code (instead of using die) ?
@@ -473,8 +473,7 @@ sub exec_resource_agent {
 
     } elsif ($cmd eq 'error') {
 
-
-	if($running) {
+	if ($running) {
 	    $self->log("err", "service $sid is in an error state while running");
 	} else {
 	    $self->log("warning", "service $sid is not running and in an error state");
diff --git a/src/PVE/HA/Manager.pm b/src/PVE/HA/Manager.pm
index ca20c1b..129140f 100644
--- a/src/PVE/HA/Manager.pm
+++ b/src/PVE/HA/Manager.pm
@@ -379,7 +379,7 @@ sub manage {
 
 	    next if !$fenced_nodes->{$sd->{node}};
 
-	    # node fence was sucessful - mark service as stopped
+	    # node fence was successful - mark service as stopped
 	    &$change_service_state($self, $sid, 'stopped');	    
 	}
 
diff --git a/src/PVE/HA/Sim/Hardware.pm b/src/PVE/HA/Sim/Hardware.pm
index 6597a93..a77668f 100644
--- a/src/PVE/HA/Sim/Hardware.pm
+++ b/src/PVE/HA/Sim/Hardware.pm
@@ -315,7 +315,7 @@ sub global_lock {
 	}
 	if (!$success) {
 	    close($fh);
-	    die "can't aquire lock '$lockfile' - $!\n";
+	    die "can't acquire lock '$lockfile' - $!\n";
 	}
     }
 
diff --git a/src/PVE/HA/Tools.pm b/src/PVE/HA/Tools.pm
index 896b7ab..1d1040c 100644
--- a/src/PVE/HA/Tools.pm
+++ b/src/PVE/HA/Tools.pm
@@ -53,7 +53,7 @@ sub pve_verify_ha_group_node {
 }
 
 PVE::JSONSchema::register_standard_option('pve-ha-group-node-list', {
-    description => "List of cluster node names with optional priority. We use priority '0' as default. The CRM tries to run services on the node with higest priority (also see option 'nofailback').",
+    description => "List of cluster node names with optional priority. We use priority '0' as default. The CRM tries to run services on the node with highest priority (also see option 'nofailback').",
     type => 'string', format => 'pve-ha-group-node-list',
     typetext => '<node>[:<pri>]{,<node>[:<pri>]}*',
 });
-- 
2.1.4



