[pve-devel] [PATCH manager] add wipe_disk option when destroying ceph disk

Alwin Antreich a.antreich at proxmox.com
Tue Oct 23 16:02:44 CEST 2018


Nice, was on my list too. ;) Some comments inline.

On Tue, Oct 23, 2018 at 03:33:44PM +0200, David Limbeck wrote:
> this allows the disk to be reused as ceph disk by zeroing the first 200M
> of the destroyed disk
> 
> Signed-off-by: David Limbeck <d.limbeck at proxmox.com>
> ---
>  PVE/API2/Ceph.pm         | 22 ++++++++++++++++++++++
>  www/manager6/ceph/OSD.js | 18 +++++++++++++++++-
>  2 files changed, 39 insertions(+), 1 deletion(-)
> 
> diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
> index 69489a70..6dce2f01 100644
> --- a/PVE/API2/Ceph.pm
> +++ b/PVE/API2/Ceph.pm
> @@ -347,6 +347,12 @@ __PACKAGE__->register_method ({
>  		optional => 1,
>  		default => 0,
>  	    },
> +	    wipe_disk => {
> +		description => 'Wipe first 200M of disk to make it reusable as a ceph OSD.',
> +		type => 'boolean',
> +		optional => 1,
> +		default => 0,
> +	    },
I suggest not exposing this as a separate option; the existing 'cleanup'
option should do it in one go. If I set the cleanup option, I definitely
want the wipe as well.
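
A rough sketch of what I have in mind (just an illustration, reusing
$partitions_to_remove, $param->{cleanup} and PVE::Diskmanage::get_blockdev
from the hunk above): collect the parent block devices under the cleanup
flag before the partitions are removed, no second parameter needed.

    my $disks_to_wipe = {};
    if ($param->{cleanup}) {
	foreach my $part (@$partitions_to_remove) {
	    next if !$part || ! -b $part;
	    # map the partition to its parent block device
	    my $devpath = PVE::Diskmanage::get_blockdev($part);
	    $disks_to_wipe->{$devpath} = 1 if $devpath;
	}
    }

The wipe loop further down can then iterate over keys %$disks_to_wipe
whenever $param->{cleanup} is set.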

>  	},
>      },
>      returns => { type => 'string' },
> @@ -434,6 +440,15 @@ __PACKAGE__->register_method ({
>  		}
>  	    }
>  
> +	    my $disks_to_wipe = {};
> +	    if ($param->{wipe_disk}) {
> +		foreach my $part (@$partitions_to_remove) {
> +		    next if !$part || (! -b $part );
> +		    my $devpath = PVE::Diskmanage::get_blockdev($part);
> +		    $disks_to_wipe->{$devpath} = 1;
> +		}
> +	    }
> +
>  	    print "Unmount OSD $osdsection from  $mountpoint\n";
>  	    eval { run_command(['/bin/umount', $mountpoint]); };
>  	    if (my $err = $@) {
> @@ -443,6 +458,13 @@ __PACKAGE__->register_method ({
>  		foreach my $part (@$partitions_to_remove) {
>  		    $remove_partition->($part);
>  		}
> +		if ($param->{wipe_disk}) {
> +		    foreach my $devpath (keys %$disks_to_wipe) {
> +			print "wipe disk: $devpath\n";
> +			eval { run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}", 'bs=1M', 'count=200']); };
The dd call needs the fdatasync option (conv=fdatasync), and maybe
/dev/urandom as input instead of /dev/zero, since some disks and NVMe
devices will otherwise not actually write the data out.
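
Roughly like this (a sketch only, keeping /dev/zero as the source;
conv=fdatasync makes dd physically flush the output before it returns):

    eval {
	run_command(['/bin/dd', 'if=/dev/zero', "of=${devpath}",
		     'bs=1M', 'count=200', 'conv=fdatasync']);
    };
    warn $@ if $@;

If zeroes turn out to be insufficient on some devices, 'if=/dev/urandom'
could be swapped in at the cost of slower writes.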

> +			warn $@ if $@;
> +		    }
> +		}
>  	    }
>  	};
>  
> diff --git a/www/manager6/ceph/OSD.js b/www/manager6/ceph/OSD.js
> index 8fe7e794..6c8a5aaf 100644
> --- a/www/manager6/ceph/OSD.js
> +++ b/www/manager6/ceph/OSD.js
> @@ -68,7 +68,23 @@ Ext.define('PVE.CephRemoveOsd', {
>  	    name: 'cleanup',
>  	    checked: true,
>  	    labelWidth: 130,
> -	    fieldLabel: gettext('Remove Partitions')
> +	    fieldLabel: gettext('Remove Partitions'),
> +	    handler: function(value) {
> +		var wipe_disk_checkbox = Ext.getCmp('wipe_disk_checkbox');
> +		if (value.checked) {
> +		    wipe_disk_checkbox.setDisabled(false);
> +		} else {
> +		    wipe_disk_checkbox.setDisabled(true);
> +		}
> +	    }
> +	},
> +	{
> +	    xtype: 'proxmoxcheckbox',
> +	    name: 'wipe_disk',
> +	    checked: false,
> +	    disabled: false,
> +	    fieldLabel: gettext('Wipe first 200M of disk'),
> +	    id: 'wipe_disk_checkbox'
>  	}
>      ],
>      initComponent : function() {
> -- 
> 2.11.0