Mirror of https://github.com/valitydev/salt.git
Merge pull request #43751 from alexbleotu/esxi_vsan_states-gh
ESXi diskgroup and host cache creation/configuration + dependencies
Commit 8ddf2e9442

salt/config/schemas/esxi.py (new file, 219 lines)
@@ -0,0 +1,219 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
|
||||
salt.config.schemas.esxi
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
ESXi host configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (DefinitionsSchema,
|
||||
Schema,
|
||||
ComplexSchemaItem,
|
||||
ArrayItem,
|
||||
IntegerItem,
|
||||
BooleanItem,
|
||||
StringItem,
|
||||
OneOfItem)
|
||||
|
||||
|
||||
class VMwareScsiAddressItem(StringItem):
|
||||
pattern = r'vmhba\d+:C\d+:T\d+:L\d+'
|
||||
|
||||
|
||||
class DiskGroupDiskScsiAddressItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of an ESXi host disk group containing disk SCSI addresses
|
||||
'''
|
||||
|
||||
title = 'Diskgroup Disk Scsi Address Item'
|
||||
description = 'ESXi host diskgroup item containing disk SCSI addresses'
|
||||
|
||||
cache_scsi_addr = VMwareScsiAddressItem(
|
||||
title='Cache Disk Scsi Address',
|
||||
description='Specifies the SCSI address of the cache disk',
|
||||
required=True)
|
||||
|
||||
capacity_scsi_addrs = ArrayItem(
|
||||
title='Capacity Scsi Addresses',
|
||||
description='Array with the SCSI addresses of the capacity disks',
|
||||
items=VMwareScsiAddressItem(),
|
||||
min_items=1)
|
||||
|
||||
|
||||
class DiskGroupDiskIdItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of an ESXi host disk group containing disk ids
|
||||
'''
|
||||
|
||||
title = 'Diskgroup Disk Id Item'
|
||||
description = 'ESXi host diskgroup item containing disk ids'
|
||||
|
||||
cache_id = StringItem(
|
||||
title='Cache Disk Id',
|
||||
description='Specifies the id of the cache disk',
|
||||
pattern=r'[^\s]+')
|
||||
|
||||
capacity_ids = ArrayItem(
|
||||
title='Capacity Disk Ids',
|
||||
description='Array with the ids of the capacity disks',
|
||||
items=StringItem(pattern=r'[^\s]+'),
|
||||
min_items=1)
|
||||
|
||||
|
||||
class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of ESXi host diskgroups containing disk SCSI addresses
|
||||
'''
|
||||
|
||||
title = 'Diskgroups Disk Scsi Address Schema'
|
||||
description = 'ESXi host diskgroup schema containing disk SCSI addresses'
|
||||
diskgroups = ArrayItem(
|
||||
title='Diskgroups',
|
||||
description='List of diskgroups in an ESXi host',
|
||||
min_items=1,
|
||||
items=DiskGroupDiskScsiAddressItem(),
|
||||
required=True)
|
||||
erase_disks = BooleanItem(
|
||||
title='Erase Diskgroup Disks',
|
||||
required=True)
|
||||
|
||||
|
||||
class DiskGroupsDiskIdSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of ESXi host diskgroups containing disk ids
|
||||
'''
|
||||
|
||||
title = 'Diskgroups Disk Id Schema'
|
||||
description = 'ESXi host diskgroup schema containing disk ids'
|
||||
diskgroups = ArrayItem(
|
||||
title='DiskGroups',
|
||||
description='List of disk groups in an ESXi host',
|
||||
min_items=1,
|
||||
items=DiskGroupDiskIdItem(),
|
||||
required=True)
|
||||
|
||||
|
||||
class VmfsDatastoreDiskIdItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of a VMFS datastore referencing a backing disk id
|
||||
'''
|
||||
|
||||
title = 'VMFS Datastore Disk Id Item'
|
||||
description = 'VMFS datastore item referencing a backing disk id'
|
||||
name = StringItem(
|
||||
title='Name',
|
||||
description='Specifies the name of the VMFS datastore',
|
||||
required=True)
|
||||
backing_disk_id = StringItem(
|
||||
title='Backing Disk Id',
|
||||
description=('Specifies the id of the disk backing the VMFS '
|
||||
'datastore'),
|
||||
pattern=r'[^\s]+',
|
||||
required=True)
|
||||
vmfs_version = IntegerItem(
|
||||
title='VMFS Version',
|
||||
description='VMFS version',
|
||||
enum=[1, 2, 3, 5])
|
||||
|
||||
|
||||
class VmfsDatastoreDiskScsiAddressItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of a VMFS datastore referencing a backing disk SCSI address
|
||||
'''
|
||||
|
||||
title = 'VMFS Datastore Disk Scsi Address Item'
|
||||
description = 'VMFS datastore item referencing a backing disk SCSI address'
|
||||
name = StringItem(
|
||||
title='Name',
|
||||
description='Specifies the name of the VMFS datastore',
|
||||
required=True)
|
||||
backing_disk_scsi_addr = VMwareScsiAddressItem(
|
||||
title='Backing Disk Scsi Address',
|
||||
description=('Specifies the SCSI address of the disk backing the VMFS '
|
||||
'datastore'),
|
||||
required=True)
|
||||
vmfs_version = IntegerItem(
|
||||
title='VMFS Version',
|
||||
description='VMFS version',
|
||||
enum=[1, 2, 3, 5])
|
||||
|
||||
|
||||
class VmfsDatastoreSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of a VMFS datastore
|
||||
'''
|
||||
|
||||
title = 'VMFS Datastore Schema'
|
||||
description = 'Schema of a VMFS datastore'
|
||||
datastore = OneOfItem(
|
||||
items=[VmfsDatastoreDiskScsiAddressItem(),
|
||||
VmfsDatastoreDiskIdItem()],
|
||||
required=True)
|
||||
|
||||
|
||||
class HostCacheSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of ESXi host cache
|
||||
'''
|
||||
|
||||
title = 'Host Cache Schema'
|
||||
description = 'Schema of the ESXi host cache'
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
required=True)
|
||||
datastore = VmfsDatastoreDiskScsiAddressItem(required=True)
|
||||
swap_size = StringItem(
|
||||
title='Host cache swap size (in GiB or %)',
|
||||
pattern=r'(\d+GiB)|(([0-9]|([1-9][0-9])|100)%)',
|
||||
required=True)
|
||||
erase_backing_disk = BooleanItem(
|
||||
title='Erase Backing Disk',
|
||||
required=True)
|
||||
|
||||
|
||||
class SimpleHostCacheSchema(Schema):
|
||||
'''
|
||||
Simplified Schema of ESXi host cache
|
||||
'''
|
||||
|
||||
title = 'Simple Host Cache Schema'
|
||||
description = 'Simplified schema of the ESXi host cache'
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
required=True)
|
||||
datastore_name = StringItem(title='Datastore Name',
|
||||
required=True)
|
||||
swap_size_MiB = IntegerItem(title='Host cache swap size in MiB',
|
||||
minimum=1)
|
||||
|
||||
|
||||
class EsxiProxySchema(Schema):
|
||||
'''
|
||||
Schema of the esxi proxy input
|
||||
'''
|
||||
|
||||
title = 'Esxi Proxy Schema'
|
||||
description = 'Esxi proxy schema'
|
||||
additional_properties = False
|
||||
proxytype = StringItem(required=True,
|
||||
enum=['esxi'])
|
||||
host = StringItem(pattern=r'[^\s]+') # Used when connecting directly
|
||||
vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter
|
||||
esxi_host = StringItem()
|
||||
username = StringItem()
|
||||
passwords = ArrayItem(min_items=1,
|
||||
items=StringItem(),
|
||||
unique_items=True)
|
||||
mechanism = StringItem(enum=['userpass', 'sspi'])
|
||||
# TODO Should be changed when anyOf is supported for schemas
|
||||
domain = StringItem()
|
||||
principal = StringItem()
|
||||
protocol = StringItem()
|
||||
port = IntegerItem(minimum=1)
|
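The schemas above are consumed by serializing them to JSON Schema and validating input against the result with ``jsonschema``, as the execution and state modules further down in this diff do. A minimal sketch of that flow (the disk ids are placeholder values):

# Illustrative sketch only; mirrors the validation pattern used by the
# vsphere execution module below. Disk ids are placeholders.
import jsonschema

from salt.config.schemas.esxi import DiskGroupsDiskIdSchema

payload = {'diskgroups': [{'cache_id': 'naa.000000000000001',
                           'capacity_ids': ['naa.000000000000002',
                                            'naa.000000000000003']}]}
try:
    jsonschema.validate(payload, DiskGroupsDiskIdSchema.serialize())
except jsonschema.exceptions.ValidationError as exc:
    raise ValueError('Invalid diskgroup configuration: {0}'.format(exc))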
@@ -442,6 +442,18 @@ class VMwareObjectRetrievalError(VMwareSaltError):
|
||||
'''
|
||||
|
||||
|
||||
class VMwareObjectExistsError(VMwareSaltError):
|
||||
'''
|
||||
Used when a VMware object exists
|
||||
'''
|
||||
|
||||
|
||||
class VMwareObjectNotFoundError(VMwareSaltError):
|
||||
'''
|
||||
Used when a VMware object was not found
|
||||
'''
|
||||
|
||||
|
||||
class VMwareApiError(VMwareSaltError):
|
||||
'''
|
||||
Used when representing a generic VMware API error
|
||||
|
@@ -56,3 +56,7 @@ def cmd(command, *args, **kwargs):
|
||||
proxy_cmd = proxy_prefix + '.ch_config'
|
||||
|
||||
return __proxy__[proxy_cmd](command, *args, **kwargs)
|
||||
|
||||
|
||||
def get_details():
|
||||
return __proxy__['esxi.get_details']()
|
||||
|
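Several of the new vsphere functions below call ``esxi.get_details`` through ``__proxy__`` to find the target host. A hedged illustration of the dictionary a vCenter-connected esxi proxy would return, based on the ``DETAILS`` cache populated in ``salt/proxy/esxi.py`` later in this diff (values are placeholders):

# Rough sketch, not part of the diff; keys follow the proxy's DETAILS cache.
details = __proxy__['esxi.get_details']()
# e.g. {'proxytype': 'esxi',
#       'vcenter': 'vcenter.example.com',
#       'esxi_host': 'esxi1.example.com',
#       'mechanism': 'userpass',
#       'username': 'root',
#       'passwords': ['...'],
#       'protocol': 'https',
#       'port': '443'}
hostname = details['esxi_host']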
@@ -180,11 +180,15 @@ import salt.utils.vsan
|
||||
import salt.utils.pbm
|
||||
from salt.exceptions import CommandExecutionError, VMwareSaltError, \
|
||||
ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \
|
||||
VMwareApiError, InvalidEntityError
|
||||
VMwareApiError, InvalidEntityError, VMwareObjectExistsError
|
||||
from salt.utils.decorators import depends, ignores_kwargs
|
||||
from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \
|
||||
ESXClusterEntitySchema
|
||||
from salt.config.schemas.vcenter import VCenterEntitySchema
|
||||
from salt.config.schemas.esxi import DiskGroupsDiskIdSchema, \
|
||||
VmfsDatastoreSchema, SimpleHostCacheSchema
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
@@ -195,6 +199,14 @@ except ImportError:
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl, pbm, VmomiSupport
|
||||
|
||||
# We check the supported vim versions to infer the pyVmomi version
|
||||
if 'vim25/6.0' in VmomiSupport.versionMap and \
|
||||
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
|
||||
|
||||
log.error('pyVmomi not loaded: Incompatible versions '
|
||||
'of Python. See Issue #29537.')
|
||||
raise ImportError()
|
||||
HAS_PYVMOMI = True
|
||||
except ImportError:
|
||||
HAS_PYVMOMI = False
|
||||
@@ -205,24 +217,11 @@ if esx_cli:
|
||||
else:
|
||||
HAS_ESX_CLI = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'vsphere'
|
||||
__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter']
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if not HAS_JSONSCHEMA:
|
||||
return False, 'Execution module did not load: jsonschema not found'
|
||||
if not HAS_PYVMOMI:
|
||||
return False, 'Execution module did not load: pyVmomi not found'
|
||||
|
||||
# We check the supported vim versions to infer the pyVmomi version
|
||||
if 'vim25/6.0' in VmomiSupport.versionMap and \
|
||||
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
|
||||
|
||||
return False, ('Execution module did not load: Incompatible versions '
|
||||
'of Python and pyVmomi present. See Issue #29537.')
|
||||
return __virtualname__
|
||||
|
||||
|
||||
@@ -5561,6 +5560,60 @@ def list_datastores_via_proxy(datastore_names=None, backing_disk_ids=None,
|
||||
return ret_dict
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def create_vmfs_datastore(datastore_name, disk_id, vmfs_major_version,
|
||||
safety_checks=True, service_instance=None):
|
||||
'''
|
||||
Creates a VMFS datastore on the specified backing disk, with the given name and VMFS major version.
|
||||
|
||||
datastore_name
|
||||
The name of the datastore to be created.
|
||||
|
||||
disk_id
|
||||
The disk id (canonical name) on which the datastore is created.
|
||||
|
||||
vmfs_major_version
|
||||
The VMFS major version.
|
||||
|
||||
safety_checks
|
||||
Specify whether to perform safety check or to skip the checks and try
|
||||
performing the required task. Default is True.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.create_vmfs_datastore datastore_name=ds1 disk_id=
|
||||
vmfs_major_version=5
|
||||
'''
|
||||
log.debug('Validating vmfs datastore input')
|
||||
schema = VmfsDatastoreSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate(
|
||||
{'datastore': {'name': datastore_name,
|
||||
'backing_disk_id': disk_id,
|
||||
'vmfs_version': vmfs_major_version}},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ArgumentValueError(exc)
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
if safety_checks:
|
||||
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=[disk_id])
|
||||
if not disks:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Disk \'{0}\' was not found in host \'{1}\''.format(disk_id,
|
||||
hostname))
|
||||
ds_ref = salt.utils.vmware.create_vmfs_datastore(
|
||||
host_ref, datastore_name, disks[0], vmfs_major_version)
|
||||
return True
|
||||
|
||||
|
||||
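Like the other proxy-aware functions, ``create_vmfs_datastore`` can also be driven from another module or state through ``__salt__``; a hedged sketch with placeholder names, following the call pattern used by the state module later in this diff:

# Hypothetical call site; datastore name and disk id are placeholders.
si = __salt__['vsphere.get_service_instance_via_proxy']()
__salt__['vsphere.create_vmfs_datastore']('ds1', 'naa.000000000000001', 5,
                                          service_instance=si)
__salt__['vsphere.disconnect'](si)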
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
|
||||
@gets_service_instance_via_proxy
|
||||
@@ -5599,6 +5652,41 @@ def rename_datastore(datastore_name, new_datastore_name,
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
|
||||
@gets_service_instance_via_proxy
|
||||
def remove_datastore(datastore, service_instance=None):
|
||||
'''
|
||||
Removes a datastore. If multiple datastores with the same name are found, an error is raised.
|
||||
|
||||
datastore
|
||||
Datastore name
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.remove_datastore ds_name
|
||||
'''
|
||||
log.trace('Removing datastore \'{0}\''.format(datastore))
|
||||
target = _get_proxy_target(service_instance)
|
||||
target_name = target.name
|
||||
datastores = salt.utils.vmware.get_datastores(
|
||||
service_instance,
|
||||
reference=target,
|
||||
datastore_names=[datastore])
|
||||
if not datastores:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Datastore \'{0}\' was not found'.format(datastore))
|
||||
if len(datastores) > 1:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Multiple datastores \'{0}\' were found'.format(datastore))
|
||||
salt.utils.vmware.remove_datastore(service_instance, datastores[0])
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxcluster', 'esxdatacenter')
|
||||
@gets_service_instance_via_proxy
|
||||
@@ -5813,6 +5901,601 @@ def assign_license(license_key, license_name, entity, entity_display_name,
|
||||
entity_name=entity_display_name)
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_hosts_via_proxy(hostnames=None, datacenter=None,
|
||||
cluster=None, service_instance=None):
|
||||
'''
|
||||
Returns a list of hosts for the specified VMware environment. The list
|
||||
of hosts can be filtered by datacenter name and/or cluster name
|
||||
|
||||
hostnames
|
||||
Hostnames to filter on.
|
||||
|
||||
datacenter
|
||||
Name of datacenter. Only hosts in this datacenter will be retrieved.
|
||||
Default is None.
|
||||
|
||||
cluster
|
||||
Name of cluster. Only hosts in this cluster will be retrieved. If a
|
||||
datacenter is not specified the first cluster with this name will be
|
||||
considered. Default is None.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.list_hosts_via_proxy
|
||||
|
||||
salt '*' vsphere.list_hosts_via_proxy hostnames=[esxi1.example.com]
|
||||
|
||||
salt '*' vsphere.list_hosts_via_proxy datacenter=dc1 cluster=cluster1
|
||||
'''
|
||||
if cluster:
|
||||
if not datacenter:
|
||||
raise salt.exceptions.ArgumentValueError(
|
||||
'Datacenter is required when cluster is specified')
|
||||
get_all_hosts = False
|
||||
if not hostnames:
|
||||
get_all_hosts = True
|
||||
hosts = salt.utils.vmware.get_hosts(service_instance,
|
||||
datacenter_name=datacenter,
|
||||
host_names=hostnames,
|
||||
cluster_name=cluster,
|
||||
get_all_hosts=get_all_hosts)
|
||||
return [salt.utils.vmware.get_managed_object_name(h) for h in hosts]
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None):
|
||||
'''
|
||||
Returns a list of dict representations of the disks in an ESXi host.
|
||||
The list of disks can be filtered by disk canonical names or
|
||||
scsi addresses.
|
||||
|
||||
disk_ids:
|
||||
List of disk canonical names to be retrieved. Default is None.
|
||||
|
||||
scsi_addresses
|
||||
List of scsi addresses of disks to be retrieved. Default is None
|
||||
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.list_disks
|
||||
|
||||
salt '*' vsphere.list_disks disk_ids='[naa.00, naa.001]'
|
||||
|
||||
salt '*' vsphere.list_disks
|
||||
scsi_addresses='[vmhba0:C0:T0:L0, vmhba1:C0:T0:L0]'
|
||||
'''
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
log.trace('Retrieving disks of host \'{0}\''.format(hostname))
|
||||
log.trace('disk ids = {0}'.format(disk_ids))
|
||||
log.trace('scsi_addresses = {0}'.format(scsi_addresses))
|
||||
# Default to getting all disks if no filtering is done
|
||||
get_all_disks = True if not (disk_ids or scsi_addresses) else False
|
||||
ret_list = []
|
||||
scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(
|
||||
host_ref, hostname=hostname)
|
||||
canonical_name_to_scsi_address = {
|
||||
lun.canonicalName: scsi_addr
|
||||
for scsi_addr, lun in six.iteritems(scsi_address_to_lun)}
|
||||
for d in salt.utils.vmware.get_disks(host_ref, disk_ids, scsi_addresses,
|
||||
get_all_disks):
|
||||
ret_list.append({'id': d.canonicalName,
|
||||
'scsi_address':
|
||||
canonical_name_to_scsi_address[d.canonicalName]})
|
||||
return ret_list
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def erase_disk_partitions(disk_id=None, scsi_address=None,
|
||||
service_instance=None):
|
||||
'''
|
||||
Erases the partitions on a disk.
|
||||
The disk can be specified either by the canonical name, or by the
|
||||
scsi_address.
|
||||
|
||||
disk_id
|
||||
Canonical name of the disk.
|
||||
Either ``disk_id`` or ``scsi_address`` needs to be specified
|
||||
(``disk_id`` supersedes ``scsi_address``).
|
||||
|
||||
scsi_address
|
||||
Scsi address of the disk.
|
||||
``disk_id`` or ``scsi_address`` needs to be specified
|
||||
(``disk_id`` supersedes ``scsi_address``).
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.erase_disk_partitions scsi_address='vmhba0:C0:T0:L0'
|
||||
|
||||
salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001'
|
||||
'''
|
||||
if not disk_id and not scsi_address:
|
||||
raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' '
|
||||
'needs to be specified')
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
if not disk_id:
|
||||
scsi_address_to_lun = \
|
||||
salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
|
||||
if scsi_address not in scsi_address_to_lun:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Scsi lun with address \'{0}\' was not found on host \'{1}\''
|
||||
''.format(scsi_address, hostname))
|
||||
disk_id = scsi_address_to_lun[scsi_address].canonicalName
|
||||
log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\''
|
||||
''.format(hostname, disk_id, scsi_address))
|
||||
log.trace('Erasing disk partitions on disk \'{0}\' in host \'{1}\''
|
||||
''.format(disk_id, hostname))
|
||||
salt.utils.vmware.erase_disk_partitions(service_instance,
|
||||
host_ref, disk_id,
|
||||
hostname=hostname)
|
||||
log.info('Erased disk partitions on disk \'{0}\' on host \'{1}\''
|
||||
''.format(disk_id, hostname))
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_disk_partitions(disk_id=None, scsi_address=None,
|
||||
service_instance=None):
|
||||
'''
|
||||
Lists the partitions on a disk.
|
||||
The disk can be specified either by the canonical name, or by the
|
||||
scsi_address.
|
||||
|
||||
disk_id
|
||||
Canonical name of the disk.
|
||||
Either ``disk_id`` or ``scsi_address`` needs to be specified
|
||||
(``disk_id`` supersedes ``scsi_address``).
|
||||
|
||||
scsi_address
|
||||
Scsi address of the disk.
|
||||
``disk_id`` or ``scsi_address`` needs to be specified
|
||||
(``disk_id`` supersedes ``scsi_address``).
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.list_disk_partitions scsi_address='vmhba0:C0:T0:L0'
|
||||
|
||||
salt '*' vsphere.list_disk_partitions disk_id='naa.000000000000001'
|
||||
'''
|
||||
if not disk_id and not scsi_address:
|
||||
raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' '
|
||||
'needs to be specified')
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
if not disk_id:
|
||||
scsi_address_to_lun = \
|
||||
salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
|
||||
if scsi_address not in scsi_address_to_lun:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Scsi lun with address \'{0}\' was not found on host \'{1}\''
|
||||
''.format(scsi_address, hostname))
|
||||
disk_id = scsi_address_to_lun[scsi_address].canonicalName
|
||||
log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\''
|
||||
''.format(hostname, disk_id, scsi_address))
|
||||
log.trace('Listing disk partitions on disk \'{0}\' in host \'{1}\''
|
||||
''.format(disk_id, hostname))
|
||||
partition_info = \
|
||||
salt.utils.vmware.get_disk_partition_info(host_ref, disk_id)
|
||||
ret_list = []
|
||||
# NOTE: 1. The layout view has an extra 'None' partition for free space
|
||||
# 2. The orders in the layout/partition views are not the same
|
||||
for part_spec in partition_info.spec.partition:
|
||||
part_layout = [p for p in partition_info.layout.partition
|
||||
if p.partition == part_spec.partition][0]
|
||||
part_dict = {'hostname': hostname,
|
||||
'device': disk_id,
|
||||
'format': partition_info.spec.partitionFormat,
|
||||
'partition': part_spec.partition,
|
||||
'type': part_spec.type,
|
||||
'sectors':
|
||||
part_spec.endSector - part_spec.startSector + 1,
|
||||
'size_KB':
|
||||
(part_layout.end.block - part_layout.start.block + 1) *
|
||||
part_layout.start.blockSize / 1024}
|
||||
ret_list.append(part_dict)
|
||||
return ret_list
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_diskgroups(cache_disk_ids=None, service_instance=None):
|
||||
'''
|
||||
Returns a list of dict representations of the disk groups on an ESXi host.
|
||||
The list of disk groups can be filtered by the cache disks
|
||||
canonical names. If no filtering is applied, all disk groups are returned.
|
||||
|
||||
cache_disk_ids:
|
||||
List of cache disk canonical names of the disk groups to be retrieved.
|
||||
Default is None.
|
||||
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.list_diskgroups
|
||||
|
||||
salt '*' vsphere.list_diskgroups cache_disk_ids='[naa.000000000000001]'
|
||||
'''
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
log.trace('Listing diskgroups in \'{0}\''.format(hostname))
|
||||
get_all_diskgroups = True if not cache_disk_ids else False
|
||||
ret_list = []
|
||||
for dg in salt.utils.vmware.get_diskgroups(host_ref, cache_disk_ids,
|
||||
get_all_diskgroups):
|
||||
ret_list.append(
|
||||
{'cache_disk': dg.ssd.canonicalName,
|
||||
'capacity_disks': [d.canonicalName for d in dg.nonSsd]})
|
||||
return ret_list
|
||||
|
||||
|
||||
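The ``diskgroups_configured`` state later in this diff keys off the ``cache_disk``/``capacity_disks`` fields of this return value; a sketch of the shape (ids are placeholders):

# Example return shape of vsphere.list_diskgroups; disk ids are placeholders.
[{'cache_disk': 'naa.000000000000001',
  'capacity_disks': ['naa.000000000000002', 'naa.000000000000003']}]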
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True,
|
||||
service_instance=None):
|
||||
'''
|
||||
Creates disk group on an ESXi host with the specified cache and
|
||||
capacity disks.
|
||||
|
||||
cache_disk_id
|
||||
The canonical name of the disk to be used as a cache. The disk must be
|
||||
an SSD.
|
||||
|
||||
capacity_disk_ids
|
||||
A list containing canonical names of the capacity disks. Must contain at
|
||||
least one disk id.
|
||||
|
||||
safety_checks
|
||||
Specify whether to perform safety check or to skip the checks and try
|
||||
performing the required task. Default value is True.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.create_diskgroup cache_disk_id='naa.000000000000001'
|
||||
capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
|
||||
'''
|
||||
log.trace('Validating diskgroup input')
|
||||
schema = DiskGroupsDiskIdSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate(
|
||||
{'diskgroups': [{'cache_id': cache_disk_id,
|
||||
'capacity_ids': capacity_disk_ids}]},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ArgumentValueError(exc)
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
if safety_checks:
|
||||
diskgroups = \
|
||||
salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id])
|
||||
if diskgroups:
|
||||
raise VMwareObjectExistsError(
|
||||
'Diskgroup with cache disk id \'{0}\' already exists on ESXi '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
disk_ids = capacity_disk_ids[:]
|
||||
disk_ids.insert(0, cache_disk_id)
|
||||
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids)
|
||||
for id in disk_ids:
|
||||
if not [d for d in disks if d.canonicalName == id]:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No disk with id \'{0}\' was found in ESXi host \'{1}\''
|
||||
''.format(id, hostname))
|
||||
cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0]
|
||||
capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids]
|
||||
vsan_disk_mgmt_system = \
|
||||
salt.utils.vsan.get_vsan_disk_management_system(service_instance)
|
||||
dg = salt.utils.vsan.create_diskgroup(service_instance,
|
||||
vsan_disk_mgmt_system,
|
||||
host_ref,
|
||||
cache_disk,
|
||||
capacity_disks)
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def add_capacity_to_diskgroup(cache_disk_id, capacity_disk_ids,
|
||||
safety_checks=True, service_instance=None):
|
||||
'''
|
||||
Adds capacity disks to the disk group with the specified cache disk.
|
||||
|
||||
cache_disk_id
|
||||
The canonical name of the cache disk.
|
||||
|
||||
capacity_disk_ids
|
||||
A list containing canonical names of the capacity disks to add.
|
||||
|
||||
safety_checks
|
||||
Specify whether to perform safety check or to skip the checks and try
|
||||
performing the required task. Default value is True.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.add_capacity_to_diskgroup
|
||||
cache_disk_id='naa.000000000000001'
|
||||
capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
|
||||
'''
|
||||
log.trace('Validating diskgroup input')
|
||||
schema = DiskGroupsDiskIdSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate(
|
||||
{'diskgroups': [{'cache_id': cache_disk_id,
|
||||
'capacity_ids': capacity_disk_ids}]},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ArgumentValueError(exc)
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
|
||||
if safety_checks:
|
||||
for id in capacity_disk_ids:
|
||||
if not [d for d in disks if d.canonicalName == id]:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No disk with id \'{0}\' was found in ESXi host \'{1}\''
|
||||
''.format(id, hostname))
|
||||
diskgroups = \
|
||||
salt.utils.vmware.get_diskgroups(
|
||||
host_ref, cache_disk_ids=[cache_disk_id])
|
||||
if not diskgroups:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No diskgroup with cache disk id \'{0}\' was found in ESXi '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
vsan_disk_mgmt_system = \
|
||||
salt.utils.vsan.get_vsan_disk_management_system(service_instance)
|
||||
salt.utils.vsan.add_capacity_to_diskgroup(service_instance,
|
||||
vsan_disk_mgmt_system,
|
||||
host_ref,
|
||||
diskgroups[0],
|
||||
disks)
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def remove_capacity_from_diskgroup(cache_disk_id, capacity_disk_ids,
|
||||
data_evacuation=True, safety_checks=True,
|
||||
service_instance=None):
|
||||
'''
|
||||
Remove capacity disks from the disk group with the specified cache disk.
|
||||
|
||||
cache_disk_id
|
||||
The canonical name of the cache disk.
|
||||
|
||||
capacity_disk_ids
|
||||
A list containing canonical names of the capacity disks to remove.
|
||||
|
||||
data_evacuation
|
||||
Specifies whether to gracefully evacuate the data on the capacity disks
|
||||
before removing them from the disk group. Default value is True.
|
||||
|
||||
safety_checks
|
||||
Specify whether to perform safety check or to skip the checks and try
|
||||
performing the required task. Default value is True.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.remove_capacity_from_diskgroup
|
||||
cache_disk_id='naa.000000000000001'
|
||||
capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
|
||||
'''
|
||||
log.trace('Validating diskgroup input')
|
||||
schema = DiskGroupsDiskIdSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate(
|
||||
{'diskgroups': [{'cache_id': cache_disk_id,
|
||||
'capacity_ids': capacity_disk_ids}]},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ArgumentValueError(exc)
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
|
||||
if safety_checks:
|
||||
for id in capacity_disk_ids:
|
||||
if not [d for d in disks if d.canonicalName == id]:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No disk with id \'{0}\' was found in ESXi host \'{1}\''
|
||||
''.format(id, hostname))
|
||||
diskgroups = \
|
||||
salt.utils.vmware.get_diskgroups(host_ref,
|
||||
cache_disk_ids=[cache_disk_id])
|
||||
if not diskgroups:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No diskgroup with cache disk id \'{0}\' was found in ESXi '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
log.trace('data_evacuation = {0}'.format(data_evacuation))
|
||||
salt.utils.vsan.remove_capacity_from_diskgroup(
|
||||
service_instance, host_ref, diskgroups[0],
|
||||
capacity_disks=[d for d in disks
|
||||
if d.canonicalName in capacity_disk_ids],
|
||||
data_evacuation=data_evacuation)
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def remove_diskgroup(cache_disk_id, data_accessibility=True,
|
||||
service_instance=None):
|
||||
'''
|
||||
Remove the diskgroup with the specified cache disk.
|
||||
|
||||
cache_disk_id
|
||||
The canonical name of the cache disk.
|
||||
|
||||
data_accessibility
|
||||
Specifies whether to ensure data accessibility. Default value is True.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001'
|
||||
'''
|
||||
log.trace('Validating diskgroup input')
|
||||
schema = DiskGroupsDiskIdSchema.serialize()
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
diskgroups = \
|
||||
salt.utils.vmware.get_diskgroups(host_ref,
|
||||
cache_disk_ids=[cache_disk_id])
|
||||
if not diskgroups:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No diskgroup with cache disk id \'{0}\' was found in ESXi '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
log.trace('data accessibility = {0}'.format(data_accessibility))
|
||||
salt.utils.vsan.remove_diskgroup(
|
||||
service_instance, host_ref, diskgroups[0],
|
||||
data_accessibility=data_accessibility)
|
||||
return True
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def get_host_cache(service_instance=None):
|
||||
'''
|
||||
Returns the host cache configuration on the proxy host.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.get_host_cache
|
||||
'''
|
||||
# Default to getting all disks if no filtering is done
|
||||
ret_dict = {}
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
hci = salt.utils.vmware.get_host_cache(host_ref)
|
||||
if not hci:
|
||||
log.debug('Host cache not configured on host \'{0}\''.format(hostname))
|
||||
ret_dict['enabled'] = False
|
||||
return ret_dict
|
||||
|
||||
# TODO Support multiple host cache info objects (on multiple datastores)
|
||||
return {'enabled': True,
|
||||
'datastore': {'name': hci.key.name},
|
||||
'swap_size': '{}MiB'.format(hci.swapSize)}
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
@supports_proxies('esxi')
|
||||
@gets_service_instance_via_proxy
|
||||
def configure_host_cache(enabled, datastore=None, swap_size_MiB=None,
|
||||
service_instance=None):
|
||||
'''
|
||||
Configures the host cache on the selected host.
|
||||
|
||||
enabled
|
||||
Boolean flag specifying whether the host cache is enabled.
|
||||
|
||||
datastore
|
||||
Name of the datastore that contains the host cache. Must be set if
|
||||
enabled is ``true``.
|
||||
|
||||
swap_size_MiB
|
||||
Swap size in mebibytes (MiB). Needs to be set if enabled is ``true``. Must be
|
||||
smaller than the datastore size.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.configure_host_cache enabled=False
|
||||
|
||||
salt '*' vsphere.configure_host_cache enabled=True datastore=ds1
|
||||
swap_size_MiB=1024
|
||||
'''
|
||||
log.debug('Validating host cache input')
|
||||
schema = SimpleHostCacheSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate({'enabled': enabled,
|
||||
'datastore_name': datastore,
|
||||
'swap_size_MiB': swap_size_MiB},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ArgumentValueError(exc)
|
||||
if not enabled:
|
||||
raise ArgumentValueError('Disabling the host cache is not supported')
|
||||
ret_dict = {'enabled': False}
|
||||
|
||||
host_ref = _get_proxy_target(service_instance)
|
||||
hostname = __proxy__['esxi.get_details']()['esxi_host']
|
||||
if datastore:
|
||||
ds_refs = salt.utils.vmware.get_datastores(
|
||||
service_instance, host_ref, datastore_names=[datastore])
|
||||
if not ds_refs:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Datastore \'{0}\' was not found on host '
|
||||
'\'{1}\''.format(datastore, hostname))
|
||||
ds_ref = ds_refs[0]
|
||||
salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB)
|
||||
return True
|
||||
|
||||
|
||||
def _check_hosts(service_instance, host, host_names):
|
||||
'''
|
||||
Helper function that checks to see if the host provided is a vCenter Server or
|
||||
@@ -6441,7 +7124,7 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name,
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter')
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter')
|
||||
def _get_proxy_target(service_instance):
|
||||
'''
|
||||
Returns the target object of a proxy.
|
||||
@@ -6472,6 +7155,18 @@ def _get_proxy_target(service_instance):
|
||||
elif proxy_type == 'vcenter':
|
||||
# vcenter proxy - the target is the root folder
|
||||
reference = salt.utils.vmware.get_root_folder(service_instance)
|
||||
elif proxy_type == 'esxi':
|
||||
# esxi proxy
|
||||
details = __proxy__['esxi.get_details']()
|
||||
if 'vcenter' not in details:
|
||||
raise InvalidEntityError('Proxies connected directly to ESXi '
|
||||
'hosts are not supported')
|
||||
references = salt.utils.vmware.get_hosts(
|
||||
service_instance, host_names=details['esxi_host'])
|
||||
if not references:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'ESXi host \'{0}\' was not found'.format(details['esxi_host']))
|
||||
reference = references[0]
|
||||
log.trace('reference = {0}'.format(reference))
|
||||
return reference
|
||||
|
||||
@@ -6495,3 +7190,19 @@ def _get_esxcluster_proxy_details():
|
||||
det.get('protocol'), det.get('port'), det.get('mechanism'), \
|
||||
det.get('principal'), det.get('domain'), det.get('datacenter'), \
|
||||
det.get('cluster')
|
||||
|
||||
|
||||
def _get_esxi_proxy_details():
|
||||
'''
|
||||
Returns the running esxi's proxy details
|
||||
'''
|
||||
det = __proxy__['esxi.get_details']()
|
||||
host = det.get('host')
|
||||
if det.get('vcenter'):
|
||||
host = det['vcenter']
|
||||
esxi_hosts = None
|
||||
if det.get('esxi_host'):
|
||||
esxi_hosts = [det['esxi_host']]
|
||||
return host, det.get('username'), det.get('password'), \
|
||||
det.get('protocol'), det.get('port'), det.get('mechanism'), \
|
||||
det.get('principal'), det.get('domain'), esxi_hosts
|
||||
|
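A sketch of how a caller would unpack the tuple returned by ``_get_esxi_proxy_details`` (the order follows the return statement above):

# Illustrative unpacking; the order matches the return statement above.
host, username, password, protocol, port, mechanism, \
    principal, domain, esxi_hosts = _get_esxi_proxy_details()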
@@ -273,13 +273,22 @@ for standing up an ESXi host from scratch.
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.exceptions import SaltSystemExit
|
||||
from salt.exceptions import SaltSystemExit, InvalidConfigError
|
||||
from salt.config.schemas.esxi import EsxiProxySchema
|
||||
from salt.utils.dictupdate import merge
|
||||
|
||||
# This must be present or the Salt loader won't load this module.
|
||||
__proxyenabled__ = ['esxi']
|
||||
|
||||
# External libraries
|
||||
try:
|
||||
import jsonschema
|
||||
HAS_JSONSCHEMA = True
|
||||
except ImportError:
|
||||
HAS_JSONSCHEMA = False
|
||||
|
||||
# Variables are scoped to this module so we can have persistent data
|
||||
# across calls to fns in here.
|
||||
@@ -288,7 +297,6 @@ DETAILS = {}
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__file__)
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'esxi'
|
||||
|
||||
@@ -297,7 +305,7 @@ def __virtual__():
|
||||
'''
|
||||
Only load if the ESXi execution module is available.
|
||||
'''
|
||||
if 'vsphere.system_info' in __salt__:
|
||||
if HAS_JSONSCHEMA:
|
||||
return __virtualname__
|
||||
|
||||
return False, 'The ESXi Proxy Minion module did not load.'
|
||||
@@ -309,17 +317,32 @@ def init(opts):
|
||||
ESXi devices, the host, login credentials, and, if configured,
|
||||
the protocol and port are cached.
|
||||
'''
|
||||
if 'host' not in opts['proxy']:
|
||||
log.critical('No \'host\' key found in pillar for this proxy.')
|
||||
log.debug('Initializing esxi proxy module in process \'{}\''
|
||||
''.format(os.getpid()))
|
||||
log.debug('Validating esxi proxy input')
|
||||
schema = EsxiProxySchema.serialize()
|
||||
log.trace('esxi_proxy_schema = {}'.format(schema))
|
||||
proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
|
||||
log.trace('proxy_conf = {0}'.format(proxy_conf))
|
||||
try:
|
||||
jsonschema.validate(proxy_conf, schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise InvalidConfigError(exc)
|
||||
|
||||
DETAILS['proxytype'] = proxy_conf['proxytype']
|
||||
if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf):
|
||||
log.critical('Neither \'host\' nor \'vcenter\' keys found in pillar '
|
||||
'for this proxy.')
|
||||
return False
|
||||
if 'username' not in opts['proxy']:
|
||||
if 'host' in proxy_conf:
|
||||
# We have started the proxy by connecting directly to the host
|
||||
if 'username' not in proxy_conf:
|
||||
log.critical('No \'username\' key found in pillar for this proxy.')
|
||||
return False
|
||||
if 'passwords' not in opts['proxy']:
|
||||
if 'passwords' not in proxy_conf:
|
||||
log.critical('No \'passwords\' key found in pillar for this proxy.')
|
||||
return False
|
||||
|
||||
host = opts['proxy']['host']
|
||||
host = proxy_conf['host']
|
||||
|
||||
# Get the correct login details
|
||||
try:
|
||||
@@ -332,9 +355,66 @@ def init(opts):
|
||||
DETAILS['host'] = host
|
||||
DETAILS['username'] = username
|
||||
DETAILS['password'] = password
|
||||
DETAILS['protocol'] = opts['proxy'].get('protocol', 'https')
|
||||
DETAILS['port'] = opts['proxy'].get('port', '443')
|
||||
DETAILS['credstore'] = opts['proxy'].get('credstore')
|
||||
DETAILS['protocol'] = proxy_conf.get('protocol')
|
||||
DETAILS['port'] = proxy_conf.get('port')
|
||||
return True
|
||||
|
||||
if 'vcenter' in proxy_conf:
|
||||
vcenter = proxy_conf['vcenter']
|
||||
if not proxy_conf.get('esxi_host'):
|
||||
log.critical('No \'esxi_host\' key found in pillar for this proxy.')
return False
|
||||
DETAILS['esxi_host'] = proxy_conf['esxi_host']
|
||||
# We have started the proxy by connecting via the vCenter
|
||||
if 'mechanism' not in proxy_conf:
|
||||
log.critical('No \'mechanism\' key found in pillar for this proxy.')
|
||||
return False
|
||||
mechanism = proxy_conf['mechanism']
|
||||
# Save mandatory fields in cache
|
||||
for key in ('vcenter', 'mechanism'):
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
|
||||
if mechanism == 'userpass':
|
||||
if 'username' not in proxy_conf:
|
||||
log.critical('No \'username\' key found in pillar for this '
|
||||
'proxy.')
|
||||
return False
|
||||
if 'passwords' not in proxy_conf or \
|
||||
not proxy_conf['passwords']:
|
||||
|
||||
log.critical('Mechanism is set to \'userpass\', but no '
|
||||
'\'passwords\' key found in pillar for this '
|
||||
'proxy.')
|
||||
return False
|
||||
for key in ('username', 'passwords'):
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
elif mechanism == 'sspi':
|
||||
if 'domain' not in proxy_conf:
|
||||
log.critical('Mechanism is set to \'sspi\', but no '
|
||||
'\'domain\' key found in pillar for this proxy.')
|
||||
return False
|
||||
if 'principal' not in proxy_conf:
|
||||
log.critical('Mechanism is set to \'sspi\', but no '
|
||||
'\'principal\' key found in pillar for this '
|
||||
'proxy.')
|
||||
return False
|
||||
for key in ('domain', 'principal'):
|
||||
DETAILS[key] = proxy_conf[key]
|
||||
|
||||
if mechanism == 'userpass':
|
||||
# Get the correct login details
|
||||
log.debug('Retrieving credentials and testing vCenter connection'
|
||||
' for mechanism \'userpass\'')
|
||||
try:
|
||||
username, password = find_credentials(DETAILS['vcenter'])
|
||||
DETAILS['password'] = password
|
||||
except SaltSystemExit as err:
|
||||
log.critical('Error: {0}'.format(err))
|
||||
return False
|
||||
|
||||
# Save optional
|
||||
DETAILS['protocol'] = proxy_conf.get('protocol', 'https')
|
||||
DETAILS['port'] = proxy_conf.get('port', '443')
|
||||
DETAILS['credstore'] = proxy_conf.get('credstore')
|
||||
|
||||
|
||||
def grains():
|
||||
@@ -358,8 +438,9 @@ def grains_refresh():
|
||||
|
||||
def ping():
|
||||
'''
|
||||
Check to see if the host is responding. Returns False if the host didn't
|
||||
respond, True otherwise.
|
||||
Returns True if connection is to be done via a vCenter (no connection is attempted).
|
||||
Check to see if the host is responding when connecting directly via an ESXi
|
||||
host.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@@ -367,7 +448,12 @@ def ping():
|
||||
|
||||
salt esxi-host test.ping
|
||||
'''
|
||||
# find_credentials(DETAILS['host'])
|
||||
if DETAILS.get('esxi_host'):
|
||||
return True
|
||||
else:
|
||||
# TODO Check connection if mechanism is SSPI
|
||||
if DETAILS['mechanism'] == 'userpass':
|
||||
find_credentials(DETAILS['host'])
|
||||
try:
|
||||
__salt__['vsphere.system_info'](host=DETAILS['host'],
|
||||
username=DETAILS['username'],
|
||||
@@ -375,7 +461,6 @@ def ping():
|
||||
except SaltSystemExit as err:
|
||||
log.warning(err)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@@ -461,3 +546,14 @@ def _grains(host, protocol=None, port=None):
|
||||
port=port)
|
||||
GRAINS_CACHE.update(ret)
|
||||
return GRAINS_CACHE
|
||||
|
||||
|
||||
def is_connected_via_vcenter():
|
||||
return 'vcenter' in DETAILS
|
||||
|
||||
|
||||
def get_details():
|
||||
'''
|
||||
Return the proxy details
|
||||
'''
|
||||
return DETAILS
|
||||
|
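The proxy configuration merged from ``opts`` and pillar in ``init()`` above must validate against ``EsxiProxySchema`` from the new schema file. A hedged example of a vCenter-mediated configuration, written as the Python dict the proxy would see (hostnames and passwords are placeholders):

# Hypothetical proxy configuration (placeholder values) intended to pass
# jsonschema validation against EsxiProxySchema.serialize() in init().
proxy_conf = {'proxytype': 'esxi',
              'vcenter': 'vcenter.example.com',
              'esxi_host': 'esxi1.example.com',
              'mechanism': 'userpass',
              'username': 'root',
              'passwords': ['first_password', 'fallback_password']}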
@@ -90,20 +90,47 @@ ESXi Proxy Minion, please refer to the
|
||||
configuration examples, dependency installation instructions, how to run remote
|
||||
execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state
|
||||
example.
|
||||
|
||||
'''
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import sys
|
||||
import re
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.ext import six
|
||||
import salt.utils.files
|
||||
from salt.exceptions import CommandExecutionError
|
||||
from salt.exceptions import CommandExecutionError, InvalidConfigError, \
|
||||
VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \
|
||||
ArgumentValueError
|
||||
from salt.utils.decorators import depends
|
||||
from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \
|
||||
HostCacheSchema
|
||||
|
||||
# External libraries
|
||||
try:
|
||||
import jsonschema
|
||||
HAS_JSONSCHEMA = True
|
||||
except ImportError:
|
||||
HAS_JSONSCHEMA = False
|
||||
|
||||
# Get Logging Started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from pyVmomi import VmomiSupport
|
||||
|
||||
# We check the supported vim versions to infer the pyVmomi version
|
||||
if 'vim25/6.0' in VmomiSupport.versionMap and \
|
||||
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
|
||||
|
||||
log.error('pyVmomi not loaded: Incompatible versions '
|
||||
'of Python. See Issue #29537.')
|
||||
raise ImportError()
|
||||
HAS_PYVMOMI = True
|
||||
except ImportError:
|
||||
HAS_PYVMOMI = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
return 'esxi.cmd' in __salt__
|
||||
@@ -998,6 +1025,577 @@ def syslog_configured(name,
|
||||
return ret
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
def diskgroups_configured(name, diskgroups, erase_disks=False):
|
||||
'''
|
||||
Configures the disk groups to use for vsan.
|
||||
|
||||
It will do the following:
|
||||
(1) checks if all disks in the diskgroup spec exist and errors if they
|
||||
don't
|
||||
(2) creates diskgroups with the correct disk configurations if diskgroup
|
||||
(identified by the cache disk canonical name) doesn't exist
|
||||
(3) adds extra capacity disks to the existing diskgroup
|
||||
|
||||
State input example
|
||||
-------------------
|
||||
|
||||
.. code:: python
|
||||
|
||||
{
|
||||
'cache_scsi_addr': 'vmhba1:C0:T0:L0',
|
||||
'capacity_scsi_addrs': [
|
||||
'vmhba2:C0:T0:L0',
|
||||
'vmhba3:C0:T0:L0',
|
||||
'vmhba4:C0:T0:L0',
|
||||
]
|
||||
}
|
||||
|
||||
name
|
||||
Mandatory state name.
|
||||
|
||||
diskgroups
|
||||
Disk group representation containing scsi disk addresses.
|
||||
SCSI addresses are expected for the disks in the diskgroup.
|
||||
|
||||
erase_disks
|
||||
Specifies whether to erase all partitions on all member disks of the
|
||||
disk group before the disk group is created. Default value is False.
|
||||
'''
|
||||
proxy_details = __salt__['esxi.get_details']()
|
||||
hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
|
||||
else proxy_details['esxi_host']
|
||||
log.info('Running state {0} for host \'{1}\''.format(name, hostname))
|
||||
# Variable used to return the result of the invocation
|
||||
ret = {'name': name, 'result': None, 'changes': {},
|
||||
'pchanges': {}, 'comment': None}
|
||||
# Signals if errors have been encountered
|
||||
errors = False
|
||||
# Signals if changes are required
|
||||
changes = False
|
||||
comments = []
|
||||
diskgroup_changes = {}
|
||||
si = None
|
||||
try:
|
||||
log.trace('Validating diskgroups_configured input')
|
||||
schema = DiskGroupsDiskScsiAddressSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate({'diskgroups': diskgroups,
|
||||
'erase_disks': erase_disks}, schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise InvalidConfigError(exc)
|
||||
si = __salt__['vsphere.get_service_instance_via_proxy']()
|
||||
host_disks = __salt__['vsphere.list_disks'](service_instance=si)
|
||||
if not host_disks:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'No disks retrieved from host \'{0}\''.format(hostname))
|
||||
scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks}
|
||||
log.trace('scsi_addr_to_disk_map = {0}'.format(scsi_addr_to_disk_map))
|
||||
existing_diskgroups = \
|
||||
__salt__['vsphere.list_diskgroups'](service_instance=si)
|
||||
cache_disk_to_existing_diskgroup_map = \
|
||||
{dg['cache_disk']: dg for dg in existing_diskgroups}
|
||||
except CommandExecutionError as err:
|
||||
log.error('Error: {0}'.format(err))
|
||||
if si:
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({
|
||||
'result': False if not __opts__['test'] else None,
|
||||
'comment': str(err)})
|
||||
return ret
|
||||
|
||||
# Iterate through all of the disk groups
|
||||
for idx, dg in enumerate(diskgroups):
|
||||
# Check for cache disk
|
||||
if dg['cache_scsi_addr'] not in scsi_addr_to_disk_map:
|
||||
comments.append('No cache disk with scsi address \'{0}\' was '
|
||||
'found.'.format(dg['cache_scsi_addr']))
|
||||
log.error(comments[-1])
|
||||
errors = True
|
||||
continue
|
||||
|
||||
# Check for capacity disks
|
||||
cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id']
|
||||
cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'],
|
||||
cache_disk_id)
|
||||
bad_scsi_addrs = []
|
||||
capacity_disk_ids = []
|
||||
capacity_disk_displays = []
|
||||
for scsi_addr in dg['capacity_scsi_addrs']:
|
||||
if scsi_addr not in scsi_addr_to_disk_map:
|
||||
bad_scsi_addrs.append(scsi_addr)
|
||||
continue
|
||||
capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id'])
|
||||
capacity_disk_displays.append(
|
||||
'{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1]))
|
||||
if bad_scsi_addrs:
|
||||
comments.append('Error in diskgroup #{0}: capacity disks with '
|
||||
'scsi addresses {1} were not found.'
|
||||
''.format(idx,
|
||||
', '.join(['\'{0}\''.format(a)
|
||||
for a in bad_scsi_addrs])))
|
||||
log.error(comments[-1])
|
||||
errors = True
|
||||
continue
|
||||
|
||||
if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id):
|
||||
# A new diskgroup needs to be created
|
||||
log.trace('erase_disks = {0}'.format(erase_disks))
|
||||
if erase_disks:
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will '
|
||||
'erase all disks of disk group #{1}; '
|
||||
'cache disk: \'{2}\', '
|
||||
'capacity disk(s): {3}.'
|
||||
''.format(name, idx, cache_disk_display,
|
||||
', '.join(
|
||||
['\'{}\''.format(a) for a in
|
||||
capacity_disk_displays])))
|
||||
else:
|
||||
# Erase disk group disks
|
||||
for disk_id in [cache_disk_id] + capacity_disk_ids:
|
||||
__salt__['vsphere.erase_disk_partitions'](
|
||||
disk_id=disk_id, service_instance=si)
|
||||
comments.append('Erased disks of diskgroup #{0}; '
|
||||
'cache disk: \'{1}\', capacity disk(s): '
|
||||
'{2}'.format(
|
||||
idx, cache_disk_display,
|
||||
', '.join(['\'{0}\''.format(a) for a in
|
||||
capacity_disk_displays])))
|
||||
log.info(comments[-1])
|
||||
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will create '
|
||||
'the disk group #{1}; cache disk: \'{2}\', '
|
||||
'capacity disk(s): {3}.'
|
||||
.format(name, idx, cache_disk_display,
|
||||
', '.join(['\'{0}\''.format(a) for a in
|
||||
capacity_disk_displays])))
|
||||
log.info(comments[-1])
|
||||
changes = True
|
||||
continue
|
||||
try:
|
||||
__salt__['vsphere.create_diskgroup'](cache_disk_id,
|
||||
capacity_disk_ids,
|
||||
safety_checks=False,
|
||||
service_instance=si)
|
||||
except VMwareSaltError as err:
|
||||
comments.append('Error creating disk group #{0}: '
|
||||
'{1}.'.format(idx, err))
|
||||
log.error(comments[-1])
|
||||
errors = True
|
||||
continue
|
||||
|
||||
comments.append('Created disk group #\'{0}\'.'.format(idx))
|
||||
log.info(comments[-1])
|
||||
diskgroup_changes[str(idx)] = \
|
||||
{'new': {'cache': cache_disk_display,
|
||||
'capacity': capacity_disk_displays}}
|
||||
changes = True
|
||||
continue
|
||||
|
||||
# The diskgroup exists; checking the capacity disks
|
||||
log.debug('Disk group #{0} exists. Checking capacity disks: '
|
||||
'{1}.'.format(idx, capacity_disk_displays))
|
||||
existing_diskgroup = \
|
||||
cache_disk_to_existing_diskgroup_map.get(cache_disk_id)
|
||||
existing_capacity_disk_displays = \
|
||||
['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks
|
||||
if d['id'] == disk_id][0], disk_id)
|
||||
for disk_id in existing_diskgroup['capacity_disks']]
|
||||
# Populate added disks and removed disks and their displays
|
||||
added_capacity_disk_ids = []
|
||||
added_capacity_disk_displays = []
|
||||
removed_capacity_disk_ids = []
|
||||
removed_capacity_disk_displays = []
|
||||
for disk_id in capacity_disk_ids:
|
||||
if disk_id not in existing_diskgroup['capacity_disks']:
|
||||
disk_scsi_addr = [d['scsi_address'] for d in host_disks
|
||||
if d['id'] == disk_id][0]
|
||||
added_capacity_disk_ids.append(disk_id)
|
||||
added_capacity_disk_displays.append(
|
||||
'{0} (id:{1})'.format(disk_scsi_addr, disk_id))
|
||||
for disk_id in existing_diskgroup['capacity_disks']:
|
||||
if disk_id not in capacity_disk_ids:
|
||||
disk_scsi_addr = [d['scsi_address'] for d in host_disks
|
||||
if d['id'] == disk_id][0]
|
||||
removed_capacity_disk_ids.append(disk_id)
|
||||
removed_capacity_disk_displays.append(
|
||||
'{0} (id:{1})'.format(disk_scsi_addr, disk_id))
|
||||
|
||||
log.debug('Disk group #{0}: existing capacity disk ids: {1}; added '
|
||||
'capacity disk ids: {2}; removed capacity disk ids: {3}'
|
||||
''.format(idx, existing_capacity_disk_displays,
|
||||
added_capacity_disk_displays,
|
||||
removed_capacity_disk_displays))
|
||||
|
||||
#TODO revisit this when removing capacity disks is supported
|
||||
if removed_capacity_disk_ids:
|
||||
comments.append(
|
||||
'Error removing capacity disk(s) {0} from disk group #{1}; '
|
||||
'operation is not supported.'
|
||||
''.format(', '.join(['\'{0}\''.format(id) for id in
|
||||
removed_capacity_disk_displays]), idx))
|
||||
log.error(comments[-1])
|
||||
errors = True
|
||||
continue
|
||||
|
||||
if added_capacity_disk_ids:
|
||||
# Capacity disks need to be added to disk group
|
||||
|
||||
# Building a string representation of the capacity disks
|
||||
# that need to be added
|
||||
s = ', '.join(['\'{0}\''.format(id) for id in
|
||||
added_capacity_disk_displays])
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will add '
|
||||
'capacity disk(s) {1} to disk group #{2}.'
|
||||
''.format(name, s, idx))
|
||||
log.info(comments[-1])
|
||||
changes = True
|
||||
continue
|
||||
try:
|
||||
__salt__['vsphere.add_capacity_to_diskgroup'](
|
||||
cache_disk_id,
|
||||
added_capacity_disk_ids,
|
||||
safety_checks=False,
|
||||
service_instance=si)
|
||||
except VMwareSaltError as err:
|
||||
comments.append('Error adding capacity disk(s) {0} to '
|
||||
'disk group #{1}: {2}.'.format(s, idx, err))
|
||||
log.error(comments[-1])
|
||||
errors = True
|
||||
continue
|
||||
|
||||
com = ('Added capacity disk(s) {0} to disk group #{1}'
|
||||
''.format(s, idx))
|
||||
log.info(com)
|
||||
comments.append(com)
|
||||
diskgroup_changes[str(idx)] = \
|
||||
{'new': {'cache': cache_disk_display,
|
||||
'capacity': capacity_disk_displays},
|
||||
'old': {'cache': cache_disk_display,
|
||||
'capacity': existing_capacity_disk_displays}}
|
||||
changes = True
|
||||
continue
|
||||
|
||||
# No capacity needs to be added
|
||||
s = ('Disk group #{0} is correctly configured. Nothing to be done.'
|
||||
''.format(idx))
|
||||
log.info(s)
|
||||
comments.append(s)
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
|
||||
#Build the final return message
|
||||
result = (True if not (changes or errors) else # no changes/errors
|
||||
None if __opts__['test'] else # running in test mode
|
||||
False if errors else True) # found errors; defaults to True
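# Summary of the result convention computed above (standard Salt state
# semantics; derived from the expression, shown for illustration only):
#   no changes and no errors           -> True  (nothing to do)
#   test mode with changes or errors   -> None  (only reported)
#   errors outside of test mode        -> False
#   changes applied without errors     -> True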
|
||||
ret.update({'result': result,
|
||||
'comment': '\n'.join(comments)})
|
||||
if changes:
|
||||
if __opts__['test']:
|
||||
ret['pchanges'] = diskgroup_changes
|
||||
elif changes:
|
||||
ret['changes'] = diskgroup_changes
|
||||
return ret
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@depends(HAS_JSONSCHEMA)
|
||||
def host_cache_configured(name, enabled, datastore, swap_size='100%',
|
||||
dedicated_backing_disk=False,
|
||||
erase_backing_disk=False):
|
||||
'''
Configures the host cache used for swapping.

It will do the following:

(1) checks if the backing disk exists
(2) creates the VMFS datastore if it doesn't exist (a datastore partition
    will be created and will use the entire disk)
(3) raises an error if dedicated_backing_disk is True and partitions
    already exist on the backing disk
(4) configures the host cache to use a portion of the datastore for
    caching (either a specific size or a percentage of the datastore)

State input examples
--------------------

Percentage swap size (can't be 100%)

.. code:: python

    {
        'enabled': true,
        'datastore': {
            'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
            'vmfs_version': 5,
            'name': 'hostcache'
        },
        'dedicated_backing_disk': false,
        'swap_size': '98%',
    }

Fixed swap size

.. code:: python

    {
        'enabled': true,
        'datastore': {
            'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
            'vmfs_version': 5,
            'name': 'hostcache'
        },
        'dedicated_backing_disk': true,
        'swap_size': '10GiB',
    }

name
    Mandatory state name.

enabled
    Specifies whether the host cache is enabled.

datastore
    Specifies the host cache datastore.

swap_size
    Specifies the size of the host cache swap. Can be a percentage or a
    value in GiB. Default value is ``100%``.

dedicated_backing_disk
    Specifies whether the backing disk is dedicated to the host cache,
    which means it must have no other partitions. Default is False.

erase_backing_disk
    Specifies whether to erase all partitions on the backing disk before
    the datastore is created. Default value is False.
'''
|
||||
log.trace('enabled = {0}'.format(enabled))
|
||||
log.trace('datastore = {0}'.format(datastore))
|
||||
log.trace('swap_size = {0}'.format(swap_size))
|
||||
log.trace('erase_backing_disk = {0}'.format(erase_backing_disk))
|
||||
# Variable used to return the result of the invocation
|
||||
proxy_details = __salt__['esxi.get_details']()
|
||||
hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
|
||||
else proxy_details['esxi_host']
|
||||
log.trace('hostname = {0}'.format(hostname))
|
||||
log.info('Running host_cache_configured for host '
'\'{0}\''.format(hostname))
|
||||
ret = {'name': hostname, 'comment': 'Default comments',
|
||||
'result': None, 'changes': {}, 'pchanges': {}}
|
||||
result = None if __opts__['test'] else True # We assume success
|
||||
needs_setting = False
|
||||
comments = []
|
||||
changes = {}
|
||||
si = None
|
||||
try:
|
||||
log.debug('Validating host_cache_configured input')
|
||||
schema = HostCacheSchema.serialize()
|
||||
try:
|
||||
jsonschema.validate({'enabled': enabled,
|
||||
'datastore': datastore,
|
||||
'swap_size': swap_size,
|
||||
'erase_backing_disk': erase_backing_disk},
|
||||
schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise InvalidConfigError(exc)
|
||||
m = re.match(r'(\d+)(%|GiB)', swap_size)
|
||||
swap_size_value = int(m.group(1))
|
||||
swap_type = m.group(2)
|
||||
log.trace('swap_size_value = {0}; swap_type = {1}'.format(
|
||||
swap_size_value, swap_type))
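# Illustrative parses of the regex above (values are examples only):
#   re.match(r'(\d+)(%|GiB)', '98%').groups()   -> ('98', '%')
#   re.match(r'(\d+)(%|GiB)', '10GiB').groups() -> ('10', 'GiB')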
|
||||
si = __salt__['vsphere.get_service_instance_via_proxy']()
|
||||
host_cache = __salt__['vsphere.get_host_cache'](service_instance=si)
|
||||
|
||||
# Check enabled
|
||||
if host_cache['enabled'] != enabled:
|
||||
changes.update({'enabled': {'old': host_cache['enabled'],
|
||||
'new': enabled}})
|
||||
needs_setting = True
|
||||
|
||||
# Check datastores
|
||||
existing_datastores = None
|
||||
if host_cache.get('datastore'):
|
||||
existing_datastores = \
|
||||
__salt__['vsphere.list_datastores_via_proxy'](
|
||||
datastore_names=[datastore['name']],
|
||||
service_instance=si)
|
||||
# Retrieve backing disks
|
||||
existing_disks = __salt__['vsphere.list_disks'](
|
||||
scsi_addresses=[datastore['backing_disk_scsi_addr']],
|
||||
service_instance=si)
|
||||
if not existing_disks:
|
||||
raise VMwareObjectRetrievalError(
|
||||
'Disk with scsi address \'{0}\' was not found in host \'{1}\''
|
||||
''.format(datastore['backing_disk_scsi_addr'], hostname))
|
||||
backing_disk = existing_disks[0]
|
||||
backing_disk_display = '{0} (id:{1})'.format(
|
||||
backing_disk['scsi_address'], backing_disk['id'])
|
||||
log.trace('backing_disk = {0}'.format(backing_disk_display))
|
||||
|
||||
existing_datastore = None
|
||||
if not existing_datastores:
|
||||
# Check if disk needs to be erased
|
||||
if erase_backing_disk:
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will erase '
|
||||
'the backing disk \'{1}\' on host \'{2}\'.'
|
||||
''.format(name, backing_disk_display,
|
||||
hostname))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
# Erase disk
|
||||
__salt__['vsphere.erase_disk_partitions'](
|
||||
disk_id=backing_disk['id'], service_instance=si)
|
||||
comments.append('Erased backing disk \'{0}\' on host '
|
||||
'\'{1}\'.'.format(backing_disk_display,
|
||||
hostname))
|
||||
log.info(comments[-1])
|
||||
# Create the datastore
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will create '
|
||||
'the datastore \'{1}\', with backing disk '
|
||||
'\'{2}\', on host \'{3}\'.'
|
||||
''.format(name, datastore['name'],
|
||||
backing_disk_display, hostname))
|
||||
log.info(comments[-1])
|
||||
else:
|
||||
if dedicated_backing_disk:
|
||||
# Check backing disk doesn't already have partitions
|
||||
partitions = __salt__['vsphere.list_disk_partitions'](
|
||||
disk_id=backing_disk['id'], service_instance=si)
|
||||
log.trace('partitions = {0}'.format(partitions))
|
||||
# We will ignore the mbr partitions
|
||||
non_mbr_partitions = [p for p in partitions
|
||||
if p['format'] != 'mbr']
|
||||
if len(non_mbr_partitions) > 0:
|
||||
raise VMwareApiError(
|
||||
'Backing disk \'{0}\' has unexpected partitions'
|
||||
''.format(backing_disk_display))
|
||||
__salt__['vsphere.create_vmfs_datastore'](
|
||||
datastore['name'], existing_disks[0]['id'],
|
||||
datastore['vmfs_version'], service_instance=si)
|
||||
comments.append('Created vmfs datastore \'{0}\', backed by '
|
||||
'disk \'{1}\', on host \'{2}\'.'
|
||||
''.format(datastore['name'],
|
||||
backing_disk_display, hostname))
|
||||
log.info(comments[-1])
|
||||
changes.update(
|
||||
{'datastore':
|
||||
{'new': {'name': datastore['name'],
|
||||
'backing_disk': backing_disk_display}}})
|
||||
existing_datastore = \
|
||||
__salt__['vsphere.list_datastores_via_proxy'](
|
||||
datastore_names=[datastore['name']],
|
||||
service_instance=si)[0]
|
||||
needs_setting = True
|
||||
else:
|
||||
# Check datastore is backed by the correct disk
|
||||
if not existing_datastores[0].get('backing_disk_ids'):
|
||||
raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a '
|
||||
'backing disk'
|
||||
''.format(datastore['name']))
|
||||
if backing_disk['id'] not in \
|
||||
existing_datastores[0]['backing_disk_ids']:
|
||||
|
||||
raise VMwareSaltError(
|
||||
'Datastore \'{0}\' is not backed by the correct disk: '
|
||||
'expected \'{1}\'; got {2}'
|
||||
''.format(
|
||||
datastore['name'], backing_disk['id'],
|
||||
', '.join(
|
||||
['\'{0}\''.format(disk) for disk in
|
||||
existing_datastores[0]['backing_disk_ids']])))
|
||||
|
||||
comments.append('Datastore \'{0}\' already exists on host \'{1}\' '
|
||||
'and is backed by disk \'{2}\'. Nothing to be '
|
||||
'done.'.format(datastore['name'], hostname,
|
||||
backing_disk_display))
|
||||
existing_datastore = existing_datastores[0]
|
||||
log.trace('existing_datastore = {0}'.format(existing_datastore))
|
||||
log.info(comments[-1])
|
||||
|
||||
if existing_datastore:
|
||||
# The following comparisons can be done if the existing_datastore
|
||||
# is set; it may not be set if running in test mode
|
||||
#
|
||||
# We support percent, as well as GiB; we convert the size
# to MiB, in multiples of 1024 (VMware SDK limitation)
|
||||
if swap_type == '%':
|
||||
# Percentage swap size
|
||||
# Convert from bytes to MiB
|
||||
raw_size_MiB = (swap_size_value/100.0) * \
|
||||
(existing_datastore['capacity']/1024/1024)
|
||||
else:
|
||||
raw_size_MiB = swap_size_value * 1024
|
||||
log.trace('raw_size = {0}MiB'.format(raw_size_MiB))
|
||||
swap_size_MiB = int(raw_size_MiB/1024)*1024
|
||||
log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB))
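# Worked example (illustrative numbers only): a 45 GiB datastore has a
# capacity of 46080 MiB; a '98%' swap_size gives raw_size_MiB = 45158.4,
# which is rounded down to a 1024 MiB multiple: swap_size_MiB = 45056.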
|
||||
existing_swap_size_MiB = 0
|
||||
m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \
|
||||
host_cache.get('swap_size') else None
|
||||
if m:
|
||||
# if swap_size from the host is set and has an expected value
|
||||
# we are going to parse it to get the number of MiBs
|
||||
existing_swap_size_MiB = int(m.group(1))
|
||||
if not existing_swap_size_MiB == swap_size_MiB:
|
||||
needs_setting = True
|
||||
changes.update(
|
||||
{'swap_size':
|
||||
{'old': '{}GiB'.format(existing_swap_size_MiB/1024),
|
||||
'new': '{}GiB'.format(swap_size_MiB/1024)}})
|
||||
|
||||
if needs_setting:
|
||||
if __opts__['test']:
|
||||
comments.append('State {0} will configure '
|
||||
'the host cache on host \'{1}\' to: {2}.'
|
||||
''.format(name, hostname,
|
||||
{'enabled': enabled,
|
||||
'datastore_name': datastore['name'],
|
||||
'swap_size': swap_size}))
|
||||
else:
|
||||
if (existing_datastore['capacity'] / 1024.0**2) < \
|
||||
swap_size_MiB:
|
||||
|
||||
raise ArgumentValueError(
|
||||
'Capacity of host cache datastore \'{0}\' ({1} MiB) is '
|
||||
'smaller than the required swap size ({2} MiB)'
|
||||
''.format(existing_datastore['name'],
|
||||
existing_datastore['capacity'] / 1024.0**2,
|
||||
swap_size_MiB))
|
||||
__salt__['vsphere.configure_host_cache'](
|
||||
enabled,
|
||||
datastore['name'],
|
||||
swap_size_MiB=swap_size_MiB,
|
||||
service_instance=si)
|
||||
comments.append('Host cache configured on host '
|
||||
'\'{0}\'.'.format(hostname))
|
||||
else:
|
||||
comments.append('Host cache on host \'{0}\' is already correctly '
|
||||
'configured. Nothing to be done.'.format(hostname))
|
||||
result = True
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
log.info(comments[-1])
|
||||
ret.update({'comment': '\n'.join(comments),
|
||||
'result': result})
|
||||
if __opts__['test']:
|
||||
ret['pchanges'] = changes
|
||||
else:
|
||||
ret['changes'] = changes
|
||||
return ret
|
||||
except CommandExecutionError as err:
|
||||
log.error('Error: {0}.'.format(err))
|
||||
if si:
|
||||
__salt__['vsphere.disconnect'](si)
|
||||
ret.update({
|
||||
'result': False if not __opts__['test'] else None,
|
||||
'comment': '{}.'.format(err)})
|
||||
return ret
|
||||
|
||||
|
||||
def _lookup_syslog_config(config):
|
||||
'''
|
||||
Helper function that looks up syslog_config keys available from
|
||||
|
@@ -1909,7 +1909,7 @@ def get_datastores(service_instance, reference, datastore_names=None,
|
||||
'is set'.format(reference.__class__.__name__))
|
||||
if (not get_all_datastores) and backing_disk_ids:
|
||||
# At this point we know the reference is a vim.HostSystem
|
||||
log.debug('Filtering datastores with backing disk ids: {}'
|
||||
log.trace('Filtering datastores with backing disk ids: {}'
|
||||
''.format(backing_disk_ids))
|
||||
storage_system = get_storage_system(service_instance, reference,
|
||||
obj_name)
|
||||
@@ -1925,11 +1925,11 @@ def get_datastores(service_instance, reference, datastore_names=None,
|
||||
# Skip volume if it doesn't contain an extent with a
|
||||
# canonical name of interest
|
||||
continue
|
||||
log.debug('Found datastore \'{0}\' for disk id(s) \'{1}\''
|
||||
log.trace('Found datastore \'{0}\' for disk id(s) \'{1}\''
|
||||
''.format(vol.name,
|
||||
[e.diskName for e in vol.extent]))
|
||||
disk_datastores.append(vol.name)
|
||||
log.debug('Datastore found for disk filter: {}'
|
||||
log.trace('Datastore found for disk filter: {}'
|
||||
''.format(disk_datastores))
|
||||
if datastore_names:
|
||||
datastore_names.extend(disk_datastores)
|
||||
@@ -2006,7 +2006,7 @@ def rename_datastore(datastore_ref, new_datastore_name):
|
||||
New datastore name
|
||||
'''
|
||||
ds_name = get_managed_object_name(datastore_ref)
|
||||
log.debug('Renaming datastore \'{0}\' to '
|
||||
log.trace('Renaming datastore \'{0}\' to '
|
||||
'\'{1}\''.format(ds_name, new_datastore_name))
|
||||
try:
|
||||
datastore_ref.RenameDatastore(new_datastore_name)
|
||||
@@ -2048,6 +2048,224 @@ def get_storage_system(service_instance, host_ref, hostname=None):
|
||||
return objs[0]['object']
|
||||
|
||||
|
||||
def _get_partition_info(storage_system, device_path):
|
||||
'''
Returns partition information for a device path, of type
vim.HostDiskPartitionInfo
'''
|
||||
try:
|
||||
partition_infos = \
|
||||
storage_system.RetrieveDiskPartitionInfo(
|
||||
devicePath=[device_path])
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
log.trace('partition_info = {0}'.format(partition_infos[0]))
|
||||
return partition_infos[0]
|
||||
|
||||
|
||||
def _get_new_computed_partition_spec(hostname, storage_system, device_path,
|
||||
partition_info):
|
||||
'''
Computes the new disk partition info when adding a new VMFS partition that
uses up the remainder of the disk; returns a tuple
(new_partition_number, vim.HostDiskPartitionSpec)
'''
|
||||
log.trace('Adding a partition at the end of the disk and getting the new '
|
||||
'computed partition spec')
|
||||
#TODO implement support for multiple partitions
|
||||
# We only support adding a partition at the end of the disk
|
||||
free_partitions = [p for p in partition_info.layout.partition
|
||||
if p.type == 'none']
|
||||
if not free_partitions:
|
||||
raise salt.exceptions.VMwareObjectNotFoundError(
|
||||
'Free partition was not found on device \'{0}\''
|
||||
''.format(partition_info.deviceName))
|
||||
free_partition = free_partitions[0]
|
||||
|
||||
# Create a layout object that copies the existing one
|
||||
layout = vim.HostDiskPartitionLayout(
|
||||
total=partition_info.layout.total,
|
||||
partition=partition_info.layout.partition)
|
||||
# Create a partition with the free space on the disk
|
||||
# Change the free partition type to vmfs
|
||||
free_partition.type = 'vmfs'
|
||||
try:
|
||||
computed_partition_info = storage_system.ComputeDiskPartitionInfo(
|
||||
devicePath=device_path,
|
||||
partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
|
||||
layout=layout)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
log.trace('computed partition info = {0}'
|
||||
''.format(computed_partition_info))
|
||||
log.trace('Retrieving new partition number')
|
||||
partition_numbers = [p.partition for p in
|
||||
computed_partition_info.layout.partition
|
||||
if (p.start.block == free_partition.start.block or
|
||||
# XXX If the entire disk is free (i.e. the free
|
||||
# disk partition starts at block 0) the newly
|
||||
# created partition is created from block 1
|
||||
(free_partition.start.block == 0 and
|
||||
p.start.block == 1)) and
|
||||
p.end.block == free_partition.end.block and
|
||||
p.type == 'vmfs']
|
||||
if not partition_numbers:
|
||||
raise salt.exceptions.VMwareNotFoundError(
|
||||
'New partition was not found in computed partitions of device '
|
||||
'\'{0}\''.format(partition_info.deviceName))
|
||||
log.trace('new partition number = {0}'.format(partition_numbers[0]))
|
||||
return (partition_numbers[0], computed_partition_info.spec)
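# Hedged usage sketch (mirrors the call made in create_vmfs_datastore below;
# 'disk' is an illustrative vim.HostScsiDisk):
#   part_number, part_spec = _get_new_computed_partition_spec(
#       hostname, storage_system, disk.devicePath, partition_info)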
|
||||
|
||||
|
||||
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
|
||||
vmfs_major_version, storage_system=None):
|
||||
'''
Creates a VMFS datastore from a disk_id

host_ref
    vim.HostSystem object referencing a host to create the datastore on

datastore_name
    Name of the datastore

disk_ref
    vim.HostScsiDisk on which the datastore is created

vmfs_major_version
    VMFS major version to use
'''
|
||||
# TODO Support variable sized partitions
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
disk_id = disk_ref.canonicalName
|
||||
log.debug('Creating datastore \'{0}\' on host \'{1}\', scsi disk \'{2}\', '
|
||||
'vmfs v{3}'.format(datastore_name, hostname, disk_id,
|
||||
vmfs_major_version))
|
||||
if not storage_system:
|
||||
si = get_service_instance_from_managed_object(host_ref, name=hostname)
|
||||
storage_system = get_storage_system(si, host_ref, hostname)
|
||||
|
||||
target_disk = disk_ref
|
||||
partition_info = _get_partition_info(storage_system,
|
||||
target_disk.devicePath)
|
||||
log.trace('partition_info = {0}'.format(partition_info))
|
||||
new_partition_number, partition_spec = _get_new_computed_partition_spec(
|
||||
hostname, storage_system, target_disk.devicePath, partition_info)
|
||||
spec = vim.VmfsDatastoreCreateSpec(
|
||||
vmfs=vim.HostVmfsSpec(
|
||||
majorVersion=vmfs_major_version,
|
||||
volumeName=datastore_name,
|
||||
extent=vim.HostScsiDiskPartition(
|
||||
diskName=disk_id,
|
||||
partition=new_partition_number)),
|
||||
diskUuid=target_disk.uuid,
|
||||
partition=partition_spec)
|
||||
try:
|
||||
ds_ref = \
|
||||
host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
log.debug('Created datastore \'{0}\' on host '
|
||||
'\'{1}\''.format(datastore_name, hostname))
|
||||
return ds_ref
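# Hedged usage sketch (assumes an authenticated service instance 'si'; the
# host name and SCSI address below are illustrative, not part of this module):
#   host_ref = get_hosts(si, host_names=['esxi01.example.com'])[0]
#   disk_ref = get_disks(host_ref, scsi_addresses=['vmhba0:C0:T0:L0'])[0]
#   ds_ref = create_vmfs_datastore(host_ref, 'hostcache', disk_ref, 5)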
|
||||
|
||||
|
||||
def get_host_datastore_system(host_ref, hostname=None):
|
||||
'''
|
||||
Returns a host's datastore system
|
||||
|
||||
host_ref
|
||||
Reference to the ESXi host
|
||||
|
||||
hostname
|
||||
Name of the host. This argument is optional.
|
||||
'''
|
||||
|
||||
if not hostname:
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
service_instance = get_service_instance_from_managed_object(host_ref)
|
||||
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
|
||||
path='configManager.datastoreSystem',
|
||||
type=vim.HostSystem,
|
||||
skip=False)
|
||||
objs = get_mors_with_properties(service_instance,
|
||||
vim.HostDatastoreSystem,
|
||||
property_list=['datastore'],
|
||||
container_ref=host_ref,
|
||||
traversal_spec=traversal_spec)
|
||||
if not objs:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' datastore system was not retrieved'
|
||||
''.format(hostname))
|
||||
log.trace('[{0}] Retrieved datastore system'.format(hostname))
|
||||
return objs[0]['object']
|
||||
|
||||
|
||||
def remove_datastore(service_instance, datastore_ref):
|
||||
'''
Removes a datastore

service_instance
    The Service Instance Object containing the datastore

datastore_ref
    The reference to the datastore to remove
'''
|
||||
ds_props = get_properties_of_managed_object(
|
||||
datastore_ref, ['host', 'info', 'name'])
|
||||
ds_name = ds_props['name']
|
||||
log.debug('Removing datastore \'{}\''.format(ds_name))
|
||||
ds_info = ds_props['info']
|
||||
ds_hosts = ds_props.get('host')
|
||||
if not ds_hosts:
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Datastore \'{0}\' can\'t be removed. No '
|
||||
'attached hosts found'.format(ds_name))
|
||||
hostname = get_managed_object_name(ds_hosts[0].key)
|
||||
host_ds_system = get_host_datastore_system(ds_hosts[0].key,
|
||||
hostname=hostname)
|
||||
try:
|
||||
host_ds_system.RemoveDatastore(datastore_ref)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
log.trace('[{0}] Removed datastore \'{1}\''.format(hostname, ds_name))
|
||||
|
||||
|
||||
def get_hosts(service_instance, datacenter_name=None, host_names=None,
|
||||
cluster_name=None, get_all_hosts=False):
|
||||
'''
|
||||
@@ -2072,44 +2290,541 @@ def get_hosts(service_instance, datacenter_name=None, host_names=None,
|
||||
Default value is False.
|
||||
'''
|
||||
properties = ['name']
|
||||
if cluster_name and not datacenter_name:
|
||||
raise salt.exceptions.ArgumentValueError(
|
||||
'Must specify the datacenter when specifying the cluster')
|
||||
if not host_names:
|
||||
host_names = []
|
||||
if cluster_name:
|
||||
properties.append('parent')
|
||||
if datacenter_name:
|
||||
if not datacenter_name:
|
||||
# Assume the root folder is the starting point
|
||||
start_point = get_root_folder(service_instance)
|
||||
else:
|
||||
start_point = get_datacenter(service_instance, datacenter_name)
|
||||
if cluster_name:
|
||||
# Retrieval to test if cluster exists. Cluster existence only makes
|
||||
# sense if the cluster has been specified
|
||||
# sense if the datacenter has been specified
|
||||
cluster = get_cluster(start_point, cluster_name)
|
||||
else:
|
||||
# Assume the root folder is the starting point
|
||||
start_point = get_root_folder(service_instance)
|
||||
properties.append('parent')
|
||||
|
||||
# Search for the objects
|
||||
hosts = get_mors_with_properties(service_instance,
|
||||
vim.HostSystem,
|
||||
container_ref=start_point,
|
||||
property_list=properties)
|
||||
log.trace('Retrieved hosts: {0}'.format([h['name'] for h in hosts]))
|
||||
filtered_hosts = []
|
||||
for h in hosts:
|
||||
# Complex conditions checking if a host should be added to the
|
||||
# filtered list (either due to its name and/or cluster membership)
|
||||
name_condition = get_all_hosts or (h['name'] in host_names)
|
||||
# the datacenter_name needs to be set in order for the cluster
|
||||
# condition membership to be checked, otherwise the condition is
|
||||
# ignored
|
||||
cluster_condition = \
|
||||
(not datacenter_name or not cluster_name or
|
||||
(isinstance(h['parent'], vim.ClusterComputeResource) and
|
||||
h['parent'].name == cluster_name))
|
||||
|
||||
if name_condition and cluster_condition:
|
||||
if cluster_name:
|
||||
if not isinstance(h['parent'], vim.ClusterComputeResource):
|
||||
continue
|
||||
parent_name = get_managed_object_name(h['parent'])
|
||||
if parent_name != cluster_name:
|
||||
continue
|
||||
|
||||
if get_all_hosts:
|
||||
filtered_hosts.append(h['object'])
|
||||
continue
|
||||
|
||||
if h['name'] in host_names:
|
||||
filtered_hosts.append(h['object'])
|
||||
return filtered_hosts
|
||||
|
||||
|
||||
def _get_scsi_address_to_lun_key_map(service_instance,
|
||||
host_ref,
|
||||
storage_system=None,
|
||||
hostname=None):
|
||||
'''
|
||||
Returns a map between the scsi addresses and the keys of all luns on an ESXi
|
||||
host.
|
||||
map[<scsi_address>] = <lun key>
|
||||
|
||||
service_instance
|
||||
The Service Instance Object from which to obtain the hosts
|
||||
|
||||
host_ref
|
||||
The vim.HostSystem object representing the host that contains the
|
||||
requested disks.
|
||||
|
||||
storage_system
|
||||
The host's storage system. Default is None.
|
||||
|
||||
hostname
|
||||
Name of the host. Default is None.
|
||||
'''
|
||||
map = {}
|
||||
if not hostname:
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if not storage_system:
|
||||
storage_system = get_storage_system(service_instance, host_ref,
|
||||
hostname)
|
||||
try:
|
||||
device_info = storage_system.storageDeviceInfo
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
if not device_info:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' storage device '
|
||||
'info was not retrieved'.format(hostname))
|
||||
multipath_info = device_info.multipathInfo
|
||||
if not multipath_info:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' multipath info was not retrieved'
|
||||
''.format(hostname))
|
||||
if multipath_info.lun is None:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'No luns were retrieved from host \'{0}\''.format(hostname))
|
||||
lun_key_by_scsi_addr = {}
|
||||
for l in multipath_info.lun:
|
||||
# The vmware scsi_address may have multiple comma separated values
|
||||
# The first one is the actual scsi address
|
||||
lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
|
||||
for p in l.path})
|
||||
log.trace('Scsi address to lun id map on host \'{0}\': '
|
||||
'{1}'.format(hostname, lun_key_by_scsi_addr))
|
||||
return lun_key_by_scsi_addr
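# Illustrative shape of the returned map (the lun key string is made up):
#   {'vmhba0:C0:T0:L0': 'key-vim.host.ScsiDisk-020000...', ...}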
|
||||
|
||||
|
||||
def get_all_luns(host_ref, storage_system=None, hostname=None):
|
||||
'''
Returns a list of all the luns (vim.HostScsiLun objects) on an ESXi host

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

storage_system
    The host's storage system. Default is None.

hostname
    Name of the host. This argument is optional.
'''
|
||||
if not hostname:
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if not storage_system:
|
||||
si = get_service_instance_from_managed_object(host_ref, name=hostname)
|
||||
storage_system = get_storage_system(si, host_ref, hostname)
|
||||
if not storage_system:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' storage system was not retrieved'
|
||||
''.format(hostname))
|
||||
try:
|
||||
device_info = storage_system.storageDeviceInfo
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
if not device_info:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' storage device info was not retrieved'
|
||||
''.format(hostname))
|
||||
|
||||
scsi_luns = device_info.scsiLun
|
||||
if scsi_luns:
|
||||
log.trace('Retrieved scsi luns in host \'{0}\': {1}'
|
||||
''.format(hostname, [l.canonicalName for l in scsi_luns]))
|
||||
return scsi_luns
|
||||
log.trace('Retrieved no scsi_luns in host \'{0}\''.format(hostname))
|
||||
return []
|
||||
|
||||
|
||||
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
|
||||
'''
Returns a map of all vim.ScsiLun objects on an ESXi host keyed by their
scsi address

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

storage_system
    The host's storage system. Default is None.

hostname
    Name of the host. This argument is optional.
'''
|
||||
if not hostname:
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
si = get_service_instance_from_managed_object(host_ref, name=hostname)
|
||||
if not storage_system:
|
||||
storage_system = get_storage_system(si, host_ref, hostname)
|
||||
lun_ids_to_scsi_addr_map = \
|
||||
_get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
|
||||
hostname)
|
||||
luns_to_key_map = {d.key: d for d in
|
||||
get_all_luns(host_ref, storage_system, hostname)}
|
||||
return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
|
||||
six.iteritems(lun_ids_to_scsi_addr_map)}
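# Hedged usage sketch (the SCSI address is illustrative):
#   lun_map = get_scsi_address_to_lun_map(host_ref)
#   disk = lun_map.get('vmhba0:C0:T0:L0')  # a vim.ScsiLun, e.g. vim.HostScsiDisk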
|
||||
|
||||
|
||||
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
|
||||
get_all_disks=False):
|
||||
'''
Returns a list of vim.HostScsiDisk objects representing disks
in an ESXi host, filtered by their canonical names and scsi_addresses

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

disk_ids
    The list of canonical names of the disks to be retrieved. Default value
    is None

scsi_addresses
    The list of scsi addresses of the disks to be retrieved. Default value
    is None

get_all_disks
    Specifies whether to retrieve all disks in the host.
    Default value is False.
'''
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if get_all_disks:
|
||||
log.trace('Retrieving all disks in host \'{0}\''.format(hostname))
|
||||
else:
|
||||
log.trace('Retrieving disks in host \'{0}\': ids = ({1}); scsi '
|
||||
'addresses = ({2})'.format(hostname, disk_ids,
|
||||
scsi_addresses))
|
||||
if not (disk_ids or scsi_addresses):
|
||||
return []
|
||||
si = get_service_instance_from_managed_object(host_ref, name=hostname)
|
||||
storage_system = get_storage_system(si, host_ref, hostname)
|
||||
disk_keys = []
|
||||
if scsi_addresses:
|
||||
# convert the scsi addresses to disk keys
|
||||
lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
|
||||
storage_system,
|
||||
hostname)
|
||||
disk_keys = [key for scsi_addr, key
|
||||
in six.iteritems(lun_key_by_scsi_addr)
|
||||
if scsi_addr in scsi_addresses]
|
||||
log.trace('disk_keys based on scsi_addresses = {0}'.format(disk_keys))
|
||||
|
||||
scsi_luns = get_all_luns(host_ref, storage_system)
|
||||
scsi_disks = [disk for disk in scsi_luns
|
||||
if isinstance(disk, vim.HostScsiDisk) and (
|
||||
get_all_disks or
|
||||
# Filter by canonical name
|
||||
(disk_ids and (disk.canonicalName in disk_ids)) or
|
||||
# Filter by disk keys from scsi addresses
|
||||
(disk.key in disk_keys))]
|
||||
log.trace('Retrieved disks in host \'{0}\': {1}'
|
||||
''.format(hostname, [d.canonicalName for d in scsi_disks]))
|
||||
return scsi_disks
|
||||
|
||||
|
||||
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
|
||||
'''
|
||||
Returns all partitions on a disk
|
||||
|
||||
host_ref
|
||||
The reference of the ESXi host containing the disk
|
||||
|
||||
disk_id
|
||||
The canonical name of the disk whose partitions are to be removed
|
||||
|
||||
storage_system
|
||||
The ESXi host's storage system. Default is None.
|
||||
'''
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
service_instance = get_service_instance_from_managed_object(host_ref)
|
||||
if not storage_system:
|
||||
storage_system = get_storage_system(service_instance, host_ref,
|
||||
hostname)
|
||||
|
||||
props = get_properties_of_managed_object(storage_system,
|
||||
['storageDeviceInfo.scsiLun'])
|
||||
if not props.get('storageDeviceInfo.scsiLun'):
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'No devices were retrieved in host \'{0}\''.format(hostname))
|
||||
log.trace('[{0}] Retrieved {1} devices: {2}'.format(
|
||||
hostname, len(props['storageDeviceInfo.scsiLun']),
|
||||
', '.join([l.canonicalName
|
||||
for l in props['storageDeviceInfo.scsiLun']])))
|
||||
disks = [l for l in props['storageDeviceInfo.scsiLun']
|
||||
if isinstance(l, vim.HostScsiDisk) and
|
||||
l.canonicalName == disk_id]
|
||||
if not disks:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Disk \'{0}\' was not found in host \'{1}\''
|
||||
''.format(disk_id, hostname))
|
||||
log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath))
|
||||
partition_info = _get_partition_info(storage_system, disks[0].devicePath)
|
||||
log.trace('[{0}] Retrieved {1} partition(s) on disk \'{2}\''
|
||||
''.format(hostname, len(partition_info.spec.partition), disk_id))
|
||||
return partition_info
|
||||
|
||||
|
||||
def erase_disk_partitions(service_instance, host_ref, disk_id,
|
||||
hostname=None, storage_system=None):
|
||||
'''
Erases all partitions on a disk

service_instance
    The Service Instance Object from which to obtain all information

host_ref
    The reference of the ESXi host containing the disk

disk_id
    The canonical name of the disk whose partitions are to be removed

hostname
    The ESXi hostname. Default is None.

storage_system
    The ESXi host's storage system. Default is None.
'''
|
||||
|
||||
if not hostname:
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if not storage_system:
|
||||
storage_system = get_storage_system(service_instance, host_ref,
|
||||
hostname)
|
||||
|
||||
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
|
||||
path='configManager.storageSystem',
|
||||
type=vim.HostSystem,
|
||||
skip=False)
|
||||
results = get_mors_with_properties(service_instance,
|
||||
vim.HostStorageSystem,
|
||||
['storageDeviceInfo.scsiLun'],
|
||||
container_ref=host_ref,
|
||||
traversal_spec=traversal_spec)
|
||||
if not results:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
|
||||
log.trace('[{0}] Retrieved {1} devices: {2}'.format(
|
||||
hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])),
|
||||
', '.join([l.canonicalName for l in
|
||||
results[0].get('storageDeviceInfo.scsiLun', [])])))
|
||||
disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
|
||||
if isinstance(l, vim.HostScsiDisk) and
|
||||
l.canonicalName == disk_id]
|
||||
if not disks:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Disk \'{0}\' was not found in host \'{1}\''
|
||||
''.format(disk_id, hostname))
|
||||
log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath))
|
||||
# Erase the partitions by setting an empty partition spec
|
||||
try:
|
||||
storage_system.UpdateDiskPartitions(disks[0].devicePath,
|
||||
vim.HostDiskPartitionSpec())
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
log.trace('[{0}] Erased partitions on disk \'{1}\''
|
||||
''.format(hostname, disk_id))
|
||||
|
||||
|
||||
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
|
||||
'''
Returns a list of vim.VsanHostDiskMapping objects representing disk groups
in an ESXi host, filtered by the canonical names of their cache disks.

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

cache_disk_ids
    The list of canonical names of the cache disks to be retrieved. The
    canonical name of the cache disk is enough to identify the disk group
    because it is guaranteed to have one and only one cache disk.
    Default is None.

get_all_disk_groups
    Specifies whether to retrieve all disk groups in the host.
    Default value is False.
'''
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if get_all_disk_groups:
|
||||
log.trace('Retrieving all disk groups on host \'{0}\''
|
||||
''.format(hostname))
|
||||
else:
|
||||
log.trace('Retrieving disk groups from host \'{0}\', with cache disk '
|
||||
'ids : ({1})'.format(hostname, cache_disk_ids))
|
||||
if not cache_disk_ids:
|
||||
return []
|
||||
try:
|
||||
vsan_host_config = host_ref.config.vsanHostConfig
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
if not vsan_host_config:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'No host config found on host \'{0}\''.format(hostname))
|
||||
vsan_storage_info = vsan_host_config.storageInfo
|
||||
if not vsan_storage_info:
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'No vsan storage info found on host \'{0}\''.format(hostname))
|
||||
vsan_disk_mappings = vsan_storage_info.diskMapping
|
||||
if not vsan_disk_mappings:
|
||||
return []
|
||||
disk_groups = [dm for dm in vsan_disk_mappings if
|
||||
(get_all_disk_groups or
|
||||
(dm.ssd.canonicalName in cache_disk_ids))]
|
||||
log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : '
|
||||
'{1}'.format(hostname,
|
||||
[d.ssd.canonicalName for d in disk_groups]))
|
||||
return disk_groups
|
||||
|
||||
|
||||
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
|
||||
'''
|
||||
Checks that the disks in a disk group are as expected and raises
|
||||
CheckError exceptions if the check fails
|
||||
'''
|
||||
if not disk_group.ssd.canonicalName == cache_disk_id:
|
||||
raise salt.exceptions.ArgumentValueError(
|
||||
'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
|
||||
'\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
|
||||
if sorted([d.canonicalName for d in disk_group.nonSsd]) != \
|
||||
sorted(capacity_disk_ids):
|
||||
|
||||
raise salt.exceptions.ArgumentValueError(
|
||||
'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
|
||||
''.format(sorted([d.canonicalName for d in disk_group.nonSsd]),
|
||||
sorted(capacity_disk_ids)))
|
||||
log.trace('Checked disks in diskgroup with cache disk id \'{0}\''
|
||||
''.format(cache_disk_id))
|
||||
return True
|
||||
|
||||
|
||||
#TODO Support host caches on multiple datastores
|
||||
def get_host_cache(host_ref, host_cache_manager=None):
|
||||
'''
Returns the host cache configuration (vim.HostCacheConfigurationInfo) if
the host cache is configured on the specified host, otherwise returns None

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

host_cache_manager
    The vim.HostCacheConfigurationManager object representing the cache
    configuration manager on the specified host. Default is None. If None,
    it will be retrieved in the method
'''
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
service_instance = get_service_instance_from_managed_object(host_ref)
|
||||
log.trace('Retrieving the host cache on host \'{0}\''.format(hostname))
|
||||
if not host_cache_manager:
|
||||
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
|
||||
path='configManager.cacheConfigurationManager',
|
||||
type=vim.HostSystem,
|
||||
skip=False)
|
||||
results = get_mors_with_properties(service_instance,
|
||||
vim.HostCacheConfigurationManager,
|
||||
['cacheConfigurationInfo'],
|
||||
container_ref=host_ref,
|
||||
traversal_spec=traversal_spec)
|
||||
if not results or not results[0].get('cacheConfigurationInfo'):
|
||||
log.trace('Host \'{0}\' has no host cache'.format(hostname))
|
||||
return None
|
||||
return results[0]['cacheConfigurationInfo'][0]
|
||||
else:
|
||||
results = get_properties_of_managed_object(host_cache_manager,
|
||||
['cacheConfigurationInfo'])
|
||||
if not results:
|
||||
log.trace('Host \'{0}\' has no host cache'.format(hostname))
|
||||
return None
|
||||
return results['cacheConfigurationInfo'][0]
|
||||
|
||||
|
||||
#TODO Support host caches on multiple datastores
|
||||
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
|
||||
host_cache_manager=None):
|
||||
'''
Configures the host cache of the specified host

host_ref
    The vim.HostSystem object representing the host that contains the
    requested disks.

datastore_ref
    The vim.Datastore object representing the datastore the host cache will
    be configured on.

swap_size_MiB
    The size of the swap in mebibytes (MiB).

host_cache_manager
    The vim.HostCacheConfigurationManager object representing the cache
    configuration manager on the specified host. Default is None. If None,
    it will be retrieved in the method
'''
|
||||
hostname = get_managed_object_name(host_ref)
|
||||
if not host_cache_manager:
|
||||
props = get_properties_of_managed_object(
|
||||
host_ref, ['configManager.cacheConfigurationManager'])
|
||||
if not props.get('configManager.cacheConfigurationManager'):
|
||||
raise salt.exceptions.VMwareObjectRetrievalError(
|
||||
'Host \'{0}\' has no host cache'.format(hostname))
|
||||
host_cache_manager = props['configManager.cacheConfigurationManager']
|
||||
log.trace('Configuring the host cache on host \'{0}\', datastore \'{1}\', '
|
||||
'swap size={2} MiB'.format(hostname, datastore_ref.name,
|
||||
swap_size_MiB))
|
||||
|
||||
spec = vim.HostCacheConfigurationSpec(
|
||||
datastore=datastore_ref,
|
||||
swapSize=swap_size_MiB)
|
||||
log.trace('host_cache_spec={0}'.format(spec))
|
||||
try:
|
||||
task = host_cache_manager.ConfigureHostCache_Task(spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(
|
||||
'Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
||||
wait_for_task(task, hostname, 'HostCacheConfigurationTask')
|
||||
log.trace('Configured host cache on host \'{0}\''.format(hostname))
|
||||
return True
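# Hedged usage sketch (assumes 'si' and 'host_ref' were obtained elsewhere;
# the datastore name and 4 GiB swap size are illustrative):
#   ds = get_datastores(si, host_ref, datastore_names=['hostcache'])[0]
#   configure_host_cache(host_ref, ds, swap_size_MiB=4 * 1024)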
|
||||
|
||||
|
||||
def list_hosts(service_instance):
|
||||
'''
|
||||
Returns a list of hosts associated with a given service instance.
|
||||
|
@@ -49,7 +49,8 @@ import logging
|
||||
import ssl
|
||||
|
||||
# Import Salt Libs
|
||||
from salt.exceptions import VMwareApiError, VMwareRuntimeError
|
||||
from salt.exceptions import VMwareApiError, VMwareRuntimeError, \
|
||||
VMwareObjectRetrievalError
|
||||
import salt.utils.vmware
|
||||
|
||||
try:
|
||||
@@ -129,6 +130,308 @@ def get_vsan_cluster_config_system(service_instance):
|
||||
return vc_mos['vsan-cluster-config-system']
|
||||
|
||||
|
||||
def get_vsan_disk_management_system(service_instance):
|
||||
'''
|
||||
Returns a vim.VimClusterVsanVcDiskManagementSystem object
|
||||
|
||||
service_instance
|
||||
Service instance to the host or vCenter
|
||||
'''
|
||||
|
||||
#TODO Replace when better connection mechanism is available
|
||||
|
||||
#For Python 2.7.9 and later, the default SSL context has stricter
#connection handshaking rules. We may need to turn off the hostname checking
#and client side cert verification
|
||||
context = None
|
||||
if sys.version_info[:3] > (2, 7, 8):
|
||||
context = ssl.create_default_context()
|
||||
context.check_hostname = False
|
||||
context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
stub = service_instance._stub
|
||||
vc_mos = vsanapiutils.GetVsanVcMos(stub, context=context)
|
||||
return vc_mos['vsan-disk-management-system']
|
||||
|
||||
|
||||
def get_host_vsan_system(service_instance, host_ref, hostname=None):
|
||||
'''
Returns a host's vsan system

service_instance
    Service instance to the host or vCenter

host_ref
    Reference to the ESXi host

hostname
    Name of ESXi host. Default value is None.
'''
|
||||
if not hostname:
|
||||
hostname = salt.utils.vmware.get_managed_object_name(host_ref)
|
||||
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
|
||||
path='configManager.vsanSystem',
|
||||
type=vim.HostSystem,
|
||||
skip=False)
|
||||
objs = salt.utils.vmware.get_mors_with_properties(
|
||||
service_instance, vim.HostVsanSystem, property_list=['config.enabled'],
|
||||
container_ref=host_ref, traversal_spec=traversal_spec)
|
||||
if not objs:
|
||||
raise VMwareObjectRetrievalError('Host\'s \'{0}\' VSAN system was '
|
||||
'not retrieved'.format(hostname))
|
||||
log.trace('[{0}] Retrieved VSAN system'.format(hostname))
|
||||
return objs[0]['object']
|
||||
|
||||
|
||||
def create_diskgroup(service_instance, vsan_disk_mgmt_system,
|
||||
host_ref, cache_disk, capacity_disks):
|
||||
'''
Creates a disk group

service_instance
    Service instance to the host or vCenter

vsan_disk_mgmt_system
    vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk
    management system retrieved from the vsan endpoint.

host_ref
    vim.HostSystem object representing the target host the disk group will
    be created on

cache_disk
    The vim.HostScsiDisk to be used as a cache disk. It must be an SSD disk.

capacity_disks
    List of vim.HostScsiDisk objects representing the disks to be used as
    capacity disks. Can be either SSD or non-SSD. There must be a minimum
    of 1 capacity disk in the list.
'''
|
||||
hostname = salt.utils.vmware.get_managed_object_name(host_ref)
|
||||
cache_disk_id = cache_disk.canonicalName
|
||||
log.debug('Creating a new disk group with cache disk \'{0}\' on host '
|
||||
'\'{1}\''.format(cache_disk_id, hostname))
|
||||
log.trace('capacity_disk_ids = {0}'.format([c.canonicalName for c in
|
||||
capacity_disks]))
|
||||
spec = vim.VimVsanHostDiskMappingCreationSpec()
|
||||
spec.cacheDisks = [cache_disk]
|
||||
spec.capacityDisks = capacity_disks
|
||||
# All capacity disks must be either ssd or non-ssd (mixed disks are not
|
||||
# supported)
|
||||
spec.creationType = 'allFlash' if getattr(capacity_disks[0], 'ssd') \
|
||||
else 'hybrid'
|
||||
spec.host = host_ref
|
||||
try:
|
||||
task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError('Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError(exc.msg)
|
||||
except vmodl.fault.MethodNotFound as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method))
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareRuntimeError(exc.msg)
|
||||
_wait_for_tasks([task], service_instance)
|
||||
return True
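# Hedged usage sketch (disk ids are illustrative; the helpers come from
# salt.utils.vmware, which is imported by this module):
#   vsan_sys = get_vsan_disk_management_system(si)
#   cache = salt.utils.vmware.get_disks(host_ref, disk_ids=['naa.cache0'])[0]
#   capacity = salt.utils.vmware.get_disks(host_ref,
#                                          disk_ids=['naa.cap0', 'naa.cap1'])
#   create_diskgroup(si, vsan_sys, host_ref, cache, capacity)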
|
||||
|
||||
|
||||
def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system,
|
||||
host_ref, diskgroup, new_capacity_disks):
|
||||
'''
Adds capacity disk(s) to a disk group.

service_instance
    Service instance to the host or vCenter

vsan_disk_mgmt_system
    vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk
    management system retrieved from the vsan endpoint.

host_ref
    vim.HostSystem object representing the target host the disk group will
    be created on

diskgroup
    The vim.VsanHostDiskMapping object representing the host's diskgroup
    where the additional capacity needs to be added

new_capacity_disks
    List of vim.HostScsiDisk objects representing the disks to be added as
    capacity disks. Can be either SSD or non-SSD. There must be a minimum
    of 1 new capacity disk in the list.
'''
|
||||
hostname = salt.utils.vmware.get_managed_object_name(host_ref)
|
||||
cache_disk = diskgroup.ssd
|
||||
cache_disk_id = cache_disk.canonicalName
|
||||
log.debug('Adding capacity to disk group with cache disk \'{0}\' on host '
|
||||
'\'{1}\''.format(cache_disk_id, hostname))
|
||||
log.trace('new_capacity_disk_ids = {0}'.format([c.canonicalName for c in
|
||||
new_capacity_disks]))
|
||||
spec = vim.VimVsanHostDiskMappingCreationSpec()
|
||||
spec.cacheDisks = [cache_disk]
|
||||
spec.capacityDisks = new_capacity_disks
|
||||
# All new capacity disks must be either ssd or non-ssd (mixed disks are not
|
||||
# supported); also they need to match the type of the existing capacity
|
||||
# disks; we assume disks are already validated
|
||||
spec.creationType = 'allFlash' if getattr(new_capacity_disks[0], 'ssd') \
|
||||
else 'hybrid'
|
||||
spec.host = host_ref
|
||||
try:
|
||||
task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError('Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError(exc.msg)
|
||||
except vmodl.fault.MethodNotFound as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method))
|
||||
except vmodl.RuntimeFault as exc:
|
||||
raise VMwareRuntimeError(exc.msg)
|
||||
_wait_for_tasks([task], service_instance)
|
||||
return True
|
||||
|
||||
|
||||
def remove_capacity_from_diskgroup(service_instance, host_ref, diskgroup,
|
||||
capacity_disks, data_evacuation=True,
|
||||
hostname=None,
|
||||
host_vsan_system=None):
|
||||
'''
Removes capacity disk(s) from a disk group.

service_instance
    Service instance to the host or vCenter

host_ref
    Reference to the ESXi host

diskgroup
    The vim.VsanHostDiskMapping object representing the host's diskgroup
    from where the capacity needs to be removed

capacity_disks
    List of vim.HostScsiDisk objects representing the capacity disks to be
    removed. Can be either SSD or non-SSD. There must be a minimum
    of 1 capacity disk in the list.

data_evacuation
    Specifies whether to gracefully evacuate the data on the capacity disks
    before removing them from the disk group. Default value is True.

hostname
    Name of ESXi host. Default value is None.

host_vsan_system
    ESXi host's VSAN system. Default value is None.
'''
|
||||
if not hostname:
|
||||
hostname = salt.utils.vmware.get_managed_object_name(host_ref)
|
||||
cache_disk = diskgroup.ssd
|
||||
cache_disk_id = cache_disk.canonicalName
|
||||
log.debug('Removing capacity from disk group with cache disk \'{0}\' on '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
log.trace('capacity_disk_ids = {0}'.format([c.canonicalName for c in
|
||||
capacity_disks]))
|
||||
if not host_vsan_system:
|
||||
host_vsan_system = get_host_vsan_system(service_instance,
|
||||
host_ref, hostname)
|
||||
# Set to evacuate all data before removing the disks
|
||||
maint_spec = vim.HostMaintenanceSpec()
|
||||
maint_spec.vsanMode = vim.VsanHostDecommissionMode()
|
||||
if data_evacuation:
|
||||
maint_spec.vsanMode.objectAction = \
|
||||
vim.VsanHostDecommissionModeObjectAction.evacuateAllData
|
||||
else:
|
||||
maint_spec.vsanMode.objectAction = \
|
||||
vim.VsanHostDecommissionModeObjectAction.noAction
|
||||
try:
|
||||
task = host_vsan_system.RemoveDisk_Task(disk=capacity_disks,
|
||||
maintenanceSpec=maint_spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError('Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareRuntimeError(exc.msg)
|
||||
salt.utils.vmware.wait_for_task(task, hostname, 'remove_capacity')
|
||||
return True
|
||||
|
||||
|
||||
def remove_diskgroup(service_instance, host_ref, diskgroup, hostname=None,
|
||||
host_vsan_system=None, erase_disk_partitions=False,
|
||||
data_accessibility=True):
|
||||
'''
Removes a disk group.

service_instance
    Service instance to the host or vCenter

host_ref
    Reference to the ESXi host

diskgroup
    The vim.VsanHostDiskMapping object representing the host's diskgroup
    to be removed

hostname
    Name of ESXi host. Default value is None.

host_vsan_system
    ESXi host's VSAN system. Default value is None.

data_accessibility
    Specifies whether to ensure data accessibility. Default value is True.
'''
|
||||
if not hostname:
|
||||
hostname = salt.utils.vmware.get_managed_object_name(host_ref)
|
||||
cache_disk_id = diskgroup.ssd.canonicalName
|
||||
log.debug('Removing disk group with cache disk \'{0}\' on '
|
||||
'host \'{1}\''.format(cache_disk_id, hostname))
|
||||
if not host_vsan_system:
|
||||
host_vsan_system = get_host_vsan_system(
|
||||
service_instance, host_ref, hostname)
|
||||
# Set to evacuate all data before removing the disks
|
||||
maint_spec = vim.HostMaintenanceSpec()
|
||||
maint_spec.vsanMode = vim.VsanHostDecommissionMode()
|
||||
object_action = vim.VsanHostDecommissionModeObjectAction
|
||||
if data_accessibility:
|
||||
maint_spec.vsanMode.objectAction = \
|
||||
object_action.ensureObjectAccessibility
|
||||
else:
|
||||
maint_spec.vsanMode.objectAction = object_action.noAction
|
||||
try:
|
||||
task = host_vsan_system.RemoveDiskMapping_Task(
|
||||
mapping=[diskgroup], maintenanceSpec=maint_spec)
|
||||
except vim.fault.NoPermission as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError('Not enough permissions. Required privilege: '
|
||||
'{0}'.format(exc.privilegeId))
|
||||
except vim.fault.VimFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareApiError(exc.msg)
|
||||
except vmodl.RuntimeFault as exc:
|
||||
log.exception(exc)
|
||||
raise VMwareRuntimeError(exc.msg)
|
||||
salt.utils.vmware.wait_for_task(task, hostname, 'remove_diskgroup')
|
||||
log.debug('Removed disk group with cache disk \'{0}\' '
|
||||
'on host \'{1}\''.format(cache_disk_id, hostname))
|
||||
return True
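
# A usage sketch (not part of the diff), assuming a connected
# `service_instance`; 'dc1' is a placeholder datacenter name and the
# diskMapping path follows pyVmomi's vSAN host config layout:
#
#     host_ref = salt.utils.vmware.get_hosts(service_instance,
#                                            datacenter_name='dc1',
#                                            get_all_hosts=True)[0]
#     hostname = salt.utils.vmware.get_managed_object_name(host_ref)
#     host_vsan_system = get_host_vsan_system(service_instance, host_ref,
#                                             hostname)
#     for dg in host_vsan_system.config.storageInfo.diskMapping:
#         remove_diskgroup(service_instance, host_ref, dg,
#                          data_accessibility=True)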


def get_cluster_vsan_info(cluster_ref):
    '''
    Returns the extended cluster vsan configuration object
@ -14,6 +14,7 @@ from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock

# Import Salt libraries
from salt.exceptions import ArgumentValueError
import salt.utils.vmware
# Import Third Party Libs
try:
@ -54,6 +55,14 @@ class GetHostsTestCase(TestCase):
        self.mock_prop_hosts = [self.mock_prop_host1, self.mock_prop_host2,
                                self.mock_prop_host3]

    def test_cluster_no_datacenter(self):
        with self.assertRaises(ArgumentValueError) as excinfo:
            salt.utils.vmware.get_hosts(self.mock_si,
                                        cluster_name='fake_cluster')
        self.assertEqual(excinfo.exception.strerror,
                         'Must specify the datacenter when specifying the '
                         'cluster')
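
    # Sketch (not part of the diff): the constraint exercised above means a
    # cluster-scoped lookup must also name the datacenter, e.g. (with `si` a
    # connected service instance and the names being placeholders):
    #
    #     hosts = salt.utils.vmware.get_hosts(si,
    #                                         datacenter_name='dc1',
    #                                         cluster_name='cluster1')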

    def test_get_si_no_datacenter_no_cluster(self):
        mock_get_mors = MagicMock()
        mock_get_root_folder = MagicMock(return_value=self.mock_root_folder)
@ -124,23 +133,20 @@ class GetHostsTestCase(TestCase):
        self.assertEqual(res, [])

    def test_filter_cluster(self):
        cluster1 = vim.ClusterComputeResource('fake_good_cluster')
        cluster2 = vim.ClusterComputeResource('fake_bad_cluster')
        # Mock cluster1.name and cluster2.name
        cluster1._stub = MagicMock(InvokeAccessor=MagicMock(
            return_value='fake_good_cluster'))
        cluster2._stub = MagicMock(InvokeAccessor=MagicMock(
            return_value='fake_bad_cluster'))
        self.mock_prop_host1['parent'] = cluster2
        self.mock_prop_host2['parent'] = cluster1
        self.mock_prop_host3['parent'] = cluster1
        self.mock_prop_host1['parent'] = vim.ClusterComputeResource('cluster')
        self.mock_prop_host2['parent'] = vim.ClusterComputeResource('cluster')
        self.mock_prop_host3['parent'] = vim.Datacenter('dc')
        mock_get_cl_name = MagicMock(
            side_effect=['fake_bad_cluster', 'fake_good_cluster'])
        with patch('salt.utils.vmware.get_mors_with_properties',
                   MagicMock(return_value=self.mock_prop_hosts)):
            res = salt.utils.vmware.get_hosts(self.mock_si,
                                              datacenter_name='fake_datacenter',
                                              cluster_name='fake_good_cluster',
                                              get_all_hosts=True)
        self.assertEqual(res, [self.mock_host2, self.mock_host3])
            with patch('salt.utils.vmware.get_managed_object_name',
                       mock_get_cl_name):
                res = salt.utils.vmware.get_hosts(
                    self.mock_si, datacenter_name='fake_datacenter',
                    cluster_name='fake_good_cluster', get_all_hosts=True)
        self.assertEqual(mock_get_cl_name.call_count, 2)
        self.assertEqual(res, [self.mock_host2])

    def test_no_hosts(self):
        with patch('salt.utils.vmware.get_mors_with_properties',