Merge branch 'develop' into develop

assafShapira 2017-09-26 08:34:17 +03:00 committed by GitHub
commit 993dfdb1f2
11 changed files with 2898 additions and 70 deletions


@@ -26,7 +26,7 @@ _XCCDF_MAP = {
'cmd_pattern': (
"oscap xccdf eval "
"--oval-results --results results.xml --report report.html "
"--profile {0} {1} {2}"
"--profile {0} {1}"
)
}
}
@@ -73,7 +73,6 @@ def xccdf(params):
'''
params = shlex.split(params)
policy = params[-1]
del params[-1]
success = True
error = None
@@ -90,7 +89,7 @@ def xccdf(params):
error = str(err)
if success:
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, " ".join(argv), policy)
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
tempdir = tempfile.mkdtemp()
proc = Popen(
shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
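With this change the command no longer interpolates the extra argv segment; a hypothetical rendering of the updated ``cmd_pattern`` (profile and policy names below are placeholders):

.. code-block:: bash

    oscap xccdf eval --oval-results --results results.xml --report report.html --profile my_profile policy.xml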


@@ -27,6 +27,20 @@ Installation Prerequisites
pip install purestorage
- Configure Pure Storage FlashArray authentication. Use one of the following
three methods.
1) From the minion config

.. code-block:: yaml

    pure_tags:
      fa:
        san_ip: management vip or hostname for the FlashArray
        api_token: A valid api token for the FlashArray being managed

2) From environment (PUREFA_IP and PUREFA_API) - see the sketch below
3) From the pillar (PUREFA_IP and PUREFA_API)
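A minimal sketch of method 2, exporting the environment variables before the minion starts (both values below are placeholders):

.. code-block:: bash

    export PUREFA_IP=flasharray.example.com
    export PUREFA_API=6e1b80a1-cd4d-4101-87a7-8c1ea4c34d97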
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purestorage
@@ -195,7 +209,7 @@ def snap_create(name, suffix=None):
Will return False if the volume selected to snap does not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume to snapshot
@@ -231,7 +245,7 @@ def snap_delete(name, suffix=None, eradicate=False):
Will return False if selected snapshot does not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -273,7 +287,7 @@ def snap_eradicate(name, suffix=None):
Will return False if the snapshot is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -306,7 +320,7 @@ def volume_create(name, size=None):
Will return False if volume already exists.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume (truncated to 63 characters)
@@ -344,7 +358,7 @@ def volume_delete(name, eradicate=False):
Will return False if the volume doesn't exist or is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -383,7 +397,7 @@ def volume_eradicate(name):
Will return False if the volume is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -413,7 +427,7 @@ def volume_extend(name, size):
Will return False if new size is less than or equal to existing size.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -451,7 +465,7 @@ def snap_volume_create(name, target, overwrite=False):
Will return False if target volume already exists and
overwrite is not specified, or selected snapshot doesn't exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume snapshot
@@ -497,7 +511,7 @@ def volume_clone(name, target, overwrite=False):
Will return False if source volume doesn't exist, or
target volume already exists and overwrite not specified.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -541,7 +555,7 @@ def volume_attach(name, host):
Host and volume must exist or else will return False.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -574,7 +588,7 @@ def volume_detach(name, host):
Will return False if either host or volume do not exist, or
if selected volume isn't already connected to the host.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@@ -608,7 +622,7 @@ def host_create(name, iqn=None, wwn=None):
Fibre Channel parameters are not in a valid format.
See Pure Storage FlashArray documentation.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host (truncated to 63 characters)
@@ -659,7 +673,7 @@ def host_update(name, iqn=None, wwn=None):
by another host, or are not in a valid format.
See Pure Storage FlashArray documentation.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host
@@ -699,7 +713,7 @@ def host_delete(name):
Will return False if the host doesn't exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host
@@ -735,7 +749,7 @@ def hg_create(name, host=None, volume=None):
Will return False if hostgroup already exists, or if
named host or volume do not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup (truncated to 63 characters)
@@ -791,7 +805,7 @@ def hg_update(name, host=None, volume=None):
Will return False if the hostgroup doesn't exist, or host
or volume do not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@@ -837,7 +851,7 @@ def hg_delete(name):
Will return False if the hostgroup is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@@ -875,7 +889,7 @@ def hg_remove(name, volume=None, host=None):
Will return False if the hostgroup does not exist, or named host or volume are
not in the hostgroup.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@@ -936,7 +950,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True):
hostgroups, hosts or volumes
* Named type for protection group does not exist
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@@ -1029,7 +1043,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None):
* Incorrect type selected for current protection group type
* Specified type does not exist
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@@ -1119,7 +1133,7 @@ def pg_delete(name, eradicate=False):
Will return False if protection group is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@@ -1156,7 +1170,7 @@ def pg_eradicate(name):
Will return False if protection group is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@@ -1188,7 +1202,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None):
* Protection group does not exist
* Specified type is not currently associated with the protection group
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group


@@ -3622,6 +3622,992 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=None):
return ret
def _get_dvs_config_dict(dvs_name, dvs_config):
'''
Returns the dict representation of the DVS config
dvs_name
The name of the DVS
dvs_config
The DVS config
'''
log.trace('Building the dict of the DVS \'{0}\' config'.format(dvs_name))
conf_dict = {'name': dvs_name,
'contact_email': dvs_config.contact.contact,
'contact_name': dvs_config.contact.name,
'description': dvs_config.description,
'lacp_api_version': dvs_config.lacpApiVersion,
'network_resource_control_version':
dvs_config.networkResourceControlVersion,
'network_resource_management_enabled':
dvs_config.networkResourceManagementEnabled,
'max_mtu': dvs_config.maxMtu}
if isinstance(dvs_config.uplinkPortPolicy,
vim.DVSNameArrayUplinkPortPolicy):
conf_dict.update(
{'uplink_names': dvs_config.uplinkPortPolicy.uplinkPortName})
return conf_dict
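For illustration, the dict this helper builds for a hypothetical DVS (the keys come from the code above; all values below are made up):

.. code-block:: python

    {'name': 'dvs1',
     'contact_email': 'admin@example.com',
     'contact_name': 'admin',
     'description': 'example DVS',
     'lacp_api_version': 'multipleLag',
     'network_resource_control_version': 'version3',
     'network_resource_management_enabled': True,
     'max_mtu': 9000,
     'uplink_names': ['dvUplink1', 'dvUplink2']}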
def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
'''
Returns the dict representation of the DVS link discovery protocol
dvs_name
The name of the DVS
dvs_link_disc_protocol
The DVS link discovery protocol
'''
log.trace('Building the dict of the DVS \'{0}\' link discovery '
'protocol'.format(dvs_name))
return {'operation': dvs_link_disc_protocol.operation,
'protocol': dvs_link_disc_protocol.protocol}
def _get_dvs_product_info(dvs_name, dvs_product_info):
'''
Returns the dict representation of the DVS product_info
dvs_name
The name of the DVS
dvs_product_info
The DVS product info
'''
log.trace('Building the dict of the DVS \'{0}\' product '
'info'.format(dvs_name))
return {'name': dvs_product_info.name,
'vendor': dvs_product_info.vendor,
'version': dvs_product_info.version}
def _get_dvs_capability(dvs_name, dvs_capability):
'''
Returns the dict representation of the DVS capability
dvs_name
The name of the DVS
dvs_capability
The DVS capability
'''
log.trace('Building the dict of the DVS \'{0}\' capability'
''.format(dvs_name))
return {'operation_supported': dvs_capability.dvsOperationSupported,
'portgroup_operation_supported':
dvs_capability.dvPortGroupOperationSupported,
'port_operation_supported': dvs_capability.dvPortOperationSupported}
def _get_dvs_infrastructure_traffic_resources(dvs_name,
dvs_infra_traffic_ress):
'''
Returns a list of dict representations of the DVS infrastructure traffic
resource
dvs_name
The name of the DVS
dvs_infra_traffic_ress
The DVS infrastructure traffic resources
'''
log.trace('Building the dicts of the DVS \'{0}\' infrastructure traffic '
'resources'.format(dvs_name))
res_dicts = []
for res in dvs_infra_traffic_ress:
res_dict = {'key': res.key,
'limit': res.allocationInfo.limit,
'reservation': res.allocationInfo.reservation}
if res.allocationInfo.shares:
res_dict.update({'num_shares': res.allocationInfo.shares.shares,
'share_level': res.allocationInfo.shares.level})
res_dicts.append(res_dict)
return res_dicts
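Each element of the returned list has the same shape as the infrastructure traffic resource pools in the ``salt.states.dvs`` example; a hypothetical entry:

.. code-block:: python

    {'key': 'vmotion', 'limit': 32000, 'reservation': 0,
     'num_shares': 50, 'share_level': 'normal'}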
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def list_dvss(datacenter=None, dvs_names=None, service_instance=None):
'''
Returns a list of distributed virtual switches (DVSs).
The list can be filtered by the datacenter or DVS names.
datacenter
The datacenter to look for DVSs in.
Default value is None.
dvs_names
List of DVS names to look for. If None, all DVSs are returned.
Default value is None.
.. code-block:: bash
salt '*' vsphere.list_dvss
salt '*' vsphere.list_dvss dvs_names=[dvs1,dvs2]
'''
ret_list = []
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
for dvs in salt.utils.vmware.get_dvss(dc_ref, dvs_names, (not dvs_names)):
dvs_dict = {}
# XXX: Because of how VMware did DVS object inheritance we can't
# be more restrictive when retrieving the dvs config; we have to
# retrieve the entire object
props = salt.utils.vmware.get_properties_of_managed_object(
dvs, ['name', 'config', 'capability', 'networkResourcePool'])
dvs_dict = _get_dvs_config_dict(props['name'], props['config'])
# Product info
dvs_dict.update(
{'product_info':
_get_dvs_product_info(props['name'],
props['config'].productInfo)})
# Link Discovery Protocol
if props['config'].linkDiscoveryProtocolConfig:
dvs_dict.update(
{'link_discovery_protocol':
_get_dvs_link_discovery_protocol(
props['name'],
props['config'].linkDiscoveryProtocolConfig)})
# Capability
dvs_dict.update({'capability':
_get_dvs_capability(props['name'],
props['capability'])})
# InfrastructureTrafficResourceConfig - available with vSphere 6.0
if hasattr(props['config'], 'infrastructureTrafficResourceConfig'):
dvs_dict.update({
'infrastructure_traffic_resource_pools':
_get_dvs_infrastructure_traffic_resources(
props['name'],
props['config'].infrastructureTrafficResourceConfig)})
ret_list.append(dvs_dict)
return ret_list
def _apply_dvs_config(config_spec, config_dict):
'''
Applies the values of the config dict dictionary to a config spec
(vim.VMwareDVSConfigSpec)
'''
if config_dict.get('name'):
config_spec.name = config_dict['name']
if config_dict.get('contact_email') or config_dict.get('contact_name'):
if not config_spec.contact:
config_spec.contact = vim.DVSContactInfo()
config_spec.contact.contact = config_dict.get('contact_email')
config_spec.contact.name = config_dict.get('contact_name')
if config_dict.get('description'):
config_spec.description = config_dict.get('description')
if config_dict.get('max_mtu'):
config_spec.maxMtu = config_dict.get('max_mtu')
if config_dict.get('lacp_api_version'):
config_spec.lacpApiVersion = config_dict.get('lacp_api_version')
if config_dict.get('network_resource_control_version'):
config_spec.networkResourceControlVersion = \
config_dict.get('network_resource_control_version')
if config_dict.get('uplink_names'):
if not config_spec.uplinkPortPolicy or \
not isinstance(config_spec.uplinkPortPolicy,
vim.DVSNameArrayUplinkPortPolicy):
config_spec.uplinkPortPolicy = \
vim.DVSNameArrayUplinkPortPolicy()
config_spec.uplinkPortPolicy.uplinkPortName = \
config_dict['uplink_names']
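A usage sketch, assuming pyVmomi is importable (this mirrors how ``create_dvs`` below uses the helper; the values are placeholders):

.. code-block:: python

    spec = vim.VMwareDVSConfigSpec()
    _apply_dvs_config(spec, {'name': 'dvs1',
                             'max_mtu': 9000,
                             'uplink_names': ['dvUplink1', 'dvUplink2']})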
def _apply_dvs_link_discovery_protocol(disc_prot_config, disc_prot_dict):
'''
Applies the values of the disc_prot_dict dictionary to a link discovery
protocol config object (vim.LinkDiscoveryProtocolConfig)
'''
disc_prot_config.operation = disc_prot_dict['operation']
disc_prot_config.protocol = disc_prot_dict['protocol']
def _apply_dvs_product_info(product_info_spec, product_info_dict):
'''
Applies the values of the product_info_dict dictionary to a product info
spec (vim.DistributedVirtualSwitchProductSpec)
'''
if product_info_dict.get('name'):
product_info_spec.name = product_info_dict['name']
if product_info_dict.get('vendor'):
product_info_spec.vendor = product_info_dict['vendor']
if product_info_dict.get('version'):
product_info_spec.version = product_info_dict['version']
def _apply_dvs_capability(capability_spec, capability_dict):
'''
Applies the values of the capability_dict dictionary to a DVS capability
object (vim.DVSCapability)
'''
if 'operation_supported' in capability_dict:
capability_spec.dvsOperationSupported = \
capability_dict['operation_supported']
if 'port_operation_supported' in capability_dict:
capability_spec.dvPortOperationSupported = \
capability_dict['port_operation_supported']
if 'portgroup_operation_supported' in capability_dict:
capability_spec.dvPortGroupOperationSupported = \
capability_dict['portgroup_operation_supported']
def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
resource_dicts):
'''
Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DvsHostInfrastructureTrafficResource)
'''
for res_dict in resource_dicts:
filtered_traffic_resources = \
[r for r in infra_traffic_resources if r.key == res_dict['key']]
if filtered_traffic_resources:
traffic_res = filtered_traffic_resources[0]
else:
traffic_res = vim.DvsHostInfrastructureTrafficResource()
traffic_res.key = res_dict['key']
traffic_res.allocationInfo = \
vim.DvsHostInfrastructureTrafficResourceAllocation()
infra_traffic_resources.append(traffic_res)
if res_dict.get('limit'):
traffic_res.allocationInfo.limit = res_dict['limit']
if res_dict.get('reservation'):
traffic_res.allocationInfo.reservation = res_dict['reservation']
if res_dict.get('num_shares') or res_dict.get('share_level'):
if not traffic_res.allocationInfo.shares:
traffic_res.allocationInfo.shares = vim.SharesInfo()
if res_dict.get('share_level'):
traffic_res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
if res_dict.get('num_shares'):
#XXX Even though we always set the number of shares if provided,
#the vCenter will ignore it unless the share level is 'custom'.
traffic_res.allocationInfo.shares.shares = res_dict['num_shares']
def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts):
'''
Applies the values of the resource dictionaries to network resource pools,
creating the resource pools if required
(vim.DVSNetworkResourcePoolConfigSpec)
'''
for res_dict in resource_dicts:
ress = [r for r in network_resource_pools if r.key == res_dict['key']]
if ress:
res = ress[0]
else:
res = vim.DVSNetworkResourcePoolConfigSpec()
res.key = res_dict['key']
res.allocationInfo = \
vim.DVSNetworkResourcePoolAllocationInfo()
network_resource_pools.append(res)
if res_dict.get('limit'):
res.allocationInfo.limit = res_dict['limit']
if res_dict.get('num_shares') and res_dict.get('share_level'):
if not res.allocationInfo.shares:
res.allocationInfo.shares = vim.SharesInfo()
res.allocationInfo.shares.shares = res_dict['num_shares']
res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def create_dvs(dvs_dict, dvs_name, service_instance=None):
'''
Creates a distributed virtual switch (DVS).
Note: The ``dvs_name`` param will override any name set in ``dvs_dict``.
dvs_dict
Dict representation of the new DVS (example in salt.states.dvs)
dvs_name
Name of the DVS to be created.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_dvs dvs_dict=$dvs_dict dvs_name=dvs_name
'''
log.trace('Creating dvs \'{0}\' with dict = {1}'.format(dvs_name,
dvs_dict))
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
# Make the name of the DVS consistent with the call
dvs_dict['name'] = dvs_name
# Build the config spec from the input
dvs_create_spec = vim.DVSCreateSpec()
dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
_apply_dvs_config(dvs_create_spec.configSpec, dvs_dict)
if dvs_dict.get('product_info'):
dvs_create_spec.productInfo = vim.DistributedVirtualSwitchProductSpec()
_apply_dvs_product_info(dvs_create_spec.productInfo,
dvs_dict['product_info'])
if dvs_dict.get('capability'):
dvs_create_spec.capability = vim.DVSCapability()
_apply_dvs_capability(dvs_create_spec.capability,
dvs_dict['capability'])
if dvs_dict.get('link_discovery_protocol'):
dvs_create_spec.configSpec.linkDiscoveryProtocolConfig = \
vim.LinkDiscoveryProtocolConfig()
_apply_dvs_link_discovery_protocol(
dvs_create_spec.configSpec.linkDiscoveryProtocolConfig,
dvs_dict['link_discovery_protocol'])
if dvs_dict.get('infrastructure_traffic_resource_pools'):
dvs_create_spec.configSpec.infrastructureTrafficResourceConfig = []
_apply_dvs_infrastructure_traffic_resources(
dvs_create_spec.configSpec.infrastructureTrafficResourceConfig,
dvs_dict['infrastructure_traffic_resource_pools'])
log.trace('dvs_create_spec = {}'.format(dvs_create_spec))
salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec)
if 'network_resource_management_enabled' in dvs_dict:
dvs_refs = salt.utils.vmware.get_dvss(dc_ref,
dvs_names=[dvs_name])
if not dvs_refs:
raise VMwareObjectRetrievalError(
'DVS \'{0}\' wasn\'t found in datacenter \'{1}\''
''.format(dvs_name, datacenter))
dvs_ref = dvs_refs[0]
salt.utils.vmware.set_dvs_network_resource_management_enabled(
dvs_ref, dvs_dict['network_resource_management_enabled'])
return True
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def update_dvs(dvs_dict, dvs, service_instance=None):
'''
Updates a distributed virtual switch (DVS).
Note: Updating the product info, capability, uplinks of a DVS is not
supported so the corresponding entries in ``dvs_dict`` will be
ignored.
dvs_dict
Dictionary with the values the DVS should be updated with
(example in salt.states.dvs)
dvs
Name of the DVS to be updated.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1
'''
# Remove ignored properties
log.trace('Updating dvs \'{0}\' with dict = {1}'.format(dvs, dvs_dict))
for prop in ['product_info', 'capability', 'uplink_names', 'name']:
if prop in dvs_dict:
del dvs_dict[prop]
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' wasn\'t found in '
'datacenter \'{1}\''
''.format(dvs, datacenter))
dvs_ref = dvs_refs[0]
# Build the config spec from the input
dvs_props = salt.utils.vmware.get_properties_of_managed_object(
dvs_ref, ['config', 'capability'])
dvs_config = vim.VMwareDVSConfigSpec()
# Copy all of the properties in the config of the DVS to a
# DvsConfigSpec
skipped_properties = ['host']
for prop in dvs_config.__dict__.keys():
if prop in skipped_properties:
continue
if hasattr(dvs_props['config'], prop):
setattr(dvs_config, prop, getattr(dvs_props['config'], prop))
_apply_dvs_config(dvs_config, dvs_dict)
if dvs_dict.get('link_discovery_protocol'):
if not dvs_config.linkDiscoveryProtocolConfig:
dvs_config.linkDiscoveryProtocolConfig = \
vim.LinkDiscoveryProtocolConfig()
_apply_dvs_link_discovery_protocol(
dvs_config.linkDiscoveryProtocolConfig,
dvs_dict['link_discovery_protocol'])
if dvs_dict.get('infrastructure_traffic_resource_pools'):
if not dvs_config.infrastructureTrafficResourceConfig:
dvs_config.infrastructureTrafficResourceConfig = []
_apply_dvs_infrastructure_traffic_resources(
dvs_config.infrastructureTrafficResourceConfig,
dvs_dict['infrastructure_traffic_resource_pools'])
log.trace('dvs_config= {}'.format(dvs_config))
salt.utils.vmware.update_dvs(dvs_ref, dvs_config_spec=dvs_config)
if 'network_resource_management_enabled' in dvs_dict:
salt.utils.vmware.set_dvs_network_resource_management_enabled(
dvs_ref, dvs_dict['network_resource_management_enabled'])
return True
def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config):
'''
Returns the out shaping policy of a distributed virtual portgroup
pg_name
The name of the portgroup
pg_default_port_config
The default port config of the portgroup
'''
log.trace('Retrieving portgroup\'s \'{0}\' out shaping '
'config'.format(pg_name))
out_shaping_policy = pg_default_port_config.outShapingPolicy
if not out_shaping_policy:
return {}
return {'average_bandwidth': out_shaping_policy.averageBandwidth.value,
'burst_size': out_shaping_policy.burstSize.value,
'enabled': out_shaping_policy.enabled.value,
'peak_bandwidth': out_shaping_policy.peakBandwidth.value}
def _get_dvportgroup_security_policy(pg_name, pg_default_port_config):
'''
Returns the security policy of a distributed virtual portgroup
pg_name
The name of the portgroup
pg_default_port_config
The default port config of the portgroup
'''
log.trace('Retrieving portgroup\'s \'{0}\' security policy '
'config'.format(pg_name))
sec_policy = pg_default_port_config.securityPolicy
if not sec_policy:
return {}
return {'allow_promiscuous': sec_policy.allowPromiscuous.value,
'forged_transmits': sec_policy.forgedTransmits.value,
'mac_changes': sec_policy.macChanges.value}
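For illustration, the dict returned for a hypothetical portgroup (values borrowed from the ``salt.states.dvs`` example):

.. code-block:: python

    {'allow_promiscuous': True,
     'forged_transmits': True,
     'mac_changes': False}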
def _get_dvportgroup_teaming(pg_name, pg_default_port_config):
'''
Returns the teaming of a distributed virtual portgroup
pg_name
The name of the portgroup
pg_default_port_config
The default port config of the portgroup
'''
log.trace('Retrieving portgroup\'s \'{0}\' teaming '
'config'.format(pg_name))
teaming_policy = pg_default_port_config.uplinkTeamingPolicy
if not teaming_policy:
return {}
ret_dict = {'notify_switches': teaming_policy.notifySwitches.value,
'policy': teaming_policy.policy.value,
'reverse_policy': teaming_policy.reversePolicy.value,
'rolling_order': teaming_policy.rollingOrder.value}
if teaming_policy.failureCriteria:
failure_criteria = teaming_policy.failureCriteria
ret_dict.update({'failure_criteria': {
'check_beacon': failure_criteria.checkBeacon.value,
'check_duplex': failure_criteria.checkDuplex.value,
'check_error_percent': failure_criteria.checkErrorPercent.value,
'check_speed': failure_criteria.checkSpeed.value,
'full_duplex': failure_criteria.fullDuplex.value,
'percentage': failure_criteria.percentage.value,
'speed': failure_criteria.speed.value}})
if teaming_policy.uplinkPortOrder:
uplink_order = teaming_policy.uplinkPortOrder
ret_dict.update({'port_order': {
'active': uplink_order.activeUplinkPort,
'standby': uplink_order.standbyUplinkPort}})
return ret_dict
def _get_dvportgroup_dict(pg_ref):
'''
Returns a dictionary with a distributed virtual portgroup's data
pg_ref
Portgroup reference
'''
props = salt.utils.vmware.get_properties_of_managed_object(
pg_ref, ['name', 'config.description', 'config.numPorts',
'config.type', 'config.defaultPortConfig'])
pg_dict = {'name': props['name'],
'description': props.get('config.description'),
'num_ports': props['config.numPorts'],
'type': props['config.type']}
if props['config.defaultPortConfig']:
dpg = props['config.defaultPortConfig']
if dpg.vlan and \
isinstance(dpg.vlan,
vim.VmwareDistributedVirtualSwitchVlanIdSpec):
pg_dict.update({'vlan_id': dpg.vlan.vlanId})
pg_dict.update({'out_shaping':
_get_dvportgroup_out_shaping(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'security_policy':
_get_dvportgroup_security_policy(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'teaming':
_get_dvportgroup_teaming(
props['name'],
props['config.defaultPortConfig'])})
return pg_dict
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None):
'''
Returns a list of distributed virtual switch portgroups.
The list can be filtered by the portgroup names or by the DVS.
dvs
Name of the DVS containing the portgroups.
Default value is None.
portgroup_names
List of portgroup names to look for. If None, all portgroups are
returned.
Default value is None
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_dvportgroups
salt '*' vsphere.list_dvportgroups dvs=dvs1
salt '*' vsphere.list_dvportgroups portgroup_names=[pg1]
salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1]
'''
ret_dict = []
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
if dvs:
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
dvs_ref = dvs_refs[0]
get_all_portgroups = not portgroup_names
for pg_ref in salt.utils.vmware.get_dvportgroups(
parent_ref=dvs_ref if dvs else dc_ref,
portgroup_names=portgroup_names,
get_all_portgroups=get_all_portgroups):
ret_dict.append(_get_dvportgroup_dict(pg_ref))
return ret_dict
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def list_uplink_dvportgroup(dvs, service_instance=None):
'''
Returns the uplink portgroup of a distributed virtual switch.
dvs
Name of the DVS containing the portgroup.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_uplink_dvportgroup dvs=dvs_name
'''
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0])
return _get_dvportgroup_dict(uplink_pg_ref)
def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf):
'''
Applies the values in out_shaping_conf to an out_shaping object
pg_name
The name of the portgroup
out_shaping
The vim.DVSTrafficShapingPolicy to apply the config to
out_shaping_conf
The out shaping config
'''
log.trace('Building portgroup\'s \'{0}\' out shaping '
'policy'.format(pg_name))
if out_shaping_conf.get('average_bandwidth'):
out_shaping.averageBandwidth = vim.LongPolicy()
out_shaping.averageBandwidth.value = \
out_shaping_conf['average_bandwidth']
if out_shaping_conf.get('burst_size'):
out_shaping.burstSize = vim.LongPolicy()
out_shaping.burstSize.value = out_shaping_conf['burst_size']
if 'enabled' in out_shaping_conf:
out_shaping.enabled = vim.BoolPolicy()
out_shaping.enabled.value = out_shaping_conf['enabled']
if out_shaping_conf.get('peak_bandwidth'):
out_shaping.peakBandwidth = vim.LongPolicy()
out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth']
def _apply_dvportgroup_security_policy(pg_name, sec_policy, sec_policy_conf):
'''
Applies the values in sec_policy_conf to a security policy object
pg_name
The name of the portgroup
sec_policy
The vim.DVSSecurityPolicy to apply the config to
sec_policy_conf
The security policy config
'''
log.trace('Building portgroup\'s \'{0}\' security policy '.format(pg_name))
if 'allow_promiscuous' in sec_policy_conf:
sec_policy.allowPromiscuous = vim.BoolPolicy()
sec_policy.allowPromiscuous.value = \
sec_policy_conf['allow_promiscuous']
if 'forged_transmits' in sec_policy_conf:
sec_policy.forgedTransmits = vim.BoolPolicy()
sec_policy.forgedTransmits.value = sec_policy_conf['forged_transmits']
if 'mac_changes' in sec_policy_conf:
sec_policy.macChanges = vim.BoolPolicy()
sec_policy.macChanges.value = sec_policy_conf['mac_changes']
def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf):
'''
Applies the values in teaming_conf to a teaming policy object
pg_name
The name of the portgroup
teaming
The vim.VmwareUplinkPortTeamingPolicy to apply the config to
teaming_conf
The teaming config
'''
log.trace('Building portgroup\'s \'{0}\' teaming'.format(pg_name))
if 'notify_switches' in teaming_conf:
teaming.notifySwitches = vim.BoolPolicy()
teaming.notifySwitches.value = teaming_conf['notify_switches']
if 'policy' in teaming_conf:
teaming.policy = vim.StringPolicy()
teaming.policy.value = teaming_conf['policy']
if 'reverse_policy' in teaming_conf:
teaming.reversePolicy = vim.BoolPolicy()
teaming.reversePolicy.value = teaming_conf['reverse_policy']
if 'rolling_order' in teaming_conf:
teaming.rollingOrder = vim.BoolPolicy()
teaming.rollingOrder.value = teaming_conf['rolling_order']
if 'failure_criteria' in teaming_conf:
if not teaming.failureCriteria:
teaming.failureCriteria = vim.DVSFailureCriteria()
failure_criteria_conf = teaming_conf['failure_criteria']
if 'check_beacon' in failure_criteria_conf:
teaming.failureCriteria.checkBeacon = vim.BoolPolicy()
teaming.failureCriteria.checkBeacon.value = \
failure_criteria_conf['check_beacon']
if 'check_duplex' in failure_criteria_conf:
teaming.failureCriteria.checkDuplex = vim.BoolPolicy()
teaming.failureCriteria.checkDuplex.value = \
failure_criteria_conf['check_duplex']
if 'check_error_percent' in failure_criteria_conf:
teaming.failureCriteria.checkErrorPercent = vim.BoolPolicy()
teaming.failureCriteria.checkErrorPercent.value = \
failure_criteria_conf['check_error_percent']
if 'check_speed' in failure_criteria_conf:
teaming.failureCriteria.checkSpeed = vim.StringPolicy()
teaming.failureCriteria.checkSpeed.value = \
failure_criteria_conf['check_speed']
if 'full_duplex' in failure_criteria_conf:
teaming.failureCriteria.fullDuplex = vim.BoolPolicy()
teaming.failureCriteria.fullDuplex.value = \
failure_criteria_conf['full_duplex']
if 'percentage' in failure_criteria_conf:
teaming.failureCriteria.percentage = vim.IntPolicy()
teaming.failureCriteria.percentage.value = \
failure_criteria_conf['percentage']
if 'speed' in failure_criteria_conf:
teaming.failureCriteria.speed = vim.IntPolicy()
teaming.failureCriteria.speed.value = \
failure_criteria_conf['speed']
if 'port_order' in teaming_conf:
if not teaming.uplinkPortOrder:
teaming.uplinkPortOrder = vim.VMwareUplinkPortOrderPolicy()
if 'active' in teaming_conf['port_order']:
teaming.uplinkPortOrder.activeUplinkPort = \
teaming_conf['port_order']['active']
if 'standby' in teaming_conf['port_order']:
teaming.uplinkPortOrder.standbyUplinkPort = \
teaming_conf['port_order']['standby']
def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf):
'''
Applies the values in conf to a distributed portgroup spec
pg_name
The name of the portgroup
pg_spec
The vim.DVPortgroupConfigSpec to apply the config to
pg_conf
The portgroup config
'''
log.trace('Building portgroup\'s \'{0}\' spec'.format(pg_name))
if 'name' in pg_conf:
pg_spec.name = pg_conf['name']
if 'description' in pg_conf:
pg_spec.description = pg_conf['description']
if 'num_ports' in pg_conf:
pg_spec.numPorts = pg_conf['num_ports']
if 'type' in pg_conf:
pg_spec.type = pg_conf['type']
if not pg_spec.defaultPortConfig:
for prop in ['vlan_id', 'out_shaping', 'security_policy', 'teaming']:
if prop in pg_conf:
pg_spec.defaultPortConfig = vim.VMwareDVSPortSetting()
if 'vlan_id' in pg_conf:
pg_spec.defaultPortConfig.vlan = \
vim.VmwareDistributedVirtualSwitchVlanIdSpec()
pg_spec.defaultPortConfig.vlan.vlanId = pg_conf['vlan_id']
if 'out_shaping' in pg_conf:
if not pg_spec.defaultPortConfig.outShapingPolicy:
pg_spec.defaultPortConfig.outShapingPolicy = \
vim.DVSTrafficShapingPolicy()
_apply_dvportgroup_out_shaping(
pg_name, pg_spec.defaultPortConfig.outShapingPolicy,
pg_conf['out_shaping'])
if 'security_policy' in pg_conf:
if not pg_spec.defaultPortConfig.securityPolicy:
pg_spec.defaultPortConfig.securityPolicy = \
vim.DVSSecurityPolicy()
_apply_dvportgroup_security_policy(
pg_name, pg_spec.defaultPortConfig.securityPolicy,
pg_conf['security_policy'])
if 'teaming' in pg_conf:
if not pg_spec.defaultPortConfig.uplinkTeamingPolicy:
pg_spec.defaultPortConfig.uplinkTeamingPolicy = \
vim.VmwareUplinkPortTeamingPolicy()
_apply_dvportgroup_teaming(
pg_name, pg_spec.defaultPortConfig.uplinkTeamingPolicy,
pg_conf['teaming'])
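A usage sketch, assuming pyVmomi is importable (this mirrors ``create_dvportgroup`` below; names and values are placeholders):

.. code-block:: python

    spec = vim.DVPortgroupConfigSpec()
    _apply_dvportgroup_config('pg1', spec,
                              {'name': 'pg1',
                               'type': 'earlyBinding',
                               'num_ports': 128,
                               'vlan_id': 100})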
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def create_dvportgroup(portgroup_dict, portgroup_name, dvs,
service_instance=None):
'''
Creates a distributed virtual portgroup.
Note: The ``portgroup_name`` param will override any name already set
in ``portgroup_dict``.
portgroup_dict
Dictionary with the config values the portgroup should be created with
(example in salt.states.dvs).
portgroup_name
Name of the portgroup to be created.
dvs
Name of the DVS that will contain the portgroup.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>
portgroup_name=pg1 dvs=dvs1
'''
log.trace('Creating portgroup \'{0}\' in dvs \'{1}\' '
'with dict = {2}'.format(portgroup_name, dvs, portgroup_dict))
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
# Make the name of the dvportgroup consistent with the parameter
portgroup_dict['name'] = portgroup_name
spec = vim.DVPortgroupConfigSpec()
_apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)
salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)
return True
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=None):
'''
Updates a distributed virtual portgroup.
portgroup_dict
Dictionary with the values the portgroup should be updated with
(example in salt.states.dvs).
portgroup
Name of the portgroup to be updated.
dvs
Name of the DVS containing the portgroups.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.update_dvportgroup portgroup_dict=<dict>
portgroup=pg1 dvs=dvs1
'''
log.trace('Updating portgroup \'{0}\' in dvs \'{1}\' '
'with dict = {2}'.format(portgroup, dvs, portgroup_dict))
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0],
portgroup_names=[portgroup])
if not pg_refs:
raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not '
'retrieved'.format(portgroup))
pg_props = salt.utils.vmware.get_properties_of_managed_object(pg_refs[0],
['config'])
spec = vim.DVPortgroupConfigSpec()
# Copy existing properties in spec
for prop in ['autoExpand', 'configVersion', 'defaultPortConfig',
'description', 'name', 'numPorts', 'policy', 'portNameFormat',
'scope', 'type', 'vendorSpecificConfig']:
setattr(spec, prop, getattr(pg_props['config'], prop))
_apply_dvportgroup_config(portgroup, spec, portgroup_dict)
salt.utils.vmware.update_dvportgroup(pg_refs[0], spec)
return True
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def remove_dvportgroup(portgroup, dvs, service_instance=None):
'''
Removes a distributed virtual portgroup.
portgroup
Name of the portgroup to be removed.
dvs
Name of the DVS containing the portgroups.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1
'''
log.trace('Removing portgroup \'{0}\' in dvs \'{1}\' '
''.format(portgroup, dvs))
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
if not dvs_refs:
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
'retrieved'.format(dvs))
pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0],
portgroup_names=[portgroup])
if not pg_refs:
raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not '
'retrieved'.format(portgroup))
salt.utils.vmware.remove_dvportgroup(pg_refs[0])
return True
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy

salt/states/dvs.py (new file, 717 lines)

@@ -0,0 +1,717 @@
# -*- coding: utf-8 -*-
'''
Manage VMware distributed virtual switches (DVSs) and their distributed virtual
portgroups (DVportgroups).
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstaley.com>`
Examples
========
Several settings can be changed for DVSs and DVportgroups. Here are two examples
covering all of the settings. Fewer settings can also be used.
DVS
---
.. code-block:: python
'name': 'dvs1',
'max_mtu': 1000,
'uplink_names': [
'dvUplink1',
'dvUplink2',
'dvUplink3'
],
'capability': {
'portgroup_operation_supported': false,
'operation_supported': true,
'port_operation_supported': false
},
'lacp_api_version': 'multipleLag',
'contact_email': 'foo@email.com',
'product_info': {
'version': '6.0.0',
'vendor': 'VMware, Inc.',
'name': 'DVS'
},
'network_resource_management_enabled': true,
'contact_name': 'me@email.com',
'infrastructure_traffic_resource_pools': [
{
'reservation': 0,
'limit': 1000,
'share_level': 'high',
'key': 'management',
'num_shares': 100
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'faultTolerance',
'num_shares': 50
},
{
'reservation': 0,
'limit': 32000,
'share_level': 'normal',
'key': 'vmotion',
'num_shares': 50
},
{
'reservation': 10000,
'limit': -1,
'share_level': 'normal',
'key': 'virtualMachine',
'num_shares': 50
},
{
'reservation': 0,
'limit': -1,
'share_level': 'custom',
'key': 'iSCSI',
'num_shares': 75
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'nfs',
'num_shares': 50
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'hbr',
'num_shares': 50
},
{
'reservation': 8750,
'limit': 15000,
'share_level': 'high',
'key': 'vsan',
'num_shares': 100
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'vdp',
'num_shares': 50
}
],
'link_discovery_protocol': {
'operation': 'listen',
'protocol': 'cdp'
},
'network_resource_control_version': 'version3',
'description': 'Managed by Salt. Random settings.'
Note: The mandatory attribute is: ``name``.
Portgroup
---------
.. code-block:: python
'security_policy': {
'allow_promiscuous': true,
'mac_changes': false,
'forged_transmits': true
},
'name': 'vmotion-v702',
'out_shaping': {
'enabled': true,
'average_bandwidth': 1500,
'burst_size': 4096,
'peak_bandwidth': 1500
},
'num_ports': 128,
'teaming': {
'port_order': {
'active': [
'dvUplink2'
],
'standby': [
'dvUplink1'
]
},
'notify_switches': false,
'reverse_policy': true,
'rolling_order': false,
'policy': 'failover_explicit',
'failure_criteria': {
'check_error_percent': true,
'full_duplex': false,
'check_duplex': false,
'percentage': 50,
'check_speed': 'minimum',
'speed': 20,
'check_beacon': true
}
},
'type': 'earlyBinding',
'vlan_id': 100,
'description': 'Managed by Salt. Random settings.'
Note: The mandatory attributes are: ``name``, ``type``.
Dependencies
============
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9,
or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
Version 5.5.0.2014.1.1 is a known stable version that the original ESXi State
Module was developed against.
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import traceback
import sys
# Import Salt Libs
import salt.exceptions
from salt.ext.six.moves import range
# Import Third Party Libs
try:
from pyVmomi import VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
if not HAS_PYVMOMI:
return False, 'State module did not load: pyVmomi not found'
# We check the supported vim versions to infer the pyVmomi version
if 'vim25/6.0' in VmomiSupport.versionMap and \
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
return False, ('State module did not load: Incompatible versions '
'of Python and pyVmomi present. See Issue #29537.')
return 'dvs'
def mod_init(low):
'''
Init function
'''
return True
def _get_datacenter_name():
'''
Returns the datacenter name configured on the proxy
Supported proxies: esxcluster, esxdatacenter
'''
proxy_type = __salt__['vsphere.get_proxy_type']()
details = None
if proxy_type == 'esxcluster':
details = __salt__['esxcluster.get_details']()
elif proxy_type == 'esxdatacenter':
details = __salt__['esxdatacenter.get_details']()
if not details:
raise salt.exceptions.CommandExecutionError(
'details for proxy type \'{0}\' not loaded'.format(proxy_type))
return details['datacenter']
def dvs_configured(name, dvs):
'''
Configures a DVS.
Creates a new DVS if it doesn't exist in the provided datacenter, or
reconfigures it if it is configured differently.
dvs
DVS dict representation (see module sysdocs)
'''
datacenter_name = _get_datacenter_name()
dvs_name = dvs['name'] if dvs.get('name') else name
log.info('Running state {0} for DVS \'{1}\' in datacenter '
'\'{2}\''.format(name, dvs_name, datacenter_name))
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None}
comments = []
changes = {}
changes_required = False
try:
#TODO dvs validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name],
service_instance=si)
if not dvss:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new DVS '
'\'{1}\' in datacenter \'{2}\''
''.format(name, dvs_name, datacenter_name))
log.info(comments[-1])
else:
dvs['name'] = dvs_name
__salt__['vsphere.create_dvs'](dvs_dict=dvs,
dvs_name=dvs_name,
service_instance=si)
comments.append('Created a new DVS \'{0}\' in datacenter '
'\'{1}\''.format(dvs_name, datacenter_name))
log.info(comments[-1])
changes.update({'dvs': {'new': dvs}})
else:
# DVS already exists. Checking various aspects of the config
props = ['description', 'contact_email', 'contact_name',
'lacp_api_version', 'link_discovery_protocol',
'max_mtu', 'network_resource_control_version',
'network_resource_management_enabled']
log.trace('DVS \'{0}\' found in datacenter \'{1}\'. Checking '
'for any updates in '
'{2}'.format(dvs_name, datacenter_name, props))
props_to_original_values = {}
props_to_updated_values = {}
current_dvs = dvss[0]
for prop in props:
if prop in dvs and dvs[prop] != current_dvs.get(prop):
props_to_original_values[prop] = current_dvs.get(prop)
props_to_updated_values[prop] = dvs[prop]
# Simple infrastructure traffic resource control compare doesn't
# work because num_shares is optional if share_level is not custom
# We need to do a dedicated compare for this property
infra_prop = 'infrastructure_traffic_resource_pools'
original_infra_res_pools = []
updated_infra_res_pools = []
if infra_prop in dvs:
if not current_dvs.get(infra_prop):
updated_infra_res_pools = dvs[infra_prop]
else:
for idx in range(len(dvs[infra_prop])):
if 'num_shares' not in dvs[infra_prop][idx] and \
current_dvs[infra_prop][idx]['share_level'] != \
'custom' and \
'num_shares' in current_dvs[infra_prop][idx]:
del current_dvs[infra_prop][idx]['num_shares']
if dvs[infra_prop][idx] != \
current_dvs[infra_prop][idx]:
original_infra_res_pools.append(
current_dvs[infra_prop][idx])
updated_infra_res_pools.append(
dict(dvs[infra_prop][idx]))
if updated_infra_res_pools:
props_to_original_values[
'infrastructure_traffic_resource_pools'] = \
original_infra_res_pools
props_to_updated_values[
'infrastructure_traffic_resource_pools'] = \
updated_infra_res_pools
if props_to_updated_values:
if __opts__['test']:
changes_string = ''
for p in props_to_updated_values:
if p == 'infrastructure_traffic_resource_pools':
changes_string += \
'\tinfrastructure_traffic_resource_pools:\n'
for idx in range(len(props_to_updated_values[p])):
d = props_to_updated_values[p][idx]
s = props_to_original_values[p][idx]
changes_string += \
('\t\t{0} from \'{1}\' to \'{2}\'\n'
''.format(d['key'], s, d))
else:
changes_string += \
('\t{0} from \'{1}\' to \'{2}\'\n'
''.format(p, props_to_original_values[p],
props_to_updated_values[p]))
comments.append(
'State dvs_configured will update DVS \'{0}\' '
'in datacenter \'{1}\':\n{2}'
''.format(dvs_name, datacenter_name, changes_string))
log.info(comments[-1])
else:
__salt__['vsphere.update_dvs'](
dvs_dict=props_to_updated_values,
dvs=dvs_name,
service_instance=si)
comments.append('Updated DVS \'{0}\' in datacenter \'{1}\''
''.format(dvs_name, datacenter_name))
log.info(comments[-1])
changes.update({'dvs': {'new': props_to_updated_values,
'old': props_to_original_values}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': str(exc),
'result': False if not __opts__['test'] else None})
return ret
if not comments:
# We have no changes
ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is '
'correctly configured. Nothing to be done.'
''.format(dvs_name, datacenter_name)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret
def _get_diff_dict(dict1, dict2):
'''
Returns a dictionary with the diffs between two dictionaries
It will ignore any key that doesn't exist in dict2
'''
ret_dict = {}
for p in dict2.keys():
if p not in dict1:
ret_dict.update({p: {'val1': None, 'val2': dict2[p]}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = _get_diff_dict(dict1[p], dict2[p])
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}})
return ret_dict
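A minimal illustration of the diff format this helper produces (hypothetical input dicts):

.. code-block:: python

    _get_diff_dict({'a': 1, 'b': {'c': 2}},
                   {'a': 1, 'b': {'c': 3}, 'd': 4})
    # => {'b': {'c': {'val1': 2, 'val2': 3}},
    #     'd': {'val1': None, 'val2': 4}}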
def _get_val2_dict_from_diff_dict(diff_dict):
'''
Returns a dictionary with the values stored in val2 of a diff dict.
'''
ret_dict = {}
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if 'val2' in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p]['val2']})
else:
ret_dict.update(
{p: _get_val2_dict_from_diff_dict(diff_dict[p])})
return ret_dict
def _get_val1_dict_from_diff_dict(diff_dict):
'''
Returns a dictionary with the values stored in val1 of a diff dict.
'''
ret_dict = {}
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if 'val1' in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p]['val1']})
else:
ret_dict.update(
{p: _get_val1_dict_from_diff_dict(diff_dict[p])})
return ret_dict
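Applied to the diff dict from the illustration above, the two extractors recover the old and new values respectively:

.. code-block:: python

    diff = {'b': {'c': {'val1': 2, 'val2': 3}}}
    _get_val1_dict_from_diff_dict(diff)  # {'b': {'c': 2}}
    _get_val2_dict_from_diff_dict(diff)  # {'b': {'c': 3}}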
def _get_changes_from_diff_dict(diff_dict):
'''
Returns a list of string messages describing the differences in a diff dict.
Each inner message is indented one tab deeper
'''
changes_strings = []
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if sorted(diff_dict[p].keys()) == ['val1', 'val2']:
# Some string formatting
from_str = diff_dict[p]['val1']
if isinstance(diff_dict[p]['val1'], str):
from_str = '\'{0}\''.format(diff_dict[p]['val1'])
elif isinstance(diff_dict[p]['val1'], list):
from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1']))
to_str = diff_dict[p]['val2']
if isinstance(diff_dict[p]['val2'], str):
to_str = '\'{0}\''.format(diff_dict[p]['val2'])
elif isinstance(diff_dict[p]['val2'], list):
to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2']))
changes_strings.append('{0} from {1} to {2}'.format(
p, from_str, to_str))
else:
sub_changes = _get_changes_from_diff_dict(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend(['\t{0}'.format(c)
for c in sub_changes])
return changes_strings
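Continuing the same hypothetical diff dict, the rendered change messages come out as:

.. code-block:: python

    _get_changes_from_diff_dict({'b': {'c': {'val1': 2, 'val2': 3}}})
    # => ['b:', '\tc from 2 to 3']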
def portgroups_configured(name, dvs, portgroups):
'''
Configures portgroups on a DVS.
Creates/updates/removes portgroups in a provided DVS
dvs
Name of the DVS
portgroups
Portgroup dict representations (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running state {0} on DVS \'{1}\', datacenter '
'\'{2}\''.format(name, dvs, datacenter))
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
comments = []
changes = {}
changes_required = False
try:
#TODO portgroups validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_pgs = __salt__['vsphere.list_dvportgroups'](
dvs=dvs, service_instance=si)
expected_pg_names = []
for pg in portgroups:
pg_name = pg['name']
expected_pg_names.append(pg_name)
del pg['name']
log.info('Checking pg \'{0}\''.format(pg_name))
filtered_current_pgs = \
[p for p in current_pgs if p.get('name') == pg_name]
if not filtered_current_pgs:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new portgroup '
'\'{1}\' in DVS \'{2}\', datacenter '
'\'{3}\''.format(name, pg_name, dvs,
datacenter))
else:
__salt__['vsphere.create_dvportgroup'](
portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
service_instance=si)
comments.append('Created a new portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update({pg_name: {'new': pg}})
else:
# Portgroup already exists. Checking the config
log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter '
'\'{2}\'. Checking for any updates.'
''.format(pg_name, dvs, datacenter))
current_pg = filtered_current_pgs[0]
diff_dict = _get_diff_dict(current_pg, pg)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = '
'{0}'.format(changes_strings))
comments.append(
'State {0} will update portgroup \'{1}\' in '
'DVS \'{2}\', datacenter \'{3}\':\n{4}'
''.format(name, pg_name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
service_instance=si)
comments.append('Updated portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update(
{pg_name: {'new':
_get_val2_dict_from_diff_dict(diff_dict),
'old':
_get_val1_dict_from_diff_dict(diff_dict)}})
# Add the uplink portgroup to the expected pg names
uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
expected_pg_names.append(uplink_pg['name'])
# Remove any extra portgroups
for current_pg in current_pgs:
if current_pg['name'] not in expected_pg_names:
changes_required = True
if __opts__['test']:
comments.append('State {0} will remove '
'the portgroup \'{1}\' from DVS \'{2}\', '
'datacenter \'{3}\''
''.format(name, current_pg['name'], dvs,
datacenter))
else:
__salt__['vsphere.remove_dvportgroup'](
portgroup=current_pg['name'], dvs=dvs,
service_instance=si)
comments.append('Removed the portgroup \'{0}\' from DVS '
'\'{1}\', datacenter \'{2}\''
''.format(current_pg['name'], dvs,
datacenter))
log.info(comments[-1])
changes.update({current_pg['name']:
{'old': current_pg}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
'\'{1}\' exist and are correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret
def uplink_portgroup_configured(name, dvs, uplink_portgroup):
'''
Configures the uplink portgroup on a DVS. The state assumes there is only
one uplink portgroup.
dvs
Name of the DVS
uplink_portgroup
Uplink portgroup dict representation (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\''
''.format(name, dvs, datacenter))
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
comments = []
changes = {}
changes_required = False
try:
#TODO portgroup validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
log.trace('current_uplink_portgroup = '
'{0}'.format(current_uplink_portgroup))
diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = '
'{0}'.format(changes_strings))
comments.append(
'State {0} will update the '
'uplink portgroup in DVS \'{1}\', datacenter '
'\'{2}\':\n{3}'
''.format(name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=uplink_portgroup,
portgroup=current_uplink_portgroup['name'],
dvs=dvs,
service_instance=si)
comments.append('Updated the uplink portgroup in DVS '
'\'{0}\', datacenter \'{1}\''
''.format(dvs, datacenter))
log.info(comments[-1])
changes.update(
{'uplink_portgroup':
{'new': _get_val2_dict_from_diff_dict(diff_dict),
'old': _get_val1_dict_from_diff_dict(diff_dict)}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter '
'\'{1}\' is correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret

View File

@@ -217,7 +217,7 @@ class RecursiveDictDiffer(DictDiffer):
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in diff_dict.keys():
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
@@ -267,7 +267,7 @@ class RecursiveDictDiffer(DictDiffer):
keys.append('{0}{1}'.format(prefix, key))
return keys
return _added(self._diffs, prefix='')
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
@@ -290,7 +290,7 @@ class RecursiveDictDiffer(DictDiffer):
prefix='{0}{1}.'.format(prefix, key)))
return keys
return _removed(self._diffs, prefix='')
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
@@ -338,7 +338,7 @@ class RecursiveDictDiffer(DictDiffer):
return keys
return _changed(self._diffs, prefix='')
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
@@ -363,7 +363,7 @@ class RecursiveDictDiffer(DictDiffer):
prefix='{0}{1}.'.format(prefix, key)))
return keys
return _unchanged(self.current_dict, self._diffs, prefix='')
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):

View File

@@ -981,6 +981,333 @@ def get_network_adapter_type(adapter_type):
return vim.vm.device.VirtualE1000e()
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
'''
Returns distributed virtual switches (DVSs) in a datacenter.
dc_ref
The parent datacenter reference.
dvs_names
The names of the DVSs to return. Default is None.
get_all_dvss
Return all DVSs in the datacenter. Default is False.
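
Example (a minimal sketch; ``dc_ref`` is assumed to be an existing
``vim.Datacenter`` reference):

.. code-block:: python

    import salt.utils.vmware as vmware

    # Retrieve two DVSs by name; names that don't exist are skipped.
    dvss = vmware.get_dvss(dc_ref, dvs_names=['dvs1', 'dvs2'])

    # Retrieve all DVSs in the datacenter.
    all_dvss = vmware.get_dvss(dc_ref, get_all_dvss=True)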
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Retrieving DVSs in datacenter \'{0}\', dvs_names=\'{1}\', '
'get_all_dvss={2}'.format(dc_name,
','.join(dvs_names) if dvs_names
else None,
get_all_dvss))
properties = ['name']
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='networkFolder',
skip=True,
type=vim.Datacenter,
selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
path='childEntity',
skip=False,
type=vim.Folder)])
service_instance = get_service_instance_from_managed_object(dc_ref)
items = [i['object'] for i in
get_mors_with_properties(service_instance,
vim.DistributedVirtualSwitch,
container_ref=dc_ref,
property_list=properties,
traversal_spec=traversal_spec)
if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
return items
def get_network_folder(dc_ref):
'''
Retrieves the network folder of a datacenter
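
Example (a minimal sketch; ``dc_ref`` is assumed to be an existing
``vim.Datacenter`` reference):

.. code-block:: python

    import salt.utils.vmware as vmware

    netw_folder = vmware.get_network_folder(dc_ref)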
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Retrieving network folder in datacenter '
'\'{0}\''.format(dc_name))
service_instance = get_service_instance_from_managed_object(dc_ref)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='networkFolder',
skip=False,
type=vim.Datacenter)
entries = get_mors_with_properties(service_instance,
vim.Folder,
container_ref=dc_ref,
property_list=['name'],
traversal_spec=traversal_spec)
if not entries:
raise salt.exceptions.VMwareObjectRetrievalError(
'Network folder in datacenter \'{0}\' wasn\'t retrieved'
''.format(dc_name))
return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
'''
Creates a distributed virtual switches (DVS) in a datacenter.
Returns the reference to the newly created distributed virtual switch.
dc_ref
The parent datacenter reference.
dvs_name
The name of the DVS to create.
dvs_create_spec
The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
Default is None.
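
Example (a minimal sketch; ``dc_ref`` is assumed to be an existing
``vim.Datacenter`` reference):

.. code-block:: python

    from pyVmomi import vim

    import salt.utils.vmware as vmware

    # Build a spec explicitly; if omitted, a default spec carrying
    # only the DVS name is created.
    spec = vim.DVSCreateSpec()
    spec.configSpec = vim.VMwareDVSConfigSpec()
    spec.configSpec.name = 'dvs1'
    vmware.create_dvs(dc_ref, 'dvs1', dvs_create_spec=spec)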
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Creating DVS \'{0}\' in datacenter '
'\'{1}\''.format(dvs_name, dc_name))
if not dvs_create_spec:
dvs_create_spec = vim.DVSCreateSpec()
if not dvs_create_spec.configSpec:
dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
dvs_create_spec.configSpec.name = dvs_name
netw_folder_ref = get_network_folder(dc_ref)
try:
task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, dvs_name, str(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
'''
Updates a distributed virtual switch with the config_spec.
dvs_ref
The DVS reference.
dvs_config_spec
The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
the DVS.
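
Example (a minimal sketch; ``dvs_ref`` is assumed to be an existing
DVS reference; note that ``configVersion`` must match the DVS's
current config version or vCenter rejects the reconfiguration):

.. code-block:: python

    from pyVmomi import vim

    import salt.utils.vmware as vmware

    config_spec = vim.VMwareDVSConfigSpec()
    config_spec.configVersion = dvs_ref.config.configVersion
    config_spec.description = 'managed by salt'
    vmware.update_dvs(dvs_ref, dvs_config_spec=config_spec)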
'''
dvs_name = get_managed_object_name(dvs_ref)
log.trace('Updating dvs \'{0}\''.format(dvs_name))
try:
task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, dvs_name, str(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
'''
Sets whether NIOC is enabled on a DVS.
dvs_ref
The DVS reference.
enabled
Flag specifying whether NIOC is enabled.
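
Example (a minimal sketch; ``dvs_ref`` is assumed to be an existing
DVS reference):

.. code-block:: python

    import salt.utils.vmware as vmware

    # Enable network I/O control (NIOC) on the DVS.
    vmware.set_dvs_network_resource_management_enabled(dvs_ref, True)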
'''
dvs_name = get_managed_object_name(dvs_ref)
log.trace('Setting network resource management enabled to {0} on '
'DVS \'{1}\''.format(enabled, dvs_name))
try:
dvs_ref.EnableNetworkResourceManagement(enable=enabled)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
get_all_portgroups=False):
'''
Returns distributed virtual portgroups (dvportgroups).
The parent object can be either a datacenter or a dvs.
parent_ref
The parent object reference. Can be either a datacenter or a dvs.
portgroup_names
The names of the portgroups to return. Default is None.
get_all_portgroups
Return all portgroups in the parent. Default is False.
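
Example (a minimal sketch; ``dvs_ref`` and ``dc_ref`` are assumed
to be existing DVS and datacenter references):

.. code-block:: python

    import salt.utils.vmware as vmware

    # Retrieve two portgroups by name from a DVS.
    pgs = vmware.get_dvportgroups(dvs_ref,
                                  portgroup_names=['pg1', 'pg2'])

    # Retrieve all portgroups in a datacenter.
    all_pgs = vmware.get_dvportgroups(dc_ref, get_all_portgroups=True)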
'''
if not (isinstance(parent_ref, vim.Datacenter) or
isinstance(parent_ref, vim.DistributedVirtualSwitch)):
raise salt.exceptions.ArgumentValueError(
'Parent has to be either a datacenter, '
'or a distributed virtual switch')
parent_name = get_managed_object_name(parent_ref)
log.trace('Retrieving portgroups in {0} \'{1}\', portgroup_names=\'{2}\', '
'get_all_portgroups={3}'.format(
type(parent_ref).__name__, parent_name,
','.join(portgroup_names) if portgroup_names else None,
get_all_portgroups))
properties = ['name']
if isinstance(parent_ref, vim.Datacenter):
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='networkFolder',
skip=True,
type=vim.Datacenter,
selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
path='childEntity',
skip=False,
type=vim.Folder)])
else: # parent is distributed virtual switch
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='portgroup',
skip=False,
type=vim.DistributedVirtualSwitch)
service_instance = get_service_instance_from_managed_object(parent_ref)
items = [i['object'] for i in
get_mors_with_properties(service_instance,
vim.DistributedVirtualPortgroup,
container_ref=parent_ref,
property_list=properties,
traversal_spec=traversal_spec)
if get_all_portgroups or
(portgroup_names and i['name'] in portgroup_names)]
return items
def get_uplink_dvportgroup(dvs_ref):
'''
Returns the uplink distributed virtual portgroup of a distributed virtual
switch (dvs)
dvs_ref
The dvs reference
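
Example (a minimal sketch; ``dvs_ref`` is assumed to be an existing
DVS reference):

.. code-block:: python

    import salt.utils.vmware as vmware

    uplink_pg = vmware.get_uplink_dvportgroup(dvs_ref)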
'''
dvs_name = get_managed_object_name(dvs_ref)
log.trace('Retrieving uplink portgroup of dvs \'{0}\''.format(dvs_name))
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='portgroup',
skip=False,
type=vim.DistributedVirtualSwitch)
service_instance = get_service_instance_from_managed_object(dvs_ref)
items = [entry['object'] for entry in
get_mors_with_properties(service_instance,
vim.DistributedVirtualPortgroup,
container_ref=dvs_ref,
property_list=['tag'],
traversal_spec=traversal_spec)
if entry['tag'] and
[t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
if not items:
raise salt.exceptions.VMwareObjectRetrievalError(
'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
return items[0]
def create_dvportgroup(dvs_ref, spec):
'''
Creates a distributed virtual portgroup on a distributed virtual switch
(dvs)
dvs_ref
The dvs reference
spec
Portgroup spec (vim.DVPortgroupConfigSpec)
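
Example (a minimal sketch; ``dvs_ref`` is assumed to be an existing
DVS reference and the spec values are illustrative):

.. code-block:: python

    from pyVmomi import vim

    import salt.utils.vmware as vmware

    spec = vim.DVPortgroupConfigSpec()
    spec.name = 'pg1'
    spec.type = 'earlyBinding'
    spec.numPorts = 128
    vmware.create_dvportgroup(dvs_ref, spec)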
'''
dvs_name = get_managed_object_name(dvs_ref)
log.trace('Adding portgroup {0} to dvs '
'\'{1}\''.format(spec.name, dvs_name))
log.trace('spec = {0}'.format(spec))
try:
task = dvs_ref.CreateDVPortgroup_Task(spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, dvs_name, str(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
'''
Updates a distributed virtual portgroup
portgroup_ref
The portgroup reference
spec
Portgroup spec (vim.DVPortgroupConfigSpec)
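
Example (a minimal sketch; ``pg_ref`` is assumed to be an existing
portgroup reference; ``configVersion`` must match the portgroup's
current config version):

.. code-block:: python

    from pyVmomi import vim

    import salt.utils.vmware as vmware

    spec = vim.DVPortgroupConfigSpec()
    spec.configVersion = pg_ref.config.configVersion
    spec.description = 'managed by salt'
    vmware.update_dvportgroup(pg_ref, spec)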
'''
pg_name = get_managed_object_name(portgroup_ref)
log.trace('Updating portgroup {0}'.format(pg_name))
try:
task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, pg_name, str(task.__class__))
def remove_dvportgroup(portgroup_ref):
'''
Removes a distributed virtual portgroup
portgroup_ref
The portgroup reference
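
Example (a minimal sketch; ``pg_ref`` is assumed to be an existing
portgroup reference):

.. code-block:: python

    import salt.utils.vmware as vmware

    vmware.remove_dvportgroup(pg_ref)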
'''
pg_name = get_managed_object_name(portgroup_ref)
log.trace('Removing portgroup {0}'.format(pg_name))
try:
task = portgroup_ref.Destroy_Task()
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, pg_name, str(task.__class__))
def list_objects(service_instance, vim_object, properties=None):
'''
Returns a simple list of objects from a given service instance.

View File

@@ -98,13 +98,13 @@ class Nilrt_ipModuleTest(ModuleCase):
def test_static_all(self):
interfaces = self.__interfaces()
for interface in interfaces:
result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 my.dns.com'])
result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 8.8.8.8'])
self.assertTrue(result)
info = self.run_function('ip.get_interfaces_details')
for interface in info['interfaces']:
self.assertIn('8.8.4.4', interface['ipv4']['dns'])
self.assertIn('my.dns.com', interface['ipv4']['dns'])
self.assertIn('8.8.8.8', interface['ipv4']['dns'])
self.assertEqual(interface['ipv4']['requestmode'], 'static')
self.assertEqual(interface['ipv4']['address'], '192.168.10.4')
self.assertEqual(interface['ipv4']['netmask'], '255.255.255.0')

View File

@@ -49,7 +49,7 @@ class RecursiveDictDifferTestCase(TestCase):
def test_changed_without_ignore_unset_values(self):
self.recursive_diff.ignore_unset_values = False
self.assertEqual(self.recursive_diff.changed(),
['a.c', 'a.e', 'a.g', 'a.f', 'h', 'i'])
['a.c', 'a.e', 'a.f', 'a.g', 'h', 'i'])
def test_unchanged(self):
self.assertEqual(self.recursive_diff.unchanged(),
@@ -89,7 +89,7 @@ class RecursiveDictDifferTestCase(TestCase):
'a:\n'
' c from 2 to 4\n'
' e from \'old_value\' to \'new_value\'\n'
' g from nothing to \'new_key\'\n'
' f from \'old_key\' to nothing\n'
' g from nothing to \'new_key\'\n'
'h from nothing to \'new_key\'\n'
'i from nothing to None')

View File

@@ -32,34 +32,43 @@ class ListDictDifferTestCase(TestCase):
continue
def test_added(self):
self.assertEqual(self.list_diff.added,
[{'key': 5, 'value': 'foo5', 'int_value': 105}])
self.assertEqual(len(self.list_diff.added), 1)
self.assertDictEqual(self.list_diff.added[0],
{'key': 5, 'value': 'foo5', 'int_value': 105})
def test_removed(self):
self.assertEqual(self.list_diff.removed,
[{'key': 3, 'value': 'foo3', 'int_value': 103}])
self.assertEqual(len(self.list_diff.removed), 1)
self.assertDictEqual(self.list_diff.removed[0],
{'key': 3, 'value': 'foo3', 'int_value': 103})
def test_diffs(self):
self.assertEqual(self.list_diff.diffs,
[{2: {'int_value': {'new': 112, 'old': 102}}},
# Added items
{5: {'int_value': {'new': 105, 'old': NONE},
'key': {'new': 5, 'old': NONE},
'value': {'new': 'foo5', 'old': NONE}}},
# Removed items
{3: {'int_value': {'new': NONE, 'old': 103},
'key': {'new': NONE, 'old': 3},
'value': {'new': NONE, 'old': 'foo3'}}}])
self.assertEqual(len(self.list_diff.diffs), 3)
self.assertDictEqual(self.list_diff.diffs[0],
{2: {'int_value': {'new': 112, 'old': 102}}})
self.assertDictEqual(self.list_diff.diffs[1],
# Added items
{5: {'int_value': {'new': 105, 'old': NONE},
'key': {'new': 5, 'old': NONE},
'value': {'new': 'foo5', 'old': NONE}}})
self.assertDictEqual(self.list_diff.diffs[2],
# Removed items
{3: {'int_value': {'new': NONE, 'old': 103},
'key': {'new': NONE, 'old': 3},
'value': {'new': NONE, 'old': 'foo3'}}})
def test_new_values(self):
self.assertEqual(self.list_diff.new_values,
[{'key': 2, 'int_value': 112},
{'key': 5, 'value': 'foo5', 'int_value': 105}])
self.assertEqual(len(self.list_diff.new_values), 2)
self.assertDictEqual(self.list_diff.new_values[0],
{'key': 2, 'int_value': 112})
self.assertDictEqual(self.list_diff.new_values[1],
{'key': 5, 'value': 'foo5', 'int_value': 105})
def test_old_values(self):
self.assertEqual(self.list_diff.old_values,
[{'key': 2, 'int_value': 102},
{'key': 3, 'value': 'foo3', 'int_value': 103}])
self.assertEqual(len(self.list_diff.old_values), 2)
self.assertDictEqual(self.list_diff.old_values[0],
{'key': 2, 'int_value': 102})
self.assertDictEqual(self.list_diff.old_values[1],
{'key': 3, 'value': 'foo3', 'int_value': 103})
def test_changed_all(self):
self.assertEqual(self.list_diff.changed(selection='all'),
@@ -78,11 +87,3 @@ class ListDictDifferTestCase(TestCase):
'\twill be removed\n'
'\tidentified by key 5:\n'
'\twill be added\n')
def test_changes_str2(self):
self.assertEqual(self.list_diff.changes_str2,
' key=2 (updated):\n'
' int_value from 102 to 112\n'
' key=3 (removed)\n'
' key=5 (added): {\'int_value\': 105, \'key\': 5, '
'\'value\': \'foo5\'}')

View File

@@ -0,0 +1,784 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstanley.com>`
Tests for DVS-related functions in salt.utils.vmware
'''
# Import python libraries
from __future__ import absolute_import
import logging
# Import Salt testing libraries
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call
from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \
ArgumentValueError, VMwareRuntimeError
# Import Salt libraries
import salt.utils.vmware as vmware
# Import Third Party Libs
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
class FakeTaskClass(object):
pass
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetDvssTestCase(TestCase):
def setUp(self):
self.mock_si = MagicMock()
self.mock_dc_ref = MagicMock()
self.mock_traversal_spec = MagicMock()
self.mock_items = [{'object': MagicMock(),
'name': 'fake_dvs1'},
{'object': MagicMock(),
'name': 'fake_dvs2'},
{'object': MagicMock(),
'name': 'fake_dvs3'}]
self.mock_get_mors = MagicMock(return_value=self.mock_items)
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock()),
('salt.utils.vmware.get_mors_with_properties',
self.mock_get_mors),
('salt.utils.vmware.get_service_instance_from_managed_object',
MagicMock(return_value=self.mock_si)),
('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
MagicMock(return_value=self.mock_traversal_spec)))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec',
'mock_items', 'mock_get_mors'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.get_dvss(self.mock_dc_ref)
mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)
def test_traversal_spec(self):
mock_traversal_spec = MagicMock(return_value='traversal_spec')
with patch(
'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
mock_traversal_spec):
vmware.get_dvss(self.mock_dc_ref)
mock_traversal_spec.assert_has_calls(
[call(path='childEntity', skip=False, type=vim.Folder),
call(path='networkFolder', skip=True, type=vim.Datacenter,
selectSet=['traversal_spec'])])
def test_get_mors_with_properties(self):
vmware.get_dvss(self.mock_dc_ref)
self.mock_get_mors.assert_called_once_with(
self.mock_si, vim.DistributedVirtualSwitch,
container_ref=self.mock_dc_ref, property_list=['name'],
traversal_spec=self.mock_traversal_spec)
def test_get_no_dvss(self):
ret = vmware.get_dvss(self.mock_dc_ref)
self.assertEqual(ret, [])
def test_get_all_dvss(self):
ret = vmware.get_dvss(self.mock_dc_ref, get_all_dvss=True)
self.assertEqual(ret, [i['object'] for i in self.mock_items])
def test_filtered_all_dvss(self):
ret = vmware.get_dvss(self.mock_dc_ref,
dvs_names=['fake_dvs1', 'fake_dvs3', 'no_dvs'])
self.assertEqual(ret, [self.mock_items[0]['object'],
self.mock_items[2]['object']])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetNetworkFolderTestCase(TestCase):
def setUp(self):
self.mock_si = MagicMock()
self.mock_dc_ref = MagicMock()
self.mock_traversal_spec = MagicMock()
self.mock_entries = [{'object': MagicMock(),
'name': 'fake_netw_folder'}]
self.mock_get_mors = MagicMock(return_value=self.mock_entries)
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dc')),
('salt.utils.vmware.get_service_instance_from_managed_object',
MagicMock(return_value=self.mock_si)),
('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
MagicMock(return_value=self.mock_traversal_spec)),
('salt.utils.vmware.get_mors_with_properties',
self.mock_get_mors))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec',
'mock_entries', 'mock_get_mors'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.get_network_folder(self.mock_dc_ref)
mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)
def test_traversal_spec(self):
mock_traversal_spec = MagicMock(return_value='traversal_spec')
with patch(
'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
mock_traversal_spec):
vmware.get_network_folder(self.mock_dc_ref)
mock_traversal_spec.assert_called_once_with(
path='networkFolder', skip=False, type=vim.Datacenter)
def test_get_mors_with_properties(self):
vmware.get_network_folder(self.mock_dc_ref)
self.mock_get_mors.assert_called_once_with(
self.mock_si, vim.Folder, container_ref=self.mock_dc_ref,
property_list=['name'], traversal_spec=self.mock_traversal_spec)
def test_get_no_network_folder(self):
with patch('salt.utils.vmware.get_mors_with_properties',
MagicMock(return_value=[])):
with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
vmware.get_network_folder(self.mock_dc_ref)
self.assertEqual(excinfo.exception.strerror,
'Network folder in datacenter \'fake_dc\' wasn\'t '
'retrieved')
def test_get_network_folder(self):
ret = vmware.get_network_folder(self.mock_dc_ref)
self.assertEqual(ret, self.mock_entries[0]['object'])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class CreateDvsTestCase(TestCase):
def setUp(self):
self.mock_dc_ref = MagicMock()
self.mock_dvs_create_spec = MagicMock()
self.mock_task = MagicMock(spec=FakeTaskClass)
self.mock_netw_folder = \
MagicMock(CreateDVS_Task=MagicMock(
return_value=self.mock_task))
self.mock_wait_for_task = MagicMock()
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dc')),
('salt.utils.vmware.get_network_folder',
MagicMock(return_value=self.mock_netw_folder)),
('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_dc_ref', 'mock_dvs_create_spec',
'mock_task', 'mock_netw_folder', 'mock_wait_for_task'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)
def test_no_dvs_create_spec(self):
mock_spec = MagicMock(configSpec=None)
mock_config_spec = MagicMock()
mock_dvs_create_spec = MagicMock(return_value=mock_spec)
mock_vmware_dvs_config_spec = \
MagicMock(return_value=mock_config_spec)
with patch('salt.utils.vmware.vim.DVSCreateSpec',
mock_dvs_create_spec):
with patch('salt.utils.vmware.vim.VMwareDVSConfigSpec',
mock_vmware_dvs_config_spec):
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
mock_dvs_create_spec.assert_called_once_with()
mock_vmware_dvs_config_spec.assert_called_once_with()
self.assertEqual(mock_spec.configSpec, mock_config_spec)
self.assertEqual(mock_config_spec.name, 'fake_dvs')
self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(mock_spec)
def test_get_network_folder(self):
mock_get_network_folder = MagicMock()
with patch('salt.utils.vmware.get_network_folder',
mock_get_network_folder):
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
mock_get_network_folder.assert_called_once_with(self.mock_dc_ref)
def test_create_dvs_task_passed_in_spec(self):
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
dvs_create_spec=self.mock_dvs_create_spec)
self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(
self.mock_dvs_create_spec)
def test_create_dvs_task_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
dvs_create_spec=self.mock_dvs_create_spec)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_create_dvs_task_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
dvs_create_spec=self.mock_dvs_create_spec)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_create_dvs_task_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
dvs_create_spec=self.mock_dvs_create_spec)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
def test_wait_for_tasks(self):
vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
dvs_create_spec=self.mock_dvs_create_spec)
self.mock_wait_for_task.assert_called_once_with(
self.mock_task, 'fake_dvs',
'<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class UpdateDvsTestCase(TestCase):
def setUp(self):
self.mock_task = MagicMock(spec=FakeTaskClass)
self.mock_dvs_ref = MagicMock(
ReconfigureDvs_Task=MagicMock(return_value=self.mock_task))
self.mock_dvs_spec = MagicMock()
self.mock_wait_for_task = MagicMock()
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dvs')),
('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_dvs_ref', 'mock_task', 'mock_dvs_spec',
'mock_wait_for_task'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
def test_reconfigure_dvs_task(self):
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
self.mock_dvs_ref.ReconfigureDvs_Task.assert_called_once_with(
self.mock_dvs_spec)
def test_reconfigure_dvs_task_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_reconfigure_dvs_task_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_reconfigure_dvs_task_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
def test_wait_for_tasks(self):
vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
self.mock_wait_for_task.assert_called_once_with(
self.mock_task, 'fake_dvs',
'<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class SetDvsNetworkResourceManagementEnabledTestCase(TestCase):
def setUp(self):
self.mock_enabled = MagicMock()
self.mock_dvs_ref = MagicMock(
EnableNetworkResourceManagement=MagicMock())
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dvs')),)
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_dvs_ref', 'mock_enabled'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.set_dvs_network_resource_management_enabled(
self.mock_dvs_ref, self.mock_enabled)
mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
def test_enable_network_resource_management(self):
vmware.set_dvs_network_resource_management_enabled(
self.mock_dvs_ref, self.mock_enabled)
self.mock_dvs_ref.EnableNetworkResourceManagement.assert_called_once_with(
enable=self.mock_enabled)
def test_enable_network_resource_management_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_dvs_ref.EnableNetworkResourceManagement = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.set_dvs_network_resource_management_enabled(
self.mock_dvs_ref, self.mock_enabled)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_enable_network_resource_management_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_dvs_ref.EnableNetworkResourceManagement = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.set_dvs_network_resource_management_enabled(
self.mock_dvs_ref, self.mock_enabled)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_enable_network_resource_management_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_dvs_ref.EnableNetworkResourceManagement = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.set_dvs_network_resource_management_enabled(
self.mock_dvs_ref, self.mock_enabled)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetDvportgroupsTestCase(TestCase):
def setUp(self):
self.mock_si = MagicMock()
self.mock_dc_ref = MagicMock(spec=vim.Datacenter)
self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
self.mock_traversal_spec = MagicMock()
self.mock_items = [{'object': MagicMock(),
'name': 'fake_pg1'},
{'object': MagicMock(),
'name': 'fake_pg2'},
{'object': MagicMock(),
'name': 'fake_pg3'}]
self.mock_get_mors = MagicMock(return_value=self.mock_items)
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock()),
('salt.utils.vmware.get_mors_with_properties',
self.mock_get_mors),
('salt.utils.vmware.get_service_instance_from_managed_object',
MagicMock(return_value=self.mock_si)),
('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
MagicMock(return_value=self.mock_traversal_spec)))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_si', 'mock_dc_ref', 'mock_dvs_ref',
'mock_traversal_spec', 'mock_items', 'mock_get_mors'):
delattr(self, attr)
def test_unsupported_parent(self):
with self.assertRaises(ArgumentValueError) as excinfo:
vmware.get_dvportgroups(MagicMock())
self.assertEqual(excinfo.exception.strerror,
'Parent has to be either a datacenter, or a '
'distributed virtual switch')
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.get_dvportgroups(self.mock_dc_ref)
mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)
def test_traversal_spec_datacenter_parent(self):
mock_traversal_spec = MagicMock(return_value='traversal_spec')
with patch(
'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
mock_traversal_spec):
vmware.get_dvportgroups(self.mock_dc_ref)
mock_traversal_spec.assert_has_calls(
[call(path='childEntity', skip=False, type=vim.Folder),
call(path='networkFolder', skip=True, type=vim.Datacenter,
selectSet=['traversal_spec'])])
def test_traversal_spec_dvs_parent(self):
mock_traversal_spec = MagicMock(return_value='traversal_spec')
with patch(
'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
mock_traversal_spec):
vmware.get_dvportgroups(self.mock_dvs_ref)
mock_traversal_spec.assert_called_once_with(
path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
def test_get_mors_with_properties(self):
vmware.get_dvportgroups(self.mock_dvs_ref)
self.mock_get_mors.assert_called_once_with(
self.mock_si, vim.DistributedVirtualPortgroup,
container_ref=self.mock_dvs_ref, property_list=['name'],
traversal_spec=self.mock_traversal_spec)
def test_get_no_pgs(self):
ret = vmware.get_dvportgroups(self.mock_dvs_ref)
self.assertEqual(ret, [])
def test_get_all_pgs(self):
ret = vmware.get_dvportgroups(self.mock_dvs_ref,
get_all_portgroups=True)
self.assertEqual(ret, [i['object'] for i in self.mock_items])
def test_filtered_pgs(self):
ret = vmware.get_dvportgroups(self.mock_dc_ref,
portgroup_names=['fake_pg1', 'fake_pg3', 'no_pg'])
self.assertEqual(ret, [self.mock_items[0]['object'],
self.mock_items[2]['object']])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetUplinkDvportgroupTestCase(TestCase):
def setUp(self):
self.mock_si = MagicMock()
self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
self.mock_traversal_spec = MagicMock()
self.mock_items = [{'object': MagicMock(),
'tag': [MagicMock(key='fake_tag')]},
{'object': MagicMock(),
'tag': [MagicMock(key='SYSTEM/DVS.UPLINKPG')]}]
self.mock_get_mors = MagicMock(return_value=self.mock_items)
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dvs')),
('salt.utils.vmware.get_mors_with_properties',
self.mock_get_mors),
('salt.utils.vmware.get_service_instance_from_managed_object',
MagicMock(return_value=self.mock_si)),
('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
MagicMock(return_value=self.mock_traversal_spec)))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_si', 'mock_dvs_ref', 'mock_traversal_spec',
'mock_items', 'mock_get_mors'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
def test_traversal_spec(self):
mock_traversal_spec = MagicMock(return_value='traversal_spec')
with patch(
'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
mock_traversal_spec):
vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
mock_traversal_spec.assert_called_once_with(
path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
def test_get_mors_with_properties(self):
vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
self.mock_get_mors.assert_called_once_with(
self.mock_si, vim.DistributedVirtualPortgroup,
container_ref=self.mock_dvs_ref, property_list=['tag'],
traversal_spec=self.mock_traversal_spec)
def test_get_no_uplink_pg(self):
with patch('salt.utils.vmware.get_mors_with_properties',
MagicMock(return_value=[])):
with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
self.assertEqual(excinfo.exception.strerror,
'Uplink portgroup of DVS \'fake_dvs\' wasn\'t found')
def test_get_uplink_pg(self):
ret = vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
self.assertEqual(ret, self.mock_items[1]['object'])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class CreateDvportgroupTestCase(TestCase):
def setUp(self):
self.mock_pg_spec = MagicMock()
self.mock_task = MagicMock(spec=FakeTaskClass)
self.mock_dvs_ref = \
MagicMock(CreateDVPortgroup_Task=MagicMock(
return_value=self.mock_task))
self.mock_wait_for_task = MagicMock()
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_dvs')),
('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_pg_spec', 'mock_dvs_ref', 'mock_task',
'mock_wait_for_task'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
def test_create_dvporgroup_task(self):
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
self.mock_dvs_ref.CreateDVPortgroup_Task.assert_called_once_with(
self.mock_pg_spec)
def test_create_dvporgroup_task_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_create_dvporgroup_task_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_create_dvporgroup_task_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
def test_wait_for_tasks(self):
vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
self.mock_wait_for_task.assert_called_once_with(
self.mock_task, 'fake_dvs',
'<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class UpdateDvportgroupTestCase(TestCase):
def setUp(self):
self.mock_pg_spec = MagicMock()
self.mock_task = MagicMock(spec=FakeTaskClass)
self.mock_pg_ref = \
MagicMock(ReconfigureDVPortgroup_Task=MagicMock(
return_value=self.mock_task))
self.mock_wait_for_task = MagicMock()
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_pg')),
('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_pg_spec', 'mock_pg_ref', 'mock_task',
'mock_wait_for_task'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)
def test_reconfigure_dvporgroup_task(self):
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
self.mock_pg_ref.ReconfigureDVPortgroup_Task.assert_called_once_with(
self.mock_pg_spec)
def test_reconfigure_dvporgroup_task_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_reconfigure_dvporgroup_task_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_reconfigure_dvporgroup_task_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
def test_wait_for_tasks(self):
vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
self.mock_wait_for_task.assert_called_once_with(
self.mock_task, 'fake_pg',
'<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class RemoveDvportgroupTestCase(TestCase):
def setUp(self):
self.mock_task = MagicMock(spec=FakeTaskClass)
self.mock_pg_ref = \
MagicMock(Destroy_Task=MagicMock(
return_value=self.mock_task))
self.mock_wait_for_task = MagicMock()
patches = (
('salt.utils.vmware.get_managed_object_name',
MagicMock(return_value='fake_pg')),
('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
for mod, mock in patches:
patcher = patch(mod, mock)
patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
for attr in ('mock_pg_ref', 'mock_task', 'mock_wait_for_task'):
delattr(self, attr)
def test_get_managed_object_name_call(self):
mock_get_managed_object_name = MagicMock()
with patch('salt.utils.vmware.get_managed_object_name',
mock_get_managed_object_name):
vmware.remove_dvportgroup(self.mock_pg_ref)
mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)
def test_destroy_task(self):
vmware.remove_dvportgroup(self.mock_pg_ref)
self.mock_pg_ref.Destroy_Task.assert_called_once_with()
def test_destroy_task_raises_no_permission(self):
exc = vim.fault.NoPermission()
exc.privilegeId = 'Fake privilege'
self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.remove_dvportgroup(self.mock_pg_ref)
self.assertEqual(excinfo.exception.strerror,
'Not enough permissions. Required privilege: '
'Fake privilege')
def test_destroy_task_raises_vim_fault(self):
exc = vim.fault.VimFault()
exc.msg = 'VimFault msg'
self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareApiError) as excinfo:
vmware.remove_dvportgroup(self.mock_pg_ref)
self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
def test_destroy_task_raises_runtime_fault(self):
exc = vmodl.RuntimeFault()
exc.msg = 'RuntimeFault msg'
self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
with self.assertRaises(VMwareRuntimeError) as excinfo:
vmware.remove_dvportgroup(self.mock_pg_ref)
self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
def test_wait_for_tasks(self):
vmware.remove_dvportgroup(self.mock_pg_ref)
self.mock_wait_for_task.assert_called_once_with(
self.mock_task, 'fake_pg',
'<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')

View File

@@ -264,14 +264,14 @@ class GetDatastoresTestCase(TestCase):
mock_reference,
get_all_datastores=True)
mock_traversal_spec_init.assert_called([
mock_traversal_spec_init.assert_has_calls([
call(path='datastore',
skip=False,
type=vim.Datacenter),
call(path='childEntity',
selectSet=['traversal'],
skip=False,
type=vim.Folder),
call(path='datastore',
skip=False,
type=vim.Datacenter)])
type=vim.Folder)])
def test_unsupported_reference_type(self):
class FakeClass(object):
@@ -379,7 +379,7 @@ class RenameDatastoreTestCase(TestCase):
with self.assertRaises(VMwareApiError) as excinfo:
salt.utils.vmware.rename_datastore(self.mock_ds_ref,
'fake_new_name')
self.assertEqual(excinfo.exception.message, 'vim_fault')
self.assertEqual(excinfo.exception.strerror, 'vim_fault')
def test_rename_datastore_raise_runtime_fault(self):
exc = vmodl.RuntimeFault()
@@ -388,7 +388,7 @@ class RenameDatastoreTestCase(TestCase):
with self.assertRaises(VMwareRuntimeError) as excinfo:
salt.utils.vmware.rename_datastore(self.mock_ds_ref,
'fake_new_name')
self.assertEqual(excinfo.exception.message, 'runtime_fault')
self.assertEqual(excinfo.exception.strerror, 'runtime_fault')
def test_rename_datastore(self):
salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name')