diff --git a/doc/ref/clouds/all/salt.cloud.clouds.vmware.rst b/doc/ref/clouds/all/salt.cloud.clouds.vmware.rst
new file mode 100644
index 0000000000..c8b1b88702
--- /dev/null
+++ b/doc/ref/clouds/all/salt.cloud.clouds.vmware.rst
@@ -0,0 +1,7 @@
+========================
+salt.cloud.clouds.vmware
+========================
+
+.. automodule:: salt.cloud.clouds.vmware
+    :members:
+    :exclude-members: get_configured_provider, script
diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst
index 519ee43152..d76db540bf 100644
--- a/doc/ref/configuration/minion.rst
+++ b/doc/ref/configuration/minion.rst
@@ -329,7 +329,7 @@ to enable set grains_cache to ``True``.
 
 .. code-block:: yaml
 
-    cache_jobs: False
+    grains_cache: False
 
 .. conf_minion:: sock_dir
diff --git a/doc/ref/states/highstate.rst b/doc/ref/states/highstate.rst
index 3df6b45602..41209a9838 100644
--- a/doc/ref/states/highstate.rst
+++ b/doc/ref/states/highstate.rst
@@ -76,9 +76,8 @@ Extend declaration
 ------------------
 
 Extends a :ref:`name-declaration` from an included ``SLS module``. The
-keys of the extend declaration always define existing :ref`ID declaration`
-which have been defined in included
-``SLS modules``.
+keys of the extend declaration always refer to an existing
+:ref:`id-declaration` which has been defined in included ``SLS modules``.
 
 Occurs only in the top level and defines a dictionary.
diff --git a/doc/topics/best_practices.rst b/doc/topics/best_practices.rst
index 73c5518a17..5f6a69f5cd 100644
--- a/doc/topics/best_practices.rst
+++ b/doc/topics/best_practices.rst
@@ -451,7 +451,7 @@ accessible by the appropriate hosts:
 .. code-block:: yaml
 
     testdb:
-      mysql_database.present::
+      mysql_database.present:
        - name: testerdb
 
 ``/srv/salt/mysql/user.sls``:
diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst
new file mode 100644
index 0000000000..587f5ab670
--- /dev/null
+++ b/doc/topics/cloud/vmware.rst
@@ -0,0 +1,379 @@
+===========================
+Getting Started With VMware
+===========================
+
+.. versionadded:: 2015.5.4
+
+**Author**: Nitin Madhok
+
+The VMware cloud module allows you to manage VMware ESX, ESXi, and vCenter.
+
+
+Dependencies
+============
+The vmware module for Salt Cloud requires the ``pyVmomi`` package, which is
+available on PyPI:
+
+https://pypi.python.org/pypi/pyvmomi
+
+This package can be installed using ``pip`` or ``easy_install``:
+
+.. code-block:: bash
+
+    pip install pyvmomi
+    easy_install pyvmomi
+
+
+Configuration
+=============
+The VMware cloud module needs the vCenter URL, username and password to be
+set up in the cloud configuration at
+``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:
+
+.. code-block:: yaml
+
+    my-vmware-config:
+      provider: vmware
+      user: "DOMAIN\user"
+      password: "verybadpass"
+      url: "vcenter01.domain.com"
+
+    vmware-vcenter02:
+      provider: vmware
+      user: "DOMAIN\user"
+      password: "verybadpass"
+      url: "vcenter02.domain.com"
+
+    vmware-vcenter03:
+      provider: vmware
+      user: "DOMAIN\user"
+      password: "verybadpass"
+      url: "vcenter03.domain.com"
+      protocol: "http"
+      port: 80
+
+.. note::
+
+    Optionally, ``protocol`` and ``port`` can be specified if the vCenter
+    server is not using the defaults. Default is ``protocol: https`` and
+    ``port: 443``.
+
+.. _vmware-cloud-profile:
+
+Profiles
+========
+Set up an initial profile at ``/etc/salt/cloud.profiles`` or
+``/etc/salt/cloud.profiles.d/vmware.conf``:
+
+.. code-block:: yaml
+
+    vmware-centos6.5:
+      provider: vmware-vcenter01
+      clonefrom: test-vm
+
+      ## Optional arguments
+      num_cpus: 4
+      memory: 8GB
+      devices:
+        cd:
+          CD/DVD drive 1:
+            device_type: datastore_iso_file
+            iso_path: "[nap004-1] vmimages/tools-isoimages/linux.iso"
+          CD/DVD drive 2:
+            device_type: client_device
+            mode: atapi
+          CD/DVD drive 3:
+            device_type: client_device
+            mode: passthrough
+        disk:
+          Hard disk 1:
+            size: 30
+          Hard disk 2:
+            size: 20
+          Hard disk 3:
+            size: 5
+        network:
+          Network adapter 1:
+            name: 10.20.30-400-Test
+            switch_type: standard
+            ip: 10.20.30.123
+            gateway: [10.20.30.110]
+            subnet_mask: 255.255.255.128
+            domain: mycompany.com
+          Network adapter 2:
+            name: 10.30.40-500-Dev-DHCP
+            adapter_type: e1000
+            switch_type: distributed
+          Network adapter 3:
+            name: 10.40.50-600-Prod
+            adapter_type: vmxnet3
+            switch_type: distributed
+            ip: 10.40.50.123
+            gateway: [10.40.50.110]
+            subnet_mask: 255.255.255.128
+            domain: mycompany.com
+        scsi:
+          SCSI controller 1:
+            type: lsilogic
+          SCSI controller 2:
+            type: lsilogic_sas
+            bus_sharing: virtual
+          SCSI controller 3:
+            type: paravirtual
+            bus_sharing: physical
+
+      domain: mycompany.com
+      dns_servers:
+        - 123.127.255.240
+        - 123.127.255.241
+        - 123.127.255.242
+
+      # If cloning from template, either resourcepool or cluster MUST be specified!
+      resourcepool: Resources
+      cluster: Prod
+
+      datastore: HUGE-DATASTORE-Cluster
+      folder: Development
+      datacenter: DC1
+      host: c4212n-002.domain.com
+      template: False
+      power_on: True
+      extra_config:
+        mem.hotadd: 'yes'
+        guestinfo.foo: bar
+        guestinfo.domain: foobar.com
+        guestinfo.customVariable: customValue
+
+      deploy: True
+      private_key: /root/.ssh/mykey.pem
+      ssh_username: cloud-user
+      password: veryVeryBadPassword
+      minion:
+        master: 123.127.193.105
+
+      file_map:
+        /path/to/local/custom/script: /path/to/remote/script
+        /path/to/local/file: /path/to/remote/file
+        /srv/salt/yum/epel.repo: /etc/yum.repos.d/epel.repo
+
+
+``provider``
+    Enter the name that was specified when the cloud provider config was created.
+
+``clonefrom``
+    Enter the name of the VM/template to clone from.
+
+``num_cpus``
+    Enter the number of vCPUs that you want the VM/template to have. If not
+    specified, the current VM/template's vCPU count is used.
+
+``memory``
+    Enter the memory size (in MB or GB) that you want the VM/template to have. If
+    not specified, the current VM/template's memory size is used. For example,
+    ``memory: 8GB`` or ``memory: 8192MB``.
+
+``devices``
+    Enter the device specifications here. Currently, the following devices can be
+    created or reconfigured:
+
+    cd
+        Enter the CD/DVD drive specification here. If the CD/DVD drive doesn't exist,
+        it will be created with the specified configuration. If the CD/DVD drive
+        already exists, it will be reconfigured with the specifications. The following
+        options can be specified per CD/DVD drive:
+
+        device_type
+            Specify how the CD/DVD drive should be used. Currently supported types are
+            ``client_device`` and ``datastore_iso_file``. Default is
+            ``device_type: client_device``.
+        iso_path
+            Enter the path to the iso file present on the datastore only if
+            ``device_type: datastore_iso_file``. The syntax to specify this is
+            ``iso_path: "[datastoreName] vmimages/tools-isoimages/linux.iso"``. This
+            field is ignored if ``device_type: client_device``.
+        mode
+            Enter the mode of connection only if ``device_type: client_device``. Currently
+            supported modes are ``passthrough`` and ``atapi``. This field is ignored if
+            ``device_type: datastore_iso_file``.
+            Default is ``mode: passthrough``.
+
+    disk
+        Enter the disk specification here. If the hard disk doesn't exist, it will
+        be created with the provided size. If the hard disk already exists, it will
+        be expanded if the provided size is greater than the current size of the disk.
+
+    network
+        Enter the network adapter specification here. If the network adapter doesn't
+        exist, a new network adapter will be created with the specified network name,
+        type and other configuration. If the network adapter already exists, it will
+        be reconfigured with the specifications. The following additional options can
+        be specified per network adapter (see example above):
+
+        name
+            Enter the network name you want the network adapter to be mapped to.
+
+        adapter_type
+            Enter the network adapter type you want to create. Currently supported
+            types are ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000`` and ``e1000e``.
+            If no type is specified, ``vmxnet3`` will be used by default.
+
+        switch_type
+            Enter the type of switch to use. This decides whether to use a standard
+            switch network or a distributed virtual portgroup. Currently supported
+            types are ``standard`` for standard portgroups and ``distributed`` for
+            distributed virtual portgroups.
+
+        ip
+            Enter the static IP you want the network adapter to be mapped to. If the
+            network specified is DHCP enabled, you do not have to specify this.
+
+        gateway
+            Enter the gateway for the network as a list. If the network specified
+            is DHCP enabled, you do not have to specify this.
+
+        subnet_mask
+            Enter the subnet mask for the network. If the network specified is DHCP
+            enabled, you do not have to specify this.
+
+        domain
+            Enter the domain to be used with the network adapter. If the network
+            specified is DHCP enabled, you do not have to specify this.
+
+    scsi
+        Enter the SCSI adapter specification here. If the SCSI adapter doesn't exist,
+        a new SCSI adapter will be created of the specified type. If the SCSI adapter
+        already exists, it will be reconfigured with the specifications. The following
+        additional options can be specified per SCSI adapter:
+
+        type
+            Enter the SCSI adapter type you want to create. Currently supported
+            types are ``lsilogic``, ``lsilogic_sas`` and ``paravirtual``. Type must
+            be specified when creating a new SCSI adapter.
+
+        bus_sharing
+            Specify this if sharing of virtual disks between virtual machines is desired.
+            The following can be specified:
+
+            virtual
+                Virtual disks can be shared between virtual machines on the same server.
+
+            physical
+                Virtual disks can be shared between virtual machines on any server.
+
+            no
+                Virtual disks cannot be shared between virtual machines.
+
+``domain``
+    Enter the global domain name to be used for DNS. If not specified and if the VM name
+    is a FQDN, ``domain`` is set to the domain from the VM name. Default is ``local``.
+
+``dns_servers``
+    Enter the list of DNS servers to use in order of priority.
+
+``resourcepool``
+    Enter the name of the resourcepool to which the new virtual machine should be
+    attached. This determines what compute resources will be available to the clone.
+
+    .. note::
+
+        - For a clone operation from a virtual machine, it will use the same
+          resourcepool as the original virtual machine unless specified.
+        - For a clone operation from a template to a virtual machine, specifying
+          either this or cluster is required. If both are specified, the resourcepool
+          value will be used.
+        - For a clone operation to a template, this argument is ignored.
+
+``cluster``
+    Enter the name of the cluster whose resource pool the new virtual machine should
+    be attached to.
+
+    .. note::
+
+        - For a clone operation from a virtual machine, it will use the same cluster's
+          resourcepool as the original virtual machine unless specified.
+        - For a clone operation from a template to a virtual machine, specifying either
+          this or resourcepool is required. If both are specified, the resourcepool
+          value will be used.
+        - For a clone operation to a template, this argument is ignored.
+
+``datastore``
+    Enter the name of the datastore or the datastore cluster where the virtual machine
+    should be located on physical storage. If not specified, the current datastore is
+    used.
+
+    .. note::
+
+        - If you specify a datastore cluster name, DRS Storage recommendation is
+          automatically applied.
+        - If you specify a datastore name, DRS Storage recommendation is disabled.
+
+``folder``
+    Enter the name of the folder that will contain the new virtual machine.
+
+    .. note::
+
+        - For a clone operation from a VM/template, the new VM/template will be added
+          to the same folder that the original VM/template belongs to unless specified.
+        - If both folder and datacenter are specified, the folder value will be used.
+
+``datacenter``
+    Enter the name of the datacenter that will contain the new virtual machine.
+
+    .. note::
+
+        - For a clone operation from a VM/template, the new VM/template will be added
+          to the same datacenter that the original VM/template belongs to unless
+          specified.
+        - If both folder and datacenter are specified, the folder value will be used.
+
+``host``
+    Enter the name of the target host where the virtual machine should be registered.
+
+    If not specified:
+
+    .. note::
+
+        - If the resource pool is not specified, the current host is used.
+        - If the resource pool is specified, and the target pool represents a stand-alone
+          host, the host is used.
+        - If the resource pool is specified, and the target pool represents a DRS-enabled
+          cluster, a host selected by DRS is used.
+        - If the resource pool is specified and the target pool represents a cluster
+          without DRS enabled, an InvalidArgument exception will be thrown.
+
+``template``
+    Specifies whether the new virtual machine should be marked as a template or not.
+    Default is ``template: False``.
+
+``power_on``
+    Specifies whether the new virtual machine should be powered on or not. If
+    ``template: True`` is set, this field is ignored. Default is ``power_on: True``.
+
+``extra_config``
+    Specifies the additional configuration information for the virtual machine. This
+    describes a set of modifications to the additional options. If the key is already
+    present, it will be reset with the new value provided. Otherwise, a new option is
+    added. Keys with empty values will be removed.
+
+``deploy``
+    Specifies if salt should be installed on the newly created VM. Default is ``True``,
+    so salt will be installed using the bootstrap script. If ``template: True`` or
+    ``power_on: False`` is set, this field is ignored and salt will not be installed.
+
+``private_key``
+    Specify the path to the private key to use to be able to ssh to the VM.
+
+``ssh_username``
+    Specify the username to use in order to ssh to the VM. Default is ``root``.
+
+``password``
+    Specify a password to use in order to ssh to the VM. If ``private_key`` is
+    specified, you do not need to specify this.
+
+``minion``
+    Specify custom minion configuration you want the salt minion to have. A good
+    example would be to specify the ``master`` as the IP/DNS name of the master.
+
+``file_map``
+    Specify the file(s) you want to copy to the VM before the bootstrap script is run
+    and salt is installed. A good example of using this would be if you need to put
+    custom repo files on the server, in case your server is on a private network and
+    cannot reach external networks.
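+
+
+Cloning from a profile
+======================
+Once the provider and profile are configured, a VM can be cloned from the CLI
+with ``salt-cloud -p vmware-centos6.5 newvm``. The same operation can also be
+driven from Python through Salt's cloud client. The snippet below is only a
+minimal sketch; the profile and VM names are simply the examples used on this
+page:
+
+.. code-block:: python
+
+    import salt.cloud
+
+    # CloudClient reads the same /etc/salt/cloud* configuration files that
+    # the salt-cloud CLI uses.
+    client = salt.cloud.CloudClient('/etc/salt/cloud')
+
+    # Clone a single VM named 'newvm' using the profile defined above.
+    ret = client.profile('vmware-centos6.5', names=['newvm'])
+    print(ret)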
diff --git a/doc/topics/releases/2015.5.4.rst b/doc/topics/releases/2015.5.4.rst
new file mode 100644
index 0000000000..4965356aa2
--- /dev/null
+++ b/doc/topics/releases/2015.5.4.rst
@@ -0,0 +1,17 @@
+===========================
+Salt 2015.5.4 Release Notes
+===========================
+
+:release: TBA
+
+Version 2015.5.4 is a bugfix release for :doc:`2015.5.0
+`.
+
+Changes:
+
+- When querying for VMs in ``digital_ocean_v2.py``, the number of VMs to include in
+  a page was changed from 20 (the default) to 200 to reduce the number of API calls
+  to Digital Ocean.
+
+- The ``vmware`` Salt Cloud driver was back-ported from the develop branch so that
+  Salt installations older than 2015.8.0 can use the ``vmware`` driver without
+  stack-tracing on the various deprecation paths implemented in the 2015.8.0 release.
diff --git a/salt/cloud/clouds/digital_ocean_v2.py b/salt/cloud/clouds/digital_ocean_v2.py
index fa2132adfd..24220bef7d 100644
--- a/salt/cloud/clouds/digital_ocean_v2.py
+++ b/salt/cloud/clouds/digital_ocean_v2.py
@@ -112,7 +112,7 @@ def avail_images(call=None):
     ret = {}
 
     while fetch:
-        items = query(method='images', command='?page=' + str(page))
+        items = query(method='images', command='?page=' + str(page) + '&per_page=200')
 
         for image in items['images']:
             ret[image['id']] = {}
@@ -162,7 +162,7 @@ def list_nodes(call=None):
     ret = {}
 
     while fetch:
-        items = query(method='droplets', command='?page=' + str(page))
+        items = query(method='droplets', command='?page=' + str(page) + '&per_page=200')
 
         for node in items['droplets']:
             ret[node['name']] = {
                 'id': node['id'],
@@ -194,7 +194,7 @@ def list_nodes_full(call=None, forOutput=True):
     ret = {}
 
     while fetch:
-        items = query(method='droplets', command='?page=' + str(page))
+        items = query(method='droplets', command='?page=' + str(page) + '&per_page=200')
 
         for node in items['droplets']:
             ret[node['name']] = {}
             for item in node.keys():
diff --git a/salt/cloud/clouds/opennebula.py b/salt/cloud/clouds/opennebula.py
index 44a3299cbc..480fc9cd9f 100644
--- a/salt/cloud/clouds/opennebula.py
+++ b/salt/cloud/clouds/opennebula.py
@@ -6,6 +6,8 @@ OpenNebula Cloud Module
 The OpenNebula cloud module is used to control access to an OpenNebula
 cloud.
 
+:depends: lxml
+
 Use of this module requires the ``xml_rpc``, ``user`` and ``password``
 parameter to be set.
 
 Set up the cloud configuration at
 ``/etc/salt/cloud.providers`` or
diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py
new file mode 100644
index 0000000000..cf667d388f
--- /dev/null
+++ b/salt/cloud/clouds/vmware.py
@@ -0,0 +1,3609 @@
+# -*- coding: utf-8 -*-
+'''
+VMware Cloud Module
+===================
+
+.. versionadded:: 2015.5.4
+
+The VMware cloud module allows you to manage VMware ESX, ESXi, and vCenter.
+
+See :doc:`Getting started with VMware ` to get started.
+
+:codeauthor: Nitin Madhok
+:depends: pyVmomi Python module
+
+.. note::
+    Ensure the pyVmomi Python module is installed by running the following
+    one-liner check. The output should be 0.
+
+    .. code-block:: bash
+
+        python -c "import pyVmomi" ; echo $?
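+
+The same check can be taken one step further from Python. The snippet below is
+only an illustrative sketch -- the host and credential values are placeholders
+for your own, and on newer Python versions an unverified SSL context may be
+needed if the vCenter certificate is self-signed:
+
+.. code-block:: python
+
+    # Hypothetical connectivity check; replace host/user/pwd with real values.
+    from pyVim.connect import SmartConnect, Disconnect
+
+    si = SmartConnect(host='vcenter01.domain.com',
+                      user='DOMAIN\\user',
+                      pwd='verybadpass')
+    print(si.content.about.fullName)  # e.g. the vCenter version string
+    Disconnect(si)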
+
+To use this module, set up the vCenter URL, username and password in the
+cloud configuration at
+``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:
+
+.. code-block:: yaml
+
+    my-vmware-config:
+      provider: vmware
+      user: "DOMAIN\\user"
+      password: "verybadpass"
+      url: "vcenter01.domain.com"
+
+    vmware-vcenter02:
+      provider: vmware
+      user: "DOMAIN\\user"
+      password: "verybadpass"
+      url: "vcenter02.domain.com"
+
+    vmware-vcenter03:
+      provider: vmware
+      user: "DOMAIN\\user"
+      password: "verybadpass"
+      url: "vcenter03.domain.com"
+      protocol: "http"
+      port: 80
+
+.. note::
+
+    Optionally, ``protocol`` and ``port`` can be specified if the vCenter
+    server is not using the defaults. Default is ``protocol: https`` and
+    ``port: 443``.
+
+To test the connection for ``my-vmware-config`` specified in the cloud
+configuration, run :py:func:`test_vcenter_connection`.
+'''
+
+# Import python libs
+from __future__ import absolute_import
+from random import randint
+from re import match, findall
+import atexit
+import pprint
+import logging
+import time
+import os.path
+import subprocess
+
+# Import salt libs
+import salt.utils
+import salt.utils.cloud
+import salt.utils.xmlutil
+from salt.exceptions import SaltCloudSystemExit
+
+# Import salt cloud libs
+import salt.config as config
+
+# Attempt to import pyVim and pyVmomi libs
+HAS_LIBS = False
+try:
+    from pyVim.connect import SmartConnect, Disconnect
+    from pyVmomi import vim, vmodl
+    HAS_LIBS = True
+except Exception:
+    pass
+
+# Disable InsecureRequestWarning generated on python > 2.6
+try:
+    from requests.packages.urllib3 import disable_warnings
+    disable_warnings()
+except Exception:
+    pass
+
+# Import third party libs
+try:
+    import salt.ext.six as six
+except ImportError:
+    # Salt version <= 2014.7.0
+    try:
+        import six
+    except ImportError:
+        HAS_LIBS = False
+
+# Get logging started
+log = logging.getLogger(__name__)
+
+
+# Only load in this module if the VMware configurations are in place
+def __virtual__():
+    '''
+    Check for VMware configuration and if required libs are available.
+    '''
+    if not HAS_LIBS:
+        return False
+
+    if get_configured_provider() is False:
+        return False
+
+    return True
+
+
+def get_configured_provider():
+    '''
+    Return the first configured instance.
+    '''
+    return config.is_provider_configured(
+        __opts__,
+        __active_provider_name__ or 'vmware',
+        ('url', 'user', 'password',)
+    )
+
+
+def script(vm_):
+    '''
+    Return the script deployment object
+    '''
+    script_name = config.get_cloud_config_value('script', vm_, __opts__)
+    if not script_name:
+        script_name = 'bootstrap-salt'
+
+    return salt.utils.cloud.os_script(
+        script_name,
+        vm_,
+        __opts__,
+        salt.utils.cloud.salt_config_to_yaml(
+            salt.utils.cloud.minion_config(__opts__, vm_)
+        )
+    )
+
+
+def _str_to_bool(var):
+    if isinstance(var, bool):
+        return var
+
+    if isinstance(var, six.string_types):
+        return var.lower() == 'true'
+
+    return None
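+
+
+# NOTE: _str_to_bool() intentionally returns None -- rather than False -- when
+# the value is neither a bool nor a string, which lets callers distinguish an
+# unset value from an explicit false.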
+ ''' + + url = config.get_cloud_config_value( + 'url', get_configured_provider(), __opts__, search_global=False + ) + username = config.get_cloud_config_value( + 'user', get_configured_provider(), __opts__, search_global=False + ) + password = config.get_cloud_config_value( + 'password', get_configured_provider(), __opts__, search_global=False + ) + protocol = config.get_cloud_config_value( + 'protocol', get_configured_provider(), __opts__, search_global=False, default='https' + ) + port = config.get_cloud_config_value( + 'port', get_configured_provider(), __opts__, search_global=False, default=443 + ) + + try: + si = SmartConnect( + host=url, + user=username, + pwd=password, + protocol=protocol, + port=port + ) + except Exception as exc: + if isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg: + try: + import ssl + default_context = ssl._create_default_https_context + ssl._create_default_https_context = ssl._create_unverified_context + si = SmartConnect( + host=url, + user=username, + pwd=password, + protocol=protocol, + port=port + ) + ssl._create_default_https_context = default_context + except: + err_msg = exc.msg if isinstance(exc, vim.fault.InvalidLogin) and hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the specified protocol or url or port' + raise SaltCloudSystemExit(err_msg) + else: + err_msg = exc.msg if isinstance(exc, vim.fault.InvalidLogin) and hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the specified protocol or url or port' + raise SaltCloudSystemExit(err_msg) + + atexit.register(Disconnect, si) + + return si + + +def _get_inv(): + ''' + Return the inventory. + ''' + si = _get_si() + return si.RetrieveContent() + + +def _get_content(obj_type, property_list=None): + # Get service instance object + si = _get_si() + + # Refer to http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html for more information. 
+
+    # Create an object view
+    obj_view = si.content.viewManager.CreateContainerView(si.content.rootFolder, [obj_type], True)
+
+    # Create traversal spec to determine the path for collection
+    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
+        name='traverseEntities',
+        path='view',
+        skip=False,
+        type=vim.view.ContainerView
+    )
+
+    # Create property spec to determine properties to be retrieved
+    property_spec = vmodl.query.PropertyCollector.PropertySpec(
+        type=obj_type,
+        all=not property_list,
+        pathSet=property_list
+    )
+
+    # Create object spec to navigate content
+    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
+        obj=obj_view,
+        skip=True,
+        selectSet=[traversal_spec]
+    )
+
+    # Create a filter spec and specify object, property spec in it
+    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
+        objectSet=[obj_spec],
+        propSet=[property_spec],
+        reportMissingObjectsInResults=False
+    )
+
+    # Retrieve the contents
+    content = si.content.propertyCollector.RetrieveContents([filter_spec])
+
+    # Destroy the object view
+    obj_view.Destroy()
+
+    return content
+
+
+def _get_mors_with_properties(obj_type, property_list=None):
+    '''
+    Returns a list containing properties and managed object references for the managed object
+    '''
+    # Get all the content
+    content = _get_content(obj_type, property_list)
+
+    object_list = []
+    for obj in content:
+        properties = {}
+        for prop in obj.propSet:
+            properties[prop.name] = prop.val
+        properties['object'] = obj.obj
+        object_list.append(properties)
+
+    return object_list
+
+
+def _get_mor_by_property(obj_type, property_value, property_name='name'):
+    '''
+    Returns the first managed object reference having the specified property value
+    '''
+    # Get list of all managed object references with specified property
+    object_list = _get_mors_with_properties(obj_type, [property_name])
+
+    for obj in object_list:
+        if obj[property_name] == property_value:
+            return obj['object']
+
+    return None
+
+
+def _edit_existing_hard_disk_helper(disk, size_kb):
+    disk.capacityInKB = size_kb
+    disk_spec = vim.vm.device.VirtualDeviceSpec()
+    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+    disk_spec.device = disk
+
+    return disk_spec
+
+
+def _add_new_hard_disk_helper(disk_label, size_gb, unit_number):
+    random_key = randint(-2099, -2000)
+
+    size_kb = int(size_gb * 1024.0 * 1024.0)
+
+    disk_spec = vim.vm.device.VirtualDeviceSpec()
+    disk_spec.fileOperation = 'create'
+    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+    disk_spec.device = vim.vm.device.VirtualDisk()
+    disk_spec.device.key = random_key
+    disk_spec.device.deviceInfo = vim.Description()
+    disk_spec.device.deviceInfo.label = disk_label
+    disk_spec.device.deviceInfo.summary = "{0} GB".format(size_gb)
+    disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+    disk_spec.device.backing.diskMode = 'persistent'
+    disk_spec.device.controllerKey = 1000
+    disk_spec.device.unitNumber = unit_number
+    disk_spec.device.capacityInKB = size_kb
+
+    return disk_spec
+
+
+def _get_network_adapter_type(adapter_type):
+    if adapter_type == "vmxnet":
+        return vim.vm.device.VirtualVmxnet()
+    elif adapter_type == "vmxnet2":
+        return vim.vm.device.VirtualVmxnet2()
+    elif adapter_type == "vmxnet3":
+        return vim.vm.device.VirtualVmxnet3()
+    elif adapter_type == "e1000":
+        return vim.vm.device.VirtualE1000()
+    elif adapter_type == "e1000e":
+        return vim.vm.device.VirtualE1000e()
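+
+
+# NOTE: _get_network_adapter_type() falls through and returns None for an
+# unrecognized type name; both callers below guard with the supported-type
+# list first, so an unknown 'adapter_type' either leaves the existing adapter
+# unchanged (edit) or falls back to vmxnet3 (create).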
+def _edit_existing_network_adapter_helper(network_adapter, new_network_name, adapter_type, switch_type):
+    adapter_type = adapter_type.strip().lower()
+    switch_type = switch_type.strip().lower()
+
+    if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]:
+        edited_network_adapter = _get_network_adapter_type(adapter_type)
+        if isinstance(network_adapter, type(edited_network_adapter)):
+            edited_network_adapter = network_adapter
+        else:
+            log.debug("Changing type of '{0}' from '{1}' to '{2}'".format(network_adapter.deviceInfo.label, type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), adapter_type))
+    else:
+        # If type not specified or does not match, don't change adapter type
+        if adapter_type:
+            log.error("Cannot change type of '{0}' to '{1}'. Not changing type".format(network_adapter.deviceInfo.label, adapter_type))
+        edited_network_adapter = network_adapter
+
+    if switch_type == 'standard':
+        network_ref = _get_mor_by_property(vim.Network, new_network_name)
+        edited_network_adapter.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+        edited_network_adapter.backing.deviceName = new_network_name
+        edited_network_adapter.backing.network = network_ref
+    elif switch_type == 'distributed':
+        network_ref = _get_mor_by_property(vim.dvs.DistributedVirtualPortgroup, new_network_name)
+        dvs_port_connection = vim.dvs.PortConnection(
+            portgroupKey=network_ref.key,
+            switchUuid=network_ref.config.distributedVirtualSwitch.uuid
+        )
+        edited_network_adapter.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+        edited_network_adapter.backing.port = dvs_port_connection
+    else:
+        # If switch type not specified or does not match, show error and return
+        if not switch_type:
+            err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter.deviceInfo.label)
+        else:
+            err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format(network_adapter.deviceInfo.label, switch_type)
+        raise SaltCloudSystemExit(err_msg)
+
+    edited_network_adapter.key = network_adapter.key
+    edited_network_adapter.deviceInfo = network_adapter.deviceInfo
+    edited_network_adapter.deviceInfo.summary = new_network_name
+    edited_network_adapter.connectable = network_adapter.connectable
+    edited_network_adapter.slotInfo = network_adapter.slotInfo
+    edited_network_adapter.controllerKey = network_adapter.controllerKey
+    edited_network_adapter.unitNumber = network_adapter.unitNumber
+    edited_network_adapter.addressType = network_adapter.addressType
+    edited_network_adapter.macAddress = network_adapter.macAddress
+    edited_network_adapter.wakeOnLanEnabled = network_adapter.wakeOnLanEnabled
+    network_spec = vim.vm.device.VirtualDeviceSpec()
+    network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+    network_spec.device = edited_network_adapter
+
+    return network_spec
+
+
+def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type):
+    random_key = randint(-4099, -4000)
+
+    adapter_type = adapter_type.strip().lower()
+    switch_type = switch_type.strip().lower()
+    network_spec = vim.vm.device.VirtualDeviceSpec()
+
+    if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]:
+        network_spec.device = _get_network_adapter_type(adapter_type)
+    else:
+        # If type not specified or does not match, create adapter of type vmxnet3
+        if not adapter_type:
+            log.debug("The type of '{0}' has not been specified. Creating one of default type 'vmxnet3'".format(network_adapter_label))
+        else:
+            log.error("Cannot create network adapter of type '{0}'. Creating '{1}' of default type 'vmxnet3'".format(adapter_type, network_adapter_label))
+        network_spec.device = vim.vm.device.VirtualVmxnet3()
+
+    network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+    if switch_type == 'standard':
+        network_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+        network_spec.device.backing.deviceName = network_name
+        network_spec.device.backing.network = _get_mor_by_property(vim.Network, network_name)
+    elif switch_type == 'distributed':
+        network_ref = _get_mor_by_property(vim.dvs.DistributedVirtualPortgroup, network_name)
+        dvs_port_connection = vim.dvs.PortConnection(
+            portgroupKey=network_ref.key,
+            switchUuid=network_ref.config.distributedVirtualSwitch.uuid
+        )
+        network_spec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+        network_spec.device.backing.port = dvs_port_connection
+    else:
+        # If switch type not specified or does not match, show error and return
+        if not switch_type:
+            err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter_label)
+        else:
+            err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format(network_adapter_label, switch_type)
+        raise SaltCloudSystemExit(err_msg)
+
+    network_spec.device.key = random_key
+    network_spec.device.deviceInfo = vim.Description()
+    network_spec.device.deviceInfo.label = network_adapter_label
+    network_spec.device.deviceInfo.summary = network_name
+    network_spec.device.wakeOnLanEnabled = True
+    network_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+    network_spec.device.connectable.startConnected = True
+    network_spec.device.connectable.allowGuestControl = True
+
+    return network_spec
+
+
+def _edit_existing_scsi_adapter_helper(scsi_adapter, bus_sharing):
+    scsi_adapter.sharedBus = bus_sharing
+    scsi_spec = vim.vm.device.VirtualDeviceSpec()
+    scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+    scsi_spec.device = scsi_adapter
+
+    return scsi_spec
+
+
+def _add_new_scsi_adapter_helper(scsi_adapter_label, properties, bus_number):
+    random_key = randint(-1050, -1000)
+    adapter_type = properties['type'].strip().lower() if 'type' in properties else None
+    bus_sharing = properties['bus_sharing'].strip().lower() if 'bus_sharing' in properties else None
+
+    scsi_spec = vim.vm.device.VirtualDeviceSpec()
+
+    if adapter_type == "lsilogic":
+        summary = "LSI Logic"
+        scsi_spec.device = vim.vm.device.VirtualLsiLogicController()
+    elif adapter_type == "lsilogic_sas":
+        summary = "LSI Logic Sas"
+        scsi_spec.device = vim.vm.device.VirtualLsiLogicSASController()
+    elif adapter_type == "paravirtual":
+        summary = "VMware paravirtual SCSI"
+        scsi_spec.device = vim.vm.device.ParaVirtualSCSIController()
+    else:
+        # If type not specified or does not match, show error and return
+        if not adapter_type:
+            err_msg = "The type of '{0}' has not been specified".format(scsi_adapter_label)
+        else:
+            err_msg = "Cannot create '{0}'. Invalid/unsupported type '{1}'".format(scsi_adapter_label, adapter_type)
+        raise SaltCloudSystemExit(err_msg)
+
+    scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+    scsi_spec.device.key = random_key
+    scsi_spec.device.busNumber = bus_number
+    scsi_spec.device.deviceInfo = vim.Description()
+    scsi_spec.device.deviceInfo.label = scsi_adapter_label
+    scsi_spec.device.deviceInfo.summary = summary
+
+    if bus_sharing == "virtual":
+        # Virtual disks can be shared between virtual machines on the same server
+        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.virtualSharing
+
+    elif bus_sharing == "physical":
+        # Virtual disks can be shared between virtual machines on any server
+        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.physicalSharing
+
+    else:
+        # Virtual disks cannot be shared between virtual machines
+        scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
+
+    return scsi_spec
+
+
+def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path):
+    if device_type == "datastore_iso_file":
+        drive.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
+        drive.backing.fileName = iso_path
+
+        datastore = iso_path.partition('[')[-1].rpartition(']')[0]
+        datastore_ref = _get_mor_by_property(vim.Datastore, datastore)
+        if datastore_ref:
+            drive.backing.datastore = datastore_ref
+
+        drive.deviceInfo.summary = 'ISO {0}'.format(iso_path)
+
+    elif device_type == "client_device":
+        if mode == 'passthrough':
+            drive.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+            drive.deviceInfo.summary = 'Remote Device'
+        elif mode == 'atapi':
+            drive.backing = vim.vm.device.VirtualCdrom.RemoteAtapiBackingInfo()
+            drive.deviceInfo.summary = 'Remote ATAPI'
+
+    return drive
+
+
+def _edit_existing_cd_or_dvd_drive_helper(drive, device_type, mode, iso_path):
+    device_type = device_type.strip().lower()
+    mode = mode.strip().lower()
+
+    drive_spec = vim.vm.device.VirtualDeviceSpec()
+    drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+    drive_spec.device = _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path)
+
+    return drive_spec
+
+
+def _add_new_cd_or_dvd_drive_helper(drive_label, controller_key, device_type, mode, iso_path):
+    random_key = randint(-3025, -3000)
+
+    device_type = device_type.strip().lower()
+    mode = mode.strip().lower()
+
+    drive_spec = vim.vm.device.VirtualDeviceSpec()
+    drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+    drive_spec.device = vim.vm.device.VirtualCdrom()
+    drive_spec.device.deviceInfo = vim.Description()
+
+    if device_type in ['datastore_iso_file', 'client_device']:
+        drive_spec.device = _set_cd_or_dvd_backing_type(drive_spec.device, device_type, mode, iso_path)
+    else:
+        # If device_type not specified or does not match, create drive of Client type with Passthrough mode
+        if not device_type:
+            log.debug("The 'device_type' of '{0}' has not been specified. Creating one of default type 'client_device'".format(drive_label))
+        else:
+            log.error("Cannot create CD/DVD drive of type '{0}'. Creating '{1}' of default type 'client_device'".format(device_type, drive_label))
+        drive_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+        drive_spec.device.deviceInfo.summary = 'Remote Device'
+
+    drive_spec.device.key = random_key
+    drive_spec.device.deviceInfo.label = drive_label
+    drive_spec.device.controllerKey = controller_key
+    drive_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+    drive_spec.device.connectable.startConnected = True
+    drive_spec.device.connectable.allowGuestControl = True
+
+    return drive_spec
+
+
+def _set_network_adapter_mapping_helper(adapter_specs):
+    adapter_mapping = vim.vm.customization.AdapterMapping()
+    adapter_mapping.adapter = vim.vm.customization.IPSettings()
+
+    if 'domain' in list(adapter_specs.keys()):
+        domain = adapter_specs['domain']
+        adapter_mapping.adapter.dnsDomain = domain
+    if 'gateway' in list(adapter_specs.keys()):
+        gateway = adapter_specs['gateway']
+        adapter_mapping.adapter.gateway = gateway
+    if 'ip' in list(adapter_specs.keys()):
+        ip = str(adapter_specs['ip'])
+        subnet_mask = str(adapter_specs['subnet_mask'])
+        adapter_mapping.adapter.ip = vim.vm.customization.FixedIp(ipAddress=ip)
+        adapter_mapping.adapter.subnetMask = subnet_mask
+    else:
+        adapter_mapping.adapter.ip = vim.vm.customization.DhcpIpGenerator()
+
+    return adapter_mapping
+
+
+def _manage_devices(devices, vm):
+    unit_number = 0
+    bus_number = 0
+    device_specs = []
+    existing_disks_label = []
+    existing_scsi_adapters_label = []
+    existing_network_adapters_label = []
+    existing_cd_drives_label = []
+    ide_controllers = {}
+    nics_map = []
+
+    # loop through all the devices the vm/template has
+    # check if the device needs to be created or configured
+    for device in vm.config.hardware.device:
+        if isinstance(device, vim.vm.device.VirtualDisk):
+            # this is a hard disk
+            if 'disk' in list(devices.keys()):
+                # there is at least one disk specified to be created/configured
+                unit_number += 1
+                existing_disks_label.append(device.deviceInfo.label)
+                if device.deviceInfo.label in list(devices['disk'].keys()):
+                    size_gb = float(devices['disk'][device.deviceInfo.label]['size'])
+                    size_kb = int(size_gb * 1024.0 * 1024.0)
+                    if device.capacityInKB < size_kb:
+                        # expand the disk
+                        disk_spec = _edit_existing_hard_disk_helper(device, size_kb)
+                        device_specs.append(disk_spec)
+
+        elif isinstance(device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo) or isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
+            # this is a network adapter
+            if 'network' in list(devices.keys()):
+                # there is at least one network adapter specified to be created/configured
+                existing_network_adapters_label.append(device.deviceInfo.label)
+                if device.deviceInfo.label in list(devices['network'].keys()):
+                    network_name = devices['network'][device.deviceInfo.label]['name']
+                    adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else ''
+                    switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else ''
+                    network_spec = _edit_existing_network_adapter_helper(device, network_name, adapter_type, switch_type)
+                    adapter_mapping = _set_network_adapter_mapping_helper(devices['network'][device.deviceInfo.label])
+                    device_specs.append(network_spec)
+                    nics_map.append(adapter_mapping)
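+        # SCSI controllers are matched by the presence of the
+        # scsiCtlrUnitNumber attribute rather than an isinstance() check,
+        # which covers every controller subtype (LSI Logic, LSI Logic SAS,
+        # paravirtual) in a single branch.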
+        elif hasattr(device, 'scsiCtlrUnitNumber'):
+            # this is a scsi adapter
+            if 'scsi' in list(devices.keys()):
+                # there is at least one scsi adapter specified to be created/configured
+                bus_number += 1
+                existing_scsi_adapters_label.append(device.deviceInfo.label)
+                if device.deviceInfo.label in list(devices['scsi'].keys()):
+                    # Modify the existing SCSI adapter
+                    scsi_adapter_properties = devices['scsi'][device.deviceInfo.label]
+                    bus_sharing = scsi_adapter_properties['bus_sharing'].strip().lower() if 'bus_sharing' in scsi_adapter_properties else None
+                    if bus_sharing and bus_sharing in ['virtual', 'physical', 'no']:
+                        bus_sharing = '{0}Sharing'.format(bus_sharing)
+                        if bus_sharing != device.sharedBus:
+                            # Only edit the SCSI adapter if bus_sharing is different
+                            scsi_spec = _edit_existing_scsi_adapter_helper(device, bus_sharing)
+                            device_specs.append(scsi_spec)
+
+        elif isinstance(device, vim.vm.device.VirtualCdrom):
+            # this is a cd/dvd drive
+            if 'cd' in list(devices.keys()):
+                # there is at least one cd/dvd drive specified to be created/configured
+                existing_cd_drives_label.append(device.deviceInfo.label)
+                if device.deviceInfo.label in list(devices['cd'].keys()):
+                    device_type = devices['cd'][device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][device.deviceInfo.label] else ''
+                    mode = devices['cd'][device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][device.deviceInfo.label] else ''
+                    iso_path = devices['cd'][device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][device.deviceInfo.label] else ''
+                    cd_drive_spec = _edit_existing_cd_or_dvd_drive_helper(device, device_type, mode, iso_path)
+                    device_specs.append(cd_drive_spec)
+
+        elif isinstance(device, vim.vm.device.VirtualIDEController):
+            # this is a controller to add new cd drives to
+            ide_controllers[device.key] = len(device.device)
+
+    if 'disk' in list(devices.keys()):
+        disks_to_create = list(set(devices['disk'].keys()) - set(existing_disks_label))
+        disks_to_create.sort()
+        log.debug("Hard disks to create: {0}".format(disks_to_create))
+        for disk_label in disks_to_create:
+            # create the disk
+            size_gb = float(devices['disk'][disk_label]['size'])
+            disk_spec = _add_new_hard_disk_helper(disk_label, size_gb, unit_number)
+            device_specs.append(disk_spec)
+            unit_number += 1
+
+    if 'network' in list(devices.keys()):
+        network_adapters_to_create = list(set(devices['network'].keys()) - set(existing_network_adapters_label))
+        network_adapters_to_create.sort()
+        log.debug("Network adapters to create: {0}".format(network_adapters_to_create))
+        for network_adapter_label in network_adapters_to_create:
+            network_name = devices['network'][network_adapter_label]['name']
+            adapter_type = devices['network'][network_adapter_label]['adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else ''
+            switch_type = devices['network'][network_adapter_label]['switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else ''
+            # create the network adapter
+            network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type)
+            adapter_mapping = _set_network_adapter_mapping_helper(devices['network'][network_adapter_label])
+            device_specs.append(network_spec)
+            nics_map.append(adapter_mapping)
+
+    if 'scsi' in list(devices.keys()):
+        scsi_adapters_to_create = list(set(devices['scsi'].keys()) - set(existing_scsi_adapters_label))
+        scsi_adapters_to_create.sort()
+        log.debug("SCSI devices to create: {0}".format(scsi_adapters_to_create))
+        for scsi_adapter_label in scsi_adapters_to_create:
+            # create the scsi adapter
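+            # The new adapter lands on the next free bus: bus_number was
+            # incremented once per SCSI adapter already present on the VM,
+            # and is bumped again after each adapter added here.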
+            scsi_adapter_properties = devices['scsi'][scsi_adapter_label]
+            scsi_spec = _add_new_scsi_adapter_helper(scsi_adapter_label, scsi_adapter_properties, bus_number)
+            device_specs.append(scsi_spec)
+            bus_number += 1
+
+    if 'cd' in list(devices.keys()):
+        cd_drives_to_create = list(set(devices['cd'].keys()) - set(existing_cd_drives_label))
+        cd_drives_to_create.sort()
+        log.debug("CD/DVD drives to create: {0}".format(cd_drives_to_create))
+        for cd_drive_label in cd_drives_to_create:
+            # create the CD/DVD drive
+            device_type = devices['cd'][cd_drive_label]['device_type'] if 'device_type' in devices['cd'][cd_drive_label] else ''
+            mode = devices['cd'][cd_drive_label]['mode'] if 'mode' in devices['cd'][cd_drive_label] else ''
+            iso_path = devices['cd'][cd_drive_label]['iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else ''
+            for ide_controller_key, num_devices in six.iteritems(ide_controllers):
+                if num_devices < 2:
+                    controller_key = ide_controller_key
+                    break
+            else:
+                controller_key = None
+            if not controller_key:
+                log.error("No more available controllers for '{0}'. All IDE controllers are currently in use".format(cd_drive_label))
+            else:
+                cd_drive_spec = _add_new_cd_or_dvd_drive_helper(cd_drive_label, controller_key, device_type, mode, iso_path)
+                device_specs.append(cd_drive_spec)
+                ide_controllers[controller_key] += 1
+
+    ret = {
+        'device_specs': device_specs,
+        'nics_map': nics_map
+    }
+
+    return ret
+
+
+def _wait_for_vmware_tools(vm_ref, max_wait_minute):
+    time_counter = 0
+    starttime = time.time()
+    max_wait_second = int(max_wait_minute * 60)
+    while time_counter < max_wait_second:
+        if time_counter % 5 == 0:
+            log.info("[ {0} ] Waiting for VMware tools to be running [{1} s]".format(vm_ref.name, time_counter))
+        if str(vm_ref.summary.guest.toolsRunningStatus) == "guestToolsRunning":
+            log.info("[ {0} ] Successfully got VMware tools running on the guest in {1} seconds".format(vm_ref.name, time_counter))
+            return True
+
+        time.sleep(1.0 - ((time.time() - starttime) % 1.0))
+        time_counter += 1
+    log.warning("[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} minutes".format(vm_ref.name, max_wait_minute))
+    return False
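+
+
+# NOTE: these wait helpers sleep for the remainder of the current wall-clock
+# second -- time.sleep(1.0 - ((time.time() - starttime) % 1.0)) -- so each
+# poll stays aligned to one-second boundaries regardless of how long the
+# status check itself takes.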
+def _wait_for_ip(vm_ref, max_wait_minute):
+    max_wait_minute_vmware_tools = max_wait_minute - 5
+    max_wait_minute_ip = max_wait_minute - max_wait_minute_vmware_tools
+    vmware_tools_status = _wait_for_vmware_tools(vm_ref, max_wait_minute_vmware_tools)
+    if not vmware_tools_status:
+        return False
+
+    time_counter = 0
+    starttime = time.time()
+    max_wait_second = int(max_wait_minute_ip * 60)
+    while time_counter < max_wait_second:
+        if time_counter % 5 == 0:
+            log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter))
+
+        if vm_ref.summary.guest.ipAddress:
+            if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1':
+                log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
+                return vm_ref.summary.guest.ipAddress
+
+        for net in vm_ref.guest.net:
+            if net.ipConfig.ipAddress:
+                for current_ip in net.ipConfig.ipAddress:
+                    if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1':
+                        log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
+                        return current_ip.ipAddress
+        time.sleep(1.0 - ((time.time() - starttime) % 1.0))
+        time_counter += 1
+    log.warning("[ {0} ] Timeout Reached. Unable to retrieve IPv4 information after waiting for {1} minutes".format(vm_ref.name, max_wait_minute_ip))
+    return False
+
+
+def _wait_for_task(task, vm_name, task_type, sleep_seconds=1, log_level='debug'):
+    time_counter = 0
+    starttime = time.time()
+    while task.info.state == 'running':
+        if time_counter % sleep_seconds == 0:
+            message = "[ {0} ] Waiting for {1} task to finish [{2} s]".format(vm_name, task_type, time_counter)
+            if log_level == 'info':
+                log.info(message)
+            else:
+                log.debug(message)
+        time.sleep(1.0 - ((time.time() - starttime) % 1.0))
+        time_counter += 1
+    if task.info.state == 'success':
+        message = "[ {0} ] Successfully completed {1} task in {2} seconds".format(vm_name, task_type, time_counter)
+        if log_level == 'info':
+            log.info(message)
+        else:
+            log.debug(message)
+    else:
+        raise Exception(task.info.error)
+
+
+def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'):
+    time_counter = 0
+    starttime = time.time()
+    while host_ref.runtime.connectionState != 'notResponding':
+        if time_counter % sleep_seconds == 0:
+            message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter)
+            if log_level == 'info':
+                log.info(message)
+            else:
+                log.debug(message)
+        time.sleep(1.0 - ((time.time() - starttime) % 1.0))
+        time_counter += 1
+    while host_ref.runtime.connectionState != 'connected':
+        if time_counter % sleep_seconds == 0:
+            message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter)
+            if log_level == 'info':
+                log.info(message)
+            else:
+                log.debug(message)
+        time.sleep(1.0 - ((time.time() - starttime) % 1.0))
+        time_counter += 1
+    if host_ref.runtime.connectionState == 'connected':
+        message = "[ {0} ] Successfully completed host {1} in {2} seconds".format(host_ref.name, task_type, time_counter)
+        if log_level == 'info':
+            log.info(message)
+        else:
+            log.debug(message)
+    else:
+        log.error('Could not connect back to the host system')
+
+
+def _format_instance_info_select(vm, selection):
+    vm_select_info = {}
+
+    if 'id' in selection:
+        vm_select_info['id'] = vm["name"]
+
+    if 'image' in selection:
+        vm_select_info['image'] = "{0} (Detected)".format(vm["config.guestFullName"])
+
+    if 'size' in selection:
+        vm_select_info['size'] = u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"])
+
+    if 'state' in selection:
+        vm_select_info['state'] = str(vm["summary.runtime.powerState"])
+
+    if 'guest_id' in selection:
+        vm_select_info['guest_id'] = vm["config.guestId"]
+
+    if 'hostname' in selection:
+        vm_select_info['hostname'] = vm["object"].guest.hostName
+
+    if 'path' in selection:
+        vm_select_info['path'] = vm["config.files.vmPathName"]
+
+    if 'tools_status' in selection:
+        vm_select_info['tools_status'] = str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"
+
+    # Check whether any of the network-related fields were requested
+    if any(key in selection for key in ('private_ips', 'mac_address', 'networks')):
+        network_full_info = {}
+        ip_addresses = []
+        mac_addresses = []
+
+        for net in vm["guest.net"]:
+            network_full_info[net.network] = {
+                'connected': net.connected,
+                'ip_addresses': net.ipAddress,
+                'mac_address': net.macAddress
+            }
+            ip_addresses.extend(net.ipAddress)
+            mac_addresses.append(net.macAddress)
+
+        if 'private_ips' in selection:
+            vm_select_info['private_ips'] = ip_addresses
+
+        if 'mac_address' in selection:
+            vm_select_info['mac_address'] = mac_addresses
+
+        if 'networks' in selection:
+            vm_select_info['networks'] = network_full_info
+
+    if 'devices' in selection:
+        device_full_info = {}
+        for device in vm["config.hardware.device"]:
+            device_full_info[device.deviceInfo.label] = {
+                'key': device.key,
+                'label': device.deviceInfo.label,
+                'summary': device.deviceInfo.summary,
+                'type': type(device).__name__.rsplit(".", 1)[1],
+                'unitNumber': device.unitNumber
+            }
+
+            if hasattr(device.backing, 'network'):
+                device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
+                device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress
+
+            if hasattr(device, 'busNumber'):
+                device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
+
+            if hasattr(device, 'device'):
+                device_full_info[device.deviceInfo.label]['devices'] = device.device
+
+            if hasattr(device, 'videoRamSizeInKB'):
+                device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
+
+            if isinstance(device, vim.vm.device.VirtualDisk):
+                device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB
+                device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
+                device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
+
+        vm_select_info['devices'] = device_full_info
+
+    if 'storage' in selection:
+        storage_full_info = {
+            'committed': vm["summary.storage.committed"],
+            'uncommitted': vm["summary.storage.uncommitted"],
+            'unshared': vm["summary.storage.unshared"]
+        }
+        vm_select_info['storage'] = storage_full_info
+
+    if 'files' in selection:
+        file_full_info = {}
+        for file in vm["layoutEx.file"]:
+            file_full_info[file.key] = {
+                'key': file.key,
+                'name': file.name,
+                'size': file.size,
+                'type': file.type
+            }
+        vm_select_info['files'] = file_full_info
+
+    return vm_select_info
+
+
+def _format_instance_info(vm):
+    device_full_info = {}
+    for device in vm["config.hardware.device"]:
+        device_full_info[device.deviceInfo.label] = {
+            'key': device.key,
+            'label': device.deviceInfo.label,
+            'summary': 
device.deviceInfo.summary, + 'type': type(device).__name__.rsplit(".", 1)[1], + 'unitNumber': device.unitNumber + } + + if hasattr(device.backing, 'network'): + device_full_info[device.deviceInfo.label]['addressType'] = device.addressType + device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress + + if hasattr(device, 'busNumber'): + device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber + + if hasattr(device, 'device'): + device_full_info[device.deviceInfo.label]['devices'] = device.device + + if hasattr(device, 'videoRamSizeInKB'): + device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB + + if isinstance(device, vim.vm.device.VirtualDisk): + device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB + device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode + device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName + + storage_full_info = { + 'committed': int(vm["summary.storage.committed"]), + 'uncommitted': int(vm["summary.storage.uncommitted"]), + 'unshared': int(vm["summary.storage.unshared"]) + } + + file_full_info = {} + for file in vm["layoutEx.file"]: + file_full_info[file.key] = { + 'key': file.key, + 'name': file.name, + 'size': file.size, + 'type': file.type + } + + network_full_info = {} + ip_addresses = [] + mac_addresses = [] + for net in vm["guest.net"]: + network_full_info[net.network] = { + 'connected': net.connected, + 'ip_addresses': net.ipAddress, + 'mac_address': net.macAddress + } + ip_addresses.extend(net.ipAddress) + mac_addresses.append(net.macAddress) + + vm_full_info = { + 'id': str(vm['name']), + 'image': "{0} (Detected)".format(vm["config.guestFullName"]), + 'size': u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"]), + 'state': str(vm["summary.runtime.powerState"]), + 'private_ips': ip_addresses, + 'public_ips': [], + 'devices': device_full_info, + 'storage': storage_full_info, + 'files': file_full_info, + 'guest_id': str(vm["config.guestId"]), + 'hostname': str(vm["object"].guest.hostName), + 'mac_address': mac_addresses, + 'networks': network_full_info, + 'path': str(vm["config.files.vmPathName"]), + 'tools_status': str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" + } + + return vm_full_info + + +def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""): + snapshots = {} + for snapshot in snapshot_list: + snapshot_path = "{0}/{1}".format(parent_snapshot_path, snapshot.name) + snapshots[snapshot_path] = { + 'name': snapshot.name, + 'description': snapshot.description, + 'created': str(snapshot.createTime).split('.')[0], + 'state': snapshot.state, + 'path': snapshot_path, + } + + if current_snapshot and current_snapshot == snapshot.snapshot: + return snapshots[snapshot_path] + + # Check if child snapshots exist + if snapshot.childSnapshotList: + ret = _get_snapshots(snapshot.childSnapshotList, current_snapshot, snapshot_path) + if current_snapshot: + return ret + snapshots.update(ret) + + return snapshots + + +def _upg_tools_helper(vm, reboot=False): + # Exit if template + if vm.config.template: + status = 'VMware tools cannot be updated on a template' + return status + + # Exit if VMware tools is already up to date + if vm.guest.toolsStatus == "toolsOk": + status = 'VMware tools is already up to date' + return status + + # Exit if VM is not powered on + if vm.summary.runtime.powerState != "poweredOn": + status = 'VM must be powered on 
to upgrade tools' + return status + + # Exit if VMware tools is either not running or not installed + if vm.guest.toolsStatus in ["toolsNotRunning", "toolsNotInstalled"]: + status = 'VMware tools is either not running or not installed' + return status + + # If vmware tools is out of date, check major OS family + # Upgrade tools on Linux and Windows guests + if vm.guest.toolsStatus == "toolsOld": + log.info('Upgrading VMware tools on {0}'.format(vm.name)) + try: + if vm.guest.guestFamily == "windowsGuest" and not reboot: + log.info('Reboot suppressed on {0}'.format(vm.name)) + task = vm.UpgradeTools('/S /v"/qn REBOOT=R"') + elif vm.guest.guestFamily in ["linuxGuest", "windowsGuest"]: + task = vm.UpgradeTools() + else: + status = 'Only Linux and Windows guests are currently supported' + return status + _wait_for_task(task, vm.name, "tools upgrade", 5, "info") + except Exception as exc: + log.error( + 'Error while upgrading VMware tools on VM {0}: {1}'.format( + vm.name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + status = 'VMware tools upgrade failed' + return status + status = 'VMware tools upgrade succeeded' + return status + + return 'VMware tools could not be upgraded' + + +def _get_hba_type(hba_type): + if hba_type == "parallel": + return vim.host.ParallelScsiHba + elif hba_type == "block": + return vim.host.BlockHba + elif hba_type == "iscsi": + return vim.host.InternetScsiHba + elif hba_type == "fibre": + return vim.host.FibreChannelHba + + +def test_vcenter_connection(kwargs=None, call=None): + ''' + Test if the connection can be made to the vCenter server using + the specified credentials inside ``/etc/salt/cloud.providers`` + or ``/etc/salt/cloud.providers.d/vmware.conf`` + + CLI Example: + + .. code-block:: bash + + salt-cloud -f test_vcenter_connection my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The test_vcenter_connection function must be called with ' + '-f or --function.' + ) + + try: + # Get the service instance object + si = _get_si() + except Exception as exc: + return 'failed to connect: {0}'.format(exc) + + return 'connection successful' + + +def get_vcenter_version(kwargs=None, call=None): + ''' + Show the vCenter Server version with build number. + + CLI Example: + + .. code-block:: bash + + salt-cloud -f get_vcenter_version my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The get_vcenter_version function must be called with ' + '-f or --function.' + ) + + # Get the inventory + inv = _get_inv() + + return inv.about.fullName + + +def list_datacenters(kwargs=None, call=None): + ''' + List all the data centers for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_datacenters my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_datacenters function must be called with ' + '-f or --function.' + ) + + datacenters = [] + datacenter_properties = ["name"] + + datacenter_list = _get_mors_with_properties(vim.Datacenter, datacenter_properties) + + for datacenter in datacenter_list: + datacenters.append(datacenter["name"]) + + return {'Datacenters': datacenters} + + +def list_clusters(kwargs=None, call=None): + ''' + List all the clusters for this VMware environment + + CLI Example: + + .. 
code-block:: bash + + salt-cloud -f list_clusters my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_clusters function must be called with ' + '-f or --function.' + ) + + clusters = [] + cluster_properties = ["name"] + + cluster_list = _get_mors_with_properties(vim.ClusterComputeResource, cluster_properties) + + for cluster in cluster_list: + clusters.append(cluster["name"]) + + return {'Clusters': clusters} + + +def list_datastore_clusters(kwargs=None, call=None): + ''' + List all the datastore clusters for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_datastore_clusters my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_datastore_clusters function must be called with ' + '-f or --function.' + ) + + datastore_clusters = [] + datastore_cluster_properties = ["name"] + + datastore_cluster_list = _get_mors_with_properties(vim.StoragePod, datastore_cluster_properties) + + for datastore_cluster in datastore_cluster_list: + datastore_clusters.append(datastore_cluster["name"]) + + return {'Datastore Clusters': datastore_clusters} + + +def list_datastores(kwargs=None, call=None): + ''' + List all the datastores for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_datastores my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_datastores function must be called with ' + '-f or --function.' + ) + + datastores = [] + datastore_properties = ["name"] + + datastore_list = _get_mors_with_properties(vim.Datastore, datastore_properties) + + for datastore in datastore_list: + datastores.append(datastore["name"]) + + return {'Datastores': datastores} + + +def list_hosts(kwargs=None, call=None): + ''' + List all the hosts for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hosts my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_hosts function must be called with ' + '-f or --function.' + ) + + hosts = [] + host_properties = ["name"] + + host_list = _get_mors_with_properties(vim.HostSystem, host_properties) + + for host in host_list: + hosts.append(host["name"]) + + return {'Hosts': hosts} + + +def list_resourcepools(kwargs=None, call=None): + ''' + List all the resource pools for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_resourcepools my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_resourcepools function must be called with ' + '-f or --function.' + ) + + resource_pools = [] + resource_pool_properties = ["name"] + + resource_pool_list = _get_mors_with_properties(vim.ResourcePool, resource_pool_properties) + + for resource_pool in resource_pool_list: + resource_pools.append(resource_pool["name"]) + + return {'Resource Pools': resource_pools} + + +def list_networks(kwargs=None, call=None): + ''' + List all the standard networks for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_networks my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_networks function must be called with ' + '-f or --function.' 
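+            # Note: in pyVmomi, vim.dvs.DistributedVirtualPortgroup is a
+            # subclass of vim.Network, so the query below may also return
+            # distributed portgroups alongside standard port groups.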
+ ) + + networks = [] + network_properties = ["name"] + + network_list = _get_mors_with_properties(vim.Network, network_properties) + + for network in network_list: + networks.append(network["name"]) + + return {'Networks': networks} + + +def list_nodes_min(kwargs=None, call=None): + ''' + Return a list of all VMs and templates that are on the specified provider, with no details + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_nodes_min my-vmware-config + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The list_nodes_min function must be called ' + 'with -f or --function.' + ) + + ret = {} + vm_properties = ["name"] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + ret[vm["name"]] = True + + return ret + + +def list_nodes(kwargs=None, call=None): + ''' + Return a list of all VMs and templates that are on the specified provider, with basic fields + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_nodes my-vmware-config + + To return a list of all VMs and templates present on ALL configured providers, with basic + fields: + + CLI Example: + + .. code-block:: bash + + salt-cloud -Q + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The list_nodes function must be called ' + 'with -f or --function.' + ) + + ret = {} + vm_properties = [ + "name", + "guest.ipAddress", + "config.guestFullName", + "config.hardware.numCPU", + "config.hardware.memoryMB", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + vm_info = { + 'id': vm["name"], + 'image': "{0} (Detected)".format(vm["config.guestFullName"]), + 'size': u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"]), + 'state': str(vm["summary.runtime.powerState"]), + 'private_ips': [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], + 'public_ips': [] + } + ret[vm_info['id']] = vm_info + + return ret + + +def list_nodes_full(kwargs=None, call=None): + ''' + Return a list of all VMs and templates that are on the specified provider, with full details + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_nodes_full my-vmware-config + + To return a list of all VMs and templates present on ALL configured providers, with full + details: + + CLI Example: + + .. code-block:: bash + + salt-cloud -F + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The list_nodes_full function must be called ' + 'with -f or --function.' + ) + + ret = {} + vm_properties = [ + "config.hardware.device", + "summary.storage.committed", + "summary.storage.uncommitted", + "summary.storage.unshared", + "layoutEx.file", + "config.guestFullName", + "config.guestId", + "guest.net", + "config.hardware.memoryMB", + "name", + "config.hardware.numCPU", + "config.files.vmPathName", + "summary.runtime.powerState", + "guest.toolsStatus" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + ret[vm["name"]] = _format_instance_info(vm) + + return ret + + +def list_nodes_select(call=None): + ''' + Return a list of all VMs and templates that are on the specified provider, with fields + specified under ``query.selection`` in ``/etc/salt/cloud`` + + CLI Example: + + .. 
code-block:: bash
+
+        salt-cloud -f list_nodes_select my-vmware-config
+
+    To return a list of all VMs and templates present on ALL configured providers, with
+    fields specified under ``query.selection`` in ``/etc/salt/cloud``:
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -S
+    '''
+    if call == 'action':
+        raise SaltCloudSystemExit(
+            'The list_nodes_select function must be called '
+            'with -f or --function.'
+        )
+
+    ret = {}
+    vm_properties = []
+    selection = __opts__.get('query.selection')
+
+    if not selection:
+        raise SaltCloudSystemExit(
+            'query.selection not found in /etc/salt/cloud'
+        )
+
+    if 'id' in selection:
+        vm_properties.append("name")
+
+    if 'image' in selection:
+        vm_properties.append("config.guestFullName")
+
+    if 'size' in selection:
+        vm_properties.extend(["config.hardware.numCPU", "config.hardware.memoryMB"])
+
+    if 'state' in selection:
+        vm_properties.append("summary.runtime.powerState")
+
+    # private_ips, mac_address and networks are all derived from guest.net
+    if any(prop in selection for prop in ('private_ips', 'mac_address', 'networks')):
+        vm_properties.append("guest.net")
+
+    if 'devices' in selection:
+        vm_properties.append("config.hardware.device")
+
+    if 'storage' in selection:
+        vm_properties.extend([
+            "config.hardware.device",
+            "summary.storage.committed",
+            "summary.storage.uncommitted",
+            "summary.storage.unshared"
+        ])
+
+    if 'files' in selection:
+        vm_properties.append("layoutEx.file")
+
+    if 'guest_id' in selection:
+        vm_properties.append("config.guestId")
+
+    if 'hostname' in selection:
+        vm_properties.append("guest.hostName")
+
+    if 'path' in selection:
+        vm_properties.append("config.files.vmPathName")
+
+    if 'tools_status' in selection:
+        vm_properties.append("guest.toolsStatus")
+
+    if not vm_properties:
+        return {}
+    elif 'name' not in vm_properties:
+        vm_properties.append("name")
+
+    vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties)
+
+    for vm in vm_list:
+        ret[vm["name"]] = _format_instance_info_select(vm, selection)
+    return ret
+
+
+def show_instance(name, call=None):
+    '''
+    List all available details of the specified VM
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -a show_instance vmname
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The show_instance action must be called with '
+            '-a or --action.'
+        )
+
+    vm_properties = [
+        "config.hardware.device",
+        "summary.storage.committed",
+        "summary.storage.uncommitted",
+        "summary.storage.unshared",
+        "layoutEx.file",
+        "config.guestFullName",
+        "config.guestId",
+        "guest.net",
+        "config.hardware.memoryMB",
+        "name",
+        "config.hardware.numCPU",
+        "config.files.vmPathName",
+        "summary.runtime.powerState",
+        "guest.toolsStatus"
+    ]
+
+    vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties)
+
+    for vm in vm_list:
+        if vm['name'] == name:
+            return _format_instance_info(vm)
+
+
+def avail_images():
+    '''
+    Return a list of all the templates present in this VMware environment with basic
+    details
+
+    CLI Example:
+
+    ..
code-block:: bash + + salt-cloud --list-images my-vmware-config + ''' + + templates = {} + vm_properties = [ + "name", + "config.template", + "config.guestFullName", + "config.hardware.numCPU", + "config.hardware.memoryMB" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["config.template"]: + templates[vm["name"]] = { + 'name': vm["name"], + 'guest_fullname': vm["config.guestFullName"], + 'cpus': vm["config.hardware.numCPU"], + 'ram': vm["config.hardware.memoryMB"] + } + + return templates + + +def list_folders(kwargs=None, call=None): + ''' + List all the folders for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_folders my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_folders function must be called with ' + '-f or --function.' + ) + + folders = [] + folder_properties = ["name"] + + folder_list = _get_mors_with_properties(vim.Folder, folder_properties) + + for folder in folder_list: + folders.append(folder["name"]) + + return {'Folders': folders} + + +def list_snapshots(kwargs=None, call=None): + ''' + List snapshots either for all VMs and templates or for a specific VM/template + in this VMware environment + + To list snapshots for all VMs and templates: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_snapshots my-vmware-config + + To list snapshots for a specific VM/template: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_snapshots my-vmware-config name="vmname" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_snapshots function must be called with ' + '-f or --function.' + ) + + ret = {} + vm_properties = [ + "name", + "rootSnapshot", + "snapshot" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["rootSnapshot"]: + if kwargs and kwargs.get('name') == vm["name"]: + return {vm["name"]: _get_snapshots(vm["snapshot"].rootSnapshotList)} + else: + ret[vm["name"]] = _get_snapshots(vm["snapshot"].rootSnapshotList) + + return ret + + +def start(name, call=None): + ''' + To start/power on a VM using its name + + CLI Example: + + .. code-block:: bash + + salt-cloud -a start vmname + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The start action must be called with ' + '-a or --action.' + ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] == "poweredOn": + ret = 'already powered on' + log.info('VM {0} {1}'.format(name, ret)) + return ret + try: + log.info('Starting VM {0}'.format(name)) + task = vm["object"].PowerOn() + _wait_for_task(task, name, "power on") + except Exception as exc: + log.error( + 'Error while powering on VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to power on' + + return 'powered on' + + +def stop(name, call=None): + ''' + To stop/power off a VM using its name + + CLI Example: + + .. code-block:: bash + + salt-cloud -a stop vmname + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The stop action must be called with ' + '-a or --action.' 
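+            # Note: PowerOff() below is a hard power-off at the hypervisor
+            # level; it does not attempt a graceful guest OS shutdown. The
+            # vSphere API's ShutdownGuest() would be the soft alternative,
+            # but it requires running VMware Tools and returns no task to
+            # wait on.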
+ ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] == "poweredOff": + ret = 'already powered off' + log.info('VM {0} {1}'.format(name, ret)) + return ret + try: + log.info('Stopping VM {0}'.format(name)) + task = vm["object"].PowerOff() + _wait_for_task(task, name, "power off") + except Exception as exc: + log.error( + 'Error while powering off VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to power off' + + return 'powered off' + + +def suspend(name, call=None): + ''' + To suspend a VM using its name + + CLI Example: + + .. code-block:: bash + + salt-cloud -a suspend vmname + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The suspend action must be called with ' + '-a or --action.' + ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] == "poweredOff": + ret = 'cannot suspend in powered off state' + log.info('VM {0} {1}'.format(name, ret)) + return ret + elif vm["summary.runtime.powerState"] == "suspended": + ret = 'already suspended' + log.info('VM {0} {1}'.format(name, ret)) + return ret + try: + log.info('Suspending VM {0}'.format(name)) + task = vm["object"].Suspend() + _wait_for_task(task, name, "suspend") + except Exception as exc: + log.error( + 'Error while suspending VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to suspend' + + return 'suspended' + + +def reset(name, call=None): + ''' + To reset a VM using its name + + CLI Example: + + .. code-block:: bash + + salt-cloud -a reset vmname + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The reset action must be called with ' + '-a or --action.' + ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] == "suspended" or vm["summary.runtime.powerState"] == "poweredOff": + ret = 'cannot reset in suspended/powered off state' + log.info('VM {0} {1}'.format(name, ret)) + return ret + try: + log.info('Resetting VM {0}'.format(name)) + task = vm["object"].Reset() + _wait_for_task(task, name, "reset") + except Exception as exc: + log.error( + 'Error while resetting VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to reset' + + return 'reset' + + +def terminate(name, call=None): + ''' + To do an immediate power off of a VM using its name. A ``SIGKILL`` + is issued to the vmx process of the VM + + CLI Example: + + .. code-block:: bash + + salt-cloud -a terminate vmname + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The terminate action must be called with ' + '-a or --action.' 
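+            # Note: unlike PowerOff(), the Terminate() call below kills the
+            # VM's vmx process directly and returns no task object, which is
+            # why there is no accompanying _wait_for_task() call.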
+ ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] == "poweredOff": + ret = 'already powered off' + log.info('VM {0} {1}'.format(name, ret)) + return ret + try: + log.info('Terminating VM {0}'.format(name)) + vm["object"].Terminate() + except Exception as exc: + log.error( + 'Error while terminating VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to terminate' + + return 'terminated' + + +def destroy(name, call=None): + ''' + To destroy a VM from the VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -d vmname + salt-cloud --destroy vmname + salt-cloud -a destroy vmname + ''' + if call == 'function': + raise SaltCloudSystemExit( + 'The destroy action must be called with -d, --destroy, ' + '-a or --action.' + ) + + salt.utils.cloud.fire_event( + 'event', + 'destroying instance', + 'salt/cloud/{0}/destroying'.format(name), + {'name': name}, + transport=__opts__['transport'] + ) + + vm_properties = [ + "name", + "summary.runtime.powerState" + ] + + vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties) + + for vm in vm_list: + if vm["name"] == name: + if vm["summary.runtime.powerState"] != "poweredOff": + #Power off the vm first + try: + log.info('Powering Off VM {0}'.format(name)) + task = vm["object"].PowerOff() + _wait_for_task(task, name, "power off") + except Exception as exc: + log.error( + 'Error while powering off VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to destroy' + try: + log.info('Destroying VM {0}'.format(name)) + task = vm["object"].Destroy_Task() + _wait_for_task(task, name, "destroy") + except Exception as exc: + log.error( + 'Error while destroying VM {0}: {1}'.format( + name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return 'failed to destroy' + + salt.utils.cloud.fire_event( + 'event', + 'destroyed instance', + 'salt/cloud/{0}/destroyed'.format(name), + {'name': name}, + transport=__opts__['transport'] + ) + if __opts__.get('update_cachedir', False) is True: + salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__) + + return True + + +def create(vm_): + ''' + To create a single VM in the VMware environment. + + Sample profile and arguments that can be specified in it can be found + :ref:`here. ` + + CLI Example: + + .. 
code-block:: bash + + salt-cloud -p vmware-centos6.5 vmname + ''' + salt.utils.cloud.fire_event( + 'event', + 'starting create', + 'salt/cloud/{0}/creating'.format(vm_['name']), + { + 'name': vm_['name'], + 'profile': vm_['profile'], + 'provider': vm_['provider'], + }, + transport=__opts__['transport'] + ) + + vm_name = config.get_cloud_config_value( + 'name', vm_, __opts__, default=None + ) + folder = config.get_cloud_config_value( + 'folder', vm_, __opts__, default=None + ) + datacenter = config.get_cloud_config_value( + 'datacenter', vm_, __opts__, default=None + ) + resourcepool = config.get_cloud_config_value( + 'resourcepool', vm_, __opts__, default=None + ) + cluster = config.get_cloud_config_value( + 'cluster', vm_, __opts__, default=None + ) + datastore = config.get_cloud_config_value( + 'datastore', vm_, __opts__, default=None + ) + host = config.get_cloud_config_value( + 'host', vm_, __opts__, default=None + ) + template = config.get_cloud_config_value( + 'template', vm_, __opts__, default=False + ) + num_cpus = config.get_cloud_config_value( + 'num_cpus', vm_, __opts__, default=None + ) + memory = config.get_cloud_config_value( + 'memory', vm_, __opts__, default=None + ) + devices = config.get_cloud_config_value( + 'devices', vm_, __opts__, default=None + ) + extra_config = config.get_cloud_config_value( + 'extra_config', vm_, __opts__, default=None + ) + power = config.get_cloud_config_value( + 'power_on', vm_, __opts__, default=True + ) + key_filename = config.get_cloud_config_value( + 'private_key', vm_, __opts__, search_global=False, default=None + ) + deploy = config.get_cloud_config_value( + 'deploy', vm_, __opts__, search_global=False, default=True + ) + domain = config.get_cloud_config_value( + 'domain', vm_, __opts__, search_global=False, default='local' + ) + + if 'clonefrom' in vm_: + # Clone VM/template from specified VM/template + object_ref = _get_mor_by_property(vim.VirtualMachine, vm_['clonefrom']) + if object_ref: + clone_type = "template" if object_ref.config.template else "vm" + else: + raise SaltCloudSystemExit( + 'The VM/template that you have specified under clonefrom does not exist.' + ) + + # Either a cluster, or a resource pool must be specified when cloning from template. + if resourcepool: + resourcepool_ref = _get_mor_by_property(vim.ResourcePool, resourcepool) + if not resourcepool_ref: + log.error("Specified resource pool: '{0}' does not exist".format(resourcepool)) + if clone_type == "template": + raise SaltCloudSystemExit('You must specify a resource pool that exists.') + elif cluster: + cluster_ref = _get_mor_by_property(vim.ClusterComputeResource, cluster) + if not cluster_ref: + log.error("Specified cluster: '{0}' does not exist".format(cluster)) + if clone_type == "template": + raise SaltCloudSystemExit('You must specify a cluster that exists.') + else: + resourcepool_ref = cluster_ref.resourcePool + elif clone_type == "template": + raise SaltCloudSystemExit( + 'You must either specify a cluster or a resource pool when cloning from a template.' + ) + else: + log.debug("Using resource pool used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + + # Either a datacenter or a folder can be optionally specified + # If not specified, the existing VM/template\'s parent folder is used. 
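+        # Resolution order for the checks below: an explicitly configured
+        # folder wins, then the vmFolder of an explicitly configured
+        # datacenter, and otherwise the parent folder of the clone source.
+        # For illustration (hypothetical names): if clonefrom 'test-vm'
+        # lives in folder 'Templates' and the profile sets neither 'folder'
+        # nor 'datacenter', the new VM is created in 'Templates' as well.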
+ if folder: + folder_ref = _get_mor_by_property(vim.Folder, folder) + if not folder_ref: + log.error("Specified folder: '{0}' does not exist".format(folder)) + log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + folder_ref = object_ref.parent + elif datacenter: + datacenter_ref = _get_mor_by_property(vim.Datacenter, datacenter) + if not datacenter_ref: + log.error("Specified datacenter: '{0}' does not exist".format(datacenter)) + log.debug("Using datacenter folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + folder_ref = object_ref.parent + else: + folder_ref = datacenter_ref.vmFolder + else: + log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + folder_ref = object_ref.parent + + # Create the relocation specs + reloc_spec = vim.vm.RelocateSpec() + + if (resourcepool and resourcepool_ref) or (cluster and cluster_ref): + reloc_spec.pool = resourcepool_ref + + # Either a datastore/datastore cluster can be optionally specified. + # If not specified, the current datastore is used. + if datastore: + datastore_ref = _get_mor_by_property(vim.Datastore, datastore) + if datastore_ref: + # specific datastore has been specified + reloc_spec.datastore = datastore_ref + else: + datastore_cluster_ref = _get_mor_by_property(vim.StoragePod, datastore) + if not datastore_cluster_ref: + log.error("Specified datastore/datastore cluster: '{0}' does not exist".format(datastore)) + log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + else: + log.debug("No datastore/datastore cluster specified") + log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + + if host: + host_ref = _get_mor_by_property(vim.HostSystem, host) + if host_ref: + reloc_spec.host = host_ref + else: + log.error("Specified host: '{0}' does not exist".format(host)) + + # Create the config specs + config_spec = vim.vm.ConfigSpec() + + if num_cpus: + log.debug("Setting cpu to: {0}".format(num_cpus)) + config_spec.numCPUs = int(num_cpus) + + if memory: + try: + memory_num, memory_unit = findall(r"[^\W\d_]+|\d+.\d+|\d+", memory) + if memory_unit.lower() == "mb": + memory_mb = int(memory_num) + elif memory_unit.lower() == "gb": + memory_mb = int(float(memory_num)*1024.0) + else: + err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) + log.error(err_msg) + return {'Error': err_msg} + except (TypeError, ValueError): + memory_mb = int(memory) + log.debug("Setting memory to: {0} MB".format(memory_mb)) + config_spec.memoryMB = memory_mb + + if devices: + specs = _manage_devices(devices, object_ref) + config_spec.deviceChange = specs['device_specs'] + + if extra_config: + for key, value in six.iteritems(extra_config): + option = vim.option.OptionValue(key=key, value=value) + config_spec.extraConfig.append(option) + + # Create the clone specs + clone_spec = vim.vm.CloneSpec( + template=template, + location=reloc_spec, + config=config_spec + ) + + if devices and 'network' in list(devices.keys()): + if "Windows" not in object_ref.config.guestFullName: + global_ip = vim.vm.customization.GlobalIPSettings() + if 'dns_servers' in list(vm_.keys()): + global_ip.dnsServerList = vm_['dns_servers'] + + identity = vim.vm.customization.LinuxPrep() + hostName = vm_name.split('.')[0] + domainName = vm_name.split('.', 1)[-1] + identity.hostName = vim.vm.customization.FixedName(name=hostName) + identity.domain = domainName if hostName != domainName else domain + + custom_spec = 
vim.vm.customization.Specification(
+                globalIPSettings=global_ip,
+                identity=identity,
+                nicSettingMap=specs['nics_map']
+            )
+            clone_spec.customization = custom_spec
+
+        if not template:
+            clone_spec.powerOn = power
+
+        log.debug('clone_spec set to:\n{0}'.format(
+            pprint.pformat(clone_spec))
+        )
+
+        try:
+            log.info("Creating {0} from {1} ({2})".format(vm_['name'], clone_type, vm_['clonefrom']))
+            salt.utils.cloud.fire_event(
+                'event',
+                'requesting instance',
+                'salt/cloud/{0}/requesting'.format(vm_['name']),
+                {'kwargs': vm_},
+                transport=__opts__['transport']
+            )
+
+            if datastore and not datastore_ref and datastore_cluster_ref:
+                # A datastore cluster has been specified, so apply Storage DRS recommendations
+                pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref)
+
+                storage_spec = vim.storageDrs.StoragePlacementSpec(
+                    type='clone',
+                    vm=object_ref,
+                    podSelectionSpec=pod_spec,
+                    cloneSpec=clone_spec,
+                    cloneName=vm_name,
+                    folder=folder_ref
+                )
+
+                # Get the service instance to refer to the content
+                si = _get_si()
+
+                # Get the recommended datastores
+                recommended_datastores = si.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
+
+                # Apply the Storage DRS recommendations
+                task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(recommended_datastores.recommendations[0].key)
+                _wait_for_task(task, vm_name, "apply storage DRS recommendations", 5, 'info')
+            else:
+                # Clone the VM/template
+                task = object_ref.Clone(folder_ref, vm_name, clone_spec)
+                _wait_for_task(task, vm_name, "clone", 5, 'info')
+        except Exception as exc:
+            err_msg = 'Error creating {0}: {1}'.format(vm_['name'], exc)
+            log.error(
+                err_msg,
+                # Show the traceback if the debug logging level is enabled
+                exc_info_on_loglevel=logging.DEBUG
+            )
+            return {'Error': err_msg}
+
+        new_vm_ref = _get_mor_by_property(vim.VirtualMachine, vm_name)
+
+        # If it is a template, or if it does not need to be powered on, do not wait for an IP
+        if not template and power:
+            ip = _wait_for_ip(new_vm_ref, 20)
+            if ip:
+                log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
+                # ssh or smb using ip and install salt only if deploy is True
+                if deploy:
+                    vm_['key_filename'] = key_filename
+                    vm_['ssh_host'] = ip
+
+                    salt.utils.cloud.bootstrap(vm_, __opts__)
+
+        data = show_instance(vm_name, call='action')
+
+        salt.utils.cloud.fire_event(
+            'event',
+            'created instance',
+            'salt/cloud/{0}/created'.format(vm_['name']),
+            {
+                'name': vm_['name'],
+                'profile': vm_['profile'],
+                'provider': vm_['provider'],
+            },
+            transport=__opts__['transport']
+        )
+        return data
+
+    else:
+        err_msg = "clonefrom option hasn't been specified. Exiting."
+        log.error(err_msg)
+        return {'Error': err_msg}
+
+
+def create_datacenter(kwargs=None, call=None):
+    '''
+    Create a new data center in this VMware environment
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f create_datacenter my-vmware-config name="MyNewDatacenter"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The create_datacenter function must be called with '
+            '-f or --function.'
+        )
+
+    datacenter_name = kwargs.get('name') if kwargs and 'name' in kwargs else None
+
+    if not datacenter_name:
+        raise SaltCloudSystemExit(
+            'You must specify a name for the new datacenter to be created.'
+        )
+
+    if len(datacenter_name) >= 80 or len(datacenter_name) <= 0:
+        raise SaltCloudSystemExit(
+            'The datacenter name must be a non-empty string of fewer than 80 characters.'
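+            # The bounds above reflect vCenter's limit on managed entity
+            # names (non-empty, under 80 characters).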
+ ) + + # Check if datacenter already exists + datacenter_ref = _get_mor_by_property(vim.Datacenter, datacenter_name) + if datacenter_ref: + return {datacenter_name: 'datacenter already exists'} + + # Get the service instance + si = _get_si() + + folder = si.content.rootFolder + + # Verify that the folder is of type vim.Folder + if isinstance(folder, vim.Folder): + try: + folder.CreateDatacenter(name=datacenter_name) + except Exception as exc: + log.error( + 'Error creating datacenter {0}: {1}'.format( + datacenter_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return False + + log.debug("Created datacenter {0}".format(datacenter_name)) + return {datacenter_name: 'created'} + + return False + + +def create_cluster(kwargs=None, call=None): + ''' + Create a new cluster under the specified datacenter in this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f create_cluster my-vmware-config name="myNewCluster" datacenter="datacenterName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The create_cluster function must be called with ' + '-f or --function.' + ) + + cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None + datacenter = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + + if not cluster_name: + raise SaltCloudSystemExit( + 'You must specify name of the new cluster to be created.' + ) + + if not datacenter: + raise SaltCloudSystemExit( + 'You must specify name of the datacenter where the cluster should be created.' + ) + + if not isinstance(datacenter, vim.Datacenter): + datacenter = _get_mor_by_property(vim.Datacenter, datacenter) + if not datacenter: + raise SaltCloudSystemExit( + 'The specified datacenter does not exist.' + ) + + # Check if cluster already exists + cluster_ref = _get_mor_by_property(vim.ClusterComputeResource, cluster_name) + if cluster_ref: + return {cluster_name: 'cluster already exists'} + + cluster_spec = vim.cluster.ConfigSpecEx() + folder = datacenter.hostFolder + + # Verify that the folder is of type vim.Folder + if isinstance(folder, vim.Folder): + try: + folder.CreateClusterEx(name=cluster_name, spec=cluster_spec) + except Exception as exc: + log.error( + 'Error creating cluster {0}: {1}'.format( + cluster_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return False + + log.debug("Created cluster {0} under datacenter {1}".format(cluster_name, datacenter.name)) + return {cluster_name: 'created'} + + return False + + +def rescan_hba(kwargs=None, call=None): + ''' + To rescan a specified HBA or all the HBAs on the Host System + + CLI Example: + + .. code-block:: bash + + salt-cloud -f rescan_hba my-vmware-config host="hostSystemName" + salt-cloud -f rescan_hba my-vmware-config hba="hbaDeviceName" host="hostSystemName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The rescan_hba function must be called with ' + '-f or --function.' + ) + + hba = kwargs.get('hba') if kwargs and 'hba' in kwargs else None + host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None + + if not host_name: + raise SaltCloudSystemExit( + 'You must specify name of the host system.' 
+        )
+
+    host_ref = _get_mor_by_property(vim.HostSystem, host_name)
+
+    try:
+        if hba:
+            log.info('Rescanning HBA {0} on host {1}'.format(hba, host_name))
+            host_ref.configManager.storageSystem.RescanHba(hba)
+            ret = 'rescanned HBA {0}'.format(hba)
+        else:
+            log.info('Rescanning all HBAs on host {0}'.format(host_name))
+            host_ref.configManager.storageSystem.RescanAllHba()
+            ret = 'rescanned all HBAs'
+    except Exception as exc:
+        log.error(
+            'Error while rescanning HBA on host {0}: {1}'.format(
+                host_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return {host_name: 'failed to rescan HBA'}
+
+    return {host_name: ret}
+
+
+def upgrade_tools_all(call=None):
+    '''
+    To upgrade VMware Tools on all virtual machines present in
+    the specified provider
+
+    .. note::
+
+        If the virtual machine is running Windows OS, this function
+        will attempt to suppress the automatic reboot caused by a
+        VMware Tools upgrade.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f upgrade_tools_all my-vmware-config
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The upgrade_tools_all function must be called with '
+            '-f or --function.'
+        )
+
+    ret = {}
+    vm_properties = ["name"]
+
+    vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties)
+
+    for vm in vm_list:
+        ret[vm['name']] = _upg_tools_helper(vm['object'])
+
+    return ret
+
+
+def upgrade_tools(name, reboot=False, call=None):
+    '''
+    To upgrade VMware Tools on a specified virtual machine.
+
+    .. note::
+
+        If the virtual machine is running Windows OS, use ``reboot=True``
+        to reboot the virtual machine after the VMware Tools upgrade.
+        Default is ``reboot=False``
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -a upgrade_tools vmname
+        salt-cloud -a upgrade_tools vmname reboot=True
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The upgrade_tools action must be called with '
+            '-a or --action.'
+        )
+
+    vm_ref = _get_mor_by_property(vim.VirtualMachine, name)
+
+    return _upg_tools_helper(vm_ref, reboot)
+
+
+def list_hosts_by_cluster(kwargs=None, call=None):
+    '''
+    List hosts for each cluster, or hosts for a specified cluster, in
+    this VMware environment
+
+    To list hosts for each cluster:
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f list_hosts_by_cluster my-vmware-config
+
+    To list hosts for a specified cluster:
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f list_hosts_by_cluster my-vmware-config cluster="clusterName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The list_hosts_by_cluster function must be called with '
+            '-f or --function.'
+        )
+
+    ret = {}
+    cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None
+    cluster_properties = ["name"]
+
+    cluster_list = _get_mors_with_properties(vim.ClusterComputeResource, cluster_properties)
+
+    for cluster in cluster_list:
+        ret[cluster['name']] = []
+        for host in cluster['object'].host:
+            if isinstance(host, vim.HostSystem):
+                ret[cluster['name']].append(host.name)
+        if cluster_name and cluster_name == cluster['name']:
+            return {'Hosts by Cluster': {cluster_name: ret[cluster_name]}}
+
+    return {'Hosts by Cluster': ret}
+
+
+def list_clusters_by_datacenter(kwargs=None, call=None):
+    '''
+    List clusters for each datacenter, or clusters for a specified datacenter, in
+    this VMware environment
+
+    To list clusters for each datacenter:
+
+    CLI Example:
+
+    ..
code-block:: bash + + salt-cloud -f list_clusters_by_datacenter my-vmware-config + + To list clusters for a specified datacenter: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_clusters_by_datacenter my-vmware-config datacenter="datacenterName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_clusters_by_datacenter function must be called with ' + '-f or --function.' + ) + + ret = {} + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter_properties = ["name"] + + datacenter_list = _get_mors_with_properties(vim.Datacenter, datacenter_properties) + + for datacenter in datacenter_list: + ret[datacenter['name']] = [] + for cluster in datacenter['object'].hostFolder.childEntity: + if isinstance(cluster, vim.ClusterComputeResource): + ret[datacenter['name']].append(cluster.name) + if datacenter_name and datacenter_name == datacenter['name']: + return {'Clusters by Datacenter': {datacenter_name: ret[datacenter_name]}} + + return {'Clusters by Datacenter': ret} + + +def list_hosts_by_datacenter(kwargs=None, call=None): + ''' + List hosts for each datacenter; or hosts for a specified datacenter in + this VMware environment + + To list hosts for each datacenter: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hosts_by_datacenter my-vmware-config + + To list hosts for a specified datacenter: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hosts_by_datacenter my-vmware-config datacenter="datacenterName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_hosts_by_datacenter function must be called with ' + '-f or --function.' + ) + + ret = {} + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter_properties = ["name"] + + datacenter_list = _get_mors_with_properties(vim.Datacenter, datacenter_properties) + + for datacenter in datacenter_list: + ret[datacenter['name']] = [] + for cluster in datacenter['object'].hostFolder.childEntity: + if isinstance(cluster, vim.ClusterComputeResource): + for host in cluster.host: + if isinstance(host, vim.HostSystem): + ret[datacenter['name']].append(host.name) + if datacenter_name and datacenter_name == datacenter['name']: + return {'Hosts by Datacenter': {datacenter_name: ret[datacenter_name]}} + + return {'Hosts by Datacenter': ret} + + +def list_hbas(kwargs=None, call=None): + ''' + List all HBAs for each host system; or all HBAs for a specified host + system; or HBAs of specified type for each host system; or HBAs of + specified type for a specified host system in this VMware environment + + .. note:: + + You can specify type as either ``parallel``, ``iscsi``, ``block`` + or ``fibre``. + + To list all HBAs for each host system: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hbas my-vmware-config + + To list all HBAs for a specified host system: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hbas my-vmware-config host="hostSystemName" + + To list HBAs of specified type for each host system: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hbas my-vmware-config type="HBAType" + + To list HBAs of specified type for a specified host system: + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_hbas my-vmware-config host="hostSystemName" type="HBAtype" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_hbas function must be called with ' + '-f or --function.' 
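+            # Illustrative shape of the value returned below (hypothetical
+            # host and HBA names):
+            #     {'HBAs by Host': {'esxi01': {'modelName': {'vmhba0': {
+            #         'driver': ..., 'status': ..., 'type': ...}}}}}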
+ ) + + ret = {} + hba_type = kwargs.get('type').lower() if kwargs and 'type' in kwargs else None + host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None + host_properties = [ + "name", + "config.storageDevice.hostBusAdapter" + ] + + if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]: + raise SaltCloudSystemExit( + 'Specified hba type {0} currently not supported.'.format(hba_type) + ) + + host_list = _get_mors_with_properties(vim.HostSystem, host_properties) + + for host in host_list: + ret[host['name']] = {} + for hba in host['config.storageDevice.hostBusAdapter']: + hba_spec = { + 'driver': hba.driver, + 'status': hba.status, + 'type': type(hba).__name__.rsplit(".", 1)[1] + } + if hba_type: + if isinstance(hba, _get_hba_type(hba_type)): + if hba.model in ret[host['name']]: + ret[host['name']][hba.model][hba.device] = hba_spec + else: + ret[host['name']][hba.model] = {hba.device: hba_spec} + else: + if hba.model in ret[host['name']]: + ret[host['name']][hba.model][hba.device] = hba_spec + else: + ret[host['name']][hba.model] = {hba.device: hba_spec} + if host['name'] == host_name: + return {'HBAs by Host': {host_name: ret[host_name]}} + + return {'HBAs by Host': ret} + + +def list_dvs(kwargs=None, call=None): + ''' + List all the distributed virtual switches for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_dvs my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_dvs function must be called with ' + '-f or --function.' + ) + + distributed_vswitches = [] + dvs_properties = ["name"] + + dvs_list = _get_mors_with_properties(vim.DistributedVirtualSwitch, dvs_properties) + + for dvs in dvs_list: + distributed_vswitches.append(dvs["name"]) + + return {'Distributed Virtual Switches': distributed_vswitches} + + +def list_vapps(kwargs=None, call=None): + ''' + List all the vApps for this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f list_vapps my-vmware-config + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The list_vapps function must be called with ' + '-f or --function.' + ) + + vapps = [] + vapp_properties = ["name"] + + vapp_list = _get_mors_with_properties(vim.VirtualApp, vapp_properties) + + for vapp in vapp_list: + vapps.append(vapp["name"]) + + return {'vApps': vapps} + + +def enter_maintenance_mode(kwargs=None, call=None): + ''' + To put the specified host system in maintenance mode in this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f enter_maintenance_mode my-vmware-config host="myHostSystemName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The enter_maintenance_mode function must be called with ' + '-f or --function.' + ) + + host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None + + host_ref = _get_mor_by_property(vim.HostSystem, host_name) + + if not host_name or not host_ref: + raise SaltCloudSystemExit( + 'You must specify a valid name of the host system.' 
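+            # Note on the EnterMaintenanceMode() call below: timeout=0 means
+            # no timeout is applied, and evacuatePoweredOffVms=True asks
+            # vCenter to also relocate powered-off and suspended VMs (done
+            # automatically in a DRS cluster) before maintenance mode is
+            # entered.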
+ ) + + if host_ref.runtime.inMaintenanceMode: + return {host_name: 'already in maintenance mode'} + + try: + task = host_ref.EnterMaintenanceMode(timeout=0, evacuatePoweredOffVms=True) + _wait_for_task(task, host_name, "enter maintenance mode", 1) + except Exception as exc: + log.error( + 'Error while moving host system {0} in maintenance mode: {1}'.format( + host_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return {host_name: 'failed to enter maintenance mode'} + + return {host_name: 'entered maintenance mode'} + + +def exit_maintenance_mode(kwargs=None, call=None): + ''' + To take the specified host system out of maintenance mode in this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f exit_maintenance_mode my-vmware-config host="myHostSystemName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The exit_maintenance_mode function must be called with ' + '-f or --function.' + ) + + host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None + + host_ref = _get_mor_by_property(vim.HostSystem, host_name) + + if not host_name or not host_ref: + raise SaltCloudSystemExit( + 'You must specify a valid name of the host system.' + ) + + if not host_ref.runtime.inMaintenanceMode: + return {host_name: 'already not in maintenance mode'} + + try: + task = host_ref.ExitMaintenanceMode(timeout=0) + _wait_for_task(task, host_name, "exit maintenance mode", 1) + except Exception as exc: + log.error( + 'Error while moving host system {0} out of maintenance mode: {1}'.format( + host_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return {host_name: 'failed to exit maintenance mode'} + + return {host_name: 'exited maintenance mode'} + + +def create_folder(kwargs=None, call=None): + ''' + Create the specified folder path in this VMware environment + + .. note:: + + To create a Host and Cluster Folder under a Datacenter, specify + ``path="/yourDatacenterName/host/yourFolderName"`` + + To create a Network Folder under a Datacenter, specify + ``path="/yourDatacenterName/network/yourFolderName"`` + + To create a Storage Folder under a Datacenter, specify + ``path="/yourDatacenterName/datastore/yourFolderName"`` + + To create a VM and Template Folder under a Datacenter, specify + ``path="/yourDatacenterName/vm/yourFolderName"`` + + CLI Example: + + .. code-block:: bash + + salt-cloud -f create_folder my-vmware-config path="/Local/a/b/c" + salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/vm/MyVMFolder" + salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/host/MyHostFolder" + salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/network/MyNetworkFolder" + salt-cloud -f create_folder my-vmware-config path="/MyDatacenter/storage/MyStorageFolder" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The create_folder function must be called with ' + '-f or --function.' + ) + + # Get the service instance object + si = _get_si() + + folder_path = kwargs.get('path') if kwargs and 'path' in kwargs else None + + if not folder_path: + raise SaltCloudSystemExit( + 'You must specify a non empty folder path.' 
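+            # Illustrative walk-through of the loop below (hypothetical
+            # inventory): for path="/MyDatacenter/vm/a/b", the iteration
+            # visits /MyDatacenter (an existing vim.Datacenter) and
+            # /MyDatacenter/vm (an existing vim.Folder), then creates folder
+            # 'a' under it and folder 'b' under 'a'.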
+        )
+
+    folder_refs = []
+    inventory_path = '/'
+    path_exists = True
+
+    # Split the path into a list and loop over it to check for its existence
+    for index, folder_name in enumerate(os.path.normpath(folder_path.strip('/')).split('/')):
+        inventory_path = os.path.join(inventory_path, folder_name)
+        folder_ref = si.content.searchIndex.FindByInventoryPath(inventoryPath=inventory_path)
+        if isinstance(folder_ref, vim.Folder):
+            # This is a folder that exists so just append and skip it
+            log.debug("Path {0}/ exists in the inventory".format(inventory_path))
+            folder_refs.append(folder_ref)
+        elif isinstance(folder_ref, vim.Datacenter):
+            # This is a datacenter that exists so just append and skip it
+            log.debug("Path {0}/ exists in the inventory".format(inventory_path))
+            folder_refs.append(folder_ref)
+        else:
+            path_exists = False
+            if not folder_refs:
+                # If this is the first folder, create it under the rootFolder
+                log.debug("Creating folder {0} under rootFolder in the inventory".format(folder_name))
+                folder_refs.append(si.content.rootFolder.CreateFolder(folder_name))
+            else:
+                # Create the folder under the parent folder
+                log.debug("Creating path {0}/ in the inventory".format(inventory_path))
+                folder_refs.append(folder_refs[index-1].CreateFolder(folder_name))
+
+    if path_exists:
+        return {inventory_path: 'specified path already exists'}
+    else:
+        return {inventory_path: 'created the specified path'}
+
+
+def create_snapshot(name, kwargs=None, call=None):
+    '''
+    Create a snapshot of the specified virtual machine in this VMware
+    environment
+
+    .. note::
+
+        If the VM is powered on, the internal state of the VM (memory
+        dump) is included in the snapshot by default, which will also set
+        the power state of the snapshot to "powered on". You can set
+        ``memdump=False`` to override this. This field is ignored if
+        the virtual machine is powered off or if the VM does not support
+        snapshots with memory dumps. Default is ``memdump=True``
+
+    .. note::
+
+        If the VM is powered on when the snapshot is taken, VMware Tools
+        can be used to quiesce the file system in the virtual machine by
+        setting ``quiesce=True``. This field is ignored if the virtual
+        machine is powered off, if VMware Tools are not available, or if
+        ``memdump=True``. Default is ``quiesce=False``
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -a create_snapshot vmname snapshot_name="mySnapshot"
+        salt-cloud -a create_snapshot vmname snapshot_name="mySnapshot" [description="My snapshot"] [memdump=False] [quiesce=True]
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The create_snapshot action must be called with '
+            '-a or --action.'
+        )
+
+    snapshot_name = kwargs.get('snapshot_name') if kwargs and 'snapshot_name' in kwargs else None
+
+    if not snapshot_name:
+        raise SaltCloudSystemExit(
+            'You must specify a name for the snapshot to be created.'
+        )
+
+    memdump = _str_to_bool(kwargs.get('memdump', True))
+    quiesce = _str_to_bool(kwargs.get('quiesce', False))
+
+    vm_ref = _get_mor_by_property(vim.VirtualMachine, name)
+
+    if vm_ref.summary.runtime.powerState != "poweredOn":
+        log.debug('VM {0} is not powered on. Setting both memdump and quiesce to False'.format(name))
+        memdump = False
+        quiesce = False
+
+    if memdump and quiesce:
+        # memdump and quiesce are mutually exclusive; only one may be True
+        log.warning('You can only set either memdump or quiesce to True.
Setting quiesce=False')
+        quiesce = False
+
+    desc = kwargs.get('description') if kwargs and 'description' in kwargs else ''
+
+    try:
+        task = vm_ref.CreateSnapshot(snapshot_name, desc, memdump, quiesce)
+        _wait_for_task(task, name, "create snapshot", 5, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while creating snapshot of {0}: {1}'.format(
+                name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return 'failed to create snapshot'
+
+    return {'Snapshot created successfully': _get_snapshots(vm_ref.snapshot.rootSnapshotList, vm_ref.snapshot.currentSnapshot)}
+
+
+def revert_to_snapshot(name, kwargs=None, call=None):
+    '''
+    Revert a virtual machine to its current snapshot. If no snapshot
+    exists, the state of the virtual machine remains unchanged
+
+    .. note::
+
+        The virtual machine will be powered on if the power state of
+        the snapshot when it was created was set to "Powered On". Set
+        ``power_off=True`` so that the virtual machine stays powered
+        off regardless of the power state of the snapshot when it was
+        created. Default is ``power_off=False``.
+
+        If the power state of the snapshot when it was created was
+        "Powered On" and if ``power_off=True``, the VM will be put in
+        suspended state after it has been reverted to the snapshot.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -a revert_to_snapshot vmname [power_off=True]
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The revert_to_snapshot action must be called with '
+            '-a or --action.'
+        )
+
+    suppress_power_on = _str_to_bool(kwargs.get('power_off')) if kwargs and 'power_off' in kwargs else False
+
+    vm_ref = _get_mor_by_property(vim.VirtualMachine, name)
+
+    if not vm_ref.rootSnapshot:
+        log.error('VM {0} does not contain any current snapshots'.format(name))
+        return 'revert failed'
+
+    try:
+        task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on)
+        _wait_for_task(task, name, "revert to snapshot", 5, 'info')
+
+    except Exception as exc:
+        log.error(
+            'Error while reverting VM {0} to snapshot: {1}'.format(
+                name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return 'revert failed'
+
+    return 'reverted to current snapshot'
+
+
+def remove_all_snapshots(name, kwargs=None, call=None):
+    '''
+    Remove all the snapshots present for the specified virtual machine.
+
+    .. note::
+
+        All the snapshots higher up in the hierarchy of the current snapshot tree
+        are consolidated and their virtual disks are merged. To override this
+        behavior and only remove all snapshots, set ``merge_snapshots=False``.
+        Default is ``merge_snapshots=True``
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -a remove_all_snapshots vmname [merge_snapshots=False]
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The remove_all_snapshots action must be called with '
+            '-a or --action.'
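+            # Note: 'consolidate' below maps to the consolidate flag of the
+            # vSphere RemoveAllSnapshots API; when True, the delta disks
+            # backing the removed snapshots are merged into the base disks.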
+        )
+
+    consolidate = _str_to_bool(kwargs.get('merge_snapshots')) if kwargs and 'merge_snapshots' in kwargs else True
+
+    vm_ref = _get_mor_by_property(vim.VirtualMachine, name)
+
+    try:
+        task = vm_ref.RemoveAllSnapshots(consolidate=consolidate)
+        _wait_for_task(task, name, "remove snapshots", 5, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while removing snapshots on VM {0}: {1}'.format(
+                name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return 'failed to remove snapshots'
+
+    return 'removed all snapshots'
+
+
+def add_host(kwargs=None, call=None):
+    '''
+    Add a host system to the specified cluster or datacenter in this VMware environment
+
+    .. note::
+
+        To use this function, you need to specify ``esxi_host_user`` and
+        ``esxi_host_password`` under your provider configuration set up at
+        ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:
+
+        .. code-block:: yaml
+
+            vmware-vcenter01:
+              provider: vmware
+              user: "DOMAIN\\user"
+              password: "verybadpass"
+              url: "vcenter01.domain.com"
+
+              # Required when adding a host system
+              esxi_host_user: "root"
+              esxi_host_password: "myhostpassword"
+              # Optional fields that can be specified when adding a host system
+              esxi_host_ssl_thumbprint: "12:A3:45:B6:CD:7E:F8:90:A1:BC:23:45:D6:78:9E:FA:01:2B:34:CD"
+
+        The SSL thumbprint of the host system can be optionally specified by setting
+        ``esxi_host_ssl_thumbprint`` under your provider configuration. To get the SSL
+        thumbprint of the host system, execute the following command from a remote
+        server:
+
+        .. code-block:: bash
+
+            echo -n | openssl s_client -connect <host>:443 2>/dev/null | openssl x509 -noout -fingerprint -sha1
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f add_host my-vmware-config host="myHostSystemName" cluster="myClusterName"
+        salt-cloud -f add_host my-vmware-config host="myHostSystemName" datacenter="myDatacenterName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The add_host function must be called with '
+            '-f or --function.'
+        )
+
+    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
+    cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None
+    datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None
+
+    host_user = config.get_cloud_config_value(
+        'esxi_host_user', get_configured_provider(), __opts__, search_global=False
+    )
+    host_password = config.get_cloud_config_value(
+        'esxi_host_password', get_configured_provider(), __opts__, search_global=False
+    )
+    host_ssl_thumbprint = config.get_cloud_config_value(
+        'esxi_host_ssl_thumbprint', get_configured_provider(), __opts__, search_global=False
+    )
+
+    if not host_user:
+        raise SaltCloudSystemExit(
+            'You must specify the ESXi host username in your providers config.'
+        )
+
+    if not host_password:
+        raise SaltCloudSystemExit(
+            'You must specify the ESXi host password in your providers config.'
+        )
+
+    if not host_name:
+        raise SaltCloudSystemExit(
+            'You must specify either the IP or DNS name of the host system.'
+        )
+
+    if (cluster_name and datacenter_name) or not (cluster_name or datacenter_name):
+        raise SaltCloudSystemExit(
+            'You must specify either the cluster name or the datacenter name.'
+        )
+
+    if cluster_name:
+        cluster_ref = _get_mor_by_property(vim.ClusterComputeResource, cluster_name)
+        if not cluster_ref:
+            raise SaltCloudSystemExit(
+                'Specified cluster does not exist.'
+ ) + + if datacenter_name: + datacenter_ref = _get_mor_by_property(vim.Datacenter, datacenter_name) + if not datacenter_ref: + raise SaltCloudSystemExit( + 'Specified datacenter does not exist.' + ) + + spec = vim.host.ConnectSpec( + hostName=host_name, + userName=host_user, + password=host_password, + ) + + if host_ssl_thumbprint: + spec.sslThumbprint = host_ssl_thumbprint + else: + log.warning('SSL thumbprint has not been specified in provider configuration') + try: + log.debug('Trying to get the SSL thumbprint directly from the host system') + p1 = subprocess.Popen(('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(('openssl', 's_client', '-connect', '{0}:443'.format(host_name)), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p3 = subprocess.Popen(('openssl', 'x509', '-noout', '-fingerprint', '-sha1'), stdin=p2.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out = salt.utils.to_str(p3.stdout.read()) + ssl_thumbprint = out.split('=')[-1].strip() + log.debug('SSL thumbprint received from the host system: {0}'.format(ssl_thumbprint)) + spec.sslThumbprint = ssl_thumbprint + except Exception as exc: + log.error( + 'Error while trying to get SSL thumbprint of host {0}: {1}'.format( + host_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return {host_name: 'failed to add host'} + + try: + if cluster_name: + task = cluster_ref.AddHost(spec=spec, asConnected=True) + ret = 'added host system to cluster {0}'.format(cluster_name) + if datacenter_name: + task = datacenter_ref.hostFolder.AddStandaloneHost(spec=spec, addConnected=True) + ret = 'added host system to datacenter {0}'.format(datacenter_name) + _wait_for_task(task, host_name, "add host system", 5, 'info') + except Exception as exc: + if isinstance(exc, vim.fault.SSLVerifyFault): + log.error('Authenticity of the host\'s SSL certificate is not verified') + log.info('Try again after setting the esxi_host_ssl_thumbprint to {0} in provider configuration'.format(exc.thumbprint)) + log.error( + 'Error while adding host {0}: {1}'.format( + host_name, + exc + ), + # Show the traceback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + return {host_name: 'failed to add host'} + + return {host_name: ret} + + +def remove_host(kwargs=None, call=None): + ''' + Remove the specified host system from this VMware environment + + CLI Example: + + .. code-block:: bash + + salt-cloud -f remove_host my-vmware-config host="myHostSystemName" + ''' + if call != 'function': + raise SaltCloudSystemExit( + 'The remove_host function must be called with ' + '-f or --function.' + ) + + host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None + + if not host_name: + raise SaltCloudSystemExit( + 'You must specify name of the host system.' + ) + + host_ref = _get_mor_by_property(vim.HostSystem, host_name) + if not host_ref: + raise SaltCloudSystemExit( + 'Specified host system does not exist.' 
+
+
+def remove_host(kwargs=None, call=None):
+    '''
+    Remove the specified host system from this VMware environment
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f remove_host my-vmware-config host="myHostSystemName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The remove_host function must be called with '
+            '-f or --function.'
+        )
+
+    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
+
+    if not host_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the host system.'
+        )
+
+    host_ref = _get_mor_by_property(vim.HostSystem, host_name)
+    if not host_ref:
+        raise SaltCloudSystemExit(
+            'Specified host system does not exist.'
+        )
+
+    try:
+        if isinstance(host_ref.parent, vim.ClusterComputeResource):
+            # This host system is part of a cluster
+            task = host_ref.Destroy_Task()
+        else:
+            # This is a standalone host system wrapped in its own
+            # ComputeResource, which must be destroyed along with it
+            task = host_ref.parent.Destroy_Task()
+        _wait_for_task(task, host_name, "remove host", 1, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while removing host {0}: {1}'.format(
+                host_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return {host_name: 'failed to remove host'}
+
+    return {host_name: 'removed host from vcenter'}
+
+
+def connect_host(kwargs=None, call=None):
+    '''
+    Connect the specified host system in this VMware environment
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f connect_host my-vmware-config host="myHostSystemName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The connect_host function must be called with '
+            '-f or --function.'
+        )
+
+    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
+
+    if not host_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the host system.'
+        )
+
+    host_ref = _get_mor_by_property(vim.HostSystem, host_name)
+    if not host_ref:
+        raise SaltCloudSystemExit(
+            'Specified host system does not exist.'
+        )
+
+    if host_ref.runtime.connectionState == 'connected':
+        return {host_name: 'host system already connected'}
+
+    try:
+        task = host_ref.ReconnectHost_Task()
+        _wait_for_task(task, host_name, "connect host", 5, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while connecting host {0}: {1}'.format(
+                host_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return {host_name: 'failed to connect host'}
+
+    return {host_name: 'connected host'}
+
+
+def disconnect_host(kwargs=None, call=None):
+    '''
+    Disconnect the specified host system in this VMware environment
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f disconnect_host my-vmware-config host="myHostSystemName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The disconnect_host function must be called with '
+            '-f or --function.'
+        )
+
+    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
+
+    if not host_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the host system.'
+        )
+
+    host_ref = _get_mor_by_property(vim.HostSystem, host_name)
+    if not host_ref:
+        raise SaltCloudSystemExit(
+            'Specified host system does not exist.'
+        )
+
+    if host_ref.runtime.connectionState == 'disconnected':
+        return {host_name: 'host system already disconnected'}
+
+    try:
+        task = host_ref.DisconnectHost_Task()
+        _wait_for_task(task, host_name, "disconnect host", 1, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while disconnecting host {0}: {1}'.format(
+                host_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return {host_name: 'failed to disconnect host'}
+
+    return {host_name: 'disconnected host'}
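Every function here resolves a name to a managed object reference through ``_get_mor_by_property``, a helper defined earlier in the file. As a sketch of the usual pyVmomi pattern behind such a lookup (the module's helper may differ in details), assuming ``content`` is a connected service-instance content object:

.. code-block:: python

    from pyVmomi import vim

    def get_mor_by_name(content, vimtype, name):
        # Walk a recursive container view of the given managed object type
        # and return the first object whose 'name' property matches.
        view = content.viewManager.CreateContainerView(
            content.rootFolder, [vimtype], True)
        try:
            for obj in view.view:
                if obj.name == name:
                    return obj
        finally:
            view.DestroyView()
        return None

    # e.g. get_mor_by_name(content, vim.HostSystem, 'myHostSystemName')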
+
+
+def reboot_host(kwargs=None, call=None):
+    '''
+    Reboot the specified host system in this VMware environment
+
+    .. note::
+
+        If the host system is not in maintenance mode, it will not be rebooted. If you
+        want to reboot the host system regardless of whether it is in maintenance mode,
+        set ``force=True``. Default is ``force=False``.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f reboot_host my-vmware-config host="myHostSystemName" [force=True]
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The reboot_host function must be called with '
+            '-f or --function.'
+        )
+
+    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
+    force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False
+
+    if not host_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the host system.'
+        )
+
+    host_ref = _get_mor_by_property(vim.HostSystem, host_name)
+    if not host_ref:
+        raise SaltCloudSystemExit(
+            'Specified host system does not exist.'
+        )
+
+    if host_ref.runtime.connectionState == 'notResponding':
+        raise SaltCloudSystemExit(
+            'Specified host system cannot be rebooted in its current state (not responding).'
+        )
+
+    if not host_ref.capability.rebootSupported:
+        raise SaltCloudSystemExit(
+            'Specified host system does not support reboot.'
+        )
+
+    # Without force=True, refuse to reboot a host that is not in maintenance mode
+    if not host_ref.runtime.inMaintenanceMode and not force:
+        raise SaltCloudSystemExit(
+            'Specified host system is not in maintenance mode. Specify force=True to '
+            'force reboot even if there are virtual machines running or other operations '
+            'in progress.'
+        )
+
+    try:
+        host_ref.RebootHost_Task(force)
+        _wait_for_host(host_ref, "reboot", 10, 'info')
+    except Exception as exc:
+        log.error(
+            'Error while rebooting host {0}: {1}'.format(
+                host_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return {host_name: 'failed to reboot host'}
+
+    return {host_name: 'rebooted host'}
+
+
+def create_datastore_cluster(kwargs=None, call=None):
+    '''
+    Create a new datastore cluster for the specified datacenter in this VMware environment
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt-cloud -f create_datastore_cluster my-vmware-config name="datastoreClusterName" datacenter="datacenterName"
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The create_datastore_cluster function must be called with '
+            '-f or --function.'
+        )
+
+    datastore_cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None
+    datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None
+
+    if not datastore_cluster_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the new datastore cluster to be created.'
+        )
+
+    if len(datastore_cluster_name) >= 80 or len(datastore_cluster_name) <= 0:
+        raise SaltCloudSystemExit(
+            'The datastore cluster name must be a non-empty string of fewer than 80 characters.'
+        )
+
+    if not datacenter_name:
+        raise SaltCloudSystemExit(
+            'You must specify the name of the datacenter where the datastore cluster should be created.'
+        )
+
+    # Check if datastore cluster already exists
+    datastore_cluster_ref = _get_mor_by_property(vim.StoragePod, datastore_cluster_name)
+    if datastore_cluster_ref:
+        return {datastore_cluster_name: 'datastore cluster already exists'}
+
+    datacenter_ref = _get_mor_by_property(vim.Datacenter, datacenter_name)
+    if not datacenter_ref:
+        raise SaltCloudSystemExit(
+            'The specified datacenter does not exist.'
+        )
+
+    try:
+        datacenter_ref.datastoreFolder.CreateStoragePod(name=datastore_cluster_name)
+    except Exception as exc:
+        log.error(
+            'Error creating datastore cluster {0}: {1}'.format(
+                datastore_cluster_name,
+                exc
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        return False
+
+    return {datastore_cluster_name: 'created'}
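All of the above presumes an authenticated vSphere session; the helpers that establish it (and the module-level ``vim`` and ``log`` objects) live earlier in the file, outside this hunk. A minimal sketch of that session setup with pyVim, reusing the placeholder credentials from the provider example above:

.. code-block:: python

    import atexit

    from pyVim.connect import Disconnect, SmartConnect

    # Connect to vCenter and make sure the session is logged out on exit.
    si = SmartConnect(host='vcenter01.domain.com',
                      user='DOMAIN\\user',
                      pwd='verybadpass',
                      port=443)
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()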
diff --git a/salt/cloud/deploy/bootstrap-salt.sh b/salt/cloud/deploy/bootstrap-salt.sh
index ebe6e264d7..7d872a28cf 100755
--- a/salt/cloud/deploy/bootstrap-salt.sh
+++ b/salt/cloud/deploy/bootstrap-salt.sh
@@ -17,7 +17,7 @@
 #          CREATED: 10/15/2012 09:49:37 PM WEST
 #======================================================================================================================
 set -o nounset                              # Treat unset variables as an error
-__ScriptVersion="2015.05.07"
+__ScriptVersion="2015.07.17"
 __ScriptName="bootstrap-salt.sh"
 
 #======================================================================================================================
@@ -90,7 +90,7 @@ echoinfo() {
 
 #---  FUNCTION  -------------------------------------------------------------------------------------------------------
 #          NAME:  echowarn
-#   DESCRIPTION:  Echo warning informations to stdout.
+#   DESCRIPTION:  Echo warning information to stdout.
 #----------------------------------------------------------------------------------------------------------------------
 echowarn() {
     printf "${YC} *  WARN${EC}: %s\n" "$@";
@@ -211,6 +211,7 @@ _LIBCLOUD_MIN_VERSION="0.14.0"
 _PY_REQUESTS_MIN_VERSION="2.0"
 _EXTRA_PACKAGES=""
 _HTTP_PROXY=""
+_DISABLE_SALT_CHECKS=$BS_FALSE
 
 __SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt}
 
@@ -277,6 +278,9 @@ usage() {
   -L  Install the Apache Libcloud package if possible(required for salt-cloud)
   -p  Extra-package to install while installing salt dependencies. One package
       per -p flag. You're responsible for providing the proper package name.
+  -d  Disable check_service functions. Setting this flag disables the
+      'install_<distro>_check_services' checks. You can also do this by
+      touching /tmp/disable_salt_checks on the target host.
Defaults \${BS_FALSE} -H Use the specified http proxy for the installation -Z Enable external software source for newer ZeroMQ(Only available for RHEL/CentOS/Fedora based distributions) @@ -284,7 +288,7 @@ EOT } # ---------- end of function usage ---------- -while getopts ":hvnDc:Gg:k:MSNXCPFUKIA:i:Lp:H:Z" opt +while getopts ":hvnDc:Gg:k:MSNXCPFUKIA:i:Lp:dH:Z" opt do case "${opt}" in @@ -333,6 +337,7 @@ do i ) _SALT_MINION_ID=$OPTARG ;; L ) _INSTALL_CLOUD=$BS_TRUE ;; p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;; + d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;; H ) _HTTP_PROXY="$OPTARG" ;; Z) _ENABLE_EXTERNAL_ZMQ_REPOS=$BS_TRUE ;; @@ -467,6 +472,12 @@ if [ "${CALLER}x" = "${0}x" ]; then CALLER="PIPED THROUGH" fi +# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 +if [ ${_DISABLE_SALT_CHECKS} -eq 0 ]; then + [ -f /tmp/disable_salt_checks ] && _DISABLE_SALT_CHECKS=$BS_TRUE && \ + echowarn "Found file: /tmp/disable_salt_checks, setting \$_DISABLE_SALT_CHECKS=true" +fi + echoinfo "${CALLER} ${0} -- Version ${__ScriptVersion}" #echowarn "Running the unstable version of ${__ScriptName}" @@ -1147,7 +1158,7 @@ fi if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$ITYPE" = "daily" ]); then echoerror "${DISTRO_NAME} does not have daily packages support" exit 1 -elif ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$STABLE_REV" != "latest" ]); then +elif ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]); then echoerror "${DISTRO_NAME} does not have major version pegged packages support" exit 1 fi @@ -1899,7 +1910,8 @@ install_ubuntu_daily() { install_ubuntu_git() { if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || return 1 + python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || \ + python setup.py --salt-config-dir="$_SALT_ETC_DIR" install --install-layout=deb || return 1 else python setup.py install --install-layout=deb || return 1 fi @@ -1915,7 +1927,7 @@ install_ubuntu_git_post() { [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - if [ -f /bin/systemctl ]; then + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -1954,8 +1966,8 @@ install_ubuntu_git_post() { install_ubuntu_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return - # Ensure upstart configs are loaded - if [ -f /bin/systemctl ]; then + # Ensure upstart configs / systemd units are loaded + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then systemctl daemon-reload elif [ -f /sbin/initctl ]; then /sbin/initctl reload-configuration @@ -1970,7 +1982,7 @@ install_ubuntu_restart_daemons() { #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - if [ -f /bin/systemctl ]; then + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then echodebug "There's systemd support while checking salt-$fname" systemctl stop salt-$fname > /dev/null 2>&1 systemctl start salt-$fname.service @@ -2015,7 +2027,7 
@@ install_ubuntu_check_services() { #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - if [ -f /bin/systemctl ]; then + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then __check_services_systemd salt-$fname || return 1 elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then __check_services_upstart salt-$fname || return 1 @@ -2137,7 +2149,7 @@ _eof # We NEED to install the unstable dpkg or mime-support WILL fail to install __apt_get_install_noinput -t unstable dpkg liblzma5 python mime-support || return 1 __apt_get_install_noinput -t unstable libzmq3 libzmq3-dev || return 1 - __apt_get_install_noinput build-essential python-dev python-pip || return 1 + __apt_get_install_noinput build-essential python-dev python-pip python-setuptools || return 1 # Saltstack's Unstable Debian repository if [ "$(grep -R 'debian.saltstack.com' /etc/apt)" = "" ]; then @@ -2179,6 +2191,14 @@ _eof __apt_get_install_noinput python-zmq || return 1 + if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then + # Building pyzmq from source to build it against libzmq3. + # Should override current installation + # Using easy_install instead of pip because at least on Debian 6, + # there's no default virtualenv active. + easy_install -U pyzmq || return 1 + fi + if [ "${_EXTRA_PACKAGES}" != "" ]; then echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" # shellcheck disable=SC2086 @@ -2210,7 +2230,7 @@ install_debian_7_deps() { # Debian Backports if [ "$(grep -R 'wheezy-backports' /etc/apt | grep -v "^#")" = "" ]; then - echo "deb http://http.debian.net/debian wheezy-backports main" >> \ + echo "deb http://httpredir.debian.org/debian wheezy-backports main" >> \ /etc/apt/sources.list.d/backports.list fi @@ -2278,7 +2298,7 @@ install_debian_8_deps() { # Debian Backports if [ "$(grep -R 'jessie-backports' /etc/apt | grep -v "^#")" = "" ]; then - echo "deb http://http.debian.net/debian jessie-backports main" >> \ + echo "deb http://httpredir.debian.org/debian jessie-backports main" >> \ /etc/apt/sources.list.d/backports.list fi @@ -2380,7 +2400,7 @@ install_debian_6_git_deps() { install_debian_6_deps || return 1 if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then __PACKAGES="build-essential lsb-release python python-dev python-pkg-resources python-crypto" - __PACKAGES="${__PACKAGES} python-m2crypto python-yaml msgpack-python python-pip" + __PACKAGES="${__PACKAGES} python-m2crypto python-yaml msgpack-python python-pip python-setuptools" if [ "$(which git)" = "" ]; then __PACKAGES="${__PACKAGES} git" @@ -2435,14 +2455,6 @@ __install_debian_stable() { # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 - if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then - # Building pyzmq from source to build it against libzmq3. - # Should override current installation - # Using easy_install instead of pip because at least on Debian 6, - # there's no default virtualenv active. 
- easy_install -U pyzmq || return 1 - fi - return 0 } @@ -2465,7 +2477,8 @@ install_debian_8_stable() { install_debian_git() { if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || return 1 + python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || \ + python setup.py --salt-config-dir="$_SALT_ETC_DIR" install --install-layout=deb || return 1 else python setup.py install --install-layout=deb || return 1 fi @@ -2509,6 +2522,8 @@ install_debian_git_post() { elif [ ! -f /etc/init.d/salt-$fname ] || ([ -f /etc/init.d/salt-$fname ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then if [ -f "${__SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init" ]; then copyfile "${__SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init" "/etc/init.d/salt-$fname" + else + __fetch_url "/etc/init.d/salt-$fname" "http://anonscm.debian.org/cgit/pkg-salt/salt.git/plain/debian/salt-${fname}.init" fi if [ ! -f "/etc/init.d/salt-$fname" ]; then echowarn "The init script for salt-$fname was not found, skipping it..." @@ -2665,7 +2680,8 @@ install_fedora_git_deps() { install_fedora_git() { if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1 + python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \ + python setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1 else python setup.py install || return 1 fi @@ -2989,7 +3005,8 @@ install_centos_git() { _PYEXE=python2 fi if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - $_PYEXE setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1 + $_PYEXE setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \ + $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1 else $_PYEXE setup.py install || return 1 fi @@ -3137,11 +3154,17 @@ install_centos_check_services() { __test_rhel_optionals_packages() { __install_epel_repository || return 1 + # Make sure yum-utils is installed + yum list installed yum-utils > /dev/null 2>&1 || yum -y install yum-utils --enablerepo=${_EPEL_REPO} || return 1 + if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then yum-config-manager --enable \*server-optional || return 1 fi if [ "$DISTRO_MAJOR_VERSION" -ge 6 ]; then + #python-jinja2 is in repo server-releases-optional in EC2/RHEL6 + yum-config-manager --enable rhui-\*-server-releases-optional || return 1 + # Let's enable package installation testing, kind of, --dry-run echoinfo "Testing if packages usually on the optionals repository are available:" __YUM_CONF_DIR="$(mktemp -d)" @@ -3746,7 +3769,8 @@ install_arch_linux_stable() { install_arch_linux_git() { if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python2 setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1 + python2 setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \ + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1 else python2 setup.py install || return 1 fi @@ -4060,6 +4084,17 @@ install_freebsd_git() { --salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ --salt-logs-dir=/var/log/salt \ --salt-pidfile-dir=/var/run \ + || /usr/local/bin/python2 setup.py \ + --salt-root-dir=/usr/local \ + --salt-config-dir="${_SALT_ETC_DIR}" \ + --salt-cache-dir=/var/cache/salt \ + --salt-sock-dir=/var/run/salt \ + --salt-srv-root-dir=/srv \ + --salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \ + --salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \ + 
--salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ + --salt-logs-dir=/var/log/salt \ + --salt-pidfile-dir=/var/run install \ || return 1 fi @@ -4137,6 +4172,11 @@ install_freebsd_restart_daemons() { install_smartos_deps() { pkgin -y install zeromq py27-m2crypto py27-crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1 + # Set _SALT_ETC_DIR to SmartOS default if they didn't specify + _SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt} + # We also need to redefine the PKI directory + _PKI_DIR=${_SALT_ETC_DIR}/pki + # Let's trigger config_salt() if [ "$_TEMP_CONFIG_DIR" = "null" ]; then # Let's set the configuration directory to /tmp @@ -4156,6 +4196,10 @@ install_smartos_deps() { fi fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + pkgin -y install py27-apache-libcloud || return 1 + fi + if [ "${_EXTRA_PACKAGES}" != "" ]; then echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" # shellcheck disable=SC2086 @@ -4169,8 +4213,9 @@ install_smartos_deps() { install_smartos_git_deps() { install_smartos_deps || return 1 - if [ "$(which git)" = "" ]; then - pkgin -y install scmgit || return 1 + which git > /dev/null 2>&1 + if [ $? -eq 1 ]; then + pkgin -y install git || return 1 fi if [ -f "${__SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then @@ -4202,7 +4247,9 @@ install_smartos_stable() { install_smartos_git() { # Use setuptools in order to also install dependencies - USE_SETUPTOOLS=1 /opt/local/bin/python setup.py install || return 1 + # lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0 + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \ + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1 return 0 } @@ -4742,7 +4789,7 @@ __gentoo_config_protection() { # this point, manually merge the changes using etc-update/dispatch-conf/ # cfg-update and then restart the bootstrapping script, so instead we allow # at this point to modify certain config files directly - export CONFIG_PROTECT_MASK="$CONFIG_PROTECT_MASK /etc/portage/package.keywords /etc/portage/package.unmask /etc/portage/package.use /etc/portage/package.license" + export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} /etc/portage/package.keywords /etc/portage/package.unmask /etc/portage/package.use /etc/portage/package.license" } __gentoo_pre_dep() { @@ -5166,12 +5213,17 @@ done echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}" # Let's get the check services function -CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services" -CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services" -CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services" -CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services" -CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services" -CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services" +if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then + CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services" + 
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services" +else + CHECK_SERVICES_FUNC_NAMES=False + echowarn "DISABLE_SALT_CHECKS set, not setting \$CHECK_SERVICES_FUNC_NAMES" +fi CHECK_SERVICES_FUNC="null" for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do diff --git a/salt/daemons/flo/core.py b/salt/daemons/flo/core.py index 61c6c3bb37..8341cb55ce 100644 --- a/salt/daemons/flo/core.py +++ b/salt/daemons/flo/core.py @@ -134,6 +134,8 @@ class SaltRaetRoadStackSetup(ioflo.base.deeding.Deed): RoadStack.Bk = raeting.BodyKind.msgpack.value RoadStack.JoinentTimeout = 0.0 + _prepare = postinitio + def action(self): ''' enter action @@ -237,6 +239,8 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed): self.masters = daemons.extract_masters(self.opts.value) # self.mha = (self.opts.value['master'], int(self.opts.value['master_port'])) + _prepare = postinitio + def action(self, **kwa): ''' Join with all masters @@ -499,6 +503,8 @@ class SaltLoadModules(ioflo.base.deeding.Deed): def postinitio(self): self._load_modules() + _prepare = postinitio + def action(self): self._load_modules() @@ -626,6 +632,8 @@ class SaltSchedule(ioflo.base.deeding.Deed): self.modules.value, self.returners.value) + _prepare = postinitio + def action(self): ''' Eval the schedule @@ -665,6 +673,8 @@ class SaltRaetManorLaneSetup(ioflo.base.deeding.Deed): ''' pass + _prepare = postinitio + def action(self): ''' Run once at enter @@ -1259,6 +1269,8 @@ class SaltRaetMasterEvents(ioflo.base.deeding.Deed): def postinitio(self): self.master_events.value = deque() + _prepare = postinitio + def action(self): if not self.master_events.value: return @@ -1308,6 +1320,8 @@ class SaltRaetThreadShellJobber(ioflo.base.deeding.Deed): def postinitio(self): self.threads.value = deque() + _prepare = postinitio + def action(self): ''' Evaluate the fun options and execute them via salt-call @@ -1383,6 +1397,8 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed): self.serial = salt.payload.Serial(self.opts) self.executors.value = {} + _prepare = postinitio + def _setup_jobber_stack(self): ''' Setup and return the LaneStack and Yard used by the jobber yard diff --git a/salt/fileclient.py b/salt/fileclient.py index 72f8e41109..30c6e33214 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -545,10 +545,6 @@ class Client(object): else: return '' elif not no_cache: - if salt.utils.is_windows(): - netloc = salt.utils.sanitize_win_path_string(url_data.netloc) - else: - netloc = url_data.netloc dest = self._extrn_path(url, saltenv) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): @@ -695,12 +691,16 @@ class Client(object): Return the extn_filepath for a given url ''' url_data = urlparse(url) + if salt.utils.is_windows(): + netloc = salt.utils.sanitize_win_path_string(url_data.netloc) + else: + netloc = url_data.netloc return salt.utils.path_join( self.opts['cachedir'], 'extrn_files', saltenv, - url_data.netloc, + 
netloc, url_data.path ) diff --git a/salt/grains/core.py b/salt/grains/core.py index 736c740b50..fdf9df31c0 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1039,32 +1039,33 @@ def os_data(): os.stat('/run/systemd/system') grains['init'] = 'systemd' except OSError: - with salt.utils.fopen('/proc/1/cmdline') as fhr: - init_cmdline = fhr.read().replace('\x00', ' ').split() - init_bin = salt.utils.which(init_cmdline[0]) - if init_bin: - supported_inits = ('upstart', 'sysvinit', 'systemd') - edge_len = max(len(x) for x in supported_inits) - 1 - buf_size = __opts__['file_buffer_size'] - try: - with open(init_bin, 'rb') as fp_: - buf = True - edge = '' - buf = fp_.read(buf_size).lower() - while buf: - buf = edge + buf - for item in supported_inits: - if item in buf: - grains['init'] = item - buf = '' - break - edge = buf[-edge_len:] + if os.path.exists('/proc/1/cmdline'): + with salt.utils.fopen('/proc/1/cmdline') as fhr: + init_cmdline = fhr.read().replace('\x00', ' ').split() + init_bin = salt.utils.which(init_cmdline[0]) + if init_bin: + supported_inits = ('upstart', 'sysvinit', 'systemd') + edge_len = max(len(x) for x in supported_inits) - 1 + buf_size = __opts__['file_buffer_size'] + try: + with open(init_bin, 'rb') as fp_: + buf = True + edge = '' buf = fp_.read(buf_size).lower() - except (IOError, OSError) as exc: - log.error( - 'Unable to read from init_bin ({0}): {1}' - .format(init_bin, exc) - ) + while buf: + buf = edge + buf + for item in supported_inits: + if item in buf: + grains['init'] = item + buf = '' + break + edge = buf[-edge_len:] + buf = fp_.read(buf_size).lower() + except (IOError, OSError) as exc: + log.error( + 'Unable to read from init_bin ({0}): {1}' + .format(init_bin, exc) + ) # Add lsb grains on any distro with lsb-release try: diff --git a/salt/modules/dig.py b/salt/modules/dig.py index c395717f9f..d9ce23b73b 100644 --- a/salt/modules/dig.py +++ b/salt/modules/dig.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- ''' -Compendium of generic DNS utilities +Compendium of generic DNS utilities. +The 'dig' command line tool must be installed in order to use this module. 
''' from __future__ import absolute_import diff --git a/salt/modules/file.py b/salt/modules/file.py index 8ddb7bc0b4..5013f65044 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -3207,7 +3207,7 @@ def check_file_meta( if contents is not None: # Write a tempfile with the static contents tmp = salt.utils.mkstemp(text=True) - with salt.utils.fopen(tmp, 'w') as tmp_: + with salt.utils.fopen(tmp, 'wb') as tmp_: tmp_.write(str(contents)) # Compare the static contents with the named file with contextlib.nested( diff --git a/salt/modules/hipchat.py b/salt/modules/hipchat.py index f7a5302fd4..d6f2fb7a04 100644 --- a/salt/modules/hipchat.py +++ b/salt/modules/hipchat.py @@ -134,6 +134,9 @@ def _query(function, api_key=None, api_version=None, method='GET', data=None): elif api_version == 'v2': headers['Authorization'] = 'Bearer {0}'.format(api_key) data = json.dumps(data) + + if method == 'POST': + headers['Content-Type'] = 'application/json' else: log.error('Unsupported HipChat API version') return False diff --git a/salt/modules/lxc.py b/salt/modules/lxc.py index debc30cd11..df6f690530 100644 --- a/salt/modules/lxc.py +++ b/salt/modules/lxc.py @@ -2870,7 +2870,7 @@ def set_dns(name, dnsservers=None, searchdomains=None): name, 'sh -c "chmod +x {0};{0}"'.format(script), python_shell=True) # blindly delete the setter file run_all(name, - 'if [ -f "{0}" ];then rm -f "{0}";fi'.format(script), + 'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''.format(script), python_shell=True) if result['retcode'] != 0: error = ('Unable to write to /etc/resolv.conf in container \'{0}\'' @@ -2907,7 +2907,7 @@ def running_systemd(name, cache=True): '''\ #!/usr/bin/env bash set -x - if ! which systemctl 1>/dev/nulll 2>/dev/null;then exit 2;fi + if ! which systemctl 1>/dev/null 2>/dev/null;then exit 2;fi for i in \\ /run/systemd/journal/dev-log\\ /run/systemd/journal/flushed\\ @@ -3191,24 +3191,21 @@ def bootstrap(name, if install: rstr = __salt__['test.rand_str']() configdir = '/tmp/.c_{0}'.format(rstr) - run(name, - 'install -m 0700 -d {0}'.format(configdir), - python_shell=False) + + cmd = 'install -m 0700 -d {0}'.format(configdir) + if run(name, cmd, python_shell=False): + log.error('tmpdir {0} creation failed ({1}' + .format(configdir, cmd)) + return False + bs_ = __salt__['config.gather_bootstrap_script']( bootstrap=bootstrap_url) - dest_dir = os.path.join('/tmp', rstr) - for cmd in [ - 'mkdir -p {0}'.format(dest_dir), - 'chmod 700 {0}'.format(dest_dir), - ]: - if run_stdout(name, cmd): - log.error( - ('tmpdir {0} creation' - ' failed ({1}').format(dest_dir, cmd)) - return False - cp(name, - bs_, - '{0}/bootstrap.sh'.format(dest_dir)) + script = '/sbin/{0}_bootstrap.sh'.format(rstr) + cp(name, bs_, script) + result = run_all(name, + 'sh -c "chmod +x {0};{0}"'''.format(script), + python_shell=True) + cp(name, cfg_files['config'], os.path.join(configdir, 'minion')) cp(name, cfg_files['privkey'], @@ -3216,16 +3213,22 @@ def bootstrap(name, cp(name, cfg_files['pubkey'], os.path.join(configdir, 'minion.pub')) bootstrap_args = bootstrap_args.format(configdir) - cmd = ('{0} {2}/bootstrap.sh {1}' + cmd = ('{0} {2} {1}' .format(bootstrap_shell, bootstrap_args.replace("'", "''"), - dest_dir)) + script)) # log ASAP the forged bootstrap command which can be wrapped # out of the output in case of unexpected problem log.info('Running {0} in LXC container \'{1}\'' .format(cmd, name)) ret = retcode(name, cmd, output_loglevel='info', use_vt=True) == 0 + + run_all(name, + 'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\'' + 
''.format(script), + ignore_retcode=True, + python_shell=True) else: ret = False else: diff --git a/salt/modules/mount.py b/salt/modules/mount.py index d71bb3b6c5..42a2d088ca 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -379,6 +379,7 @@ def set_fstab( # Try to guess right criteria for auto.... # NOTE: missing some special fstypes here specialFSes = frozenset([ + 'none', 'tmpfs', 'sysfs', 'proc', diff --git a/salt/modules/rabbitmq.py b/salt/modules/rabbitmq.py index 91d2192112..8f3f256678 100644 --- a/salt/modules/rabbitmq.py +++ b/salt/modules/rabbitmq.py @@ -57,6 +57,27 @@ def _get_rabbitmq_plugin(): return rabbitmq +def _strip_listing_to_done(output_list): + '''Conditionally remove non-relevant first and last line, + "Listing ..." - "...done". + outputlist: rabbitmq command output split by newline + return value: list, conditionally modified, may be empty. + ''' + + # conditionally remove non-relevant first line + f_line = ''.join(output_list[:1]) + if f_line.startswith('Listing') and f_line.endswith('...'): + output_list.pop(0) + + # some versions of rabbitmq have no trailing '...done' line, + # which some versions do not output. + l_line = ''.join(output_list[-1:]) + if l_line == '...done': + output_list.pop() + + return output_list + + def _output_to_dict(cmdoutput, values_mapper=None): '''Convert rabbitmqctl output to a dict of data cmdoutput: string output of rabbitmqctl commands @@ -67,11 +88,11 @@ def _output_to_dict(cmdoutput, values_mapper=None): values_mapper = lambda string: string.split('\t') # remove first and last line: Listing ... - ...done - data_rows = cmdoutput.splitlines()[1:-1] + data_rows = _strip_listing_to_done(cmdoutput.splitlines()) + for row in data_rows: key, values = row.split('\t', 1) ret[key] = values_mapper(values) - return ret @@ -111,7 +132,7 @@ def list_vhosts(runas=None): runas=runas) # remove first and last line: Listing ... - ...done - return res.splitlines()[1:-1] + return _strip_listing_to_done(res.splitlines()) def user_exists(name, runas=None): diff --git a/salt/modules/rbenv.py b/salt/modules/rbenv.py index 35917af715..ac31f9103d 100644 --- a/salt/modules/rbenv.py +++ b/salt/modules/rbenv.py @@ -1,6 +1,11 @@ # -*- coding: utf-8 -*- ''' -Manage ruby installations with rbenv. +Manage ruby installations with rbenv. Rbenv is supported on Linux and Mac OS X. +Rbenv doesn't work on Windows (and isn't really necessary on Windows as there is +no system Ruby on Windows). On Windows, the RubyInstaller and/or Pik are both +good alternatives to work with multiple versions of Ruby on the same box. + +http://misheska.com/blog/2013/06/15/using-rbenv-to-manage-multiple-versions-of-ruby/ .. versionadded:: 0.16.0 ''' @@ -30,6 +35,15 @@ __opts__ = { } +def __virtual__(): + """ + Only work on POSIX-like systems + """ + if salt.utils.is_windows(): + return False + return True + + def _shlex_split(s): # from python:shlex.split: passing None for s will read # the string to split from standard input. 
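The ``schedule.add`` docstring change just below adds double quotes around ``job_args``: without them, the shell consumes the inner single quotes before Salt can parse the value as a YAML list. A quick sketch of the difference, using Python's ``shlex`` to mimic POSIX word splitting (the command text is abbreviated from the docstring example):

.. code-block:: python

    import shlex

    unquoted = "schedule.add job2 job_args=['date >> /tmp/date.log'] seconds=60"
    quoted = "schedule.add job2 job_args=\"['date >> /tmp/date.log']\" seconds=60"

    # The unquoted form loses the inner quotes during word splitting...
    print(shlex.split(unquoted))  # [..., 'job_args=[date >> /tmp/date.log]', ...]
    # ...while the double-quoted form keeps them for Salt's YAML parser.
    print(shlex.split(quoted))    # [..., "job_args=['date >> /tmp/date.log']", ...]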
diff --git a/salt/modules/schedule.py b/salt/modules/schedule.py index 912035297f..4bc6b2f21f 100644 --- a/salt/modules/schedule.py +++ b/salt/modules/schedule.py @@ -300,7 +300,7 @@ def add(name, **kwargs): salt '*' schedule.add job1 function='test.ping' seconds=3600 # If function have some arguments, use job_args - salt '*' schedule.add job2 function='cmd.run' job_args=['date >> /tmp/date.log'] seconds=60 + salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60 ''' ret = {'comment': [], diff --git a/salt/modules/sysrc.py b/salt/modules/sysrc.py index b8954f48e9..e04d87dcea 100644 --- a/salt/modules/sysrc.py +++ b/salt/modules/sysrc.py @@ -40,6 +40,12 @@ def get(**kwargs): cmd = 'sysrc -v' + if 'file' in kwargs: + cmd += ' -f '+kwargs['file'] + + if 'jail' in kwargs: + cmd += ' -j '+kwargs['jail'] + if 'name' in kwargs: cmd += ' '+kwargs['name'] elif kwargs.get('includeDefaults', False): @@ -47,12 +53,6 @@ def get(**kwargs): else: cmd += ' -a' - if 'file' in kwargs: - cmd += ' -f '+kwargs['file'] - - if 'jail' in kwargs: - cmd += ' -j '+kwargs['jail'] - sysrcs = __salt__['cmd.run'](cmd) if "sysrc: unknown variable" in sysrcs: # raise CommandExecutionError(sysrcs) diff --git a/salt/modules/tls.py b/salt/modules/tls.py index 95a295caec..39bd01c4e2 100644 --- a/salt/modules/tls.py +++ b/salt/modules/tls.py @@ -4,7 +4,8 @@ A salt module for SSL/TLS. Can create a Certificate Authority (CA) or use Self-Signed certificates. -:depends: - PyOpenSSL Python module (0.14 or later) +:depends: - PyOpenSSL Python module (0.10 or later, 0.14 or later for + X509 extension support) :configuration: Add the following values in /etc/salt/minion for the CA module to function properly:: @@ -113,6 +114,7 @@ from distutils.version import LooseVersion import re HAS_SSL = False +X509_EXT_ENABLED = True try: import OpenSSL HAS_SSL = True @@ -133,9 +135,15 @@ def __virtual__(): ''' Only load this module if the ca config options are set ''' - if HAS_SSL and OpenSSL_version >= LooseVersion('0.14'): - if OpenSSL_version <= LooseVersion('0.15'): - log.warn('You should upgrade pyOpenSSL to at least 0.15.1') + global X509_EXT_ENABLED + if HAS_SSL and OpenSSL_version >= LooseVersion('0.10'): + if OpenSSL_version < LooseVersion('0.14'): + X509_EXT_ENABLED = False + log.error('You should upgrade pyOpenSSL to at least 0.14.1 ' + 'to enable the use of X509 extensions') + elif OpenSSL_version <= LooseVersion('0.15'): + log.warn('You should upgrade pyOpenSSL to at least 0.15.1 ' + 'to enable the full use of X509 extensions') # never EVER reactivate this code, this has been done too many times. 
# not having configured a cert path in the configuration does not # mean that users cant use this module as we provide methods @@ -147,9 +155,9 @@ def __virtual__(): # return False return True else: - return False, ['PyOpenSSL version 0.14 or later' - ' must be installed before ' - ' this module can be used.'] + X509_EXT_ENABLED = False + return False, ['PyOpenSSL version 0.10 or later must be installed ' + 'before this module can be used.'] def cert_base_path(cacert_path=None): @@ -686,20 +694,21 @@ def create_ca(ca_name, ca.set_issuer(ca.get_subject()) ca.set_pubkey(key) - ca.add_extensions([ - OpenSSL.crypto.X509Extension('basicConstraints', True, - 'CA:TRUE, pathlen:0'), - OpenSSL.crypto.X509Extension('keyUsage', True, - 'keyCertSign, cRLSign'), - OpenSSL.crypto.X509Extension('subjectKeyIdentifier', False, 'hash', - subject=ca)]) + if X509_EXT_ENABLED: + ca.add_extensions([ + OpenSSL.crypto.X509Extension('basicConstraints', True, + 'CA:TRUE, pathlen:0'), + OpenSSL.crypto.X509Extension('keyUsage', True, + 'keyCertSign, cRLSign'), + OpenSSL.crypto.X509Extension('subjectKeyIdentifier', False, + 'hash', subject=ca)]) - ca.add_extensions([ - OpenSSL.crypto.X509Extension( - 'authorityKeyIdentifier', - False, - 'issuer:always,keyid:always', - issuer=ca)]) + ca.add_extensions([ + OpenSSL.crypto.X509Extension( + 'authorityKeyIdentifier', + False, + 'issuer:always,keyid:always', + issuer=ca)]) ca.sign(key, digest) # alway backup existing keys in case @@ -754,6 +763,10 @@ def get_extensions(cert_type): ''' + assert X509_EXT_ENABLED, ('X509 extensions are not supported in ' + 'pyOpenSSL prior to version 0.15.1. Your ' + 'version: {0}'.format(OpenSSL_version)) + ext = {} if cert_type == '': log.error('cert_type set to empty in tls_ca.get_extensions(); ' @@ -974,21 +987,36 @@ def create_csr(ca_name, req.get_subject().CN = CN req.get_subject().emailAddress = emailAddress - extensions = get_extensions(cert_type)['csr'] - extension_adds = [] + try: + extensions = get_extensions(cert_type)['csr'] - for ext, value in extensions.items(): - extension_adds.append(OpenSSL.crypto.X509Extension(ext, False, value)) + extension_adds = [] + + for ext, value in extensions.items(): + extension_adds.append(OpenSSL.crypto.X509Extension(ext, False, + value)) + + except AssertionError as err: + log.error(err) + extensions = [] if subjectAltName: - if isinstance(subjectAltName, str): - subjectAltName = [subjectAltName] + if X509_EXT_ENABLED: + if isinstance(subjectAltName, str): + subjectAltName = [subjectAltName] - extension_adds.append( - OpenSSL.crypto.X509Extension( - 'subjectAltName', False, ", ".join(subjectAltName))) + extension_adds.append( + OpenSSL.crypto.X509Extension( + 'subjectAltName', False, ", ".join(subjectAltName))) + else: + raise ValueError('subjectAltName cannot be set as X509 ' + 'extensions are not supported in pyOpenSSL ' + 'prior to version 0.15.1. 
Your ' + 'version: {0}.'.format(OpenSSL_version)) + + if X509_EXT_ENABLED: + req.add_extensions(extension_adds) - req.add_extensions(extension_adds) req.set_pubkey(key) req.sign(key, digest) @@ -1344,8 +1372,6 @@ def create_ca_signed_cert(ca_name, exts = [] try: exts.extend(req.get_extensions()) - log.debug('req.get_extensions() supported in pyOpenSSL {0}'.format( - OpenSSL.__dict__.get('__version__', ''))) except AttributeError: try: # see: http://bazaar.launchpad.net/~exarkun/pyopenssl/master/revision/189 @@ -1353,9 +1379,9 @@ def create_ca_signed_cert(ca_name, # so we mimic the newly get_extensions method present in ultra # recent pyopenssl distros log.info('req.get_extensions() not supported in pyOpenSSL versions ' - 'prior to 0.15. Switching to Dark Magic(tm) ' + 'prior to 0.15. Processing extensions internally. ' ' Your version: {0}'.format( - OpenSSL.__dict__.get('__version__', 'pre-2014'))) + OpenSSL_version)) native_exts_obj = OpenSSL._util.lib.X509_REQ_get_extensions( req._req) @@ -1369,10 +1395,9 @@ def create_ca_signed_cert(ca_name, exts.append(ext) except Exception: log.error('X509 extensions are unsupported in pyOpenSSL ' - 'versions prior to 0.14. Upgrade required. Current ' - 'version: {0}'.format( - OpenSSL.__dict__.get('__version__', 'pre-2014')) - ) + 'versions prior to 0.14. Upgrade required to ' + 'use extensions. Current version: {0}'.format( + OpenSSL_version)) cert = OpenSSL.crypto.X509() cert.set_version(2) diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index 13bb774def..7b34b729d4 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -872,7 +872,8 @@ def stats(path, hash_type='md5', follow_symlinks=True): ret['ctime'] = pstat.st_ctime ret['size'] = pstat.st_size ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode))) - ret['sum'] = get_sum(path, hash_type) + if hash_type: + ret['sum'] = get_sum(path, hash_type) ret['type'] = 'file' if stat.S_ISDIR(pstat.st_mode): ret['type'] = 'dir' diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 6a43752ff3..606dfa0b92 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -643,7 +643,11 @@ def _get_name_map(): ''' Return a reverse map of full pkg names to the names recognized by winrepo. 
''' - return get_repo_data().get('name_map', {}) + u_name_map = {} + name_map = get_repo_data().get('name_map', {}) + for k in name_map.keys(): + u_name_map[salt.utils.sdecode(k)] = name_map[k] + return u_name_map def _get_package_info(name): diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index 4f59a7ccbc..b8f092b297 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -216,6 +216,7 @@ class SaltClientsMixIn(object): 'local_batch': local_client.cmd_batch, 'local_async': local_client.run_job, 'runner': salt.runner.RunnerClient(opts=self.application.opts).async, + 'runner_async': None, # empty, since we use the same client as `runner` } return SaltClientsMixIn.__saltclients @@ -717,7 +718,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W Content-Type: application/json Content-Legnth: 83 - {"clients": ["local", "local_batch", "local_async","runner"], "return": "Welcome"} + {"clients": ["local", "local_batch", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": self.saltclients.keys(), "return": "Welcome"} @@ -1031,6 +1032,15 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') + @tornado.gen.coroutine + def _disbatch_runner_async(self, chunk): + ''' + Disbatch runner client_async commands + ''' + f_call = {'args': [chunk['fun'], chunk]} + pub_data = self.saltclients['runner'](chunk['fun'], chunk) + raise tornado.gen.Return(pub_data) + class MinionSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' diff --git a/salt/pillar/s3.py b/salt/pillar/s3.py index 1c0921eaff..fb364faec3 100644 --- a/salt/pillar/s3.py +++ b/salt/pillar/s3.py @@ -109,7 +109,7 @@ def ext_pillar(minion_id, bucket, key, keyid, - verify_ssl, + verify_ssl=True, multiple_env=False, environment='base', prefix='', diff --git a/salt/states/dockerio.py b/salt/states/dockerio.py index 39eb768f55..c3afa47377 100644 --- a/salt/states/dockerio.py +++ b/salt/states/dockerio.py @@ -1011,7 +1011,7 @@ def running(name, image_exists = iinfos['status'] is_running = False if already_exists: - is_running = __salt__['docker.is_running'](container) + is_running = __salt__['docker.is_running'](name) # if container exists but is not started, try to start it if already_exists and (is_running or not start): return _valid(comment='container {0!r} already exists'.format(name)) diff --git a/salt/states/file.py b/salt/states/file.py index 1d2b58be37..8c619be38a 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -3606,11 +3606,11 @@ def copy( if force and os.path.isfile(name): hash1 = salt.utils.get_hash(name) hash2 = salt.utils.get_hash(source) - if hash1 != hash2: + if hash1 == hash2: changed = False if not force: changed = False - elif not __opts__['test']: + elif not __opts__['test'] and changed: # Remove the destination to prevent problems later try: if os.path.islink(name): diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 340d7bcab5..bb29c683c5 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -1719,7 +1719,7 @@ def get_hash(path, form='md5', chunk_size=65536): ''' try: hash_type = getattr(hashlib, form) - except AttributeError: + except (AttributeError, TypeError): raise ValueError('Invalid hash type: {0}'.format(form)) with salt.utils.fopen(path, 'rb') as ifile: hash_obj = hash_type() diff --git a/salt/utils/aws.py 
b/salt/utils/aws.py index ac96a3cbfe..1d30d8476b 100644 --- a/salt/utils/aws.py +++ b/salt/utils/aws.py @@ -139,8 +139,9 @@ def sig2(method, endpoint, params, provider, aws_api_version): return params_with_headers -def sig4(method, endpoint, params, prov_dict, aws_api_version, location, - product='ec2', uri='/', requesturl=None): +def sig4(method, endpoint, params, prov_dict, + aws_api_version=DEFAULT_AWS_API_VERSION, location=DEFAULT_LOCATION, + product='ec2', uri='/', requesturl=None, data=''): ''' Sign a query against AWS services using Signature Version 4 Signing Process. This is documented at: @@ -155,7 +156,8 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location, access_key_id, secret_access_key, token = creds(prov_dict) params_with_headers = params.copy() - params_with_headers['Version'] = aws_api_version + if product != 's3': + params_with_headers['Version'] = aws_api_version keys = sorted(params_with_headers.keys()) values = list(map(params_with_headers.get, keys)) querystring = urlencode(list(zip(keys, values))).replace('+', '%20') @@ -173,7 +175,7 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location, # Create payload hash (hash of the request body content). For GET # requests, the payload is an empty string (''). - payload_hash = hashlib.sha256('').hexdigest() + payload_hash = hashlib.sha256(data).hexdigest() # Combine elements to create create canonical request canonical_request = '\n'.join(( @@ -223,7 +225,8 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location, headers = { 'x-amz-date': amzdate, - 'Authorization': authorization_header + 'x-amz-content-sha256': payload_hash, + 'Authorization': authorization_header, } # Add in security token if we have one diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index fc90e57ee1..835adc17c7 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -423,6 +423,9 @@ def bootstrap(vm_, opts): 'win_installer', vm_, opts ) if win_installer: + deploy_kwargs['port'] = salt.config.get_cloud_config_value( + 'smb_port', vm_, opts, default=445 + ) deploy_kwargs['win_installer'] = win_installer minion = salt.utils.cloud.minion_config(opts, vm_) deploy_kwargs['master'] = minion['master'] diff --git a/salt/utils/network.py b/salt/utils/network.py index 682fb62c0b..15c7d6713e 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -498,7 +498,7 @@ def _interfaces_ifconfig(out): else: pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') - pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*') + pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') @@ -545,6 +545,9 @@ def _interfaces_ifconfig(out): mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) + if not salt.utils.is_sunos(): + ipv6scope = mmask6.group(3) or mmask6.group(4) + addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: diff --git a/salt/utils/process.py b/salt/utils/process.py index da77bd4860..85a521291a 100644 --- a/salt/utils/process.py +++ b/salt/utils/process.py @@ -6,6 +6,7 @@ from __future__ import absolute_import import logging import 
os import time +import types import sys import multiprocessing import signal @@ -235,7 +236,20 @@ class ProcessManager(object): process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs) process.start() - log.debug("Started '{0}' with pid {1}".format(tgt.__name__, process.pid)) + + # create a nicer name for the debug log + if isinstance(tgt, types.FunctionType): + name = '{0}.{1}'.format( + tgt.__module__, + tgt.__name__, + ) + else: + name = '{0}.{1}.{2}'.format( + tgt.__module__, + tgt.__class__, + tgt.__name__, + ) + log.debug("Started '{0}' with pid {1}".format(name, process.pid)) self._process_map[process.pid] = {'tgt': tgt, 'args': args, 'kwargs': kwargs, diff --git a/salt/utils/s3.py b/salt/utils/s3.py index 12da0dc05f..8a26c64d27 100644 --- a/salt/utils/s3.py +++ b/salt/utils/s3.py @@ -7,10 +7,6 @@ Connection library for Amazon S3 from __future__ import absolute_import # Import Python libs -import binascii -import datetime -import hashlib -import hmac import logging # Import 3rd-party libs @@ -19,21 +15,22 @@ try: HAS_REQUESTS = True # pylint: disable=W0612 except ImportError: HAS_REQUESTS = False # pylint: disable=W0612 -from salt.ext.six.moves.urllib.parse import urlencode # pylint: disable=no-name-in-module,import-error # Import Salt libs import salt.utils +import salt.utils.aws import salt.utils.xmlutil as xml import salt.utils.iam as iam from salt._compat import ElementTree as ET log = logging.getLogger(__name__) +DEFAULT_LOCATION = 'us-east-1' def query(key, keyid, method='GET', params=None, headers=None, requesturl=None, return_url=False, bucket=None, service_url=None, - path=None, return_bin=False, action=None, local_file=None, - verify_ssl=True): + path='', return_bin=False, action=None, local_file=None, + verify_ssl=True, location=DEFAULT_LOCATION): ''' Perform a query against an S3-like API. This function requires that a secret key and the id for that key are passed in. 
For instance: @@ -71,9 +68,6 @@ def query(key, keyid, method='GET', params=None, headers=None, if not params: params = {} - if path is None: - path = '' - if not service_url: service_url = 's3.amazonaws.com' @@ -90,75 +84,33 @@ def query(key, keyid, method='GET', params=None, headers=None, keyid = iam_creds['access_key'] token = iam_creds['security_token'] - if not requesturl: - x_amz_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') - content_type = 'text/plain' - if method == 'GET': - if bucket: - can_resource = '/{0}/{1}'.format(bucket, path) - else: - can_resource = '/' - elif method == 'PUT' or method == 'HEAD' or method == 'DELETE': - if path: - can_resource = '/{0}/{1}'.format(bucket, path) - else: - can_resource = '/{0}/'.format(bucket) - - if action: - can_resource += '?{0}'.format(action) - - log.debug('CanonicalizedResource: {0}'.format(can_resource)) - - headers['Host'] = endpoint - headers['Content-type'] = content_type - headers['Date'] = x_amz_date - if token: - headers['x-amz-security-token'] = token - - string_to_sign = '{0}\n'.format(method) - - new_headers = [] - for header in sorted(headers): - if header.lower().startswith('x-amz'): - log.debug(header.lower()) - new_headers.append('{0}:{1}'.format(header.lower(), - headers[header])) - can_headers = '\n'.join(new_headers) - log.debug('CanonicalizedAmzHeaders: {0}'.format(can_headers)) - - string_to_sign += '\n{0}'.format(content_type) - string_to_sign += '\n{0}'.format(x_amz_date) - if can_headers: - string_to_sign += '\n{0}'.format(can_headers) - string_to_sign += '\n{0}'.format(can_resource) - log.debug('String To Sign:: \n{0}'.format(string_to_sign)) - - hashed = hmac.new(key, string_to_sign, hashlib.sha1) - sig = binascii.b2a_base64(hashed.digest()) - headers['Authorization'] = 'AWS {0}:{1}'.format(keyid, sig.strip()) - - querystring = urlencode(params) - if action: - if querystring: - querystring = '{0}&{1}'.format(action, querystring) - else: - querystring = action - requesturl = 'https://{0}/'.format(endpoint) - if path: - requesturl += path - if querystring: - requesturl += '?{0}'.format(querystring) - - data = None + data = '' if method == 'PUT': if local_file: with salt.utils.fopen(local_file, 'r') as ifile: data = ifile.read() + if not requesturl: + requesturl = 'https://{0}/{1}'.format(endpoint, path) + headers, requesturl = salt.utils.aws.sig4( + method, + endpoint, + params, + data=data, + uri='/{0}'.format(path), + prov_dict={'id': keyid, 'key': key}, + location=location, + product='s3', + requesturl=requesturl, + ) + log.debug('S3 Request: {0}'.format(requesturl)) log.debug('S3 Headers::') log.debug(' Authorization: {0}'.format(headers['Authorization'])) + if not data: + data = None + try: result = requests.request(method, requesturl, headers=headers, data=data, diff --git a/salt/utils/verify.py b/salt/utils/verify.py index f7dcb36a8c..7750a01744 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -203,7 +203,7 @@ def verify_env(dirs, user, permissive=False, pki_dir=''): err = ('Failed to prepare the Salt environment for user ' '{0}. 
The user is not available.\n').format(user) sys.stderr.write(err) - sys.exit(salt.defulats.exitcodes.EX_NOUSER) + sys.exit(salt.defaults.exitcodes.EX_NOUSER) for dir_ in dirs: if not dir_: continue diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py index ff1f880fb1..d291d5035d 100644 --- a/tests/integration/netapi/rest_tornado/test_app.py +++ b/tests/integration/netapi/rest_tornado/test_app.py @@ -54,6 +54,7 @@ class TestSaltAPIHandler(SaltnadoTestCase): response_obj = json.loads(response.body) self.assertEqual(response_obj['clients'], ['runner', + 'runner_async', 'local_async', 'local', 'local_batch'] @@ -303,6 +304,25 @@ class TestSaltAPIHandler(SaltnadoTestCase): self.assertEqual(len(response_obj['return']), 1) self.assertEqual(set(response_obj['return'][0]), set(['minion', 'sub_minion'])) + # runner_async tests + def test_simple_local_runner_async_post(self): + low = [{'client': 'runner_async', + 'fun': 'manage.up', + }] + response = self.fetch('/', + method='POST', + body=json.dumps(low), + headers={'Content-Type': self.content_type_map['json'], + saltnado.AUTH_TOKEN_HEADER: self.token['token']}, + connect_timeout=10, + request_timeout=10, + ) + response_obj = json.loads(response.body) + self.assertIn('return', response_obj) + self.assertEqual(1, len(response_obj['return'])) + self.assertIn('jid', response_obj['return'][0]) + self.assertIn('tag', response_obj['return'][0]) + @skipIf(HAS_TORNADO is False, 'Tornado must be installed to run these tests') @skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.') diff --git a/tests/integration/states/file.py b/tests/integration/states/file.py index c6dd5c3365..4f017fc51d 100644 --- a/tests/integration/states/file.py +++ b/tests/integration/states/file.py @@ -1056,6 +1056,17 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): 'file.append', name=name, text='cheese', makedirs=True ) self.assertSaltTrueReturn(ret) + finally: + if os.path.isfile(name): + os.remove(name) + + try: + # Parent directory exists but file does not and makedirs is False + ret = self.run_state( + 'file.append', name=name, text='cheese' + ) + self.assertSaltTrueReturn(ret) + self.assertTrue(os.path.isfile(name)) finally: shutil.rmtree( os.path.join(integration.TMP, 'issue_1864'), diff --git a/tests/unit/modules/tls_test.py b/tests/unit/modules/tls_test.py index 87ec4b6921..282632f201 100644 --- a/tests/unit/modules/tls_test.py +++ b/tests/unit/modules/tls_test.py @@ -11,6 +11,7 @@ NO_PYOPENSSL = False import shutil import tempfile import os +from distutils.version import LooseVersion try: # We're not going to actually use OpenSSL, we just want to check that # it's installed. 
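The ``test_pyOpenSSL_version`` cases added below patch ``OpenSSL_version`` with ``LooseVersion`` objects to drive the new gate in ``tls.__virtual__``. ``LooseVersion`` compares dotted version strings component by component as numbers, which is what makes thresholds such as ``0.10`` and ``0.14`` reliable; a short illustration:

.. code-block:: python

    from distutils.version import LooseVersion

    # Component-wise numeric ordering, as relied on by the version gate.
    assert LooseVersion('0.9.1') < LooseVersion('0.10')
    assert LooseVersion('0.10') < LooseVersion('0.14.1') < LooseVersion('0.15.1')

    # A plain string comparison would order the first pair incorrectly.
    assert not ('0.9.1' < '0.10')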
@@ -642,6 +643,160 @@ class TLSAddTestCase(TestCase): if os.path.isdir(ca_path): shutil.rmtree(ca_path) + def test_pyOpenSSL_version(self): + ''' + Test extension logic with different pyOpenSSL versions + ''' + pillarval = {'csr': {'extendedKeyUsage': 'serverAuth'}} + mock_pgt = MagicMock(return_value=pillarval) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': LooseVersion('0.1.1'), + 'X509_EXT_ENABLED': False}): + self.assertEqual(tls.__virtual__(), + (False, ['PyOpenSSL version 0.10 or later must be installed ' + 'before this module can be used.'])) + with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}): + self.assertRaises(AssertionError, tls.get_extensions, 'server') + self.assertRaises(AssertionError, tls.get_extensions, 'client') + with patch.dict(tls.__dict__, { + 'OpenSSL_version': LooseVersion('0.14.1'), + 'X509_EXT_ENABLED': True}): + self.assertTrue(tls.__virtual__()) + with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}): + self.assertEqual(tls.get_extensions('server'), pillarval) + self.assertEqual(tls.get_extensions('client'), pillarval) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': LooseVersion('0.15.1'), + 'X509_EXT_ENABLED': True}): + self.assertTrue(tls.__virtual__()) + with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}): + self.assertEqual(tls.get_extensions('server'), pillarval) + self.assertEqual(tls.get_extensions('client'), pillarval) + + @destructiveTest + def test_pyOpenSSL_version_destructive(self): + ''' + Test extension logic with different pyOpenSSL versions + ''' + pillarval = {'csr': {'extendedKeyUsage': 'serverAuth'}} + mock_pgt = MagicMock(return_value=pillarval) + ca_path = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR) + ca_name = 'test_ca' + certp = '{0}/{1}/{2}_ca_cert.crt'.format( + ca_path, + ca_name, + ca_name) + certk = '{0}/{1}/{2}_ca_cert.key'.format( + ca_path, + ca_name, + ca_name) + ret = 'Created Private Key: "{0}." Created CA "{1}": "{2}."'.format( + certk, ca_name, certp) + mock_opt = MagicMock(return_value=ca_path) + mock_ret = MagicMock(return_value=0) + try: + with patch.dict(tls.__salt__, { + 'config.option': mock_opt, + 'cmd.retcode': mock_ret}): + with patch.dict(tls.__opts__, { + 'hash_type': 'sha256', + 'cachedir': ca_path}): + with patch.dict(_TLS_TEST_DATA['create_ca'], + {'replace': True}): + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.1.1'), + 'X509_EXT_ENABLED': False}): + self.assertEqual( + tls.create_ca( + ca_name, + days=365, + fixmode=False, + **_TLS_TEST_DATA['create_ca']), + ret) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.14.1'), + 'X509_EXT_ENABLED': True}): + self.assertEqual( + tls.create_ca( + ca_name, + days=365, + fixmode=False, + **_TLS_TEST_DATA['create_ca']), + ret) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.15.1'), + 'X509_EXT_ENABLED': True}): + self.assertEqual( + tls.create_ca( + ca_name, + days=365, + fixmode=False, + **_TLS_TEST_DATA['create_ca']), + ret) + finally: + if os.path.isdir(ca_path): + shutil.rmtree(ca_path) + + try: + certp = '{0}/{1}/certs/{2}.csr'.format( + ca_path, + ca_name, + _TLS_TEST_DATA['create_ca']['CN']) + certk = '{0}/{1}/certs/{2}.key'.format( + ca_path, + ca_name, + _TLS_TEST_DATA['create_ca']['CN']) + ret = ('Created Private Key: "{0}." 
' + 'Created CSR for "{1}": "{2}."').format( + certk, _TLS_TEST_DATA['create_ca']['CN'], certp) + with patch.dict(tls.__salt__, { + 'config.option': mock_opt, + 'cmd.retcode': mock_ret, + 'pillar.get': mock_pgt}): + with patch.dict(tls.__opts__, {'hash_type': 'sha256', + 'cachedir': ca_path}): + with patch.dict(_TLS_TEST_DATA['create_ca'], { + 'subjectAltName': 'DNS:foo.bar', + 'replace': True}): + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.1.1'), + 'X509_EXT_ENABLED': False}): + tls.create_ca(ca_name) + tls.create_csr(ca_name) + self.assertRaises(ValueError, + tls.create_csr, + ca_name, + **_TLS_TEST_DATA['create_ca']) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.14.1'), + 'X509_EXT_ENABLED': True}): + tls.create_ca(ca_name) + tls.create_csr(ca_name) + self.assertEqual( + tls.create_csr( + ca_name, + **_TLS_TEST_DATA['create_ca']), + ret) + with patch.dict(tls.__dict__, { + 'OpenSSL_version': + LooseVersion('0.15.1'), + 'X509_EXT_ENABLED': True}): + tls.create_ca(ca_name) + tls.create_csr(ca_name) + self.assertEqual( + tls.create_csr( + ca_name, + **_TLS_TEST_DATA['create_ca']), + ret) + finally: + if os.path.isdir(ca_path): + shutil.rmtree(ca_path) + if __name__ == '__main__': from integration import run_tests run_tests(TLSAddTestCase, needs_daemon=False) diff --git a/tests/unit/utils/network.py b/tests/unit/utils/network.py index 1c5f0f4f87..a77a24772f 100644 --- a/tests/unit/utils/network.py +++ b/tests/unit/utils/network.py @@ -101,12 +101,14 @@ class NetworkTestCase(TestCase): 'broadcast': '10.10.10.255', 'netmask': '255.255.252.0'}], 'inet6': [{'address': 'fe80::e23f:49ff:fe85:6aaf', - 'prefixlen': '64'}], + 'prefixlen': '64', + 'scope': 'link'}], 'up': True}, 'lo': {'inet': [{'address': '127.0.0.1', 'netmask': '255.0.0.0'}], 'inet6': [{'address': '::1', - 'prefixlen': '128'}], + 'prefixlen': '128', + 'scope': 'host'}], 'up': True}} ) @@ -127,9 +129,11 @@ class NetworkTestCase(TestCase): 'lo0': {'inet': [{'address': '127.0.0.1', 'netmask': '255.0.0.0'}], 'inet6': [{'address': 'fe80::1', - 'prefixlen': '64'}, + 'prefixlen': '64', + 'scope': '0x8'}, {'address': '::1', - 'prefixlen': '128'}], + 'prefixlen': '128', + 'scope': None}], 'up': True}, 'plip0': {'up': False}, 'tun0': {'inet': [{'address': '10.12.0.1',
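The final (truncated) hunk extends the expected salt.utils.network.interfaces() fixtures with a per-address inet6 'scope' field. A minimal consumer-side sketch, using a hypothetical fixture shaped like the Linux one in the test — note the field can be a kernel scope name such as 'link' or 'host', a BSD-style index such as '0x8', or None:

interfaces = {
    'eth0': {'inet6': [{'address': 'fe80::e23f:49ff:fe85:6aaf',
                        'prefixlen': '64',
                        'scope': 'link'}],
             'up': True},
    'lo': {'inet6': [{'address': '::1',
                      'prefixlen': '128',
                      'scope': 'host'}],
           'up': True},
}

for name, data in sorted(interfaces.items()):
    for addr in data.get('inet6', []):
        # .get() keeps older parser output without 'scope' from raising.
        print('{0}: {1}/{2} scope={3}'.format(
            name, addr['address'], addr['prefixlen'],
            addr.get('scope')))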