From 893dc95e59cd1e13c74c04dd1b617eb95dc150bb Mon Sep 17 00:00:00 2001 From: root Date: Tue, 2 Feb 2016 07:12:31 +0000 Subject: [PATCH 01/65] added suport for static ip configuration for windows vm --- salt/cloud/clouds/vmware.py | 86 +++++++++++++++++++++++++++++++++---- 1 file changed, 78 insertions(+), 8 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 928dfc6f39..f700a4007a 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -164,7 +164,6 @@ log = logging.getLogger(__name__) __virtualname__ = 'vmware' - # Only load in this module if the VMware configurations are in place def __virtual__(): ''' @@ -769,6 +768,51 @@ def _wait_for_vmware_tools(vm_ref, max_wait): log.warning("[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} seconds".format(vm_ref.name, max_wait)) return False +def valid_ip(ip_address): + ''' + Check if the IP address is valid + + Return either True or False + ''' + + # Make sure IP has four octets + octets = ip_address.split('.') + if len(octets) != 4: + return False + + # convert octet from string to int + for i, octet in enumerate(octets): + + try: + octets[i] = int(octet) + except ValueError: + # couldn't convert octet to an integer + return False + + + # map variables to elements of octets list + first_octet, second_octet, third_octet, fourth_octet = octets + + # Check first_octet meets conditions + if first_octet < 1: + return False + elif first_octet > 223: + return False + elif first_octet == 127: + return False + + # Check 169.254.X.X condition + if first_octet == 169 and second_octet == 254: + return False + + # Check 2nd - 4th octets + for octet in (second_octet, third_octet, fourth_octet): + if (octet < 0) or (octet > 255): + return False + + + # Passed all of the checks + return True def _wait_for_ip(vm_ref, max_wait): max_wait_vmware_tools = max_wait @@ -776,23 +820,22 @@ def _wait_for_ip(vm_ref, max_wait): vmware_tools_status = 
_wait_for_vmware_tools(vm_ref, max_wait_vmware_tools) if not vmware_tools_status: return False - time_counter = 0 starttime = time.time() while time_counter < max_wait_ip: if time_counter % 5 == 0: log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter)) - if vm_ref.summary.guest.ipAddress and match(IP_RE, vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1': - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + if vm_ref.summary.guest.ipAddress and valid_ip(vm_ref.summary.guest.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return vm_ref.summary.guest.ipAddress - for net in vm_ref.guest.net: + for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: - if match(IP_RE, current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1': - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) - return current_ip.ipAddress + if valid_ip( current_ip.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + return current_ip.ipAddress time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 log.warning("[ {0} ] Timeout Reached. 
Unable to retrieve IPv4 information after waiting for {1} seconds".format(vm_ref.name, max_wait_ip)) @@ -2350,6 +2393,33 @@ def create(vm_): ) clone_spec.customization = custom_spec + if customization and (devices and 'network' in list(devices.keys())) and 'Windows' in object_ref.config.guestFullName: + global_ip = vim.vm.customization.GlobalIPSettings() + + if 'dns_servers' in list(vm_.keys()): + global_ip.dnsServerList = vm_['dns_servers'] + + identity = vim.vm.customization.Sysprep() + identity.guiUnattended = vim.vm.customization.GuiUnattended() + identity.guiUnattended.autoLogon = False + identity.guiUnattended.password = vim.vm.customization.Password() + identity.guiUnattended.password.value = vm_['win_password'] + identity.guiUnattended.password.plainText = True + identity.userData = vim.vm.customization.UserData() + hostName = vm_name.split('.')[0] + identity.userData.fullName = host if host!= None else hostName + identity.userData.orgName = "Organization-Name" + identity.userData.computerName = vim.vm.customization.FixedName() + identity.userData.computerName.name = domain + identity.identification = vim.vm.customization.Identification() + + custom_spec = vim.vm.customization.Specification( + globalIPSettings=global_ip, + identity=identity, + nicSettingMap=specs['nics_map'] + ) + clone_spec.customization = custom_spec + if not template: clone_spec.powerOn = power From 33c2f6d48b7e7bb5e22bbf9ee995811e27aa7ece Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Wed, 3 Feb 2016 15:26:22 +0530 Subject: [PATCH 02/65] Update hostname as vmname for windows vm --- salt/cloud/clouds/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index f700a4007a..7f4452c63d 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -2407,7 +2407,7 @@ def create(vm_): identity.guiUnattended.password.plainText = True identity.userData = vim.vm.customization.UserData() hostName = 
vm_name.split('.')[0] - identity.userData.fullName = host if host!= None else hostName + identity.userData.fullName = hostName identity.userData.orgName = "Organization-Name" identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = domain From 81ea2531c941dfb4d1b0b7323daa11295c17ba42 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Thu, 4 Feb 2016 11:18:38 +0530 Subject: [PATCH 03/65] Update line indentation --- salt/cloud/clouds/vmware.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 7f4452c63d..29b4799770 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -768,10 +768,9 @@ def _wait_for_vmware_tools(vm_ref, max_wait): log.warning("[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} seconds".format(vm_ref.name, max_wait)) return False -def valid_ip(ip_address): +def _valid_ip(ip_address): ''' Check if the IP address is valid - Return either True or False ''' @@ -820,22 +819,23 @@ def _wait_for_ip(vm_ref, max_wait): vmware_tools_status = _wait_for_vmware_tools(vm_ref, max_wait_vmware_tools) if not vmware_tools_status: return False + time_counter = 0 starttime = time.time() while time_counter < max_wait_ip: if time_counter % 5 == 0: log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter)) - if vm_ref.summary.guest.ipAddress and valid_ip(vm_ref.summary.guest.ipAddress): - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return vm_ref.summary.guest.ipAddress - - for net in vm_ref.guest.net: + + for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in 
net.ipConfig.ipAddress: - if valid_ip( current_ip.ipAddress): - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) - return current_ip.ipAddress + if _valid_ip( current_ip.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + return current_ip.ipAddress time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 log.warning("[ {0} ] Timeout Reached. Unable to retrieve IPv4 information after waiting for {1} seconds".format(vm_ref.name, max_wait_ip)) From dc27f79093ff3839fa3e65b747d5840d3f279bd0 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Thu, 4 Feb 2016 11:45:46 +0530 Subject: [PATCH 04/65] fixed line indentation --- salt/cloud/clouds/vmware.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 29b4799770..1aef08dc27 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -788,7 +788,6 @@ def _valid_ip(ip_address): # couldn't convert octet to an integer return False - # map variables to elements of octets list first_octet, second_octet, third_octet, fourth_octet = octets @@ -808,8 +807,6 @@ def _valid_ip(ip_address): for octet in (second_octet, third_octet, fourth_octet): if (octet < 0) or (octet > 255): return False - - # Passed all of the checks return True @@ -826,11 +823,11 @@ def _wait_for_ip(vm_ref, max_wait): if time_counter % 5 == 0: log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter)) - if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, 
time_counter)) return vm_ref.summary.guest.ipAddress - - for net in vm_ref.guest.net: + + for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: if _valid_ip( current_ip.ipAddress): From 43d379f80ed82de6bc0985c1efbd092c870aada8 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Thu, 4 Feb 2016 15:20:16 +0530 Subject: [PATCH 05/65] Fixed spacing --- salt/cloud/clouds/vmware.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 1aef08dc27..fe42d4d2f2 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -830,7 +830,7 @@ def _wait_for_ip(vm_ref, max_wait): for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: - if _valid_ip( current_ip.ipAddress): + if _valid_ip(current_ip.ipAddress): log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return current_ip.ipAddress time.sleep(1.0 - ((time.time() - starttime) % 1.0)) @@ -2399,7 +2399,7 @@ def create(vm_): identity = vim.vm.customization.Sysprep() identity.guiUnattended = vim.vm.customization.GuiUnattended() identity.guiUnattended.autoLogon = False - identity.guiUnattended.password = vim.vm.customization.Password() + identity.guiUnattended.password = vim.vm.customization.Password() identity.guiUnattended.password.value = vm_['win_password'] identity.guiUnattended.password.plainText = True identity.userData = vim.vm.customization.UserData() From e8fcb902214a379829a05d9afa13532d8ca66095 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Fri, 5 Feb 2016 10:52:07 +0530 Subject: [PATCH 06/65] Fixed lint errors --- salt/cloud/clouds/vmware.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index fe42d4d2f2..c7a2887523 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -164,6 +164,7 @@ log = 
logging.getLogger(__name__) __virtualname__ = 'vmware' + # Only load in this module if the VMware configurations are in place def __virtual__(): ''' @@ -768,6 +769,7 @@ def _wait_for_vmware_tools(vm_ref, max_wait): log.warning("[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} seconds".format(vm_ref.name, max_wait)) return False + def _valid_ip(ip_address): ''' Check if the IP address is valid @@ -810,6 +812,7 @@ def _valid_ip(ip_address): # Passed all of the checks return True + def _wait_for_ip(vm_ref, max_wait): max_wait_vmware_tools = max_wait max_wait_ip = max_wait From dfcb61c49de1b494ae3d456f58843ff67d5972da Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Mon, 15 Feb 2016 15:15:55 +0530 Subject: [PATCH 07/65] Fixed formating using autopep8 --- salt/cloud/clouds/vmware.py | 1020 ++++++++++++++++++++++++----------- 1 file changed, 693 insertions(+), 327 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index c7a2887523..86e4add68e 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -247,11 +247,17 @@ def _get_si(): 'password', get_configured_provider(), __opts__, search_global=False ) protocol = config.get_cloud_config_value( - 'protocol', get_configured_provider(), __opts__, search_global=False, default='https' - ) + 'protocol', + get_configured_provider(), + __opts__, + search_global=False, + default='https') port = config.get_cloud_config_value( - 'port', get_configured_provider(), __opts__, search_global=False, default=443 - ) + 'port', + get_configured_provider(), + __opts__, + search_global=False, + default=443) return salt.utils.vmware.get_service_instance(url, username, @@ -269,7 +275,12 @@ def _edit_existing_hard_disk_helper(disk, size_kb): return disk_spec -def _add_new_hard_disk_helper(disk_label, size_gb, unit_number, controller_key=1000, thin_provision=False): +def _add_new_hard_disk_helper( + disk_label, + size_gb, + unit_number, + controller_key=1000, + 
thin_provision=False): random_key = randint(-2099, -2000) size_kb = int(size_gb * 1024.0 * 1024.0) @@ -293,20 +304,36 @@ def _add_new_hard_disk_helper(disk_label, size_gb, unit_number, controller_key=1 return disk_spec -def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_type, switch_type, container_ref=None): +def _edit_existing_network_adapter( + network_adapter, + new_network_name, + adapter_type, + switch_type, + container_ref=None): adapter_type.strip().lower() switch_type.strip().lower() if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]: - edited_network_adapter = salt.utils.vmware.get_network_adapter_type(adapter_type) + edited_network_adapter = salt.utils.vmware.get_network_adapter_type( + adapter_type) if isinstance(network_adapter, type(edited_network_adapter)): edited_network_adapter = network_adapter else: - log.debug("Changing type of '{0}' from '{1}' to '{2}'".format(network_adapter.deviceInfo.label, type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), adapter_type)) + log.debug( + "Changing type of '{0}' from '{1}' to '{2}'".format( + network_adapter.deviceInfo.label, + type(network_adapter).__name__.rsplit( + ".", + 1)[1][ + 7:].lower(), + adapter_type)) else: # If type not specified or does not match, don't change adapter type if adapter_type: - log.error("Cannot change type of '{0}' to '{1}'. Not changing type".format(network_adapter.deviceInfo.label, adapter_type)) + log.error( + "Cannot change type of '{0}' to '{1}'. 
Not changing type".format( + network_adapter.deviceInfo.label, + adapter_type)) edited_network_adapter = network_adapter if switch_type == 'standard': @@ -335,9 +362,11 @@ def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_ty else: # If switch type not specified or does not match, show error and return if not switch_type: - err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter.deviceInfo.label) + err_msg = "The switch type to be used by '{0}' has not been specified".format( + network_adapter.deviceInfo.label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format(network_adapter.deviceInfo.label, switch_type) + err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( + network_adapter.deviceInfo.label, switch_type) raise SaltCloudSystemExit(err_msg) edited_network_adapter.key = network_adapter.key @@ -357,7 +386,12 @@ def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_ty return network_spec -def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref=None): +def _add_new_network_adapter_helper( + network_adapter_label, + network_name, + adapter_type, + switch_type, + container_ref=None): random_key = randint(-4099, -4000) adapter_type.strip().lower() @@ -365,13 +399,18 @@ def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter network_spec = vim.vm.device.VirtualDeviceSpec() if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]: - network_spec.device = salt.utils.vmware.get_network_adapter_type(adapter_type) + network_spec.device = salt.utils.vmware.get_network_adapter_type( + adapter_type) else: - # If type not specified or does not match, create adapter of type vmxnet3 + # If type not specified or does not match, create adapter of type + # vmxnet3 if not adapter_type: - log.debug("The type of '{0}' has not been specified. 
Creating of default type 'vmxnet3'".format(network_adapter_label)) + log.debug("The type of '{0}' has not been specified. Creating of default type 'vmxnet3'".format( + network_adapter_label)) else: - log.error("Cannot create network adapter of type '{0}'. Creating '{1}' of default type 'vmxnet3'".format(adapter_type, network_adapter_label)) + log.error( + "Cannot create network adapter of type '{0}'. Creating '{1}' of default type 'vmxnet3'".format( + adapter_type, network_adapter_label)) network_spec.device = vim.vm.device.VirtualVmxnet3() network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add @@ -379,15 +418,14 @@ def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter if switch_type == 'standard': network_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() network_spec.device.backing.deviceName = network_name - network_spec.device.backing.network = salt.utils.vmware.get_mor_by_property(_get_si(), - vim.Network, - network_name, - container_ref=container_ref) + network_spec.device.backing.network = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Network, network_name, container_ref=container_ref) elif switch_type == 'distributed': - network_ref = salt.utils.vmware.get_mor_by_property(_get_si(), - vim.dvs.DistributedVirtualPortgroup, - network_name, - container_ref=container_ref) + network_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), + vim.dvs.DistributedVirtualPortgroup, + network_name, + container_ref=container_ref) dvs_port_connection = vim.dvs.PortConnection( portgroupKey=network_ref.key, switchUuid=network_ref.config.distributedVirtualSwitch.uuid @@ -397,10 +435,11 @@ def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter else: # If switch type not specified or does not match, show error and return if not switch_type: - err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter_label) + err_msg = "The switch type 
to be used by '{0}' has not been specified".format( + network_adapter_label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format(network_adapter_label, - switch_type) + err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( + network_adapter_label, switch_type) raise SaltCloudSystemExit(err_msg) network_spec.device.key = random_key @@ -424,10 +463,15 @@ def _edit_existing_scsi_controller(scsi_controller, bus_sharing): return scsi_spec -def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_number): +def _add_new_scsi_controller_helper( + scsi_controller_label, + properties, + bus_number): random_key = randint(-1050, -1000) - adapter_type = properties['type'].strip().lower() if 'type' in properties else None - bus_sharing = properties['bus_sharing'].strip().lower() if 'bus_sharing' in properties else None + adapter_type = properties['type'].strip( + ).lower() if 'type' in properties else None + bus_sharing = properties['bus_sharing'].strip( + ).lower() if 'bus_sharing' in properties else None scsi_spec = vim.vm.device.VirtualDeviceSpec() @@ -443,9 +487,11 @@ def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_numbe else: # If type not specified or does not match, show error and return if not adapter_type: - err_msg = "The type of '{0}' has not been specified".format(scsi_controller_label) + err_msg = "The type of '{0}' has not been specified".format( + scsi_controller_label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported type '{1}'".format(scsi_controller_label, adapter_type) + err_msg = "Cannot create '{0}'. 
Invalid/unsupported type '{1}'".format( + scsi_controller_label, adapter_type) raise SaltCloudSystemExit(err_msg) scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add @@ -457,7 +503,8 @@ def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_numbe scsi_spec.device.deviceInfo.summary = summary if bus_sharing == "virtual": - # Virtual disks can be shared between virtual machines on the same server + # Virtual disks can be shared between virtual machines on the same + # server scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.virtualSharing elif bus_sharing == "physical": @@ -471,7 +518,10 @@ def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_numbe return scsi_spec -def _add_new_ide_controller_helper(ide_controller_label, properties, bus_number): +def _add_new_ide_controller_helper( + ide_controller_label, + properties, + bus_number): ''' Helper function for adding new IDE controllers @@ -499,7 +549,8 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path): drive.backing.fileName = iso_path datastore = iso_path.partition('[')[-1].rpartition(']')[0] - datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore) + datastore_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datastore, datastore) if datastore_ref: drive.backing.datastore = datastore_ref @@ -522,12 +573,18 @@ def _edit_existing_cd_or_dvd_drive(drive, device_type, mode, iso_path): drive_spec = vim.vm.device.VirtualDeviceSpec() drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit - drive_spec.device = _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path) + drive_spec.device = _set_cd_or_dvd_backing_type( + drive, device_type, mode, iso_path) return drive_spec -def _add_new_cd_or_dvd_drive_helper(drive_label, controller_key, device_type, mode, iso_path): +def _add_new_cd_or_dvd_drive_helper( + drive_label, + controller_key, + device_type, + mode, 
+ iso_path): random_key = randint(-3025, -3000) device_type.strip().lower() @@ -539,13 +596,18 @@ def _add_new_cd_or_dvd_drive_helper(drive_label, controller_key, device_type, mo drive_spec.device.deviceInfo = vim.Description() if device_type in ['datastore_iso_file', 'client_device']: - drive_spec.device = _set_cd_or_dvd_backing_type(drive_spec.device, device_type, mode, iso_path) + drive_spec.device = _set_cd_or_dvd_backing_type( + drive_spec.device, device_type, mode, iso_path) else: - # If device_type not specified or does not match, create drive of Client type with Passthough mode + # If device_type not specified or does not match, create drive of + # Client type with Passthough mode if not device_type: - log.debug("The 'device_type' of '{0}' has not been specified. Creating of default type 'client_device'".format(drive_label)) + log.debug( + "The 'device_type' of '{0}' has not been specified. Creating of default type 'client_device'".format(drive_label)) else: - log.error("Cannot create CD/DVD drive of type '{0}'. Creating '{1}' of default type 'client_device'".format(device_type, drive_label)) + log.error( + "Cannot create CD/DVD drive of type '{0}'. 
Creating '{1}' of default type 'client_device'".format( + device_type, drive_label)) drive_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() drive_spec.device.deviceInfo.summary = 'Remote Device' @@ -600,58 +662,87 @@ def _manage_devices(devices, vm=None, container_ref=None): if isinstance(device, vim.vm.device.VirtualDisk): # this is a hard disk if 'disk' in list(devices.keys()): - # there is atleast one disk specified to be created/configured + # there is atleast one disk specified to be + # created/configured unit_number += 1 existing_disks_label.append(device.deviceInfo.label) if device.deviceInfo.label in list(devices['disk'].keys()): - size_gb = float(devices['disk'][device.deviceInfo.label]['size']) + size_gb = float( + devices['disk'][ + device.deviceInfo.label]['size']) size_kb = int(size_gb * 1024.0 * 1024.0) if device.capacityInKB < size_kb: # expand the disk - disk_spec = _edit_existing_hard_disk_helper(device, size_kb) + disk_spec = _edit_existing_hard_disk_helper( + device, size_kb) device_specs.append(disk_spec) elif isinstance(device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo) or isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): # this is a network adapter if 'network' in list(devices.keys()): - # there is atleast one network adapter specified to be created/configured - existing_network_adapters_label.append(device.deviceInfo.label) - if device.deviceInfo.label in list(devices['network'].keys()): - network_name = devices['network'][device.deviceInfo.label]['name'] - adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else '' - switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else '' - network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type) - 
adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label]) + # there is atleast one network adapter specified to be + # created/configured + existing_network_adapters_label.append( + device.deviceInfo.label) + if device.deviceInfo.label in list( + devices['network'].keys()): + network_name = devices['network'][ + device.deviceInfo.label]['name'] + adapter_type = devices['network'][ + device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][ + device.deviceInfo.label] else '' + switch_type = devices['network'][ + device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][ + device.deviceInfo.label] else '' + network_spec = _edit_existing_network_adapter( + device, network_name, adapter_type, switch_type) + adapter_mapping = _set_network_adapter_mapping( + devices['network'][device.deviceInfo.label]) device_specs.append(network_spec) nics_map.append(adapter_mapping) elif hasattr(device, 'scsiCtlrUnitNumber'): # this is a SCSI controller if 'scsi' in list(devices.keys()): - # there is atleast one SCSI controller specified to be created/configured + # there is atleast one SCSI controller specified to be + # created/configured bus_number += 1 - existing_scsi_controllers_label.append(device.deviceInfo.label) + existing_scsi_controllers_label.append( + device.deviceInfo.label) if device.deviceInfo.label in list(devices['scsi'].keys()): # Modify the existing SCSI controller - scsi_controller_properties = devices['scsi'][device.deviceInfo.label] - bus_sharing = scsi_controller_properties['bus_sharing'].strip().lower() if 'bus_sharing' in scsi_controller_properties else None - if bus_sharing and bus_sharing in ['virtual', 'physical', 'no']: + scsi_controller_properties = devices[ + 'scsi'][device.deviceInfo.label] + bus_sharing = scsi_controller_properties['bus_sharing'].strip( + ).lower() if 'bus_sharing' in scsi_controller_properties else None + if bus_sharing and bus_sharing in [ + 'virtual', 
'physical', 'no']: bus_sharing = '{0}Sharing'.format(bus_sharing) if bus_sharing != device.sharedBus: - # Only edit the SCSI controller if bus_sharing is different - scsi_spec = _edit_existing_scsi_controller(device, bus_sharing) + # Only edit the SCSI controller if bus_sharing + # is different + scsi_spec = _edit_existing_scsi_controller( + device, bus_sharing) device_specs.append(scsi_spec) elif isinstance(device, vim.vm.device.VirtualCdrom): # this is a cd/dvd drive if 'cd' in list(devices.keys()): - # there is atleast one cd/dvd drive specified to be created/configured + # there is atleast one cd/dvd drive specified to be + # created/configured existing_cd_drives_label.append(device.deviceInfo.label) if device.deviceInfo.label in list(devices['cd'].keys()): - device_type = devices['cd'][device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][device.deviceInfo.label] else '' - mode = devices['cd'][device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][device.deviceInfo.label] else '' - iso_path = devices['cd'][device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][device.deviceInfo.label] else '' - cd_drive_spec = _edit_existing_cd_or_dvd_drive(device, device_type, mode, iso_path) + device_type = devices['cd'][ + device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][ + device.deviceInfo.label] else '' + mode = devices['cd'][ + device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][ + device.deviceInfo.label] else '' + iso_path = devices['cd'][ + device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][ + device.deviceInfo.label] else '' + cd_drive_spec = _edit_existing_cd_or_dvd_drive( + device, device_type, mode, iso_path) device_specs.append(cd_drive_spec) elif isinstance(device, vim.vm.device.VirtualIDEController): @@ -659,55 +750,68 @@ def _manage_devices(devices, vm=None, container_ref=None): ide_controllers[device.key] = len(device.device) if 'network' in list(devices.keys()): - 
network_adapters_to_create = list(set(devices['network'].keys()) - set(existing_network_adapters_label)) - network_adapters_to_create.sort() - log.debug("Networks adapters to create: {0}".format(network_adapters_to_create)) if network_adapters_to_create else None # pylint: disable=W0106 + network_adapters_to_create = sorted( + set(devices['network'].keys()) - set(existing_network_adapters_label)) + log.debug("Networks adapters to create: {0}".format( + network_adapters_to_create)) if network_adapters_to_create else None # pylint: disable=W0106 for network_adapter_label in network_adapters_to_create: network_name = devices['network'][network_adapter_label]['name'] - adapter_type = devices['network'][network_adapter_label]['adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else '' - switch_type = devices['network'][network_adapter_label]['switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else '' + adapter_type = devices['network'][network_adapter_label][ + 'adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else '' + switch_type = devices['network'][network_adapter_label][ + 'switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else '' # create the network adapter - network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref) - adapter_mapping = _set_network_adapter_mapping(devices['network'][network_adapter_label]) + network_spec = _add_new_network_adapter_helper( + network_adapter_label, network_name, adapter_type, switch_type, container_ref) + adapter_mapping = _set_network_adapter_mapping( + devices['network'][network_adapter_label]) device_specs.append(network_spec) nics_map.append(adapter_mapping) if 'scsi' in list(devices.keys()): - scsi_controllers_to_create = list(set(devices['scsi'].keys()) - set(existing_scsi_controllers_label)) - scsi_controllers_to_create.sort() - log.debug("SCSI 
controllers to create: {0}".format(scsi_controllers_to_create)) if scsi_controllers_to_create else None # pylint: disable=W0106 + scsi_controllers_to_create = sorted( + set(devices['scsi'].keys()) - set(existing_scsi_controllers_label)) + log.debug("SCSI controllers to create: {0}".format( + scsi_controllers_to_create)) if scsi_controllers_to_create else None # pylint: disable=W0106 for scsi_controller_label in scsi_controllers_to_create: # create the SCSI controller scsi_controller_properties = devices['scsi'][scsi_controller_label] - scsi_spec = _add_new_scsi_controller_helper(scsi_controller_label, scsi_controller_properties, bus_number) + scsi_spec = _add_new_scsi_controller_helper( + scsi_controller_label, scsi_controller_properties, bus_number) device_specs.append(scsi_spec) bus_number += 1 if 'ide' in list(devices.keys()): - ide_controllers_to_create = list(set(devices['ide'].keys()) - set(existing_ide_controllers_label)) - ide_controllers_to_create.sort() - log.debug('IDE controllers to create: {0}'.format(ide_controllers_to_create)) if ide_controllers_to_create else None # pylint: disable=W0106 + ide_controllers_to_create = sorted( + set(devices['ide'].keys()) - set(existing_ide_controllers_label)) + log.debug('IDE controllers to create: {0}'.format( + ide_controllers_to_create)) if ide_controllers_to_create else None # pylint: disable=W0106 for ide_controller_label in ide_controllers_to_create: # create the IDE controller - ide_spec = _add_new_ide_controller_helper(ide_controller_label, None, bus_number) + ide_spec = _add_new_ide_controller_helper( + ide_controller_label, None, bus_number) device_specs.append(ide_spec) bus_number += 1 if 'disk' in list(devices.keys()): - disks_to_create = list(set(devices['disk'].keys()) - set(existing_disks_label)) - disks_to_create.sort() - log.debug("Hard disks to create: {0}".format(disks_to_create)) if disks_to_create else None # pylint: disable=W0106 + disks_to_create = sorted( + set(devices['disk'].keys()) - 
set(existing_disks_label)) + log.debug("Hard disks to create: {0}".format( + disks_to_create)) if disks_to_create else None # pylint: disable=W0106 for disk_label in disks_to_create: # create the disk size_gb = float(devices['disk'][disk_label]['size']) - thin_provision = bool(devices['disk'][disk_label]['thin_provision']) if 'thin_provision' in devices['disk'][disk_label] else False - disk_spec = _add_new_hard_disk_helper(disk_label, size_gb, unit_number, thin_provision=thin_provision) + thin_provision = bool(devices['disk'][disk_label][ + 'thin_provision']) if 'thin_provision' in devices['disk'][disk_label] else False + disk_spec = _add_new_hard_disk_helper( + disk_label, size_gb, unit_number, thin_provision=thin_provision) # when creating both SCSI controller and Hard disk at the same time we need the randomly # assigned (temporary) key of the newly created SCSI controller if 'controller' in devices['disk'][disk_label]: for spec in device_specs: - if spec.device.deviceInfo.label == devices['disk'][disk_label]['controller']: + if spec.device.deviceInfo.label == devices[ + 'disk'][disk_label]['controller']: disk_spec.device.controllerKey = spec.device.key break @@ -715,34 +819,42 @@ def _manage_devices(devices, vm=None, container_ref=None): unit_number += 1 if 'cd' in list(devices.keys()): - cd_drives_to_create = list(set(devices['cd'].keys()) - set(existing_cd_drives_label)) - cd_drives_to_create.sort() - log.debug("CD/DVD drives to create: {0}".format(cd_drives_to_create)) if cd_drives_to_create else None # pylint: disable=W0106 + cd_drives_to_create = sorted( + set(devices['cd'].keys()) - set(existing_cd_drives_label)) + log.debug("CD/DVD drives to create: {0}".format( + cd_drives_to_create)) if cd_drives_to_create else None # pylint: disable=W0106 for cd_drive_label in cd_drives_to_create: # create the CD/DVD drive - device_type = devices['cd'][cd_drive_label]['device_type'] if 'device_type' in devices['cd'][cd_drive_label] else '' - mode = 
devices['cd'][cd_drive_label]['mode'] if 'mode' in devices['cd'][cd_drive_label] else '' - iso_path = devices['cd'][cd_drive_label]['iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else '' + device_type = devices['cd'][cd_drive_label][ + 'device_type'] if 'device_type' in devices['cd'][cd_drive_label] else '' + mode = devices['cd'][cd_drive_label][ + 'mode'] if 'mode' in devices['cd'][cd_drive_label] else '' + iso_path = devices['cd'][cd_drive_label][ + 'iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else '' controller_key = None # When creating both IDE controller and CD/DVD drive at the same time we need the randomly # assigned (temporary) key of the newly created IDE controller if 'controller' in devices['cd'][cd_drive_label]: for spec in device_specs: - if spec.device.deviceInfo.label == devices['cd'][cd_drive_label]['controller']: + if spec.device.deviceInfo.label == devices[ + 'cd'][cd_drive_label]['controller']: controller_key = spec.device.key ide_controllers[controller_key] = 0 break else: - for ide_controller_key, num_devices in six.iteritems(ide_controllers): + for ide_controller_key, num_devices in six.iteritems( + ide_controllers): if num_devices < 2: controller_key = ide_controller_key break if not controller_key: - log.error("No more available controllers for '{0}'. All IDE controllers are currently in use".format(cd_drive_label)) + log.error("No more available controllers for '{0}'. 
All IDE controllers are currently in use".format( + cd_drive_label)) else: - cd_drive_spec = _add_new_cd_or_dvd_drive_helper(cd_drive_label, controller_key, device_type, mode, iso_path) + cd_drive_spec = _add_new_cd_or_dvd_drive_helper( + cd_drive_label, controller_key, device_type, mode, iso_path) device_specs.append(cd_drive_spec) ide_controllers[controller_key] += 1 @@ -759,14 +871,21 @@ def _wait_for_vmware_tools(vm_ref, max_wait): starttime = time.time() while time_counter < max_wait: if time_counter % 5 == 0: - log.info("[ {0} ] Waiting for VMware tools to be running [{1} s]".format(vm_ref.name, time_counter)) + log.info( + "[ {0} ] Waiting for VMware tools to be running [{1} s]".format( + vm_ref.name, time_counter)) if str(vm_ref.summary.guest.toolsRunningStatus) == "guestToolsRunning": - log.info("[ {0} ] Successfully got VMware tools running on the guest in {1} seconds".format(vm_ref.name, time_counter)) + log.info( + "[ {0} ] Successfully got VMware tools running on the guest in {1} seconds".format( + vm_ref.name, time_counter)) return True time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 - log.warning("[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} seconds".format(vm_ref.name, max_wait)) + log.warning( + "[ {0} ] Timeout Reached. 
VMware tools still not running after waiting for {1} seconds".format( + vm_ref.name, + max_wait)) return False @@ -819,26 +938,36 @@ def _wait_for_ip(vm_ref, max_wait): vmware_tools_status = _wait_for_vmware_tools(vm_ref, max_wait_vmware_tools) if not vmware_tools_status: return False - + time_counter = 0 starttime = time.time() while time_counter < max_wait_ip: if time_counter % 5 == 0: - log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter)) + log.info( + "[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format( + vm_ref.name, time_counter)) - if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + if vm_ref.summary.guest.ipAddress and _valid_ip( + vm_ref.summary.guest.ipAddress): + log.info( + "[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format( + vm_ref.name, time_counter)) return vm_ref.summary.guest.ipAddress - + for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: if _valid_ip(current_ip.ipAddress): - log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) + log.info( + "[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format( + vm_ref.name, time_counter)) return current_ip.ipAddress time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 - log.warning("[ {0} ] Timeout Reached. Unable to retrieve IPv4 information after waiting for {1} seconds".format(vm_ref.name, max_wait_ip)) + log.warning( + "[ {0} ] Timeout Reached. 
Unable to retrieve IPv4 information after waiting for {1} seconds".format( + vm_ref.name, + max_wait_ip)) return False @@ -847,7 +976,8 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): starttime = time.time() while host_ref.runtime.connectionState != 'notResponding': if time_counter % sleep_seconds == 0: - message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter) + message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format( + host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -856,7 +986,8 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): time_counter += 1 while host_ref.runtime.connectionState != 'connected': if time_counter % sleep_seconds == 0: - message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter) + message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format( + host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -864,7 +995,8 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 if host_ref.runtime.connectionState == 'connected': - message = "[ {0} ] Successfully completed host {1} in {2} seconds".format(host_ref.name, task_type, time_counter) + message = "[ {0} ] Successfully completed host {1} in {2} seconds".format( + host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -880,27 +1012,34 @@ def _format_instance_info_select(vm, selection): vm_select_info['id'] = vm["name"] if 'image' in selection: - vm_select_info['image'] = "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A" + vm_select_info['image'] = "{0} (Detected)".format( + vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A" if 'size' in selection: - cpu = 
vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" + cpu = vm[ + "config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" + ram = "{0} MB".format( + vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" vm_select_info['size'] = u"cpu: {0}\nram: {1}".format(cpu, ram) if 'state' in selection: - vm_select_info['state'] = str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A" + vm_select_info['state'] = str( + vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A" if 'guest_id' in selection: - vm_select_info['guest_id'] = vm["config.guestId"] if "config.guestId" in vm else "N/A" + vm_select_info['guest_id'] = vm[ + "config.guestId"] if "config.guestId" in vm else "N/A" if 'hostname' in selection: vm_select_info['hostname'] = vm["object"].guest.hostName if 'path' in selection: - vm_select_info['path'] = vm["config.files.vmPathName"] if "config.files.vmPathName" in vm else "N/A" + vm_select_info['path'] = vm[ + "config.files.vmPathName"] if "config.files.vmPathName" in vm else "N/A" if 'tools_status' in selection: - vm_select_info['tools_status'] = str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" + vm_select_info['tools_status'] = str( + vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" if 'private_ips' in selection or 'networks' in selection: network_full_info = {} @@ -928,42 +1067,61 @@ def _format_instance_info_select(vm, selection): for device in vm["config.hardware.device"]: device_full_info[device.deviceInfo.label] = {} if 'devices' in selection: - device_full_info[device.deviceInfo.label]['key'] = device.key, - device_full_info[device.deviceInfo.label]['label'] = device.deviceInfo.label, - device_full_info[device.deviceInfo.label]['summary'] = device.deviceInfo.summary, - 
device_full_info[device.deviceInfo.label]['type'] = type(device).__name__.rsplit(".", 1)[1] + device_full_info[ + device.deviceInfo.label]['key'] = device.key, + device_full_info[device.deviceInfo.label][ + 'label'] = device.deviceInfo.label, + device_full_info[device.deviceInfo.label][ + 'summary'] = device.deviceInfo.summary, + device_full_info[device.deviceInfo.label][ + 'type'] = type(device).__name__.rsplit(".", 1)[1] if device.unitNumber: - device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber + device_full_info[device.deviceInfo.label][ + 'unitNumber'] = device.unitNumber if hasattr(device, 'connectable') and device.connectable: - device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected - device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl - device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected - device_full_info[device.deviceInfo.label]['status'] = device.connectable.status + device_full_info[device.deviceInfo.label][ + 'startConnected'] = device.connectable.startConnected + device_full_info[device.deviceInfo.label][ + 'allowGuestControl'] = device.connectable.allowGuestControl + device_full_info[device.deviceInfo.label][ + 'connected'] = device.connectable.connected + device_full_info[device.deviceInfo.label][ + 'status'] = device.connectable.status - if hasattr(device, 'controllerKey') and device.controllerKey: - device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey + if hasattr(device, + 'controllerKey') and device.controllerKey: + device_full_info[device.deviceInfo.label][ + 'controllerKey'] = device.controllerKey if hasattr(device, 'addressType'): - device_full_info[device.deviceInfo.label]['addressType'] = device.addressType + device_full_info[device.deviceInfo.label][ + 'addressType'] = device.addressType if hasattr(device, 'busNumber'): - 
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber + device_full_info[device.deviceInfo.label][ + 'busNumber'] = device.busNumber if hasattr(device, 'device'): - device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device + device_full_info[device.deviceInfo.label][ + 'deviceKeys'] = device.device if hasattr(device, 'videoRamSizeInKB'): - device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB + device_full_info[device.deviceInfo.label][ + 'videoRamSizeInKB'] = device.videoRamSizeInKB if isinstance(device, vim.vm.device.VirtualDisk): - device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB - device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode - device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName + device_full_info[device.deviceInfo.label][ + 'capacityInKB'] = device.capacityInKB + device_full_info[device.deviceInfo.label][ + 'diskMode'] = device.backing.diskMode + device_full_info[device.deviceInfo.label][ + 'fileName'] = device.backing.fileName if hasattr(device, 'macAddress'): - device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress + device_full_info[device.deviceInfo.label][ + 'macAddress'] = device.macAddress device_mac_addresses.append(device.macAddress) if 'devices' in selection: @@ -974,10 +1132,12 @@ def _format_instance_info_select(vm, selection): if 'storage' in selection: storage_full_info = { - 'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", - 'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", - 'unshared': int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A" - } + 'committed': int( + vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", + 'uncommitted': int( + vm["summary.storage.uncommitted"]) if 
"summary.storage.uncommitted" in vm else "N/A", + 'unshared': int( + vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"} vm_select_info['storage'] = storage_full_info if 'files' in selection: @@ -1008,43 +1168,59 @@ def _format_instance_info(vm): } if device.unitNumber: - device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber + device_full_info[device.deviceInfo.label][ + 'unitNumber'] = device.unitNumber if hasattr(device, 'connectable') and device.connectable: - device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected - device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl - device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected - device_full_info[device.deviceInfo.label]['status'] = device.connectable.status + device_full_info[device.deviceInfo.label][ + 'startConnected'] = device.connectable.startConnected + device_full_info[device.deviceInfo.label][ + 'allowGuestControl'] = device.connectable.allowGuestControl + device_full_info[device.deviceInfo.label][ + 'connected'] = device.connectable.connected + device_full_info[device.deviceInfo.label][ + 'status'] = device.connectable.status if hasattr(device, 'controllerKey') and device.controllerKey: - device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey + device_full_info[device.deviceInfo.label][ + 'controllerKey'] = device.controllerKey if hasattr(device, 'addressType'): - device_full_info[device.deviceInfo.label]['addressType'] = device.addressType + device_full_info[device.deviceInfo.label][ + 'addressType'] = device.addressType if hasattr(device, 'macAddress'): - device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress + device_full_info[device.deviceInfo.label][ + 'macAddress'] = device.macAddress device_mac_addresses.append(device.macAddress) if hasattr(device, 'busNumber'): - 
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber + device_full_info[device.deviceInfo.label][ + 'busNumber'] = device.busNumber if hasattr(device, 'device'): - device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device + device_full_info[device.deviceInfo.label][ + 'deviceKeys'] = device.device if hasattr(device, 'videoRamSizeInKB'): - device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB + device_full_info[device.deviceInfo.label][ + 'videoRamSizeInKB'] = device.videoRamSizeInKB if isinstance(device, vim.vm.device.VirtualDisk): - device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB - device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode - device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName + device_full_info[device.deviceInfo.label][ + 'capacityInKB'] = device.capacityInKB + device_full_info[device.deviceInfo.label][ + 'diskMode'] = device.backing.diskMode + device_full_info[device.deviceInfo.label][ + 'fileName'] = device.backing.fileName storage_full_info = { - 'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", - 'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", - 'unshared': int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A" - } + 'committed': int( + vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", + 'uncommitted': int( + vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", + 'unshared': int( + vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"} file_full_info = {} if "layoutEx.file" in vm: @@ -1068,29 +1244,41 @@ def _format_instance_info(vm): ip_addresses.extend(net.ipAddress) cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} 
MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" + ram = "{0} MB".format(vm["config.hardware.memoryMB"] + ) if "config.hardware.memoryMB" in vm else "N/A" vm_full_info = { - 'id': str(vm['name']), - 'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", - 'size': u"cpu: {0}\nram: {1}".format(cpu, ram), - 'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", + 'id': str( + vm['name']), + 'image': "{0} (Detected)".format( + vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", + 'size': u"cpu: {0}\nram: {1}".format( + cpu, + ram), + 'state': str( + vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", 'private_ips': ip_addresses, 'public_ips': [], 'devices': device_full_info, 'storage': storage_full_info, 'files': file_full_info, - 'guest_id': str(vm["config.guestId"]) if "config.guestId" in vm else "N/A", - 'hostname': str(vm["object"].guest.hostName), + 'guest_id': str( + vm["config.guestId"]) if "config.guestId" in vm else "N/A", + 'hostname': str( + vm["object"].guest.hostName), 'mac_addresses': device_mac_addresses, 'networks': network_full_info, - 'path': str(vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A", - 'tools_status': str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" - } + 'path': str( + vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A", + 'tools_status': str( + vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"} return vm_full_info -def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""): +def _get_snapshots( + snapshot_list, + current_snapshot=None, + parent_snapshot_path=""): snapshots = {} for snapshot in snapshot_list: snapshot_path = "{0}/{1}".format(parent_snapshot_path, snapshot.name) @@ -1107,7 +1295,10 @@ def _get_snapshots(snapshot_list, 
current_snapshot=None, parent_snapshot_path="" # Check if child snapshots exist if snapshot.childSnapshotList: - ret = _get_snapshots(snapshot.childSnapshotList, current_snapshot, snapshot_path) + ret = _get_snapshots( + snapshot.childSnapshotList, + current_snapshot, + snapshot_path) if current_snapshot: return ret snapshots.update(ret) @@ -1285,7 +1476,9 @@ def list_datastore_clusters(kwargs=None, call=None): '-f or --function.' ) - return {'Datastore Clusters': salt.utils.vmware.list_datastore_clusters(_get_si())} + return { + 'Datastore Clusters': salt.utils.vmware.list_datastore_clusters( + _get_si())} def list_datastores(kwargs=None, call=None): @@ -1383,7 +1576,8 @@ def list_nodes_min(kwargs=None, call=None): ret = {} vm_properties = ["name"] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = True @@ -1426,19 +1620,26 @@ def list_nodes(kwargs=None, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: - cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" + cpu = vm[ + "config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" + ram = "{0} MB".format( + vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" vm_info = { 'id': vm["name"], - 'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", - 'size': u"cpu: {0}\nram: {1}".format(cpu, ram), - 'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", - 
'private_ips': [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], - 'public_ips': [] - } + 'image': "{0} (Detected)".format( + vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", + 'size': u"cpu: {0}\nram: {1}".format( + cpu, + ram), + 'state': str( + vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", + 'private_ips': [ + vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], + 'public_ips': []} ret[vm_info['id']] = vm_info return ret @@ -1487,7 +1688,8 @@ def list_nodes_full(kwargs=None, call=None): "guest.toolsStatus" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = _format_instance_info(vm) @@ -1537,7 +1739,8 @@ def list_nodes_select(call=None): vm_properties.append("config.guestFullName") if 'size' in selection: - vm_properties.extend(["config.hardware.numCPU", "config.hardware.memoryMB"]) + vm_properties.extend( + ["config.hardware.numCPU", "config.hardware.memoryMB"]) if 'state' in selection: vm_properties.append("summary.runtime.powerState") @@ -1576,7 +1779,8 @@ def list_nodes_select(call=None): elif 'name' not in vm_properties: vm_properties.append("name") - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = _format_instance_info_select(vm, selection) @@ -1616,7 +1820,8 @@ def show_instance(name, call=None): "guest.toolsStatus" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm['name'] == name: @@ -1649,16 +1854,17 @@ def 
avail_images(call=None): "config.hardware.memoryMB" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if "config.template" in vm and vm["config.template"]: - templates[vm["name"]] = { + templates[ + vm["name"]] = { 'name': vm["name"], 'guest_fullname': vm["config.guestFullName"] if "config.guestFullName" in vm else "N/A", 'cpus': vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A", - 'ram': vm["config.hardware.memoryMB"] if "config.hardware.memoryMB" in vm else "N/A" - } + 'ram': vm["config.hardware.memoryMB"] if "config.hardware.memoryMB" in vm else "N/A"} return templates @@ -1706,8 +1912,7 @@ def avail_sizes(call=None): log.warning( 'Because sizes are built into templates with VMware, there are no sizes ' - 'to return.' - ) + 'to return.') return {} @@ -1784,14 +1989,18 @@ def list_snapshots(kwargs=None, call=None): "snapshot" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["rootSnapshot"]: if kwargs and kwargs.get('name') == vm["name"]: - return {vm["name"]: _get_snapshots(vm["snapshot"].rootSnapshotList)} + return { + vm["name"]: _get_snapshots( + vm["snapshot"].rootSnapshotList)} else: - ret[vm["name"]] = _get_snapshots(vm["snapshot"].rootSnapshotList) + ret[vm["name"]] = _get_snapshots( + vm["snapshot"].rootSnapshotList) return ret @@ -1817,7 +2026,8 @@ def start(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -1864,7 +2074,8 @@ def 
stop(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -1911,7 +2122,8 @@ def suspend(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -1962,11 +2174,13 @@ def reset(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: - if vm["summary.runtime.powerState"] == "suspended" or vm["summary.runtime.powerState"] == "poweredOff": + if vm["summary.runtime.powerState"] == "suspended" or vm[ + "summary.runtime.powerState"] == "poweredOff": ret = 'cannot reset in suspended/powered off state' log.info('VM {0} {1}'.format(name, ret)) return ret @@ -2010,7 +2224,8 @@ def terminate(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2066,7 +2281,8 @@ def destroy(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2082,7 +2298,8 @@ def destroy(name, call=None): name, 
exc ), - # Show the traceback if the debug logging level is enabled + # Show the traceback if the debug logging level is + # enabled exc_info_on_loglevel=logging.DEBUG ) return 'failed to destroy' @@ -2109,7 +2326,8 @@ def destroy(name, call=None): transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: - salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__) + salt.utils.cloud.delete_minion_cachedir( + name, __active_provider_name__.split(':')[0], __opts__) return True @@ -2215,14 +2433,17 @@ def create(vm_): ) if 'clonefrom' in vm_: - # If datacenter is specified, set the container reference to start search from it instead + # If datacenter is specified, set the container reference to start + # search from it instead container_ref = None if datacenter: - datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) + datacenter_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datacenter, datacenter) container_ref = datacenter_ref if datacenter_ref else None # Clone VM/template from specified VM/template - object_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_['clonefrom'], container_ref=container_ref) + object_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, vm_['clonefrom'], container_ref=container_ref) if object_ref: clone_type = "template" if object_ref.config.template else "vm" else: @@ -2233,19 +2454,26 @@ def create(vm_): clone_type = None object_ref = None - # Either a cluster, or a resource pool must be specified when cloning from template or creating. + # Either a cluster, or a resource pool must be specified when cloning from + # template or creating. 
if resourcepool: - resourcepool_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ResourcePool, resourcepool, container_ref=container_ref) + resourcepool_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.ResourcePool, resourcepool, container_ref=container_ref) if not resourcepool_ref: - log.error("Specified resource pool: '{0}' does not exist".format(resourcepool)) + log.error( + "Specified resource pool: '{0}' does not exist".format(resourcepool)) if not clone_type or clone_type == "template": - raise SaltCloudSystemExit('You must specify a resource pool that exists.') + raise SaltCloudSystemExit( + 'You must specify a resource pool that exists.') elif cluster: - cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster, container_ref=container_ref) + cluster_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.ClusterComputeResource, cluster, container_ref=container_ref) if not cluster_ref: - log.error("Specified cluster: '{0}' does not exist".format(cluster)) + log.error( + "Specified cluster: '{0}' does not exist".format(cluster)) if not clone_type or clone_type == "template": - raise SaltCloudSystemExit('You must specify a cluster that exists.') + raise SaltCloudSystemExit( + 'You must specify a cluster that exists.') else: resourcepool_ref = cluster_ref.resourcePool elif clone_type == "template": @@ -2257,20 +2485,29 @@ def create(vm_): 'You must either specify a cluster or a resource pool when creating.' ) else: - log.debug("Using resource pool used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + log.debug( + "Using resource pool used by the {0} {1}".format( + clone_type, vm_['clonefrom'])) # Either a datacenter or a folder can be optionally specified when cloning, required when creating. - # If not specified when cloning, the existing VM/template\'s parent folder is used. + # If not specified when cloning, the existing VM/template\'s parent folder + # is used. 
if folder: - folder_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Folder, folder, container_ref=container_ref) + folder_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Folder, folder, container_ref=container_ref) if not folder_ref: log.error("Specified folder: '{0}' does not exist".format(folder)) - log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + log.debug( + "Using folder in which {0} {1} is present".format( + clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent elif datacenter: if not datacenter_ref: - log.error("Specified datacenter: '{0}' does not exist".format(datacenter)) - log.debug("Using datacenter folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + log.error( + "Specified datacenter: '{0}' does not exist".format(datacenter)) + log.debug( + "Using datacenter folder in which {0} {1} is present".format( + clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent else: folder_ref = datacenter_ref.vmFolder @@ -2279,7 +2516,9 @@ def create(vm_): 'You must either specify a folder or a datacenter when creating not cloning.' ) else: - log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) + log.debug( + "Using folder in which {0} {1} is present".format( + clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent if 'clonefrom' in vm_: @@ -2292,21 +2531,29 @@ def create(vm_): # Either a datastore/datastore cluster can be optionally specified. # If not specified, the current datastore is used. 
if datastore: - datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore, container_ref=container_ref) + datastore_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datastore, datastore, container_ref=container_ref) if datastore_ref: # specific datastore has been specified reloc_spec.datastore = datastore_ref else: - datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore, container_ref=container_ref) + datastore_cluster_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.StoragePod, datastore, container_ref=container_ref) if not datastore_cluster_ref: - log.error("Specified datastore/datastore cluster: '{0}' does not exist".format(datastore)) - log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + log.error( + "Specified datastore/datastore cluster: '{0}' does not exist".format(datastore)) + log.debug( + "Using datastore used by the {0} {1}".format( + clone_type, vm_['clonefrom'])) else: log.debug("No datastore/datastore cluster specified") - log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) + log.debug( + "Using datastore used by the {0} {1}".format( + clone_type, vm_['clonefrom'])) if host: - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host, container_ref=container_ref) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host, container_ref=container_ref) if host_ref: reloc_spec.host = host_ref else: @@ -2317,9 +2564,11 @@ def create(vm_): 'You must specify a datastore when creating not cloning.' 
) else: - datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore) + datastore_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datastore, datastore) if not datastore_ref: - raise SaltCloudSystemExit("Specified datastore: '{0}' does not exist".format(datastore)) + raise SaltCloudSystemExit( + "Specified datastore: '{0}' does not exist".format(datastore)) # Create the config specs config_spec = vim.vm.ConfigSpec() @@ -2329,13 +2578,17 @@ def create(vm_): if hardware_version: hardware_version = "vmx-{0}".format(str(hardware_version).zfill(2)) if hardware_version != object_ref.config.version: - log.debug("Scheduling hardware version upgrade from {0} to {1}".format(object_ref.config.version, hardware_version)) + log.debug( + "Scheduling hardware version upgrade from {0} to {1}".format( + object_ref.config.version, + hardware_version)) scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo() scheduled_hardware_upgrade.upgradePolicy = 'always' scheduled_hardware_upgrade.versionKey = hardware_version config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade else: - log.debug("Virtual hardware version already set to {0}".format(hardware_version)) + log.debug( + "Virtual hardware version already set to {0}".format(hardware_version)) if num_cpus: log.debug("Setting cpu to: {0}".format(num_cpus)) @@ -2347,9 +2600,10 @@ def create(vm_): if memory_unit.lower() == "mb": memory_mb = int(memory_num) elif memory_unit.lower() == "gb": - memory_mb = int(float(memory_num)*1024.0) + memory_mb = int(float(memory_num) * 1024.0) else: - err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) + err_msg = "Invalid memory type specified: '{0}'".format( + memory_unit) log.error(err_msg) return {'Error': err_msg} except (TypeError, ValueError): @@ -2374,7 +2628,8 @@ def create(vm_): config=config_spec ) - if customization and (devices and 'network' in list(devices.keys())) and 'Windows' not in 
object_ref.config.guestFullName: + if customization and (devices and 'network' in list( + devices.keys())) and 'Windows' not in object_ref.config.guestFullName: global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): @@ -2393,7 +2648,8 @@ def create(vm_): ) clone_spec.customization = custom_spec - if customization and (devices and 'network' in list(devices.keys())) and 'Windows' in object_ref.config.guestFullName: + if customization and (devices and 'network' in list( + devices.keys())) and 'Windows' in object_ref.config.guestFullName: global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): @@ -2412,7 +2668,7 @@ def create(vm_): identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = domain identity.identification = vim.vm.customization.Identification() - + custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, @@ -2430,7 +2686,8 @@ def create(vm_): else: config_spec.name = vm_name config_spec.files = vim.vm.FileInfo() - config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format(datastore, vm_name) + config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format( + datastore, vm_name) config_spec.guestId = guest_id log.debug('config_spec set to:\n{0}'.format( @@ -2447,11 +2704,17 @@ def create(vm_): ) if 'clonefrom' in vm_: - log.info("Creating {0} from {1}({2})".format(vm_['name'], clone_type, vm_['clonefrom'])) + log.info( + "Creating {0} from {1}({2})".format( + vm_['name'], + clone_type, + vm_['clonefrom'])) if datastore and not datastore_ref and datastore_cluster_ref: - # datastore cluster has been specified so apply Storage DRS recomendations - pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref) + # datastore cluster has been specified so apply Storage DRS + # recomendations + pod_spec = vim.storageDrs.PodSelectionSpec( + storagePod=datastore_cluster_ref) storage_spec = 
vim.storageDrs.StoragePlacementSpec( type='clone', @@ -2466,15 +2729,19 @@ def create(vm_): si = _get_si() # get recommended datastores - recommended_datastores = si.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) + recommended_datastores = si.content.storageResourceManager.RecommendDatastores( + storageSpec=storage_spec) # apply storage DRS recommendations - task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(recommended_datastores.recommendations[0].key) - salt.utils.vmware.wait_for_task(task, vm_name, 'apply storage DRS recommendations', 5, 'info') + task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task( + recommended_datastores.recommendations[0].key) + salt.utils.vmware.wait_for_task( + task, vm_name, 'apply storage DRS recommendations', 5, 'info') else: # clone the VM/template task = object_ref.Clone(folder_ref, vm_name, clone_spec) - salt.utils.vmware.wait_for_task(task, vm_name, 'clone', 5, 'info') + salt.utils.vmware.wait_for_task( + task, vm_name, 'clone', 5, 'info') else: log.info('Creating {0}'.format(vm_['name'])) @@ -2489,14 +2756,16 @@ def create(vm_): ) return {'Error': err_msg} - new_vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_name, container_ref=container_ref) + new_vm_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, vm_name, container_ref=container_ref) # Find how to power on in CreateVM_Task (if possible), for now this will do if not clone_type and power: task = new_vm_ref.PowerOn() salt.utils.vmware.wait_for_task(task, vm_name, 'power', 5, 'info') - # If it a template or if it does not need to be powered on then do not wait for the IP + # If it a template or if it does not need to be powered on then do not + # wait for the IP if not template and power: ip = _wait_for_ip(new_vm_ref, wait_for_ip_timeout) if ip: @@ -2540,7 +2809,8 @@ def create_datacenter(kwargs=None, call=None): '-f or --function.' 
) - datacenter_name = kwargs.get('name') if kwargs and 'name' in kwargs else None + datacenter_name = kwargs.get( + 'name') if kwargs and 'name' in kwargs else None if not datacenter_name: raise SaltCloudSystemExit( @@ -2553,7 +2823,8 @@ def create_datacenter(kwargs=None, call=None): ) # Check if datacenter already exists - datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datacenter, datacenter_name) if datacenter_ref: return {datacenter_name: 'datacenter already exists'} @@ -2600,7 +2871,8 @@ def create_cluster(kwargs=None, call=None): ) cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None - datacenter = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter = kwargs.get( + 'datacenter') if kwargs and 'datacenter' in kwargs else None if not cluster_name: raise SaltCloudSystemExit( @@ -2613,14 +2885,16 @@ def create_cluster(kwargs=None, call=None): ) if not isinstance(datacenter, vim.Datacenter): - datacenter = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) + datacenter = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datacenter, datacenter) if not datacenter: raise SaltCloudSystemExit( 'The specified datacenter does not exist.' 
) # Check if cluster already exists - cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster_name) + cluster_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.ClusterComputeResource, cluster_name) if cluster_ref: return {cluster_name: 'cluster already exists'} @@ -2642,7 +2916,9 @@ def create_cluster(kwargs=None, call=None): ) return False - log.debug("Created cluster {0} under datacenter {1}".format(cluster_name, datacenter.name)) + log.debug( + "Created cluster {0} under datacenter {1}".format( + cluster_name, datacenter.name)) return {cluster_name: 'created'} return False @@ -2673,7 +2949,8 @@ def rescan_hba(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) try: if hba: @@ -2724,7 +3001,8 @@ def upgrade_tools_all(call=None): ret = {} vm_properties = ["name"] - vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm['name']] = _upg_tools_helper(vm['object']) @@ -2755,7 +3033,8 @@ def upgrade_tools(name, reboot=False, call=None): '-a or --action.' 
) - vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, name) return _upg_tools_helper(vm_ref, reboot) @@ -2788,12 +3067,12 @@ def list_hosts_by_cluster(kwargs=None, call=None): ) ret = {} - cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None + cluster_name = kwargs.get( + 'cluster') if kwargs and 'cluster' in kwargs else None cluster_properties = ["name"] - cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(), - vim.ClusterComputeResource, - cluster_properties) + cluster_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.ClusterComputeResource, cluster_properties) for cluster in cluster_list: ret[cluster['name']] = [] @@ -2834,10 +3113,12 @@ def list_clusters_by_datacenter(kwargs=None, call=None): ) ret = {} - datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter_name = kwargs.get( + 'datacenter') if kwargs and 'datacenter' in kwargs else None datacenter_properties = ["name"] - datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties) + datacenter_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.Datacenter, datacenter_properties) for datacenter in datacenter_list: ret[datacenter['name']] = [] @@ -2845,7 +3126,9 @@ def list_clusters_by_datacenter(kwargs=None, call=None): if isinstance(cluster, vim.ClusterComputeResource): ret[datacenter['name']].append(cluster.name) if datacenter_name and datacenter_name == datacenter['name']: - return {'Clusters by Datacenter': {datacenter_name: ret[datacenter_name]}} + return { + 'Clusters by Datacenter': { + datacenter_name: ret[datacenter_name]}} return {'Clusters by Datacenter': ret} @@ -2878,10 +3161,12 @@ def list_hosts_by_datacenter(kwargs=None, call=None): ) ret = {} - datacenter_name = kwargs.get('datacenter') if kwargs and 
'datacenter' in kwargs else None + datacenter_name = kwargs.get( + 'datacenter') if kwargs and 'datacenter' in kwargs else None datacenter_properties = ["name"] - datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties) + datacenter_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.Datacenter, datacenter_properties) for datacenter in datacenter_list: ret[datacenter['name']] = [] @@ -2891,7 +3176,9 @@ def list_hosts_by_datacenter(kwargs=None, call=None): if isinstance(host, vim.HostSystem): ret[datacenter['name']].append(host.name) if datacenter_name and datacenter_name == datacenter['name']: - return {'Hosts by Datacenter': {datacenter_name: ret[datacenter_name]}} + return { + 'Hosts by Datacenter': { + datacenter_name: ret[datacenter_name]}} return {'Hosts by Datacenter': ret} @@ -2946,7 +3233,8 @@ def list_hbas(kwargs=None, call=None): ) ret = {} - hba_type = kwargs.get('type').lower() if kwargs and 'type' in kwargs else None + hba_type = kwargs.get('type').lower( + ) if kwargs and 'type' in kwargs else None host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None host_properties = [ "name", @@ -2958,7 +3246,8 @@ def list_hbas(kwargs=None, call=None): 'Specified hba type {0} currently not supported.'.format(hba_type) ) - host_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.HostSystem, host_properties) + host_list = salt.utils.vmware.get_mors_with_properties( + _get_si(), vim.HostSystem, host_properties) for host in host_list: ret[host['name']] = {} @@ -3001,7 +3290,9 @@ def list_dvs(kwargs=None, call=None): '-f or --function.' 
) - return {'Distributed Virtual Switches': salt.utils.vmware.list_dvs(_get_si())} + return { + 'Distributed Virtual Switches': salt.utils.vmware.list_dvs( + _get_si())} def list_vapps(kwargs=None, call=None): @@ -3041,7 +3332,8 @@ def enter_maintenance_mode(kwargs=None, call=None): host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_name or not host_ref: raise SaltCloudSystemExit( @@ -3052,8 +3344,10 @@ def enter_maintenance_mode(kwargs=None, call=None): return {host_name: 'already in maintenance mode'} try: - task = host_ref.EnterMaintenanceMode(timeout=0, evacuatePoweredOffVms=True) - salt.utils.vmware.wait_for_task(task, host_name, 'enter maintenance mode') + task = host_ref.EnterMaintenanceMode( + timeout=0, evacuatePoweredOffVms=True) + salt.utils.vmware.wait_for_task( + task, host_name, 'enter maintenance mode') except Exception as exc: log.error( 'Error while moving host system {0} in maintenance mode: {1}'.format( @@ -3086,7 +3380,8 @@ def exit_maintenance_mode(kwargs=None, call=None): host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_name or not host_ref: raise SaltCloudSystemExit( @@ -3098,7 +3393,8 @@ def exit_maintenance_mode(kwargs=None, call=None): try: task = host_ref.ExitMaintenanceMode(timeout=0) - salt.utils.vmware.wait_for_task(task, host_name, 'exit maintenance mode') + salt.utils.vmware.wait_for_task( + task, host_name, 'exit maintenance mode') except Exception as exc: log.error( 'Error while moving host system {0} out of maintenance mode: {1}'.format( @@ -3162,27 +3458,37 @@ def create_folder(kwargs=None, call=None): 
path_exists = True # Split the path in a list and loop over it to check for its existence - for index, folder_name in enumerate(os.path.normpath(folder_path.strip('/')).split('/')): + for index, folder_name in enumerate( + os.path.normpath(folder_path.strip('/')).split('/')): inventory_path = os.path.join(inventory_path, folder_name) - folder_ref = si.content.searchIndex.FindByInventoryPath(inventoryPath=inventory_path) + folder_ref = si.content.searchIndex.FindByInventoryPath( + inventoryPath=inventory_path) if isinstance(folder_ref, vim.Folder): # This is a folder that exists so just append and skip it - log.debug("Path {0}/ exists in the inventory".format(inventory_path)) + log.debug( + "Path {0}/ exists in the inventory".format(inventory_path)) folder_refs.append(folder_ref) elif isinstance(folder_ref, vim.Datacenter): # This is a datacenter that exists so just append and skip it - log.debug("Path {0}/ exists in the inventory".format(inventory_path)) + log.debug( + "Path {0}/ exists in the inventory".format(inventory_path)) folder_refs.append(folder_ref) else: path_exists = False if not folder_refs: # If this is the first folder, create it under the rootFolder - log.debug("Creating folder {0} under rootFolder in the inventory".format(folder_name)) - folder_refs.append(si.content.rootFolder.CreateFolder(folder_name)) + log.debug( + "Creating folder {0} under rootFolder in the inventory".format(folder_name)) + folder_refs.append( + si.content.rootFolder.CreateFolder(folder_name)) else: # Create the folder under the parent folder - log.debug("Creating path {0}/ in the inventory".format(inventory_path)) - folder_refs.append(folder_refs[index-1].CreateFolder(folder_name)) + log.debug( + "Creating path {0}/ in the inventory".format(inventory_path)) + folder_refs.append( + folder_refs[ + index - + 1].CreateFolder(folder_name)) if path_exists: return {inventory_path: 'specfied path already exists'} @@ -3228,7 +3534,8 @@ def create_snapshot(name, kwargs=None, 
call=None): if kwargs is None: kwargs = {} - snapshot_name = kwargs.get('snapshot_name') if kwargs and 'snapshot_name' in kwargs else None + snapshot_name = kwargs.get( + 'snapshot_name') if kwargs and 'snapshot_name' in kwargs else None if not snapshot_name: raise SaltCloudSystemExit( @@ -3238,23 +3545,27 @@ def create_snapshot(name, kwargs=None, call=None): memdump = _str_to_bool(kwargs.get('memdump', True)) quiesce = _str_to_bool(kwargs.get('quiesce', False)) - vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, name) if vm_ref.summary.runtime.powerState != "poweredOn": - log.debug('VM {0} is not powered on. Setting both memdump and quiesce to False'.format(name)) + log.debug( + 'VM {0} is not powered on. Setting both memdump and quiesce to False'.format(name)) memdump = False quiesce = False if memdump and quiesce: # Either memdump or quiesce should be set to True - log.warning('You can only set either memdump or quiesce to True. Setting quiesce=False') + log.warning( + 'You can only set either memdump or quiesce to True. 
Setting quiesce=False') quiesce = False desc = kwargs.get('description') if 'description' in kwargs else '' try: task = vm_ref.CreateSnapshot(snapshot_name, desc, memdump, quiesce) - salt.utils.vmware.wait_for_task(task, name, 'create snapshot', 5, 'info') + salt.utils.vmware.wait_for_task( + task, name, 'create snapshot', 5, 'info') except Exception as exc: log.error( 'Error while creating snapshot of {0}: {1}'.format( @@ -3266,8 +3577,10 @@ def create_snapshot(name, kwargs=None, call=None): ) return 'failed to create snapshot' - return {'Snapshot created successfully': _get_snapshots(vm_ref.snapshot.rootSnapshotList, - vm_ref.snapshot.currentSnapshot)} + return { + 'Snapshot created successfully': _get_snapshots( + vm_ref.snapshot.rootSnapshotList, + vm_ref.snapshot.currentSnapshot)} def revert_to_snapshot(name, kwargs=None, call=None): @@ -3304,15 +3617,18 @@ def revert_to_snapshot(name, kwargs=None, call=None): suppress_power_on = _str_to_bool(kwargs.get('power_off', False)) - vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, name) if not vm_ref.rootSnapshot: log.error('VM {0} does not contain any current snapshots'.format(name)) return 'revert failed' try: - task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on) - salt.utils.vmware.wait_for_task(task, name, 'revert to snapshot', 5, 'info') + task = vm_ref.RevertToCurrentSnapshot( + suppressPowerOn=suppress_power_on) + salt.utils.vmware.wait_for_task( + task, name, 'revert to snapshot', 5, 'info') except Exception as exc: log.error( @@ -3350,13 +3666,16 @@ def remove_all_snapshots(name, kwargs=None, call=None): 'The remove_all_snapshots action must be called with ' '-a or --action.' 
) - connection = _str_to_bool(kwargs.get('merge_snapshots')) if kwargs and 'merge_snapshots' in kwargs else True + connection = _str_to_bool(kwargs.get( + 'merge_snapshots')) if kwargs and 'merge_snapshots' in kwargs else True - vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.VirtualMachine, name) try: task = vm_ref.RemoveAllSnapshots() - salt.utils.vmware.wait_for_task(task, name, 'remove snapshots', 5, 'info') + salt.utils.vmware.wait_for_task( + task, name, 'remove snapshots', 5, 'info') except Exception as exc: log.error( 'Error while removing snapshots on VM {0}: {1}'.format( @@ -3418,18 +3737,26 @@ def add_host(kwargs=None, call=None): ) host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None - datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + cluster_name = kwargs.get( + 'cluster') if kwargs and 'cluster' in kwargs else None + datacenter_name = kwargs.get( + 'datacenter') if kwargs and 'datacenter' in kwargs else None host_user = config.get_cloud_config_value( - 'esxi_host_user', get_configured_provider(), __opts__, search_global=False - ) + 'esxi_host_user', + get_configured_provider(), + __opts__, + search_global=False) host_password = config.get_cloud_config_value( - 'esxi_host_password', get_configured_provider(), __opts__, search_global=False - ) + 'esxi_host_password', + get_configured_provider(), + __opts__, + search_global=False) host_ssl_thumbprint = config.get_cloud_config_value( - 'esxi_host_ssl_thumbprint', get_configured_provider(), __opts__, search_global=False - ) + 'esxi_host_ssl_thumbprint', + get_configured_provider(), + __opts__, + search_global=False) if not host_user: raise SaltCloudSystemExit( @@ -3446,20 +3773,23 @@ def add_host(kwargs=None, call=None): 'You must specify either the IP or DNS name 
of the host system.' ) - if (cluster_name and datacenter_name) or not(cluster_name or datacenter_name): + if (cluster_name and datacenter_name) or not( + cluster_name or datacenter_name): raise SaltCloudSystemExit( 'You must specify either the cluster name or the datacenter name.' ) if cluster_name: - cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster_name) + cluster_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.ClusterComputeResource, cluster_name) if not cluster_ref: raise SaltCloudSystemExit( 'Specified cluster does not exist.' ) if datacenter_name: - datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datacenter, datacenter_name) if not datacenter_ref: raise SaltCloudSystemExit( 'Specified datacenter does not exist.' @@ -3474,15 +3804,34 @@ def add_host(kwargs=None, call=None): if host_ssl_thumbprint: spec.sslThumbprint = host_ssl_thumbprint else: - log.warning('SSL thumbprint has not been specified in provider configuration') + log.warning( + 'SSL thumbprint has not been specified in provider configuration') try: - log.debug('Trying to get the SSL thumbprint directly from the host system') - p1 = subprocess.Popen(('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(('openssl', 's_client', '-connect', '{0}:443'.format(host_name)), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p3 = subprocess.Popen(('openssl', 'x509', '-noout', '-fingerprint', '-sha1'), stdin=p2.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + log.debug( + 'Trying to get the SSL thumbprint directly from the host system') + p1 = subprocess.Popen( + ('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen( + ('openssl', + 's_client', + '-connect', + '{0}:443'.format(host_name)), + stdin=p1.stdout, + 
stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + p3 = subprocess.Popen( + ('openssl', + 'x509', + '-noout', + '-fingerprint', + '-sha1'), + stdin=p2.stdout, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) out = salt.utils.to_str(p3.stdout.read()) ssl_thumbprint = out.split('=')[-1].strip() - log.debug('SSL thumbprint received from the host system: {0}'.format(ssl_thumbprint)) + log.debug( + 'SSL thumbprint received from the host system: {0}'.format(ssl_thumbprint)) spec.sslThumbprint = ssl_thumbprint except Exception as exc: log.error( @@ -3500,13 +3849,18 @@ def add_host(kwargs=None, call=None): task = cluster_ref.AddHost(spec=spec, asConnected=True) ret = 'added host system to cluster {0}'.format(cluster_name) if datacenter_name: - task = datacenter_ref.hostFolder.AddStandaloneHost(spec=spec, addConnected=True) + task = datacenter_ref.hostFolder.AddStandaloneHost( + spec=spec, addConnected=True) ret = 'added host system to datacenter {0}'.format(datacenter_name) - salt.utils.vmware.wait_for_task(task, host_name, 'add host system', 5, 'info') + salt.utils.vmware.wait_for_task( + task, host_name, 'add host system', 5, 'info') except Exception as exc: if isinstance(exc, vim.fault.SSLVerifyFault): - log.error('Authenticity of the host\'s SSL certificate is not verified') - log.info('Try again after setting the esxi_host_ssl_thumbprint to {0} in provider configuration'.format(exc.thumbprint)) + log.error( + 'Authenticity of the host\'s SSL certificate is not verified') + log.info( + 'Try again after setting the esxi_host_ssl_thumbprint to {0} in provider configuration'.format( + exc.thumbprint)) log.error( 'Error while adding host {0}: {1}'.format( host_name, @@ -3543,7 +3897,8 @@ def remove_host(kwargs=None, call=None): 'You must specify name of the host system.' 
) - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -3556,7 +3911,8 @@ def remove_host(kwargs=None, call=None): else: # This is a host system that is part of a Cluster task = host_ref.Destroy_Task() - salt.utils.vmware.wait_for_task(task, host_name, 'remove host', log_level='info') + salt.utils.vmware.wait_for_task( + task, host_name, 'remove host', log_level='info') except Exception as exc: log.error( 'Error while removing host {0}: {1}'.format( @@ -3594,7 +3950,8 @@ def connect_host(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -3605,7 +3962,8 @@ def connect_host(kwargs=None, call=None): try: task = host_ref.ReconnectHost_Task() - salt.utils.vmware.wait_for_task(task, host_name, 'connect host', 5, 'info') + salt.utils.vmware.wait_for_task( + task, host_name, 'connect host', 5, 'info') except Exception as exc: log.error( 'Error while connecting host {0}: {1}'.format( @@ -3643,7 +4001,8 @@ def disconnect_host(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' 
@@ -3654,7 +4013,8 @@ def disconnect_host(kwargs=None, call=None): try: task = host_ref.DisconnectHost_Task() - salt.utils.vmware.wait_for_task(task, host_name, 'disconnect host', log_level='info') + salt.utils.vmware.wait_for_task( + task, host_name, 'disconnect host', log_level='info') except Exception as exc: log.error( 'Error while disconnecting host {0}: {1}'.format( @@ -3692,14 +4052,16 @@ def reboot_host(kwargs=None, call=None): ) host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False + force = _str_to_bool( + kwargs.get('force')) if kwargs and 'force' in kwargs else False if not host_name: raise SaltCloudSystemExit( 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -3719,8 +4081,7 @@ def reboot_host(kwargs=None, call=None): raise SaltCloudSystemExit( 'Specified host system is not in maintenance mode. Specify force=True to ' 'force reboot even if there are virtual machines running or other operations ' - 'in progress.' - ) + 'in progress.') try: host_ref.RebootHost_Task(force) @@ -3755,8 +4116,10 @@ def create_datastore_cluster(kwargs=None, call=None): '-f or --function.' 
) - datastore_cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None - datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None + datastore_cluster_name = kwargs.get( + 'name') if kwargs and 'name' in kwargs else None + datacenter_name = kwargs.get( + 'datacenter') if kwargs and 'datacenter' in kwargs else None if not datastore_cluster_name: raise SaltCloudSystemExit( @@ -3774,18 +4137,21 @@ def create_datastore_cluster(kwargs=None, call=None): ) # Check if datastore cluster already exists - datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore_cluster_name) + datastore_cluster_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.StoragePod, datastore_cluster_name) if datastore_cluster_ref: return {datastore_cluster_name: 'datastore cluster already exists'} - datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property( + _get_si(), vim.Datacenter, datacenter_name) if not datacenter_ref: raise SaltCloudSystemExit( 'The specified datacenter does not exist.' ) try: - datacenter_ref.datastoreFolder.CreateStoragePod(name=datastore_cluster_name) + datacenter_ref.datastoreFolder.CreateStoragePod( + name=datastore_cluster_name) except Exception as exc: log.error( 'Error creating datastore cluster {0}: {1}'.format( From cbdeff440f2184fbe4d942f14539cb49a1681af9 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Mon, 15 Feb 2016 16:19:50 +0530 Subject: [PATCH 08/65] Revert "Fixed formating using autopep8" This reverts commit dfcb61c49de1b494ae3d456f58843ff67d5972da. 
--- salt/cloud/clouds/vmware.py | 1020 +++++++++++------------------------ 1 file changed, 327 insertions(+), 693 deletions(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 86e4add68e..c7a2887523 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -247,17 +247,11 @@ def _get_si(): 'password', get_configured_provider(), __opts__, search_global=False ) protocol = config.get_cloud_config_value( - 'protocol', - get_configured_provider(), - __opts__, - search_global=False, - default='https') + 'protocol', get_configured_provider(), __opts__, search_global=False, default='https' + ) port = config.get_cloud_config_value( - 'port', - get_configured_provider(), - __opts__, - search_global=False, - default=443) + 'port', get_configured_provider(), __opts__, search_global=False, default=443 + ) return salt.utils.vmware.get_service_instance(url, username, @@ -275,12 +269,7 @@ def _edit_existing_hard_disk_helper(disk, size_kb): return disk_spec -def _add_new_hard_disk_helper( - disk_label, - size_gb, - unit_number, - controller_key=1000, - thin_provision=False): +def _add_new_hard_disk_helper(disk_label, size_gb, unit_number, controller_key=1000, thin_provision=False): random_key = randint(-2099, -2000) size_kb = int(size_gb * 1024.0 * 1024.0) @@ -304,36 +293,20 @@ def _add_new_hard_disk_helper( return disk_spec -def _edit_existing_network_adapter( - network_adapter, - new_network_name, - adapter_type, - switch_type, - container_ref=None): +def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_type, switch_type, container_ref=None): adapter_type.strip().lower() switch_type.strip().lower() if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]: - edited_network_adapter = salt.utils.vmware.get_network_adapter_type( - adapter_type) + edited_network_adapter = salt.utils.vmware.get_network_adapter_type(adapter_type) if isinstance(network_adapter, type(edited_network_adapter)): 
edited_network_adapter = network_adapter else: - log.debug( - "Changing type of '{0}' from '{1}' to '{2}'".format( - network_adapter.deviceInfo.label, - type(network_adapter).__name__.rsplit( - ".", - 1)[1][ - 7:].lower(), - adapter_type)) + log.debug("Changing type of '{0}' from '{1}' to '{2}'".format(network_adapter.deviceInfo.label, type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(), adapter_type)) else: # If type not specified or does not match, don't change adapter type if adapter_type: - log.error( - "Cannot change type of '{0}' to '{1}'. Not changing type".format( - network_adapter.deviceInfo.label, - adapter_type)) + log.error("Cannot change type of '{0}' to '{1}'. Not changing type".format(network_adapter.deviceInfo.label, adapter_type)) edited_network_adapter = network_adapter if switch_type == 'standard': @@ -362,11 +335,9 @@ def _edit_existing_network_adapter( else: # If switch type not specified or does not match, show error and return if not switch_type: - err_msg = "The switch type to be used by '{0}' has not been specified".format( - network_adapter.deviceInfo.label) + err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter.deviceInfo.label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( - network_adapter.deviceInfo.label, switch_type) + err_msg = "Cannot create '{0}'. 
Invalid/unsupported switch type '{1}'".format(network_adapter.deviceInfo.label, switch_type) raise SaltCloudSystemExit(err_msg) edited_network_adapter.key = network_adapter.key @@ -386,12 +357,7 @@ def _edit_existing_network_adapter( return network_spec -def _add_new_network_adapter_helper( - network_adapter_label, - network_name, - adapter_type, - switch_type, - container_ref=None): +def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref=None): random_key = randint(-4099, -4000) adapter_type.strip().lower() @@ -399,18 +365,13 @@ def _add_new_network_adapter_helper( network_spec = vim.vm.device.VirtualDeviceSpec() if adapter_type in ["vmxnet", "vmxnet2", "vmxnet3", "e1000", "e1000e"]: - network_spec.device = salt.utils.vmware.get_network_adapter_type( - adapter_type) + network_spec.device = salt.utils.vmware.get_network_adapter_type(adapter_type) else: - # If type not specified or does not match, create adapter of type - # vmxnet3 + # If type not specified or does not match, create adapter of type vmxnet3 if not adapter_type: - log.debug("The type of '{0}' has not been specified. Creating of default type 'vmxnet3'".format( - network_adapter_label)) + log.debug("The type of '{0}' has not been specified. Creating of default type 'vmxnet3'".format(network_adapter_label)) else: - log.error( - "Cannot create network adapter of type '{0}'. Creating '{1}' of default type 'vmxnet3'".format( - adapter_type, network_adapter_label)) + log.error("Cannot create network adapter of type '{0}'. 
Creating '{1}' of default type 'vmxnet3'".format(adapter_type, network_adapter_label)) network_spec.device = vim.vm.device.VirtualVmxnet3() network_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add @@ -418,14 +379,15 @@ def _add_new_network_adapter_helper( if switch_type == 'standard': network_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() network_spec.device.backing.deviceName = network_name - network_spec.device.backing.network = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Network, network_name, container_ref=container_ref) + network_spec.device.backing.network = salt.utils.vmware.get_mor_by_property(_get_si(), + vim.Network, + network_name, + container_ref=container_ref) elif switch_type == 'distributed': - network_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), - vim.dvs.DistributedVirtualPortgroup, - network_name, - container_ref=container_ref) + network_ref = salt.utils.vmware.get_mor_by_property(_get_si(), + vim.dvs.DistributedVirtualPortgroup, + network_name, + container_ref=container_ref) dvs_port_connection = vim.dvs.PortConnection( portgroupKey=network_ref.key, switchUuid=network_ref.config.distributedVirtualSwitch.uuid @@ -435,11 +397,10 @@ def _add_new_network_adapter_helper( else: # If switch type not specified or does not match, show error and return if not switch_type: - err_msg = "The switch type to be used by '{0}' has not been specified".format( - network_adapter_label) + err_msg = "The switch type to be used by '{0}' has not been specified".format(network_adapter_label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported switch type '{1}'".format( - network_adapter_label, switch_type) + err_msg = "Cannot create '{0}'. 
Invalid/unsupported switch type '{1}'".format(network_adapter_label, + switch_type) raise SaltCloudSystemExit(err_msg) network_spec.device.key = random_key @@ -463,15 +424,10 @@ def _edit_existing_scsi_controller(scsi_controller, bus_sharing): return scsi_spec -def _add_new_scsi_controller_helper( - scsi_controller_label, - properties, - bus_number): +def _add_new_scsi_controller_helper(scsi_controller_label, properties, bus_number): random_key = randint(-1050, -1000) - adapter_type = properties['type'].strip( - ).lower() if 'type' in properties else None - bus_sharing = properties['bus_sharing'].strip( - ).lower() if 'bus_sharing' in properties else None + adapter_type = properties['type'].strip().lower() if 'type' in properties else None + bus_sharing = properties['bus_sharing'].strip().lower() if 'bus_sharing' in properties else None scsi_spec = vim.vm.device.VirtualDeviceSpec() @@ -487,11 +443,9 @@ def _add_new_scsi_controller_helper( else: # If type not specified or does not match, show error and return if not adapter_type: - err_msg = "The type of '{0}' has not been specified".format( - scsi_controller_label) + err_msg = "The type of '{0}' has not been specified".format(scsi_controller_label) else: - err_msg = "Cannot create '{0}'. Invalid/unsupported type '{1}'".format( - scsi_controller_label, adapter_type) + err_msg = "Cannot create '{0}'. 
Invalid/unsupported type '{1}'".format(scsi_controller_label, adapter_type) raise SaltCloudSystemExit(err_msg) scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add @@ -503,8 +457,7 @@ def _add_new_scsi_controller_helper( scsi_spec.device.deviceInfo.summary = summary if bus_sharing == "virtual": - # Virtual disks can be shared between virtual machines on the same - # server + # Virtual disks can be shared between virtual machines on the same server scsi_spec.device.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.virtualSharing elif bus_sharing == "physical": @@ -518,10 +471,7 @@ def _add_new_scsi_controller_helper( return scsi_spec -def _add_new_ide_controller_helper( - ide_controller_label, - properties, - bus_number): +def _add_new_ide_controller_helper(ide_controller_label, properties, bus_number): ''' Helper function for adding new IDE controllers @@ -549,8 +499,7 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path): drive.backing.fileName = iso_path datastore = iso_path.partition('[')[-1].rpartition(']')[0] - datastore_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datastore, datastore) + datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore) if datastore_ref: drive.backing.datastore = datastore_ref @@ -573,18 +522,12 @@ def _edit_existing_cd_or_dvd_drive(drive, device_type, mode, iso_path): drive_spec = vim.vm.device.VirtualDeviceSpec() drive_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit - drive_spec.device = _set_cd_or_dvd_backing_type( - drive, device_type, mode, iso_path) + drive_spec.device = _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path) return drive_spec -def _add_new_cd_or_dvd_drive_helper( - drive_label, - controller_key, - device_type, - mode, - iso_path): +def _add_new_cd_or_dvd_drive_helper(drive_label, controller_key, device_type, mode, iso_path): random_key = randint(-3025, -3000) device_type.strip().lower() @@ -596,18 
+539,13 @@ def _add_new_cd_or_dvd_drive_helper( drive_spec.device.deviceInfo = vim.Description() if device_type in ['datastore_iso_file', 'client_device']: - drive_spec.device = _set_cd_or_dvd_backing_type( - drive_spec.device, device_type, mode, iso_path) + drive_spec.device = _set_cd_or_dvd_backing_type(drive_spec.device, device_type, mode, iso_path) else: - # If device_type not specified or does not match, create drive of - # Client type with Passthough mode + # If device_type not specified or does not match, create drive of Client type with Passthough mode if not device_type: - log.debug( - "The 'device_type' of '{0}' has not been specified. Creating of default type 'client_device'".format(drive_label)) + log.debug("The 'device_type' of '{0}' has not been specified. Creating of default type 'client_device'".format(drive_label)) else: - log.error( - "Cannot create CD/DVD drive of type '{0}'. Creating '{1}' of default type 'client_device'".format( - device_type, drive_label)) + log.error("Cannot create CD/DVD drive of type '{0}'. 
Creating '{1}' of default type 'client_device'".format(device_type, drive_label)) drive_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() drive_spec.device.deviceInfo.summary = 'Remote Device' @@ -662,87 +600,58 @@ def _manage_devices(devices, vm=None, container_ref=None): if isinstance(device, vim.vm.device.VirtualDisk): # this is a hard disk if 'disk' in list(devices.keys()): - # there is atleast one disk specified to be - # created/configured + # there is atleast one disk specified to be created/configured unit_number += 1 existing_disks_label.append(device.deviceInfo.label) if device.deviceInfo.label in list(devices['disk'].keys()): - size_gb = float( - devices['disk'][ - device.deviceInfo.label]['size']) + size_gb = float(devices['disk'][device.deviceInfo.label]['size']) size_kb = int(size_gb * 1024.0 * 1024.0) if device.capacityInKB < size_kb: # expand the disk - disk_spec = _edit_existing_hard_disk_helper( - device, size_kb) + disk_spec = _edit_existing_hard_disk_helper(device, size_kb) device_specs.append(disk_spec) elif isinstance(device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo) or isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): # this is a network adapter if 'network' in list(devices.keys()): - # there is atleast one network adapter specified to be - # created/configured - existing_network_adapters_label.append( - device.deviceInfo.label) - if device.deviceInfo.label in list( - devices['network'].keys()): - network_name = devices['network'][ - device.deviceInfo.label]['name'] - adapter_type = devices['network'][ - device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][ - device.deviceInfo.label] else '' - switch_type = devices['network'][ - device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][ - device.deviceInfo.label] else '' - network_spec = _edit_existing_network_adapter( - device, network_name, 
adapter_type, switch_type) - adapter_mapping = _set_network_adapter_mapping( - devices['network'][device.deviceInfo.label]) + # there is atleast one network adapter specified to be created/configured + existing_network_adapters_label.append(device.deviceInfo.label) + if device.deviceInfo.label in list(devices['network'].keys()): + network_name = devices['network'][device.deviceInfo.label]['name'] + adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else '' + switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else '' + network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type) + adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label]) device_specs.append(network_spec) nics_map.append(adapter_mapping) elif hasattr(device, 'scsiCtlrUnitNumber'): # this is a SCSI controller if 'scsi' in list(devices.keys()): - # there is atleast one SCSI controller specified to be - # created/configured + # there is atleast one SCSI controller specified to be created/configured bus_number += 1 - existing_scsi_controllers_label.append( - device.deviceInfo.label) + existing_scsi_controllers_label.append(device.deviceInfo.label) if device.deviceInfo.label in list(devices['scsi'].keys()): # Modify the existing SCSI controller - scsi_controller_properties = devices[ - 'scsi'][device.deviceInfo.label] - bus_sharing = scsi_controller_properties['bus_sharing'].strip( - ).lower() if 'bus_sharing' in scsi_controller_properties else None - if bus_sharing and bus_sharing in [ - 'virtual', 'physical', 'no']: + scsi_controller_properties = devices['scsi'][device.deviceInfo.label] + bus_sharing = scsi_controller_properties['bus_sharing'].strip().lower() if 'bus_sharing' in scsi_controller_properties else None + if bus_sharing and bus_sharing in ['virtual', 
'physical', 'no']: bus_sharing = '{0}Sharing'.format(bus_sharing) if bus_sharing != device.sharedBus: - # Only edit the SCSI controller if bus_sharing - # is different - scsi_spec = _edit_existing_scsi_controller( - device, bus_sharing) + # Only edit the SCSI controller if bus_sharing is different + scsi_spec = _edit_existing_scsi_controller(device, bus_sharing) device_specs.append(scsi_spec) elif isinstance(device, vim.vm.device.VirtualCdrom): # this is a cd/dvd drive if 'cd' in list(devices.keys()): - # there is atleast one cd/dvd drive specified to be - # created/configured + # there is atleast one cd/dvd drive specified to be created/configured existing_cd_drives_label.append(device.deviceInfo.label) if device.deviceInfo.label in list(devices['cd'].keys()): - device_type = devices['cd'][ - device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][ - device.deviceInfo.label] else '' - mode = devices['cd'][ - device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][ - device.deviceInfo.label] else '' - iso_path = devices['cd'][ - device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][ - device.deviceInfo.label] else '' - cd_drive_spec = _edit_existing_cd_or_dvd_drive( - device, device_type, mode, iso_path) + device_type = devices['cd'][device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][device.deviceInfo.label] else '' + mode = devices['cd'][device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][device.deviceInfo.label] else '' + iso_path = devices['cd'][device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][device.deviceInfo.label] else '' + cd_drive_spec = _edit_existing_cd_or_dvd_drive(device, device_type, mode, iso_path) device_specs.append(cd_drive_spec) elif isinstance(device, vim.vm.device.VirtualIDEController): @@ -750,68 +659,55 @@ def _manage_devices(devices, vm=None, container_ref=None): ide_controllers[device.key] = len(device.device) if 'network' in list(devices.keys()): - 
network_adapters_to_create = sorted( - set(devices['network'].keys()) - set(existing_network_adapters_label)) - log.debug("Networks adapters to create: {0}".format( - network_adapters_to_create)) if network_adapters_to_create else None # pylint: disable=W0106 + network_adapters_to_create = list(set(devices['network'].keys()) - set(existing_network_adapters_label)) + network_adapters_to_create.sort() + log.debug("Networks adapters to create: {0}".format(network_adapters_to_create)) if network_adapters_to_create else None # pylint: disable=W0106 for network_adapter_label in network_adapters_to_create: network_name = devices['network'][network_adapter_label]['name'] - adapter_type = devices['network'][network_adapter_label][ - 'adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else '' - switch_type = devices['network'][network_adapter_label][ - 'switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else '' + adapter_type = devices['network'][network_adapter_label]['adapter_type'] if 'adapter_type' in devices['network'][network_adapter_label] else '' + switch_type = devices['network'][network_adapter_label]['switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else '' # create the network adapter - network_spec = _add_new_network_adapter_helper( - network_adapter_label, network_name, adapter_type, switch_type, container_ref) - adapter_mapping = _set_network_adapter_mapping( - devices['network'][network_adapter_label]) + network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type, container_ref) + adapter_mapping = _set_network_adapter_mapping(devices['network'][network_adapter_label]) device_specs.append(network_spec) nics_map.append(adapter_mapping) if 'scsi' in list(devices.keys()): - scsi_controllers_to_create = sorted( - set(devices['scsi'].keys()) - set(existing_scsi_controllers_label)) - log.debug("SCSI controllers to create: {0}".format( - 
scsi_controllers_to_create)) if scsi_controllers_to_create else None # pylint: disable=W0106 + scsi_controllers_to_create = list(set(devices['scsi'].keys()) - set(existing_scsi_controllers_label)) + scsi_controllers_to_create.sort() + log.debug("SCSI controllers to create: {0}".format(scsi_controllers_to_create)) if scsi_controllers_to_create else None # pylint: disable=W0106 for scsi_controller_label in scsi_controllers_to_create: # create the SCSI controller scsi_controller_properties = devices['scsi'][scsi_controller_label] - scsi_spec = _add_new_scsi_controller_helper( - scsi_controller_label, scsi_controller_properties, bus_number) + scsi_spec = _add_new_scsi_controller_helper(scsi_controller_label, scsi_controller_properties, bus_number) device_specs.append(scsi_spec) bus_number += 1 if 'ide' in list(devices.keys()): - ide_controllers_to_create = sorted( - set(devices['ide'].keys()) - set(existing_ide_controllers_label)) - log.debug('IDE controllers to create: {0}'.format( - ide_controllers_to_create)) if ide_controllers_to_create else None # pylint: disable=W0106 + ide_controllers_to_create = list(set(devices['ide'].keys()) - set(existing_ide_controllers_label)) + ide_controllers_to_create.sort() + log.debug('IDE controllers to create: {0}'.format(ide_controllers_to_create)) if ide_controllers_to_create else None # pylint: disable=W0106 for ide_controller_label in ide_controllers_to_create: # create the IDE controller - ide_spec = _add_new_ide_controller_helper( - ide_controller_label, None, bus_number) + ide_spec = _add_new_ide_controller_helper(ide_controller_label, None, bus_number) device_specs.append(ide_spec) bus_number += 1 if 'disk' in list(devices.keys()): - disks_to_create = sorted( - set(devices['disk'].keys()) - set(existing_disks_label)) - log.debug("Hard disks to create: {0}".format( - disks_to_create)) if disks_to_create else None # pylint: disable=W0106 + disks_to_create = list(set(devices['disk'].keys()) - set(existing_disks_label)) + 
disks_to_create.sort() + log.debug("Hard disks to create: {0}".format(disks_to_create)) if disks_to_create else None # pylint: disable=W0106 for disk_label in disks_to_create: # create the disk size_gb = float(devices['disk'][disk_label]['size']) - thin_provision = bool(devices['disk'][disk_label][ - 'thin_provision']) if 'thin_provision' in devices['disk'][disk_label] else False - disk_spec = _add_new_hard_disk_helper( - disk_label, size_gb, unit_number, thin_provision=thin_provision) + thin_provision = bool(devices['disk'][disk_label]['thin_provision']) if 'thin_provision' in devices['disk'][disk_label] else False + disk_spec = _add_new_hard_disk_helper(disk_label, size_gb, unit_number, thin_provision=thin_provision) # when creating both SCSI controller and Hard disk at the same time we need the randomly # assigned (temporary) key of the newly created SCSI controller if 'controller' in devices['disk'][disk_label]: for spec in device_specs: - if spec.device.deviceInfo.label == devices[ - 'disk'][disk_label]['controller']: + if spec.device.deviceInfo.label == devices['disk'][disk_label]['controller']: disk_spec.device.controllerKey = spec.device.key break @@ -819,42 +715,34 @@ def _manage_devices(devices, vm=None, container_ref=None): unit_number += 1 if 'cd' in list(devices.keys()): - cd_drives_to_create = sorted( - set(devices['cd'].keys()) - set(existing_cd_drives_label)) - log.debug("CD/DVD drives to create: {0}".format( - cd_drives_to_create)) if cd_drives_to_create else None # pylint: disable=W0106 + cd_drives_to_create = list(set(devices['cd'].keys()) - set(existing_cd_drives_label)) + cd_drives_to_create.sort() + log.debug("CD/DVD drives to create: {0}".format(cd_drives_to_create)) if cd_drives_to_create else None # pylint: disable=W0106 for cd_drive_label in cd_drives_to_create: # create the CD/DVD drive - device_type = devices['cd'][cd_drive_label][ - 'device_type'] if 'device_type' in devices['cd'][cd_drive_label] else '' - mode = 
devices['cd'][cd_drive_label][ - 'mode'] if 'mode' in devices['cd'][cd_drive_label] else '' - iso_path = devices['cd'][cd_drive_label][ - 'iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else '' + device_type = devices['cd'][cd_drive_label]['device_type'] if 'device_type' in devices['cd'][cd_drive_label] else '' + mode = devices['cd'][cd_drive_label]['mode'] if 'mode' in devices['cd'][cd_drive_label] else '' + iso_path = devices['cd'][cd_drive_label]['iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else '' controller_key = None # When creating both IDE controller and CD/DVD drive at the same time we need the randomly # assigned (temporary) key of the newly created IDE controller if 'controller' in devices['cd'][cd_drive_label]: for spec in device_specs: - if spec.device.deviceInfo.label == devices[ - 'cd'][cd_drive_label]['controller']: + if spec.device.deviceInfo.label == devices['cd'][cd_drive_label]['controller']: controller_key = spec.device.key ide_controllers[controller_key] = 0 break else: - for ide_controller_key, num_devices in six.iteritems( - ide_controllers): + for ide_controller_key, num_devices in six.iteritems(ide_controllers): if num_devices < 2: controller_key = ide_controller_key break if not controller_key: - log.error("No more available controllers for '{0}'. All IDE controllers are currently in use".format( - cd_drive_label)) + log.error("No more available controllers for '{0}'. 
All IDE controllers are currently in use".format(cd_drive_label)) else: - cd_drive_spec = _add_new_cd_or_dvd_drive_helper( - cd_drive_label, controller_key, device_type, mode, iso_path) + cd_drive_spec = _add_new_cd_or_dvd_drive_helper(cd_drive_label, controller_key, device_type, mode, iso_path) device_specs.append(cd_drive_spec) ide_controllers[controller_key] += 1 @@ -871,21 +759,14 @@ def _wait_for_vmware_tools(vm_ref, max_wait): starttime = time.time() while time_counter < max_wait: if time_counter % 5 == 0: - log.info( - "[ {0} ] Waiting for VMware tools to be running [{1} s]".format( - vm_ref.name, time_counter)) + log.info("[ {0} ] Waiting for VMware tools to be running [{1} s]".format(vm_ref.name, time_counter)) if str(vm_ref.summary.guest.toolsRunningStatus) == "guestToolsRunning": - log.info( - "[ {0} ] Successfully got VMware tools running on the guest in {1} seconds".format( - vm_ref.name, time_counter)) + log.info("[ {0} ] Successfully got VMware tools running on the guest in {1} seconds".format(vm_ref.name, time_counter)) return True time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 - log.warning( - "[ {0} ] Timeout Reached. VMware tools still not running after waiting for {1} seconds".format( - vm_ref.name, - max_wait)) + log.warning("[ {0} ] Timeout Reached. 
VMware tools still not running after waiting for {1} seconds".format(vm_ref.name, max_wait)) return False @@ -938,36 +819,26 @@ def _wait_for_ip(vm_ref, max_wait): vmware_tools_status = _wait_for_vmware_tools(vm_ref, max_wait_vmware_tools) if not vmware_tools_status: return False - + time_counter = 0 starttime = time.time() while time_counter < max_wait_ip: if time_counter % 5 == 0: - log.info( - "[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format( - vm_ref.name, time_counter)) + log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter)) - if vm_ref.summary.guest.ipAddress and _valid_ip( - vm_ref.summary.guest.ipAddress): - log.info( - "[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format( - vm_ref.name, time_counter)) + if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return vm_ref.summary.guest.ipAddress - + for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: if _valid_ip(current_ip.ipAddress): - log.info( - "[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format( - vm_ref.name, time_counter)) + log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return current_ip.ipAddress time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 - log.warning( - "[ {0} ] Timeout Reached. Unable to retrieve IPv4 information after waiting for {1} seconds".format( - vm_ref.name, - max_wait_ip)) + log.warning("[ {0} ] Timeout Reached. 
Unable to retrieve IPv4 information after waiting for {1} seconds".format(vm_ref.name, max_wait_ip)) return False @@ -976,8 +847,7 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): starttime = time.time() while host_ref.runtime.connectionState != 'notResponding': if time_counter % sleep_seconds == 0: - message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format( - host_ref.name, task_type, time_counter) + message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -986,8 +856,7 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): time_counter += 1 while host_ref.runtime.connectionState != 'connected': if time_counter % sleep_seconds == 0: - message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format( - host_ref.name, task_type, time_counter) + message = "[ {0} ] Waiting for host {1} to finish [{2} s]".format(host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -995,8 +864,7 @@ def _wait_for_host(host_ref, task_type, sleep_seconds=5, log_level='debug'): time.sleep(1.0 - ((time.time() - starttime) % 1.0)) time_counter += 1 if host_ref.runtime.connectionState == 'connected': - message = "[ {0} ] Successfully completed host {1} in {2} seconds".format( - host_ref.name, task_type, time_counter) + message = "[ {0} ] Successfully completed host {1} in {2} seconds".format(host_ref.name, task_type, time_counter) if log_level == 'info': log.info(message) else: @@ -1012,34 +880,27 @@ def _format_instance_info_select(vm, selection): vm_select_info['id'] = vm["name"] if 'image' in selection: - vm_select_info['image'] = "{0} (Detected)".format( - vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A" + vm_select_info['image'] = "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A" if 'size' in selection: - cpu = vm[ - 
"config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} MB".format( - vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" + cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" + ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" vm_select_info['size'] = u"cpu: {0}\nram: {1}".format(cpu, ram) if 'state' in selection: - vm_select_info['state'] = str( - vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A" + vm_select_info['state'] = str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A" if 'guest_id' in selection: - vm_select_info['guest_id'] = vm[ - "config.guestId"] if "config.guestId" in vm else "N/A" + vm_select_info['guest_id'] = vm["config.guestId"] if "config.guestId" in vm else "N/A" if 'hostname' in selection: vm_select_info['hostname'] = vm["object"].guest.hostName if 'path' in selection: - vm_select_info['path'] = vm[ - "config.files.vmPathName"] if "config.files.vmPathName" in vm else "N/A" + vm_select_info['path'] = vm["config.files.vmPathName"] if "config.files.vmPathName" in vm else "N/A" if 'tools_status' in selection: - vm_select_info['tools_status'] = str( - vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" + vm_select_info['tools_status'] = str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" if 'private_ips' in selection or 'networks' in selection: network_full_info = {} @@ -1067,61 +928,42 @@ def _format_instance_info_select(vm, selection): for device in vm["config.hardware.device"]: device_full_info[device.deviceInfo.label] = {} if 'devices' in selection: - device_full_info[ - device.deviceInfo.label]['key'] = device.key, - device_full_info[device.deviceInfo.label][ - 'label'] = device.deviceInfo.label, - device_full_info[device.deviceInfo.label][ - 'summary'] = device.deviceInfo.summary, - 
device_full_info[device.deviceInfo.label][ - 'type'] = type(device).__name__.rsplit(".", 1)[1] + device_full_info[device.deviceInfo.label]['key'] = device.key, + device_full_info[device.deviceInfo.label]['label'] = device.deviceInfo.label, + device_full_info[device.deviceInfo.label]['summary'] = device.deviceInfo.summary, + device_full_info[device.deviceInfo.label]['type'] = type(device).__name__.rsplit(".", 1)[1] if device.unitNumber: - device_full_info[device.deviceInfo.label][ - 'unitNumber'] = device.unitNumber + device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber if hasattr(device, 'connectable') and device.connectable: - device_full_info[device.deviceInfo.label][ - 'startConnected'] = device.connectable.startConnected - device_full_info[device.deviceInfo.label][ - 'allowGuestControl'] = device.connectable.allowGuestControl - device_full_info[device.deviceInfo.label][ - 'connected'] = device.connectable.connected - device_full_info[device.deviceInfo.label][ - 'status'] = device.connectable.status + device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected + device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl + device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected + device_full_info[device.deviceInfo.label]['status'] = device.connectable.status - if hasattr(device, - 'controllerKey') and device.controllerKey: - device_full_info[device.deviceInfo.label][ - 'controllerKey'] = device.controllerKey + if hasattr(device, 'controllerKey') and device.controllerKey: + device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey if hasattr(device, 'addressType'): - device_full_info[device.deviceInfo.label][ - 'addressType'] = device.addressType + device_full_info[device.deviceInfo.label]['addressType'] = device.addressType if hasattr(device, 'busNumber'): - device_full_info[device.deviceInfo.label][ - 
'busNumber'] = device.busNumber + device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber if hasattr(device, 'device'): - device_full_info[device.deviceInfo.label][ - 'deviceKeys'] = device.device + device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device if hasattr(device, 'videoRamSizeInKB'): - device_full_info[device.deviceInfo.label][ - 'videoRamSizeInKB'] = device.videoRamSizeInKB + device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB if isinstance(device, vim.vm.device.VirtualDisk): - device_full_info[device.deviceInfo.label][ - 'capacityInKB'] = device.capacityInKB - device_full_info[device.deviceInfo.label][ - 'diskMode'] = device.backing.diskMode - device_full_info[device.deviceInfo.label][ - 'fileName'] = device.backing.fileName + device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB + device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode + device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName if hasattr(device, 'macAddress'): - device_full_info[device.deviceInfo.label][ - 'macAddress'] = device.macAddress + device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress device_mac_addresses.append(device.macAddress) if 'devices' in selection: @@ -1132,12 +974,10 @@ def _format_instance_info_select(vm, selection): if 'storage' in selection: storage_full_info = { - 'committed': int( - vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", - 'uncommitted': int( - vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", - 'unshared': int( - vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"} + 'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", + 'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", + 'unshared': 
int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A" + } vm_select_info['storage'] = storage_full_info if 'files' in selection: @@ -1168,59 +1008,43 @@ def _format_instance_info(vm): } if device.unitNumber: - device_full_info[device.deviceInfo.label][ - 'unitNumber'] = device.unitNumber + device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber if hasattr(device, 'connectable') and device.connectable: - device_full_info[device.deviceInfo.label][ - 'startConnected'] = device.connectable.startConnected - device_full_info[device.deviceInfo.label][ - 'allowGuestControl'] = device.connectable.allowGuestControl - device_full_info[device.deviceInfo.label][ - 'connected'] = device.connectable.connected - device_full_info[device.deviceInfo.label][ - 'status'] = device.connectable.status + device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected + device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl + device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected + device_full_info[device.deviceInfo.label]['status'] = device.connectable.status if hasattr(device, 'controllerKey') and device.controllerKey: - device_full_info[device.deviceInfo.label][ - 'controllerKey'] = device.controllerKey + device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey if hasattr(device, 'addressType'): - device_full_info[device.deviceInfo.label][ - 'addressType'] = device.addressType + device_full_info[device.deviceInfo.label]['addressType'] = device.addressType if hasattr(device, 'macAddress'): - device_full_info[device.deviceInfo.label][ - 'macAddress'] = device.macAddress + device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress device_mac_addresses.append(device.macAddress) if hasattr(device, 'busNumber'): - device_full_info[device.deviceInfo.label][ - 'busNumber'] = device.busNumber + 
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber if hasattr(device, 'device'): - device_full_info[device.deviceInfo.label][ - 'deviceKeys'] = device.device + device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device if hasattr(device, 'videoRamSizeInKB'): - device_full_info[device.deviceInfo.label][ - 'videoRamSizeInKB'] = device.videoRamSizeInKB + device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB if isinstance(device, vim.vm.device.VirtualDisk): - device_full_info[device.deviceInfo.label][ - 'capacityInKB'] = device.capacityInKB - device_full_info[device.deviceInfo.label][ - 'diskMode'] = device.backing.diskMode - device_full_info[device.deviceInfo.label][ - 'fileName'] = device.backing.fileName + device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB + device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode + device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName storage_full_info = { - 'committed': int( - vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", - 'uncommitted': int( - vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", - 'unshared': int( - vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"} + 'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A", + 'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A", + 'unshared': int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A" + } file_full_info = {} if "layoutEx.file" in vm: @@ -1244,41 +1068,29 @@ def _format_instance_info(vm): ip_addresses.extend(net.ipAddress) cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} MB".format(vm["config.hardware.memoryMB"] - ) if "config.hardware.memoryMB" in vm else "N/A" + 
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" vm_full_info = { - 'id': str( - vm['name']), - 'image': "{0} (Detected)".format( - vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", - 'size': u"cpu: {0}\nram: {1}".format( - cpu, - ram), - 'state': str( - vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", + 'id': str(vm['name']), + 'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", + 'size': u"cpu: {0}\nram: {1}".format(cpu, ram), + 'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", 'private_ips': ip_addresses, 'public_ips': [], 'devices': device_full_info, 'storage': storage_full_info, 'files': file_full_info, - 'guest_id': str( - vm["config.guestId"]) if "config.guestId" in vm else "N/A", - 'hostname': str( - vm["object"].guest.hostName), + 'guest_id': str(vm["config.guestId"]) if "config.guestId" in vm else "N/A", + 'hostname': str(vm["object"].guest.hostName), 'mac_addresses': device_mac_addresses, 'networks': network_full_info, - 'path': str( - vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A", - 'tools_status': str( - vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"} + 'path': str(vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A", + 'tools_status': str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A" + } return vm_full_info -def _get_snapshots( - snapshot_list, - current_snapshot=None, - parent_snapshot_path=""): +def _get_snapshots(snapshot_list, current_snapshot=None, parent_snapshot_path=""): snapshots = {} for snapshot in snapshot_list: snapshot_path = "{0}/{1}".format(parent_snapshot_path, snapshot.name) @@ -1295,10 +1107,7 @@ def _get_snapshots( # Check if child snapshots exist if snapshot.childSnapshotList: - ret = _get_snapshots( - snapshot.childSnapshotList, - 
current_snapshot, - snapshot_path) + ret = _get_snapshots(snapshot.childSnapshotList, current_snapshot, snapshot_path) if current_snapshot: return ret snapshots.update(ret) @@ -1476,9 +1285,7 @@ def list_datastore_clusters(kwargs=None, call=None): '-f or --function.' ) - return { - 'Datastore Clusters': salt.utils.vmware.list_datastore_clusters( - _get_si())} + return {'Datastore Clusters': salt.utils.vmware.list_datastore_clusters(_get_si())} def list_datastores(kwargs=None, call=None): @@ -1576,8 +1383,7 @@ def list_nodes_min(kwargs=None, call=None): ret = {} vm_properties = ["name"] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = True @@ -1620,26 +1426,19 @@ def list_nodes(kwargs=None, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: - cpu = vm[ - "config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" - ram = "{0} MB".format( - vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" + cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A" + ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A" vm_info = { 'id': vm["name"], - 'image': "{0} (Detected)".format( - vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A", - 'size': u"cpu: {0}\nram: {1}".format( - cpu, - ram), - 'state': str( - vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", - 'private_ips': [ - vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], - 'public_ips': []} + 'image': "{0} (Detected)".format(vm["config.guestFullName"]) if 
"config.guestFullName" in vm else "N/A", + 'size': u"cpu: {0}\nram: {1}".format(cpu, ram), + 'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A", + 'private_ips': [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [], + 'public_ips': [] + } ret[vm_info['id']] = vm_info return ret @@ -1688,8 +1487,7 @@ def list_nodes_full(kwargs=None, call=None): "guest.toolsStatus" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = _format_instance_info(vm) @@ -1739,8 +1537,7 @@ def list_nodes_select(call=None): vm_properties.append("config.guestFullName") if 'size' in selection: - vm_properties.extend( - ["config.hardware.numCPU", "config.hardware.memoryMB"]) + vm_properties.extend(["config.hardware.numCPU", "config.hardware.memoryMB"]) if 'state' in selection: vm_properties.append("summary.runtime.powerState") @@ -1779,8 +1576,7 @@ def list_nodes_select(call=None): elif 'name' not in vm_properties: vm_properties.append("name") - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm["name"]] = _format_instance_info_select(vm, selection) @@ -1820,8 +1616,7 @@ def show_instance(name, call=None): "guest.toolsStatus" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm['name'] == name: @@ -1854,17 +1649,16 @@ def avail_images(call=None): "config.hardware.memoryMB" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = 
salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if "config.template" in vm and vm["config.template"]: - templates[ - vm["name"]] = { + templates[vm["name"]] = { 'name': vm["name"], 'guest_fullname': vm["config.guestFullName"] if "config.guestFullName" in vm else "N/A", 'cpus': vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A", - 'ram': vm["config.hardware.memoryMB"] if "config.hardware.memoryMB" in vm else "N/A"} + 'ram': vm["config.hardware.memoryMB"] if "config.hardware.memoryMB" in vm else "N/A" + } return templates @@ -1912,7 +1706,8 @@ def avail_sizes(call=None): log.warning( 'Because sizes are built into templates with VMware, there are no sizes ' - 'to return.') + 'to return.' + ) return {} @@ -1989,18 +1784,14 @@ def list_snapshots(kwargs=None, call=None): "snapshot" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["rootSnapshot"]: if kwargs and kwargs.get('name') == vm["name"]: - return { - vm["name"]: _get_snapshots( - vm["snapshot"].rootSnapshotList)} + return {vm["name"]: _get_snapshots(vm["snapshot"].rootSnapshotList)} else: - ret[vm["name"]] = _get_snapshots( - vm["snapshot"].rootSnapshotList) + ret[vm["name"]] = _get_snapshots(vm["snapshot"].rootSnapshotList) return ret @@ -2026,8 +1817,7 @@ def start(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2074,8 +1864,7 @@ def stop(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = 
salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2122,8 +1911,7 @@ def suspend(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2174,13 +1962,11 @@ def reset(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: - if vm["summary.runtime.powerState"] == "suspended" or vm[ - "summary.runtime.powerState"] == "poweredOff": + if vm["summary.runtime.powerState"] == "suspended" or vm["summary.runtime.powerState"] == "poweredOff": ret = 'cannot reset in suspended/powered off state' log.info('VM {0} {1}'.format(name, ret)) return ret @@ -2224,8 +2010,7 @@ def terminate(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2281,8 +2066,7 @@ def destroy(name, call=None): "summary.runtime.powerState" ] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: @@ -2298,8 +2082,7 @@ def destroy(name, call=None): name, exc ), - # Show the traceback if the debug logging level is - # enabled + # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) 
return 'failed to destroy' @@ -2326,8 +2109,7 @@ def destroy(name, call=None): transport=__opts__['transport'] ) if __opts__.get('update_cachedir', False) is True: - salt.utils.cloud.delete_minion_cachedir( - name, __active_provider_name__.split(':')[0], __opts__) + salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__) return True @@ -2433,17 +2215,14 @@ def create(vm_): ) if 'clonefrom' in vm_: - # If datacenter is specified, set the container reference to start - # search from it instead + # If datacenter is specified, set the container reference to start search from it instead container_ref = None if datacenter: - datacenter_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datacenter, datacenter) + datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) container_ref = datacenter_ref if datacenter_ref else None # Clone VM/template from specified VM/template - object_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, vm_['clonefrom'], container_ref=container_ref) + object_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_['clonefrom'], container_ref=container_ref) if object_ref: clone_type = "template" if object_ref.config.template else "vm" else: @@ -2454,26 +2233,19 @@ def create(vm_): clone_type = None object_ref = None - # Either a cluster, or a resource pool must be specified when cloning from - # template or creating. + # Either a cluster, or a resource pool must be specified when cloning from template or creating. 
if resourcepool: - resourcepool_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.ResourcePool, resourcepool, container_ref=container_ref) + resourcepool_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ResourcePool, resourcepool, container_ref=container_ref) if not resourcepool_ref: - log.error( - "Specified resource pool: '{0}' does not exist".format(resourcepool)) + log.error("Specified resource pool: '{0}' does not exist".format(resourcepool)) if not clone_type or clone_type == "template": - raise SaltCloudSystemExit( - 'You must specify a resource pool that exists.') + raise SaltCloudSystemExit('You must specify a resource pool that exists.') elif cluster: - cluster_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.ClusterComputeResource, cluster, container_ref=container_ref) + cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster, container_ref=container_ref) if not cluster_ref: - log.error( - "Specified cluster: '{0}' does not exist".format(cluster)) + log.error("Specified cluster: '{0}' does not exist".format(cluster)) if not clone_type or clone_type == "template": - raise SaltCloudSystemExit( - 'You must specify a cluster that exists.') + raise SaltCloudSystemExit('You must specify a cluster that exists.') else: resourcepool_ref = cluster_ref.resourcePool elif clone_type == "template": @@ -2485,29 +2257,20 @@ def create(vm_): 'You must either specify a cluster or a resource pool when creating.' ) else: - log.debug( - "Using resource pool used by the {0} {1}".format( - clone_type, vm_['clonefrom'])) + log.debug("Using resource pool used by the {0} {1}".format(clone_type, vm_['clonefrom'])) # Either a datacenter or a folder can be optionally specified when cloning, required when creating. - # If not specified when cloning, the existing VM/template\'s parent folder - # is used. + # If not specified when cloning, the existing VM/template\'s parent folder is used. 
if folder: - folder_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Folder, folder, container_ref=container_ref) + folder_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Folder, folder, container_ref=container_ref) if not folder_ref: log.error("Specified folder: '{0}' does not exist".format(folder)) - log.debug( - "Using folder in which {0} {1} is present".format( - clone_type, vm_['clonefrom'])) + log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent elif datacenter: if not datacenter_ref: - log.error( - "Specified datacenter: '{0}' does not exist".format(datacenter)) - log.debug( - "Using datacenter folder in which {0} {1} is present".format( - clone_type, vm_['clonefrom'])) + log.error("Specified datacenter: '{0}' does not exist".format(datacenter)) + log.debug("Using datacenter folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent else: folder_ref = datacenter_ref.vmFolder @@ -2516,9 +2279,7 @@ def create(vm_): 'You must either specify a folder or a datacenter when creating not cloning.' ) else: - log.debug( - "Using folder in which {0} {1} is present".format( - clone_type, vm_['clonefrom'])) + log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom'])) folder_ref = object_ref.parent if 'clonefrom' in vm_: @@ -2531,29 +2292,21 @@ def create(vm_): # Either a datastore/datastore cluster can be optionally specified. # If not specified, the current datastore is used. 
if datastore: - datastore_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datastore, datastore, container_ref=container_ref) + datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore, container_ref=container_ref) if datastore_ref: # specific datastore has been specified reloc_spec.datastore = datastore_ref else: - datastore_cluster_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.StoragePod, datastore, container_ref=container_ref) + datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore, container_ref=container_ref) if not datastore_cluster_ref: - log.error( - "Specified datastore/datastore cluster: '{0}' does not exist".format(datastore)) - log.debug( - "Using datastore used by the {0} {1}".format( - clone_type, vm_['clonefrom'])) + log.error("Specified datastore/datastore cluster: '{0}' does not exist".format(datastore)) + log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) else: log.debug("No datastore/datastore cluster specified") - log.debug( - "Using datastore used by the {0} {1}".format( - clone_type, vm_['clonefrom'])) + log.debug("Using datastore used by the {0} {1}".format(clone_type, vm_['clonefrom'])) if host: - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host, container_ref=container_ref) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host, container_ref=container_ref) if host_ref: reloc_spec.host = host_ref else: @@ -2564,11 +2317,9 @@ def create(vm_): 'You must specify a datastore when creating not cloning.' 
) else: - datastore_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datastore, datastore) + datastore_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datastore, datastore) if not datastore_ref: - raise SaltCloudSystemExit( - "Specified datastore: '{0}' does not exist".format(datastore)) + raise SaltCloudSystemExit("Specified datastore: '{0}' does not exist".format(datastore)) # Create the config specs config_spec = vim.vm.ConfigSpec() @@ -2578,17 +2329,13 @@ def create(vm_): if hardware_version: hardware_version = "vmx-{0}".format(str(hardware_version).zfill(2)) if hardware_version != object_ref.config.version: - log.debug( - "Scheduling hardware version upgrade from {0} to {1}".format( - object_ref.config.version, - hardware_version)) + log.debug("Scheduling hardware version upgrade from {0} to {1}".format(object_ref.config.version, hardware_version)) scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo() scheduled_hardware_upgrade.upgradePolicy = 'always' scheduled_hardware_upgrade.versionKey = hardware_version config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade else: - log.debug( - "Virtual hardware version already set to {0}".format(hardware_version)) + log.debug("Virtual hardware version already set to {0}".format(hardware_version)) if num_cpus: log.debug("Setting cpu to: {0}".format(num_cpus)) @@ -2600,10 +2347,9 @@ def create(vm_): if memory_unit.lower() == "mb": memory_mb = int(memory_num) elif memory_unit.lower() == "gb": - memory_mb = int(float(memory_num) * 1024.0) + memory_mb = int(float(memory_num)*1024.0) else: - err_msg = "Invalid memory type specified: '{0}'".format( - memory_unit) + err_msg = "Invalid memory type specified: '{0}'".format(memory_unit) log.error(err_msg) return {'Error': err_msg} except (TypeError, ValueError): @@ -2628,8 +2374,7 @@ def create(vm_): config=config_spec ) - if customization and (devices and 'network' in list( - devices.keys())) and 'Windows' not in 
object_ref.config.guestFullName: + if customization and (devices and 'network' in list(devices.keys())) and 'Windows' not in object_ref.config.guestFullName: global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): @@ -2648,8 +2393,7 @@ def create(vm_): ) clone_spec.customization = custom_spec - if customization and (devices and 'network' in list( - devices.keys())) and 'Windows' in object_ref.config.guestFullName: + if customization and (devices and 'network' in list(devices.keys())) and 'Windows' in object_ref.config.guestFullName: global_ip = vim.vm.customization.GlobalIPSettings() if 'dns_servers' in list(vm_.keys()): @@ -2668,7 +2412,7 @@ def create(vm_): identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = domain identity.identification = vim.vm.customization.Identification() - + custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, @@ -2686,8 +2430,7 @@ def create(vm_): else: config_spec.name = vm_name config_spec.files = vim.vm.FileInfo() - config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format( - datastore, vm_name) + config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format(datastore, vm_name) config_spec.guestId = guest_id log.debug('config_spec set to:\n{0}'.format( @@ -2704,17 +2447,11 @@ def create(vm_): ) if 'clonefrom' in vm_: - log.info( - "Creating {0} from {1}({2})".format( - vm_['name'], - clone_type, - vm_['clonefrom'])) + log.info("Creating {0} from {1}({2})".format(vm_['name'], clone_type, vm_['clonefrom'])) if datastore and not datastore_ref and datastore_cluster_ref: - # datastore cluster has been specified so apply Storage DRS - # recomendations - pod_spec = vim.storageDrs.PodSelectionSpec( - storagePod=datastore_cluster_ref) + # datastore cluster has been specified so apply Storage DRS recomendations + pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref) storage_spec = 
vim.storageDrs.StoragePlacementSpec( type='clone', @@ -2729,19 +2466,15 @@ def create(vm_): si = _get_si() # get recommended datastores - recommended_datastores = si.content.storageResourceManager.RecommendDatastores( - storageSpec=storage_spec) + recommended_datastores = si.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) # apply storage DRS recommendations - task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task( - recommended_datastores.recommendations[0].key) - salt.utils.vmware.wait_for_task( - task, vm_name, 'apply storage DRS recommendations', 5, 'info') + task = si.content.storageResourceManager.ApplyStorageDrsRecommendation_Task(recommended_datastores.recommendations[0].key) + salt.utils.vmware.wait_for_task(task, vm_name, 'apply storage DRS recommendations', 5, 'info') else: # clone the VM/template task = object_ref.Clone(folder_ref, vm_name, clone_spec) - salt.utils.vmware.wait_for_task( - task, vm_name, 'clone', 5, 'info') + salt.utils.vmware.wait_for_task(task, vm_name, 'clone', 5, 'info') else: log.info('Creating {0}'.format(vm_['name'])) @@ -2756,16 +2489,14 @@ def create(vm_): ) return {'Error': err_msg} - new_vm_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, vm_name, container_ref=container_ref) + new_vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, vm_name, container_ref=container_ref) # Find how to power on in CreateVM_Task (if possible), for now this will do if not clone_type and power: task = new_vm_ref.PowerOn() salt.utils.vmware.wait_for_task(task, vm_name, 'power', 5, 'info') - # If it a template or if it does not need to be powered on then do not - # wait for the IP + # If it a template or if it does not need to be powered on then do not wait for the IP if not template and power: ip = _wait_for_ip(new_vm_ref, wait_for_ip_timeout) if ip: @@ -2809,8 +2540,7 @@ def create_datacenter(kwargs=None, call=None): '-f or --function.' 
) - datacenter_name = kwargs.get( - 'name') if kwargs and 'name' in kwargs else None + datacenter_name = kwargs.get('name') if kwargs and 'name' in kwargs else None if not datacenter_name: raise SaltCloudSystemExit( @@ -2823,8 +2553,7 @@ def create_datacenter(kwargs=None, call=None): ) # Check if datacenter already exists - datacenter_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) if datacenter_ref: return {datacenter_name: 'datacenter already exists'} @@ -2871,8 +2600,7 @@ def create_cluster(kwargs=None, call=None): ) cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None - datacenter = kwargs.get( - 'datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None if not cluster_name: raise SaltCloudSystemExit( @@ -2885,16 +2613,14 @@ def create_cluster(kwargs=None, call=None): ) if not isinstance(datacenter, vim.Datacenter): - datacenter = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datacenter, datacenter) + datacenter = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter) if not datacenter: raise SaltCloudSystemExit( 'The specified datacenter does not exist.' 
) # Check if cluster already exists - cluster_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.ClusterComputeResource, cluster_name) + cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster_name) if cluster_ref: return {cluster_name: 'cluster already exists'} @@ -2916,9 +2642,7 @@ def create_cluster(kwargs=None, call=None): ) return False - log.debug( - "Created cluster {0} under datacenter {1}".format( - cluster_name, datacenter.name)) + log.debug("Created cluster {0} under datacenter {1}".format(cluster_name, datacenter.name)) return {cluster_name: 'created'} return False @@ -2949,8 +2673,7 @@ def rescan_hba(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) try: if hba: @@ -3001,8 +2724,7 @@ def upgrade_tools_all(call=None): ret = {} vm_properties = ["name"] - vm_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.VirtualMachine, vm_properties) + vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm['name']] = _upg_tools_helper(vm['object']) @@ -3033,8 +2755,7 @@ def upgrade_tools(name, reboot=False, call=None): '-a or --action.' 
) - vm_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) return _upg_tools_helper(vm_ref, reboot) @@ -3067,12 +2788,12 @@ def list_hosts_by_cluster(kwargs=None, call=None): ) ret = {} - cluster_name = kwargs.get( - 'cluster') if kwargs and 'cluster' in kwargs else None + cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None cluster_properties = ["name"] - cluster_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.ClusterComputeResource, cluster_properties) + cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(), + vim.ClusterComputeResource, + cluster_properties) for cluster in cluster_list: ret[cluster['name']] = [] @@ -3113,12 +2834,10 @@ def list_clusters_by_datacenter(kwargs=None, call=None): ) ret = {} - datacenter_name = kwargs.get( - 'datacenter') if kwargs and 'datacenter' in kwargs else None + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None datacenter_properties = ["name"] - datacenter_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.Datacenter, datacenter_properties) + datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties) for datacenter in datacenter_list: ret[datacenter['name']] = [] @@ -3126,9 +2845,7 @@ def list_clusters_by_datacenter(kwargs=None, call=None): if isinstance(cluster, vim.ClusterComputeResource): ret[datacenter['name']].append(cluster.name) if datacenter_name and datacenter_name == datacenter['name']: - return { - 'Clusters by Datacenter': { - datacenter_name: ret[datacenter_name]}} + return {'Clusters by Datacenter': {datacenter_name: ret[datacenter_name]}} return {'Clusters by Datacenter': ret} @@ -3161,12 +2878,10 @@ def list_hosts_by_datacenter(kwargs=None, call=None): ) ret = {} - datacenter_name = kwargs.get( - 'datacenter') if kwargs and 
'datacenter' in kwargs else None + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None datacenter_properties = ["name"] - datacenter_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.Datacenter, datacenter_properties) + datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties) for datacenter in datacenter_list: ret[datacenter['name']] = [] @@ -3176,9 +2891,7 @@ def list_hosts_by_datacenter(kwargs=None, call=None): if isinstance(host, vim.HostSystem): ret[datacenter['name']].append(host.name) if datacenter_name and datacenter_name == datacenter['name']: - return { - 'Hosts by Datacenter': { - datacenter_name: ret[datacenter_name]}} + return {'Hosts by Datacenter': {datacenter_name: ret[datacenter_name]}} return {'Hosts by Datacenter': ret} @@ -3233,8 +2946,7 @@ def list_hbas(kwargs=None, call=None): ) ret = {} - hba_type = kwargs.get('type').lower( - ) if kwargs and 'type' in kwargs else None + hba_type = kwargs.get('type').lower() if kwargs and 'type' in kwargs else None host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None host_properties = [ "name", @@ -3246,8 +2958,7 @@ def list_hbas(kwargs=None, call=None): 'Specified hba type {0} currently not supported.'.format(hba_type) ) - host_list = salt.utils.vmware.get_mors_with_properties( - _get_si(), vim.HostSystem, host_properties) + host_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.HostSystem, host_properties) for host in host_list: ret[host['name']] = {} @@ -3290,9 +3001,7 @@ def list_dvs(kwargs=None, call=None): '-f or --function.' 
) - return { - 'Distributed Virtual Switches': salt.utils.vmware.list_dvs( - _get_si())} + return {'Distributed Virtual Switches': salt.utils.vmware.list_dvs(_get_si())} def list_vapps(kwargs=None, call=None): @@ -3332,8 +3041,7 @@ def enter_maintenance_mode(kwargs=None, call=None): host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_name or not host_ref: raise SaltCloudSystemExit( @@ -3344,10 +3052,8 @@ def enter_maintenance_mode(kwargs=None, call=None): return {host_name: 'already in maintenance mode'} try: - task = host_ref.EnterMaintenanceMode( - timeout=0, evacuatePoweredOffVms=True) - salt.utils.vmware.wait_for_task( - task, host_name, 'enter maintenance mode') + task = host_ref.EnterMaintenanceMode(timeout=0, evacuatePoweredOffVms=True) + salt.utils.vmware.wait_for_task(task, host_name, 'enter maintenance mode') except Exception as exc: log.error( 'Error while moving host system {0} in maintenance mode: {1}'.format( @@ -3380,8 +3086,7 @@ def exit_maintenance_mode(kwargs=None, call=None): host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_name or not host_ref: raise SaltCloudSystemExit( @@ -3393,8 +3098,7 @@ def exit_maintenance_mode(kwargs=None, call=None): try: task = host_ref.ExitMaintenanceMode(timeout=0) - salt.utils.vmware.wait_for_task( - task, host_name, 'exit maintenance mode') + salt.utils.vmware.wait_for_task(task, host_name, 'exit maintenance mode') except Exception as exc: log.error( 'Error while moving host system {0} out of maintenance mode: {1}'.format( @@ -3458,37 +3162,27 @@ def create_folder(kwargs=None, call=None): 
path_exists = True # Split the path in a list and loop over it to check for its existence - for index, folder_name in enumerate( - os.path.normpath(folder_path.strip('/')).split('/')): + for index, folder_name in enumerate(os.path.normpath(folder_path.strip('/')).split('/')): inventory_path = os.path.join(inventory_path, folder_name) - folder_ref = si.content.searchIndex.FindByInventoryPath( - inventoryPath=inventory_path) + folder_ref = si.content.searchIndex.FindByInventoryPath(inventoryPath=inventory_path) if isinstance(folder_ref, vim.Folder): # This is a folder that exists so just append and skip it - log.debug( - "Path {0}/ exists in the inventory".format(inventory_path)) + log.debug("Path {0}/ exists in the inventory".format(inventory_path)) folder_refs.append(folder_ref) elif isinstance(folder_ref, vim.Datacenter): # This is a datacenter that exists so just append and skip it - log.debug( - "Path {0}/ exists in the inventory".format(inventory_path)) + log.debug("Path {0}/ exists in the inventory".format(inventory_path)) folder_refs.append(folder_ref) else: path_exists = False if not folder_refs: # If this is the first folder, create it under the rootFolder - log.debug( - "Creating folder {0} under rootFolder in the inventory".format(folder_name)) - folder_refs.append( - si.content.rootFolder.CreateFolder(folder_name)) + log.debug("Creating folder {0} under rootFolder in the inventory".format(folder_name)) + folder_refs.append(si.content.rootFolder.CreateFolder(folder_name)) else: # Create the folder under the parent folder - log.debug( - "Creating path {0}/ in the inventory".format(inventory_path)) - folder_refs.append( - folder_refs[ - index - - 1].CreateFolder(folder_name)) + log.debug("Creating path {0}/ in the inventory".format(inventory_path)) + folder_refs.append(folder_refs[index-1].CreateFolder(folder_name)) if path_exists: return {inventory_path: 'specfied path already exists'} @@ -3534,8 +3228,7 @@ def create_snapshot(name, kwargs=None, 
call=None): if kwargs is None: kwargs = {} - snapshot_name = kwargs.get( - 'snapshot_name') if kwargs and 'snapshot_name' in kwargs else None + snapshot_name = kwargs.get('snapshot_name') if kwargs and 'snapshot_name' in kwargs else None if not snapshot_name: raise SaltCloudSystemExit( @@ -3545,27 +3238,23 @@ def create_snapshot(name, kwargs=None, call=None): memdump = _str_to_bool(kwargs.get('memdump', True)) quiesce = _str_to_bool(kwargs.get('quiesce', False)) - vm_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) if vm_ref.summary.runtime.powerState != "poweredOn": - log.debug( - 'VM {0} is not powered on. Setting both memdump and quiesce to False'.format(name)) + log.debug('VM {0} is not powered on. Setting both memdump and quiesce to False'.format(name)) memdump = False quiesce = False if memdump and quiesce: # Either memdump or quiesce should be set to True - log.warning( - 'You can only set either memdump or quiesce to True. Setting quiesce=False') + log.warning('You can only set either memdump or quiesce to True. 
Setting quiesce=False') quiesce = False desc = kwargs.get('description') if 'description' in kwargs else '' try: task = vm_ref.CreateSnapshot(snapshot_name, desc, memdump, quiesce) - salt.utils.vmware.wait_for_task( - task, name, 'create snapshot', 5, 'info') + salt.utils.vmware.wait_for_task(task, name, 'create snapshot', 5, 'info') except Exception as exc: log.error( 'Error while creating snapshot of {0}: {1}'.format( @@ -3577,10 +3266,8 @@ def create_snapshot(name, kwargs=None, call=None): ) return 'failed to create snapshot' - return { - 'Snapshot created successfully': _get_snapshots( - vm_ref.snapshot.rootSnapshotList, - vm_ref.snapshot.currentSnapshot)} + return {'Snapshot created successfully': _get_snapshots(vm_ref.snapshot.rootSnapshotList, + vm_ref.snapshot.currentSnapshot)} def revert_to_snapshot(name, kwargs=None, call=None): @@ -3617,18 +3304,15 @@ def revert_to_snapshot(name, kwargs=None, call=None): suppress_power_on = _str_to_bool(kwargs.get('power_off', False)) - vm_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) if not vm_ref.rootSnapshot: log.error('VM {0} does not contain any current snapshots'.format(name)) return 'revert failed' try: - task = vm_ref.RevertToCurrentSnapshot( - suppressPowerOn=suppress_power_on) - salt.utils.vmware.wait_for_task( - task, name, 'revert to snapshot', 5, 'info') + task = vm_ref.RevertToCurrentSnapshot(suppressPowerOn=suppress_power_on) + salt.utils.vmware.wait_for_task(task, name, 'revert to snapshot', 5, 'info') except Exception as exc: log.error( @@ -3666,16 +3350,13 @@ def remove_all_snapshots(name, kwargs=None, call=None): 'The remove_all_snapshots action must be called with ' '-a or --action.' 
) - connection = _str_to_bool(kwargs.get( - 'merge_snapshots')) if kwargs and 'merge_snapshots' in kwargs else True + connection = _str_to_bool(kwargs.get('merge_snapshots')) if kwargs and 'merge_snapshots' in kwargs else True - vm_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.VirtualMachine, name) + vm_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.VirtualMachine, name) try: task = vm_ref.RemoveAllSnapshots() - salt.utils.vmware.wait_for_task( - task, name, 'remove snapshots', 5, 'info') + salt.utils.vmware.wait_for_task(task, name, 'remove snapshots', 5, 'info') except Exception as exc: log.error( 'Error while removing snapshots on VM {0}: {1}'.format( @@ -3737,26 +3418,18 @@ def add_host(kwargs=None, call=None): ) host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - cluster_name = kwargs.get( - 'cluster') if kwargs and 'cluster' in kwargs else None - datacenter_name = kwargs.get( - 'datacenter') if kwargs and 'datacenter' in kwargs else None + cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None host_user = config.get_cloud_config_value( - 'esxi_host_user', - get_configured_provider(), - __opts__, - search_global=False) + 'esxi_host_user', get_configured_provider(), __opts__, search_global=False + ) host_password = config.get_cloud_config_value( - 'esxi_host_password', - get_configured_provider(), - __opts__, - search_global=False) + 'esxi_host_password', get_configured_provider(), __opts__, search_global=False + ) host_ssl_thumbprint = config.get_cloud_config_value( - 'esxi_host_ssl_thumbprint', - get_configured_provider(), - __opts__, - search_global=False) + 'esxi_host_ssl_thumbprint', get_configured_provider(), __opts__, search_global=False + ) if not host_user: raise SaltCloudSystemExit( @@ -3773,23 +3446,20 @@ def add_host(kwargs=None, call=None): 'You must specify either the IP or DNS name 
of the host system.' ) - if (cluster_name and datacenter_name) or not( - cluster_name or datacenter_name): + if (cluster_name and datacenter_name) or not(cluster_name or datacenter_name): raise SaltCloudSystemExit( 'You must specify either the cluster name or the datacenter name.' ) if cluster_name: - cluster_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.ClusterComputeResource, cluster_name) + cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.ClusterComputeResource, cluster_name) if not cluster_ref: raise SaltCloudSystemExit( 'Specified cluster does not exist.' ) if datacenter_name: - datacenter_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) if not datacenter_ref: raise SaltCloudSystemExit( 'Specified datacenter does not exist.' @@ -3804,34 +3474,15 @@ def add_host(kwargs=None, call=None): if host_ssl_thumbprint: spec.sslThumbprint = host_ssl_thumbprint else: - log.warning( - 'SSL thumbprint has not been specified in provider configuration') + log.warning('SSL thumbprint has not been specified in provider configuration') try: - log.debug( - 'Trying to get the SSL thumbprint directly from the host system') - p1 = subprocess.Popen( - ('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen( - ('openssl', - 's_client', - '-connect', - '{0}:443'.format(host_name)), - stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - p3 = subprocess.Popen( - ('openssl', - 'x509', - '-noout', - '-fingerprint', - '-sha1'), - stdin=p2.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + log.debug('Trying to get the SSL thumbprint directly from the host system') + p1 = subprocess.Popen(('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(('openssl', 's_client', '-connect', '{0}:443'.format(host_name)), stdin=p1.stdout, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p3 = subprocess.Popen(('openssl', 'x509', '-noout', '-fingerprint', '-sha1'), stdin=p2.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out = salt.utils.to_str(p3.stdout.read()) ssl_thumbprint = out.split('=')[-1].strip() - log.debug( - 'SSL thumbprint received from the host system: {0}'.format(ssl_thumbprint)) + log.debug('SSL thumbprint received from the host system: {0}'.format(ssl_thumbprint)) spec.sslThumbprint = ssl_thumbprint except Exception as exc: log.error( @@ -3849,18 +3500,13 @@ def add_host(kwargs=None, call=None): task = cluster_ref.AddHost(spec=spec, asConnected=True) ret = 'added host system to cluster {0}'.format(cluster_name) if datacenter_name: - task = datacenter_ref.hostFolder.AddStandaloneHost( - spec=spec, addConnected=True) + task = datacenter_ref.hostFolder.AddStandaloneHost(spec=spec, addConnected=True) ret = 'added host system to datacenter {0}'.format(datacenter_name) - salt.utils.vmware.wait_for_task( - task, host_name, 'add host system', 5, 'info') + salt.utils.vmware.wait_for_task(task, host_name, 'add host system', 5, 'info') except Exception as exc: if isinstance(exc, vim.fault.SSLVerifyFault): - log.error( - 'Authenticity of the host\'s SSL certificate is not verified') - log.info( - 'Try again after setting the esxi_host_ssl_thumbprint to {0} in provider configuration'.format( - exc.thumbprint)) + log.error('Authenticity of the host\'s SSL certificate is not verified') + log.info('Try again after setting the esxi_host_ssl_thumbprint to {0} in provider configuration'.format(exc.thumbprint)) log.error( 'Error while adding host {0}: {1}'.format( host_name, @@ -3897,8 +3543,7 @@ def remove_host(kwargs=None, call=None): 'You must specify name of the host system.' 
) - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -3911,8 +3556,7 @@ def remove_host(kwargs=None, call=None): else: # This is a host system that is part of a Cluster task = host_ref.Destroy_Task() - salt.utils.vmware.wait_for_task( - task, host_name, 'remove host', log_level='info') + salt.utils.vmware.wait_for_task(task, host_name, 'remove host', log_level='info') except Exception as exc: log.error( 'Error while removing host {0}: {1}'.format( @@ -3950,8 +3594,7 @@ def connect_host(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -3962,8 +3605,7 @@ def connect_host(kwargs=None, call=None): try: task = host_ref.ReconnectHost_Task() - salt.utils.vmware.wait_for_task( - task, host_name, 'connect host', 5, 'info') + salt.utils.vmware.wait_for_task(task, host_name, 'connect host', 5, 'info') except Exception as exc: log.error( 'Error while connecting host {0}: {1}'.format( @@ -4001,8 +3643,7 @@ def disconnect_host(kwargs=None, call=None): 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' 
@@ -4013,8 +3654,7 @@ def disconnect_host(kwargs=None, call=None): try: task = host_ref.DisconnectHost_Task() - salt.utils.vmware.wait_for_task( - task, host_name, 'disconnect host', log_level='info') + salt.utils.vmware.wait_for_task(task, host_name, 'disconnect host', log_level='info') except Exception as exc: log.error( 'Error while disconnecting host {0}: {1}'.format( @@ -4052,16 +3692,14 @@ def reboot_host(kwargs=None, call=None): ) host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - force = _str_to_bool( - kwargs.get('force')) if kwargs and 'force' in kwargs else False + force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False if not host_name: raise SaltCloudSystemExit( 'You must specify name of the host system.' ) - host_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.HostSystem, host_name) + host_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( 'Specified host system does not exist.' @@ -4081,7 +3719,8 @@ def reboot_host(kwargs=None, call=None): raise SaltCloudSystemExit( 'Specified host system is not in maintenance mode. Specify force=True to ' 'force reboot even if there are virtual machines running or other operations ' - 'in progress.') + 'in progress.' + ) try: host_ref.RebootHost_Task(force) @@ -4116,10 +3755,8 @@ def create_datastore_cluster(kwargs=None, call=None): '-f or --function.' 
) - datastore_cluster_name = kwargs.get( - 'name') if kwargs and 'name' in kwargs else None - datacenter_name = kwargs.get( - 'datacenter') if kwargs and 'datacenter' in kwargs else None + datastore_cluster_name = kwargs.get('name') if kwargs and 'name' in kwargs else None + datacenter_name = kwargs.get('datacenter') if kwargs and 'datacenter' in kwargs else None if not datastore_cluster_name: raise SaltCloudSystemExit( @@ -4137,21 +3774,18 @@ def create_datastore_cluster(kwargs=None, call=None): ) # Check if datastore cluster already exists - datastore_cluster_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.StoragePod, datastore_cluster_name) + datastore_cluster_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.StoragePod, datastore_cluster_name) if datastore_cluster_ref: return {datastore_cluster_name: 'datastore cluster already exists'} - datacenter_ref = salt.utils.vmware.get_mor_by_property( - _get_si(), vim.Datacenter, datacenter_name) + datacenter_ref = salt.utils.vmware.get_mor_by_property(_get_si(), vim.Datacenter, datacenter_name) if not datacenter_ref: raise SaltCloudSystemExit( 'The specified datacenter does not exist.' ) try: - datacenter_ref.datastoreFolder.CreateStoragePod( - name=datastore_cluster_name) + datacenter_ref.datastoreFolder.CreateStoragePod(name=datastore_cluster_name) except Exception as exc: log.error( 'Error creating datastore cluster {0}: {1}'.format( From f56a551a222f2ee4dea45abc5d4f89cc4ed10396 Mon Sep 17 00:00:00 2001 From: kstreee Date: Wed, 17 Feb 2016 22:30:50 +0900 Subject: [PATCH 09/65] [saltstack/salt#31194] Fix issue of running multiple salt-api instances. - Change return value from 'True' to '__virtualname__' in rest_cherrypy/__init__.py The __virtualname__ value must be set as name of salt-api instance, thus the code try to get the instance name from __init__.py's directory name. - Change setting __virtualname__ logic because the rest_tornado has a same issue with the rest_cherrypy. 
- Typo, 'orig_sesion' to 'orig_session' in rest_cherrypy/app.py --- salt/netapi/rest_cherrypy/__init__.py | 12 ++++++++---- salt/netapi/rest_cherrypy/app.py | 8 ++++---- salt/netapi/rest_tornado/__init__.py | 6 +++++- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/salt/netapi/rest_cherrypy/__init__.py b/salt/netapi/rest_cherrypy/__init__.py index 79b249bd7b..157546bf0c 100644 --- a/salt/netapi/rest_cherrypy/__init__.py +++ b/salt/netapi/rest_cherrypy/__init__.py @@ -20,10 +20,14 @@ try: except ImportError as exc: cpy_error = exc -logger = logging.getLogger(__name__) -cpy_min = '3.2.2' -__virtualname__ = 'rest' +try: + __virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] +except IndexError: + __virtualname__ = 'rest_cherrypy' + +logger = logging.getLogger(__virtualname__) +cpy_min = '3.2.2' def __virtual__(): @@ -36,7 +40,7 @@ def __virtual__(): # Everything looks good; return the module name if not cpy_error and 'port' in mod_opts: - return True + return __virtualname__ # CherryPy wasn't imported; explain why if cpy_error: diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py index a276f06657..c5c037c7fd 100644 --- a/salt/netapi/rest_cherrypy/app.py +++ b/salt/netapi/rest_cherrypy/app.py @@ -1724,9 +1724,9 @@ class Events(object): # First check if the given token is in our session table; if so it's a # salt-api token and we need to get the Salt token from there. - orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None)) + orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None)) # If it's not in the session table, assume it's a regular Salt token. 
- salt_token = orig_sesion.get('token', auth_token) + salt_token = orig_session.get('token', auth_token) # The eauth system does not currently support perms for the event # stream, so we're just checking if the token exists not if the token @@ -2011,8 +2011,8 @@ class WebsocketEndpoint(object): # Pulling the session token from an URL param is a workaround for # browsers not supporting CORS in the EventSource API. if token: - orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None)) - salt_token = orig_sesion.get('token') + orig_session, _ = cherrypy.session.cache.get(token, ({}, None)) + salt_token = orig_session.get('token') else: salt_token = cherrypy.session.get('token') diff --git a/salt/netapi/rest_tornado/__init__.py b/salt/netapi/rest_tornado/__init__.py index b4e7f43cda..85992e87ee 100644 --- a/salt/netapi/rest_tornado/__init__.py +++ b/salt/netapi/rest_tornado/__init__.py @@ -3,9 +3,13 @@ from __future__ import absolute_import, print_function import hashlib import logging +import os import distutils.version # pylint: disable=no-name-in-module -__virtualname__ = 'rest_tornado' +try: + __virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] +except IndexError: + __virtualname__ = 'rest_tornado' logger = logging.getLogger(__virtualname__) From d7c2bdaa33ed1587143a81af8065ff7017f9b8e5 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 10:52:38 -0600 Subject: [PATCH 10/65] Don't let marker_flag argument eat positional arguments --- salt/utils/boto3.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/utils/boto3.py b/salt/utils/boto3.py index 7accdd1f91..d1692d9d31 100644 --- a/salt/utils/boto3.py +++ b/salt/utils/boto3.py @@ -287,10 +287,12 @@ def assign_funcs(modname, service, module=None): setattr(mod, '_exactly_one', exactly_one) -def paged_call(function, marker_flag='NextMarker', marker_arg='Marker', *args, **kwargs): +def paged_call(function, *args, **kwargs): """Retrieve full set of values from a boto3 API 
call that may truncate
     its results, yielding each page as it is obtained.
     """
+    marker_flag = kwargs.pop('marker_flag','NextMarker')
+    marker_arg = kwargs.pop('marker_arg','Marker')
     while True:
         ret = function(*args, **kwargs)
         marker = ret.get(marker_flag)

From 5df36a33b7cf80834ee03bb015aa83e730f14b1b Mon Sep 17 00:00:00 2001
From: Kris Raney
Date: Thu, 18 Feb 2016 10:52:51 -0600
Subject: [PATCH 11/65] paged_call for boto 2

---
 salt/utils/boto.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/salt/utils/boto.py b/salt/utils/boto.py
index 557c57130e..8cc7285295 100644
--- a/salt/utils/boto.py
+++ b/salt/utils/boto.py
@@ -265,3 +265,18 @@ def assign_funcs(modname, service, module=None, pack=None):
     # TODO: Remove this and import salt.utils.exactly_one into boto_* modules instead
     # Leaving this way for now so boto modules can be back ported
     setattr(mod, '_exactly_one', exactly_one)
+
+
+def paged_call(function, *args, **kwargs):
+    """Retrieve full set of values from a boto API call that may truncate
+    its results, yielding each page as it is obtained.
+ """ + marker_flag = kwargs.pop('marker_flag','marker') + marker_arg = kwargs.pop('marker_flag','marker') + while True: + ret = function(*args, **kwargs) + marker = ret.get(marker_flag) + yield ret + if not marker: + break + kwargs[marker_arg] = marker From 7c78f661191ff8c53050e430d1006d7f46f26a4a Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 10:53:47 -0600 Subject: [PATCH 12/65] Module functions for dealing with managed policies --- salt/modules/boto_iam.py | 477 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 477 insertions(+) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index 71a91bc2fb..c0705c4aa8 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -42,10 +42,12 @@ from __future__ import absolute_import import logging import json import yaml +import urllib # Import salt libs import salt.utils.compat import salt.utils.odict as odict +import salt.utils.boto # Import third party libs # pylint: disable=unused-import @@ -1485,3 +1487,478 @@ def export_users(path_prefix='/', region=None, key=None, keyid=None, user_sls.append({"path": user.path}) results["manage user " + name] = {"boto_iam.user_present": user_sls} return _safe_dump(results) + + +def _get_policy_arn(name, region=None, key=None, keyid=None, profile=None): + if name.startswith('arn:aws:iam:'): + return name + + account_id = get_account_id( + region=region, key=key, keyid=keyid, profile=profile + ) + return 'arn:aws:iam::{0}:policy/{1}'.format(account_id, name) + + +def policy_exists(policy_name, + region=None, key=None, keyid=None, profile=None): + ''' + Check to see if policy exists. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.instance_profile_exists myiprofile + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + try: + conn.get_policy(_get_policy_arn(policy_name)) + return True + except boto.exception.BotoServerError: + return False + + +def get_policy(policy_name, + region=None, key=None, keyid=None, profile=None): + ''' + Check to see if policy exists. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.instance_profile_exists myiprofile + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + try: + ret = conn.get_policy(_get_policy_arn(policy_name)) + return ret.get('get_policy_response',{}).get('get_policy_result',{}) + except boto.exception.BotoServerError: + return None + + +def create_policy(policy_name, policy_document, path=None, description=None, + region=None, key=None, keyid=None, profile=None): + ''' + Create a policy. + + CLI Example: + + .. code-block:: bash + + salt myminios boto_iam.create_policy mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]},]}' + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + if not isinstance(policy_document, string_types): + policy_document = json.dumps(policy_document) + params = {} + for arg in 'path', 'description': + if locals()[arg] is not None: + params[arg] = locals()[arg] + log.debug(policy_document) + if policy_exists(policy_name, region, key, keyid, profile): + return True + try: + conn.create_policy(policy_name, policy_document, **params) + log.info('Created {0} policy.'.format(policy_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to create {0} policy.' + log.error(msg.format(policy_name)) + return False + return True + + +def delete_policy(policy_name, + region=None, key=None, keyid=None, profile=None): + ''' + Delete a policy. 
+ + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.delete_policy mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + if not policy_exists(policy_arn, region, key, keyid, profile): + return True + try: + conn.delete_policy(policy_arn) + log.info('Deleted {0} policy.'.format(policy_name)) + except boto.exception.BotoServerError as e: + aws = salt.utils.boto.get_error(e) + log.debug(aws) + msg = 'Failed to delete {0} policy: {1}.' + log.error(msg.format(policy_name, aws.get('message'))) + return False + return True + + +def list_policies(region=None, key=None, keyid=None, profile=None): + ''' + List policies. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.list_policies + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + try: + policies = [] + for ret in salt.utils.boto.paged_call(conn.list_policies): + policies.append(ret.get('list_policies_response',{}).get('list_policies_result',{}).get('policies')) + return policies + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list policy versions.' + log.error(msg) + return [] + + +def policy_version_exists(policy_name, version_id, + region=None, key=None, keyid=None, profile=None): + ''' + Check to see if policy exists. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.instance_profile_exists myiprofile + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.get_policy_version(policy_arn, version_id) + return True + except boto.exception.BotoServerError: + return False + + +def get_policy_version(policy_name, version_id, + region=None, key=None, keyid=None, profile=None): + ''' + Check to see if policy exists. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.instance_profile_exists myiprofile + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + try: + ret = conn.get_policy_version(_get_policy_arn(policy_name), version_id) + retval = ret.get('get_policy_version_response',{}).get('get_policy_version_result',{}).get('policy_version',{}) + retval['document'] = urllib.unquote(retval.get('document')) + return { 'policy_version': retval } + except boto.exception.BotoServerError: + return None + + +def create_policy_version(policy_name, policy_document, set_as_default=None, + region=None, key=None, keyid=None, profile=None): + ''' + Create a policy version. + + CLI Example: + + .. code-block:: bash + + salt myminios boto_iam.create_policy_version mypolicy '{"Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": ["arn:aws:s3:::my-bucket/shared/*"]},]}' + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + if not isinstance(policy_document, string_types): + policy_document = json.dumps(policy_document) + params = {} + for arg in ('set_as_default',): + if locals()[arg] is not None: + params[arg] = locals()[arg] + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + ret = conn.create_policy_version(policy_arn, policy_document, **params) + vid = ret.get('create_policy_version_response',{}).get('create_policy_version_result',{}).get('policy_version',{}).get('version_id') + log.debug(ret) + log.info('Created {0} policy version {1}.'.format(policy_name, vid)) + return {'created': True, 'version_id': vid} + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to create {0} policy version.' + log.error(msg.format(policy_name)) + return {'created': False, 'error': salt.utils.boto.get_error(e)} + + +def delete_policy_version(policy_name, version_id, + region=None, key=None, keyid=None, profile=None): + ''' + Delete a policy version. 
+ + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.delete_policy_version mypolicy v1 + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + if not policy_version_exists(policy_arn, version_id, region, key, keyid, profile): + return True + try: + conn.delete_policy_version(policy_arn, version_id) + log.info('Deleted {0} policy version {1}.'.format(policy_name, version_id)) + except boto.exception.BotoServerError as e: + aws = salt.utils.boto.get_error(e) + log.debug(aws) + msg = 'Failed to delete {0} policy version {1}: {2}' + log.error(msg.format(policy_name, version_id, aws.get('message'))) + return False + return True + + +def list_policy_versions(policy_name, + region=None, key=None, keyid=None, profile=None): + ''' + List versions of a policy. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.list_policy_versions mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + ret = conn.list_policy_versions(policy_arn) + return ret.get('list_policy_versions_response',{}).get('list_policy_versions_result',{}).get('versions') + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list {0} policy vesions.' + log.error(msg.format(policy_name)) + return [] + + +def set_default_policy_version(policy_name, version_id, + region=None, key=None, keyid=None, profile=None): + ''' + Set the default version of a policy. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.set_default_policy_version mypolicy v1 + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.set_default_policy_version(policy_arn, version_id) + log.info('Set {0} policy to version {1}.'.format(policy_name, version_id)) + except boto.exception.BotoServerError as e: + aws = salt.utils.boto.get_error(e) + log.debug(aws) + msg = 'Failed to set {0} policy to version {1}: {2}' + log.error(msg.format(policy_name, version_id, aws.get('message'))) + return False + return True + + +def attach_user_policy(policy_name, user_name, + region=None, key=None, keyid=None, profile=None): + ''' + Attach a managed policy to a user. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.attach_user_policy mypolicy myuser + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.attach_user_policy(policy_arn, user_name) + log.info('Attached {0} policy to user {1}.'.format(policy_name, user_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to attach {0} policy to user {1}.' + log.error(msg.format(policy_name, user_name)) + return False + return True + + +def detach_user_policy(policy_name, user_name, + region=None, key=None, keyid=None, profile=None): + ''' + Detach a managed policy to a user. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.detach_user_policy mypolicy myuser + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.detach_user_policy(policy_arn, user_name) + log.info('Detached {0} policy to user {1}.'.format(policy_name, user_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to detach {0} policy to user {1}.' + log.error(msg.format(policy_name, user_name)) + return False + return True + + +def attach_group_policy(policy_name, group_name, + region=None, key=None, keyid=None, profile=None): + ''' + Attach a managed policy to a group. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.attach_group_policy mypolicy mygroup + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.attach_group_policy(policy_arn, group_name) + log.info('Attached {0} policy to group {1}.'.format(policy_name, group_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to attach {0} policy to group {1}.' + log.error(msg.format(policy_name, group_name)) + return False + return True + + +def detach_group_policy(policy_name, group_name, + region=None, key=None, keyid=None, profile=None): + ''' + Detach a managed policy to a group. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.detach_group_policy mypolicy mygroup + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.detach_group_policy(policy_arn, group_name) + log.info('Detached {0} policy to group {1}.'.format(policy_name, group_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to detach {0} policy to group {1}.' 
+ log.error(msg.format(policy_name, group_name)) + return False + return True + + +def attach_role_policy(policy_name, role_name, + region=None, key=None, keyid=None, profile=None): + ''' + Attach a managed policy to a role. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.attach_role_policy mypolicy myrole + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.attach_role_policy(policy_arn, role_name) + log.info('Attached {0} policy to role {1}.'.format(policy_name, role_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to attach {0} policy to role {1}.' + log.error(msg.format(policy_name, role_name)) + return False + return True + + +def detach_role_policy(policy_name, role_name, + region=None, key=None, keyid=None, profile=None): + ''' + Detach a managed policy to a role. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.detach_role_policy mypolicy myrole + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + conn.detach_role_policy(policy_arn, role_name) + log.info('Detached {0} policy to role {1}.'.format(policy_name, role_name)) + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to detach {0} policy to role {1}.' + log.error(msg.format(policy_name, role_name)) + return False + return True + + +def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, + region=None, key=None, keyid=None, profile=None): + ''' + List entities that a policy is attached to. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.list_entities_for_policy mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + params = {} + for arg in ('path_prefix', 'entity_filter'): + if locals()[arg] is not None: + params[arg] = locals()[arg] + + policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) + try: + allret = { + 'policy_groups': [], + 'policy_users': [], + 'policy_roles': [], + } + for ret in salt.utils.boto.paged_call(conn.list_entities_for_policy, policy_arn=policy_arn, **params): + for k, v in allret.iteritems(): + v.extend(ret.get('list_entities_for_policy_response',{}).get('list_entities_for_policy_result',{}).get(k)) + return allret + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list {0} policy entities.' + log.error(msg.format(policy_name)) + return [] From 53ccf50bdcf18dc82d6988105597c59f07b86591 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 10:55:21 -0600 Subject: [PATCH 13/65] Add state calls for managing managed policies --- salt/states/boto_iam.py | 167 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index 3f4b0495e1..0ea8a7d86e 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -110,6 +110,16 @@ passed in as a dict, or as a string to pull from pillars or minion config: - region: eu-west-1 - keyid: 'AKIAJHTMIQ2ASDFLASDF' - key: 'fdkjsafkljsASSADFalkfjasdf' + +.. 
code-block:: yaml + + create policy: + boto_iam.policy_present: + - name: myname + - policy_document: '{"MyPolicy": "Statement": [{"Action": ["sqs:*"], "Effect": "Allow", "Resource": ["arn:aws:sqs:*:*:*"], "Sid": "MyPolicySqs1"}]}' + - region: eu-west-1 + - keyid: 'AKIAJHTMIQ2ASDFLASDF' + - key: 'fdkjsafkljsASSADFalkfjasdf' ''' # Import Python Libs @@ -963,6 +973,163 @@ def server_cert_present(name, public_key, private_key, cert_chain=None, path=Non return ret +def policy_present(name, policy_document, path=None, description=None, + region=None, key=None, keyid=None, profile=None): + ''' + + .. versionadded:: 2015.8.0 + + Ensure the IAM managed policy is present + + name (string) + The name of the new policy. + + policy_document (dict) + The document of the new policy + + path (string) + The path in which the policy will be created. Default is '/'. + + description (string) + Description + + region (string) + Region to connect to. + + key (string) + Secret key to be used. + + keyid (string) + Access key to be used. + + profile (dict) + A dict with region, key and keyid, or a pillar key (string) + that contains a dict with region, key and keyid. 
+ ''' + ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} + policy = __salt__['boto_iam.get_policy'](name, region, key, keyid, profile) + if not policy: + if __opts__['test']: + ret['comment'] = 'IAM policy {0} is set to be created.'.format(name) + ret['result'] = None + return ret + created = __salt__['boto_iam.create_policy'](name, policy_document, path, description, region, key, keyid, profile) + if created: + ret['changes']['policy'] = created + ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} has been created.'.format(name)]) + else: + policy = policy.get('policy',{}) + log.debug(policy) + ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} is present.'.format(name)]) + _describe = __salt__['boto_iam.get_policy_version'](name, policy.get('default_version_id'), + region, key, keyid, profile).get('policy_version',{}) + if isinstance(_describe['document'], string_types): + describeDict = json.loads(_describe['document']) + else: + describeDict = _describe['document'] + + if isinstance(policy_document, string_types): + log.debug(policy_document) + policy_document = json.loads(policy_document) + + r = salt.utils.compare_dicts(describeDict, policy_document) + + if bool(r): + if __opts__['test']: + msg = 'Policy {0} set to be modified.'.format(policyName) + ret['comment'] = msg + ret['result'] = None + return ret + + ret['comment'] = os.linesep.join([ret['comment'], 'Policy to be modified']) + policy_document = json.dumps(policy_document) + + r = __salt__['boto_iam.create_policy_version'](policy_name=name, + policy_document=policy_document, + set_as_default=True, + region=region, key=key, + keyid=keyid, profile=profile) + if not r.get('created'): + ret['result'] = False + ret['comment'] = 'Failed to update policy: {0}.'.format(r['error']['message']) + ret['changes'] = {} + return ret + + __salt__['boto_iam.delete_policy_version'](policy_name=name, + version_id=policy['default_version_id'], + region=region, key=key, + 
keyid=keyid, profile=profile) + + ret['changes'].setdefault('new', {})['document'] = policy_document + ret['changes'].setdefault('old', {})['document'] = _describe['document'] + return ret + + +def policy_absent(name, + region=None, key=None, keyid=None, profile=None): + ''' + + .. versionadded:: 2015.8.0 + + Ensure the IAM managed policy with the specified name is absent + + name (string) + The name of the new policy. + + region (string) + Region to connect to. + + key (string) + Secret key to be used. + + keyid (string) + Access key to be used. + + profile (dict) + A dict with region, key and keyid, or a pillar key (string) + that contains a dict with region, key and keyid. + ''' + ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} + + r = __salt__['boto_iam.policy_exists'](name, + region=region, key=key, keyid=keyid, profile=profile) + if not r: + ret['comment'] = 'Policy {0} does not exist.'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Policy {0} is set to be removed.'.format(name) + ret['result'] = None + return ret + # delete non-default versions + versions = __salt__['boto_iam.list_policy_versions'](name, + region=region, key=key, + keyid=keyid, profile=profile) + if versions: + for version in versions: + if version.get('is_default_version', False): + continue + r = __salt__['boto_iam.delete_policy_version'](name, + version_id=version.get('version_id'), + region=region, key=key, + keyid=keyid, profile=profile) + if not r: + ret['result'] = False + ret['comment'] = 'Failed to delete policy {0}.'.format(name) + return ret + r = __salt__['boto_iam.delete_policy'](name, + region=region, key=key, + keyid=keyid, profile=profile) + if not r: + ret['result'] = False + ret['comment'] = 'Failed to delete policy {0}.'.format(name) + return ret + ret['changes']['old'] = {'policy': name} + ret['changes']['new'] = {'policy': None} + ret['comment'] = 'Policy {0} deleted.'.format(name) + return ret + + def _get_error(error): # 
Converts boto exception to string that can be used to output error. error = '\n'.join(error.split('\n')[1:]) From 10ae5922335aaa4cf83c4526c97059a2e6d6353d Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:05:43 -0600 Subject: [PATCH 14/65] Add the ability to delete a group --- salt/modules/boto_iam.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index c0705c4aa8..bbb19f464c 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -660,6 +660,37 @@ def get_all_group_policies(group_name, region=None, key=None, keyid=None, return [] +def delete_group(group_name, region=None, key=None, + keyid=None, profile=None): + ''' + Delete a group policy. + + CLI Example:: + + .. code-block:: bash + + salt myminion boto_iam.delete_group mygroup + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + if not conn: + return False + _group = get_group( + group_name, region, key, keyid, profile + ) + if not _group: + return True + try: + conn.delete_group(group_name) + msg = 'Successfully deleted group {0}.' + log.info(msg.format(group_name)) + return True + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to delete group {0}.' 
+ log.error(msg.format(group_name)) + return False + + def create_login_profile(user_name, password, region=None, key=None, keyid=None, profile=None): ''' From ac3db29192904fa3a494b2f2210179284eb1b391 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:06:11 -0600 Subject: [PATCH 15/65] Fix an unrelated bug in an error path --- salt/modules/boto_iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index bbb19f464c..eac7a0bd74 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -787,7 +787,7 @@ def get_all_mfa_devices(user_name, region=None, key=None, keyid=None, log.info('Could not find user {0}.'.format(user_name)) return [] msg = 'Failed to get all MFA devices for user {0}.' - log.error(msg.format(user_name, serial)) + log.error(msg.format(user_name)) return False From 81d0cc88fb5d1eeeb5523db9a03771b17e939912 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:06:57 -0600 Subject: [PATCH 16/65] Add functions to list attached policies --- salt/modules/boto_iam.py | 96 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index eac7a0bd74..1233b6963c 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -1992,4 +1992,100 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, log.debug(e) msg = 'Failed to list {0} policy entities.' log.error(msg.format(policy_name)) + return {} + + +def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None, + region=None, key=None, keyid=None, profile=None): + ''' + List entities attached to the given user. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_iam.list_entities_for_policy mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + params = {'UserName': user_name} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + + policies = [] + try: + # Using conn.get_response is a bit of a hack, but it avoids having to + # rewrite this whole module based on boto3 + for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedUserPolicies', params, list_marker='AttachedPolicies'): + policies.extend(ret.get('list_attached_user_policies_response',{}).get('list_attached_user_policies_result',{} + ).get('attached_policies',[])) + return policies + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list user {0} attached policies.' + log.error(msg.format(user_name)) + return [] + + +def list_attached_group_policies(group_name, path_prefix=None, entity_filter=None, + region=None, key=None, keyid=None, profile=None): + ''' + List entities attached to the given group. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.list_entities_for_policy mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + params = {'GroupName': group_name} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + + policies = [] + try: + # Using conn.get_response is a bit of a hack, but it avoids having to + # rewrite this whole module based on boto3 + for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedGroupPolicies', params, list_marker='AttachedPolicies'): + policies.extend(ret.get('list_attached_group_policies_response',{}).get('list_attached_group_policies_result',{} + ).get('attached_policies',[])) + return policies + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list group {0} attached policies.' 
+ log.error(msg.format(group_name)) + return [] + + +def list_attached_role_policies(role_name, path_prefix=None, entity_filter=None, + region=None, key=None, keyid=None, profile=None): + ''' + List entities attached to the given role. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_iam.list_entities_for_policy mypolicy + ''' + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + params = {'RoleName': role_name} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + + policies = [] + try: + # Using conn.get_response is a bit of a hack, but it avoids having to + # rewrite this whole module based on boto3 + for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedRolePolicies', params, list_marker='AttachedPolicies'): + policies.extend(ret.get('list_attached_role_policies_response',{}).get('list_attached_role_policies_result',{} + ).get('attached_policies',[])) + return policies + except boto.exception.BotoServerError as e: + log.debug(e) + msg = 'Failed to list role {0} attached policies.' 
+ log.error(msg.format(role_name)) return [] From b1a746b4163484e5a64ce45b13d06b64c2701892 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:07:38 -0600 Subject: [PATCH 17/65] Fix an unrelated bug --- salt/states/boto_iam.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index 0ea8a7d86e..cca2822f40 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -220,15 +220,16 @@ def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile= # delete the user's MFA tokens if delete_mfa_devices: devices = __salt__['boto_iam.get_all_mfa_devices'](user_name=name, region=region, key=key, keyid=keyid, profile=profile) - for d in devices: - serial = d['serial_number'] - if __opts__['test']: - ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} MFA device {1} is set to be deleted.'.format(name, serial)]) - ret['result'] = None - else: - mfa_deleted = __salt__['boto_iam.deactivate_mfa_device'](user_name=name, serial=serial, region=region, key=key, keyid=keyid, profile=profile) - if mfa_deleted: - ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} MFA device {1} are deleted.'.format(name, serial)]) + if devices: + for d in devices: + serial = d['serial_number'] + if __opts__['test']: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} MFA device {1} is set to be deleted.'.format(name, serial)]) + ret['result'] = None + else: + mfa_deleted = __salt__['boto_iam.deactivate_mfa_device'](user_name=name, serial=serial, region=region, key=key, keyid=keyid, profile=profile) + if mfa_deleted: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} MFA device {1} are deleted.'.format(name, serial)]) # delete the user's login profile if delete_profile: if __opts__['test']: From 53c5828308510214399ed3022fcaf643ad436f06 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:08:16 -0600 Subject: 
[PATCH 18/65] Add ability to set managed policies on users, roles, and groups --- salt/states/boto_iam.py | 341 ++++++++++++++++++++++++++++++++++- salt/states/boto_iam_role.py | 140 ++++++++++++++ 2 files changed, 478 insertions(+), 3 deletions(-) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index cca2822f40..c663a81885 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -239,6 +239,16 @@ def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile= profile_deleted = __salt__['boto_iam.delete_login_profile'](name, region, key, keyid, profile) if profile_deleted: ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} login profile is deleted.'.format(name)]) + if __opts__['test']: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} policies are set to be deleted.'.format(name)]) + ret['result'] = None + else: + _ret = _user_policies_detached(name, region, key, keyid, profile) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + if ret['result'] is False: + return ret # finally, actually delete the user if __opts__['test']: ret['comment'] = os.linesep.join([ret['comment'], 'IAM user {0} is set to be deleted.'.format(name)]) @@ -407,8 +417,8 @@ def _delete_key(ret, access_key_id, user_name, region=None, key=None, keyid=None return ret -def user_present(name, policies=None, policies_from_pillars=None, password=None, path=None, region=None, key=None, - keyid=None, profile=None): +def user_present(name, policies=None, policies_from_pillars=None, managed_policies=None, password=None, path=None, + region=None, key=None, keyid=None, profile=None): ''' .. versionadded:: 2015.8.0 @@ -430,6 +440,10 @@ def user_present(name, policies=None, policies_from_pillars=None, password=None, in the policies argument will override the keys defined in policies_from_pillars. 
+ managed_policies (list) + A list of managed policy names or ARNs that should be attached to this + user. + password (string) The password for the new user. Must comply with account policy. @@ -456,6 +470,8 @@ def user_present(name, policies=None, policies_from_pillars=None, password=None, policies = {} if not policies_from_pillars: policies_from_pillars = [] + if not managed_policies: + managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) @@ -483,6 +499,12 @@ def user_present(name, policies=None, policies_from_pillars=None, password=None, _ret = _user_policies_present(name, _policies, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + _ret = _user_policies_attached(name, managed_policies, region, key, keyid, profile) + ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + return ret return ret @@ -552,6 +574,126 @@ def _user_policies_present(name, policies=None, region=None, key=None, keyid=Non return ret +def _user_policies_attached( + name, + managed_policies=None, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + policies_to_attach = [] + policies_to_detach = [] + for policy in managed_policies or []: + entities = __salt__['boto_iam.list_entities_for_policy'](policy, + entity_filter='User', + region=region, key=key, keyid=keyid, + profile=profile) + if {'user_name': name} not in entities.get('policy_users'): + policies_to_attach.append(policy) + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, key, keyid, + profile) + oldpolicies = [x.get('policy_arn') for x in _list] + for policy_data in _list: + if policy_data.get('policy_name') not in managed_policies \ + and 
policy_data.get('policy_arn') not in managed_policies: + policies_to_detach.append(policy_data.get('policy_arn')) + if policies_to_attach or policies_to_detach: + _to_modify = list(policies_to_detach) + _to_modify.extend(policies_to_attach) + if __opts__['test']: + msg = '{0} policies to be modified on user {1}.' + ret['comment'] = msg.format(', '.join(_to_modify), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_name in policies_to_attach: + policy_set = __salt__['boto_iam.attach_user_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_set: + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'manged_policies': newpolicies} + ret['result'] = False + msg = 'Failed to add policy {0} to user {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + for policy_name in policies_to_detach: + policy_unset = __salt__['boto_iam.detach_user_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to remove policy {0} from user {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + log.debug(newpolicies) + ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies modified on user {1}.' 
+ ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + +def _user_policies_detached( + name, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + _list = __salt__['boto_iam.list_attached_user_policies'](user_name=name, + region=region, key=key, keyid=keyid, profile=profile) + oldpolicies = [x.get('policy_arn') for x in _list] + if not _list: + msg = 'No attached policies in user {0}.'.format(name) + ret['comment'] = msg + return ret + if __opts__['test']: + msg = '{0} policies to be detached from user {1}.' + ret['comment'] = msg.format(', '.join(oldpolicies), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_arn in oldpolicies: + policy_unset = __salt__['boto_iam.detach_user_policy'](policy_arn, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to detach {0} from user {1}' + ret['comment'] = msg.format(policy_arn, name) + return ret + _list = __salt__['boto_iam.list_attached_user_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies detached from user {1}.' 
+ ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + def _case_password(ret, name, password, region=None, key=None, keyid=None, profile=None): if __opts__['test']: ret['comment'] = 'Login policy for {0} is set to be changed.'.format(name) @@ -571,7 +713,69 @@ def _case_password(ret, name, password, region=None, key=None, keyid=None, profi return ret -def group_present(name, policies=None, policies_from_pillars=None, users=None, path='/', region=None, key=None, keyid=None, profile=None): +def group_absent(name, region=None, key=None, keyid=None, profile=None): + ''' + + .. versionadded:: 2015.8.0 + + Ensure the IAM group is absent. + + name (string) + The name of the group. + + region (string) + Region to connect to. + + key (string) + Secret key to be used. + + keyid (string) + Access key to be used. + + profile (dict) + A dict with region, key and keyid, or a pillar key (string) + that contains a dict with region, key and keyid. + ''' + ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} + if not __salt__['boto_iam.get_group'](name, region, key, keyid, profile): + ret['result'] = True + ret['comment'] = 'IAM Group {0} does not exist.'.format(name) + return ret + if __opts__['test']: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM group {0} policies are set to be deleted.'.format(name)]) + ret['result'] = None + else: + _ret = _group_policies_detached(name, region, key, keyid, profile) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + if ret['result'] is False: + return ret + ret['comment'] = os.linesep.join([ret['comment'], 'IAM group {0} users are set to be removed.'.format(name)]) + existing_users = __salt__['boto_iam.get_group_members'](group_name=name, region=region, key=key, keyid=keyid, profile=profile) + ret = _case_group(ret, [], name, existing_users, region, key, keyid, profile) + ret['changes'] = dictupdate.update(ret['changes'], 
_ret['changes']) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + return ret + # finally, actually delete the group + if __opts__['test']: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM group {0} is set to be deleted.'.format(name)]) + ret['result'] = None + return ret + deleted = __salt__['boto_iam.delete_group'](name, region, key, keyid, profile) + if deleted is True: + ret['comment'] = os.linesep.join([ret['comment'], 'IAM group {0} is deleted.'.format(name)]) + ret['result'] = True + ret['changes']['deleted'] = name + return ret + ret['comment'] = 'IAM group {0} could not be deleted.\n {1}'.format(name, deleted) + ret['result'] = False + return ret + + +def group_present(name, policies=None, policies_from_pillars=None, managed_policies=None, users=None, path='/', region=None, key=None, keyid=None, profile=None): ''' .. versionadded:: 2015.8.0 @@ -596,6 +800,9 @@ def group_present(name, policies=None, policies_from_pillars=None, users=None, p in the policies argument will override the keys defined in policies_from_pillars. + managed_policies (list) + A list of policy names or ARNs that should be attached to this group. + users (list) A list of users to be added to the group. 
@@ -617,6 +824,8 @@ def group_present(name, policies=None, policies_from_pillars=None, users=None, p policies = {} if not policies_from_pillars: policies_from_pillars = [] + if not managed_policies: + managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) @@ -643,6 +852,12 @@ def group_present(name, policies=None, policies_from_pillars=None, users=None, p ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + return ret + _ret = _group_policies_attached(name, managed_policies, region, key, keyid, profile) + ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret @@ -758,6 +973,126 @@ def _group_policies_present( return ret +def _group_policies_attached( + name, + managed_policies=None, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + policies_to_attach = [] + policies_to_detach = [] + for policy in managed_policies or []: + entities = __salt__['boto_iam.list_entities_for_policy'](policy, + entity_filter='Group', + region=region, key=key, keyid=keyid, + profile=profile) + if {'group_name': name} not in entities.get('policy_groups'): + policies_to_attach.append(policy) + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, key, keyid, + profile) + oldpolicies = [x.get('policy_arn') for x in _list] + for policy_data in _list: + if policy_data.get('policy_name') not in managed_policies \ + and policy_data.get('policy_arn') not in managed_policies: + policies_to_detach.append(policy_data.get('policy_arn')) + if policies_to_attach or policies_to_detach: + _to_modify = list(policies_to_detach) + _to_modify.extend(policies_to_attach) + if __opts__['test']: + 
msg = '{0} policies to be modified on group {1}.' + ret['comment'] = msg.format(', '.join(_to_modify), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_name in policies_to_attach: + policy_set = __salt__['boto_iam.attach_group_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_set: + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to add policy {0} to group {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + for policy_name in policies_to_detach: + policy_unset = __salt__['boto_iam.detach_group_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to remove policy {0} from group {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + log.debug(newpolicies) + ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies modified on group {1}.' 
+ ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + +def _group_policies_detached( + name, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + _list = __salt__['boto_iam.list_attached_group_policies'](group_name=name, + region=region, key=key, keyid=keyid, profile=profile) + oldpolicies = [x.get('policy_arn') for x in _list] + if not _list: + msg = 'No attached policies in group {0}.'.format(name) + ret['comment'] = msg + return ret + if __opts__['test']: + msg = '{0} policies to be detached from group {1}.' + ret['comment'] = msg.format(', '.join(oldpolicies), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_arn in oldpolicies: + policy_unset = __salt__['boto_iam.detach_group_policy'](policy_arn, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to detach {0} from group {1}' + ret['comment'] = msg.format(policy_arn, name) + return ret + _list = __salt__['boto_iam.list_attached_group_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies detached from group {1}.' + ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + def account_policy(allow_users_to_change_password=None, hard_expiry=None, max_password_age=None, minimum_password_length=None, password_reuse_prevention=None, diff --git a/salt/states/boto_iam_role.py b/salt/states/boto_iam_role.py index 6e39e67278..5bdf659c65 100644 --- a/salt/states/boto_iam_role.py +++ b/salt/states/boto_iam_role.py @@ -88,8 +88,10 @@ on the IAM role to be persistent. 
This functionality was added in 2015.8.0. ''' from __future__ import absolute_import import salt.utils.dictupdate as dictupdate +import logging import salt.ext.six as six +log = logging.getLogger(__name__) def __virtual__(): ''' @@ -104,6 +106,7 @@ def present( path=None, policies=None, policies_from_pillars=None, + managed_policies=None, create_instance_profile=True, region=None, key=None, @@ -134,6 +137,9 @@ def present( in the policies argument will override the keys defined in policies_from_pillars. + managed_policies + A list of (AWS or Customer) managed policies to be attached to the role. + create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. @@ -165,6 +171,8 @@ def present( policies = {} if not policies_from_pillars: policies_from_pillars = [] + if not managed_policies: + managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) @@ -195,6 +203,11 @@ def present( delete_policies) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + _ret = _policies_attached(name, managed_policies, region, key, keyid, profile) + ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret @@ -391,6 +404,81 @@ def _policies_present( return ret +def _policies_attached( + name, + managed_policies=None, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + policies_to_attach = [] + policies_to_detach = [] + for policy in managed_policies or []: + entities = __salt__['boto_iam.list_entities_for_policy'](policy, + entity_filter='Role', + region=region, key=key, keyid=keyid, + profile=profile) + if {'role_name': name} not in 
entities.get('policy_roles'): + policies_to_attach.append(policy) + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, key, keyid, + profile) + oldpolicies = [x.get('policy_arn') for x in _list] + for policy_data in _list: + if policy_data.get('policy_name') not in managed_policies \ + and policy_data.get('policy_arn') not in managed_policies: + policies_to_detach.append(policy_data.get('policy_arn')) + if policies_to_attach or policies_to_detach: + _to_modify = list(policies_to_detach) + _to_modify.extend(policies_to_attach) + if __opts__['test']: + msg = '{0} policies to be modified on role {1}.' + ret['comment'] = msg.format(', '.join(_to_modify), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_name in policies_to_attach: + policy_set = __salt__['boto_iam.attach_role_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_set: + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to add policy {0} to role {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + for policy_name in policies_to_detach: + policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to remove policy {0} from role {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + log.debug(newpolicies) + 
ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies modified on role {1}.' + ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + def absent( name, region=None, @@ -420,6 +508,13 @@ def absent( _ret = _policies_absent(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + if ret['result'] is False: + return ret + _ret = _policies_detached(name, region, key, keyid, profile) + ret['changes'] = _ret['changes'] + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: @@ -550,6 +645,51 @@ def _policies_absent( return ret +def _policies_detached( + name, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + _list = __salt__['boto_iam.list_attached_role_policies'](role_name=name, + region=region, key=key, keyid=keyid, profile=profile) + oldpolicies = [x.get('policy_arn') for x in _list] + if not _list: + msg = 'No attached policies in role {0}.'.format(name) + ret['comment'] = msg + return ret + if __opts__['test']: + msg = '{0} policies to be detached from role {1}.' 
+ ret['comment'] = msg.format(', '.join(oldpolicies), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'managed_policies': oldpolicies} + for policy_arn in oldpolicies: + policy_unset = __salt__['boto_iam.detach_role_policy'](policy_arn, + name, + region, key, + keyid, + profile) + if not policy_unset: + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, + key, keyid, + profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + ret['result'] = False + msg = 'Failed to detach {0} from role {1}' + ret['comment'] = msg.format(policy_arn, name) + return ret + _list = __salt__['boto_iam.list_attached_role_policies'](name, region, key, + keyid, profile) + newpolicies = [x.get('policy_arn') for x in _list] + ret['changes']['new'] = {'managed_policies': newpolicies} + msg = '{0} policies detached from role {1}.' + ret['comment'] = msg.format(', '.join(newpolicies), name) + return ret + + def _instance_profile_disassociated( name, region=None, From fe4477c0b1ff85f5d6aaac72ec51cd454e8146c2 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:22:49 -0600 Subject: [PATCH 19/65] Fix pylint errors --- salt/modules/boto_iam.py | 35 +++++++++++++++++------------------ salt/states/boto_iam.py | 8 ++++---- salt/states/boto_iam_role.py | 2 +- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index 1233b6963c..cc6bb09319 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -41,8 +41,8 @@ Connection module for Amazon IAM from __future__ import absolute_import import logging import json -import yaml import urllib +import yaml # Import salt libs import salt.utils.compat @@ -1565,7 +1565,7 @@ def get_policy(policy_name, try: ret = conn.get_policy(_get_policy_arn(policy_name)) - return ret.get('get_policy_response',{}).get('get_policy_result',{}) + return ret.get('get_policy_response', 
{}).get('get_policy_result', {}) except boto.exception.BotoServerError: return None @@ -1646,7 +1646,7 @@ def list_policies(region=None, key=None, keyid=None, profile=None): try: policies = [] for ret in salt.utils.boto.paged_call(conn.list_policies): - policies.append(ret.get('list_policies_response',{}).get('list_policies_result',{}).get('policies')) + policies.append(ret.get('list_policies_response', {}).get('list_policies_result', {}).get('policies')) return policies except boto.exception.BotoServerError as e: log.debug(e) @@ -1691,9 +1691,9 @@ def get_policy_version(policy_name, version_id, try: ret = conn.get_policy_version(_get_policy_arn(policy_name), version_id) - retval = ret.get('get_policy_version_response',{}).get('get_policy_version_result',{}).get('policy_version',{}) + retval = ret.get('get_policy_version_response', {}).get('get_policy_version_result', {}).get('policy_version', {}) retval['document'] = urllib.unquote(retval.get('document')) - return { 'policy_version': retval } + return {'policy_version': retval} except boto.exception.BotoServerError: return None @@ -1720,8 +1720,7 @@ def create_policy_version(policy_name, policy_document, set_as_default=None, policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) try: ret = conn.create_policy_version(policy_arn, policy_document, **params) - vid = ret.get('create_policy_version_response',{}).get('create_policy_version_result',{}).get('policy_version',{}).get('version_id') - log.debug(ret) + vid = ret.get('create_policy_version_response', {}).get('create_policy_version_result', {}).get('policy_version', {}).get('version_id') log.info('Created {0} policy version {1}.'.format(policy_name, vid)) return {'created': True, 'version_id': vid} except boto.exception.BotoServerError as e: @@ -1775,7 +1774,7 @@ def list_policy_versions(policy_name, policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) try: ret = conn.list_policy_versions(policy_arn) - return 
ret.get('list_policy_versions_response',{}).get('list_policy_versions_result',{}).get('versions') + return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions') except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to list {0} policy vesions.' @@ -1986,7 +1985,7 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, } for ret in salt.utils.boto.paged_call(conn.list_entities_for_policy, policy_arn=policy_arn, **params): for k, v in allret.iteritems(): - v.extend(ret.get('list_entities_for_policy_response',{}).get('list_entities_for_policy_result',{}).get(k)) + v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k)) return allret except boto.exception.BotoServerError as e: log.debug(e) @@ -2010,15 +2009,15 @@ def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None, params = {'UserName': user_name} if path_prefix is not None: - params['PathPrefix'] = path_prefix + params['PathPrefix'] = path_prefix policies = [] try: # Using conn.get_response is a bit of a hack, but it avoids having to # rewrite this whole module based on boto3 for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedUserPolicies', params, list_marker='AttachedPolicies'): - policies.extend(ret.get('list_attached_user_policies_response',{}).get('list_attached_user_policies_result',{} - ).get('attached_policies',[])) + policies.extend(ret.get('list_attached_user_policies_response', {}).get('list_attached_user_policies_result', {} + ).get('attached_policies', [])) return policies except boto.exception.BotoServerError as e: log.debug(e) @@ -2042,15 +2041,15 @@ def list_attached_group_policies(group_name, path_prefix=None, entity_filter=Non params = {'GroupName': group_name} if path_prefix is not None: - params['PathPrefix'] = path_prefix + params['PathPrefix'] = path_prefix policies = [] try: # Using conn.get_response is a bit 
of a hack, but it avoids having to # rewrite this whole module based on boto3 for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedGroupPolicies', params, list_marker='AttachedPolicies'): - policies.extend(ret.get('list_attached_group_policies_response',{}).get('list_attached_group_policies_result',{} - ).get('attached_policies',[])) + policies.extend(ret.get('list_attached_group_policies_response', {}).get('list_attached_group_policies_result', {} + ).get('attached_policies', [])) return policies except boto.exception.BotoServerError as e: log.debug(e) @@ -2074,15 +2073,15 @@ def list_attached_role_policies(role_name, path_prefix=None, entity_filter=None, params = {'RoleName': role_name} if path_prefix is not None: - params['PathPrefix'] = path_prefix + params['PathPrefix'] = path_prefix policies = [] try: # Using conn.get_response is a bit of a hack, but it avoids having to # rewrite this whole module based on boto3 for ret in salt.utils.boto.paged_call(conn.get_response, 'ListAttachedRolePolicies', params, list_marker='AttachedPolicies'): - policies.extend(ret.get('list_attached_role_policies_response',{}).get('list_attached_role_policies_result',{} - ).get('attached_policies',[])) + policies.extend(ret.get('list_attached_role_policies_response', {}).get('list_attached_role_policies_result', {} + ).get('attached_policies', [])) return policies except boto.exception.BotoServerError as e: log.debug(e) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index c663a81885..1b1ab9bc30 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -1354,11 +1354,11 @@ def policy_present(name, policy_document, path=None, description=None, ret['changes']['policy'] = created ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} has been created.'.format(name)]) else: - policy = policy.get('policy',{}) + policy = policy.get('policy', {}) log.debug(policy) ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} is 
present.'.format(name)]) - _describe = __salt__['boto_iam.get_policy_version'](name, policy.get('default_version_id'), - region, key, keyid, profile).get('policy_version',{}) + _describe = __salt__['boto_iam.get_policy_version'](name, policy.get('default_version_id'), + region, key, keyid, profile).get('policy_version', {}) if isinstance(_describe['document'], string_types): describeDict = json.loads(_describe['document']) else: @@ -1372,7 +1372,7 @@ def policy_present(name, policy_document, path=None, description=None, if bool(r): if __opts__['test']: - msg = 'Policy {0} set to be modified.'.format(policyName) + msg = 'Policy {0} set to be modified.'.format(name) ret['comment'] = msg ret['result'] = None return ret diff --git a/salt/states/boto_iam_role.py b/salt/states/boto_iam_role.py index 5bdf659c65..b3b27751e1 100644 --- a/salt/states/boto_iam_role.py +++ b/salt/states/boto_iam_role.py @@ -87,8 +87,8 @@ on the IAM role to be persistent. This functionality was added in 2015.8.0. ''' from __future__ import absolute_import -import salt.utils.dictupdate as dictupdate import logging +import salt.utils.dictupdate as dictupdate import salt.ext.six as six log = logging.getLogger(__name__) From 4d2050bf62a2065a6ef75edeae16411bb05d415e Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 14:27:54 -0600 Subject: [PATCH 20/65] Can't forget to pass thru identity info --- salt/modules/boto_iam.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index cc6bb09319..a90c8b9b91 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -1544,7 +1544,8 @@ def policy_exists(policy_name, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: - conn.get_policy(_get_policy_arn(policy_name)) + conn.get_policy(_get_policy_arn(policy_name, + region=region, key=key, keyid=keyid, profile=profile)) return True except boto.exception.BotoServerError: return False @@ 
-1564,7 +1565,8 @@ def get_policy(policy_name, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: - ret = conn.get_policy(_get_policy_arn(policy_name)) + ret = conn.get_policy(_get_policy_arn(policy_name, + region=region, key=key, keyid=keyid, profile=profile)) return ret.get('get_policy_response', {}).get('get_policy_result', {}) except boto.exception.BotoServerError: return None @@ -1690,7 +1692,8 @@ def get_policy_version(policy_name, version_id, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: - ret = conn.get_policy_version(_get_policy_arn(policy_name), version_id) + ret = conn.get_policy_version(_get_policy_arn(policy_name, + region=region, key=key, keyid=keyid, profile=profile), version_id) retval = ret.get('get_policy_version_response', {}).get('get_policy_version_result', {}).get('policy_version', {}) retval['document'] = urllib.unquote(retval.get('document')) return {'policy_version': retval} From 9dd3fe0689c30e226d7a8c0d16074c3682757cae Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 15:40:07 -0600 Subject: [PATCH 21/65] Fix lint errors --- salt/modules/boto_iam.py | 4 +--- salt/states/boto_iam_role.py | 1 + salt/utils/boto.py | 4 ++-- salt/utils/boto3.py | 4 ++-- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index a90c8b9b91..1ecc03e451 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -41,7 +41,6 @@ Connection module for Amazon IAM from __future__ import absolute_import import logging import json -import urllib import yaml # Import salt libs @@ -1591,7 +1590,6 @@ def create_policy(policy_name, policy_document, path=None, description=None, for arg in 'path', 'description': if locals()[arg] is not None: params[arg] = locals()[arg] - log.debug(policy_document) if policy_exists(policy_name, region, key, keyid, profile): return True try: @@ -1695,7 +1693,7 @@ def get_policy_version(policy_name, 
version_id, ret = conn.get_policy_version(_get_policy_arn(policy_name, region=region, key=key, keyid=keyid, profile=profile), version_id) retval = ret.get('get_policy_version_response', {}).get('get_policy_version_result', {}).get('policy_version', {}) - retval['document'] = urllib.unquote(retval.get('document')) + retval['document'] = _unquote(retval.get('document')) return {'policy_version': retval} except boto.exception.BotoServerError: return None diff --git a/salt/states/boto_iam_role.py b/salt/states/boto_iam_role.py index b3b27751e1..fe21758e61 100644 --- a/salt/states/boto_iam_role.py +++ b/salt/states/boto_iam_role.py @@ -93,6 +93,7 @@ import salt.ext.six as six log = logging.getLogger(__name__) + def __virtual__(): ''' Only load if boto is available. diff --git a/salt/utils/boto.py b/salt/utils/boto.py index 8cc7285295..941f419d05 100644 --- a/salt/utils/boto.py +++ b/salt/utils/boto.py @@ -271,8 +271,8 @@ def paged_call(function, *args, **kwargs): """Retrieve full set of values from a boto API call that may truncate its results, yielding each page as it is obtained. """ - marker_flag = kwargs.pop('marker_flag','marker') - marker_arg = kwargs.pop('marker_flag','marker') + marker_flag = kwargs.pop('marker_flag', 'marker') + marker_arg = kwargs.pop('marker_flag', 'marker') while True: ret = function(*args, **kwargs) marker = ret.get(marker_flag) diff --git a/salt/utils/boto3.py b/salt/utils/boto3.py index d1692d9d31..3c20121580 100644 --- a/salt/utils/boto3.py +++ b/salt/utils/boto3.py @@ -291,8 +291,8 @@ def paged_call(function, *args, **kwargs): """Retrieve full set of values from a boto3 API call that may truncate its results, yielding each page as it is obtained. 
""" - marker_flag = kwargs.pop('marker_flag','NextMarker') - marker_arg = kwargs.pop('marker_flag','Marker') + marker_flag = kwargs.pop('marker_flag', 'NextMarker') + marker_arg = kwargs.pop('marker_flag', 'Marker') while True: ret = function(*args, **kwargs) marker = ret.get(marker_flag) From 239cddeb60bd7257a3c656fea21e58e29fa67b8d Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 16:05:30 -0600 Subject: [PATCH 22/65] Robustness fixes --- salt/states/boto_iam.py | 11 +++++++---- salt/states/boto_iam_role.py | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index 1b1ab9bc30..a4abefdc65 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -589,7 +589,7 @@ def _user_policies_attached( entity_filter='User', region=region, key=key, keyid=keyid, profile=profile) - if {'user_name': name} not in entities.get('policy_users'): + if {'user_name': name} not in entities.get('policy_users', []): policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_user_policies'](name, region, key, keyid, profile) @@ -988,7 +988,7 @@ def _group_policies_attached( entity_filter='Group', region=region, key=key, keyid=keyid, profile=profile) - if {'group_name': name} not in entities.get('policy_groups'): + if {'group_name': name} not in entities.get('policy_groups', []): policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_group_policies'](name, region, key, keyid, profile) @@ -1353,9 +1353,13 @@ def policy_present(name, policy_document, path=None, description=None, if created: ret['changes']['policy'] = created ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} has been created.'.format(name)]) + else: + ret['result'] = False + ret['comment'] = 'Failed to update policy: {0}.'.format(r['error']['message']) + ret['changes'] = {} + return ret else: policy = policy.get('policy', {}) - log.debug(policy) ret['comment'] = 
os.linesep.join([ret['comment'], 'Policy {0} is present.'.format(name)]) _describe = __salt__['boto_iam.get_policy_version'](name, policy.get('default_version_id'), region, key, keyid, profile).get('policy_version', {}) @@ -1365,7 +1369,6 @@ def policy_present(name, policy_document, path=None, description=None, describeDict = _describe['document'] if isinstance(policy_document, string_types): - log.debug(policy_document) policy_document = json.loads(policy_document) r = salt.utils.compare_dicts(describeDict, policy_document) diff --git a/salt/states/boto_iam_role.py b/salt/states/boto_iam_role.py index fe21758e61..ad2b4f6c33 100644 --- a/salt/states/boto_iam_role.py +++ b/salt/states/boto_iam_role.py @@ -420,7 +420,7 @@ def _policies_attached( entity_filter='Role', region=region, key=key, keyid=keyid, profile=profile) - if {'role_name': name} not in entities.get('policy_roles'): + if {'role_name': name} not in entities.get('policy_roles', []): policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_role_policies'](name, region, key, keyid, profile) From 24271d16f0d353abe1b1f5f27e5a84ae9f6ef3e4 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Thu, 18 Feb 2016 17:33:13 -0600 Subject: [PATCH 23/65] Another pylint fix --- salt/states/boto_iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index a4abefdc65..db99ec98ad 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -1355,7 +1355,7 @@ def policy_present(name, policy_document, path=None, description=None, ret['comment'] = os.linesep.join([ret['comment'], 'Policy {0} has been created.'.format(name)]) else: ret['result'] = False - ret['comment'] = 'Failed to update policy: {0}.'.format(r['error']['message']) + ret['comment'] = 'Failed to update policy.' 
ret['changes'] = {} return ret else: From a697c1845cac765de4ecd32c18b916d57bcd96c4 Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Sat, 20 Feb 2016 01:12:47 +0530 Subject: [PATCH 24/65] Added windows vm changes as per review comments. --- doc/topics/cloud/vmware.rst | 35 +++++++++++++++++++ salt/cloud/clouds/vmware.py | 68 ++++++++++++++++--------------------- 2 files changed, 65 insertions(+), 38 deletions(-) diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst index db57caf9e5..e66aaf3249 100644 --- a/doc/topics/cloud/vmware.rst +++ b/doc/topics/cloud/vmware.rst @@ -186,6 +186,13 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or hardware_version: 10 image: centos64Guest + + #For Windows VM + win_username: Administrator + win_password: administrator + win_organization_name: ABC-Corp + plain_text: True + win_installer: /root/Salt-Minion-2015.8.4-AMD64-Setup.exe ``provider`` Enter the name that was specified when the cloud provider config was created. @@ -447,6 +454,34 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or For a clone operation, this argument is ignored. +``win_username`` + Specify windows vm administrator account. + + .. note:: + + Windows template should have "administrator" account. + +``win_password`` + Specify windows vm administrator account password. + + .. note:: + + During network configuration (if network specified), it is used to specify new administrator password for the machine. + +``win_organization_name`` + Specify windows vm user's organization. + VMware vSphere documentation: + + https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.customization.UserData.html + +``plain_text`` + Flag to specify whether or not the password is in plain text, rather than encrypted. 
+ VMware vSphere documentation: + + https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.customization.Password.html + +``win_installer`` + Specify windows minion client installer path Cloning a VM ============ diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index c7a2887523..e7d4f9e454 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -116,7 +116,7 @@ configuration, run :py:func:`test_vcenter_connection` # Import python libs from __future__ import absolute_import from random import randint -from re import match, findall +from re import findall import pprint import logging import time @@ -819,7 +819,6 @@ def _wait_for_ip(vm_ref, max_wait): vmware_tools_status = _wait_for_vmware_tools(vm_ref, max_wait_vmware_tools) if not vmware_tools_status: return False - time_counter = 0 starttime = time.time() while time_counter < max_wait_ip: @@ -829,7 +828,6 @@ def _wait_for_ip(vm_ref, max_wait): if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress): log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter)) return vm_ref.summary.guest.ipAddress - for net in vm_ref.guest.net: if net.ipConfig.ipAddress: for current_ip in net.ipConfig.ipAddress: @@ -2213,6 +2211,15 @@ def create(vm_): customization = config.get_cloud_config_value( 'customization', vm_, __opts__, search_global=False, default=True ) + win_password = config.get_cloud_config_value( + 'win_password', vm_, __opts__, search_global=False, default=None + ) + win_organization_name = config.get_cloud_config_value( + 'win_organization_name', vm_, __opts__, search_global=False, default='organization' + ) + plain_text = config.get_cloud_config_value( + 'plain_text', vm_, __opts__, search_global=False, default=False + ) if 'clonefrom' in vm_: # If datacenter is specified, set the container reference to start search from it instead @@ -2374,45 +2381,30 @@ def create(vm_): 
config=config_spec ) - if customization and (devices and 'network' in list(devices.keys())) and 'Windows' not in object_ref.config.guestFullName: + if customization and (devices and 'network' in list(devices.keys())): global_ip = vim.vm.customization.GlobalIPSettings() - if 'dns_servers' in list(vm_.keys()): global_ip.dnsServerList = vm_['dns_servers'] - - identity = vim.vm.customization.LinuxPrep() hostName = vm_name.split('.')[0] domainName = vm_name.split('.', 1)[-1] - identity.hostName = vim.vm.customization.FixedName(name=hostName) - identity.domain = domainName if hostName != domainName else domain - - custom_spec = vim.vm.customization.Specification( - globalIPSettings=global_ip, - identity=identity, - nicSettingMap=specs['nics_map'] - ) - clone_spec.customization = custom_spec - - if customization and (devices and 'network' in list(devices.keys())) and 'Windows' in object_ref.config.guestFullName: - global_ip = vim.vm.customization.GlobalIPSettings() - - if 'dns_servers' in list(vm_.keys()): - global_ip.dnsServerList = vm_['dns_servers'] - - identity = vim.vm.customization.Sysprep() - identity.guiUnattended = vim.vm.customization.GuiUnattended() - identity.guiUnattended.autoLogon = False - identity.guiUnattended.password = vim.vm.customization.Password() - identity.guiUnattended.password.value = vm_['win_password'] - identity.guiUnattended.password.plainText = True - identity.userData = vim.vm.customization.UserData() - hostName = vm_name.split('.')[0] - identity.userData.fullName = hostName - identity.userData.orgName = "Organization-Name" - identity.userData.computerName = vim.vm.customization.FixedName() - identity.userData.computerName.name = domain - identity.identification = vim.vm.customization.Identification() - + if 'Windows' not in object_ref.config.guestFullName: + identity = vim.vm.customization.LinuxPrep() + identity.hostName = vim.vm.customization.FixedName(name=hostName) + identity.domain = domainName if hostName != domainName else domain + 
else: + identity = vim.vm.customization.Sysprep() + identity.guiUnattended = vim.vm.customization.GuiUnattended() + identity.guiUnattended.autoLogon = True + identity.guiUnattended.autoLogonCount = 1 + identity.guiUnattended.password = vim.vm.customization.Password() + identity.guiUnattended.password.value = win_password + identity.guiUnattended.password.plainText = plain_text + identity.userData = vim.vm.customization.UserData() + identity.userData.fullName = domainName if hostName != domainName else domain + identity.userData.orgName = win_organization_name + identity.userData.computerName = vim.vm.customization.FixedName() + identity.userData.computerName.name = hostName + identity.identification = vim.vm.customization.Identification() custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, @@ -3797,4 +3789,4 @@ def create_datastore_cluster(kwargs=None, call=None): ) return False - return {datastore_cluster_name: 'created'} + return {datastore_cluster_name: 'created'} \ No newline at end of file From 34597356845f5d28565439217498462cff97a7a9 Mon Sep 17 00:00:00 2001 From: Borys Pierov Date: Fri, 19 Feb 2016 11:40:51 -0500 Subject: [PATCH 25/65] Make sure state.sls preserves saltenv/pillarenv --- salt/modules/state.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/salt/modules/state.py b/salt/modules/state.py index 7d2650baff..279fcc5f95 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -712,15 +712,12 @@ def sls(mods, saltenv = __opts__['environment'] else: saltenv = 'base' - else: - __opts__['environment'] = saltenv if not pillarenv: if __opts__.get('pillarenv', None): pillarenv = __opts__['pillarenv'] - else: - __opts__['pillarenv'] = pillarenv + # Modification to __opts__ lost after this if-else if queue: _wait(kwargs.get('__pub_jid')) else: @@ -729,6 +726,10 @@ def sls(mods, __context__['retcode'] = 1 return conflict + # Ensure desired environment + __opts__['environment'] = 
saltenv + __opts__['pillarenv'] = pillarenv + if isinstance(mods, list): disabled = _disabled(mods) else: From 41eb3847f205f89a01c2d163e1939e0b319aa76a Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Sat, 20 Feb 2016 02:00:42 +0530 Subject: [PATCH 26/65] Fixed lint error. --- salt/cloud/clouds/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 0c35cc1a18..7b66d2b950 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -2414,7 +2414,7 @@ def create(vm_): global_ip.dnsServerList = vm_['dns_servers'] hostName = vm_name.split('.')[0] domainName = vm_name.split('.', 1)[-1] - if 'Windows' not in object_ref.config.guestFullName: + if 'Windows' not in object_ref.config.guestFullName: identity = vim.vm.customization.LinuxPrep() identity.hostName = vim.vm.customization.FixedName(name=hostName) identity.domain = domainName if hostName != domainName else domain From 7afbfe26fc1c70da421190ce778e70f2be78de80 Mon Sep 17 00:00:00 2001 From: Toyam Cox Date: Wed, 17 Feb 2016 00:45:57 -0500 Subject: [PATCH 27/65] Add support for Void Linux --- salt/grains/core.py | 6 +- salt/modules/runit.py | 682 ++++++++++++++++++++++++++++++++------- salt/modules/service.py | 1 + salt/modules/xbps-pkg.py | 635 ++++++++++++++++++++++++++++++++++++ 4 files changed, 1205 insertions(+), 119 deletions(-) create mode 100644 salt/modules/xbps-pkg.py diff --git a/salt/grains/core.py b/salt/grains/core.py index 6a70dbae2f..76e7c0c612 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -30,7 +30,7 @@ __FQDN__ = None # /etc/DISTRO-release checking that is part of platform.linux_distribution() from platform import _supported_dists _supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64', - 'slamd64', 'ovs', 'system', 'mint', 'oracle') + 'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void') # Import salt libs import salt.log @@ -997,6 +997,7 @@ _OS_NAME_MAP = { 'manjaro': 
'Manjaro', 'antergos': 'Antergos', 'sles': 'SUSE', + 'void': 'Void', } # Map the 'os' grain to the 'os_family' grain @@ -1048,6 +1049,7 @@ _OS_FAMILY_MAP = { 'Devuan': 'Debian', 'antiX': 'Debian', 'NILinuxRT': 'NILinuxRT', + 'Void': 'Void', } @@ -1179,7 +1181,7 @@ def os_data(): init_cmdline = fhr.read().replace('\x00', ' ').split() init_bin = salt.utils.which(init_cmdline[0]) if init_bin is not None: - supported_inits = (six.b('upstart'), six.b('sysvinit'), six.b('systemd')) + supported_inits = (six.b('upstart'), six.b('sysvinit'), six.b('systemd'), six.b('runit')) edge_len = max(len(x) for x in supported_inits) - 1 try: buf_size = __opts__['file_buffer_size'] diff --git a/salt/modules/runit.py b/salt/modules/runit.py index cfb0052265..34faef91a0 100644 --- a/salt/modules/runit.py +++ b/salt/modules/runit.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- ''' runit service module +(http://smarden.org/runit) This module is compatible with the :mod:`service ` states, so it can be used to maintain services using the ``provider`` argument: @@ -11,22 +12,59 @@ so it can be used to maintain services using the ``provider`` argument: service: - running - provider: runit + +Provides virtual `service` module on systems using runit as init. + + +Service management rules (`sv` command): + + service $n is ENABLED if file SERVICE_DIR/$n/run exists + service $n is AVAILABLE if ENABLED or if file AVAIL_SVR_DIR/$n/run exists + service $n is DISABLED if AVAILABLE but not ENABLED + + SERVICE_DIR/$n is normally a symlink to a AVAIL_SVR_DIR/$n folder + + +Service auto-start/stop mechanism: + + `sv` (auto)starts/stops service as soon as SERVICE_DIR/ is + created/deleted, both on service creation or a boot time. + + autostart feature is disabled if file SERVICE_DIR//down exists. This + does not affect the current's service status (if already running) nor + manual service management. 
+ + +Service's alias: + + Service `sva` is an alias of service `svc` when `AVAIL_SVR_DIR/sva` symlinks + to folder `AVAIL_SVR_DIR/svc`. `svc` can't be enabled if it is already + enabled through an alias already enabled, since `sv` files are stored in + folder `SERVICE_DIR/svc/`. + + XBPS package management uses a service's alias to provides service + alternative(s), such as chrony and openntpd both aliased to ntpd. ''' from __future__ import absolute_import # Import python libs import os -import re -#for octal permission conversion -import string +import glob +import logging +import time + +log = logging.getLogger(__name__) # Import salt libs from salt.exceptions import CommandExecutionError +import salt.utils +# Function alias to not shadow built-ins. __func_alias__ = { 'reload_': 'reload' } +# which dir sv works with VALID_SERVICE_DIRS = [ '/service', '/var/service', @@ -38,19 +76,44 @@ for service_dir in VALID_SERVICE_DIRS: SERVICE_DIR = service_dir break +# available service directory(ies) +AVAIL_SVR_DIRS = [] + +# Define the module's virtual name +__virtualname__ = 'service' + + +def __virtual__(): + ''' + Virtual service only on systems using runit as init process (PID 1). + Otherwise, use this module with the provider mechanism. 
+ ''' + if __grains__['init'] == 'runit': + if __grains__['os'] == 'Void': + add_svc_avail_path('/etc/sv') + return __virtualname__ + return False + def _service_path(name): ''' - build service path + Return SERVICE_DIR+name if possible + + name + the service's name to work on ''' if not SERVICE_DIR: - raise CommandExecutionError("Could not find service directory.") - return '{0}/{1}'.format(SERVICE_DIR, name) + raise CommandExecutionError('Could not find service directory.') + return os.path.join(SERVICE_DIR, name) +#-- states.service compatible args def start(name): ''' - Starts service via runit + Start service + + name + the service's name CLI Example: @@ -62,9 +125,13 @@ def start(name): return not __salt__['cmd.retcode'](cmd) +#-- states.service compatible args def stop(name): ''' - Stops service via runit + Stop service + + name + the service's name CLI Example: @@ -76,23 +143,13 @@ def stop(name): return not __salt__['cmd.retcode'](cmd) -def term(name): - ''' - Send a TERM to service via runit - - CLI Example: - - .. code-block:: bash - - salt '*' runit.term - ''' - cmd = 'sv term {0}'.format(_service_path(name)) - return not __salt__['cmd.retcode'](cmd) - - +#-- states.service compatible def reload_(name): ''' - Send a HUP to service via runit + Reload service + + name + the service's name CLI Example: @@ -104,9 +161,13 @@ def reload_(name): return not __salt__['cmd.retcode'](cmd) +#-- states.service compatible def restart(name): ''' - Restart service via runit. 
This will stop/start service + Restart service + + name + the service's name CLI Example: @@ -118,9 +179,13 @@ def restart(name): return not __salt__['cmd.retcode'](cmd) +#-- states.service compatible def full_restart(name): ''' - Calls runit.restart() function + Calls runit.restart() + + name + the service's name CLI Example: @@ -131,9 +196,16 @@ def full_restart(name): restart(name) +#-- states.service compatible def status(name, sig=None): ''' - Return the status for a service via runit, return pid if running + Return ``True`` if service is running + + name + the service's name + + sig + signature to identify with ps CLI Example: @@ -141,13 +213,194 @@ def status(name, sig=None): salt '*' runit.status ''' - cmd = 'sv status {0}'.format(_service_path(name)) - out = __salt__['cmd.run_stdout'](cmd) + if sig: + # usual way to do by others (debian_service, netbsdservice). + # XXX probably does not work here (check 'runsv sshd' instead of 'sshd' ?) + return bool(__salt__['status.pid'](sig)) + + svc_path = _service_path(name) + if not os.path.exists(svc_path): + # service does not exist + return False + + # sv return code is not relevant to get a service status. + # Check its output instead. 
+ cmd = 'sv status {0}'.format(svc_path) try: - pid = re.search(r'{0}: \(pid (\d+)\)'.format(name), out).group(1) - except AttributeError: - pid = '' - return pid + out = __salt__['cmd.run_stdout'](cmd) + return out.startswith('run: ') + except Exception: + # sv (as a command) returned an error + return False + + +def _is_svc(svc_path): + ''' + Return ``True`` if directory is really a service: + file /run exists and is executable + + svc_path + the (absolute) directory to check for compatibility + ''' + run_file = os.path.join(svc_path, 'run') + if (os.path.exists(svc_path) + and os.path.exists(run_file) + and os.access(run_file, os.X_OK)): + return True + return False + + +def status_autostart(name): + ''' + Return ``True`` if service is autostarted by sv + (file $service_folder/down does not exist) + NB: return ``False`` if the service is not enabled. + + name + the service's name + + CLI Example: + + .. code-block:: bash + + salt '*' runit.status_autostart + ''' + return not os.path.exists(os.path.join(_service_path(name), 'down')) + + +def get_svc_broken_path(name='*'): + ''' + Return list of broken path(s) in SERVICE_DIR that match ``name`` + + A path is broken if it is a broken symlink or can not be a runit service + + name + a glob for service name. default is '*' + + CLI Example: + + .. code-block:: bash + + salt '*' runit.get_svc_broken_path + ''' + if not SERVICE_DIR: + raise CommandExecutionError('Could not find service directory.') + + ret = set() + + for el in glob.glob(os.path.join(SERVICE_DIR, name)): + if not _is_svc(el): + ret.add(el) + return sorted(ret) + + +def get_svc_avail_path(): + ''' + Return list of paths that may contain available services + ''' + return AVAIL_SVR_DIRS + + +def add_svc_avail_path(path): + ''' + Add a path that may contain available services. + Return ``True`` if added (or already present), ``False`` on error. 
+ + path + directory to add to AVAIL_SVR_DIRS + ''' + if os.path.exists(path): + if path not in AVAIL_SVR_DIRS: + AVAIL_SVR_DIRS.append(path) + return True + return False + + +def _get_svc_path(name='*', status=None): + ''' + Return a list of paths to services with ``name`` that have the specified ``status`` + + name + a glob for service name. default is '*' + + status + None : all services (no filter, default choice) + 'DISABLED' : available service(s) that is not enabled + 'ENABLED' : enabled service (whether started on boot or not) + ''' + + # This is the core routine to work with services, called by many + # other functions of this module. + # + # The name of a service is the "apparent" folder's name that contains its + # "run" script. If its "folder" is a symlink, the service is an "alias" of + # the targeted service. + + if not SERVICE_DIR: + raise CommandExecutionError('Could not find service directory.') + + # path list of enabled services as /AVAIL_SVR_DIRS/$service, + # taking care of any service aliases (do not use os.path.realpath()). + ena = set() + for el in glob.glob(os.path.join(SERVICE_DIR, name)): + if _is_svc(el): + ena.add(os.readlink(el)) + log.trace('found enabled service path: {0}'.format(el)) + + if status == 'ENABLED': + return sorted(ena) + + # path list of available services as /AVAIL_SVR_DIRS/$service + ava = set() + for d in AVAIL_SVR_DIRS: + for el in glob.glob(os.path.join(d, name)): + if _is_svc(el): + ava.add(el) + log.trace('found available service path: {0}'.format(el)) + + if status == 'DISABLED': + # service available but not enabled + ret = ava.difference(ena) + else: + # default: return available services + ret = ava.union(ena) + + return sorted(ret) + + +def _get_svc_list(name='*', status=None): + ''' + Return list of services that have the specified service ``status`` + + name + a glob for service name. 
default is '*' + + status + None : all services (no filter, default choice) + 'DISABLED' : available service that is not enabled + 'ENABLED' : enabled service (whether started on boot or not) + ''' + return sorted([os.path.basename(el) for el in _get_svc_path(name, status)]) + + +def get_svc_alias(): + ''' + Returns the list of service's name that are aliased and their alias path(s) + ''' + + ret = {} + for d in AVAIL_SVR_DIRS: + for el in glob.glob(os.path.join(d, '*')): + if not os.path.islink(el): + continue + psvc = os.readlink(el) + if not os.path.isabs(psvc): + psvc = os.path.join(d, psvc) + nsvc = os.path.basename(psvc) + if nsvc not in ret: + ret[nsvc] = [] + ret[nsvc].append(el) + return ret def available(name): @@ -155,92 +408,16 @@ def available(name): Returns ``True`` if the specified service is available, otherwise returns ``False``. - CLI Example: - - .. code-block:: bash - - salt '*' runit.available foo - ''' - return name in get_all() - - -def enabled(name, **kwargs): - ''' - Returns ``True`` if the specified service has a 'run' file and that - file is executable, otherwhise returns - ``False``. + name + the service's name CLI Example: .. code-block:: bash - salt '*' runit.enabled foo + salt '*' runit.available ''' - if not available(name): - return False - - files = os.listdir(SERVICE_DIR + '/'+name) - if 'run' not in files: - return False - mode = __salt__['file.get_mode'](SERVICE_DIR + '/'+name+'/run') - return (string.atoi(mode, base=8) & 0b0000000001000000) > 0 - - -def enable(name, **kwargs): - ''' - Returns ``True`` if the specified service is enabled - or becomes - enabled - as defined by its run file being executable, otherise - ``False``. - - CLI Example: - - .. 
code-block:: bash - - salt '*' runit.enable foo - ''' - if not available(name): - return False - - files = os.listdir(SERVICE_DIR + '/'+name) - if 'run' not in files: - return False - - return '0700' == __salt__['file.set_mode'](SERVICE_DIR +'/' +name+'/run', '0700') - - -def disabled(name, **kwargs): - ''' - Returns the opposite of runit.enabled - - CLI Example: - - .. code-block:: bash - - salt '*' runit.disabled foo - ''' - return not enabled(name) - - -def disable(name, **kwargs): - ''' - Returns ``True`` if the specified service is disabled - or becomes - disabled - as defined by its run file being not-executable, otherise - ``False``. - - CLI Example: - - .. code-block:: bash - - salt '*' runit.disable foo - ''' - if not available(name): - return False - - files = os.listdir(SERVICE_DIR + '/'+name) - if 'run' not in files: - return False - - return '0600' == __salt__['file.set_mode'](SERVICE_DIR +'/' +name+'/run', '0600') + return name in _get_svc_list(name) def missing(name): @@ -249,13 +426,16 @@ def missing(name): Returns ``True`` if the specified service is not available, otherwise returns ``False``. + name + the service's name + CLI Example: .. code-block:: bash - salt '*' runit.missing foo + salt '*' runit.missing ''' - return name not in get_all() + return name not in _get_svc_list(name) def get_all(): @@ -268,6 +448,274 @@ def get_all(): salt '*' runit.get_all ''' - if not SERVICE_DIR: - raise CommandExecutionError("Could not find service directory.") - return sorted(os.listdir(SERVICE_DIR)) + return _get_svc_list() + + +def get_enabled(): + ''' + Return a list of all enabled services + + CLI Example: + + .. code-block:: bash + + salt '*' service.get_enabled + ''' + return _get_svc_list(status='ENABLED') + + +def get_disabled(): + ''' + Return a list of all disabled services + + CLI Example: + + .. 
code-block:: bash + + salt '*' service.get_disabled + ''' + return _get_svc_list(status='DISABLED') + + +def enabled(name): + ''' + Return ``True`` if the named service is enabled, ``False`` otherwise + + name + the service's name + + CLI Example: + + .. code-block:: bash + + salt '*' service.enabled + ''' + # exhaustive check instead of (only) os.path.exists(_service_path(name)) + return name in _get_svc_list(name, 'ENABLED') + + +def disabled(name): + ''' + Return ``True`` if the named service is disabled, ``False`` otherwise + + name + the service's name + + CLI Example: + + .. code-block:: bash + + salt '*' service.disabled + ''' + # return True for a non-existent service + return name not in _get_svc_list(name, 'ENABLED') + + +def show(name): + ''' + Show properties of one or more units/jobs or the manager + + name + the service's name + + CLI Example: + + salt '*' service.show + ''' + ret = {} + ret['enabled'] = False + ret['disabled'] = True + ret['running'] = False + ret['service_path'] = None + ret['autostart'] = False + ret['command_path'] = None + + ret['available'] = available(name) + if not ret['available']: + return ret + + ret['enabled'] = enabled(name) + ret['disabled'] = not ret['enabled'] + ret['running'] = status(name) + ret['autostart'] = status_autostart(name) + ret['service_path'] = _get_svc_path(name)[0] + if ret['service_path']: + ret['command_path'] = os.path.join(ret['service_path'], 'run') + + # XXX provide info about alias ? + + return ret + + +def enable(name, start=False, **kwargs): + ''' + Start service ``name`` at boot. + Returns ``True`` if operation is successful + + name + the service's name + + start + ``False`` : Do not start the service once enabled. Default mode. + (consistent with other service management) + ``True`` : also start the service at the same time (default sv mode) + + CLI Example: + + .. 
code-block:: bash + + salt '*' service.enable [start=True] + ''' + + # non-existent service + if not available(name): + return False + + # if service is aliased, refuse to enable it + alias = get_svc_alias() + if name in alias: + log.error('This service is aliased, enable its alias instead') + return False + + # down_file: file that disables sv autostart + svc_realpath = _get_svc_path(name)[0] + down_file = os.path.join(svc_realpath, 'down') + + # if service already enabled, remove down_file to + # let service starts on boot (as requested) + if enabled(name): + if os.path.exists(down_file): + try: + os.unlink(down_file) + except OSError: + log.error('Unable to remove file {0}'.format(down_file)) + return False + return True + + # let's enable the service + + if not start: + # create a temp 'down' file BEFORE enabling service. + # will prevent sv from starting this service automatically. + log.trace('need a temporary file {0}'.format(down_file)) + if not os.path.exists(down_file): + try: + salt.utils.fopen(down_file, "w").close() + except IOError: + log.error('Unable to create file {0}'.format(down_file)) + return False + + # enable the service + try: + os.symlink(svc_realpath, _service_path(name)) + + except IOError: + # (attempt to) remove temp down_file anyway + log.error('Unable to create symlink {0}'.format(down_file)) + if not start: + os.unlink(down_file) + return False + + # ensure sv is aware of this new service before continuing. + # if not, down_file might be removed too quickly, + # before 'sv' have time to take care about it. + # Documentation indicates that a change is handled within 5 seconds. + cmd = 'sv status {0}'.format(_service_path(name)) + retcode_sv = 1 + count_sv = 0 + while retcode_sv != 0 and count_sv < 10: + time.sleep(0.5) + count_sv += 1 + call = __salt__['cmd.run_all'](cmd) + retcode_sv = call['retcode'] + + # remove the temp down_file in any case. 
+ if (not start) and os.path.exists(down_file): + try: + os.unlink(down_file) + except OSError: + log.error('Unable to remove temp file {0}'.format(down_file)) + retcode_sv = 1 + + # if an error happened, revert our changes + if retcode_sv != 0: + os.unlink(os.path.join([_service_path(name), name])) + return False + return True + + +def disable(name, stop=False, **kwargs): + ''' + Don't start service ``name`` at boot + Returns ``True`` if operation is successfull + + name + the service's name + + stop + if True, also stops the service + + CLI Example: + + .. code-block:: bash + + salt '*' service.disable [stop=True] + ''' + + # non-existent as registrered service + if not enabled(name): + return False + + # down_file: file that prevent sv autostart + svc_realpath = _get_svc_path(name)[0] + down_file = os.path.join(svc_realpath, 'down') + + if stop: + stop(name) + + if not os.path.exists(down_file): + try: + salt.utils.fopen(down_file, "w").close() + except IOError: + log.error('Unable to create file {0}'.format(down_file)) + return False + + return True + + +def remove(name): + ''' + Remove the service from system. + Returns ``True`` if operation is successfull. + The service will be also stopped. + + name + the service's name + + CLI Example: + + .. 
code-block:: bash + + salt '*' service.remove + ''' + + if not enabled(name): + return False + + svc_path = _service_path(name) + if not os.path.islink(svc_path): + log.error('{0} is not a symlink: not removed'.format(svc_path)) + return False + + if not stop(name): + log.error('Failed to stop service {0}'.format(name)) + return False + try: + os.remove(svc_path) + except IOError: + log.error('Unable to remove symlink {0}'.format(svc_path)) + return False + return True + + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 diff --git a/salt/modules/service.py b/salt/modules/service.py index a3637559fb..3745adb2af 100644 --- a/salt/modules/service.py +++ b/salt/modules/service.py @@ -42,6 +42,7 @@ def __virtual__(): 'Linaro', 'elementary OS', 'McAfee OS Server', + 'Void', 'Mint' )) if __grains__.get('os', '') in disable: diff --git a/salt/modules/xbps-pkg.py b/salt/modules/xbps-pkg.py new file mode 100644 index 0000000000..6bbf21b2a7 --- /dev/null +++ b/salt/modules/xbps-pkg.py @@ -0,0 +1,635 @@ +# -*- coding: utf-8 -*- +''' +Package support for XBPS packaging system (VoidLinux distribution) + +XXX what about the initial acceptance of repo's fingerprint when adding a new repo ? + +XXX can be used as a provider if module virtual's name not defined to 'pkg' ? +XXX please fix "versionadded" in this file on once merged into SaltStack. 
+''' + + +# Import python libs +from __future__ import absolute_import +import os +import re +import logging +import glob + +# Import salt libs +import salt.utils +import salt.utils.decorators as decorators +from salt.exceptions import CommandExecutionError, MinionError + +log = logging.getLogger(__name__) + +# Define the module's virtual name +__virtualname__ = 'pkg' + + +@decorators.memoize +def _check_xbps(): + ''' + Looks to see if xbps-install is present on the system, return full path + ''' + return salt.utils.which('xbps-install') + + +@decorators.memoize +def _get_version(): + ''' + Get the xbps version + ''' + xpath = _check_xbps() + version_string = __salt__['cmd.run']( + '{0} --version'.format(xpath), output_loglevel='trace' + ) + if version_string is None: + # Dunno why it would, but... + return False + + VERSION_MATCH = re.compile(r'(?:XBPS:[\s]+)([\d.]+)(?:[\s]+.*)') + version_match = VERSION_MATCH.search(version_string) + if not version_match: + return False + + return version_match.group(1).split('.') + + +def _rehash(): + ''' + Recomputes internal hash table for the PATH variable. + Used whenever a new command is created during the current + session. + ''' + shell = __salt__['environ.get']('SHELL') + if shell.split('/')[-1] in ('csh', 'tcsh'): + __salt__['cmd.run']('rehash', output_loglevel='trace') + + +def __virtual__(): + ''' + Set the virtual pkg module if the os is Void and xbps-install found + ''' + if __grains__['os'] in ('Void') and _check_xbps(): + return __virtualname__ + return False + + +def list_pkgs(versions_as_list=False, **kwargs): + ''' + List the packages currently installed as a dict:: + + {'': ''} + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. 
code-block:: bash + + salt '*' pkg.list_pkgs + ''' + versions_as_list = salt.utils.is_true(versions_as_list) + # not yet implemented or not applicable + if any([salt.utils.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): + return {} + + cmd = 'xbps-query -l' + ret = {} + out = __salt__['cmd.run'](cmd, output_loglevel='trace') + for line in out.splitlines(): + if not line: + continue + try: + # xbps-query -l output sample: + # ii desktop-file-utils-0.22_4 Utilities to ... + # + # XXX handle package status (like 'ii') ? + pkg, ver = line.split(None)[1].rsplit('-', 1) + except ValueError: + log.error('xbps-query: Unexpected formatting in ' + 'line: "{0}"'.format(line)) + + __salt__['pkg_resource.add_pkg'](ret, pkg, ver) + + __salt__['pkg_resource.sort_pkglist'](ret) + if not versions_as_list: + __salt__['pkg_resource.stringify'](ret) + return ret + + +def list_upgrades(refresh=True): + ''' + Check whether or not an upgrade is available for all packages + + CLI Example: + + .. 
code-block:: bash + + salt '*' pkg.list_upgrades + ''' + + # sample output of 'xbps-install -un': + # fuse-2.9.4_4 update i686 http://repo.voidlinux.eu/current 298133 91688 + # xtools-0.34_1 update noarch http://repo.voidlinux.eu/current 21424 10752 + + refresh = salt.utils.is_true(refresh) + + # Refresh repo index before checking for latest version available + if refresh: + refresh_db() + + ret = {} + + # retrieve list of updatable packages + cmd = 'xbps-install -un' + out = __salt__['cmd.run'](cmd, output_loglevel='trace') + for line in out.splitlines(): + if not line: + continue + pkg = "base-system" + ver = "NonNumericValueIsError" + try: + pkg, ver = line.split()[0].rsplit('-', 1) + except (ValueError, IndexError): + log.error('xbps-query: Unexpected formatting in ' + 'line: "{0}"'.format(line)) + continue + + log.trace('pkg={0} version={1}'.format(pkg, ver)) + ret[pkg] = ver + + return ret + + +def latest_version(*names, **kwargs): + ''' + Return the latest version of the named package available for upgrade or + installation. If more than one package name is specified, a dict of + name/version pairs is returned. + + If the latest version of a given package is already installed, an empty + string will be returned for that package. + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.latest_version + salt '*' pkg.latest_version ... + ''' + + # Why using 'xbps-install -un' and not 'xbps-query -R': + # if several repos, xbps-query will produces this kind of output, + # that is difficult to handle correctly: + # [*] salt-2015.8.3_2 Remote execution system ... + # [-] salt-2015.8.3_1 Remote execution system ... + # + # XXX 'xbps-install -un pkg1 pkg2' won't produce any info on updatable pkg1 + # if pkg2 is up-to-date. Bug of xbps 0.51, probably get fixed in 0.52. 
+ # See related issue https://github.com/voidlinux/xbps/issues/145 + # + # sample outputs of 'xbps-install -un': + # fuse-2.9.4_4 update i686 http://repo.voidlinux.eu/current 298133 91688 + # xtools-0.34_1 update noarch http://repo.voidlinux.eu/current 21424 10752 + # Package 'vim' is up to date. + + refresh = salt.utils.is_true(kwargs.pop('refresh', True)) + + if len(names) == 0: + return '' + + # Refresh repo index before checking for latest version available + if refresh: + refresh_db() + + # Initialize the dict with empty strings + ret = {} + for name in names: + ret[name] = '' + + # retrieve list of updatable packages + # ignore return code since 'is up to date' case produces retcode==17 (xbps 0.51) + cmd = '{0} {1}'.format('xbps-install -un', ' '.join(names)) + out = __salt__['cmd.run'](cmd, ignore_retcode=True, + output_loglevel='trace') + for line in out.splitlines(): + if not line: + continue + if line.find(' is up to date.') != -1: + continue + # retrieve tuple pkgname version + try: + pkg, ver = line.split()[0].rsplit('-', 1) + except (ValueError, IndexError): + log.error('xbps-query: Unexpected formatting in ' + 'line: "{0}"'.format(line)) + continue + + log.trace('pkg={0} version={1}'.format(pkg, ver)) + if pkg in names: + ret[pkg] = ver + + # Return a string if only one package name passed + if len(names) == 1: + return ret[names[0]] + return ret + + +# available_version is being deprecated +available_version = latest_version + + +def upgrade_available(name): + ''' + Check whether or not an upgrade is available for a given package + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.upgrade_available + ''' + return latest_version(name) != '' + + +def refresh_db(): + ''' + Update list of available packages from installed repos + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. 
code-block:: bash + + salt '*' pkg.refresh_db + ''' + cmd = 'xbps-install -Sy' + call = __salt__['cmd.run_all'](cmd, output_loglevel='trace') + if call['retcode'] != 0: + comment = '' + if 'stderr' in call: + comment += call['stderr'] + + raise CommandExecutionError('{0}'.format(comment)) + + return {} + + +def version(*names, **kwargs): + ''' + Returns a string representing the package version or an empty string if not + installed. If more than one package name is specified, a dict of + name/version pairs is returned. + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.version + salt '*' pkg.version ... + ''' + return __salt__['pkg_resource.version'](*names, **kwargs) + + +def upgrade(refresh=True): + ''' + Run a full system upgrade + + refresh + Whether or not to refresh the package database before installing. + Default is `True`. + + Return a dict containing the new package names and versions:: + + {'': {'old': '', + 'new': ''}} + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.upgrade + ''' + + # XXX if xbps has to be upgraded, 2 times is required to fully upgrade system: + # one for xbps, a subsequent one for all other packages. + # Not handled in this code. + + old = list_pkgs() + + arg = "" + if refresh: + arg = "S" + + cmd = ' '.join(['xbps-install', ''.join(['-', arg, 'yu'])]) + + __salt__['cmd.run'](cmd, output_loglevel='trace') + __context__.pop('pkg.list_pkgs', None) + new = list_pkgs() + return salt.utils.compare_dicts(old, new) + + +def install(name=None, refresh=False, fromrepo=None, + pkgs=None, sources=None, **kwargs): + ''' + Install the passed package + + name + The name of the package to be installed. + + refresh + Whether or not to refresh the package database before installing. + + fromrepo + Specify a package repository (url) to install from. + + + Multiple Package Installation Options: + + pkgs + A list of packages to install from a software repository. 
Must be + passed as a python list. + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.install pkgs='["foo","bar"]' + + sources + A list of packages to install. Must be passed as a list of dicts, + with the keys being package names, and the values being the source URI + or local path to the package. + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]' + + Return a dict containing the new package names and versions:: + + {'': {'old': '', + 'new': ''}} + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.install + ''' + + # XXX sources is not yet used in this code + + try: + pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( + name, pkgs, sources, **kwargs + ) + except MinionError as exc: + raise CommandExecutionError(exc) + + if pkg_params is None or len(pkg_params) == 0: + return {} + + if pkg_type != 'repository': + log.error('xbps: pkg_type "{0}" not supported.'.format(pkg_type)) + return {} + + args = [] + + if refresh: + args.append('-S') # update repo db + if fromrepo: + args.append('--repository={0}'.format(fromrepo)) + args.append('-y') # assume yes when asked + args.extend(pkg_params) + + old = list_pkgs() + __salt__['cmd.run']( + '{0} {1}'.format('xbps-install', ' '.join(args)), + output_loglevel='trace' + ) + __context__.pop('pkg.list_pkgs', None) + new = list_pkgs() + + _rehash() + return salt.utils.compare_dicts(old, new) + + +def remove(name=None, pkgs=None, recursive=True, **kwargs): + ''' + name + The name of the package to be deleted. + + recursive + Also remove dependant packages (not required elsewhere). + Default mode: enabled. + + Multiple Package Options: + + pkgs + A list of packages to delete. Must be passed as a python list. The + ``name`` parameter will be ignored if this option is passed. + + Returns a list containing the removed packages. + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. 
code-block:: bash + + salt '*' pkg.remove [recursive=False] + salt '*' pkg.remove ,, [recursive=False] + salt '*' pkg.remove pkgs='["foo", "bar"]' [recursive=False] + ''' + + try: + pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( + name, pkgs + ) + except MinionError as exc: + raise CommandExecutionError(exc) + + if not pkg_params: + return {} + + old = list_pkgs() + + # keep only installed packages + targets = [x for x in pkg_params if x in old] + if not targets: + return {} + + cmd = ['xbps-remove', '-y'] + if recursive: + cmd.append('-R') + cmd.extend(targets) + __salt__['cmd.run'](cmd, output_loglevel='trace') + __context__.pop('pkg.list_pkgs', None) + new = list_pkgs() + + return salt.utils.compare_dicts(old, new) + + +def list_repos(): + ''' + List all repos known by XBPS + + .. versionadded:: XXX 201X.XX + + CLI Example: + + .. code-block:: bash + + salt '*' pkg.list_repos + ''' + repos = {} + out = __salt__['cmd.run']('xbps-query -L', output_loglevel='trace') + for line in out.splitlines(): + repo = {} + if not line: + continue + try: + nb, url, rsa = line.strip().split(' ', 2) + except ValueError: + log.error('Problem parsing xbps-query: Unexpected formatting in ' + 'line: "{0}"'.format(line)) + repo['nbpkg'] = int(nb) if nb.isdigit() else 0 + repo['url'] = url + repo['rsasigned'] = True if rsa == '(RSA signed)' else False + repos[repo['url']] = repo + return repos + + +def get_repo(repo, **kwargs): + ''' + Display information about the repo. + + .. versionadded:: XXX 201X.XX + + CLI Examples: + + .. code-block:: bash + + salt '*' pkg.get_repo 'repo-url' + ''' + repos = list_repos() + if repo in repos: + return repos[repo] + return {} + + +def _locate_repo_files(repo, rewrite=False): + ''' + Find what file a repo is called in. + + Helper function for add_repo() and del_repo() + + repo + url of the repo to locate (persistent). + + rewrite + Whether to remove matching repository settings during this process. 
+ + Returns a list of absolute paths. + ''' + + ret_val = [] + files = [] + conf_dirs = ['/etc/xbps.d/', '/usr/share/xbps.d/'] + name_glob = '*.conf' + # Matches a line where first printing is "repository" and there is an equals + # sign before the repo, an optional forwardslash at the end of the repo name, + # and it's possible for there to be a comment after repository=repo + regex = re.compile(r'\s*repository\s*=\s*'+repo+r'/?\s*(#.*)?$') + + for cur_dir in conf_dirs: + files.extend(glob.glob(cur_dir+name_glob)) + + for filename in files: + write_buff = [] + with salt.utils.fopen(filename, 'r') as cur_file: + for line in cur_file: + if regex.match(line): + ret_val.append(filename) + else: + write_buff.append(line) + if rewrite and filename in ret_val: + if len(write_buff) > 0: + with salt.utils.fopen(filename, 'w') as rewrite_file: + rewrite_file.write("".join(write_buff)) + else: # Prune empty files + os.remove(filename) + + return ret_val + + +def add_repo(repo, conffile='/usr/share/xbps.d/15-saltstack.conf'): + ''' + Add an XBPS repository to the system. + + repo + url of repo to add (persistent). + + conffile + path to xbps conf file to add this repo + default: /usr/share/xbps.d/15-saltstack.conf + + .. versionadded:: XXX 201X.XX + + CLI Examples: + + .. code-block:: bash + + salt '*' pkg.add_repo [conffile=/path/to/xbps/repo.conf] + ''' + + if len(_locate_repo_files(repo)) == 0: + try: + with salt.utils.fopen(conffile, 'a+') as conf_file: + conf_file.write('repository='+repo+'\n') + except IOError: + return False + + return True + + +def del_repo(repo): + ''' + Remove an XBPS repository from the system. + + repo + url of repo to remove (persistent). + + .. versionadded:: XXX 201X.XX + + CLI Examples: + + .. 
code-block:: bash + + salt '*' pkg.del_repo + ''' + + try: + _locate_repo_files(repo, rewrite=True) + except IOError: + return False + else: + return True + + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From a60522daf8ff8475d670af28f6da3c95bb35773f Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Sat, 20 Feb 2016 17:38:03 +0300 Subject: [PATCH 28/65] Set auth retry count to 0 if multimaster mode is failover --- salt/minion.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/minion.py b/salt/minion.py index 365fce3c35..d4b4e67c7a 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -422,6 +422,7 @@ class MinionBase(object): ' {0}'.format(opts['master'])) if opts['master_shuffle']: shuffle(opts['master']) + opts['auth_tries'] = 0 # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], str) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. From 1f8276f2bea1d97ede54232112b34ddb290c730f Mon Sep 17 00:00:00 2001 From: Ilia G Akilov Date: Sat, 20 Feb 2016 14:50:57 -0500 Subject: [PATCH 29/65] minor bug fix, function name replaced with the call --- salt/utils/gitfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index b6614e93ca..848ce32a30 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -916,7 +916,7 @@ class Pygit2(GitProvider): # https://github.com/libgit2/libgit2/issues/2122 if "Error stat'ing config file" not in str(exc): raise - home = pwd.getpwnam(salt.utils.get_user).pw_dir + home = pwd.getpwnam(salt.utils.get_user()).pw_dir pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home self.repo = pygit2.Repository(self.cachedir) except KeyError: From c0ac0c0096a9a41d9927575f71d68cf877b14628 Mon Sep 17 00:00:00 2001 From: Simon Gomizelj Date: Sat, 20 Feb 2016 20:24:24 -0500 Subject: [PATCH 30/65] Fix check_table The output of 'nft list tables' has the family included in 
the output --- salt/modules/nftables.py | 2 +- tests/unit/modules/nftables_test.py | 43 ++++++++++++++--------------- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/salt/modules/nftables.py b/salt/modules/nftables.py index 4419ab6505..7c55ec91b9 100644 --- a/salt/modules/nftables.py +++ b/salt/modules/nftables.py @@ -482,7 +482,7 @@ def check_table(table=None, family='ipv4'): nft_family = _NFTABLES_FAMILIES[family] cmd = '{0} list tables {1}' . format(_nftables_cmd(), nft_family) - out = __salt__['cmd.run'](cmd, python_shell=False).find('table {0}'.format(table)) + out = __salt__['cmd.run'](cmd, python_shell=False).find('table {0} {1}'.format(nft_family, table)) if out != -1: out = '' diff --git a/tests/unit/modules/nftables_test.py b/tests/unit/modules/nftables_test.py index b1798e0483..e8d5cb5307 100644 --- a/tests/unit/modules/nftables_test.py +++ b/tests/unit/modules/nftables_test.py @@ -136,7 +136,7 @@ class NftablesTestCase(TestCase): ret) ret = 'Error: chain input in table filter in family ipv4 does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.get_rule_handle(chain='input', rule=_ru), ret) @@ -178,16 +178,16 @@ class NftablesTestCase(TestCase): with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.check(chain='input', rule=_ru), ret) - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') ret = 'Error: chain input in table filter in family ipv4 does not exist' with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.check(chain='input', rule=_ru), ret) - mock = MagicMock(return_value='table filter chain input {{') + mock = MagicMock(return_value='table ip filter chain input {{') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertFalse(nftables.check(chain='input', rule=_ru)) - r_val = 'table 
filter chain input {{ input tcp dport 22 log accept #' + r_val = 'table ip filter chain input {{ input tcp dport 22 log accept #' mock = MagicMock(return_value=r_val) with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertTrue(nftables.check(chain='input', rule=_ru)) @@ -222,7 +222,7 @@ class NftablesTestCase(TestCase): with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertFalse(nftables.check_table(table='nat')) - mock = MagicMock(return_value='table nat') + mock = MagicMock(return_value='table ip nat') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertTrue(nftables.check_table(table='nat')) @@ -235,16 +235,15 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.new_table(table=None), 'Error: table needs to be specified') - mock = MagicMock(return_value='table nat') + mock = MagicMock(return_value='') + with patch.dict(nftables.__salt__, {'cmd.run': mock}): + self.assertEqual(nftables.new_table(table='nat'), True) + + mock = MagicMock(return_value='table ip nat') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.new_table(table='nat'), 'Error: table nat in family ipv4 already exists') - mock = MagicMock(return_value='nft add table ip nat') - with patch.dict(nftables.__salt__, {'cmd.run': mock}): - self.assertEqual(nftables.new_table(table='nat'), - 'nft add table ip nat') - # 'delete_table' function tests: 1 def test_delete_table(self): @@ -259,9 +258,9 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.delete_table(table='nat'), 'Error: table nat in family ipv4 does not exist') - mock = MagicMock(return_value='table nat') + mock = MagicMock(return_value='table ip nat') with patch.dict(nftables.__salt__, {'cmd.run': mock}): - self.assertEqual(nftables.delete_table(table='nat'), 'table nat') + self.assertEqual(nftables.delete_table(table='nat'), 'table ip nat') # 'new_chain' function tests: 2 @@ -278,7 +277,7 @@ class NftablesTestCase(TestCase): 
self.assertEqual(nftables.new_chain(chain='input'), ret) ret = 'Error: chain input in table filter in family ipv4 already exists' - mock = MagicMock(return_value='table filter chain input {{') + mock = MagicMock(return_value='table ip filter chain input {{') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.new_chain(chain='input'), ret) @@ -313,7 +312,7 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.delete_chain(chain='input'), ret) ret = 'Error: chain input in table filter in family ipv4 does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.delete_chain(chain='input'), ret) @@ -346,11 +345,11 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.append(chain='input', rule=_ru), ret) ret = 'Error: chain input in table filter in family ipv4 does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.append(chain='input', rule=_ru), ret) - r_val = 'table filter chain input {{ input tcp dport 22 log accept #' + r_val = 'table ip filter chain input {{ input tcp dport 22 log accept #' mock = MagicMock(return_value=r_val) with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertTrue(nftables.append(chain='input', rule=_ru)) @@ -388,11 +387,11 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.insert(chain='input', rule=_ru), ret) ret = 'Error: chain input in table filter in family ipv4 does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.insert(chain='input', rule=_ru), ret) - r_val = 'table filter chain input {{ input tcp dport 22 log accept #' + r_val = 'table ip filter chain 
input {{ input tcp dport 22 log accept #' mock = MagicMock(return_value=r_val) with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertTrue(nftables.insert(chain='input', rule=_ru)) @@ -431,12 +430,12 @@ class NftablesTestCase(TestCase): rule=_ru), ret) ret = 'Error: chain input in table filter in family ipv4 does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.delete(table='filter', chain='input', rule=_ru), ret) - mock = MagicMock(return_value='table filter chain input {{') + mock = MagicMock(return_value='table ip filter chain input {{') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertTrue(nftables.delete(table='filter', chain='input', rule=_ru)) @@ -470,7 +469,7 @@ class NftablesTestCase(TestCase): self.assertEqual(nftables.flush(table='filter', chain='input'), ret) ret = 'Error: chain input in table filter in family ip does not exist' - mock = MagicMock(return_value='table filter') + mock = MagicMock(return_value='table ip filter') with patch.dict(nftables.__salt__, {'cmd.run': mock}): self.assertEqual(nftables.flush(table='filter', chain='input'), ret) From 963e12ddc2b15af824d45d882675026529616965 Mon Sep 17 00:00:00 2001 From: Simon Gomizelj Date: Sat, 20 Feb 2016 20:42:12 -0500 Subject: [PATCH 31/65] Add missing nftables families The inet family a special hybrid ipv4+ipv6 table, netdev for filtering from ingress. 
--- salt/modules/nftables.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/nftables.py b/salt/modules/nftables.py index 7c55ec91b9..ae4222e053 100644 --- a/salt/modules/nftables.py +++ b/salt/modules/nftables.py @@ -25,8 +25,10 @@ _NFTABLES_FAMILIES = { 'ip': 'ip', 'ipv6': 'ip6', 'ip6': 'ip6', + 'inet': 'inet', 'arp': 'arp', - 'bridge': 'bridge' + 'bridge': 'bridge', + 'netdev': 'netdev' } From cfab276324b2f247c20140aa1813c11141e8f768 Mon Sep 17 00:00:00 2001 From: kstreee Date: Sun, 21 Feb 2016 15:59:59 +0900 Subject: [PATCH 32/65] [saltstack/salt#31194] Make the code simple because IndexError exception has very rare chance to be raised at 'os.path.abspath(__file__).rsplit('/')[-2]', advised by @whiteinge. --- salt/netapi/rest_cherrypy/__init__.py | 6 +----- salt/netapi/rest_tornado/__init__.py | 5 +---- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/salt/netapi/rest_cherrypy/__init__.py b/salt/netapi/rest_cherrypy/__init__.py index 157546bf0c..ed87442af9 100644 --- a/salt/netapi/rest_cherrypy/__init__.py +++ b/salt/netapi/rest_cherrypy/__init__.py @@ -20,11 +20,7 @@ try: except ImportError as exc: cpy_error = exc - -try: - __virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] -except IndexError: - __virtualname__ = 'rest_cherrypy' +__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy' logger = logging.getLogger(__virtualname__) cpy_min = '3.2.2' diff --git a/salt/netapi/rest_tornado/__init__.py b/salt/netapi/rest_tornado/__init__.py index 85992e87ee..7522267c6c 100644 --- a/salt/netapi/rest_tornado/__init__.py +++ b/salt/netapi/rest_tornado/__init__.py @@ -6,10 +6,7 @@ import logging import os import distutils.version # pylint: disable=no-name-in-module -try: - __virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] -except IndexError: - __virtualname__ = 'rest_tornado' +__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado' logger = 
logging.getLogger(__virtualname__) From 53e0b56b13dad4a1a9d767cec4c7909bd6a25427 Mon Sep 17 00:00:00 2001 From: kstreee Date: Mon, 22 Feb 2016 09:48:53 +0900 Subject: [PATCH 33/65] [saltstack/salt#31194] Add documentation about running multiple salt-api modules, advised by @whiteinge. --- doc/topics/netapi/writing.rst | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/doc/topics/netapi/writing.rst b/doc/topics/netapi/writing.rst index a0665a2f67..32bbb8f84c 100644 --- a/doc/topics/netapi/writing.rst +++ b/doc/topics/netapi/writing.rst @@ -40,6 +40,18 @@ The ``start()`` function will be called for each :py:mod:`~salt.netapi` module that is loaded. This function should contain the server loop that actually starts the service. This is started in a multiprocess. +Multiple instances +================== + +.. versionadded:: Boron + +:py:mod:`~salt.netapi.rest_cherrypy` and :py:mod:`~salt.netapi.rest_tornado` +support running multiple instances by copying and renaming entire directory +of those. To start the copied multiple :py:mod:`~salt.netapi` modules, add +configuration blocks for the copied :py:mod:`~salt.netapi` modules in the +Salt Master config. The name of each added configuration block must match +with the name of each directory of the copied :py:mod:`~salt.netapi` module. + Inline documentation ==================== @@ -51,4 +63,4 @@ Loader “magic” methods ====================== The loader makes the ``__opts__`` data structure available to any function in -a :py:mod:`~salt.netapi` module. \ No newline at end of file +a :py:mod:`~salt.netapi` module. 
From 12a0c764489a69a0fc516e0dba1443bea89d712f Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Mon, 22 Feb 2016 18:50:42 +0530 Subject: [PATCH 34/65] Updated Windows Fullname and Organization for vmware driver --- doc/topics/cloud/vmware.rst | 9 ++++++++- salt/cloud/clouds/vmware.py | 7 +++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst index fde5d9ef65..4fa77fbac9 100644 --- a/doc/topics/cloud/vmware.rst +++ b/doc/topics/cloud/vmware.rst @@ -193,6 +193,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or win_organization_name: ABC-Corp plain_text: True win_installer: /root/Salt-Minion-2015.8.4-AMD64-Setup.exe + win_user_fullname: Windows User ``provider`` Enter the name that was specified when the cloud provider config was created. @@ -475,7 +476,13 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or During network configuration (if network specified), it is used to specify new administrator password for the machine. ``win_organization_name`` - Specify windows vm user's organization. + Specify windows vm user's organization. Default organization name is blank + VMware vSphere documentation: + + https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.customization.UserData.html + +``win_user_fullname`` + Specify windows vm user's fullname. 
Default fullname is "Windows User" VMware vSphere documentation: https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.customization.UserData.html diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 7b66d2b950..542f327d92 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -2238,11 +2238,14 @@ def create(vm_): 'win_password', vm_, __opts__, search_global=False, default=None ) win_organization_name = config.get_cloud_config_value( - 'win_organization_name', vm_, __opts__, search_global=False, default='organization' + 'win_organization_name', vm_, __opts__, search_global=False, default='' ) plain_text = config.get_cloud_config_value( 'plain_text', vm_, __opts__, search_global=False, default=False ) + win_user_fullname = config.get_cloud_config_value( + 'win_user_fullname', vm_, __opts__, search_global=False, default='Windows User' + ) if 'clonefrom' in vm_: # If datacenter is specified, set the container reference to start search from it instead @@ -2427,7 +2430,7 @@ def create(vm_): identity.guiUnattended.password.value = win_password identity.guiUnattended.password.plainText = plain_text identity.userData = vim.vm.customization.UserData() - identity.userData.fullName = domainName if hostName != domainName else domain + identity.userData.fullName = win_user_fullname identity.userData.orgName = win_organization_name identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = hostName From 04b3a24e78f6ce93bc201935300bf17b45c71086 Mon Sep 17 00:00:00 2001 From: Kris Raney Date: Mon, 22 Feb 2016 10:58:23 -0600 Subject: [PATCH 35/65] Update unit tests to reflect changes --- tests/unit/states/boto_iam_role_test.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/unit/states/boto_iam_role_test.py b/tests/unit/states/boto_iam_role_test.py index ab9b1f3337..03a2820c2a 100644 --- 
a/tests/unit/states/boto_iam_role_test.py +++ b/tests/unit/states/boto_iam_role_test.py @@ -97,6 +97,7 @@ class BotoIAMRoleTestCase(TestCase): 'boto_iam.build_policy': mock_policy, 'boto_iam.update_assume_role_policy': mock_bool, 'boto_iam.instance_profile_exists': mock_ipe, + 'boto_iam.list_attached_role_policies': mock_lst, 'boto_iam.create_instance_profile': mock_bool, 'boto_iam.profile_associated': mock_pa, 'boto_iam.associate_profile_to_role': mock_bool, @@ -121,7 +122,7 @@ class BotoIAMRoleTestCase(TestCase): ret.update({'comment': comt}) self.assertDictEqual(boto_iam_role.present(name), ret) - comt = (' myrole role present. ') + comt = (' myrole role present. ') ret.update({'comment': comt, 'result': True}) self.assertDictEqual(boto_iam_role.present(name), ret) @@ -142,12 +143,14 @@ class BotoIAMRoleTestCase(TestCase): False, False, True, False, False, False, True]) mock_bool = MagicMock(return_value=False) + mock_lst = MagicMock(return_value=[]) with patch.dict(boto_iam_role.__salt__, {'boto_iam.list_role_policies': mock, 'boto_iam.delete_role_policy': mock_bool, 'boto_iam.profile_associated': mock, 'boto_iam.disassociate_profile_from_role': mock_bool, 'boto_iam.instance_profile_exists': mock, + 'boto_iam.list_attached_role_policies': mock_lst, 'boto_iam.delete_instance_profile': mock_bool, 'boto_iam.role_exists': mock, 'boto_iam.delete_role': mock_bool}): @@ -158,17 +161,20 @@ class BotoIAMRoleTestCase(TestCase): 'old': {'policies': ['mypolicy']}}}) self.assertDictEqual(boto_iam_role.absent(name), ret) - comt = (' No policies in role myrole. Failed to disassociate ' + comt = (' No policies in role myrole.' + ' No attached policies in role myrole. Failed to disassociate ' 'myrole instance profile from myrole role.') ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(boto_iam_role.absent(name), ret) - comt = (' No policies in role myrole. ' - 'Failed to delete myrole instance profile.') + comt = (' No policies in role myrole.' 
+ ' No attached policies in role myrole. ' + ' Failed to delete myrole instance profile.') ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(boto_iam_role.absent(name), ret) - comt = (' No policies in role myrole. myrole instance profile ' + comt = (' No policies in role myrole.' + ' No attached policies in role myrole. myrole instance profile ' 'does not exist. Failed to delete myrole iam role.') ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(boto_iam_role.absent(name), ret) From 815c33e993ce9cd879e4728d6f71d81eb2e004ca Mon Sep 17 00:00:00 2001 From: David Ford Date: Sat, 13 Feb 2016 20:05:08 +0000 Subject: [PATCH 36/65] be more consistent with logging.warning vs logging.warn (deprecated) and quoting style --- salt/auth/ldap.py | 2 +- salt/cli/daemons.py | 14 ++++----- salt/cloud/__init__.py | 10 +++---- salt/cloud/clouds/aliyun.py | 2 +- salt/cloud/clouds/dimensiondata.py | 6 ++-- salt/cloud/clouds/ec2.py | 12 ++++---- salt/cloud/clouds/joyent.py | 2 +- salt/cloud/clouds/nova.py | 8 ++--- salt/cloud/clouds/openstack.py | 8 ++--- salt/cloud/clouds/rackspace.py | 6 ++-- salt/config/__init__.py | 48 +++++++++++++++--------------- salt/daemons/masterapi.py | 10 +++---- salt/fileclient.py | 2 +- salt/fileserver/__init__.py | 2 +- salt/fileserver/s3fs.py | 8 ++--- salt/grains/core.py | 10 +++---- salt/grains/extra.py | 2 +- salt/log/setup.py | 10 +++---- salt/master.py | 6 ++-- salt/minion.py | 2 +- salt/modules/archive.py | 4 +-- salt/modules/bcache.py | 8 ++--- salt/modules/boto_elb.py | 10 +++---- salt/modules/dig.py | 12 ++++---- salt/modules/disk.py | 2 +- salt/modules/dockerng.py | 2 +- salt/modules/ssh.py | 2 +- salt/modules/sysfs.py | 2 +- salt/modules/tls.py | 2 +- salt/modules/win_task.py | 2 +- salt/modules/zcbuildout.py | 4 +-- salt/pillar/pepa.py | 4 +-- salt/pillar/pillar_ldap.py | 2 +- salt/pillar/stack.py | 2 +- salt/renderers/gpg.py | 2 +- salt/renderers/yaml.py | 2 +- salt/renderers/yamlex.py | 2 +- 
salt/returners/local_cache.py | 2 +- salt/runner.py | 2 +- salt/runners/http.py | 2 +- salt/runners/lxc.py | 6 ++-- salt/runners/virt.py | 4 +-- salt/scripts.py | 4 +-- salt/states/boto_s3_bucket.py | 2 +- salt/states/cyg.py | 2 +- salt/states/saltmod.py | 2 +- salt/states/zcbuildout.py | 4 +-- salt/template.py | 2 +- salt/transport/mixins/auth.py | 2 +- salt/utils/cloud.py | 2 +- salt/utils/http.py | 10 +++---- salt/utils/master.py | 2 +- salt/utils/schedule.py | 2 +- salt/utils/verify.py | 8 ++--- 54 files changed, 146 insertions(+), 146 deletions(-) diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py index cb2d7b48cf..f2ca0de79a 100644 --- a/salt/auth/ldap.py +++ b/salt/auth/ldap.py @@ -176,7 +176,7 @@ def _bind(username, password, anonymous=False): ) result = _ldap.search_s(basedn, int(scope), paramvalues['filter']) if len(result) < 1: - log.warn('Unable to find user {0}'.format(username)) + log.warning('Unable to find user {0}'.format(username)) return False elif len(result) > 1: # Active Directory returns something odd. 
Though we do not diff --git a/salt/cli/daemons.py b/salt/cli/daemons.py index 03e8e5f2fe..a59c0c0a66 100644 --- a/salt/cli/daemons.py +++ b/salt/cli/daemons.py @@ -305,9 +305,9 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init log.info('The salt minion is starting up') self.minion.tune_in() except (KeyboardInterrupt, SaltSystemExit) as exc: - log.warn('Stopping the Salt Minion') + log.warning('Stopping the Salt Minion') if isinstance(exc, KeyboardInterrupt): - log.warn('Exiting on Ctrl-c') + log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(exc)) @@ -333,9 +333,9 @@ class Minion(parsers.MinionOptionParser): # pylint: disable=no-init self.minion.opts['raet_cleanup_protecteds'] = cleanup_protecteds self.minion.call_in() except (KeyboardInterrupt, SaltSystemExit) as exc: - log.warn('Stopping the Salt Minion') + log.warning('Stopping the Salt Minion') if isinstance(exc, KeyboardInterrupt): - log.warn('Exiting on Ctrl-c') + log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(exc)) @@ -479,9 +479,9 @@ class ProxyMinion(parsers.ProxyMinionOptionParser): # pylint: disable=no-init log.info('The proxy minion is starting up') self.minion.tune_in() except (KeyboardInterrupt, SaltSystemExit) as exc: - log.warn('Stopping the Salt Proxy Minion') + log.warning('Stopping the Salt Proxy Minion') if isinstance(exc, KeyboardInterrupt): - log.warn('Exiting on Ctrl-c') + log.warning('Exiting on Ctrl-c') self.shutdown() else: log.error(str(exc)) @@ -577,7 +577,7 @@ class Syndic(parsers.SyndicOptionParser): try: self.syndic.tune_in() except KeyboardInterrupt: - log.warn('Stopping the Salt Syndic Minion') + log.warning('Stopping the Salt Syndic Minion') self.shutdown() def shutdown(self, exitcode=0, exitmsg=None): diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index 690fadb915..0fea2031ef 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -1201,7 +1201,7 @@ class Cloud(object): if deploy: if not 
make_master and 'master' not in minion_dict: - log.warn( + log.warning( 'There\'s no master defined on the {0!r} VM settings.'.format( vm_['name'] ) @@ -1595,7 +1595,7 @@ class Cloud(object): fun = '{0}.get_configured_provider'.format(driver) if fun not in self.clouds: # Mis-configured provider that got removed? - log.warn( + log.warning( 'The cloud driver, \'{0}\', configured under the ' '\'{1}\' cloud provider alias, could not be loaded. ' 'Please check your provider configuration files and ' @@ -1621,7 +1621,7 @@ class Cloud(object): __active_provider_name__=':'.join([alias, driver]) ): if self.clouds[fun]() is False: - log.warn( + log.warning( 'The cloud driver, \'{0}\', configured under the ' '\'{1}\' cloud provider alias is not properly ' 'configured. Removing it from the available ' @@ -1894,7 +1894,7 @@ class Map(Cloud): 'requires'): deprecated = 'map_{0}'.format(setting) if deprecated in overrides: - log.warn( + log.warning( 'The use of \'{0}\' on the \'{1}\' mapping has ' 'been deprecated. The preferred way now is to ' 'just define \'{2}\'. 
For now, salt-cloud will do ' @@ -1952,7 +1952,7 @@ class Map(Cloud): # Machine already removed break - log.warn('\'{0}\' already exists, removing from ' + log.warning("'{0}' already exists, removing from " 'the create map.'.format(name)) if 'existing' not in ret: diff --git a/salt/cloud/clouds/aliyun.py b/salt/cloud/clouds/aliyun.py index 14fb6306b9..41548ce6a2 100644 --- a/salt/cloud/clouds/aliyun.py +++ b/salt/cloud/clouds/aliyun.py @@ -313,7 +313,7 @@ def list_nodes_full(call=None): } items = query(params=params) if 'Code' in items: - log.warn('Query instance:{0} attribute failed'.format(instanceId)) + log.warning('Query instance:{0} attribute failed'.format(instanceId)) continue ret[instanceId] = { diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py index 415c952d48..16a018462d 100644 --- a/salt/cloud/clouds/dimensiondata.py +++ b/salt/cloud/clouds/dimensiondata.py @@ -203,17 +203,17 @@ def create(vm_): public = node['public_ips'] if private and not public: - log.warn( + log.warning( 'Private IPs returned, but not public... Checking for ' 'misidentified IPs' ) for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if salt.utils.cloud.is_public_ip(private_ip): - log.warn('%s is a public IP', private_ip) + log.warning('%s is a public IP', private_ip) data.public_ips.append(private_ip) else: - log.warn('%s is a private IP', private_ip) + log.warning('%s is a private IP', private_ip) if private_ip not in data.private_ips: data.private_ips.append(private_ip) diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index 546c682adb..868e804df8 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -1859,7 +1859,7 @@ def request_instance(vm_=None, call=None): return False if isinstance(data, dict) and 'error' in data: - log.warn( + log.warning( 'There was an error in the query. 
{0}' .format(data['error']) ) @@ -1976,7 +1976,7 @@ def query_instance(vm_=None, call=None): log.debug('The query returned: {0}'.format(data)) if isinstance(data, dict) and 'error' in data: - log.warn( + log.warning( 'There was an error in the query. {0} attempts ' 'remaining: {1}'.format( attempts, data['error'] @@ -1988,7 +1988,7 @@ def query_instance(vm_=None, call=None): continue if isinstance(data, list) and not data: - log.warn( + log.warning( 'Query returned an empty list. {0} attempts ' 'remaining.'.format(attempts) ) @@ -2018,7 +2018,7 @@ def query_instance(vm_=None, call=None): return False if isinstance(data, dict) and 'error' in data: - log.warn( + log.warning( 'There was an error in the query. {0}'.format(data['error']) ) # Trigger a failure in the wait for IP function @@ -2824,7 +2824,7 @@ def set_tags(name=None, break if failed_to_set_tags: - log.warn( + log.warning( 'Failed to set tags. Remaining attempts {0}'.format( attempts ) @@ -3832,7 +3832,7 @@ def __attach_vol_to_instance(params, kws, instance_id): opts=__opts__, sigver='4') if data[0]: - log.warn( + log.warning( ('Error attaching volume {0} ' 'to instance {1}. 
Retrying!').format(kws['volume_id'], instance_id)) diff --git a/salt/cloud/clouds/joyent.py b/salt/cloud/clouds/joyent.py index ac02f16575..340ed145d0 100644 --- a/salt/cloud/clouds/joyent.py +++ b/salt/cloud/clouds/joyent.py @@ -197,7 +197,7 @@ def query_instance(vm_=None, call=None): return False if isinstance(data, dict) and 'error' in data: - log.warn( + log.warning( 'There was an error in the query {0}'.format(data['error']) # pylint: disable=E1126 ) # Trigger a failure in the wait for IP function diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py index 6732998fb7..7eaafdd3f9 100644 --- a/salt/cloud/clouds/nova.py +++ b/salt/cloud/clouds/nova.py @@ -808,16 +808,16 @@ def create(vm_): private = node.get('private_ips', []) public = node.get('public_ips', []) if private and not public: - log.warn( + log.warning( 'Private IPs returned, but not public... Checking for ' 'misidentified IPs' ) for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if salt.utils.cloud.is_public_ip(private_ip): - log.warn('{0} is a public IP'.format(private_ip)) + log.warning('{0} is a public IP'.format(private_ip)) data.public_ips.append(private_ip) - log.warn( + log.warning( ( 'Public IP address was not ready when we last' ' checked. Appending public IP address now.' @@ -825,7 +825,7 @@ def create(vm_): ) public = data.public_ips else: - log.warn('{0} is a private IP'.format(private_ip)) + log.warning('{0} is a private IP'.format(private_ip)) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py index 880c543cb8..53e26381a5 100644 --- a/salt/cloud/clouds/openstack.py +++ b/salt/cloud/clouds/openstack.py @@ -688,22 +688,22 @@ def create(vm_): result = [] private = node['private_ips'] if private and not public: - log.warn( + log.warning( 'Private IPs returned, but not public... 
Checking for ' 'misidentified IPs' ) for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if salt.utils.cloud.is_public_ip(private_ip): - log.warn('{0} is a public IP'.format(private_ip)) + log.warning('{0} is a public IP'.format(private_ip)) data.public_ips.append(private_ip) - log.warn( + log.warning( 'Public IP address was not ready when we last checked.' ' Appending public IP address now.' ) public = data.public_ips else: - log.warn('{0} is a private IP'.format(private_ip)) + log.warning('{0} is a private IP'.format(private_ip)) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) diff --git a/salt/cloud/clouds/rackspace.py b/salt/cloud/clouds/rackspace.py index df1252e626..7a95e16c08 100644 --- a/salt/cloud/clouds/rackspace.py +++ b/salt/cloud/clouds/rackspace.py @@ -280,17 +280,17 @@ def create(vm_): public = node['public_ips'] if private and not public: - log.warn( + log.warning( 'Private IPs returned, but not public... Checking for ' 'misidentified IPs' ) for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if salt.utils.cloud.is_public_ip(private_ip): - log.warn('{0} is a public IP'.format(private_ip)) + log.warning('{0} is a public IP'.format(private_ip)) data.public_ips.append(private_ip) else: - log.warn('{0} is a private IP'.format(private_ip)) + log.warning('{0} is a private IP'.format(private_ip)) if private_ip not in data.private_ips: data.private_ips.append(private_ip) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index b46e7e34ab..49544cb12f 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -1506,9 +1506,9 @@ def load_config(path, env_var, default_path=None): import inspect previous_frame = inspect.getframeinfo(inspect.currentframe().f_back) log.warning( - 'The function \'{0}()\' defined in \'{1}\' is not yet using the ' - 'new \'default_path\' argument to `salt.config.load_config()`. 
' - 'As such, the \'{2}\' environment variable will be ignored'.format( + "The function '{0}()' defined in '{1}' is not yet using the " + "new 'default_path' argument to `salt.config.load_config()`. " + "As such, the '{2}' environment variable will be ignored".format( previous_frame.function, previous_frame.filename, env_var ) ) @@ -1574,13 +1574,13 @@ def include_config(include, orig_path, verbose): # for empty include directory (which might be by design) if len(glob.glob(path)) == 0: if verbose: - log.warn( + log.warning( 'Warning parsing configuration file: "include" path/glob ' - '\'{0}\' matches no files'.format(path) + "'{0}' matches no files".format(path) ) for fn_ in sorted(glob.glob(path)): - log.debug('Including configuration from \'{0}\''.format(fn_)) + log.debug("Including configuration from '{0}'".format(fn_)) opts = _read_conf_file(fn_) include = opts.get('include', []) @@ -2289,7 +2289,7 @@ def apply_cloud_providers_config(overrides, defaults=None): # Is the user still using the old format in the new configuration file?! for name, settings in six.iteritems(config.copy()): if '.' in name: - log.warn( + log.warning( 'Please switch to the new providers configuration syntax' ) @@ -2319,7 +2319,7 @@ def apply_cloud_providers_config(overrides, defaults=None): if 'extends' not in details: log.error( 'Please check your cloud providers configuration. ' - 'There\'s no \'driver\', \'provider\', nor \'extends\' ' + "There's no 'driver', 'provider', nor 'extends' " 'definition referenced.' ) continue @@ -2489,9 +2489,9 @@ def apply_cloud_providers_config(overrides, defaults=None): continue log.info( - 'There\'s at least one cloud driver under the \'{0}\' ' + "There's at least one cloud driver under the '{0}' " 'cloud provider alias which does not have the required ' - '\'driver\' setting. Removing it from the available ' + "'driver' setting. 
Removing it from the available " 'providers listing.'.format( provider_alias ) @@ -2553,10 +2553,10 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True): # and there's more than one entry under the alias. # WARN the user!!!! log.error( - 'The \'{0}\' cloud provider definition has more than one ' + "The '{0}' cloud provider definition has more than one " 'entry. Your VM configuration should be specifying the ' - 'provider as \'driver: {0}:\'. Since ' - 'it\'s not, we\'re returning the first definition which ' + "provider as 'driver: {0}:'. Since " + "it's not, we're returning the first definition which " 'might not be what you intended.'.format( vm_['driver'] ) @@ -2603,9 +2603,9 @@ def is_provider_configured(opts, provider, required_keys=()): # There's at least one require configuration key which is not # set. log.warning( - 'The required \'{0}\' configuration setting is missing ' - 'from the \'{1}\' driver, which is configured under the ' - '\'{2}\' alias.'.format(key, provider, alias) + "The required '{0}' configuration setting is missing " + "from the '{1}' driver, which is configured under the " + "'{2}' alias.".format(key, provider, alias) ) return False # If we reached this far, there's a properly configured provider. @@ -2625,9 +2625,9 @@ def is_provider_configured(opts, provider, required_keys=()): # This provider does not include all necessary keys, # continue to next one. log.warning( - 'The required \'{0}\' configuration setting is ' - 'missing from the \'{1}\' driver, which is configured ' - 'under the \'{2}\' alias.'.format( + "The required '{0}' configuration setting is " + "missing from the '{1}' driver, which is configured " + "under the '{2}' alias.".format( key, provider, alias ) ) @@ -2706,8 +2706,8 @@ def is_profile_configured(opts, provider, profile_name, vm_=None): if profile_key.get(item, None) is None: # There's at least one required configuration item which is not set. 
log.error( - 'The required \'{0}\' configuration setting is missing from ' - 'the \'{1}\' profile, which is configured under the \'{2}\' ' + "The required '{0}' configuration setting is missing from " + "the '{1}' profile, which is configured under the '{2}' " 'alias.'.format(item, profile_name, alias) ) return False @@ -2731,8 +2731,8 @@ def check_driver_dependencies(driver, dependencies): for key, value in six.iteritems(dependencies): if value is False: log.warning( - 'Missing dependency: \'{0}\'. The {1} driver requires ' - '\'{0}\' to be installed.'.format( + "Missing dependency: '{0}'. The {1} driver requires " + "'{0}' to be installed.".format( key, driver ) @@ -3004,7 +3004,7 @@ def apply_master_config(overrides=None, defaults=None): # to make `salt.modules.publish` not work under the test-suite. if opts['worker_threads'] < 3 and opts.get('peer', None): log.warning( - 'The \'worker_threads\' setting on \'{0}\' cannot be lower than ' + "The 'worker_threads' setting on '{0}' cannot be lower than " '3. Resetting it to the default value of 3.'.format( opts['conf_file'] ) diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 99c7434800..0770744895 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -336,7 +336,7 @@ class AutoKey(object): if not self.check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring content' - log.warn(message.format(signing_file)) + log.warning(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r') as fp_: @@ -364,7 +364,7 @@ class AutoKey(object): stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file) if mtime < min_time: - log.warn('Autosign keyid expired {0}'.format(stub_file)) + log.warning('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) @@ -837,7 +837,7 @@ class RemoteFuncs(object): if not good: # The minion is not who it says it is! 
# We don't want to listen to it! - log.warn( + log.warning( 'Minion id {0} is not who it says it is!'.format( load['id'] ) @@ -962,7 +962,7 @@ class RemoteFuncs(object): except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['tmo']) - log.warn(msg) + log.warning(msg) return {} if 'timeout' in load: try: @@ -970,7 +970,7 @@ class RemoteFuncs(object): except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['timeout']) - log.warn(msg) + log.warning(msg) return {} if 'tgt_type' in load: if load['tgt_type'].startswith('node'): diff --git a/salt/fileclient.py b/salt/fileclient.py index e1c5013b25..2df655fd7d 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -945,7 +945,7 @@ class RemoteClient(Client): d_tries += 1 hsum = salt.utils.get_hash(dest, data.get('hash_type', 'md5')) if hsum != data['hsum']: - log.warn('Bad download of file {0}, attempt {1} ' + log.warning('Bad download of file {0}, attempt {1} ' 'of 3'.format(path, d_tries)) continue break diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py index 8fa7fd1de8..d2df0c8b46 100644 --- a/salt/fileserver/__init__.py +++ b/salt/fileserver/__init__.py @@ -231,7 +231,7 @@ def reap_fileserver_cache_dir(cache_base, find_func): try: filename, _, hash_type = file_rel_path.rsplit('.', 2) except ValueError: - log.warn(( + log.warning(( 'Found invalid hash file [{0}] when attempting to reap' ' cache directory.' ).format(file_)) diff --git a/salt/fileserver/s3fs.py b/salt/fileserver/s3fs.py index e90faba2ea..939807eaa0 100644 --- a/salt/fileserver/s3fs.py +++ b/salt/fileserver/s3fs.py @@ -449,8 +449,8 @@ def _refresh_buckets_cache_file(cache_file): continue else: log.warning( - ('S3 Error! Do you have any files ' - 'in your S3 bucket?')) + 'S3 Error! Do you have any files ' + 'in your S3 bucket?') return {} metadata[saltenv] = bucket_files @@ -488,8 +488,8 @@ def _refresh_buckets_cache_file(cache_file): continue else: log.warning( - ('S3 Error! 
Do you have any files ' - 'in your S3 bucket?')) + 'S3 Error! Do you have any files ' + 'in your S3 bucket?') return {} environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files] diff --git a/salt/grains/core.py b/salt/grains/core.py index 6a70dbae2f..55ece08f2c 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -193,7 +193,7 @@ def _linux_gpu_data(): log.debug('Unexpected lspci output: \'{0}\''.format(line)) if error: - log.warn( + log.warning( 'Error loading grains, unexpected linux_gpu_data output, ' 'check that you have a valid shell configured and ' 'permissions to run lspci command' @@ -656,8 +656,8 @@ def _virtual(osdata): grains['virtual'] = 'kvm' else: if osdata['kernel'] in skip_cmds: - log.warn( - 'The tools \'dmidecode\' and \'lspci\' failed to ' + log.warning( + "The tools 'dmidecode' and 'lspci' failed to " 'execute because they do not exist on the system of the user ' 'running this instance or the user does not have the ' 'necessary permissions to execute them. Grains output might ' @@ -820,8 +820,8 @@ def _virtual(osdata): grains['virtual_subtype'] = 'Xen Dom0' for command in failed_commands: - log.warn( - 'Although \'{0}\' was found in path, the current user ' + log.warning( + "Although '{0}' was found in path, the current user " 'cannot execute it. Grains output might not be ' 'accurate.'.format(command) ) diff --git a/salt/grains/extra.py b/salt/grains/extra.py index c11d58af6f..228446675b 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py @@ -45,6 +45,6 @@ def config(): try: return yaml.safe_load(fp_.read()) except Exception: - log.warn("Bad syntax in grains file! Skipping.") + log.warning("Bad syntax in grains file! 
Skipping.") return {} return {} diff --git a/salt/log/setup.py b/salt/log/setup.py index a0c89385ad..05c88b0cd1 100644 --- a/salt/log/setup.py +++ b/salt/log/setup.py @@ -406,7 +406,7 @@ def setup_temp_logger(log_level='error'): Setup the temporary console logger ''' if is_temp_logging_configured(): - logging.getLogger(__name__).warn( + logging.getLogger(__name__).warning( 'Temporary logging is already configured' ) return @@ -460,7 +460,7 @@ def setup_console_logger(log_level='error', log_format=None, date_format=None): Setup the console logger ''' if is_console_configured(): - logging.getLogger(__name__).warn('Console logging already configured') + logging.getLogger(__name__).warning('Console logging already configured') return # Remove the temporary logging handler @@ -533,11 +533,11 @@ def setup_logfile_logger(log_path, log_level='error', log_format=None, ''' if is_logfile_configured(): - logging.getLogger(__name__).warn('Logfile logging already configured') + logging.getLogger(__name__).warning('Logfile logging already configured') return if log_path is None: - logging.getLogger(__name__).warn( + logging.getLogger(__name__).warning( 'log_path setting is set to `None`. Nothing else to do' ) return @@ -940,7 +940,7 @@ def __process_multiprocessing_logging_queue(opts, queue): except (EOFError, KeyboardInterrupt, SystemExit): break except Exception as exc: # pylint: disable=broad-except - logging.getLogger(__name__).warn( + logging.getLogger(__name__).warning( 'An exception occurred in the multiprocessing logging ' 'queue thread: {0}'.format(exc), exc_info_on_loglevel=logging.DEBUG diff --git a/salt/master.py b/salt/master.py index 9853993b4b..17fe4d9024 100644 --- a/salt/master.py +++ b/salt/master.py @@ -993,7 +993,7 @@ class AESFuncs(object): if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
- log.warn( + log.warning( ( 'Minion id {0} is not who it says it is and is attempting ' 'to issue a peer command' @@ -1051,7 +1051,7 @@ class AESFuncs(object): if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! - log.warn( + log.warning( 'Minion id {0} is not who it says it is!'.format( load['id'] ) @@ -1203,7 +1203,7 @@ class AESFuncs(object): if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! - log.warn( + log.warning( 'Minion id {0} is not who it says it is!'.format( load['id'] ) diff --git a/salt/minion.py b/salt/minion.py index 999fa5a332..2e8b97291b 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1461,7 +1461,7 @@ class Minion(MinionBase): '{0}. This is often due to the master being shut down or ' 'overloaded. If the master is running consider increasing ' 'the worker_threads value.').format(jid) - log.warn(msg) + log.warning(msg) return '' log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member diff --git a/salt/modules/archive.py b/salt/modules/archive.py index f573679bb3..2d80db8211 100644 --- a/salt/modules/archive.py +++ b/salt/modules/archive.py @@ -526,7 +526,7 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None, salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='BadPassword' ''' if options: - log.warn('Options \'{0}\' ignored, only works with unzip binary.'.format(options)) + log.warning("Options '{0}' ignored, only works with unzip binary.".format(options)) if not excludes: excludes = [] if runas: @@ -535,7 +535,7 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None, uinfo = __salt__['user.info'](runas) if not uinfo: raise SaltInvocationError( - 'User \'{0}\' does not exist'.format(runas) + "User '{0}' does not exist".format(runas) ) zip_file, dest = _render_filenames(zip_file, dest, None, template) diff --git 
a/salt/modules/bcache.py b/salt/modules/bcache.py index f7d7cdff57..b96ee384e2 100644 --- a/salt/modules/bcache.py +++ b/salt/modules/bcache.py @@ -196,14 +196,14 @@ def stop(dev=None): ''' if dev is not None: - log.warn('Stopping {0}, device will only reappear after reregistering!'.format(dev)) + log.warning('Stopping {0}, device will only reappear after reregistering!'.format(dev)) if not _bcsys(dev, 'stop', 'goaway', 'error', 'Error stopping {0}'.format(dev)): return False return _wait(lambda: _sysfs_attr(_bcpath(dev)) is False, 'error', 'Device {0} did not stop'.format(dev), 300) else: cache = uuid() if not cache: - log.warn('bcache already stopped?') + log.warning('bcache already stopped?') return None if not _alltrue(detach()): @@ -896,10 +896,10 @@ def _wipe(dev): log.error('Unable to read SysFS props for {0}'.format(dev)) return None elif not discard: - log.warn('{0} seems unable to discard'.format(dev)) + log.warning('{0} seems unable to discard'.format(dev)) wiper = 'dd' elif not HAS_BLKDISCARD: - log.warn('blkdiscard binary not available, properly wipe the dev manually for optimal results') + log.warning('blkdiscard binary not available, properly wipe the dev manually for optimal results') wiper = 'dd' else: wiper = 'blkdiscard' diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index 7dce2a49bb..d34e355575 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -647,7 +647,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None, try: registered_instances = conn.register_instances(name, instances) except boto.exception.BotoServerError as error: - log.warn(error) + log.warning(error) return False registered_instance_ids = [instance.id for instance in registered_instances] @@ -655,7 +655,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None, # able to be registered with the given ELB register_failures = set(instances).difference(set(registered_instance_ids)) if register_failures: 
- log.warn('Instance(s): {0} not registered with ELB {1}.' + log.warning('Instance(s): {0} not registered with ELB {1}.' .format(list(register_failures), name)) register_result = False else: @@ -696,12 +696,12 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None, # deregister_instances returns "None" because the instances are # effectively deregistered from ELB if error.error_code == 'InvalidInstance': - log.warn('One or more of instance(s) {0} are not part of ELB {1}.' + log.warning('One or more of instance(s) {0} are not part of ELB {1}.' ' deregister_instances not performed.' .format(instances, name)) return None else: - log.warn(error) + log.warning(error) return False registered_instance_ids = [instance.id for instance in registered_instances] @@ -709,7 +709,7 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None, # unable to be deregistered from the given ELB deregister_failures = set(instances).intersection(set(registered_instance_ids)) if deregister_failures: - log.warn('Instance(s): {0} not deregistered from ELB {1}.' + log.warning('Instance(s): {0} not deregistered from ELB {1}.' .format(list(deregister_failures), name)) deregister_result = False else: diff --git a/salt/modules/dig.py b/salt/modules/dig.py index 9fb6e2b58f..ec08a96daa 100644 --- a/salt/modules/dig.py +++ b/salt/modules/dig.py @@ -102,7 +102,7 @@ def A(host, nameserver=None): cmd = __salt__['cmd.run_all'](dig, python_shell=False) # In this case, 0 is not the same as False if cmd['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. Returning empty list as ' 'fallback.'.format( cmd['retcode'] @@ -134,7 +134,7 @@ def AAAA(host, nameserver=None): cmd = __salt__['cmd.run_all'](dig, python_shell=False) # In this case, 0 is not the same as False if cmd['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. 
Returning empty list as ' 'fallback.'.format( cmd['retcode'] @@ -166,7 +166,7 @@ def NS(domain, resolve=True, nameserver=None): cmd = __salt__['cmd.run_all'](dig, python_shell=False) # In this case, 0 is not the same as False if cmd['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. Returning empty list as ' 'fallback.'.format( cmd['retcode'] @@ -207,7 +207,7 @@ def SPF(domain, record='SPF', nameserver=None): result = __salt__['cmd.run_all'](cmd, python_shell=False) # In this case, 0 is not the same as False if result['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. Returning empty list as fallback.' .format(result['retcode']) ) @@ -264,7 +264,7 @@ def MX(domain, resolve=False, nameserver=None): cmd = __salt__['cmd.run_all'](dig, python_shell=False) # In this case, 0 is not the same as False if cmd['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. Returning empty list as ' 'fallback.'.format( cmd['retcode'] @@ -302,7 +302,7 @@ def TXT(host, nameserver=None): cmd = __salt__['cmd.run_all'](dig, python_shell=False) if cmd['retcode'] != 0: - log.warn( + log.warning( 'dig returned exit code \'{0}\'. 
Returning empty list as ' 'fallback.'.format( cmd['retcode'] diff --git a/salt/modules/disk.py b/salt/modules/disk.py index 7fb0348814..472d98d7b6 100644 --- a/salt/modules/disk.py +++ b/salt/modules/disk.py @@ -383,7 +383,7 @@ def _hdparm(args, failhard=True): if failhard: raise CommandExecutionError(msg) else: - log.warn(msg) + log.warning(msg) return result['stdout'] diff --git a/salt/modules/dockerng.py b/salt/modules/dockerng.py index fdf0f36acd..98dbda0426 100644 --- a/salt/modules/dockerng.py +++ b/salt/modules/dockerng.py @@ -1421,7 +1421,7 @@ def _validate_input(kwargs, 'Host path {0} in bind {1} is not absolute' .format(container_path, bind) ) - log.warn('Host path {0} in bind {1} is not absolute,' + log.warning('Host path {0} in bind {1} is not absolute,' ' assuming it is a docker volume.'.format(host_path, bind)) if not os.path.isabs(container_path): diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index d3cdcc0ad5..e01ccae5a4 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -546,7 +546,7 @@ def rm_auth_key(user, key, config='.ssh/authorized_keys'): with salt.utils.fopen(full, 'w') as _fh: _fh.writelines(lines) except (IOError, OSError) as exc: - log.warn('Could not read/write key file: {0}'.format(str(exc))) + log.warning('Could not read/write key file: {0}'.format(str(exc))) return 'Key not removed' return 'Key removed' # TODO: Should this function return a simple boolean? 
diff --git a/salt/modules/sysfs.py b/salt/modules/sysfs.py index 4c1eb675d3..c87194b816 100644 --- a/salt/modules/sysfs.py +++ b/salt/modules/sysfs.py @@ -249,7 +249,7 @@ def interfaces(root): elif is_r: reads.append(relpath) else: - log.warn('Unable to find any interfaces in {0}'.format(canpath)) + log.warning('Unable to find any interfaces in {0}'.format(canpath)) return { 'r': reads, diff --git a/salt/modules/tls.py b/salt/modules/tls.py index ae6aad473b..de4422d2cf 100644 --- a/salt/modules/tls.py +++ b/salt/modules/tls.py @@ -677,7 +677,7 @@ def create_ca(ca_name, key = OpenSSL.crypto.load_privatekey( OpenSSL.crypto.FILETYPE_PEM, fic2.read()) except OpenSSL.crypto.Error as err: - log.warn('Error loading existing private key' + log.warning('Error loading existing private key' ' %s, generating a new key: %s', ca_keyp, str(err)) bck = "{0}.unloadable.{1}".format(ca_keyp, datetime.utcnow().strftime("%Y%m%d%H%M%S")) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 902f9b20a0..121eaed7f5 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -151,7 +151,7 @@ def __virtual__(): ''' if salt.utils.is_windows(): if not HAS_DEPENDENCIES: - log.warn('Could not load dependencies for {0}'.format(__virtualname__)) + log.warning('Could not load dependencies for {0}'.format(__virtualname__)) return __virtualname__ return (False, "Module win_task: module only works on Windows systems") diff --git a/salt/modules/zcbuildout.py b/salt/modules/zcbuildout.py index a77860ccb3..6bf0917de2 100644 --- a/salt/modules/zcbuildout.py +++ b/salt/modules/zcbuildout.py @@ -720,11 +720,11 @@ def bootstrap(directory='.', distribute = False if new_st: distribute = False - LOG.warning(u'Forcing to use setuptools as we have setuptools >= 0.7') + LOG.warning('Forcing to use setuptools as we have setuptools >= 0.7') if distribute: new_st = False if buildout_ver == 1: - LOG.warning(u'Using distribute !') + LOG.warning('Using distribute !') bootstrap_args += ' 
--distribute' if not os.path.isdir(dbuild): os.makedirs(dbuild) diff --git a/salt/pillar/pepa.py b/salt/pillar/pepa.py index c5424dfb5c..d603a65b8e 100644 --- a/salt/pillar/pepa.py +++ b/salt/pillar/pepa.py @@ -397,7 +397,7 @@ def ext_pillar(minion_id, pillar, resource, sequence, subkey=False, subkey_only= for categ, info in [next(six.iteritems(s)) for s in sequence]: if categ not in inp: - log.warn("Category is not defined: {0}".format(categ)) + log.warning("Category is not defined: {0}".format(categ)) continue alias = None @@ -416,7 +416,7 @@ def ext_pillar(minion_id, pillar, resource, sequence, subkey=False, subkey_only= if isinstance(inp[categ], list): entries = inp[categ] elif not inp[categ]: - log.warn("Category has no value set: {0}".format(categ)) + log.warning("Category has no value set: {0}".format(categ)) continue else: entries = [inp[categ]] diff --git a/salt/pillar/pillar_ldap.py b/salt/pillar/pillar_ldap.py index 70dedb28d9..93aab30781 100644 --- a/salt/pillar/pillar_ldap.py +++ b/salt/pillar/pillar_ldap.py @@ -288,7 +288,7 @@ def ext_pillar(minion_id, # pylint: disable=W0613 import salt.log msg = 'Error parsing configuration file: {0} - {1}' if salt.log.is_console_configured(): - log.warn(msg.format(config_file, err)) + log.warning(msg.format(config_file, err)) else: print(msg.format(config_file, err)) else: diff --git a/salt/pillar/stack.py b/salt/pillar/stack.py index bfd5fe5b8d..4f836545ec 100644 --- a/salt/pillar/stack.py +++ b/salt/pillar/stack.py @@ -395,7 +395,7 @@ def ext_pillar(minion_id, pillar, *args, **kwargs): stack_config_files += cfgs for cfg in stack_config_files: if not os.path.isfile(cfg): - log.warn('Ignoring pillar stack cfg "{0}": ' + log.warning('Ignoring pillar stack cfg "{0}": ' 'file does not exist'.format(cfg)) continue stack = _process_stack_cfg(cfg, stack, minion_id, pillar) diff --git a/salt/renderers/gpg.py b/salt/renderers/gpg.py index 8ed28910d6..92599e9590 100644 --- a/salt/renderers/gpg.py +++ b/salt/renderers/gpg.py 
@@ -263,7 +263,7 @@ def _decrypt_ciphertext(cipher, translate_newlines=False): input=cipher.replace(r'\n', '\n') if translate_newlines else cipher ) if not decrypted_data: - log.warn( + log.warning( 'Could not decrypt cipher %s, received: %s', cipher, decrypt_error diff --git a/salt/renderers/yaml.py b/salt/renderers/yaml.py index 29d8c25557..001305c482 100644 --- a/salt/renderers/yaml.py +++ b/salt/renderers/yaml.py @@ -57,7 +57,7 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws): raise SaltRenderError(exc) if len(warn_list) > 0: for item in warn_list: - log.warn( + log.warning( '{warn} found in {sls} saltenv={env}'.format( warn=item.message, sls=salt.utils.url.create(sls), env=saltenv ) diff --git a/salt/renderers/yamlex.py b/salt/renderers/yamlex.py index 8b408be9c8..a80ee6c35e 100644 --- a/salt/renderers/yamlex.py +++ b/salt/renderers/yamlex.py @@ -23,7 +23,7 @@ def render(sls_data, saltenv='base', sls='', **kws): data = deserialize(sls_data) or {} for item in warn_list: - log.warn( + log.warning( '{warn} found in {sls} saltenv={env}'.format( warn=item.message, sls=salt.utils.url.create(sls), env=saltenv ) diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index 805588d139..0b644d1474 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -118,7 +118,7 @@ def prep_jid(nocache=False, passed_jid=None, recurse_count=0): with salt.utils.fopen(os.path.join(jid_dir_, 'nocache'), 'wb+') as fn_: fn_.write('') except IOError: - log.warn('Could not write out jid file for job {0}. Retrying.'.format(jid)) + log.warning('Could not write out jid file for job {0}. 
Retrying.'.format(jid)) time.sleep(0.1) return prep_jid(passed_jid=jid, nocache=nocache, recurse_count=recurse_count+1) diff --git a/salt/runner.py b/salt/runner.py index b9f30b1609..a02c03d596 100644 --- a/salt/runner.py +++ b/salt/runner.py @@ -165,7 +165,7 @@ class Runner(RunnerClient): if self.opts.get('async', False): async_pub = self.async(self.opts['fun'], low, user=user) # by default: info will be not enougth to be printed out ! - log.warn('Running in async mode. Results of this execution may ' + log.warning('Running in async mode. Results of this execution may ' 'be collected by attaching to the master event bus or ' 'by examing the master job cache, if configured. ' 'This execution is running under tag {tag}'.format(**async_pub)) diff --git a/salt/runners/http.py b/salt/runners/http.py index f67066d97c..b30575a1df 100644 --- a/salt/runners/http.py +++ b/salt/runners/http.py @@ -32,7 +32,7 @@ def query(url, output=True, **kwargs): data='somecontent' ''' if output is not True: - log.warn('Output option has been deprecated. Please use --quiet.') + log.warning('Output option has been deprecated. Please use --quiet.') if 'node' not in kwargs: kwargs['node'] = 'master' diff --git a/salt/runners/lxc.py b/salt/runners/lxc.py index f8905f09d0..dccf54f28e 100644 --- a/salt/runners/lxc.py +++ b/salt/runners/lxc.py @@ -108,7 +108,7 @@ def find_guest(name, quiet=False, path=None): salt-run lxc.find_guest name ''' if quiet: - log.warn('\'quiet\' argument is being deprecated.' + log.warning("'quiet' argument is being deprecated." ' Please migrate to --quiet') for data in _list_iter(path=path): host, l = next(six.iteritems(data)) @@ -234,7 +234,7 @@ def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs): ''' path = kwargs.get('path', None) if quiet: - log.warn('\'quiet\' argument is being deprecated.' + log.warning("'quiet' argument is being deprecated." 
' Please migrate to --quiet') ret = {'comment': '', 'result': True} if host is None: @@ -424,7 +424,7 @@ def cloud_init(names, host=None, quiet=False, **kwargs): init the container with the saltcloud opts format instead ''' if quiet: - log.warn('\'quiet\' argument is being deprecated. Please migrate to --quiet') + log.warning("'quiet' argument is being deprecated. Please migrate to --quiet") return __salt__['lxc.init'](names=names, host=host, saltcloud_mode=True, quiet=quiet, **kwargs) diff --git a/salt/runners/virt.py b/salt/runners/virt.py index 49dcf78dbd..4e41721473 100644 --- a/salt/runners/virt.py +++ b/salt/runners/virt.py @@ -71,7 +71,7 @@ def query(host=None, quiet=False, hyper=None): host = hyper if quiet: - log.warn('\'quiet\' is deprecated. Please migrate to --quiet') + log.warning("'quiet' is deprecated. Please migrate to --quiet") ret = {} client = salt.client.get_local_client(__opts__['conf_file']) try: @@ -117,7 +117,7 @@ def list(host=None, quiet=False, hyper=None): # pylint: disable=redefined-built host = hyper if quiet: - log.warn('\'quiet\' is deprecated. Please migrate to --quiet') + log.warning("'quiet' is deprecated. 
Please migrate to --quiet") ret = {} client = salt.client.get_local_client(__opts__['conf_file']) for info in client.cmd_iter('virtual:physical', diff --git a/salt/scripts.py b/salt/scripts.py index 7dfd94a262..c04483d3c0 100644 --- a/salt/scripts.py +++ b/salt/scripts.py @@ -85,7 +85,7 @@ def minion_process(): try: minion.start() except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc: - log.warn('** Restarting minion **') + log.warning('** Restarting minion **') delay = 60 if minion is not None and hasattr(minion, 'config'): delay = minion.config.get('random_reauth_delay', 60) @@ -199,7 +199,7 @@ def proxy_minion_process(queue): restart = False if restart is True: - log.warn('** Restarting proxy minion **') + log.warning('** Restarting proxy minion **') delay = 60 if proxyminion is not None: if hasattr(proxyminion, 'config'): diff --git a/salt/states/boto_s3_bucket.py b/salt/states/boto_s3_bucket.py index c7de21d0e4..dbf88d4e95 100644 --- a/salt/states/boto_s3_bucket.py +++ b/salt/states/boto_s3_bucket.py @@ -554,7 +554,7 @@ def present(name, Bucket, # notice something mismatches their desired state. 
if _describe.get('Location', {}).get('LocationConstraint') != LocationConstraint: msg = 'Bucket {0} location does not match desired configuration, but cannot be changed'.format(LocationConstraint) - log.warn(msg) + log.warning(msg) ret['result'] = False ret['comment'] = 'Failed to update bucket: {0}.'.format(msg) return ret diff --git a/salt/states/cyg.py b/salt/states/cyg.py index b347b149e7..855fc10c9d 100644 --- a/salt/states/cyg.py +++ b/salt/states/cyg.py @@ -187,7 +187,7 @@ def updated(name=None, cyg_arch='x86_64', mirrors=None): return ret if not mirrors: - LOG.warn('No mirror given, using the default.') + LOG.warning('No mirror given, using the default.') before = __salt__['cyg.list'](cyg_arch=cyg_arch) if __salt__['cyg.update'](cyg_arch, mirrors=mirrors): diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 9d508e6f80..f23a344ce0 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -226,7 +226,7 @@ def state( for minion, mdata in six.iteritems(cmd_ret): if mdata.get('out', '') != 'highstate': - log.warning("Output from salt state not highstate") + log.warning('Output from salt state not highstate') m_ret = False diff --git a/salt/states/zcbuildout.py b/salt/states/zcbuildout.py index a2fec8e2be..b26addc4ef 100644 --- a/salt/states/zcbuildout.py +++ b/salt/states/zcbuildout.py @@ -206,10 +206,10 @@ def installed(name, ret = {} if 'group' in kwargs: - log.warn('Passing \'group\' is deprecated, just remove it') + log.warning("Passing 'group' is deprecated, just remove it") output_loglevel = kwargs.get('output_loglevel', None) if output_loglevel and not loglevel: - log.warn('Passing \'output_loglevel\' is deprecated,' + log.warning("Passing 'output_loglevel' is deprecated," ' please use loglevel instead') try: test_release = int(test_release) diff --git a/salt/template.py b/salt/template.py index 6c846820b8..93e92badd0 100644 --- a/salt/template.py +++ b/salt/template.py @@ -64,7 +64,7 @@ def compile_template(template, return ret # 
Template is an empty file if salt.utils.is_empty(template): - log.warn('Template is an empty file: {0}'.format(template)) + log.warning('Template is an empty file: {0}'.format(template)) return ret with codecs.open(template, encoding=SLS_ENCODING) as ifile: diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py index 52aeab8c8c..eead2b0253 100644 --- a/salt/transport/mixins/auth.py +++ b/salt/transport/mixins/auth.py @@ -364,7 +364,7 @@ class AESReqServerMixin(object): else: # Something happened that I have not accounted for, FAIL! - log.warn('Unaccounted for authentication failure') + log.warning('Unaccounted for authentication failure') eload = {'result': False, 'id': load['id'], 'pub': load['pub']} diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 75877e178c..ff0a4575b9 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -2403,7 +2403,7 @@ def lock_file(filename, interval=.5, timeout=15): while True: if os.path.exists(lock): if time.time() - start >= timeout: - log.warn('Unable to obtain lock for {0}'.format(filename)) + log.warning('Unable to obtain lock for {0}'.format(filename)) return False time.sleep(interval) else: diff --git a/salt/utils/http.py b/salt/utils/http.py index 1b679f2279..97492c10f6 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -154,7 +154,7 @@ def query(url, requests_lib = opts.get('requests_lib', False) if requests_lib is True: - log.warn('Please set "backend" to "requests" instead of setting ' + log.warning('Please set "backend" to "requests" instead of setting ' '"requests_lib" to "True"') if HAS_REQUESTS is False: @@ -330,11 +330,11 @@ def query(url, hostname = request.get_host() handlers[0] = urllib_request.HTTPSHandler(1) if not HAS_MATCHHOSTNAME: - log.warn(('match_hostname() not available, SSL hostname checking ' - 'not available. THIS CONNECTION MAY NOT BE SECURE!')) + log.warning('match_hostname() not available, SSL hostname checking ' + 'not available. 
THIS CONNECTION MAY NOT BE SECURE!') elif verify_ssl is False: - log.warn(('SSL certificate verification has been explicitly ' - 'disabled. THIS CONNECTION MAY NOT BE SECURE!')) + log.warning('SSL certificate verification has been explicitly ' + 'disabled. THIS CONNECTION MAY NOT BE SECURE!') else: if ':' in hostname: hostname, port = hostname.split(':') diff --git a/salt/utils/master.py b/salt/utils/master.py index 3658642d7c..8a36263c89 100644 --- a/salt/utils/master.py +++ b/salt/utils/master.py @@ -165,7 +165,7 @@ class MasterPillarUtil(object): if minion_id is None: return {} if not minion_grains: - log.warn( + log.warning( 'Cannot get pillar data for {0}: no grains supplied.'.format( minion_id ) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index a461c6cbcd..bcdfb97d2e 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -375,7 +375,7 @@ class Schedule(object): if name in self.opts['pillar']['schedule']: del self.opts['pillar']['schedule'][name] schedule = self.opts['pillar']['schedule'] - log.warn('Pillar schedule deleted. Pillar refresh recommended. Run saltutil.refresh_pillar.') + log.warning('Pillar schedule deleted. Pillar refresh recommended. 
Run saltutil.refresh_pillar.') # Fire the complete event back along with updated list of schedule evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) diff --git a/salt/utils/verify.py b/salt/utils/verify.py index f6579ee3a0..bee4b8d115 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -48,7 +48,7 @@ def zmq_version(): if not match: msg = "Using untested zmq python bindings version: '{0}'".format(ver) if is_console_configured(): - log.warn(msg) + log.warning(msg) else: sys.stderr.write("WARNING {0}\n".format(msg)) return True @@ -69,7 +69,7 @@ def zmq_version(): if "dev" in ver and not point: msg = 'Using dev zmq module, please report unexpected results' if is_console_configured(): - log.warn(msg) + log.warning(msg) else: sys.stderr.write("WARNING: {0}\n".format(msg)) return True @@ -133,7 +133,7 @@ def verify_socket(interface, pub_port, ret_port): msg = ('Unable to bind socket, this might not be a problem.' ' Is there another salt-master running?') if is_console_configured(): - log.warn(msg) + log.warning(msg) else: sys.stderr.write('WARNING: {0}\n'.format(msg)) result = False @@ -517,4 +517,4 @@ def verify_log(opts): If an insecre logging configuration is found, show a warning ''' if opts.get('log_level') in ('garbage', 'trace', 'debug'): - log.warn('Insecure logging configuration detected! Sensitive data may be logged.') + log.warning('Insecure logging configuration detected! Sensitive data may be logged.') From 56a899c8d9ebbb18b4a8c1b3cf82fbf51e9d6bf4 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Mon, 22 Feb 2016 16:26:30 -0700 Subject: [PATCH 37/65] Remove useless test This tests absolutely nothing and has no real point. 
--- tests/unit/modules/runit_test.py | 156 ------------------------------- 1 file changed, 156 deletions(-) delete mode 100644 tests/unit/modules/runit_test.py diff --git a/tests/unit/modules/runit_test.py b/tests/unit/modules/runit_test.py deleted file mode 100644 index 3b7db0ea77..0000000000 --- a/tests/unit/modules/runit_test.py +++ /dev/null @@ -1,156 +0,0 @@ -# -*- coding: utf-8 -*- -''' - :codeauthor: :email:`Jayesh Kariya ` -''' - -# Import Python Libs -from __future__ import absolute_import -import os - -# Import Salt Testing Libs -from salttesting import TestCase, skipIf -from salttesting.mock import ( - MagicMock, - patch, - NO_MOCK, - NO_MOCK_REASON -) - -from salttesting.helpers import ensure_in_syspath - -ensure_in_syspath('../../') - -# Import Salt Libs -from salt.modules import runit - -# Globals -runit.__salt__ = {} -runit.SERVICE_DIR = '/etc/service' - - -@skipIf(NO_MOCK, NO_MOCK_REASON) -class RunitTestCase(TestCase): - ''' - Test cases for salt.modules.runit - ''' - # 'start' function tests: 1 - - def test_start(self): - ''' - Test if it starts service via runit. - ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertTrue(runit.start('ssh')) - - # 'stop' function tests: 1 - - def test_stop(self): - ''' - Test if it stops service via runit. - ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertTrue(runit.stop('ssh')) - - # 'term' function tests: 1 - - def test_term(self): - ''' - Test if it send a TERM to service via runit. - ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertTrue(runit.term('ssh')) - - # 'reload_' function tests: 1 - - def test_reload(self): - ''' - Test if it send a HUP to service via runit. 
- ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertTrue(runit.reload_('ssh')) - - # 'restart' function tests: 1 - - def test_restart(self): - ''' - Test if it restart service via runit. This will stop/start service. - ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertTrue(runit.restart('ssh')) - - # 'full_restart' function tests: 1 - - def test_full_restart(self): - ''' - Test if it calls runit.restart() function. - ''' - mock_ret = MagicMock(return_value=False) - with patch.dict(runit.__salt__, {'cmd.retcode': mock_ret}): - self.assertIsNone(runit.full_restart('ssh')) - - # 'status' function tests: 1 - - def test_status(self): - ''' - Test if it return the status for a service via runit, - return pid if running. - ''' - mock_run = MagicMock(return_value='salt') - with patch.dict(runit.__salt__, {'cmd.run_stdout': mock_run}): - self.assertEqual(runit.status('ssh'), '') - - # 'available' function tests: 1 - - def test_available(self): - ''' - Test if it returns ``True`` if the specified service is available, - otherwise returns ``False``. - ''' - with patch.object(os, 'listdir', - MagicMock(return_value=['/etc/service'])): - self.assertTrue(runit.available('/etc/service')) - - # 'enabled' function tests: 1 - - def test_enabled(self): - ''' - Test if it returns ``True`` if the specified service is available, - otherwise returns ``False``. - ''' - with patch.object(os, 'listdir', - MagicMock(return_value=['run', 'supervise'])): - mock_mode = MagicMock(return_value='0700') - with patch.dict(runit.__salt__, {'file.get_mode': mock_mode}): - with patch('salt.modules.runit.available', MagicMock(return_value=True)): - self.assertTrue(runit.enabled('foo')) - - # 'missing' function tests: 1 - - def test_missing(self): - ''' - Test if it returns ``True`` if the specified service is not available, - otherwise returns ``False``. 
- ''' - with patch.object(os, 'listdir', - MagicMock(return_value=['/etc/service'])): - self.assertTrue(runit.missing('foo')) - - # 'get_all' function tests: 1 - - def test_get_all(self): - ''' - Test if it return a list of all available services. - ''' - with patch.object(os, 'listdir', - MagicMock(return_value=['/etc/service'])): - self.assertListEqual(runit.get_all(), ['/etc/service']) - - -if __name__ == '__main__': - from integration import run_tests - run_tests(RunitTestCase, needs_daemon=False) From 144f7206fe2503e120ea36189a07ca3892a0877e Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 22 Feb 2016 16:42:08 -0700 Subject: [PATCH 38/65] Fix uppercase computer names --- salt/modules/win_system.py | 63 ++++++++++++++++++++++++---- salt/states/win_system.py | 4 +- tests/unit/states/win_system_test.py | 12 +++--- 3 files changed, 63 insertions(+), 16 deletions(-) diff --git a/salt/modules/win_system.py b/salt/modules/win_system.py index 7903d19a1e..1a90adc792 100644 --- a/salt/modules/win_system.py +++ b/salt/modules/win_system.py @@ -55,6 +55,15 @@ def _convert_minutes_seconds(timeout, in_seconds=False): return timeout if in_seconds else timeout*60 +def _convert_date_time_string(dt_string): + ''' + convert string to date time object + ''' + dt_string = dt_string.split('.')[0] + dt_obj = datetime.strptime(dt_string, '%Y%m%d%H%M%S') + return dt_obj.strftime('%Y-%m-%d %H:%M:%S') + + def halt(timeout=5, in_seconds=False): ''' Halt a running system. 
@@ -306,10 +315,13 @@ def set_computer_name(name): if windll.kernel32.SetComputerNameExW(win32con.ComputerNamePhysicalDnsHostname, name): - ret = {'Computer Name': {'Current': get_system_info()['name']}} + ret = {'Computer Name': {'Current': get_computer_name()}} pending = get_pending_computer_name() if pending not in (None, False): - ret['Computer Name']['Pending'] = pending + if pending == name.upper(): + ret['Computer Name']['Pending'] = name + else: + ret['Computer Name']['Pending'] = pending return ret return False @@ -332,7 +344,7 @@ def get_pending_computer_name(): salt 'minion-id' system.get_pending_computer_name ''' - current = get_computer_name() + current = get_computer_name().upper() pending = read_value('HKLM', r'SYSTEM\CurrentControlSet\Control\ComputerName\ComputerName', 'ComputerName')['vdata'] @@ -354,7 +366,7 @@ def get_computer_name(): salt 'minion-id' system.get_computer_name ''' - name = get_system_info()['name'] + name = win32api.GetComputerNameEx(win32con.ComputerNamePhysicalDnsHostname) return name if name else False @@ -409,14 +421,49 @@ def get_system_info(): name, description, version, etc... 
:rtype: dict ''' - system_info = win32net.NetServerGetInfo(None, 101) - return system_info + os_type = {1: 'Work Station', + 2: 'Domain Controller', + 3: 'Server'} + pythoncom.CoInitialize() + c = wmi.WMI() + system = c.Win32_OperatingSystem()[0] + ret = {'name': get_computer_name(), + 'description': system.Description, + 'install_date': system.InstallDate, + 'last_boot': system.LastBootUpTime, + 'os_manufacturer': system.Manufacturer, + 'os_name': system.Caption, + 'users': system.NumberOfUsers, + 'organization': system.Organization, + 'os_architecture': system.OSArchitecture, + 'primary': system.Primary, + 'os_type': os_type[system.ProductType], + 'registered_user': system.RegisteredUser, + 'system_directory': system.SystemDirectory, + 'system_drive': system.SystemDrive, + 'os_version': system.Version, + 'windows_directory': system.WindowsDirectory} + system = c.Win32_ComputerSystem()[0] + ret.update({'hardware_manufacturer': system.Manufacturer, + 'hardware_model': system.Model, + 'processors': system.NumberOfProcessors, + 'processors_logical': system.NumberOfLogicalProcessors, + 'system_type': system.SystemType}) + system = c.Win32_BIOS()[0] + ret.update({'hardware_serial': system.SerialNumber, + 'bios_manufacturer': system.Manufacturer, + 'bios_version': system.Version, + 'bios_details': system.BIOSVersion, + 'bios_caption': system.Caption, + 'bios_description': system.Description}) + ret['install_date'] = _convert_date_time_string(ret['install_date']) + ret['last_boot'] = _convert_date_time_string(ret['last_boot']) + return ret def get_computer_desc(): ''' Get the Windows computer description - :return: Returns the computer description if found. 
Otherwise returns False @@ -426,7 +473,7 @@ def get_computer_desc(): salt 'minion-id' system.get_computer_desc ''' - desc = get_system_info()['comment'] + desc = get_system_info()['description'] return desc if desc else False diff --git a/salt/states/win_system.py b/salt/states/win_system.py index cfa9a60558..b61988c217 100644 --- a/salt/states/win_system.py +++ b/salt/states/win_system.py @@ -88,7 +88,7 @@ def computer_name(name): The desired computer name ''' # Just in case someone decides to enter a numeric description - name = str(name).upper() + name = str(name) ret = {'name': name, 'changes': {}, @@ -100,7 +100,7 @@ def computer_name(name): if before_name == name and pending_name is None: return ret - elif pending_name == name: + elif pending_name == name.upper(): ret['comment'] = ('The current computer name is {0!r}, but will be ' 'changed to {1!r} on the next reboot' .format(before_name, name)) diff --git a/tests/unit/states/win_system_test.py b/tests/unit/states/win_system_test.py index 2db6a0a8c2..252bc5e38d 100644 --- a/tests/unit/states/win_system_test.py +++ b/tests/unit/states/win_system_test.py @@ -63,27 +63,27 @@ class WinSystemTestCase(TestCase): ''' Test to manage the computer's name ''' - ret = {'name': 'SALT', + ret = {'name': 'salt', 'changes': {}, 'result': True, 'comment': ''} - mock = MagicMock(return_value='SALT') + mock = MagicMock(return_value='salt') with patch.dict(win_system.__salt__, {"system.get_computer_name": mock}): mock = MagicMock(side_effect=[None, 'SALT', 'Stack', 'stack']) with patch.dict(win_system.__salt__, {"system.get_pending_computer_name": mock}): - ret.update({'comment': "Computer name already set to 'SALT'"}) + ret.update({'comment': "Computer name already set to 'salt'"}) self.assertDictEqual(win_system.computer_name('salt'), ret) ret.update({'comment': "The current computer name" - " is 'SALT', but will be changed to 'SALT' on" + " is 'salt', but will be changed to 'salt' on" " the next reboot"}) 
self.assertDictEqual(win_system.computer_name('salt'), ret) with patch.dict(win_system.__opts__, {"test": True}): ret.update({'result': None, 'comment': "Computer name will" - " be changed to 'SALT'"}) + " be changed to 'salt'"}) self.assertDictEqual(win_system.computer_name('salt'), ret) with patch.dict(win_system.__opts__, {"test": False}): @@ -91,7 +91,7 @@ class WinSystemTestCase(TestCase): with patch.dict(win_system.__salt__, {"system.set_computer_name": mock}): ret.update({'comment': "Unable to set computer name" - " to 'SALT'", 'result': False}) + " to 'salt'", 'result': False}) self.assertDictEqual(win_system.computer_name('salt'), ret) From 91bb3913c142f9a4e58e68098461c9dc09310990 Mon Sep 17 00:00:00 2001 From: Peter Slovak Date: Tue, 23 Feb 2016 15:06:58 +0100 Subject: [PATCH 39/65] Fix bug #28044 - look for 'pip' first, then 'pip2' --- salt/modules/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/pip.py b/salt/modules/pip.py index 34751b0e1c..6ebefa3182 100644 --- a/salt/modules/pip.py +++ b/salt/modules/pip.py @@ -118,7 +118,7 @@ def _get_pip_bin(bin_env): executable itself, or from searching conventional filesystem locations ''' if not bin_env: - which_result = __salt__['cmd.which_bin'](['pip2', 'pip', 'pip-python']) + which_result = __salt__['cmd.which_bin'](['pip', 'pip2', 'pip-python']) if which_result is None: raise CommandNotFoundError('Could not find a `pip` binary') if salt.utils.is_windows(): From 4e630fcef6cf4bedb8dac5e926cdf78aa924d37a Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Sat, 20 Feb 2016 14:21:23 +0000 Subject: [PATCH 40/65] Add missing NullItem --- salt/utils/schema.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/utils/schema.py b/salt/utils/schema.py index dd361d4a9f..49a48a77fd 100644 --- a/salt/utils/schema.py +++ b/salt/utils/schema.py @@ -868,6 +868,11 @@ class BaseSchemaItem(SchemaItem): # return output + '\n' +class NullItem(BaseSchemaItem): + + __type__ = 'null' + + 
class BooleanItem(BaseSchemaItem): __type__ = 'boolean' From 8ad5794b60d43e1b052fa6bd35d52158690f453e Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Tue, 23 Feb 2016 12:44:08 +0000 Subject: [PATCH 41/65] Proxy Minion for network devices using Napalm library --- doc/ref/modules/all/index.rst | 1 + .../all/salt.modules.napalm_network.rst | 6 + doc/ref/proxy/all/index.rst | 1 + doc/ref/proxy/all/salt.proxy.napalm.rst | 6 + salt/modules/napalm_network.py | 316 ++++++++++++++++++ salt/proxy/napalm.py | 171 ++++++++++ 6 files changed, 501 insertions(+) create mode 100644 doc/ref/modules/all/salt.modules.napalm_network.rst create mode 100644 doc/ref/proxy/all/salt.proxy.napalm.rst create mode 100644 salt/modules/napalm_network.py create mode 100644 salt/proxy/napalm.py diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index 8f70ce6965..1c226f020c 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -198,6 +198,7 @@ Full list of builtin execution modules nacl nagios nagios_rpc + napalm_network netaddress netbsd_sysctl netbsdservice diff --git a/doc/ref/modules/all/salt.modules.napalm_network.rst b/doc/ref/modules/all/salt.modules.napalm_network.rst new file mode 100644 index 0000000000..5bd81ccd19 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.napalm_network.rst @@ -0,0 +1,6 @@ +salt.modules.napalm_network module +=============================== + +.. 
automodule:: salt.modules.napalm_network + :members: + :undoc-members: diff --git a/doc/ref/proxy/all/index.rst b/doc/ref/proxy/all/index.rst index 4ca6727ecf..cc0281240e 100644 --- a/doc/ref/proxy/all/index.rst +++ b/doc/ref/proxy/all/index.rst @@ -15,6 +15,7 @@ Full list of builtin proxy modules fx2 junos marathon + napalm phillips_hue rest_sample ssh_sample diff --git a/doc/ref/proxy/all/salt.proxy.napalm.rst b/doc/ref/proxy/all/salt.proxy.napalm.rst new file mode 100644 index 0000000000..33e5a8b7d0 --- /dev/null +++ b/doc/ref/proxy/all/salt.proxy.napalm.rst @@ -0,0 +1,6 @@ +================ +salt.proxy.napalm +================ + +.. automodule:: salt.proxy.napalm + :members: diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py new file mode 100644 index 0000000000..685540b0ed --- /dev/null +++ b/salt/modules/napalm_network.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +''' +Basic functions from Napalm library +''' + +from __future__ import absolute_import + +import logging +log = logging.getLogger(__name__) + +# ------------------------------------------------------------------------ +# module properties +# ------------------------------------------------------------------------ + +__virtualname__ = 'net' +__proxyenabled__ = ['napalm'] +# uses NAPALM-based proxy to interact with network devices + +# ------------------------------------------------------------------------ +# property functions +# ------------------------------------------------------------------------ + + +def __virtual__(): + return True + +# ------------------------------------------------------------------------ +# helper functions -- will not be exported +# ------------------------------------------------------------------------ + + +def _filter_list(input_list, search_key, search_value): + + output_list = list() + + for dictionary in input_list: + if dictionary.get(search_key) == search_value: + output_list.append(dictionary) + + return output_list + + +def 
_filter_dict(input_dict, search_key, search_value): + + output_dict = dict() + + for key, key_list in input_dict.iteritems(): + key_list_filtered = _filter_list(key_list, search_key, search_value) + if key_list_filtered: + output_dict[key] = key_list_filtered + + return output_dict + +# ------------------------------------------------------------------------ +# callable functions +# ------------------------------------------------------------------------ + + +def ping(): + ''' + is the device alive ? + + CLI example: + + .. code-block:: bash + + salt myminion net.ping + ''' + + return { + 'out': __proxy__['napalm.ping']() + } + + +def cli(*commands): + + """ + NAPALM returns a dictionary with the output of all commands passed as arguments: + + CLI example: + + .. code-block:: bash + + salt myminion net.cli "show version" "show route 8.8.8.8" + + :param commands: list of raw commands to execute on device + + Example: + { + u'show version and haiku' : u'''Hostname: re0.edge01.arn01 + Model: mx480 + Junos: 13.3R6.5 + Help me, Obi-Wan + I just saw Episode Two + You're my only hope + ''', + u'show chassis fan' : u'''Item Status RPM Measurement + Top Rear Fan OK 3840 Spinning at intermediate-speed + Bottom Rear Fan OK 3840 Spinning at intermediate-speed + Top Middle Fan OK 3900 Spinning at intermediate-speed + Bottom Middle Fan OK 3840 Spinning at intermediate-speed + Top Front Fan OK 3810 Spinning at intermediate-speed + Bottom Front Fan OK 3840 Spinning at intermediate-speed + ''' + } + """ + + return __proxy__['napalm.call']( + 'cli', + **{ + 'commands': list(commands) + } + ) + # thus we can display the output as is + # in case of errors, they'll be catched in the proxy + + +def arp(interface='', ipaddr='', macaddr=''): + + """ + NAPALM returns a list of dictionaries with details of the ARP entries: + [{INTERFACE, MAC, IP, AGE}] + + CLI example: + + .. 
code-block:: bash + + salt myminion net.arp + salt myminion net.arp macaddr='5c:5e:ab:da:3c:f0' + + :param interface: interface name to filter on + :param ipaddr: IP address to filter on + :param macaddr: MAC address to filter on + + Example output: + [ + { + 'interface' : 'MgmtEth0/RSP0/CPU0/0', + 'mac' : '5c:5e:ab:da:3c:f0', + 'ip' : '172.17.17.1', + 'age' : 1454496274.84 + }, + { + 'interface': 'MgmtEth0/RSP0/CPU0/0', + 'mac' : '66:0e:94:96:e0:ff', + 'ip' : '172.17.17.2', + 'age' : 1435641582.49 + } + ] + """ + + proxy_output = __proxy__['napalm.call']( + 'get_arp_table', + **{ + } + ) + + if not proxy_output.get('result'): + return proxy_output + + arp_table = proxy_output.get('out') + + if interface: + arp_table = _filter_list(arp_table, 'interface', interface) + + if ipaddr: + arp_table = _filter_list(arp_table, 'ip', ipaddr) + + if macaddr: + arp_table = _filter_list(arp_table, 'mac', macaddr) + + proxy_output.update({ + 'out': arp_table + }) + + return proxy_output + + +def ipaddrs(): + ''' + Returns IP addresses on the device + + CLI example: + + .. code-block:: bash + + salt myminion net.ipaddrs + ''' + return __proxy__['napalm.call']( + 'get_interfaces_ip', + **{ + } + ) + + +def lldp(interface=''): + + """ + returns LLDP neighbors + + CLI example: + + .. code-block:: bash + + salt myminion net.lldp + salt myminion net.lldp interface='TenGigE0/0/0/8' + + :param interface: interface name to filter on + + Example output: + { + 'TenGigE0/0/0/8': [ + { + 'parent_interface': u'Bundle-Ether8', + 'interface_description': u'TenGigE0/0/0/8', + 'remote_chassis_id': u'8c60.4f69.e96c', + 'remote_system_name': u'switch', + 'remote_port': u'Eth2/2/1', + 'remote_port_description': u'Ethernet2/2/1', + 'remote_system_description': u'''Cisco Nexus Operating System (NX-OS) Software 7.1(0)N1(1a) + TAC support: http://www.cisco.com/tac + Copyright (c) 2002-2015, Cisco Systems, Inc. 
All rights reserved.''', + 'remote_system_capab': u'B, R', + 'remote_system_enable_capab': u'B' + } + ] + } + """ + + proxy_output = __proxy__['napalm.call']( + 'get_lldp_neighbors_detail', + **{ + } + ) + + if not proxy_output.get('result'): + return proxy_output + + lldp_neighbors = proxy_output.get('out') + + if interface: + lldp_neighbors = {interface: lldp_neighbors.get(interface)} + + proxy_output.update({ + 'out': lldp_neighbors + }) + + return proxy_output + + +def mac(address='', interface='', vlan=0): + + """ + returns device MAC address table + + CLI example: + + .. code-block:: bash + + salt myminion net.mac + salt myminion net.mac vlan=10 + + :param address: MAC address to filter on + :param interface: interface name to filter on + :param vlan: vlan identifier + + Example output: + [ + { + 'mac' : '00:1c:58:29:4a:71', + 'interface' : 'xe-3/0/2', + 'static' : False, + 'active' : True, + 'moves' : 1, + 'vlan' : 10, + 'last_move' : 1454417742.58 + }, + { + 'mac' : '8c:60:4f:58:e1:c1', + 'interface' : 'xe-1/0/1', + 'static' : False, + 'active' : True, + 'moves' : 2, + 'vlan' : 42, + 'last_move' : 1453191948.11 + } + ] + + """ + + proxy_output = __proxy__['napalm.call']( + 'get_mac_address_table', + **{ + } + ) + + if not proxy_output.get('result'): + # if negative, leave the output unchanged + return proxy_output + + mac_address_table = proxy_output.get('out') + + if vlan and isinstance(int, vlan): + mac_address_table = {vlan: mac_address_table.get(vlan)} + + if address: + mac_address_table = _filter_dict(mac_address_table, 'mac', address) + + if interface: + mac_address_table = _filter_dict(mac_address_table, 'interface', interface) + + proxy_output.update({ + 'out': mac_address_table + }) + + return proxy_output diff --git a/salt/proxy/napalm.py b/salt/proxy/napalm.py new file mode 100644 index 0000000000..c0745f47e1 --- /dev/null +++ b/salt/proxy/napalm.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- + +""" +THis module allows Salt interact with network 
devices via NAPALM library +(https://github.com/napalm-automation/napalm) +""" + +from __future__ import absolute_import + +import logging +log = logging.getLogger(__file__) + +from napalm import get_network_driver + +# ------------------------------------------------------------------------ +# proxy properties +# ------------------------------------------------------------------------ + +__proxyenabled__ = ['napalm'] +# proxy name + +# ------------------------------------------------------------------------ +# global variables +# ------------------------------------------------------------------------ + +NETWORK_DEVICE = {} + +# ------------------------------------------------------------------------ +# property functions +# ------------------------------------------------------------------------ + + +def __virtual__(): + return True + +# ------------------------------------------------------------------------ +# helper functions -- will not be exported +# ------------------------------------------------------------------------ + +# ------------------------------------------------------------------------ +# Salt specific proxy functions +# ------------------------------------------------------------------------ + + +def init(opts): + ''' + Perform any needed setup. 
+ ''' + NETWORK_DEVICE['HOSTNAME'] = opts.get('proxy', {}).get('host') + NETWORK_DEVICE['USERNAME'] = opts.get('proxy', {}).get('username') + NETWORK_DEVICE['PASSWORD'] = opts.get('proxy', {}).get('passwd') + NETWORK_DEVICE['DRIVER_NAME'] = opts.get('proxy', {}).get('driver') + + NETWORK_DEVICE['UP'] = False + + _driver_ = get_network_driver(NETWORK_DEVICE.get('DRIVER_NAME')) + # get driver object form NAPALM + + optional_args = { + 'config_lock': False # to avoid locking config DB + } + + try: + NETWORK_DEVICE['DRIVER'] = _driver_( + NETWORK_DEVICE.get('HOSTNAME', ''), + NETWORK_DEVICE.get('USERNAME', ''), + NETWORK_DEVICE.get('PASSWORD', ''), + optional_args=optional_args + ) + NETWORK_DEVICE.get('DRIVER').open() + # no exception raised here, means connection established + NETWORK_DEVICE['UP'] = True + except Exception as error: + log.error( + "Cannot connect to {hostname} as {username}. Please check error: {error}".format( + hostname=NETWORK_DEVICE.get('HOSTNAME', ''), + username=NETWORK_DEVICE.get('USERNAME', ''), + error=error + ) + ) + + return True + + +def ping(): + ''' + is the device responding ? + ''' + return NETWORK_DEVICE['UP'] + + +def shutdown(opts): + ''' + use napalm close() + ''' + try: + if not NETWORK_DEVICE.get('UP', False): + raise Exception('not connected!') + NETWORK_DEVICE.get('DRIVER').close() + except Exception as error: + log.error( + 'Cannot close connection with {hostname}! Please check error: {error}'.format( + hostname=NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'), + error=error + ) + ) + + return True + +# ------------------------------------------------------------------------ +# Callable functions +# ------------------------------------------------------------------------ + + +def call(method, **params): + + """ + This function calls methods from the NAPALM driver object. 
+ Available methods: + + ============================== ===== ===== ====== ======= ====== ====== ===== ========= + _ EOS JunOS IOS-XR FortiOS IBM NXOS IOS Pluribus + ============================== ===== ===== ====== ======= ====== ====== ===== ========= + **cli** |yes| |yes| |yes| |no| |no| |yes| |yes| |yes| + **get_facts** |yes| |yes| |yes| |yes| |no| |yes| |yes| |yes| + **get_interfaces** |yes| |yes| |yes| |yes| |no| |yes| |yes| |yes| + **get_lldp_neighbors** |yes| |yes| |yes| |yes| |no| |no| |yes| |yes| + **get_lldp_neighbors_detail** |yes| |yes| |yes| |no| |no| |yes| |no| |yes| + **get_bgp_neighbors** |yes| |yes| |yes| |yes| |no| |no| |yes| |no| + **get_bgp_neighbors_detail** |yes| |yes| |no| |no| |no| |no| |no| |no| + **get_bgp_config** |yes| |yes| |yes| |no| |no| |no| |no| |no| + **get_environment** |yes| |yes| |yes| |yes| |no| |no| |yes| |no| + **get_mac_address_table** |yes| |yes| |yes| |no| |no| |yes| |no| |yes| + **get_arp_table** |yes| |yes| |yes| |no| |no| |yes| |no| |no| + **get_snmp_information** |no| |no| |no| |no| |no| |no| |no| |yes| + **get_ntp_peers** |yes| |yes| |yes| |no| |no| |yes| |no| |yes| + **get_interfaces_ip** |yes| |yes| |yes| |no| |no| |yes| |yes| |no| + ============================== ===== ===== ====== ======= ====== ====== ===== ========= + + For example:: + + call('cli', **{ + 'commands': [ + "show version", + "show chassis fan" + ] + }) + + """ + + try: + if not NETWORK_DEVICE.get('UP', False): + raise Exception('not connected') + # if connected will try to execute desired command + return { + 'out': getattr(NETWORK_DEVICE.get('DRIVER'), method)(**params), + 'result': True, + 'comment': '' + } + except Exception as error: + # either not connected + # either unable to execute the command + return { + 'out': {}, + 'result': False, + 'comment': 'Cannot execute "{method}" on {device}. 
Reason: {error}!'.format( + device=NETWORK_DEVICE.get('HOSTNAME', '[unspecified hostname]'), + method=method, + error=error + ) + } From 7acea12cd0b33928881e977da8c23ae5ec3960ca Mon Sep 17 00:00:00 2001 From: jdesilet Date: Tue, 23 Feb 2016 20:07:10 -0700 Subject: [PATCH 42/65] Added the key value service_name to service beacon This feature allows you to call the service name as a key in reactor files. The use case being that you can then create one reactor file that can run states of the same name. An example being: /srv/reactor/service.sls service: local.state.sls: - tgt: {{ data['data']['id'] }} - arg: - {{ data['data']['service_name'] }} Then if you had state with a matching name it could ensure the service is set in the right state when any change is detected. --- salt/beacons/service.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/beacons/service.py b/salt/beacons/service.py index b7f804d5f3..bd04debd08 100644 --- a/salt/beacons/service.py +++ b/salt/beacons/service.py @@ -82,6 +82,7 @@ def beacon(config): for service in config: ret_dict = {} ret_dict[service] = {'running': __salt__['service.status'](service)} + ret_dict['service_name'] = service # If no options is given to the service, we fall back to the defaults # assign a False value to oncleanshutdown and onchangeonly. 
Those From 2e72c29bf840d6cae965733ecada3579a26af1dc Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Tue, 23 Feb 2016 21:20:45 -0700 Subject: [PATCH 43/65] Properly handle A and AAAA records --- salt/modules/dnsutil.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/modules/dnsutil.py b/salt/modules/dnsutil.py index 28cecf40f1..bacbb97c20 100644 --- a/salt/modules/dnsutil.py +++ b/salt/modules/dnsutil.py @@ -175,7 +175,7 @@ def parse_zone(zonefile=None, zone=None): continue if comps[0] == 'IN': comps.insert(0, zonedict['ORIGIN']) - if not comps[0].endswith('.'): + if not comps[0].endswith('.') and 'NS' not in line: comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN']) if comps[2] == 'NS': zonedict.setdefault('NS', []).append(comps[3]) @@ -183,6 +183,11 @@ def parse_zone(zonefile=None, zone=None): if 'MX' not in zonedict: zonedict.setdefault('MX', []).append({'priority': comps[3], 'host': comps[4]}) + elif comps[3] in ('A', 'AAAA'): + zonedict.setdefault(comps[3], {})[comps[0]] = { + 'TARGET': comps[4], + 'TTL': comps[1], + } else: zonedict.setdefault(comps[2], {})[comps[0]] = comps[3] return zonedict From ad6728aeec567e426bbc9414dd8e34f72c96f5d3 Mon Sep 17 00:00:00 2001 From: Borys Pierov Date: Fri, 19 Feb 2016 11:46:40 -0500 Subject: [PATCH 44/65] Include saltenv/pillarenv into data in event.send Based on with_env_opts flag --- salt/modules/event.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/modules/event.py b/salt/modules/event.py index 4c925c115e..37392b2857 100644 --- a/salt/modules/event.py +++ b/salt/modules/event.py @@ -123,6 +123,7 @@ def send(tag, with_env=False, with_grains=False, with_pillar=False, + with_env_opts=False, **kwargs): ''' Send an event to the Salt Master @@ -161,6 +162,11 @@ def send(tag, specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. 
+ :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion + at the moment when event is send into event data. + :type with_env_opts: Specify ``True`` to include ``saltenv`` and + ``pillarenv`` values or ``False`` to omit them. + :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. @@ -209,6 +215,10 @@ def send(tag, else: data_dict['pillar'] = __pillar__ + if with_env_opts: + data_dict['saltenv'] = __opts__.get('environment', 'base') + data_dict['pillarenv'] = __opts__.get('pillarenv') + if kwargs: data_dict.update(kwargs) From b978ad453530af54e7f3b6d43859cae90b0acadb Mon Sep 17 00:00:00 2001 From: Alexander Backlund Date: Fri, 19 Feb 2016 04:42:09 -0800 Subject: [PATCH 45/65] Adds support for encoded PowerShell commands. --- salt/modules/cmdmod.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 57fa0bffb8..787df0a3b2 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -18,6 +18,7 @@ import subprocess import sys import time import traceback +import base64 from salt.utils import vt # Import salt libs @@ -288,6 +289,8 @@ def _run(cmd, # The third item[2] in each tuple is the name of that method. 
if stack[-2][2] == 'script': cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd + elif kwargs.get('powershell_encoded', False): + cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd) else: cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"')) @@ -844,7 +847,8 @@ def run(cmd, pillar_override=kwargs.get('pillar'), use_vt=use_vt, password=kwargs.get('password', None), - bg=bg) + bg=bg, + **kwargs) log_callback = _check_cb(log_callback) @@ -2597,6 +2601,7 @@ def powershell(cmd, ignore_retcode=False, saltenv='base', use_vt=False, + encode_cmd=False, **kwargs): ''' Execute the passed PowerShell command and return the output as a string. @@ -2705,6 +2710,10 @@ def powershell(cmd, :param str saltenv: The salt environment to use. Default is 'base' + :param bool encode_cmd: Encode the command before executing. Use in cases + where characters may be dropped or incorrectly converted when executed. + Default is False. + CLI Example: .. code-block:: powershell @@ -2719,6 +2728,14 @@ def powershell(cmd, # Append PowerShell Object formatting cmd = '{0} | ConvertTo-Json -Depth 32'.format(cmd) + if encode_cmd: + # Convert the cmd to UTF-16LE without a BOM and base64 encode. + # Just base64 encoding UTF-8 or including a BOM is not valid. 
+ log.debug('Encoding PowerShell command \'{0}\''.format(cmd)) + cmd_utf16 = cmd.decode('utf-8').encode('utf-16le') + cmd = base64.standard_b64encode(cmd_utf16) + kwargs['powershell_encoded'] = True + # Retrieve the response, while overriding shell with 'powershell' response = run(cmd, cwd=cwd, From 7a5dbff3bd44865a717a39975252feb5f3ba8984 Mon Sep 17 00:00:00 2001 From: Alexander Backlund Date: Wed, 24 Feb 2016 00:42:37 -0800 Subject: [PATCH 46/65] added powershell_encoded arg instead of kwarg --- salt/modules/cmdmod.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 787df0a3b2..e4e29b3022 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -240,6 +240,7 @@ def _run(cmd, use_vt=False, password=None, bg=False, + powershell_encoded=False, **kwargs): ''' Do the DRY thing and only call subprocess.Popen() once @@ -289,7 +290,7 @@ def _run(cmd, # The third item[2] in each tuple is the name of that method. if stack[-2][2] == 'script': cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd - elif kwargs.get('powershell_encoded', False): + elif powershell_encoded: cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd) else: cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"')) @@ -678,6 +679,7 @@ def run(cmd, saltenv='base', use_vt=False, bg=False, + powershell_encoded=False, **kwargs): r''' Execute the passed command and return the output as a string @@ -774,6 +776,9 @@ def run(cmd, :param bool use_vt: Use VT utils (saltstack) to stream the command output more interactively to the console and the logs. This is experimental. + :param bool powershell_encoded: Specify if the supplied command is encoded. + Only applies to shell 'powershell'. + .. warning:: This function does not process commands through a shell unless the python_shell flag is set to True. 
This means that any @@ -848,7 +853,7 @@ def run(cmd, use_vt=use_vt, password=kwargs.get('password', None), bg=bg, - **kwargs) + powershell_encoded=powershell_encoded) log_callback = _check_cb(log_callback) @@ -2734,7 +2739,9 @@ def powershell(cmd, log.debug('Encoding PowerShell command \'{0}\''.format(cmd)) cmd_utf16 = cmd.decode('utf-8').encode('utf-16le') cmd = base64.standard_b64encode(cmd_utf16) - kwargs['powershell_encoded'] = True + powershell_encoded = True + else: + powershell_encoded = False # Retrieve the response, while overriding shell with 'powershell' response = run(cmd, @@ -2755,6 +2762,7 @@ def powershell(cmd, saltenv=saltenv, use_vt=use_vt, python_shell=python_shell, + powershell_encoded=powershell_encoded, **kwargs) try: From 03c5a7bd9e1113d0ffb3c78d675885b6a3185f01 Mon Sep 17 00:00:00 2001 From: Alexander Backlund Date: Wed, 24 Feb 2016 03:29:24 -0800 Subject: [PATCH 47/65] Lint fixes --- salt/modules/cmdmod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index e4e29b3022..bee62709fb 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -2741,7 +2741,7 @@ def powershell(cmd, cmd = base64.standard_b64encode(cmd_utf16) powershell_encoded = True else: - powershell_encoded = False + powershell_encoded = False # Retrieve the response, while overriding shell with 'powershell' response = run(cmd, From a93c7e2ffaa410063a6322f8bcc1492888fc205f Mon Sep 17 00:00:00 2001 From: Anand Nevase Date: Wed, 24 Feb 2016 17:34:07 +0530 Subject: [PATCH 48/65] change win_organization_name default value to Organization --- doc/topics/cloud/vmware.rst | 2 +- salt/cloud/clouds/vmware.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/topics/cloud/vmware.rst b/doc/topics/cloud/vmware.rst index 4fa77fbac9..3da2cb02c4 100644 --- a/doc/topics/cloud/vmware.rst +++ b/doc/topics/cloud/vmware.rst @@ -476,7 +476,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` 
or During network configuration (if network specified), it is used to specify new administrator password for the machine. ``win_organization_name`` - Specify windows vm user's organization. Default organization name is blank + Specify windows vm user's organization. Default organization name is Organization VMware vSphere documentation: https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.customization.UserData.html diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 542f327d92..a994facd9c 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -2238,7 +2238,7 @@ def create(vm_): 'win_password', vm_, __opts__, search_global=False, default=None ) win_organization_name = config.get_cloud_config_value( - 'win_organization_name', vm_, __opts__, search_global=False, default='' + 'win_organization_name', vm_, __opts__, search_global=False, default='Organization' ) plain_text = config.get_cloud_config_value( 'plain_text', vm_, __opts__, search_global=False, default=False From 36f63965ed1bad1d664791fcc4dbdadd18c1e965 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 24 Feb 2016 11:41:44 -0700 Subject: [PATCH 49/65] Add release notes. --- doc/topics/releases/carbon.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/topics/releases/carbon.rst b/doc/topics/releases/carbon.rst index 5bd0a8a78a..6cbd0055e6 100644 --- a/doc/topics/releases/carbon.rst +++ b/doc/topics/releases/carbon.rst @@ -36,6 +36,11 @@ Functionality Changes - The ``onfail`` requisite now uses OR logic instead of AND logic. :issue:`22370` - The consul external pillar now strips leading and trailing whitespace. :issue:`31165` +- The win_system.py state is now case sensitive for computer names. Previously + computer names set with a state were converted to all caps. 
If you have a + state setting computer names with lower case letters in the name that has + been applied, the computer name will be changed again to apply the case + sensitive name. Deprecations ============ From 3fe49c28b8496becb0b2f45ef3b1e865af08d886 Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Wed, 24 Feb 2016 13:31:49 -0700 Subject: [PATCH 50/65] Refactor SPM install to properly manage dependencies --- salt/spm/__init__.py | 232 ++++++++++++++++++++++++++++++------------- 1 file changed, 164 insertions(+), 68 deletions(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index 10d9746b90..ebba27470e 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -145,6 +145,65 @@ class SPMClient(object): else: raise SPMInvocationError('Invalid local command \'{0}\''.format(command)) + def _install(self, args): + ''' + Install a package from a repo + ''' + if len(args) < 2: + raise SPMInvocationError('A package must be specified') + + packages = args[1:] + for pkg in packages: + if pkg.endswith('.spm') and self._pkgfiles_fun('path_exists', pkg): + to_install, optional, recommended = self._check_all_deps(pkg_file=pkg) + else: + to_install, optional, recommended = self._check_all_deps(pkg_name=pkg) + + optional = set(filter(len, optional)) + self.ui.status('The following dependencies are optional:\n\t{0}\n'.format( + '\n\t'.join(optional) + )) + recommended = set(filter(len, recommended)) + self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format( + '\n\t'.join(recommended) + )) + + to_install = set(filter(len, to_install)) + msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install)) + if not self.opts['assume_yes']: + self.ui.confirm(msg) + + repo_metadata = self._get_repo_metadata() + for package in to_install: + for repo in repo_metadata: + repo_info = repo_metadata[repo] + if package in repo_metadata[repo]['packages']: + cache_path = '{0}/{1}'.format( + self.opts['spm_cache_dir'], + repo + ) + # Download the package + 
dl_path = '{0}/{1}'.format( + repo_info['info']['url'], + repo_info['packages'][package]['filename'] + ) + out_file = '{0}/{1}'.format( + cache_path, + repo_info['packages'][package]['filename'] + ) + if not os.path.exists(cache_path): + os.makedirs(cache_path) + + if dl_path.startswith('file://'): + dl_path = dl_path.replace('file://', '') + shutil.copyfile(dl_path, out_file) + else: + http.query(dl_path, text_out=out_file) + + # Kick off the install + self._install_indv_pkg(package, out_file) + return + def _local_install(self, args, pkg_name=None): ''' Install a package from a file @@ -152,26 +211,45 @@ class SPMClient(object): if len(args) < 2: raise SPMInvocationError('A package file must be specified') - pkg_file = args[1] - if not os.path.exists(pkg_file): + self._install(args) + + + def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None): + ''' + Starting with one package, check all packages for dependencies + ''' + if pkg_file and not os.path.exists(pkg_file): raise SPMInvocationError('Package file {0} not found'.format(pkg_file)) - comps = pkg_file.split('-') - comps = '-'.join(comps[:-2]).split('/') - name = comps[-1] + if pkg_file and not formula_def: + comps = pkg_file.split('-') + comps = '-'.join(comps[:-2]).split('/') + pkg_name = comps[-1] - formula_tar = tarfile.open(pkg_file, 'r:bz2') - formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name)) - formula_def = yaml.safe_load(formula_ref) + formula_tar = tarfile.open(pkg_file, 'r:bz2') + formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) + formula_def = yaml.safe_load(formula_ref) - pkg_info = self._pkgdb_fun('info', name, self.db_conn) - if pkg_info is not None and not self.opts['force']: - raise SPMPackageError( - 'Package {0} already installed, not installing again'.format(formula_def['name']) - ) + self.repo_metadata = self._get_repo_metadata() + if not formula_def: + for repo in self.repo_metadata: + if not 
isinstance(self.repo_metadata[repo]['packages'], dict): + continue + if pkg_name in self.repo_metadata[repo]['packages']: + formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info'] + if not formula_def: + raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name)) + + # Check to see if the package is already installed + pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn) + pkgs_to_install = [] + if pkg_info is None or self.opts['force']: + pkgs_to_install.append(pkg_name) + + optional_install = [] + recommended_install = [] if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def: - self.repo_metadata = self._get_repo_metadata() self.avail_pkgs = {} for repo in self.repo_metadata: if not isinstance(self.repo_metadata[repo]['packages'], dict): @@ -188,31 +266,47 @@ class SPMClient(object): ) if optional: - self.ui.status('The following dependencies are optional:') + optional_install.extend(optional) + #self.ui.status('The following dependencies are required:') for dep_pkg in optional: pkg_info = self._pkgdb_fun('info', formula_def['name']) + msg = dep_pkg if isinstance(pkg_info, dict): - self.ui.status('{0} [Installed]').format(dep_pkg) - else: - self.ui.status(dep_pkg) + msg = '{0} [Installed]'.format(dep_pkg) + optional_install.append(msg) + #self.ui.status(msg) if recommended: - self.ui.status('The following dependencies are recommended:') + #self.ui.status('The following dependencies are recommended:') + recommended_install.extend(recommended) for dep_pkg in recommended: pkg_info = self._pkgdb_fun('info', formula_def['name']) + msg = dep_pkg if isinstance(pkg_info, dict): - self.ui.status('{0} [Installed]').format(dep_pkg) - else: - self.ui.status(dep_pkg) + msg = '{0} [Installed]'.format(dep_pkg) + recommended_install.append(msg) + #self.ui.status(msg) - if pkg_name is None: - msg = 'Installing package from file {0}'.format(pkg_file) - else: - msg = 'Installing package {0}'.format(pkg_name) 
- if not self.opts['assume_yes']: - self.ui.confirm(msg) + if needs: + #self.ui.status('The following dependencies are required:') + pkgs_to_install.extend(needs) + for dep_pkg in needs: + pkg_info = self._pkgdb_fun('info', formula_def['name']) + msg = dep_pkg + if isinstance(pkg_info, dict): + msg = '{0} [Installed]'.format(dep_pkg) + #self.ui.status(dep_pkg) - self.ui.status('... installing') + return pkgs_to_install, optional_install, recommended_install + + def _install_indv_pkg(self, pkg_name, pkg_file): + ''' + Install one individual package + ''' + self.ui.status('... installing {0}'.format(pkg_name)) + formula_tar = tarfile.open(pkg_file, 'r:bz2') + formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) + formula_def = yaml.safe_load(formula_ref) for field in ('version', 'release', 'summary', 'description'): if field not in formula_def: @@ -220,7 +314,7 @@ class SPMClient(object): pkg_files = formula_tar.getmembers() # First pass: check for files that already exist - existing_files = self._pkgfiles_fun('check_existing', name, pkg_files, formula_def) + existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def) if existing_files and not self.opts['force']: raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format( @@ -228,7 +322,7 @@ class SPMClient(object): ) # We've decided to install - self._pkgdb_fun('register_pkg', name, formula_def, self.db_conn) + self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn) # No defaults for this in config.py; default to the current running # user and group @@ -245,7 +339,7 @@ class SPMClient(object): member.gname = gname out_path = self._pkgfiles_fun('install_file', - name, + pkg_name, formula_tar, member, formula_def, @@ -261,7 +355,7 @@ class SPMClient(object): file_hash, self.files_conn) self._pkgdb_fun('register_file', - name, + pkg_name, member, out_path, digest, @@ -435,7 +529,7 @@ class SPMClient(object): log.debug('Wrote 
{0}'.format(metadata_filename)) - def _install(self, args): + def _install_old(self, args): ''' Install a package from a repo ''' @@ -475,49 +569,51 @@ class SPMClient(object): if len(args) < 2: raise SPMInvocationError('A package must be specified') - package = args[1] - msg = 'Removing package {0}'.format(package) + packages = args[1:] + msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages)) if not self.opts['assume_yes']: self.ui.confirm(msg) - self.ui.status('... removing') + for package in packages: + self.ui.status('... removing {0}'.format(package)) - if not self._pkgdb_fun('db_exists', self.opts['spm_db']): - raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package)) + if not self._pkgdb_fun('db_exists', self.opts['spm_db']): + raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package)) - # Look at local repo index - pkg_info = self._pkgdb_fun('info', package, self.db_conn) - if pkg_info is None: - raise SPMPackageError('package {0} not installed'.format(package)) - - # Find files that have not changed and remove them - files = self._pkgdb_fun('list_files', package, self.db_conn) - dirs = [] - for filerow in files: - if self._pkgfiles_fun('path_isdir', filerow[0]): - dirs.append(filerow[0]) + # Look at local repo index + pkg_info = self._pkgdb_fun('info', package, self.db_conn) + if pkg_info is None: + self.ui.status('package {0} not installed'.format(package)) continue - file_hash = hashlib.sha1() - digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn) - if filerow[1] == digest: - self._verbose('Removing file {0}'.format(filerow[0]), log.trace) - self._pkgfiles_fun('remove_file', filerow[0], self.files_conn) - else: - self._verbose('Not removing file {0}'.format(filerow[0]), log.trace) - self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn) - # Clean up directories - for dir_ in sorted(dirs, reverse=True): - 
self._pkgdb_fun('unregister_file', dir_, package, self.db_conn) - try: - self._verbose('Removing directory {0}'.format(dir_), log.trace) - os.rmdir(dir_) - except OSError: - # Leave directories in place that still have files in them - self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace) + # Find files that have not changed and remove them + files = self._pkgdb_fun('list_files', package, self.db_conn) + dirs = [] + for filerow in files: + if self._pkgfiles_fun('path_isdir', filerow[0]): + dirs.append(filerow[0]) + continue + file_hash = hashlib.sha1() + digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn) + if filerow[1] == digest: + self._verbose('Removing file {0}'.format(filerow[0]), log.trace) + self._pkgfiles_fun('remove_file', filerow[0], self.files_conn) + else: + self._verbose('Not removing file {0}'.format(filerow[0]), log.trace) + self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn) - self._pkgdb_fun('unregister_pkg', package, self.db_conn) + # Clean up directories + for dir_ in sorted(dirs, reverse=True): + self._pkgdb_fun('unregister_file', dir_, package, self.db_conn) + try: + self._verbose('Removing directory {0}'.format(dir_), log.trace) + os.rmdir(dir_) + except OSError: + # Leave directories in place that still have files in them + self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace) + + self._pkgdb_fun('unregister_pkg', package, self.db_conn) def _verbose(self, msg, level=log.debug): ''' From b021afc934d3871ccfd347fde47f425e6685aaa8 Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Wed, 24 Feb 2016 13:44:39 -0700 Subject: [PATCH 51/65] Remove old _install() function --- salt/spm/__init__.py | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index ebba27470e..0e8fabe06e 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -529,39 +529,6 @@ class 
SPMClient(object): log.debug('Wrote {0}'.format(metadata_filename)) - def _install_old(self, args): - ''' - Install a package from a repo - ''' - if len(args) < 2: - raise SPMInvocationError('A package must be specified') - - package = args[1] - - self._verbose('Installing package {0}'.format(package), log.debug) - repo_metadata = self._get_repo_metadata() - for repo in repo_metadata: - repo_info = repo_metadata[repo] - if package in repo_metadata[repo]['packages']: - cache_path = '{0}/{1}'.format( - self.opts['spm_cache_dir'], - repo - ) - dl_path = '{0}/{1}'.format(repo_info['info']['url'], repo_info['packages'][package]['filename']) - out_file = '{0}/{1}'.format(cache_path, repo_info['packages'][package]['filename']) - if not os.path.exists(cache_path): - os.makedirs(cache_path) - - if dl_path.startswith('file://'): - dl_path = dl_path.replace('file://', '') - shutil.copyfile(dl_path, out_file) - else: - http.query(dl_path, text_out=out_file) - - self._local_install((None, out_file), package) - return - raise SPMPackageError('Cannot install package {0}, no source package'.format(package)) - def _remove(self, args): ''' Remove a package From ca48a3e8dfb1e5df2aa7f1cd8db67ccade676731 Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Wed, 24 Feb 2016 14:03:02 -0700 Subject: [PATCH 52/65] Lint --- salt/spm/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index 0e8fabe06e..4a890eb1a6 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -29,6 +29,7 @@ import salt.ext.six as six from salt.ext.six import string_types from salt.ext.six.moves import input from salt.ext.six.moves import zip +from salt.ext.six.moves import filter # Get logging started log = logging.getLogger(__name__) @@ -213,7 +214,6 @@ class SPMClient(object): self._install(args) - def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None): ''' Starting with one package, check all packages for dependencies 
From c287e6db556e8b5eeb88efd53a9d65bfd875ee15 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 24 Feb 2016 16:05:00 -0700 Subject: [PATCH 53/65] Created GitHub Issue and PR templates --- Contributing.rst => .github/CONTRIBUTING.md | 0 .github/ISSUE_TEMPLATE.md | 15 +++++++++++++++ .github/PULL_REQUEST_TEMPLATE.md | 12 ++++++++++++ 3 files changed, 27 insertions(+) rename Contributing.rst => .github/CONTRIBUTING.md (100%) create mode 100644 .github/ISSUE_TEMPLATE.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/Contributing.rst b/.github/CONTRIBUTING.md similarity index 100% rename from Contributing.rst rename to .github/CONTRIBUTING.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..2a32624265 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,15 @@ +### Expected Behavior + +### Actual Behavior + +### Steps to Reproduce Issue + +### Versions Report +** Provided by running `salt --versions-report`** + +### Relevant Logs, Configs, or States +** If not already provided above, please post any additional + information that can help us reproduce your issue such as + commands run with ``-l debug``. Be sure to remove any + sensitive information. ** + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..4972b2486a --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,12 @@ +### What does this PR do? + +### What Issues does this PR fix or reference? + +### Previous Behavior + +### New Behavior + +### Tests Written? 
+[ ] Yes +[ ] No + From 7fd530912dc119aa25a3e33730eb6e001dd8ed6f Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Wed, 24 Feb 2016 18:51:56 -0700 Subject: [PATCH 54/65] Fix test errors --- salt/spm/__init__.py | 108 +++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index 4a890eb1a6..bd5aa974c2 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -154,11 +154,37 @@ class SPMClient(object): raise SPMInvocationError('A package must be specified') packages = args[1:] + file_map = {} + optional = [] + recommended = [] + to_install = [] for pkg in packages: - if pkg.endswith('.spm') and self._pkgfiles_fun('path_exists', pkg): - to_install, optional, recommended = self._check_all_deps(pkg_file=pkg) + if pkg.endswith('.spm'): + if self._pkgfiles_fun('path_exists', pkg): + comps = pkg.split('-') + comps = '-'.join(comps[:-2]).split('/') + pkg_name = comps[-1] + + formula_tar = tarfile.open(pkg, 'r:bz2') + formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) + formula_def = yaml.safe_load(formula_ref) + + file_map[pkg_name] = pkg + to_, op_, re_ = self._check_all_deps( + pkg_name=pkg_name, + pkg_file=pkg, + formula_def=formula_def + ) + to_install.extend(to_) + optional.extend(op_) + recommended.extend(re_) + else: + raise SPMInvocationError('Package file {0} not found'.format(pkg)) else: - to_install, optional, recommended = self._check_all_deps(pkg_name=pkg) + to_, op_, re_ = self._check_all_deps(pkg_name=pkg) + to_install.extend(to_) + optional.extend(op_) + recommended.extend(re_) optional = set(filter(len, optional)) self.ui.status('The following dependencies are optional:\n\t{0}\n'.format( @@ -175,34 +201,38 @@ class SPMClient(object): self.ui.confirm(msg) repo_metadata = self._get_repo_metadata() + for package in to_install: - for repo in repo_metadata: - repo_info = repo_metadata[repo] - if package in repo_metadata[repo]['packages']: - 
cache_path = '{0}/{1}'.format( - self.opts['spm_cache_dir'], - repo - ) - # Download the package - dl_path = '{0}/{1}'.format( - repo_info['info']['url'], - repo_info['packages'][package]['filename'] - ) - out_file = '{0}/{1}'.format( - cache_path, - repo_info['packages'][package]['filename'] - ) - if not os.path.exists(cache_path): - os.makedirs(cache_path) + if package in file_map: + self._install_indv_pkg(package, file_map[package]) + else: + for repo in repo_metadata: + repo_info = repo_metadata[repo] + if package in repo_metadata[repo]['packages']: + cache_path = '{0}/{1}'.format( + self.opts['spm_cache_dir'], + repo + ) + # Download the package + dl_path = '{0}/{1}'.format( + repo_info['info']['url'], + repo_info['packages'][package]['filename'] + ) + out_file = '{0}/{1}'.format( + cache_path, + repo_info['packages'][package]['filename'] + ) + if not os.path.exists(cache_path): + os.makedirs(cache_path) - if dl_path.startswith('file://'): - dl_path = dl_path.replace('file://', '') - shutil.copyfile(dl_path, out_file) - else: - http.query(dl_path, text_out=out_file) + if dl_path.startswith('file://'): + dl_path = dl_path.replace('file://', '') + shutil.copyfile(dl_path, out_file) + else: + http.query(dl_path, text_out=out_file) - # Kick off the install - self._install_indv_pkg(package, out_file) + # Kick off the install + self._install_indv_pkg(package, out_file) return def _local_install(self, args, pkg_name=None): @@ -221,15 +251,6 @@ class SPMClient(object): if pkg_file and not os.path.exists(pkg_file): raise SPMInvocationError('Package file {0} not found'.format(pkg_file)) - if pkg_file and not formula_def: - comps = pkg_file.split('-') - comps = '-'.join(comps[:-2]).split('/') - pkg_name = comps[-1] - - formula_tar = tarfile.open(pkg_file, 'r:bz2') - formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) - formula_def = yaml.safe_load(formula_ref) - self.repo_metadata = self._get_repo_metadata() if not formula_def: for repo in 
self.repo_metadata: @@ -246,6 +267,10 @@ class SPMClient(object): pkgs_to_install = [] if pkg_info is None or self.opts['force']: pkgs_to_install.append(pkg_name) + elif pkg_info is not None and not self.opts['force']: + raise SPMPackageError( + 'Package {0} already installed, not installing again'.format(formula_def['name']) + ) optional_install = [] recommended_install = [] @@ -267,17 +292,14 @@ class SPMClient(object): if optional: optional_install.extend(optional) - #self.ui.status('The following dependencies are required:') for dep_pkg in optional: pkg_info = self._pkgdb_fun('info', formula_def['name']) msg = dep_pkg if isinstance(pkg_info, dict): msg = '{0} [Installed]'.format(dep_pkg) optional_install.append(msg) - #self.ui.status(msg) if recommended: - #self.ui.status('The following dependencies are recommended:') recommended_install.extend(recommended) for dep_pkg in recommended: pkg_info = self._pkgdb_fun('info', formula_def['name']) @@ -285,17 +307,14 @@ class SPMClient(object): if isinstance(pkg_info, dict): msg = '{0} [Installed]'.format(dep_pkg) recommended_install.append(msg) - #self.ui.status(msg) if needs: - #self.ui.status('The following dependencies are required:') pkgs_to_install.extend(needs) for dep_pkg in needs: pkg_info = self._pkgdb_fun('info', formula_def['name']) msg = dep_pkg if isinstance(pkg_info, dict): msg = '{0} [Installed]'.format(dep_pkg) - #self.ui.status(dep_pkg) return pkgs_to_install, optional_install, recommended_install @@ -551,8 +570,7 @@ class SPMClient(object): # Look at local repo index pkg_info = self._pkgdb_fun('info', package, self.db_conn) if pkg_info is None: - self.ui.status('package {0} not installed'.format(package)) - continue + raise SPMInvocationError('Package {0} not installed'.format(package)) # Find files that have not changed and remove them files = self._pkgdb_fun('list_files', package, self.db_conn) From 7a04ae5558394367c13125e910d2fedc9df422cd Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Wed, 24 
Feb 2016 01:02:04 -0700 Subject: [PATCH 55/65] tests.runtests: add test suite data and fcns --- tests/runtests.py | 79 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/tests/runtests.py b/tests/runtests.py index 526d430ae2..892e400bf3 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -42,12 +42,91 @@ except OSError as err: REQUIRED_OPEN_FILES = 3072 +# Combine info from command line options and test suite directories. A test +# suite is a python package of test modules relative to the tests directory. +TEST_SUITES = { + 'unit': + {'display_name': 'Unit', + 'path': 'unit'}, + 'module': + {'display_name': 'Module', + 'path': 'integration/modules'}, + 'state': + {'display_name': 'State', + 'path': 'integration/states'}, + 'cli': + {'display_name': 'CLI', + 'path': 'integration/cli'}, + 'client': + {'display_name': 'Client', + 'path': 'integration/client'}, + 'shell': + {'display_name': 'Shell', + 'path': 'integration/shell'}, + 'runners': + {'display_name': 'Runners', + 'path': 'integration/runners'}, + 'renderers': + {'display_name': 'Renderers', + 'path': 'integration/renderers'}, + 'loader': + {'display_name': 'Loader', + 'path': 'integration/loader'}, + 'outputter': + {'display_name': 'Outputter', + 'path': 'integration/output'}, + 'fileserver': + {'display_name': 'Fileserver', + 'path': 'integration/fileserver'}, + 'wheel': + {'display_name': 'Wheel', + 'path': 'integration/wheel'}, + 'api': + {'display_name': 'NetAPI', + 'path': 'integration/netapi'}, + 'cloud_provider': + {'display_name': 'Cloud Provider', + 'path': 'integration/cloud/providers'}, +} + class SaltTestsuiteParser(SaltCoverageTestingParser): support_docker_execution = True support_destructive_tests_selection = True source_code_basedir = SALT_ROOT + def _get_suites(self, include_unit=False, include_cloud_provider=False): + ''' + Return a set of all test suites except unit and cloud provider tests + unless requested + ''' + suites = 
set(TEST_SUITES.keys()) + if not include_unit: + suites -= set(['unit']) + if not include_cloud_provider: + suites -= set(['cloud_provider']) + + return suites + + def _check_enabled_suites(self, include_unit=False, include_cloud_provider=False): + ''' + Query whether test suites have been enabled + ''' + suites = self._get_suites(include_unit=include_unit, + include_cloud_provider=include_cloud_provider) + + return any([getattr(self.options, suite) for suite in suites]) + + def _enable_suites(self, include_unit=False, include_cloud_provider=False): + ''' + Enable test suites for current test run + ''' + suites = self._get_suites(include_unit=include_unit, + include_cloud_provider=include_cloud_provider) + + for suite in suites: + setattr(self.options, suite, True) + def setup_additional_options(self): self.add_option( '--sysinfo', From bc60c02cfe768d8cae9f637a7b514c82e47abee4 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Wed, 24 Feb 2016 21:56:20 -0700 Subject: [PATCH 56/65] tests.runtests: refactor test suite usage --- tests/runtests.py | 111 ++++++++++------------------------------------ 1 file changed, 23 insertions(+), 88 deletions(-) diff --git a/tests/runtests.py b/tests/runtests.py index 892e400bf3..e5d7628adc 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -283,22 +283,10 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): os.environ['EXPENSIVE_TESTS'] = 'True' if self.options.coverage and any(( - self.options.module, - self.options.cli, - self.options.client, - self.options.shell, - self.options.unit, - self.options.state, - self.options.runners, - self.options.renderers, - self.options.loader, - self.options.name, - self.options.outputter, - self.options.fileserver, - self.options.wheel, - self.options.api, - os.geteuid() != 0, - not self.options.run_destructive)): + self.options.name, + os.geteuid() != 0, + not self.options.run_destructive)) \ + and self._check_enabled_suites(include_unit=True): self.error( 'No sense in generating 
the tests coverage report when ' 'not running the full test suite, including the ' @@ -306,26 +294,11 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): 'incorrect results.' ) - # Set test suite defaults if no specific suite options are provided - if not any((self.options.module, self.options.client, self.options.cli, - self.options.shell, self.options.unit, self.options.state, - self.options.runners, self.options.loader, self.options.name, - self.options.outputter, self.options.cloud_provider_tests, - self.options.fileserver, self.options.wheel, self.options.api, - self.options.renderers)): - self.options.module = True - self.options.cli = True - self.options.client = True - self.options.shell = True - self.options.unit = True - self.options.runners = True - self.options.renderers = True - self.options.state = True - self.options.loader = True - self.options.outputter = True - self.options.fileserver = True - self.options.wheel = True - self.options.api = True + # When no tests are specifically enumerated on the command line, setup + # a default run: +unit -cloud_provider + if not self.options.name and not \ + self._check_enabled_suites(include_unit=True, include_cloud_provider=True): + self._enable_suites(include_unit=True) self.start_coverage( branch=True, @@ -340,12 +313,12 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): if self.options.clean: TestDaemon.clean() - def run_integration_suite(self, suite_folder, display_name): + def run_integration_suite(self, path='', display_name=''): ''' Run an integration test suite ''' - path = os.path.join(TEST_DIR, 'integration', suite_folder) - return self.run_suite(path, display_name) + full_path = os.path.join(TEST_DIR, path) + return self.run_suite(full_path, display_name) def start_daemons_only(self): if not salt.utils.is_windows(): @@ -436,24 +409,12 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): continue named_tests.append(test) - if (self.options.unit or named_unit_test) and not \ - 
(self.options.runners or - self.options.renderers or - self.options.state or - self.options.module or - self.options.cli or - self.options.client or - self.options.loader or - self.options.outputter or - self.options.fileserver or - self.options.wheel or - self.options.cloud_provider_tests or - self.options.api or - named_tests): - # We're either not running any of runners, state, module and client - # tests, or, we're only running unittests by passing --unit or by - # passing only `unit.` to --name. - # We don't need the tests daemon running + if (self.options.unit or named_unit_test) and not named_tests and not \ + self._check_enabled_suites(include_cloud_provider=True): + # We're either not running any integration test suites, or we're + # only running unit tests by passing --unit or by passing only + # `unit.` to --name. We don't need the tests daemon + # running return [True] if not salt.utils.is_windows(): self.prep_filehandles() @@ -467,11 +428,8 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): print_header(' * Setting up Salt daemons to execute tests', top=False) status = [] - if not any([self.options.cli, self.options.client, self.options.module, - self.options.runners, self.options.shell, self.options.state, - self.options.loader, self.options.outputter, self.options.name, - self.options.cloud_provider_tests, self.options.api, self.options.renderers, - self.options.fileserver, self.options.wheel]): + # Return an empty status if no tests have been enabled + if not self._check_enabled_suites(include_cloud_provider=True) and not self.options.name: return status with TestDaemon(self): @@ -481,32 +439,9 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): continue results = self.run_suite('', name, load_from_name=True) status.append(results) - if self.options.loader: - status.append(self.run_integration_suite('loader', 'Loader')) - if self.options.runners: - status.append(self.run_integration_suite('runners', 'Runners')) - if self.options.module: - 
status.append(self.run_integration_suite('modules', 'Module')) - if self.options.state: - status.append(self.run_integration_suite('states', 'State')) - if self.options.cli: - status.append(self.run_integration_suite('cli', 'CLI')) - if self.options.client: - status.append(self.run_integration_suite('client', 'Client')) - if self.options.shell: - status.append(self.run_integration_suite('shell', 'Shell')) - if self.options.outputter: - status.append(self.run_integration_suite('output', 'Outputter')) - if self.options.fileserver: - status.append(self.run_integration_suite('fileserver', 'Fileserver')) - if self.options.wheel: - status.append(self.run_integration_suite('wheel', 'Wheel')) - if self.options.cloud_provider_tests: - status.append(self.run_integration_suite('cloud/providers', 'Cloud Provider')) - if self.options.api: - status.append(self.run_integration_suite('netapi', 'NetAPI')) - if self.options.renderers: - status.append(self.run_integration_suite('renderers', 'Renderers')) + for suite in TEST_SUITES: + if suite != 'unit' and getattr(self.options, suite): + status.append(self.run_integration_suite(**TEST_SUITES[suite])) return status def run_unit_tests(self): From a98bcf6fd140b10387e4a42014908df4884ea9f4 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Wed, 24 Feb 2016 01:08:48 -0700 Subject: [PATCH 57/65] tests.runtests: reorder arg declarations --- tests/runtests.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/runtests.py b/tests/runtests.py index e5d7628adc..8860e6e8b1 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -147,6 +147,13 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): action='store_true', help='Do not run any tests. Simply start the daemons.' ) + self.output_options_group.add_option( + '--no-colors', + '--no-colours', + default=False, + action='store_true', + help='Disable colour printing.' 
+ ) self.test_selection_group.add_option( '-m', @@ -269,13 +276,6 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): default=False, help='Run salt-api tests' ) - self.output_options_group.add_option( - '--no-colors', - '--no-colours', - default=False, - action='store_true', - help='Disable colour printing.' - ) def validate_options(self): if self.options.cloud_provider_tests: From 0fb1bbb8831e095b7c7493083e98e7513900aa3d Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Wed, 24 Feb 2016 01:09:10 -0700 Subject: [PATCH 58/65] tests.runtests: rename cloud provider tests --- tests/runtests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/runtests.py b/tests/runtests.py index 8860e6e8b1..d01ee18039 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -253,7 +253,9 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): help='Run outputter tests' ) self.test_selection_group.add_option( + '--cloud-provider', '--cloud-provider-tests', + dest='cloud_provider', action='store_true', default=False, help=('Run cloud provider tests. 
These tests create and delete ' @@ -278,7 +280,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): ) def validate_options(self): - if self.options.cloud_provider_tests: + if self.options.cloud_provider: # Turn on expensive tests execution os.environ['EXPENSIVE_TESTS'] = 'True' From b256f2d8b57301ee53e8ec3b2ef9506548aa10f7 Mon Sep 17 00:00:00 2001 From: Alexander Backlund Date: Thu, 25 Feb 2016 00:56:40 -0800 Subject: [PATCH 59/65] Renamed flag encoded_cmd --- salt/modules/cmdmod.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index bee62709fb..e6d449e6f6 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -240,7 +240,7 @@ def _run(cmd, use_vt=False, password=None, bg=False, - powershell_encoded=False, + encoded_cmd=False, **kwargs): ''' Do the DRY thing and only call subprocess.Popen() once @@ -290,7 +290,7 @@ def _run(cmd, # The third item[2] in each tuple is the name of that method. if stack[-2][2] == 'script': cmd = 'Powershell -NonInteractive -ExecutionPolicy Bypass -File ' + cmd - elif powershell_encoded: + elif encoded_cmd: cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd) else: cmd = 'Powershell -NonInteractive "{0}"'.format(cmd.replace('"', '\\"')) @@ -679,7 +679,7 @@ def run(cmd, saltenv='base', use_vt=False, bg=False, - powershell_encoded=False, + encoded_cmd=False, **kwargs): r''' Execute the passed command and return the output as a string @@ -776,7 +776,7 @@ def run(cmd, :param bool use_vt: Use VT utils (saltstack) to stream the command output more interactively to the console and the logs. This is experimental. - :param bool powershell_encoded: Specify if the supplied command is encoded. + :param bool encoded_cmd: Specify if the supplied command is encoded. Only applies to shell 'powershell'. .. 
warning:: @@ -853,7 +853,7 @@ def run(cmd, use_vt=use_vt, password=kwargs.get('password', None), bg=bg, - powershell_encoded=powershell_encoded) + encoded_cmd=encoded_cmd) log_callback = _check_cb(log_callback) @@ -2739,9 +2739,9 @@ def powershell(cmd, log.debug('Encoding PowerShell command \'{0}\''.format(cmd)) cmd_utf16 = cmd.decode('utf-8').encode('utf-16le') cmd = base64.standard_b64encode(cmd_utf16) - powershell_encoded = True + encoded_cmd = True else: - powershell_encoded = False + encoded_cmd = False # Retrieve the response, while overriding shell with 'powershell' response = run(cmd, @@ -2762,7 +2762,7 @@ def powershell(cmd, saltenv=saltenv, use_vt=use_vt, python_shell=python_shell, - powershell_encoded=powershell_encoded, + encoded_cmd=encoded_cmd, **kwargs) try: From 7fdf13bd73b813a89fc48e234fe8827b8f3c5866 Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 25 Feb 2016 14:46:18 +0100 Subject: [PATCH 60/65] Recommit letsencrypt-auto mod/state in develop --- salt/modules/acme.py | 293 +++++++++++++++++++++++++++++++++++++++++++ salt/states/acme.py | 121 ++++++++++++++++++ 2 files changed, 414 insertions(+) create mode 100644 salt/modules/acme.py create mode 100644 salt/states/acme.py diff --git a/salt/modules/acme.py b/salt/modules/acme.py new file mode 100644 index 0000000000..ca655ede84 --- /dev/null +++ b/salt/modules/acme.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- +''' +ACME / Let's Encrypt module +=========================== + +.. versionadded: 2016.3 + +This module currently uses letsencrypt-auto, which needs to be available in the path or in /opt/letsencrypt/. + +.. note:: + + Currently only the webroot authentication is tested/implemented. + +.. note:: + + Installation & configuration of the Let's Encrypt client can for example be done using + https://github.com/saltstack-formulas/letsencrypt-formula + +.. warning:: + + Be sure to set at least accept-tos = True in cli.ini! 
+ +Most parameters will fall back to cli.ini defaults if None is given. + +''' +# Import python libs +from __future__ import absolute_import +import logging +import datetime +import os + +# Import salt libs +import salt.utils + +log = logging.getLogger(__name__) + +LEA = salt.utils.which_bin(['letsencrypt-auto', '/opt/letsencrypt/letsencrypt-auto']) +LE_LIVE = '/etc/letsencrypt/live/' + + +def __virtual__(): + ''' + Only work when letsencrypt-auto is installed + ''' + return LEA is not None, 'The ACME execution module cannot be loaded: letsencrypt-auto not installed.' + + +def _cert_file(name, cert_type): + ''' + Return expected path of a Let's Encrypt live cert + ''' + return os.path.join(LE_LIVE, name, '{0}.pem'.format(cert_type)) + + +def _expires(name): + ''' + Return the expiry date of a cert + + :return datetime object of expiry date + ''' + cert_file = _cert_file(name, 'cert') + # Use the salt module if available + if 'tls.cert_info' in __salt__: + expiry = __salt__['tls.cert_info'](cert_file)['not_after'] + # Cobble it together using the openssl binary + else: + openssl_cmd = 'openssl x509 -in {0} -noout -enddate'.format(cert_file) + # No %e format on my Linux'es here + strptime_sux_cmd = 'date --date="$({0} | cut -d= -f2)" +%s'.format(openssl_cmd) + expiry = float(__salt__['cmd.shell'](strptime_sux_cmd, output_loglevel='quiet')) + # expiry = datetime.datetime.strptime(expiry.split('=', 1)[-1], '%b %e %H:%M:%S %Y %Z') + + return datetime.datetime.fromtimestamp(expiry) + + +def _renew_by(name, window=None): + ''' + Date before a certificate should be renewed + + :param name: Common Name of the certificate (DNS name of certificate) + :param window: days before expiry date to renew + :return datetime object of first renewal date + ''' + expiry = _expires(name) + if window is not None: + expiry = expiry - datetime.timedelta(days=window) + + return expiry + + +def cert(name, + aliases=None, + email=None, + webroot=None, + test_cert=False, + renew=None, + 
keysize=None, + server=None, + owner='root', + group='root'): + ''' + Obtain/renew a certificate from an ACME CA, probably Let's Encrypt. + + :param name: Common Name of the certificate (DNS name of certificate) + :param aliases: subjectAltNames (Additional DNS names on certificate) + :param email: e-mail address for interaction with ACME provider + :param webroot: True or full path to webroot used for authentication + :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server') + :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days + :param keysize: RSA key bits + :param server: API endpoint to talk to + :param owner: owner of private key + :param group: group of private key + :return: dict with 'result' True/False/None, 'comment' and certificate's expiry date ('not_after') + + CLI example: + + .. code-block:: bash + + salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public + ''' + + cmd = [LEA, 'certonly'] + + cert_file = _cert_file(name, 'cert') + if not __salt__['file.file_exists'](cert_file): + log.debug('Certificate {0} does not exist (yet)'.format(cert_file)) + renew = False + elif needs_renewal(name, renew): + log.debug('Certificate {0} will be renewed'.format(cert_file)) + cmd.append('--renew-by-default') + renew = True + else: + return { + 'result': None, + 'comment': 'Certificate {0} does not need renewal'.format(cert_file), + 'not_after': expires(name) + } + + if server: + cmd.append('--server {0}'.format(server)) + + if test_cert: + if server: + return {'result': False, 'comment': 'Use either server or test_cert, not both'} + cmd.append('--test-cert') + + if webroot: + cmd.append('--authenticator webroot') + if webroot is not True: + cmd.append('--webroot-path {0}'.format(webroot)) + + if email: + cmd.append('--email {0}'.format(email)) + + if keysize: + 
cmd.append('--rsa-key-size {0}'.format(keysize)) + + cmd.append('--domains {0}'.format(name)) + if aliases is not None: + for dns in aliases: + cmd.append('--domains {0}'.format(dns)) + + res = __salt__['cmd.run_all'](' '.join(cmd)) + + if res['retcode'] != 0: + return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])} + + if renew: + comment = 'Certificate {0} renewed'.format(name) + else: + comment = 'Certificate {0} obtained'.format(name) + ret = {'comment': comment, 'not_after': expires(name)} + + res = __salt__['file.check_perms'](_cert_file(name, 'privkey'), {}, owner, group, '0600', follow_symlinks=True) + + if res is None: + ret['result'] = False + ret['comment'] += ', but setting permissions failed.' + elif not res[0].get('result', False): + ret['result'] = False + ret['comment'] += ', but setting permissions failed with \n{0}'.format(res[0]['comment']) + else: + ret['result'] = True + ret['comment'] += '.' + + return ret + + +def certs(): + ''' + Return a list of active certificates + + CLI example: + + .. code-block:: bash + + salt 'vhost.example.com' acme.certs + ''' + return __salt__['file.readdir'](LE_LIVE)[2:] + + +def info(name): + ''' + Return information about a certificate + + .. note:: + Will output tls.cert_info if that's available, or OpenSSL text if not + + :param name: CommonName of cert + + CLI example: + + .. 
code-block:: bash + + salt 'gitlab.example.com' acme.info dev.example.com + ''' + cert_file = _cert_file(name, 'cert') + # Use the salt module if available + if 'tls.cert_info' in __salt__: + info = __salt__['tls.cert_info'](cert_file) + # Strip out the extensions object contents; + # these trip over our poor state output + # and they serve no real purpose here anyway + info['extensions'] = info['extensions'].keys() + return info + # Cobble it together using the openssl binary + else: + openssl_cmd = 'openssl x509 -in {0} -noout -text'.format(cert_file) + return __salt__['cmd.run'](openssl_cmd, output_loglevel='quiet') + + +def expires(name): + ''' + The expiry date of a certificate in ISO format + + :param name: CommonName of cert + + CLI example: + + .. code-block:: bash + + salt 'gitlab.example.com' acme.expires dev.example.com + ''' + return _expires(name).isoformat() + + +def has(name): + ''' + Test if a certificate is in the Let's Encrypt Live directory + + :param name: CommonName of cert + + Code example: + + .. code-block:: python + + if __salt__['acme.has']('dev.example.com'): + log.info('That is one nice certificate you have there!') + ''' + return __salt__['file.file_exists'](_cert_file(name, 'cert')) + + +def renew_by(name, window=None): + ''' + Date in ISO format when a certificate should first be renewed + + :param name: CommonName of cert + :param window: number of days before expiry when renewal should take place + ''' + return _renew_by(name, window).isoformat() + + +def needs_renewal(name, window=None): + ''' + Check if a certicate needs renewal + + :param name: CommonName of cert + :param window: Window in days to renew earlier or True/force to just return True + + Code example: + + .. 
code-block:: python + + if __salt__['acme.needs_renewal']('dev.example.com'): + __salt__['acme.cert']('dev.example.com', **kwargs) + else: + log.info('Your certificate is still good') + ''' + if window is not None and window in ('force', 'Force', True): + return True + + return _renew_by(name, window) <= datetime.datetime.today() diff --git a/salt/states/acme.py b/salt/states/acme.py new file mode 100644 index 0000000000..285429d679 --- /dev/null +++ b/salt/states/acme.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +''' +ACME / Let's Encrypt certificate management state +================================================= + +.. versionadded: 2016.3 + +See also the module documentation + +.. code-block:: yaml + + reload-gitlab: + cmd.run: + - name: gitlab-ctl hup + + dev.example.com: + acme.cert: + - aliases: + - gitlab.example.com + - email: acmemaster@example.com + - webroot: /opt/gitlab/embedded/service/gitlab-rails/public + - renew: 14 + - fire_event: acme/dev.example.com + - onchanges_in: + - cmd: reload-gitlab + +''' +# Import python libs +from __future__ import absolute_import +import logging + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only work when the ACME module agrees + ''' + return 'acme.cert' in __salt__ + + +def cert(name, + aliases=None, + email=None, + webroot=None, + test_cert=False, + renew=None, + keysize=None, + server=None, + owner='root', + group='root'): + ''' + Obtain/renew a certificate from an ACME CA, probably Let's Encrypt. 
+ + :param name: Common Name of the certificate (DNS name of certificate) + :param aliases: subjectAltNames (Additional DNS names on certificate) + :param email: e-mail address for interaction with ACME provider + :param webroot: True or full path to webroot used for authentication + :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server') + :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days + :param keysize: RSA key bits + :param server: API endpoint to talk to + :param owner: owner of private key + :param group: group of private key + ''' + + if __opts__['test']: + ret = { + 'name': name, + 'changes': {}, + 'result': None + } + window = None + try: + window = int(renew) + except: # pylint: disable=bare-except + pass + + comment = 'Certificate {0} '.format(name) + if not __salt__['acme.has'](name): + comment += 'would have been obtained' + elif __salt__['acme.needs_renewal'](name, window): + comment += 'would have been renewed' + else: + comment += 'would not have been touched' + ret['comment'] = comment + return ret + + if not __salt__['acme.has'](name): + old = None + else: + old = __salt__['acme.info'](name) + + res = __salt__['acme.cert']( + name, + aliases=aliases, + email=email, + webroot=webroot, + test_cert=test_cert, + renew=renew, + keysize=keysize, + server=server, + owner=owner, + group=group + ) + + ret = { + 'name': name, + 'result': res['result'] is not False, + 'comment': res['comment'] + } + + if res['result'] is None: + ret['changes'] = {} + else: + ret['changes'] = { + 'old': old, + 'new': __salt__['acme.info'](name) + } + + return ret From 7ce2375a90c54bd679ef87aa183226c4a4d3cb20 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 25 Feb 2016 08:49:46 -0700 Subject: [PATCH 61/65] Lowercase some words --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md 
b/.github/PULL_REQUEST_TEMPLATE.md index 4972b2486a..b450ad070a 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,12 +1,12 @@ ### What does this PR do? -### What Issues does this PR fix or reference? +### What issues does this PR fix or reference? ### Previous Behavior ### New Behavior -### Tests Written? +### Tests written? [ ] Yes [ ] No From a6b1b61c09926e85f88f24c78e08fcd6239fe404 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 25 Feb 2016 09:58:02 -0700 Subject: [PATCH 62/65] Fix PR Template = Checkbox markdown should be a list --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index b450ad070a..dd924a98ba 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -7,6 +7,6 @@ ### New Behavior ### Tests written? -[ ] Yes -[ ] No +- [ ] Yes +- [ ] No From 70b85742859d983553e76f9a2ba859e7c1f45713 Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Thu, 25 Feb 2016 10:10:04 -0700 Subject: [PATCH 63/65] Switch manage.bootstrap to Salt SSH --- salt/runners/manage.py | 84 +++++++++++++++++++++++++++++++++--------- salt/version.py | 2 +- 2 files changed, 68 insertions(+), 18 deletions(-) diff --git a/salt/runners/manage.py b/salt/runners/manage.py index 42fe922d55..16bf1d4ea1 100644 --- a/salt/runners/manage.py +++ b/salt/runners/manage.py @@ -12,6 +12,8 @@ import re import subprocess import tempfile import time +import logging +import uuid # Import 3rd-party libs import salt.ext.six as six @@ -19,16 +21,18 @@ from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: dis # Import salt libs import salt.key -import salt.client import salt.utils import salt.utils.minions +import salt.client +import salt.client.ssh import salt.wheel import salt.version from salt.utils.event import tagify -from salt.exceptions import SaltClientError - +from salt.exceptions import 
SaltClientError, SaltSystemExit FINGERPRINT_REGEX = re.compile(r'^([a-f0-9]{2}:){15}([a-f0-9]{2})$') +log = logging.getLogger(__name__) + def status(output=True): ''' @@ -50,7 +54,6 @@ def status(output=True): key = salt.key.Key(__opts__) keys = key.list_keys() - ret['up'] = sorted(minions) ret['down'] = sorted(set(keys['minions']) - set(minions)) return ret @@ -127,7 +130,6 @@ def down(removekeys=False): wheel.call_func('key.delete', match=minion) return ret - def up(): # pylint: disable=C0103 ''' Print a list of all of the minions that are up @@ -615,7 +617,14 @@ def versions(): def bootstrap(version='develop', script=None, hosts='', - root_user=True): + root_user=False, + roster='flat', + ssh_user='root', + ssh_password=None, + ssh_priv_key=None, + tmp_dir='/tmp/.bootstrap', + script_args='', + http_backend='tornado'): ''' Bootstrap minions with salt-bootstrap @@ -637,23 +646,64 @@ def bootstrap(version='develop', salt-run manage.bootstrap hosts='host1,host2' salt-run manage.bootstrap hosts='host1,host2' version='v0.17' - salt-run manage.bootstrap hosts='host1,host2' version='v0.17' script='https://bootstrap.saltstack.com/develop' - salt-run manage.bootstrap hosts='ec2-user@host1,ec2-user@host2' root_user=False + salt-run manage.bootstrap hosts='host1,host2' version='v0.17' \ + script='https://bootstrap.saltstack.com/develop' + salt-run manage.bootstrap hosts='ec2-user@host1,ec2-user@host2' \ + root_user=False ''' + dep_warning = ( + 'Starting with Salt Carbon, manage.bootstrap now uses Salt SSH to ' + 'connect, and requires a roster entry. Please ensure that a roster ' + 'entry exists for this host. Non-roster hosts will no longer be ' + 'supported starting with Salt Oxygen.' + ) + if root_user is True: + salt.utils.warn_until('Oxygen', dep_warning) + if script is None: script = 'https://bootstrap.saltstack.com' for host in hosts.split(','): - # Could potentially lean on salt-ssh utils to make - # deployment easier on existing hosts (i.e. 
use salt.utils.vt, - # pass better options to ssh, etc) - subprocess.call(['ssh', - ('root@' if root_user else '') + host, - 'python -c \'import urllib; ' - 'print urllib.urlopen(' - '\'' + script + '\'' - ').read()\' | sh -s -- git ' + version]) + client_opts = __opts__.copy() + client_opts['tgt'] = host + client_opts['selected_target_option'] = 'glob' + tmp_dir = '{0}-{1}/'.format(tmp_dir.rstrip('/'), uuid.uuid4()) + deploy_command = os.path.join(tmp_dir, 'deploy.sh') + try: + client_opts['argv'] = ['file.makedirs', tmp_dir, 'mode=0700'] + salt.client.ssh.SSH(client_opts).run() + client_opts['argv'] = [ + 'http.query', + script, + 'backend={0}'.format(http_backend), + 'text_out={0}'.format(deploy_command) + ] + client = salt.client.ssh.SSH(client_opts).run() + client_opts['argv'] = [ + 'cmd.run', + ' '.join(['sh', deploy_command, script_args]), + 'python_shell=False' + ] + salt.client.ssh.SSH(client_opts).run() + client_opts['argv'] = ['file.remove', tmp_dir] + salt.client.ssh.SSH(client_opts).run() + except SaltSystemExit as exc: + if 'No hosts found with target' in str(exc): + log.warn('The host {0} was not found in the Salt SSH roster ' + 'system. 
Attempting to log in without Salt SSH.') + salt.utils.warn_until('Oxygen', dep_warning) + ret = subprocess.call([ + 'ssh', + ('root@' if root_user else '') + host, + 'python -c \'import urllib; ' + 'print urllib.urlopen(' + '\'' + script + '\'' + ').read()\' | sh -s -- git ' + version + ]) + return ret + else: + log.error(str(exc)) def bootstrap_psexec(hosts='', master=None, version=None, arch='win32', diff --git a/salt/version.py b/salt/version.py index ecaa813351..e24eb9722c 100644 --- a/salt/version.py +++ b/salt/version.py @@ -87,8 +87,8 @@ class SaltStackVersion(object): 'Boron' : (2016, 3), 'Carbon' : (MAX_SIZE - 103, 0), 'Nitrogen' : (MAX_SIZE - 102, 0), + 'Oxygen' : (MAX_SIZE - 101, 0), # pylint: disable=E8265 - #'Oxygen' : (MAX_SIZE - 101, 0), #'Fluorine' : (MAX_SIZE - 100, 0), #'Neon' : (MAX_SIZE - 99 , 0), #'Sodium' : (MAX_SIZE - 98 , 0), From 1494c449188b808796bdf1e7cf4a1e84652214bd Mon Sep 17 00:00:00 2001 From: hlub Date: Wed, 17 Feb 2016 13:33:50 +0200 Subject: [PATCH 64/65] Refactored GlusterFS support Usage of GlusterFS states did not change except the state 'created' was deprecated in favor of more descriptive name 'volume_present'. - Improved separation of functionality in the execution module from the states. Module functions now return booleans instead of varying strings. Parsing those strings in the states was very error-prone. In glusterfs.peer() function this problem was earlier solved by returning a dict containing both the exit value and output string of the gluster call. This could be a good idea, although without calling the gluster calls twice, and applied to other functios too. - Calls to gluster are now logged more verbosely, while a failing call no longer raises an immediate exception. - Improved checking and verbosity in the state level. Cleaned code a lot, removed some bugs and other odd behaviour. - Updated tests to match the changes. Refactored some test code to be more readable. 
Added assertions to test that some functions are not called when things are already in the desired state. Preferred changing behaviour of the mocked functions while proceeding instead of listing all of the return values first. - Replaced glusterfs.list_peers() with more informative peer_status() and deprecated the old name. The function now returns a dict with UUIDs as its keys. - glusterfs.info() can now be called without volume name. It always returns a dictionary. Use this function instead of status() in sates. - Functions glusterfs.delete() and glusterfs.create() were deprecated and renamed to delete_volume() and create_volume(). --- salt/modules/glusterfs.py | 260 +++++++++----- salt/states/glusterfs.py | 211 +++++++----- tests/unit/modules/glusterfs_test.py | 484 +++++++++++---------------- tests/unit/states/glusterfs_test.py | 304 ++++++++++------- 4 files changed, 668 insertions(+), 591 deletions(-) diff --git a/salt/modules/glusterfs.py b/salt/modules/glusterfs.py index 5ebb9272bf..13ff089249 100644 --- a/salt/modules/glusterfs.py +++ b/salt/modules/glusterfs.py @@ -17,7 +17,7 @@ from salt.ext.six.moves import range # Import salt libs import salt.utils import salt.utils.cloud as suc -from salt.exceptions import CommandExecutionError, SaltInvocationError +from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) @@ -43,39 +43,47 @@ def _get_minor_version(): return version -def _gluster(cmd): +def _gluster_ok(xml_data): ''' - Perform a gluster command. + Extract boolean return value from Gluster's XML output. ''' - # We will pass the command string as stdin to allow for much longer - # command strings. This is especially useful for creating large volumes - # where the list of bricks exceeds 128 characters. - return __salt__['cmd.run']( - 'gluster --mode=script', stdin="{0}\n".format(cmd)) + return int(xml_data.find('opRet').text) == 0 def _gluster_xml(cmd): ''' - Perform a gluster --xml command and check for and raise errors. 
+ Perform a gluster --xml command and log result. ''' + # We will pass the command string as stdin to allow for much longer + # command strings. This is especially useful for creating large volumes + # where the list of bricks exceeds 128 characters. root = ET.fromstring( __salt__['cmd.run']( 'gluster --xml --mode=script', stdin="{0}\n".format(cmd) ).replace("\n", "")) - if int(root.find('opRet').text) != 0: - raise CommandExecutionError(root.find('opErrstr').text) + if _gluster_ok(root): + output = root.find('output') + if output: + log.info('Gluster call "{0}" succeeded: {1}'.format(cmd, root.find('output').text)) + else: + log.info('Gluster call "{0}" succeeded'.format(cmd)) + else: + log.error('Failed gluster call: {0}: {1}'.format(cmd, root.find('opErrstr').text)) return root +def _gluster(cmd): + ''' + Perform a gluster command and return a boolean status. + ''' + return _gluster_ok(_gluster_xml(cmd)) + + def _etree_to_dict(t): - list_t = list(t) - if len(list_t) > 0: - d = {} - for child in list_t: - d[child.tag] = _etree_to_dict(child) - else: - d = t.text - return d + d = {} + for child in t: + d[child.tag] = _etree_to_dict(child) + return d or t.text def _iter(root, term): @@ -88,15 +96,20 @@ def _iter(root, term): return root.iter(term) -def list_peers(): +def peer_status(): ''' - Return a list of gluster peers + Return peer status information + + The return value is a dictionary with peer UUIDs as keys and dicts of peer + information as values. Hostnames are listed in one list. GlusterFS separates + one of the hostnames but the only reason for this seems to be which hostname + happens to be used firts in peering. CLI Example: .. 
code-block:: bash - salt '*' glusterfs.list_peers + salt '*' glusterfs.peer_status GLUSTER direct CLI example (to show what salt is sending to gluster): @@ -118,14 +131,42 @@ def list_peers(): ''' root = _gluster_xml('peer status') - result = {} - for et_peer in _iter(root, 'peer'): - result.update({et_peer.find('hostname').text: [ - x.text for x in _iter(et_peer.find('hostnames'), 'hostname')]}) - if len(result) == 0: + if not _gluster_ok(root): return None - else: - return result + + result = {} + for peer in _iter(root, 'peer'): + uuid = peer.find('uuid').text + result[uuid] = {'hostnames': []} + for item in peer: + if item.tag == 'hostname': + result[uuid]['hostnames'].append(item.text) + elif item.tag == 'hostnames': + for hostname in item: + if hostname.text not in result[uuid]['hostnames']: + result[uuid]['hostnames'].append(hostname.text) + elif item.tag != 'uuid': + result[uuid][item.tag] = item.text + return result + + +def list_peers(): + ''' + Deprecated version of peer_status(), which returns the peered hostnames + and some additional information. + + CLI Example: + + .. code-block:: bash + + salt '*' glusterfs.list_peers + + ''' + salt.utils.warn_until( + 'Nitrogen', + 'The glusterfs.list_peers function is deprecated in favor of' + ' more verbose but very similar glusterfs.peer_status.') + return peer_status() def peer(name): @@ -163,15 +204,10 @@ def peer(name): 'Invalid characters in peer name "{0}"'.format(name)) cmd = 'peer probe {0}'.format(name) - - op_result = { - "exitval": _gluster_xml(cmd).find('opErrno').text, - "output": _gluster_xml(cmd).find('output').text - } - return op_result + return _gluster(cmd) -def create(name, bricks, stripe=False, replica=False, device_vg=False, +def create_volume(name, bricks, stripe=False, replica=False, device_vg=False, transport='tcp', start=False, force=False): ''' Create a glusterfs volume. 
@@ -249,14 +285,24 @@ def create(name, bricks, stripe=False, replica=False, device_vg=False, if force: cmd += ' force' - log.debug('Clustering command:\n{0}'.format(cmd)) - _gluster_xml(cmd) + if not _gluster(cmd): + return False if start: - _gluster_xml('volume start {0}'.format(name)) - return 'Volume {0} created and started'.format(name) - else: - return 'Volume {0} created. Start volume to use'.format(name) + return start_volume(name) + return True + + +def create(*args, **kwargs): + ''' + Deprecated version of more consistently named create_volume + ''' + salt.utils.warn_until( + 'Nitrogen', + 'The glusterfs.create function is deprecated in favor of' + ' more descriptive glusterfs.create_volume.' + ) + return create_volume(*args, **kwargs) def list_volumes(): @@ -270,8 +316,9 @@ def list_volumes(): salt '*' glusterfs.list_volumes ''' - get_volume_list = 'gluster --xml volume list' root = _gluster_xml('volume list') + if not _gluster_ok(root): + return None results = [x.text for x in _iter(root, 'volume')] return results @@ -291,6 +338,10 @@ def status(name): ''' # Get volume status root = _gluster_xml('volume status {0}'.format(name)) + if not _gluster_ok(root): + # Most probably non-existing volume, the error output is logged + # Tiis return value is easy to test and intuitive + return None ret = {'bricks': {}, 'nfs': {}, 'healers': {}} @@ -326,44 +377,50 @@ def status(name): return ret -def info(name): +def info(name=None): ''' .. versionadded:: 2015.8.4 - Return the gluster volume info. + Return gluster volume info. name - Volume name + Optional name to retrieve only information of one volume CLI Example: .. 
code-block:: bash - salt '*' glusterfs.info myvolume + salt '*' glusterfs.info ''' - cmd = 'volume info {0}'.format(name) + cmd = 'volume info' + if name is not None: + cmd += ' ' + name + root = _gluster_xml(cmd) + if not _gluster_ok(root): + return None - volume = [x for x in _iter(root, 'volume')][0] + ret = {} + for volume in _iter(root, 'volume'): + name = volume.find('name').text + ret[name] = _etree_to_dict(volume) - ret = {name: _etree_to_dict(volume)} + bricks = {} + for i, brick in enumerate(_iter(volume, 'brick'), start=1): + brickkey = 'brick{0}'.format(i) + bricks[brickkey] = {'path': brick.text} + for child in brick: + if not child.tag == 'name': + bricks[brickkey].update({child.tag: child.text}) + for k, v in brick.items(): + bricks[brickkey][k] = v + ret[name]['bricks'] = bricks - bricks = {} - for i, brick in enumerate(_iter(volume, 'brick'), start=1): - brickkey = 'brick{0}'.format(i) - bricks[brickkey] = {'path': brick.text} - for child in list(brick): - if not child.tag == 'name': - bricks[brickkey].update({child.tag: child.text}) - for k, v in brick.items(): - bricks[brickkey][k] = v - ret[name]['bricks'] = bricks - - options = {} - for option in _iter(volume, 'option'): - options[option.find('name').text] = option.find('value').text - ret[name]['options'] = options + options = {} + for option in _iter(volume, 'option'): + options[option.find('name').text] = option.find('value').text + ret[name]['options'] = options return ret @@ -390,12 +447,15 @@ def start_volume(name, force=False): cmd = '{0} force'.format(cmd) volinfo = info(name) + if name not in volinfo: + log.error("Cannot start non-existing volume {0}".format(name)) + return False - if not force and volinfo['status'] == '1': - return 'Volume already started' + if not force and volinfo[name]['status'] == '1': + log.info("Volume {0} already started".format(name)) + return True - _gluster_xml(cmd) - return 'Volume {0} started'.format(name) + return _gluster(cmd) def stop_volume(name, 
force=False): @@ -415,17 +475,22 @@ def stop_volume(name, force=False): salt '*' glusterfs.stop_volume mycluster ''' - status(name) + volinfo = info() + if name not in volinfo: + log.error('Cannot stop non-existing volume {0}'.format(name)) + return False + if int(volinfo[name]['status']) != 1: + log.warning('Attempt to stop already stopped volume {0}'.format(name)) + return True cmd = 'volume stop {0}'.format(name) if force: cmd += ' force' - _gluster_xml(cmd) - return 'Volume {0} stopped'.format(name) + return _gluster(cmd) -def delete(target, stop=True): +def delete_volume(target, stop=True): ''' Deletes a gluster volume @@ -435,26 +500,37 @@ def delete(target, stop=True): stop Stop volume before delete if it is started, True by default ''' - if target not in list_volumes(): - raise SaltInvocationError('Volume {0} does not exist'.format(target)) + volinfo = info() + if target not in volinfo: + log.error('Cannot delete non-existing volume {0}'.format(target)) + return False # Stop volume if requested to and it is running - running = (info(target)['status'] == '1') + running = (volinfo[target]['status'] == '1') if not stop and running: # Fail if volume is running if stop is not requested - raise SaltInvocationError( - 'Volume {0} must be stopped before deletion'.format(target)) + log.error('Volume {0} must be stopped before deletion'.format(target)) + return False if running: - stop_volume(target, force=True) + if not stop_volume(target, force=True): + return False cmd = 'volume delete {0}'.format(target) - _gluster_xml(cmd) - if running: - return 'Volume {0} stopped and deleted'.format(target) - else: - return 'Volume {0} deleted'.format(target) + return _gluster(cmd) + + +def delete(*args, **kwargs): + ''' + Deprecated version of more consistently named delete_volume + ''' + salt.utils.warn_until( + 'Nitrogen', + 'The glusterfs.delete function is deprecated in favor of' + ' more descriptive glusterfs.delete_volume.' 
+ ) + return delete_volume(*args, **kwargs) def add_volume_bricks(name, bricks): @@ -468,6 +544,11 @@ def add_volume_bricks(name, bricks): List of bricks to add to the volume ''' + volinfo = info() + if name not in volinfo: + log.error('Volume {0} does not exist, cannot add bricks'.format(name)) + return False + new_bricks = [] cmd = 'volume add-brick {0}'.format(name) @@ -475,7 +556,7 @@ def add_volume_bricks(name, bricks): if isinstance(bricks, str): bricks = [bricks] - volume_bricks = [x['path'] for x in info(name)['bricks'].values()] + volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()] for brick in bricks: if brick in volume_bricks: @@ -487,10 +568,5 @@ def add_volume_bricks(name, bricks): if len(new_bricks) > 0: for brick in new_bricks: cmd += ' {0}'.format(brick) - - _gluster_xml(cmd) - - return '{0} bricks successfully added to the volume {1}'.format(len(new_bricks), name) - - else: - return 'Bricks already in volume {0}'.format(name) + return _gluster(cmd) + return True diff --git a/salt/states/glusterfs.py b/salt/states/glusterfs.py index 73ab5428d4..5ee7120c5a 100644 --- a/salt/states/glusterfs.py +++ b/salt/states/glusterfs.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -Manage glusterfs pool. +Manage GlusterFS pool. ''' # Import python libs @@ -10,6 +10,7 @@ import logging import socket # Import salt libs +import salt.utils import salt.utils.cloud as suc from salt.exceptions import SaltCloudException @@ -67,55 +68,56 @@ def peered(name): suc.check_name(name, 'a-zA-Z0-9._-') except SaltCloudException as e: ret['comment'] = 'Invalid characters in peer name.' 
- ret['result'] = False return ret - peers = __salt__['glusterfs.list_peers']() - - if peers: - if name in peers or any([name in peers[x] for x in peers]): - ret['result'] = True - ret['comment'] = 'Host {0} already peered'.format(name) - return ret - - result = __salt__['glusterfs.peer'](name) - ret['comment'] = '' - if 'exitval' in result: - if int(result['exitval']) <= len(RESULT_CODES): - ret['comment'] = RESULT_CODES[int(result['exitval'])].format(name) - else: - if 'comment' in result: - ret['comment'] = result['comment'] - - newpeers = __salt__['glusterfs.list_peers']() - # if newpeers was null, we know something didn't work. - if newpeers and name in newpeers or any([name in newpeers[x] for x in newpeers]): - ret['result'] = True - ret['changes'] = {'new': newpeers, 'old': peers} - # In case the hostname doesn't have any periods in it - elif name == socket.gethostname(): - ret['result'] = True - return ret - # In case they have a hostname like "example.com" - elif name == socket.gethostname().split('.')[0]: - ret['result'] = True - return ret - elif 'on localhost not needed' in ret['comment']: + # Check if the name resolves to localhost + if socket.gethostbyname(name) in __salt__['network.ip_addrs'](): ret['result'] = True ret['comment'] = 'Peering with localhost is not needed' + return ret + + peers = __salt__['glusterfs.peer_status']() + + if peers and any(name in v['hostnames'] for v in peers.values()): + ret['result'] = True + ret['comment'] = 'Host {0} already peered'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Peer {0} will be added.'.format(name) + ret['result'] = None + return ret + + peered = __salt__['glusterfs.peer'](name) + if not peered: + ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(name) + return ret + + # Double check that the action succeeded + newpeers = __salt__['glusterfs.peer_status']() + if newpeers and any(name in v['hostnames'] for v in newpeers.values()): + ret['result'] 
= True + ret['comment'] = 'Host {0} successfully peered'.format(name) + ret['changes'] = {'new': newpeers, 'old': peers} else: - ret['result'] = False + ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(name) return ret -def created(name, bricks, stripe=False, replica=False, device_vg=False, +def volume_present(name, bricks, stripe=False, replica=False, device_vg=False, transport='tcp', start=False, force=False): ''' - Check if volume already exists + Ensure that the volume exists name name of the volume + bricks + list of brick paths + + start + ensure that the volume is also started + .. code-block:: yaml myvolume: @@ -137,53 +139,76 @@ def created(name, bricks, stripe=False, replica=False, device_vg=False, 'changes': {}, 'comment': '', 'result': False} - volumes = __salt__['glusterfs.list_volumes']() - if name in volumes: - if start: - if isinstance(__salt__['glusterfs.status'](name), dict): - ret['result'] = True - cmnt = 'Volume {0} already exists and is started.'.format(name) - else: - result = __salt__['glusterfs.start_volume'](name) - if 'started' in result: - ret['result'] = True - cmnt = 'Volume {0} started.'.format(name) - ret['changes'] = {'new': 'started', 'old': 'stopped'} - else: - ret['result'] = False - cmnt = result - else: - ret['result'] = True - cmnt = 'Volume {0} already exists.'.format(name) - ret['comment'] = cmnt - return ret - elif __opts__['test']: - if start and isinstance(__salt__['glusterfs.status'](name), dict): - comment = 'Volume {0} will be created and started'.format(name) - else: - comment = 'Volume {0} will be created'.format(name) - ret['comment'] = comment - ret['result'] = None - return ret if suc.check_name(name, 'a-zA-Z0-9._-'): ret['comment'] = 'Invalid characters in volume name.' 
- ret['result'] = False return ret - ret['comment'] = __salt__['glusterfs.create'](name, bricks, stripe, + volumes = __salt__['glusterfs.list_volumes']() + if name not in volumes: + if __opts__['test']: + comment = 'Volume {0} will be created'.format(name) + if start: + comment += ' and started' + ret['comment'] = comment + ret['result'] = None + return ret + + vol_created = __salt__['glusterfs.create_volume'](name, bricks, stripe, replica, device_vg, transport, start, force) - old_volumes = volumes - volumes = __salt__['glusterfs.list_volumes']() - if name in volumes: - ret['changes'] = {'new': volumes, 'old': old_volumes} - ret['result'] = True + if not vol_created: + ret['comment'] = 'Creation of volume {0} failed'.format(name) + return ret + old_volumes = volumes + volumes = __salt__['glusterfs.list_volumes']() + if name in volumes: + ret['changes'] = {'new': volumes, 'old': old_volumes} + ret['comment'] = 'Volume {0} is created'.format(name) + else: + ret['comment'] = 'Volume {0} already exists'.format(name) + + if start: + if __opts__['test']: + # volume already exists + ret['comment'] = ret['comment'] + ' and will be started' + ret['result'] = None + return ret + if int(__salt__['glusterfs.info']()[name]['status']) == 1: + ret['result'] = True + ret['comment'] = ret['comment'] + ' and is started' + else: + vol_started = __salt__['glusterfs.start_volume'](name) + if vol_started: + ret['result'] = True + ret['comment'] = ret['comment'] + ' and is now started' + if not ret['changes']: + ret['changes'] = {'new': 'started', 'old': 'stopped'} + else: + ret['comment'] = ret['comment'] + ' but failed to start. 
Check logs for further information' + return ret + + if __opts__['test']: + ret['result'] = None + else: + ret['result'] = True return ret +def created(*args, **kwargs): + ''' + Deprecated version of more descriptively named volume_present + ''' + salt.utils.warn_until( + 'Nitrogen', + 'The glusterfs.created state is deprecated in favor of more descriptive' + ' glusterfs.volume_present.' + ) + return volume_present(*args, **kwargs) + + def started(name): ''' Check if volume has been started @@ -200,13 +225,14 @@ def started(name): 'changes': {}, 'comment': '', 'result': False} - volumes = __salt__['glusterfs.list_volumes']() - if name not in volumes: + + volinfo = __salt__['glusterfs.info']() + if name not in volinfo: ret['result'] = False ret['comment'] = 'Volume {0} does not exist'.format(name) return ret - if isinstance(__salt__['glusterfs.status'](name), dict): + if int(volinfo[name]['status']) == 1: ret['comment'] = 'Volume {0} is already started'.format(name) ret['result'] = True return ret @@ -215,12 +241,14 @@ def started(name): ret['result'] = None return ret - ret['comment'] = __salt__['glusterfs.start_volume'](name) - if 'started' in ret['comment']: + vol_started = __salt__['glusterfs.start_volume'](name) + if vol_started: ret['result'] = True + ret['comment'] = 'Volume {0} is started'.format(name) ret['change'] = {'new': 'started', 'old': 'stopped'} else: ret['result'] = False + ret['comment'] = 'Failed to start volume {0}'.format(name) return ret @@ -255,31 +283,28 @@ def add_volume_bricks(name, bricks): 'comment': '', 'result': False} - current_bricks = __salt__['glusterfs.status'](name) - - if 'does not exist' in current_bricks: - ret['result'] = False - ret['comment'] = current_bricks + volinfo = __salt__['glusterfs.info']() + if name not in volinfo: + ret['comment'] = 'Volume {0} does not exist'.format(name) return ret - if 'is not started' in current_bricks: - ret['result'] = False - ret['comment'] = current_bricks + if 
int(volinfo[name]['status']) != 1: + ret['comment'] = 'Volume {0} is not started'.format(name) return ret - add_bricks = __salt__['glusterfs.add_volume_bricks'](name, bricks) - ret['comment'] = add_bricks - - if 'bricks successfully added' in add_bricks: - old_bricks = current_bricks - new_bricks = __salt__['glusterfs.status'](name) + current_bricks = [brick['path'] for brick in volinfo[name]['bricks'].values()] + if not set(bricks) - set(current_bricks): ret['result'] = True - ret['changes'] = {'new': list(new_bricks['bricks'].keys()), 'old': list( - old_bricks['bricks'].keys())} + ret['comment'] = 'Bricks already added in volume {0}'.format(name) return ret - if 'Bricks already in volume' in add_bricks: + bricks_added = __salt__['glusterfs.add_volume_bricks'](name, bricks) + if bricks_added: ret['result'] = True + ret['comment'] = 'Bricks successfully added to volume {0}'.format(name) + new_bricks = [brick['path'] for brick in __salt__['glusterfs.info']()[name]['bricks'].values()] + ret['changes'] = {'new': new_bricks, 'old': current_bricks} return ret + ret['comment'] = 'Adding bricks to volume {0} failed'.format(name) return ret diff --git a/tests/unit/modules/glusterfs_test.py b/tests/unit/modules/glusterfs_test.py index 6dd705fb24..5a609c41d2 100644 --- a/tests/unit/modules/glusterfs_test.py +++ b/tests/unit/modules/glusterfs_test.py @@ -18,8 +18,7 @@ from salttesting.mock import ( # Import Salt Libs from salt.modules import glusterfs -import salt.utils.cloud as suc -from salt.exceptions import CommandExecutionError, SaltInvocationError +from salt.exceptions import SaltInvocationError # Globals glusterfs.__salt__ = {} @@ -189,6 +188,7 @@ xml_peer_present = """ 0 + uuid1 node02 node02.domain.dom @@ -311,6 +311,45 @@ xml_volume_info_stopped = """ """ +xml_peer_probe_success = """ + + + 0 + 0 + + + +""" + +xml_peer_probe_already_member = """ + + + 0 + 2 + + Host salt port 24007 already in peer list + +""" + +xml_peer_probe_localhost = """ + + + 0 + 1 + + Probe 
on localhost not needed + +""" + +xml_peer_probe_fail_cant_connect = """ + + + -1 + 107 + Probe returned with Transport endpoint is not connected + +""" + xml_command_success = """ @@ -334,208 +373,86 @@ class GlusterfsTestCase(TestCase): ''' Test cases for salt.modules.glusterfs ''' - # 'list_peers' function tests: 1 - maxDiff = None - def test_list_peers(self): - ''' - Test if it return a list of gluster peers - ''' - mock = MagicMock(return_value=xml_peer_present) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertDictEqual(glusterfs.list_peers(), { - 'node02': ['node02.domain.dom', '10.0.0.2']}) + # 'peer_status' function tests: 1 - mock = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertIsNone(glusterfs.list_peers()) + def test_peer_status(self): + ''' + Test gluster peer status + ''' + mock_run = MagicMock(return_value=xml_peer_present) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertDictEqual( + glusterfs.peer_status(), + {'uuid1': { + 'hostnames': ['node02', 'node02.domain.dom', '10.0.0.2']}}) + + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertDictEqual(glusterfs.peer_status(), {}) # 'peer' function tests: 1 def test_peer(self): ''' - Test if it adds another node into the peer list. + Test if gluster peer call is successful. 
''' + mock_run = MagicMock() + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + mock_run.return_value = xml_peer_probe_already_member + self.assertTrue(glusterfs.peer('salt')) - # invalid characters - mock = MagicMock(return_value=True) - with patch.object(suc, 'check_name', mock): - self.assertRaises(SaltInvocationError, glusterfs.peer, 'a') - # version 3.4 - # by hostname - # peer failed unknown hostname - # peer failed can't connect - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.fail_cant_connect) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2') - # peer self - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_self) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server1'), - {'exitval': '1', 'output': 'success: on localhost not needed'}) - # peer added - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_other) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server2'), - {'exitval': '0', 'output': 'success'}) - # peer already member - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server2'), - {'exitval': '2', 'output': 'success: host server2 port 24007 already in peer list'}) - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_already_peer['ip']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.2'), - {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'}) - # peer in reverse (probe server1 from server2) - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_first_time) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - 
self.assertEqual(glusterfs.peer('server1'), - {'exitval': '0', 'output': 'success'}) - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_second_time) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server1'), - {'exitval': '2', 'output': 'success: host server1 port 24007 already in peer list'}) - # peer in reverse using ip address instead of hostname - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_reverse_already_peer['ip']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.1'), - {'exitval': '2', 'output': 'success: host 10.0.0.1 port 24007 already in peer list'}) - # by ip address - # peer self - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_self) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.1'), - {'exitval': '1', 'output': 'success: on localhost not needed'}) - # peer added - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_other) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.2'), - {'exitval': '0', 'output': 'success'}) - # peer already member - mock = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_already_peer['ip']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.2'), - {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'}) - # version 3.7 - # peer failed unknown hostname - # peer failed can't connect - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.fail_cant_connect) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2') - # peer self - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_self) - with patch.dict(glusterfs.__salt__, 
{'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server1'), - {'exitval': '1', 'output': 'Probe on localhost not needed'}) - # peer added - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_other) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server2'), - {'exitval': '0', 'output': None}) - # peer already member - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server2'), - {'exitval': '2', 'output': 'Host server2 port 24007 already in peer list'}) - # peer in reverse - # by ip address - # peer added - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_other) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.2'), - {'exitval': '0', 'output': None}) - # peer already member - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_already_peer['ip']) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.2'), - {'exitval': '2', 'output': 'Host 10.0.0.2 port 24007 already in peer list'}) - # peer self - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_self) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.1'), - {'exitval': '1', 'output': 'Probe on localhost not needed'}) - # peer in reverse (probe server1 from server2) - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_first_hostname_from_second_first_time) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('server1'), - {'exitval': '2', 'output': 'Host server1 port 24007 already in peer list'}) - # peer in reverse using ip address instead of hostname first time - mock = MagicMock( - 
return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.1'), - {'exitval': '0', 'output': None}) - mock = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('10.0.0.1'), - {'exitval': '2', 'output': 'Host 10.0.0.1 port 24007 already in peer list'}) + mock_run.return_value = xml_peer_probe_localhost + self.assertTrue(glusterfs.peer('salt')) - # 'create' function tests: 1 + mock_run.return_value = xml_peer_probe_fail_cant_connect + self.assertFalse(glusterfs.peer('salt')) - def test_create(self): + # 'create_volume' function tests: 1 + + def test_create_volume(self): ''' - Test if it create a glusterfs volume. + Test if it creates a glusterfs volume. ''' - mock = MagicMock(return_value='') - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): self.assertRaises( SaltInvocationError, - glusterfs.create, + glusterfs.create_volume, 'newvolume', 'host1:brick') - mock = MagicMock(return_value='') - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): self.assertRaises( SaltInvocationError, - glusterfs.create, + glusterfs.create_volume, 'newvolume', 'host1/brick') - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, - glusterfs.create, - 'newvolume', - 'host1:/brick', - True, - True, - True, - 'tcp', - True) + self.assertFalse(mock_run.called) - mock = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.create('newvolume', 'host1:/brick', - True, True, True, 'tcp', True), - 'Volume newvolume created and 
started') + mock_start_volume = MagicMock(return_value=True) + with patch.object(glusterfs, 'start_volume', mock_start_volume): + # Create, do not start + self.assertTrue(glusterfs.create_volume('newvolume', + 'host1:/brick')) + self.assertFalse(mock_start_volume.called) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.create('newvolume', 'host1:/brick'), - 'Volume newvolume created. Start volume to use') + # Create and start + self.assertTrue(glusterfs.create_volume('newvolume', + 'host1:/brick', + start=True)) + self.assertTrue(mock_start_volume.called) + + mock_start_volume.return_value = False + # Create and fail start + self.assertFalse(glusterfs.create_volume('newvolume', + 'host1:/brick', + start=True)) + + mock_run.return_value = xml_command_fail + self.assertFalse(glusterfs.create_volume('newvolume', 'host1:/brick', + True, True, True, 'tcp', True)) # 'list_volumes' function tests: 1 @@ -558,10 +475,9 @@ class GlusterfsTestCase(TestCase): ''' Test if it check the status of a gluster volume. ''' - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, glusterfs.status, 'myvol1') + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertIsNone(glusterfs.status('myvol1')) res = {'bricks': { 'node01:/tmp/foo': { @@ -630,26 +546,35 @@ class GlusterfsTestCase(TestCase): ''' Test if it start a gluster volume. 
''' - mock_list = MagicMock(return_value=['Newvolume1', 'Newvolume2']) - with patch.object(glusterfs, 'list_volumes', mock_list): - mock_status = MagicMock(return_value={'status': '1'}) - with patch.object(glusterfs, 'info', mock_status): - mock = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.start_volume('Newvolume1'), - 'Volume already started') + # Stopped volume + mock_info = MagicMock(return_value={'Newvolume1': {'status': '0'}}) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual(glusterfs.start_volume('Newvolume1'), True) + self.assertEqual(glusterfs.start_volume('nonExisting'), False) + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual(glusterfs.start_volume('Newvolume1'), False) - mock_status = MagicMock(return_value={'status': '0'}) - with patch.object(glusterfs, 'info', mock_status): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): - self.assertEqual(glusterfs.start_volume('Newvolume1'), - 'Volume Newvolume1 started') - - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, glusterfs.start_volume, 'Newvolume1') + # Started volume + mock_info = MagicMock(return_value={'Newvolume1': {'status': '1'}}) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual( + glusterfs.start_volume('Newvolume1', force=True), + True + ) + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + # cmd.run should not be 
called for already running volume: + self.assertEqual(glusterfs.start_volume('Newvolume1'), True) + # except when forcing: + self.assertEqual( + glusterfs.start_volume('Newvolume1', force=True), + False + ) # 'stop_volume' function tests: 1 @@ -657,58 +582,63 @@ class GlusterfsTestCase(TestCase): ''' Test if it stop a gluster volume. ''' - mock = MagicMock(return_value={}) - with patch.object(glusterfs, 'status', mock): - mock = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.stop_volume('Newvolume1'), - 'Volume Newvolume1 stopped') + # Stopped volume + mock_info = MagicMock(return_value={'Newvolume1': {'status': '0'}}) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual(glusterfs.stop_volume('Newvolume1'), True) + self.assertEqual(glusterfs.stop_volume('nonExisting'), False) + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + # cmd.run should not be called for already stopped volume: + self.assertEqual(glusterfs.stop_volume('Newvolume1'), True) - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, glusterfs.stop_volume, 'Newvolume1') + # Started volume + mock_info = MagicMock(return_value={'Newvolume1': {'status': '1'}}) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual(glusterfs.stop_volume('Newvolume1'), True) + self.assertEqual(glusterfs.stop_volume('nonExisting'), False) + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertEqual(glusterfs.stop_volume('Newvolume1'), 
False) - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, glusterfs.stop_volume, 'Newvolume1') + # 'delete_volume' function tests: 1 - # 'delete' function tests: 1 - - def test_delete(self): + def test_delete_volume(self): ''' Test if it deletes a gluster volume. ''' - mock = MagicMock(return_value=['Newvolume1', 'Newvolume2']) - with patch.object(glusterfs, 'list_volumes', mock): + mock_info = MagicMock(return_value={'Newvolume1': {'status': '1'}}) + with patch.object(glusterfs, 'info', mock_info): # volume doesn't exist - self.assertRaises( - SaltInvocationError, glusterfs.delete, 'Newvolume3') + self.assertFalse(glusterfs.delete_volume('Newvolume3')) - mock = MagicMock(return_value={'status': '1'}) - with patch.object(glusterfs, 'info', mock): - mock = MagicMock(return_value=xml_command_success) - # volume exists, should not be stopped, and is started - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - SaltInvocationError, - glusterfs.delete, - 'Newvolume1', - False) + mock_stop_volume = MagicMock(return_value=True) + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + with patch.object(glusterfs, 'stop_volume', mock_stop_volume): + # volume exists, should not be stopped, and is started + self.assertFalse(glusterfs.delete_volume('Newvolume1', + False)) + self.assertFalse(mock_run.called) + self.assertFalse(mock_stop_volume.called) - # volume exists, should be stopped, and is started - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.delete('Newvolume1'), - 'Volume Newvolume1 stopped and deleted') + # volume exists, should be stopped, and is started + self.assertTrue(glusterfs.delete_volume('Newvolume1')) + self.assertTrue(mock_run.called) + self.assertTrue(mock_stop_volume.called) - # volume exists and isn't started - mock = 
MagicMock(return_value={'status': '0'}) - with patch.object(glusterfs, 'info', mock): - mock = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.delete('Newvolume1'), - 'Volume Newvolume1 deleted') + # volume exists and isn't started + mock_info = MagicMock(return_value={'Newvolume1': {'status': '2'}}) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + self.assertTrue(glusterfs.delete_volume('Newvolume1')) + mock_run.return_value = xml_command_fail + self.assertFalse(glusterfs.delete_volume('Newvolume1')) # 'add_volume_bricks' function tests: 1 @@ -716,45 +646,37 @@ class GlusterfsTestCase(TestCase): ''' Test if it add brick(s) to an existing volume ''' - # volume does not exist - mock = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises( - CommandExecutionError, - glusterfs.add_volume_bricks, - 'Newvolume1', - ['bricks']) + mock_info = MagicMock(return_value={ + 'Newvolume1': { + 'status': '1', + 'bricks': { + 'brick1': {'path': 'host:/path1'}, + 'brick2': {'path': 'host:/path2'} + } + } + }) + with patch.object(glusterfs, 'info', mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}): + # Volume does not exist + self.assertFalse(glusterfs.add_volume_bricks('nonExisting', + ['bricks'])) + # Brick already exists + self.assertTrue(glusterfs.add_volume_bricks('Newvolume1', + ['host:/path2'])) + # Already existing brick as a string + self.assertTrue(glusterfs.add_volume_bricks('Newvolume1', + 'host:/path2')) + self.assertFalse(mock_run.called) + # A new brick: + self.assertTrue(glusterfs.add_volume_bricks('Newvolume1', + ['host:/new1'])) + self.assertTrue(mock_run.called) - ret = '1 bricks successfully added to the volume 
Newvolume1' - # volume does exist - mock = MagicMock(return_value={'bricks': {}}) - with patch.object(glusterfs, 'info', mock): - mock = MagicMock(return_value=xml_command_success) - # ... and the added brick does not exist - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', - ['bricks']), ret) - - mock = MagicMock( - return_value={'bricks': {'brick1': {'path': 'bricks'}}}) - with patch.object(glusterfs, 'info', mock): - # ... and the added brick does exist - with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - # As a list - self.assertEqual( - glusterfs.add_volume_bricks( - 'Newvolume1', - ['bricks']), - 'Bricks already in volume Newvolume1') - # As a string - self.assertEqual( - glusterfs.add_volume_bricks( - 'Newvolume1', - 'bricks'), - 'Bricks already in volume Newvolume1') - # And empty list - self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', []), - 'Bricks already in volume Newvolume1') + # Gluster call fails + mock_run.return_value = xml_command_fail + self.assertFalse(glusterfs.add_volume_bricks('Newvolume1', + ['new:/path'])) if __name__ == '__main__': diff --git a/tests/unit/states/glusterfs_test.py b/tests/unit/states/glusterfs_test.py index 11dae0cdd6..6236356dbd 100644 --- a/tests/unit/states/glusterfs_test.py +++ b/tests/unit/states/glusterfs_test.py @@ -4,6 +4,7 @@ ''' # Import Python libs from __future__ import absolute_import +import socket # Import Salt Testing Libs from salttesting import skipIf, TestCase @@ -19,12 +20,13 @@ ensure_in_syspath('../../') # Import Salt Libs from salt.states import glusterfs -from tests.unit.modules.glusterfs_test import GlusterResults import salt.modules.glusterfs as mod_glusterfs import salt.utils.cloud import salt.modules.glusterfs as mod_glusterfs + +# Globals glusterfs.__salt__ = {'glusterfs.peer': mod_glusterfs.peer} glusterfs.__opts__ = {} @@ -41,132 +43,161 @@ class GlusterfsTestCase(TestCase): Test to verify if node is peered. 
''' name = 'server1' - other_name = 'server1' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - # probe new peer server2 under gluster 3.4.x - comt = ('Peer {0} added successfully.'.format(name)) - ret.update({'comment': comt, 'result': True, - 'changes': {'new': {name: []}, 'old': {}}}) - mock_xml = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_other) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{}, {name: []}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): + mock_ip = MagicMock(return_value=['1.2.3.4', '1.2.3.5']) + mock_hostbyname = MagicMock(return_value='1.2.3.5') + mock_peer = MagicMock(return_value=True) + mock_status = MagicMock(return_value={'uuid1': {'hostnames': [name]}}) + + with patch.dict(glusterfs.__salt__, {'glusterfs.peer_status': mock_status, + 'glusterfs.peer': mock_peer, + 'network.ip_addrs': mock_ip}): + with patch.object(socket, 'gethostbyname', mock_hostbyname): + comt = 'Peering with localhost is not needed' + ret.update({'comment': comt}) self.assertDictEqual(glusterfs.peered(name), ret) - # probe new peer server2 under gluster 3.7.x - mock_xml = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_other) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{}, {name: []}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): + mock_hostbyname.return_value = '1.2.3.42' + comt = ('Host {0} already peered'.format(name)) + ret.update({'comment': comt}) self.assertDictEqual(glusterfs.peered(name), ret) - # probe already existing server2 under gluster 3.4.x - comt = ('Host {0} already peered'.format(name)) - ret.update({'comment': comt, 'changes': {}}) - mock_xml = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname']) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = 
MagicMock(side_effect=[{name: []}, {name: []}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): - self.assertDictEqual(glusterfs.peered(name), ret) + with patch.dict(glusterfs.__opts__, {'test': False}): + old = {'uuid1': {'hostnames': ['other1']}} + new = {'uuid1': {'hostnames': ['other1']}, + 'uuid2': {'hostnames': ['someAlias', name]}} + mock_status.side_effect = [old, new] + comt = 'Host {0} successfully peered'.format(name) + ret.update({'comment': comt, + 'changes': {'old': old, 'new': new}}) + self.assertDictEqual(glusterfs.peered(name), ret) + mock_status.side_effect = None - # probe already existing server2 under gluster 3.7.x - mock_xml = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname']) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{name: []}, {name: []}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): - self.assertDictEqual(glusterfs.peered(name), ret) + mock_status.return_value = { + 'uuid1': {'hostnames': ['other']} + } + mock_peer.return_value = False - # Issue 30932: Peering an existing server by IP fails with gluster 3.7+ - # - # server2 was probed by address, 10.0.0.2. Under 3.4, server1 would be - # known as 10.0.0.1 but starting with 3.7, its hostname of server1 would be - # known instead. Subsequent probing of server1 by server2 used to result in - # "success_already_peer" but now it should succeed in adding an alternate - # hostname entry. 
+ ret.update({'result': False}) - name = 'server1' - ip = '10.0.0.1' - comt = ('Host {0} already peered'.format(ip)) - ret.update({'name': ip, 'comment': comt, 'changes': {}}) - mock_xml = MagicMock( - return_value=GlusterResults.v34.peer_probe.success_first_ip_from_second_first_time) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{ip: []}, {ip: []}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): - self.assertDictEqual(glusterfs.peered(ip), ret) + comt = ('Failed to peer with {0},' + + ' please check logs for errors').format(name) + ret.update({'comment': comt, 'changes': {}}) + self.assertDictEqual(glusterfs.peered(name), ret) - comt = ('Peer {0} added successfully.'.format(ip)) - ret.update({'name': ip, 'comment': comt, 'changes': { - 'old': {name: []}, 'new': {name: [ip]}}}) - mock_xml = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{name: []}, {name: [ip]}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): - self.assertDictEqual(glusterfs.peered(ip), ret) + comt = ('Invalid characters in peer name.') + ret.update({'comment': comt, 'name': ':/'}) + self.assertDictEqual(glusterfs.peered(':/'), ret) + ret.update({'name': name}) - comt = ('Host {0} already peered'.format(ip)) - ret.update({'name': ip, 'comment': comt, 'changes': {}}) - mock_xml = MagicMock( - return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time) - with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): - mock = MagicMock(side_effect=[{name: [ip]}, {name: [ip]}]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): - self.assertDictEqual(glusterfs.peered(ip), ret) + with patch.dict(glusterfs.__opts__, {'test': True}): + comt = ('Peer {0} will be added.'.format(name)) + 
ret.update({'comment': comt, 'result': None}) + self.assertDictEqual(glusterfs.peered(name), ret) - # test for invalid characters - comt = ('Invalid characters in peer name.') - ret.update({'name': '#badhostname', 'comment': comt, 'result': False}) - self.assertDictEqual(glusterfs.peered('#badhostname'), ret) + # 'volume_present' function tests: 1 - # 'created' function tests: 1 - - def test_created(self): + def test_volume_present(self): ''' - Test to check if volume already exists + Test to ensure that a volume exists ''' name = 'salt' - bricks = {'host1': '/srv/gluster/drive1', - 'host2': '/srv/gluster/drive2'} - + bricks = ['host1:/brick1'] ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - mock = MagicMock(side_effect=[[name], [], [], [], [name]]) - mock_lst = MagicMock(return_value=[]) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_volumes': mock, - 'glusterfs.create': mock_lst}): - comt = ('Volume {0} already exists.'.format(name)) - ret.update({'comment': comt}) - self.assertDictEqual(glusterfs.created(name, bricks), ret) + started_info = {name: {'status': '1'}} + stopped_info = {name: {'status': '0'}} + mock_info = MagicMock() + mock_list = MagicMock() + mock_create = MagicMock() + mock_start = MagicMock(return_value=True) + + with patch.dict(glusterfs.__salt__, { + 'glusterfs.info': mock_info, + 'glusterfs.list_volumes': mock_list, + 'glusterfs.create_volume': mock_create, + 'glusterfs.start_volume': mock_start}): + with patch.dict(glusterfs.__opts__, {'test': False}): + mock_list.return_value = [name] + mock_info.return_value = started_info + comt = ('Volume {0} already exists and is started'.format(name)) + ret.update({'comment': comt}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=True), ret) + + mock_info.return_value = stopped_info + comt = ('Volume {0} already exists and is now started'.format(name)) + ret.update({'comment': comt, + 'changes': {'old': 'stopped', 'new': 'started'}}) + 
self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=True), ret) + + comt = ('Volume {0} already exists'.format(name)) + ret.update({'comment': comt, 'changes': {}}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=False), ret) with patch.dict(glusterfs.__opts__, {'test': True}): + comt = ('Volume {0} already exists'.format(name)) + ret.update({'comment': comt, 'result': None}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=False), ret) + + comt = ('Volume {0} already exists' + + ' and will be started').format(name) + ret.update({'comment': comt, 'result': None}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=True), ret) + + mock_list.return_value = [] comt = ('Volume {0} will be created'.format(name)) ret.update({'comment': comt, 'result': None}) - self.assertDictEqual(glusterfs.created(name, bricks), ret) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=False), ret) + + comt = ('Volume {0} will be created' + + ' and started').format(name) + ret.update({'comment': comt, 'result': None}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=True), ret) with patch.dict(glusterfs.__opts__, {'test': False}): - with patch.object(salt.utils.cloud, 'check_name', - MagicMock(return_value=True)): - comt = ('Invalid characters in volume name.') - ret.update({'comment': comt, 'result': False}) - self.assertDictEqual(glusterfs.created(name, bricks), ret) + mock_list.side_effect = [[], [name]] + comt = ('Volume {0} is created'.format(name)) + ret.update({'comment': comt, + 'result': True, + 'changes': {'old': [], 'new': [name]}}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=False), ret) - comt = ('Host {0} already peered'.format(name)) - ret.update({'comment': [], 'result': True, - 'changes': {'new': ['salt'], 'old': []}}) - self.assertDictEqual(glusterfs.created(name, bricks), ret) + mock_list.side_effect = [[], [name]] + comt = 
('Volume {0} is created and is now started'.format(name)) + ret.update({'comment': comt, 'result': True}) + self.assertDictEqual(glusterfs.volume_present(name, bricks, + start=True), ret) + + mock_list.side_effect = None + mock_list.return_value = [] + mock_create.return_value = False + comt = 'Creation of volume {0} failed'.format(name) + ret.update({'comment': comt, 'result': False, 'changes': {}}) + self.assertDictEqual(glusterfs.volume_present(name, bricks), + ret) + + with patch.object(salt.utils.cloud, 'check_name', + MagicMock(return_value=True)): + comt = ('Invalid characters in volume name.') + ret.update({'comment': comt, 'result': False}) + self.assertDictEqual(glusterfs.volume_present(name, bricks), + ret) # 'started' function tests: 1 @@ -181,27 +212,32 @@ class GlusterfsTestCase(TestCase): 'comment': '', 'changes': {}} - mock = MagicMock(side_effect=[[], [name], [name], [name]]) - mock_t = MagicMock(return_value='started') - mock_dict = MagicMock(side_effect=[{}, '', '']) - with patch.dict(glusterfs.__salt__, {'glusterfs.list_volumes': mock, - 'glusterfs.status': mock_dict, - 'glusterfs.start_volume': mock_t}): + started_info = {name: {'status': '1'}} + stopped_info = {name: {'status': '0'}} + mock_info = MagicMock(return_value={}) + mock_start = MagicMock(return_value=True) + + with patch.dict(glusterfs.__salt__, + {'glusterfs.info': mock_info, + 'glusterfs.start_volume': mock_start}): comt = ('Volume {0} does not exist'.format(name)) ret.update({'comment': comt}) self.assertDictEqual(glusterfs.started(name), ret) + mock_info.return_value = started_info comt = ('Volume {0} is already started'.format(name)) ret.update({'comment': comt, 'result': True}) self.assertDictEqual(glusterfs.started(name), ret) with patch.dict(glusterfs.__opts__, {'test': True}): + mock_info.return_value = stopped_info comt = ('Volume {0} will be started'.format(name)) ret.update({'comment': comt, 'result': None}) self.assertDictEqual(glusterfs.started(name), ret) with 
patch.dict(glusterfs.__opts__, {'test': False}): - ret.update({'comment': 'started', 'result': True, + comt = 'Volume {0} is started'.format(name) + ret.update({'comment': comt, 'result': True, 'change': {'new': 'started', 'old': 'stopped'}}) self.assertDictEqual(glusterfs.started(name), ret) @@ -212,40 +248,58 @@ class GlusterfsTestCase(TestCase): Test to add brick(s) to an existing volume ''' name = 'salt' - bricks = {'bricks': {'host1': '/srv/gluster/drive1'}} + bricks = ['host1:/drive1'] + old_bricks = ['host1:/drive2'] ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} - mock = MagicMock(side_effect=['does not exist', 'is not started', - bricks, bricks, bricks, '']) - mock_t = MagicMock(side_effect=['bricks successfully added', - 'Bricks already in volume', '']) + stopped_volinfo = {'salt': {'status': '0'}} + volinfo = { + 'salt': { + 'status': '1', + 'bricks': {'brick1': {'path': old_bricks[0]}} + } + } + new_volinfo = { + 'salt': { + 'status': '1', + 'bricks': { + 'brick1': {'path': old_bricks[0]}, + 'brick2': {'path': bricks[0]} + } + } + } + + mock_info = MagicMock(return_value={}) + mock_add = MagicMock(side_effect=[False, True]) + with patch.dict(glusterfs.__salt__, - {'glusterfs.status': mock, - 'glusterfs.add_volume_bricks': mock_t}): - ret.update({'comment': 'does not exist'}) - self.assertDictEqual( - glusterfs.add_volume_bricks(name, bricks), ret) + {'glusterfs.info': mock_info, + 'glusterfs.add_volume_bricks': mock_add}): + ret.update({'comment': 'Volume salt does not exist'}) + self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - ret.update({'comment': 'is not started'}) - self.assertDictEqual( - glusterfs.add_volume_bricks(name, bricks), ret) + mock_info.return_value = stopped_volinfo + ret.update({'comment': 'Volume salt is not started'}) + self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - ret.update({'comment': 'bricks successfully added', 'result': True, - 'changes': {'new': ['host1'], 
'old': ['host1']}}) - self.assertDictEqual( - glusterfs.add_volume_bricks(name, bricks), ret) + mock_info.return_value = volinfo + ret.update({'comment': 'Adding bricks to volume salt failed'}) + self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - ret.update({'comment': 'Bricks already in volume', 'changes': {}}) - self.assertDictEqual( - glusterfs.add_volume_bricks(name, bricks), ret) + ret.update({'result': True}) + ret.update({'comment': 'Bricks already added in volume salt'}) + self.assertDictEqual(glusterfs.add_volume_bricks(name, old_bricks), + ret) - ret.update({'comment': '', 'result': False}) - self.assertDictEqual( - glusterfs.add_volume_bricks(name, bricks), ret) + mock_info.side_effect = [volinfo, new_volinfo] + ret.update({'comment': 'Bricks successfully added to volume salt', + 'changes': {'new': bricks + old_bricks, + 'old': old_bricks}}) + self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) if __name__ == '__main__': From 71b11067503a5b85cef666d415a99a7a4159c533 Mon Sep 17 00:00:00 2001 From: Joseph Hall Date: Thu, 25 Feb 2016 11:25:41 -0700 Subject: [PATCH 65/65] Get ssh_user, etc working --- salt/runners/manage.py | 46 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/salt/runners/manage.py b/salt/runners/manage.py index 16bf1d4ea1..e9cb87dafb 100644 --- a/salt/runners/manage.py +++ b/salt/runners/manage.py @@ -130,6 +130,7 @@ def down(removekeys=False): wheel.call_func('key.delete', match=minion) return ret + def up(): # pylint: disable=C0103 ''' Print a list of all of the minions that are up @@ -619,7 +620,7 @@ def bootstrap(version='develop', hosts='', root_user=False, roster='flat', - ssh_user='root', + ssh_user=None, ssh_password=None, ssh_priv_key=None, tmp_dir='/tmp/.bootstrap', @@ -632,13 +633,38 @@ def bootstrap(version='develop', Git tag of version to install script : https://bootstrap.saltstack.com - Script to execute + URL containing the script 
to execute + + script_args + Any additional arguments that you want to pass to the script hosts - Comma-separated hosts [example: hosts='host1.local,host2.local'] + Comma-separated hosts [example: hosts='host1.local,host2.local']. These + hosts need to exist in the specified roster. root_user : True Prepend ``root@`` to each host. + ** Deprecated ** + + roster : flat + The roster to use for Salt SSH + + ssh_user + ssh_password + ssh_privkey + Default values to use, if they are not found in the roster. Keep in + mind that these will not override roster vaules if they are already + defined + + tmp_dir : /tmp/.bootstrap + The temporary directory to download the bootstrap script in. This + directory will have ``-`` appended to it. For example: + ``/tmp/.bootstrap-a19a728e-d40a-4801-aba9-d00655c143a7/`` + + http_backend : tornado + The backend library to use to download the script. If you need to use + a ``file:///`` URL, then you should set this to ``urllib2``. + CLI Example: @@ -664,8 +690,20 @@ def bootstrap(version='develop', if script is None: script = 'https://bootstrap.saltstack.com' + client_opts = __opts__.copy() + if roster is not None: + client_opts['roster'] = roster + + if ssh_user is not None: + client_opts['ssh_user'] = ssh_user + + if ssh_password is not None: + client_opts['ssh_passwd'] = ssh_password + + if ssh_priv_key is not None: + client_opts['ssh_priv'] = ssh_priv_key + for host in hosts.split(','): - client_opts = __opts__.copy() client_opts['tgt'] = host client_opts['selected_target_option'] = 'glob' tmp_dir = '{0}-{1}/'.format(tmp_dir.rstrip('/'), uuid.uuid4())