Adds support for creating VMs without cloning. Makes it possible to manage standalone ESXi hosts.

Fixes saltstack/salt#26902
alex 2015-09-23 05:47:01 -04:00 committed by Alexander Backlund
parent 3e6b5cfc72
commit f418fa7c0e
3 changed files with 314 additions and 199 deletions

View File

@ -132,6 +132,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
SCSI controller 3:
type: paravirtual
bus_sharing: physical
ide:
IDE controller 1:
domain: mycompany.com
dns_servers:
@ -139,10 +141,11 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
- 123.127.255.241
- 123.127.255.242
# If cloning from template or creating without cloning, either resourcepool or cluster MUST be specified!
resourcepool: Resources
cluster: Prod
# If creating without cloning, datastore and folder MUST be specified!
datastore: HUGE-DATASTORE-Cluster
folder: Development
datacenter: DC1
@ -168,12 +171,14 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
/srv/salt/yum/epel.repo: /etc/yum.repos.d/epel.repo
hardware_version: 10
guest_id: centos64Guest
``provider``
Enter the name that was specified when the cloud provider config was created.
``clonefrom``
Enter the name of the VM/template to clone from. If not specified, the VM will be created
without cloning.
``num_cpus``
Enter the number of vCPUs that you want the VM/template to have. If not specified,
@ -207,12 +212,21 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
Enter the mode of connection only if ``device_type: client_device``. Currently
supported modes are ``passthrough`` and ``atapi``. This field is ignored if
``device_type: datastore_iso_file``. Default is ``mode: passthrough``.
controller
Specify the IDE controller on which to attach the drive. Must be specified when
creating both a controller and a drive at the same time.
disk
Enter the disk specification here. If the hard disk doesn't exist, it will
be created with the provided size. If the hard disk already exists, it will
be expanded if the provided size is greater than the current size of the disk.
size
Enter the size of the disk in GB.
controller
Enter the controller on which to attach the disk. Must be specified when
creating both a controller and a disk at the same time.
network
Enter the network adapter specification here. If the network adapter doesn't
exist, a new network adapter will be created with the specified network name,
@ -274,6 +288,9 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
no
Virtual disks cannot be shared between virtual machines.
ide
Specify to add an IDE controller.
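The ``controller`` fields above link a new drive to a controller defined in the same
``devices`` request by its label. As a rough sketch (labels and paths here are
hypothetical, not part of the change), the driver receives ``devices`` as a plain
dict of this shape:
devices = {
    'ide': {'IDE controller 1': {}},
    'cd': {
        'CD/DVD drive 2': {
            'device_type': 'datastore_iso_file',
            'iso_path': '[datastore1] images/centos.iso',
            'controller': 'IDE controller 1',
        },
    },
    'disk': {'Hard disk 1': {'size': 30, 'controller': 'SCSI controller 1'}},
}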
``domain``
Enter the global domain name to be used for DNS. If not specified and if the VM name
is a FQDN, ``domain`` is set to the domain from the VM name. Default is ``local``.
@ -393,3 +410,13 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
``hardware_version``
Specify the virtual hardware version for the vm/template that is supported by the
host.
``guest_id``
Specify the guest id of the VM. For a full list of supported values see the
VMware vSphere documentation:
http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
.. note::
- For a clone operation, this argument is ignored.
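Putting the fields together, a profile that creates rather than clones resolves to
roughly the following ``vm_`` data inside the driver. This is a sketch with
hypothetical names; the defining property is simply the absence of ``clonefrom``:
vm_ = {
    'name': 'esxi-vm1',                     # hypothetical
    'num_cpus': 2,
    'memory': '4GB',
    'resourcepool': 'Resources',            # or 'cluster'
    'datastore': 'HUGE-DATASTORE-Cluster',  # required when not cloning
    'folder': 'Development',                # required when not cloning
    'datacenter': 'DC1',
    'guest_id': 'centos64Guest',
    # no 'clonefrom' key: create() takes the CreateVM_Task path
}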

View File

@ -528,6 +528,28 @@ def _add_new_scsi_adapter_helper(scsi_adapter_label, properties, bus_number):
return scsi_spec
def _add_new_ide_adapter_helper(ide_adapter_label, properties, bus_number):
'''
Helper function for adding new IDE controllers
.. versionadded:: Boron
'''
random_key = randint(-5050, -5000)  # temporary negative key; vSphere assigns the real device key when the spec is applied
ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController()
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_spec.device.key = random_key
ide_spec.device.busNumber = bus_number
ide_spec.device.deviceInfo = vim.Description()
ide_spec.device.deviceInfo.label = ide_adapter_label
ide_spec.device.deviceInfo.summary = "IDE"
return ide_spec
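# Illustrative usage (not part of the change): the helper only builds the spec;
# the 'properties' argument is currently unused, so callers pass None, e.g.
#   ide_spec = _add_new_ide_adapter_helper('IDE controller 2', None, 1)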
def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path):
if device_type == "datastore_iso_file":
drive.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
@ -615,91 +637,83 @@ def _set_network_adapter_mapping(adapter_specs):
return adapter_mapping
def _manage_devices(devices, vm=None):
unit_number = 0
bus_number = 0
device_specs = []
existing_disks_label = []
existing_scsi_adapters_label = []
existing_ide_adapters_label = []
existing_network_adapters_label = []
existing_cd_drives_label = []
ide_controllers = {}
nics_map = []
# this would be None when we aren't cloning a VM
if vm:
# loop through all the devices the vm/template has
# check if the device needs to be created or configured
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualDisk):
# this is a hard disk
if 'disk' in list(devices.keys()):
# there is at least one disk specified to be created/configured
unit_number += 1
existing_disks_label.append(device.deviceInfo.label)
if device.deviceInfo.label in list(devices['disk'].keys()):
size_gb = float(devices['disk'][device.deviceInfo.label]['size'])
size_kb = int(size_gb * 1024.0 * 1024.0)
if device.capacityInKB < size_kb:
# expand the disk
disk_spec = _edit_existing_hard_disk_helper(device, size_kb)
device_specs.append(disk_spec)
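# Note on units (illustration, not part of the change): 'size' is read in GB
# and compared in KB, e.g. size: 30 -> 30 * 1024 * 1024 = 31457280 KB. Disks
# are only ever grown here; a larger existing disk is left untouched.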
elif isinstance(device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo) or isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
# this is a network adapter
if 'network' in list(devices.keys()):
# there is at least one network adapter specified to be created/configured
existing_network_adapters_label.append(device.deviceInfo.label)
if device.deviceInfo.label in list(devices['network'].keys()):
network_name = devices['network'][device.deviceInfo.label]['name']
adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else ''
switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else ''
network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type)
adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label])
device_specs.append(network_spec)
nics_map.append(adapter_mapping)
elif hasattr(device, 'scsiCtlrUnitNumber'):
# this is a scsi adapter
if 'scsi' in list(devices.keys()):
# there is at least one scsi adapter specified to be created/configured
bus_number += 1
existing_scsi_adapters_label.append(device.deviceInfo.label)
if device.deviceInfo.label in list(devices['scsi'].keys()):
# Modify the existing SCSI adapter
scsi_adapter_properties = devices['scsi'][device.deviceInfo.label]
bus_sharing = scsi_adapter_properties['bus_sharing'].strip().lower() if 'bus_sharing' in scsi_adapter_properties else None
if bus_sharing and bus_sharing in ['virtual', 'physical', 'no']:
bus_sharing = '{0}Sharing'.format(bus_sharing)
if bus_sharing != device.sharedBus:
# Only edit the SCSI adapter if bus_sharing is different
scsi_spec = _edit_existing_scsi_adapter(device, bus_sharing)
device_specs.append(scsi_spec)
elif isinstance(device, vim.vm.device.VirtualCdrom):
# this is a cd/dvd drive
if 'cd' in list(devices.keys()):
# there is at least one cd/dvd drive specified to be created/configured
existing_cd_drives_label.append(device.deviceInfo.label)
if device.deviceInfo.label in list(devices['cd'].keys()):
device_type = devices['cd'][device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][device.deviceInfo.label] else ''
mode = devices['cd'][device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][device.deviceInfo.label] else ''
iso_path = devices['cd'][device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][device.deviceInfo.label] else ''
cd_drive_spec = _edit_existing_cd_or_dvd_drive(device, device_type, mode, iso_path)
device_specs.append(cd_drive_spec)
elif isinstance(device, vim.vm.device.VirtualIDEController):
# this is a controller to add new cd drives to
ide_controllers[device.key] = len(device.device)
if 'network' in list(devices.keys()):
network_adapters_to_create = list(set(devices['network'].keys()) - set(existing_network_adapters_label))
@ -726,6 +740,34 @@ def _manage_devices(devices, vm):
device_specs.append(scsi_spec)
bus_number += 1
if 'disk' in list(devices.keys()):
disks_to_create = list(set(devices['disk'].keys()) - set(existing_disks_label))
disks_to_create.sort()
log.debug("Hard disks to create: {0}".format(disks_to_create))
for disk_label in disks_to_create:
# create the disk
size_gb = float(devices['disk'][disk_label]['size'])
disk_spec = _add_new_hard_disk_helper(disk_label, size_gb, unit_number)
# When creating both the controller and the disk at the same time we need the randomly
# assigned (temporary) key of the newly created controller
if 'controller' in devices['disk'][disk_label]:
for spec in device_specs:
if spec.device.deviceInfo.label == devices['disk'][disk_label]['controller']:
disk_spec.device.controllerKey = spec.device.key
device_specs.append(disk_spec)
unit_number += 1
if 'ide' in list(devices.keys()):
ide_adapters_to_create = list(set(devices['ide'].keys()) - set(existing_ide_adapters_label))
ide_adapters_to_create.sort()
log.debug('IDE devices to create: {0}'.format(ide_adapters_to_create))
for ide_adapter_label in ide_adapters_to_create:
# create the ide adapter
ide_spec = _add_new_ide_adapter_helper(ide_adapter_label, None, bus_number)
device_specs.append(ide_spec)
bus_number += 1
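# Ordering note (illustration, not part of the change): new IDE controller
# specs are appended before the 'cd' block below runs, so a cd drive that
# names one of these controllers by label can find its temporary key in
# device_specs.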
if 'cd' in list(devices.keys()):
cd_drives_to_create = list(set(devices['cd'].keys()) - set(existing_cd_drives_label))
cd_drives_to_create.sort()
@ -735,6 +777,14 @@ def _manage_devices(devices, vm):
device_type = devices['cd'][cd_drive_label]['device_type'] if 'device_type' in devices['cd'][cd_drive_label] else ''
mode = devices['cd'][cd_drive_label]['mode'] if 'mode' in devices['cd'][cd_drive_label] else ''
iso_path = devices['cd'][cd_drive_label]['iso_path'] if 'iso_path' in devices['cd'][cd_drive_label] else ''
# When creating both the controller and the disk at the same time we need the randomly
# assigned (temporary) key of the newly created controller
if 'controller' in devices['cd'][cd_drive_label]:
for spec in device_specs:
if spec.device.deviceInfo.label == devices['cd'][cd_drive_label]['controller']:
controller_key = spec.device.key
ide_controllers[controller_key] = 0
for ide_controller_key, num_devices in six.iteritems(ide_controllers):
if num_devices < 2:
controller_key = ide_controller_key
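# Illustration (not part of the change): an IDE controller holds at most two
# devices, so the scan above picks the first controller with a free slot, e.g.
#   ide_controllers = {200: 2, 201: 1}  ->  controller_key = 201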
@ -2248,6 +2298,9 @@ def create(vm_):
hardware_version = config.get_cloud_config_value(
'hardware_version', vm_, __opts__, search_global=False, default=None
)
guest_id = config.get_cloud_config_value(
'guest_id', vm_, __opts__, search_global=False, default=None
)
if 'clonefrom' in vm_:
# Clone VM/template from specified VM/template
@ -2258,49 +2311,65 @@ def create(vm_):
raise SaltCloudSystemExit(
'The VM/template that you have specified under clonefrom does not exist.'
)
else:
clone_type = None
object_ref = None
# Either a cluster, or a resource pool must be specified when cloning from template or creating.
if resourcepool:
resourcepool_ref = _get_mor_by_property(vim.ResourcePool, resourcepool)
if not resourcepool_ref:
log.error("Specified resource pool: '{0}' does not exist".format(resourcepool))
if not clone_type or clone_type == "template":
raise SaltCloudSystemExit('You must specify a resource pool that exists.')
elif cluster:
cluster_ref = _get_mor_by_property(vim.ClusterComputeResource, cluster)
if not cluster_ref:
log.error("Specified cluster: '{0}' does not exist".format(cluster))
if not clone_type or clone_type == "template":
raise SaltCloudSystemExit('You must specify a cluster that exists.')
else:
log.debug("Using resource pool used by the {0} {1}".format(clone_type, vm_['clonefrom']))
resourcepool_ref = cluster_ref.resourcePool
elif clone_type == "template":
raise SaltCloudSystemExit(
'You must either specify a cluster or a resource pool when cloning from a template.'
)
elif not clone_type:
raise SaltCloudSystemExit(
'You must either specify a cluster or a resource pool when not cloning.'
)
else:
log.debug("Using resource pool used by the {0} {1}".format(clone_type, vm_['clonefrom']))
log.trace('resourcepool_ref: {0}'.format(resourcepool_ref))
# Either a datacenter or a folder can be optionally specified when cloning, required when creating.
# If not specified when cloning, the existing VM/template's parent folder is used.
if folder:
folder_ref = _get_mor_by_property(vim.Folder, folder)
if not folder_ref:
log.error("Specified folder: '{0}' does not exist".format(folder))
log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom']))
folder_ref = object_ref.parent
elif datacenter:
datacenter_ref = _get_mor_by_property(vim.Datacenter, datacenter)
if not datacenter_ref:
log.error("Specified datacenter: '{0}' does not exist".format(datacenter))
log.debug("Using datacenter folder in which {0} {1} is present".format(clone_type, vm_['clonefrom']))
folder_ref = object_ref.parent
else:
folder_ref = datacenter_ref.vmFolder
elif not clone_type:
raise SaltCloudSystemExit(
'You must either specify a folder or a datacenter when creating a VM without cloning.'
)
else:
log.debug("Using folder in which {0} {1} is present".format(clone_type, vm_['clonefrom']))
folder_ref = object_ref.parent
log.trace('folder_ref: {0}'.format(folder_ref))
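# Summary of the resolution order above (comments only, for orientation):
#   resource pool: resourcepool > cluster.resourcePool > (clone only) the source's pool
#   folder:        folder > datacenter.vmFolder > (clone only) the source's parent
# When not cloning, failing to resolve either is fatal (SaltCloudSystemExit).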
if 'clonefrom' in vm_:
# Create the relocation specs
reloc_spec = vim.vm.RelocateSpec()
@ -2329,52 +2398,62 @@ def create(vm_):
reloc_spec.host = host_ref
else:
log.error("Specified host: '{0}' does not exist".format(host))
else:
if not datastore:
raise SaltCloudSystemExit(
'You must specify a datastore when creating a VM without cloning.'
)
else:
datastore_ref = _get_mor_by_property(vim.Datastore, datastore)
if not datastore_ref:
raise SaltCloudSystemExit("Specified datastore: '{0}' does not exist".format(datastore))
# Create the config specs
config_spec = vim.vm.ConfigSpec()
# If the hardware version is specified and if it is different from the current
# hardware version, then schedule a hardware version upgrade
if hardware_version:
hardware_version = "vmx-{0}".format(str(hardware_version).zfill(2))
if hardware_version != object_ref.config.version:
log.debug("Scheduling hardware version upgrade from {0} to {1}".format(object_ref.config.version, hardware_version))
scheduled_hardware_upgrade = vim.vm.ScheduledHardwareUpgradeInfo()
scheduled_hardware_upgrade.upgradePolicy = 'always'
scheduled_hardware_upgrade.versionKey = hardware_version
config_spec.scheduledHardwareUpgradeInfo = scheduled_hardware_upgrade
else:
log.debug("Virtual hardware version already set to {1}".format(hardware_version))
if num_cpus:
log.debug("Setting cpu to: {0}".format(num_cpus))
config_spec.numCPUs = int(num_cpus)
if memory:
try:
memory_num, memory_unit = findall(r"[^\W\d_]+|\d+.\d+|\d+", memory)
if memory_unit.lower() == "mb":
memory_mb = int(memory_num)
elif memory_unit.lower() == "gb":
memory_mb = int(float(memory_num)*1024.0)
else:
err_msg = "Invalid memory type specified: '{0}'".format(memory_unit)
log.error(err_msg)
return {'Error': err_msg}
except (TypeError, ValueError):
memory_mb = int(memory)
log.debug("Setting memory to: {0} MB".format(memory_mb))
config_spec.memoryMB = memory_mb
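# Parsing examples (illustrative): '512MB' -> 512 MB, '4GB' -> 4096 MB,
# '4.5GB' -> 4608 MB. A bare number such as 2048 (or '2048') falls through
# to the except branch and is used as MB.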
if devices:
specs = _manage_devices(devices, object_ref)
config_spec.deviceChange = specs['device_specs']
if extra_config:
for key, value in six.iteritems(extra_config):
option = vim.option.OptionValue(key=key, value=value)
config_spec.extraConfig.append(option)
if 'clonefrom' in vm_:
# Create the clone specs
clone_spec = vim.vm.CloneSpec(
template=template,
@ -2407,16 +2486,27 @@ def create(vm_):
log.debug('clone_spec set to:\n{0}'.format(
pprint.pformat(clone_spec))
)
else:
config_spec.name = vm_name
config_spec.files = vim.vm.FileInfo()
config_spec.files.vmPathName = '[{0}] {1}/{1}.vmx'.format(datastore, vm_name)
config_spec.guestId = guest_id
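# E.g. (illustrative): datastore 'HUGE-DATASTORE-Cluster' and vm_name 'esxi-vm1'
# yield vmPathName '[HUGE-DATASTORE-Cluster] esxi-vm1/esxi-vm1.vmx'.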
log.debug('config_spec set to:\n{0}'.format(
pprint.pformat(config_spec))
)
try:
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': vm_},
transport=__opts__['transport']
)
if 'clonefrom' in vm_:
log.info("Creating {0} from {1}({2})".format(vm_['name'], clone_type, vm_['clonefrom']))
if datastore and not datastore_ref and datastore_cluster_ref:
# datastore cluster has been specified so apply Storage DRS recomendations
@ -2444,49 +2534,49 @@ def create(vm_):
# clone the VM/template
task = object_ref.Clone(folder_ref, vm_name, clone_spec)
_wait_for_task(task, vm_name, "clone", 5, 'info')
else:
log.info("Creating {0} without cloning".format(vm_['name']))
task = folder_ref.CreateVM_Task(config_spec, resourcepool_ref)
_wait_for_task(task, vm_name, "create", 5, 'info')
except Exception as exc:
err_msg = 'Error creating {0}: {1}'.format(vm_['name'], exc)
log.error(
err_msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return {'Error': err_msg}
new_vm_ref = _get_mor_by_property(vim.VirtualMachine, vm_name)
# If it is a template or does not need to be powered on, do not wait for the IP
if not template and power:
ip = _wait_for_ip(new_vm_ref, wait_for_ip_timeout)
if ip:
log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
# ssh or smb using ip and install salt only if deploy is True
if deploy:
vm_['key_filename'] = key_filename
vm_['ssh_host'] = ip
salt.utils.cloud.bootstrap(vm_, __opts__)
data = show_instance(vm_name, call='action')
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
return data
def create_datacenter(kwargs=None, call=None):
'''

View File

@ -2560,8 +2560,6 @@ def is_profile_configured(opts, provider, profile_name):
if driver not in non_image_drivers:
required_keys.append('image')
elif driver == 'vmware':
required_keys.append('clonefrom')
if driver not in non_size_drivers:
required_keys.append('size')