Merge branch 'develop' into kaaelhaa-seed-nameserver

This commit is contained in:
Kenn Leth Hansen 2018-01-10 07:07:09 +01:00 committed by GitHub
commit b8a7104438
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 1101 additions and 93 deletions

View File

@ -11,7 +11,7 @@ and disk size without being tied to a particular server size.
Dependencies
============
* profitbricks >= 3.0.0
* profitbricks >= 4.1.1
Configuration
=============
@ -34,8 +34,10 @@ Configuration
#
username: user@domain.com
password: 123456
# datacenter_id is the UUID of a pre-existing virtual data center.
datacenter_id: 9e6709a0-6bf9-4bd6-8692-60349c70ce0e
# datacenter is the UUID of a pre-existing virtual data center.
datacenter: 9e6709a0-6bf9-4bd6-8692-60349c70ce0e
# delete_volumes forces the deletion of all volumes attached to a server when the server is deleted
delete_volumes: true
# Connect to public LAN ID 1.
public_lan: 1
ssh_public_key: /path/to/id_rsa.pub
@ -65,6 +67,13 @@ A list of existing virtual data centers can be retrieved with the following comm
salt-cloud -f list_datacenters my-profitbricks-config
A new data center can be created with the following command:
.. code-block:: bash
salt-cloud -f create_datacenter my-profitbricks-config name=example location=us/las description="my description"
Authentication
==============
@ -81,7 +90,9 @@ Here is an example of a profile:
profitbricks_staging
provider: my-profitbricks-config
size: Micro Instance
image: 2f98b678-6e7e-11e5-b680-52540066fee9
image_alias: 'ubuntu:latest'
# image or image_alias must be provided
# image: 2f98b678-6e7e-11e5-b680-52540066fee9
cores: 2
ram: 4096
public_lan: 1
@ -117,8 +128,31 @@ Here is an example of a profile:
disk_size: 500
db_log:
disk_size: 50
disk_type: HDD
disk_availability_zone: ZONE_3
disk_type: SSD
Locations can be obtained using the ``--list-locations`` option for the ``salt-cloud``
command:
.. code-block:: bash
# salt-cloud --list-locations my-profitbricks-config
Images can be obtained using the ``--list-images`` option for the ``salt-cloud``
command:
.. code-block:: bash
# salt-cloud --list-images my-profitbricks-config
Sizes can be obtained using the ``--list-sizes`` option for the ``salt-cloud``
command:
.. code-block:: bash
# salt-cloud --list-sizes my-profitbricks-config
Profile Specifics:
------------------
The following list explains some of the important properties.
@ -127,14 +161,21 @@ size
.. code-block:: bash
salt-cloud --list-sizes my-profitbricks
salt-cloud --list-sizes my-profitbricks-config
image
Can be one of the options listed in the output of the following command:
.. code-block:: bash
salt-cloud --list-images my-profitbricks
salt-cloud --list-images my-profitbricks-config
image_alias
Can be one of the options listed in the output of the following command:
.. code-block:: bash
salt-cloud -f list_images my-profitbricks-config
disk_size
This option allows you to override the size of the disk as defined by the
@ -144,9 +185,6 @@ disk_type
This option allow the disk type to be set to HDD or SSD. The default is
HDD.
disk_availability_zone
This option will provision the volume in the specified availability_zone.
cores
This option allows you to override the number of CPU cores as defined by
the size.
@ -156,10 +194,6 @@ ram
The value must be a multiple of 256, e.g. 256, 512, 768, 1024, and so
forth.
availability_zone
This options specifies in which availability zone the server should be
built. Zones include ZONE_1 and ZONE_2. The default is AUTO.
public_lan
This option will connect the server to the specified public LAN. If no
LAN exists, then a new public LAN will be created. The value accepts a LAN
@ -179,9 +213,6 @@ public_firewall_rules
icmp_type: <icmp-type>
icmp_code: <icmp-code>
nat
This option will enable NAT on the private NIC.
private_lan
This option will connect the server to the specified private LAN. If no
LAN exists, then a new private LAN will be created. The value accepts a LAN
@ -209,7 +240,7 @@ ssh_public_key
ssh_interface
This option will use the private LAN IP for node connections (such as
bootstrapping the node) instead of the public LAN IP. The value accepts
as bootstrapping the node) instead of the public LAN IP. The value accepts
'private_lan'.
cpu_family
@ -229,4 +260,4 @@ wait_for_timeout
The default wait_for_timeout is 15 minutes.
For more information concerning cloud profiles, see :ref:`here
<salt-cloud-profiles>`.
</topics/cloud/profiles>`.

View File

@ -48,6 +48,9 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
availability_zone: ZONE_1
# Name or UUID of the HDD image to use.
image: <UUID>
# Image alias could be provided instead of image.
# Example 'ubuntu:latest'
#image_alias: <IMAGE_ALIAS>
# Size of the node disk in GB (overrides server size).
disk_size: 40
# Type of disk (HDD or SSD).
@ -96,6 +99,7 @@ import logging
import os
import pprint
import time
from salt.utils.versions import LooseVersion
# Import salt libs
import salt.utils.cloud
@ -112,11 +116,12 @@ from salt.exceptions import (
# Import 3rd-party libs
from salt.ext import six
try:
import profitbricks
from profitbricks.client import (
ProfitBricksService, Server,
NIC, Volume, FirewallRule,
Datacenter, LoadBalancer, LAN,
PBNotFoundError
PBNotFoundError, PBError
)
HAS_PROFITBRICKS = True
except ImportError:
@ -153,6 +158,13 @@ def get_configured_provider():
)
def version_compatible(version):
    '''
    Return True if the installed profitbricks SDK is at least ``version``.
    '''
    installed = LooseVersion(profitbricks.API_VERSION)
    return installed >= LooseVersion(version)
def get_dependencies():
'''
Warn if dependencies are not met.
@ -183,6 +195,31 @@ def get_conn():
)
def avail_locations(call=None):
    '''
    Return a dict of all available VM locations on the cloud provider with
    relevant data, keyed by region and then by location short name.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            # Fixed copy-paste error: this is avail_locations, not avail_images.
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    ret = {}
    conn = get_conn()

    for item in conn.list_locations()['items']:
        # Location ids have the form '<region>/<location>', e.g. 'us/las'.
        reg, loc = item['id'].split('/')
        location = {'id': item['id']}

        if reg not in ret:
            ret[reg] = {}

        ret[reg][loc] = location
    return ret
def avail_images(call=None):
'''
Return a list of the images that are on the provider
@ -195,11 +232,8 @@ def avail_images(call=None):
ret = {}
conn = get_conn()
datacenter = get_datacenter(conn)
for item in conn.list_images()['items']:
if (item['properties']['location'] ==
datacenter['properties']['location']):
image = {'id': item['id']}
image.update(item['properties'])
ret[image['name']] = image
@ -207,6 +241,42 @@ def avail_images(call=None):
return ret
def list_images(call=None, kwargs=None):
    '''
    List all the images with alias by location

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_images my-profitbricks-config location=us/las
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_images function must be called with '
            '-f or --function.'
        )

    if not version_compatible('4.0'):
        raise SaltCloudNotFound(
            "The 'image_alias' feature requires the profitbricks "
            "SDK v4.0.0 or greater."
        )

    # kwargs is None when no CLI arguments are supplied; normalize it so the
    # .get() below does not raise AttributeError.
    if kwargs is None:
        kwargs = {}

    ret = {}
    conn = get_conn()

    if kwargs.get('location') is not None:
        # Depth 3 is needed to include the imageAliases property.
        item = conn.get_location(kwargs.get('location'), 3)
        ret[item['id']] = {'image_alias': item['properties']['imageAliases']}
        return ret

    for item in conn.list_locations(3)['items']:
        ret[item['id']] = {'image_alias': item['properties']['imageAliases']}
    return ret
def avail_sizes(call=None):
'''
Return a dict of all available VM sizes on the cloud provider with
@ -288,13 +358,24 @@ def get_datacenter_id():
'''
Return datacenter ID from provider configuration
'''
return config.get_cloud_config_value(
datacenter_id = config.get_cloud_config_value(
'datacenter_id',
get_configured_provider(),
__opts__,
search_global=False
)
conn = get_conn()
try:
conn.get_datacenter(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get datacenter: {0}'.format(
datacenter_id))
raise
return datacenter_id
def list_loadbalancers(call=None):
'''
@ -373,7 +454,8 @@ def create_datacenter(call=None, kwargs=None):
.. code-block:: bash
salt-cloud -f create_datacenter profitbricks name=mydatacenter location=us/las description="my description"
salt-cloud -f create_datacenter profitbricks name=mydatacenter
location=us/las description="my description"
'''
if call != 'function':
raise SaltCloudSystemExit(
@ -492,6 +574,7 @@ def list_nodes(conn=None, call=None):
for item in nodes['items']:
node = {'id': item['id']}
node.update(item['properties'])
node['state'] = node.pop('vmState')
ret[node['name']] = node
return ret
@ -517,10 +600,13 @@ def list_nodes_full(conn=None, call=None):
for item in nodes['items']:
node = {'id': item['id']}
node.update(item['properties'])
node['state'] = node.pop('vmState')
node['public_ips'] = []
node['private_ips'] = []
if item['entities']['nics']['items'] > 0:
for nic in item['entities']['nics']['items']:
if len(nic['properties']['ips']) > 0:
pass
ip_address = nic['properties']['ips'][0]
if salt.utils.cloud.is_public_ip(ip_address):
node['public_ips'].append(ip_address)
@ -673,6 +759,23 @@ def get_key_filename(vm_):
return key_filename
def signal_event(vm_, event, description):
    '''
    Fire a salt/cloud lifecycle event for ``vm_``.

    vm_
        The VM data dict (must contain 'name').
    event
        Lifecycle stage, e.g. 'creating', 'requesting', 'created'; used both
        to filter the event data and as the tag suffix.
    description
        Human-readable event description.
    '''
    args = __utils__['cloud.filter_event'](
        event,
        vm_,
        ['name', 'profile', 'provider', 'driver']
    )

    __utils__['cloud.fire_event'](
        'event',
        description,
        # The tag must reflect the actual lifecycle stage; the previous
        # hard-coded '/creating' suffix mis-tagged the 'requesting' and
        # 'created' events that the inlined code used to emit.
        'salt/cloud/{0}/{1}'.format(vm_['name'], event),
        args=args,
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
def create(vm_):
'''
Create a single VM from a data dict
@ -688,15 +791,17 @@ def create(vm_):
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
if 'image_alias' in vm_ and not version_compatible('4.0'):
raise SaltCloudNotFound(
"The 'image_alias' parameter requires the profitbricks "
"SDK v4.0.0 or greater."
)
if 'image' not in vm_ and 'image_alias' not in vm_:
log.error('The image or image_alias parameter is required.')
signal_event(vm_, 'creating', 'starting create')
data = None
datacenter_id = get_datacenter_id()
conn = get_conn()
@ -712,14 +817,7 @@ def create(vm_):
# Assemble the composite server object.
server = _get_server(vm_, volumes, nics)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
signal_event(vm_, 'requesting', 'requesting instance')
try:
data = conn.create_server(datacenter_id=datacenter_id, server=server)
@ -728,11 +826,20 @@ def create(vm_):
_wait_for_completion(conn, data, get_wait_timeout(vm_),
'create_server')
except Exception as exc: # pylint: disable=W0703
except PBError as exc:
log.error(
'Error creating {0} on ProfitBricks\n\n'
'The following exception was thrown by the profitbricks library '
'when trying to run the initial deployment: \n{1}'.format(
'when trying to run the initial deployment: \n{1}:\n{2}'.format(
vm_['name'], exc, exc.content
),
exc_info_on_loglevel=logging.DEBUG
)
return False
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating {0} \n\n'
'Error: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
@ -754,7 +861,7 @@ def create(vm_):
'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
vm_['name'],
pprint.pformat(data['name']),
data['vmState']
data['state']
)
)
except Exception as err:
@ -768,7 +875,7 @@ def create(vm_):
# Trigger a failure in the wait for IP function
return False
running = data['vmState'] == 'RUNNING'
running = data['state'] == 'RUNNING'
if not running:
# Still not running, trigger another iteration
return
@ -807,14 +914,7 @@ def create(vm_):
)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
signal_event(vm_, 'created', 'created instance')
if 'ssh_host' in vm_:
vm_['key_filename'] = get_key_filename(vm_)
@ -859,9 +959,32 @@ def destroy(name, call=None):
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
attached_volumes = None
delete_volumes = config.get_cloud_config_value(
'delete_volumes',
get_configured_provider(),
__opts__,
search_global=False
)
# Get volumes before the server is deleted
attached_volumes = conn.get_attached_volumes(
datacenter_id=datacenter_id,
server_id=node['id']
)
conn.delete_server(datacenter_id=datacenter_id, server_id=node['id'])
# The server is deleted and now is safe to delete the volumes
if delete_volumes:
for vol in attached_volumes['items']:
log.debug('Deleting volume {0}'.format(vol['id']))
conn.delete_volume(
datacenter_id=datacenter_id,
volume_id=vol['id']
)
log.debug('Deleted volume {0}'.format(vol['id']))
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
@ -1010,11 +1133,14 @@ def _get_system_volume(vm_):
volume = Volume(
name='{0} Storage'.format(vm_['name']),
size=disk_size,
image=get_image(vm_)['id'],
disk_type=get_disk_type(vm_),
ssh_keys=ssh_keys
)
if 'image_alias' in vm_.keys():
volume.image_alias = vm_['image_alias']
else:
volume.image = get_image(vm_)['id']
# Set volume availability zone if defined in the cloud profile
if 'disk_availability_zone' in vm_:
volume.availability_zone = vm_['disk_availability_zone']

View File

@ -3382,12 +3382,12 @@ def is_profile_configured(opts, provider, profile_name, vm_=None):
alias, driver = provider.split(':')
# Most drivers need an image to be specified, but some do not.
non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone']
non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']
# Most drivers need a size, but some do not.
non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',
'softlayer', 'softlayer_hw', 'vmware', 'vsphere',
'virtualbox', 'profitbricks', 'libvirt', 'oneandone']
'virtualbox', 'libvirt', 'oneandone']
provider_key = opts['providers'][alias][driver]
profile_key = opts['providers'][alias][driver]['profiles'][profile_name]

View File

@ -341,3 +341,72 @@ def purge(name=None, pkgs=None, **kwargs):
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return remove(name=name, pkgs=pkgs, purge=True)
def upgrade_available(name):
    '''
    Check whether or not an upgrade is available for a given package

    .. versionadded:: Fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade_available <package name>
    '''
    # A non-empty latest_version() result means a newer package exists.
    candidate = latest_version(name)
    return candidate != ''
def upgrade(name=None,
            pkgs=None,
            **kwargs):
    '''
    Run a full package upgrade (``pkg_add -u``), or upgrade a specific package
    if ``name`` or ``pkgs`` is provided.
    ``name`` is ignored when ``pkgs`` is specified.

    Returns a dictionary containing the changes:

    .. versionadded:: Fluorine

    .. code-block:: python

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade
        salt '*' pkg.upgrade python%2.7
    '''
    before = list_pkgs()

    cmd = ['pkg_add', '-Ix', '-u']
    if kwargs.get('noop', False):
        cmd.append('-n')

    # pkgs wins over name; with neither, pkg_add -u upgrades everything.
    if pkgs:
        cmd.extend(pkgs)
    elif name:
        cmd.append(name)

    # Run the upgrade, then diff the installed-package lists taken before and
    # after to build the changes dict.
    result = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
                                     python_shell=False)
    __context__.pop('pkg.list_pkgs', None)
    after = list_pkgs()

    changes = salt.utils.data.compare_dicts(before, after)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered upgrading packages',
            info={'changes': changes, 'result': result}
        )
    return changes

403
salt/modules/vmctl.py Normal file
View File

@ -0,0 +1,403 @@
# -*- coding: utf-8 -*-
'''
Manage vms running on the OpenBSD VMM hypervisor using vmctl(8).
.. versionadded:: Fluorine
:codeauthor: :email:`Jasper Lievisse Adriaanse <jasper@openbsd.org>`
.. note::
This module requires the `vmd` service to be running on the OpenBSD
target machine.
'''
from __future__ import absolute_import
# Import python libs
import logging
import re
# Import salt libs:
import salt.utils.path
from salt.exceptions import (CommandExecutionError, SaltInvocationError)
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on OpenBSD hosts that have the vmctl(8) binary available.
    '''
    supported = __grains__['os'] == 'OpenBSD' and salt.utils.path.which('vmctl')
    if supported:
        return True
    return (False, 'The vmm execution module cannot be loaded: either the system is not OpenBSD or the vmctl binary was not found')
def _id_to_name(id):
    '''
    Lookup the name associated with a VM id.

    Returns None when no VM with that id exists.
    '''
    vm = status(id=id)
    if not vm:
        return None
    # status() returns {vmname: info} keyed by the VM's name (the 'name' key
    # is popped from the info dict), so the name is the single dictionary key.
    # The previous vm['name'] lookup raised KeyError for any real VM.
    return next(iter(vm))
def create_disk(name, size):
    '''
    Create a VMM disk image with the specified `name` and `size`.

    size:
        Size in megabytes, or use a specifier such as M, G, T.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.create_disk /path/to/disk.img size=10G
    '''
    cmd = 'vmctl create {0} -s {1}'.format(name, size)
    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered creating disk image',
            info={'errors': [result['stderr']], 'changes': False}
        )
    return True
def load(path):
    '''
    Load additional configuration from the specified file.

    path
        Path to the configuration file.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.load path=/etc/vm.switches.conf
    '''
    cmd = 'vmctl load {0}'.format(path)
    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [result['stderr']], 'changes': False}
        )
    return True
def reload():
    '''
    Remove all stopped VMs and reload configuration from the default configuration file.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.reload
    '''
    result = __salt__['cmd.run_all']('vmctl reload',
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [result['stderr']], 'changes': False}
        )
    return True
def reset(all=False, vms=False, switches=False):
    '''
    Reset the running state of VMM or a subsystem.

    all:
        Reset the running state.

    switches:
        Reset the configured switches.

    vms:
        Reset and terminate all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.reset all=True
    '''
    cmd = ['vmctl', 'reset']

    # Only the first truthy subsystem flag is honoured, matching the
    # if/elif priority: all, then vms, then switches.
    for flag, subsystem in ((all, 'all'), (vms, 'vms'), (switches, 'switches')):
        if flag:
            cmd.append(subsystem)
            break

    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [result['stderr']], 'changes': False}
        )
    return True
def start(name=None, id=None, bootpath=None, disk=None, disks=None, local_iface=False,
          memory=None, nics=0, switch=None):
    '''
    Starts a VM defined by the specified parameters.
    When both a name and id are provided, the id is ignored.

    name:
        Name of the defined VM.

    id:
        VM id.

    bootpath:
        Path to a kernel or BIOS image to load.

    disk:
        Path to a single disk to use.

    disks:
        List of multiple disks to use.

    local_iface:
        Whether to add a local network interface. See "LOCAL INTERFACES"
        in the vmctl(8) manual page for more information.

    memory:
        Memory size of the VM specified in megabytes.

    switch:
        Add a network interface that is attached to the specified
        virtual switch on the host.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.start 2   # start VM with id 2
        salt '*' vmctl.start name=web1 bootpath='/bsd.rd' nics=2 memory=512M disk='/disk.img'
    '''
    ret = {'changes': False, 'console': None}
    cmd = ['vmctl', 'start']

    if not (name or id):
        raise SaltInvocationError('Must provide either "name" or "id"')
    elif name:
        cmd.append(name)
    else:
        # argv elements must be strings; id may arrive as an int from the CLI.
        cmd.append(str(id))
        name = _id_to_name(id)

    # Every option and its argument must be a separate argv element: with
    # python_shell=False a fused '-i N' string is handed to vmctl as one
    # (invalid) argument, the same whitespace problem the original code
    # already documented for -b.
    if nics > 0:
        cmd.extend(['-i', str(nics)])

    if bootpath:
        cmd.extend(['-b', bootpath])

    if memory:
        cmd.extend(['-m', str(memory)])

    if switch:
        cmd.extend(['-n', switch])

    if local_iface:
        cmd.append('-L')

    if disk and (disks and len(disks) > 0):
        raise SaltInvocationError('Must provide either "disks" or "disk"')

    if disk:
        cmd.extend(['-d', disk])

    if disks and len(disks) > 0:
        # The original cmd.extend(['-d', x] for x in disks) appended list
        # objects to argv instead of strings; flatten to '-d <path>' pairs.
        for disk_path in disks:
            cmd.extend(['-d', disk_path])

    # Before attempting to define a new VM, make sure it doesn't already exist.
    # Otherwise return to indicate nothing was changed.
    # Base command is ['vmctl', 'start', <name-or-id>]; more than 3 elements
    # means definition parameters were supplied.
    if len(cmd) > 3:
        vmstate = status(name)
        if vmstate:
            ret['comment'] = 'VM already exists and cannot be redefined'
            return ret

    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] == 0:
        ret['changes'] = True
        m = re.match(r'.*successfully, tty (\/dev.*)', result['stderr'])
        if m:
            ret['console'] = m.groups()[0]
        else:
            m = re.match(r'.*Operation already in progress$', result['stderr'])
            if m:
                # VM is already starting; nothing changed.
                ret['changes'] = False
    else:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [result['stderr']], 'changes': ret}
        )

    return ret
def status(name=None, id=None):
    '''
    List VMs running on the host, or only the VM specified by ``id``.
    When both a name and id are provided, the id is ignored.

    name:
        Name of the defined VM.

    id:
        VM id.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.status          # to list all VMs
        salt '*' vmctl.status name=web1  # to get a single VM
    '''
    ret = {}
    result = __salt__['cmd.run_all'](['vmctl', 'status'],
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'error': [result['stderr']], 'changes': ret}
        )

    stdout_lines = result['stdout'].splitlines()
    # Grab the header and save it with the lowercase names.
    header = [column.lower() for column in stdout_lines[0].split()]

    # A VM can be in one of the following states (from vmm.c:vcpu_state_decode())
    # - stopped
    # - running
    # - requesting termination
    # - terminated
    # - unknown
    for line in stdout_lines[1:]:
        fields = line.split()
        vm = dict(zip(header, fields))
        vmname = vm.pop('name')
        if vm['pid'] == '-':
            # If the VM has no PID it's not running.
            vm['state'] = 'stopped'
        elif vmname and fields[-2] == '-':
            # When a VM does have a PID and the second to last field is a '-',
            # it's transitioning to another state. A VM name itself cannot
            # contain a '-' so it's safe to split on '-'.
            vm['state'] = fields[-1]
        else:
            vm['state'] = 'running'

        # When the status is requested of a single VM (by name) which is
        # stopping, vmctl doesn't print the status line. So we'll parse the
        # full list and return when we've found the requested VM.
        if id and int(vm['id']) == id:
            return {vmname: vm}
        elif name and vmname == name:
            return {vmname: vm}
        else:
            ret[vmname] = vm

    # Assert we've not come this far when an id or name have been provided.
    # That means the requested VM does not exist.
    if id or name:
        return {}

    return ret
def stop(name=None, id=None):
    '''
    Stop (terminate) the VM identified by the given id or name.
    When both a name and id are provided, the id is ignored.

    name:
        Name of the defined VM.

    id:
        VM id.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.stop name=alpine
    '''
    if not (name or id):
        raise SaltInvocationError('Must provide either "name" or "id"')

    cmd = ['vmctl', 'stop', name if name else id]
    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Problem encountered running vmctl',
            info={'errors': [result['stderr']], 'changes': {}}
        )

    # vmctl reports the termination request on stderr; no match means the VM
    # was not running, so nothing changed.
    terminated = re.match('^vmctl: sent request to terminate vm.*',
                          result['stderr'])
    return {'changes': bool(terminated)}

View File

@ -46,7 +46,7 @@ def get_class(_class, salt_data):
sub_init = '{0}/classes/{1}/init.yml'.format(saltclass_path,
_class.replace('.', '/'))
for root, dirs, files in salt.utils.path.os_walk('{0}/classes'.format(saltclass_path)):
for root, dirs, files in salt.utils.path.os_walk('{0}/classes'.format(saltclass_path), followlinks=True):
for l_file in files:
l_files.append('{0}/{1}'.format(root, l_file))
@ -220,7 +220,7 @@ def expanded_dict_from_minion(minion_id, salt_data):
_file = ''
saltclass_path = salt_data['path']
# Start
for root, dirs, files in salt.utils.path.os_walk('{0}/nodes'.format(saltclass_path)):
for root, dirs, files in salt.utils.path.os_walk('{0}/nodes'.format(saltclass_path), followlinks=True):
for minion_file in files:
if minion_file == '{0}.yml'.format(minion_id):
_file = os.path.join(root, minion_file)

View File

@ -18,7 +18,8 @@ from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
from profitbricks.client import ProfitBricksService # pylint: disable=unused-import
# pylint: disable=unused-import
from profitbricks.client import ProfitBricksService
HAS_PROFITBRICKS = True
except ImportError:
HAS_PROFITBRICKS = False
@ -29,7 +30,7 @@ PROVIDER_NAME = 'profitbricks'
DRIVER_NAME = 'profitbricks'
@skipIf(HAS_PROFITBRICKS is False, 'salt-cloud requires >= profitbricks 2.3.0')
@skipIf(HAS_PROFITBRICKS is False, 'salt-cloud requires >= profitbricks 4.1.0')
class ProfitBricksTest(ShellCase):
'''
Integration tests for the ProfitBricks cloud provider
@ -65,6 +66,7 @@ class ProfitBricksTest(ShellCase):
username = config[profile_str][DRIVER_NAME]['username']
password = config[profile_str][DRIVER_NAME]['password']
datacenter_id = config[profile_str][DRIVER_NAME]['datacenter_id']
self.datacenter_id = datacenter_id
if username == '' or password == '' or datacenter_id == '':
self.skipTest(
'A username, password, and an datacenter must be provided to '
@ -77,10 +79,104 @@ class ProfitBricksTest(ShellCase):
'''
Tests the return of running the --list-images command for ProfitBricks
'''
image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
list_images = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
self.assertIn(
'Ubuntu-16.04-LTS-server-2016-10-06',
[i.strip() for i in image_list]
'Ubuntu-16.04-LTS-server-2017-10-01',
[i.strip() for i in list_images]
)
def test_list_image_alias(self):
    '''
    Tests the return of running the -f list_images
    command for ProfitBricks
    '''
    output = self.run_cloud('-f list_images {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    self.assertIn('- ubuntu:latest', stripped)
def test_list_sizes(self):
    '''
    Tests the return of running the --list_sizes command for ProfitBricks
    '''
    output = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    self.assertIn('Micro Instance:', stripped)
def test_list_datacenters(self):
    '''
    Tests the return of running the -f list_datacenters
    command for ProfitBricks
    '''
    output = self.run_cloud('-f list_datacenters {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    self.assertIn(self.datacenter_id, stripped)
def test_list_nodes(self):
    '''
    Tests the return of running the -f list_nodes command for ProfitBricks
    '''
    output = self.run_cloud('-f list_nodes {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    # Every listed node carries at least a state and a name field.
    for expected in ('state:', 'name:'):
        self.assertIn(expected, stripped)
def test_list_nodes_full(self):
    '''
    Tests the return of running the -f list_nodes_full
    command for ProfitBricks
    '''
    output = self.run_cloud('-f list_nodes_full {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    # Every listed node carries at least a state and a name field.
    for expected in ('state:', 'name:'):
        self.assertIn(expected, stripped)
def test_list_location(self):
    '''
    Tests the return of running the --list-locations
    command for ProfitBricks
    '''
    output = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME))
    stripped = [line.strip() for line in output]
    # All four public ProfitBricks locations should be reported.
    for location in ('de/fkb', 'de/fra', 'us/las', 'us/ewr'):
        self.assertIn(location, stripped)
def test_instance(self):
@ -92,11 +188,15 @@ class ProfitBricksTest(ShellCase):
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud(
'-p profitbricks-test {0}'.format(INSTANCE_NAME), timeout=500
'-p profitbricks-test {0}'.format(INSTANCE_NAME),
timeout=500
)]
)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
self.run_cloud(
'-d {0} --assume-yes'.format(INSTANCE_NAME),
timeout=500
)
raise
# delete the instance
@ -119,4 +219,7 @@ class ProfitBricksTest(ShellCase):
# if test instance is still present, delete it
if ret in query:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
self.run_cloud(
'-d {0} --assume-yes'.format(INSTANCE_NAME),
timeout=500
)

View File

@ -1,6 +1,6 @@
profitbricks-test:
provider: profitbricks-config
image: Ubuntu-16.04-LTS-server-2016-10-06
image_alias: 'ubuntu:latest'
image_password: volume2016
size: Small Instance
disk_size: 10

View File

@ -21,6 +21,20 @@ from tests.support.mock import (
import salt.modules.openbsdpkg as openbsdpkg
class ListPackages(object):
    '''
    Callable stand-in for openbsdpkg.list_pkgs: yields the pre-upgrade
    package dict on the first call and the post-upgrade dict on the second.
    '''

    # Successive return values, indexed by call count.
    _RESULTS = [
        {'vim': '7.4.1467p1-gtk2'},
        {'png': '1.6.23', 'vim': '7.4.1467p1-gtk2', 'ruby': '2.3.1p1'}
    ]

    def __init__(self):
        self._iteration = 0

    def __call__(self):
        pkgs = self._RESULTS[self._iteration]
        self._iteration += 1
        return pkgs
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OpenbsdpkgTestCase(TestCase, LoaderModuleMockMixin):
'''
@ -64,18 +78,6 @@ class OpenbsdpkgTestCase(TestCase, LoaderModuleMockMixin):
- a flavor is specified ('vim--gtk2')
- a branch is specified ('ruby%2.3')
'''
class ListPackages(object):
def __init__(self):
self._iteration = 0
def __call__(self):
pkg_lists = [
{'vim': '7.4.1467p1-gtk2'},
{'png': '1.6.23', 'vim': '7.4.1467p1-gtk2', 'ruby': '2.3.1p1'}
]
pkgs = pkg_lists[self._iteration]
self._iteration += 1
return pkgs
parsed_targets = (
{'vim--gtk2': None, 'png': None, 'ruby%2.3': None},
@ -109,3 +111,40 @@ class OpenbsdpkgTestCase(TestCase, LoaderModuleMockMixin):
]
run_all_mock.assert_has_calls(expected_calls, any_order=True)
self.assertEqual(run_all_mock.call_count, 3)
def test_upgrade_available(self):
'''
Test upgrade_available when an update is available.
'''
ret = MagicMock(return_value='5.4.2p0')
with patch('salt.modules.openbsdpkg.latest_version', ret):
self.assertTrue(openbsdpkg.upgrade_available('zsh'))
def test_upgrade_not_available(self):
'''
Test upgrade_available when an update is not available.
'''
ret = MagicMock(return_value='')
with patch('salt.modules.openbsdpkg.latest_version', ret):
self.assertFalse(openbsdpkg.upgrade_available('zsh'))
def test_upgrade(self):
'''
Test upgrading packages.
'''
ret = {}
pkg_add_u_stdout = [
'quirks-2.402 signed on 2018-01-02T16:30:59Z',
'Read shared items: ok'
]
ret['stdout'] = '\n'.join(pkg_add_u_stdout)
ret['retcode'] = 0
run_all_mock = MagicMock(return_value=ret)
with patch.dict(openbsdpkg.__salt__, {'cmd.run_all': run_all_mock}):
with patch('salt.modules.openbsdpkg.list_pkgs', ListPackages()):
upgraded = openbsdpkg.upgrade()
expected = {
'png': {'new': '1.6.23', 'old': ''},
'ruby': {'new': '2.3.1p1', 'old': ''}
}
self.assertDictEqual(upgraded, expected)

View File

@ -0,0 +1,237 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Libs
import salt.modules.vmctl as vmctl
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch,
)
class VmctlTestCase(TestCase, LoaderModuleMockMixin):
'''
test modules.vmctl functions
'''
def setup_loader_modules(self):
    # Load the vmctl module with an empty loader context.
    return {vmctl: {}}
def test_create_disk(self):
    '''
    Tests creating a new disk image.
    '''
    result = {
        'stdout': 'vmctl: imagefile created',
        'stderr': '',
        'retcode': 0,
    }
    mock_cmd = MagicMock(return_value=result)
    with patch.dict(vmctl.__salt__, {'cmd.run_all': mock_cmd}):
        self.assertTrue(vmctl.create_disk('/path/to/disk.img', '1G'))
def test_load(self):
    '''
    load() returns True when ``vmctl load`` exits cleanly.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        self.assertTrue(vmctl.load('/etc/vm.switches.conf'))
def test_reload(self):
    '''
    reload() returns True when ``vmctl reload`` exits cleanly.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        self.assertTrue(vmctl.reload())
def test_reset(self):
    '''
    reset() with no arguments runs a bare ``vmctl reset``.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.reset()
    # Mock keeps its call history after the patch context exits.
    cmd_mock.assert_called_once_with(['vmctl', 'reset'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertTrue(result)
def test_reset_vms(self):
    '''
    reset(vms=True) appends the ``vms`` subcommand argument.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.reset(vms=True)
    cmd_mock.assert_called_once_with(['vmctl', 'reset', 'vms'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertTrue(result)
def test_reset_switches(self):
    '''
    reset(switches=True) appends the ``switches`` subcommand argument.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.reset(switches=True)
    cmd_mock.assert_called_once_with(['vmctl', 'reset', 'switches'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertTrue(result)
def test_reset_all(self):
    '''
    reset(all=True) appends the ``all`` subcommand argument.

    NOTE: ``all`` shadows the builtin, but it mirrors the keyword
    accepted by the vmctl execution module, so it stays.
    '''
    cmd_mock = MagicMock(return_value={'retcode': 0})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.reset(all=True)
    cmd_mock.assert_called_once_with(['vmctl', 'reset', 'all'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertTrue(result)
def test_start_existing_vm(self):
    '''
    start() on an already-defined VM reports changes and extracts the
    console tty from vmctl's stderr message.
    '''
    cmd_mock = MagicMock(return_value={
        'stderr': 'vmctl: started vm 4 successfully, tty /dev/ttyp4',
        'retcode': 0,
    })
    expected = {'changes': True, 'console': '/dev/ttyp4'}
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        self.assertDictEqual(expected, vmctl.start('4'))
def test_start_new_vm(self):
    '''
    start() on an undefined VM passes boot path, nic count and disk
    through to the ``vmctl start`` command line.
    '''
    cmd_mock = MagicMock(return_value={
        'stderr': 'vmctl: started vm 4 successfully, tty /dev/ttyp4',
        'retcode': 0,
    })
    # An empty status() means the VM is not defined yet.
    status_mock = MagicMock(return_value={})
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        with patch('salt.modules.vmctl.status', status_mock):
            result = vmctl.start('web1', bootpath='/bsd.rd', nics=2, disk='/disk.img')
    cmd_mock.assert_called_once_with(
        ['vmctl', 'start', 'web1', '-i 2', '-b', '/bsd.rd', '-d', '/disk.img'],
        output_loglevel='trace', python_shell=False)
    self.assertDictEqual({'changes': True, 'console': '/dev/ttyp4'}, result)
def test_status(self):
    '''
    Tests getting status for all VMs.

    The fixture covers every parsing branch visible in the expected
    output: a VM with a trailing "- stopping" marker, a running VM
    (numeric PID), and a stopped VM (PID shown as "-").
    '''
    ret = {}
    # Header row plus one line per VM, as emitted by vmctl(8).
    # NOTE(review): column spacing here must match what the status
    # parser expects — confirm against the real vmctl output format.
    ret['stdout'] = ' ID PID VCPUS MAXMEM CURMEM TTY OWNER NAME\n' \
                    ' 1 123 1 2.9G 150M ttyp5 john web1 - stopping\n' \
                    ' 2 456 1 512M 301M ttyp4 paul web2\n' \
                    ' 3 - 1 512M - - george web3\n'
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    # Expected parse: one dict per VM keyed by name, all values strings.
    expected = {
        'web1': {
            'curmem': '150M',
            'id': '1',
            'maxmem': '2.9G',
            'owner': 'john',
            'pid': '123',
            'state': 'stopping',  # taken from the trailing "- stopping"
            'tty': 'ttyp5',
            'vcpus': '1'
        },
        'web2': {
            'curmem': '301M',
            'id': '2',
            'maxmem': '512M',
            'owner': 'paul',
            'pid': '456',
            'state': 'running',  # inferred: has a PID, no marker
            'tty': 'ttyp4',
            'vcpus': '1'
        },
        'web3': {
            'curmem': '-',
            'id': '3',
            'maxmem': '512M',
            'owner': 'george',
            'pid': '-',
            'state': 'stopped',  # inferred: PID column is "-"
            'tty': '-',
            'vcpus': '1'
        },
    }
    with patch.dict(vmctl.__salt__, {'cmd.run_all': mock_cmd}):
        self.assertEqual(expected, vmctl.status())
def test_status_single(self):
    '''
    Tests getting status for a single VM.

    The fixture lists two VMs; passing a name to status() must filter
    the result down to just that one entry.
    '''
    ret = {}
    # Two-VM listing; only 'web4' should survive the name filter.
    ret['stdout'] = ' ID PID VCPUS MAXMEM CURMEM TTY OWNER NAME\n' \
                    ' 1 123 1 2.9G 150M ttyp5 ringo web4\n' \
                    ' 2 - 1 512M - - george web3\n'
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    expected = {
        'web4': {
            'curmem': '150M',
            'id': '1',
            'maxmem': '2.9G',
            'owner': 'ringo',
            'pid': '123',
            'state': 'running',  # inferred: has a PID, no state marker
            'tty': 'ttyp5',
            'vcpus': '1'
        },
    }
    with patch.dict(vmctl.__salt__, {'cmd.run_all': mock_cmd}):
        self.assertEqual(expected, vmctl.status('web4'))
def test_stop_when_running(self):
    '''
    stop() on a running VM issues ``vmctl stop`` and reports changes.
    '''
    cmd_mock = MagicMock(return_value={
        'stdout': '',
        'stderr': 'vmctl: sent request to terminate vm 14',
        'retcode': 0,
    })
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.stop('web1')
    cmd_mock.assert_called_once_with(['vmctl', 'stop', 'web1'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertTrue(result['changes'])
def test_stop_when_stopped(self):
    '''
    stop() on an already stopped/stopping VM reports no changes,
    based on vmctl's "Invalid argument" failure message.
    '''
    cmd_mock = MagicMock(return_value={
        'stdout': '',
        'stderr': 'vmctl: terminate vm command failed: Invalid argument',
        'retcode': 0,
    })
    with patch.dict(vmctl.__salt__, {'cmd.run_all': cmd_mock}):
        result = vmctl.stop('web1')
    cmd_mock.assert_called_once_with(['vmctl', 'stop', 'web1'],
                                     output_loglevel='trace',
                                     python_shell=False)
    self.assertFalse(result['changes'])