Merge pull request #35578 from rallytime/merge-develop

[develop] Merge forward from 2016.3 to develop
This commit is contained in:
Nicole Thomas 2016-08-18 16:59:45 -06:00 committed by GitHub
commit e0e24aab60
43 changed files with 1248 additions and 374 deletions

View File

@ -14,6 +14,7 @@ import subprocess
from salt.ext.six import binary_type, string_types, text_type
from salt.ext.six.moves import cStringIO, StringIO
HAS_XML = True
try:
# Python >2.5
import xml.etree.cElementTree as ElementTree
@ -30,7 +31,8 @@ except Exception:
# normal ElementTree install
import elementtree.ElementTree as ElementTree
except Exception:
raise
ElementTree = None
HAS_XML = False
# True if we are running on Python 3.
@ -44,14 +46,15 @@ else:
import exceptions
if not hasattr(ElementTree, 'ParseError'):
class ParseError(Exception):
'''
older versions of ElementTree do not have ParseError
'''
pass
if HAS_XML:
if not hasattr(ElementTree, 'ParseError'):
class ParseError(Exception):
'''
older versions of ElementTree do not have ParseError
'''
pass
ElementTree.ParseError = ParseError
ElementTree.ParseError = ParseError
def text_(s, encoding='latin-1', errors='strict'):

View File

@ -298,7 +298,7 @@ class SSH(object):
}
if self.opts.get('rand_thin_dir'):
self.defaults['thin_dir'] = os.path.join(
'/tmp',
'/var/tmp',
'.{0}'.format(uuid.uuid4().hex[:6]))
self.opts['ssh_wipe'] = 'True'
self.serial = salt.payload.Serial(opts)

View File

@ -339,7 +339,7 @@ def list_nodes_full(call=None):
provider = comps[0]
__opts__['update_cachedir'] = True
salt.utils.cloud.cache_node_list(ret, provider, __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
@ -594,7 +594,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -616,7 +616,7 @@ def create(vm_):
'securitygroup_id': get_securitygroup(vm_),
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -671,7 +671,7 @@ def create(vm_):
vm_['ssh_host'] = public_ip
# The instance is booted and accessible, let's Salt it!
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -681,7 +681,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -966,7 +966,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -982,7 +982,7 @@ def destroy(name, call=None):
node = query(params)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),

View File

@ -257,7 +257,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -292,7 +292,7 @@ def create(vm_):
if get_project(conn, vm_) is not False:
kwargs['project'] = get_project(conn, vm_)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -314,7 +314,7 @@ def create(vm_):
for ex_blockdevicemapping in ex_blockdevicemappings:
if 'VirtualName' not in ex_blockdevicemapping:
ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting volume',
'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
@ -376,7 +376,7 @@ def create(vm_):
vm_['ssh_host'] = get_ip(data)
vm_['password'] = data.extra['password']
vm_['key_filename'] = get_key()
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@ -390,7 +390,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -415,7 +415,7 @@ def destroy(name, conn=None, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -439,7 +439,7 @@ def destroy(name, conn=None, call=None):
)
continue
log.info('Detaching volume: {0}'.format(volume.name))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detaching volume',
'salt/cloud/{0}/detaching'.format(volume.name),
@ -449,7 +449,7 @@ def destroy(name, conn=None, call=None):
log.error('Failed to Detach volume: {0}'.format(volume.name))
return False
log.info('Detached volume: {0}'.format(volume.name))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detached volume',
'salt/cloud/{0}/detached'.format(volume.name),
@ -457,7 +457,7 @@ def destroy(name, conn=None, call=None):
)
log.info('Destroying volume: {0}'.format(volume.name))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying volume',
'salt/cloud/{0}/destroying'.format(volume.name),
@ -467,7 +467,7 @@ def destroy(name, conn=None, call=None):
log.error('Failed to Destroy volume: {0}'.format(volume.name))
return False
log.info('Destroyed volume: {0}'.format(volume.name))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed volume',
'salt/cloud/{0}/destroyed'.format(volume.name),
@ -481,7 +481,7 @@ def destroy(name, conn=None, call=None):
log.info('Destroyed VM: {0}'.format(name))
# Fire destroy action
event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),

View File

@ -286,7 +286,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -428,7 +428,7 @@ def create(vm_):
'and "hostname" or the minion name must be an FQDN.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -507,7 +507,8 @@ def create(vm_):
log.debug('Found public IP address to use for ssh minion bootstrapping: {0}'.format(vm_['ssh_host']))
vm_['key_filename'] = key_filename
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
vm_['ssh_host'] = ip_address
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -517,7 +518,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -615,7 +616,7 @@ def show_instance(name, call=None):
'The show_instance action must be called with -a or --action.'
)
node = _get_node(name)
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
@ -794,7 +795,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -833,7 +834,7 @@ def destroy(name, call=None):
# for line in pprint.pformat(dir()).splitlines():
# log.debug('delete context: {0}'.format(line))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -843,7 +844,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return node

View File

@ -93,7 +93,6 @@ import decimal
# Import Salt Libs
import salt.utils
from salt import syspaths
from salt._compat import ElementTree as ET
import salt.utils.http as http
import salt.utils.aws as aws
@ -1904,7 +1903,7 @@ def request_instance(vm_=None, call=None):
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -1984,7 +1983,7 @@ def request_instance(vm_=None, call=None):
'Nothing else we can do here.')
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'waiting for spot instance',
'salt/cloud/{0}/waiting_for_spot'.format(vm_['name']),
@ -2048,7 +2047,7 @@ def query_instance(vm_=None, call=None):
instance_id = vm_['instance_id']
location = vm_.get('location', get_location(vm_))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
@ -2155,7 +2154,7 @@ def query_instance(vm_=None, call=None):
raise SaltCloudSystemExit(str(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'instance queried',
'salt/cloud/{0}/query_reactor'.format(vm_['name']),
@ -2194,7 +2193,7 @@ def wait_for_instance(
'gateway', get_ssh_gateway_config(vm_)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'waiting for ssh',
'salt/cloud/{0}/waiting_for_ssh'.format(vm_['name']),
@ -2374,7 +2373,7 @@ def wait_for_instance(
)
if 'reactor' in vm_ and vm_['reactor'] is True:
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'ssh is available',
'salt/cloud/{0}/ssh_ready_reactor'.format(vm_['name']),
@ -2447,7 +2446,7 @@ def create(vm_=None, call=None):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -2459,7 +2458,7 @@ def create(vm_=None, call=None):
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
salt.utils.cloud.cachedir_index_add(
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'ec2', vm_['driver']
)
@ -2549,7 +2548,7 @@ def create(vm_=None, call=None):
tags['Name'] = vm_['name']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'setting tags',
'salt/cloud/{0}/tagging'.format(vm_['name']),
@ -2619,7 +2618,7 @@ def create(vm_=None, call=None):
'volumes', vm_, __opts__, search_global=True
)
if volumes:
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
@ -2659,7 +2658,7 @@ def create(vm_=None, call=None):
))
return {}
for key, value in six.iteritems(salt.utils.cloud.bootstrap(vm_, __opts__)):
for key, value in six.iteritems(__utils__['cloud.bootstrap'](vm_, __opts__)):
ret.setdefault(key, value)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -2680,7 +2679,7 @@ def create(vm_=None, call=None):
if ssm_document:
event_data['ssm_document'] = ssm_document
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -2712,7 +2711,7 @@ def queue_instances(instances):
node = _get_node(instance_id=instance_id)
for name in node:
if instance_id == node[name]['instanceId']:
salt.utils.cloud.cache_node(node[name],
__utils__['cloud.cache_node'](node[name],
__active_provider_name__,
__opts__)
@ -3103,7 +3102,7 @@ def destroy(name, call=None):
quiet=True
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -3166,7 +3165,7 @@ def destroy(name, call=None):
sigver='4')
ret['spotInstance'] = result[0]
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -3175,10 +3174,10 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
salt.utils.cloud.cachedir_index_del(name)
__utils__['cloud.cachedir_index_del'](name)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return ret
@ -3266,7 +3265,7 @@ def show_instance(name=None, instance_id=None, call=None, kwargs=None):
node = _get_node(name=name, instance_id=instance_id)
for name in node:
salt.utils.cloud.cache_node(node[name],
__utils__['cloud.cache_node'](node[name],
__active_provider_name__,
__opts__)
return node
@ -3434,7 +3433,7 @@ def _list_nodes_full(location=None):
ret = _extract_instance_info(instances)
salt.utils.cloud.cache_node_list(ret, provider, __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
@ -4660,7 +4659,7 @@ def _parse_pricing(url, name):
regions[region['region']] = sizes
outfile = os.path.join(
syspaths.CACHE_DIR, 'cloud', 'ec2-pricing-{0}.p'.format(name)
__opts__['cachedir'], 'cloud', 'ec2-pricing-{0}.p'.format(name)
)
with salt.utils.fopen(outfile, 'w') as fho:
msgpack.dump(regions, fho)
@ -4724,7 +4723,7 @@ def show_pricing(kwargs=None, call=None):
name = 'linux'
pricefile = os.path.join(
syspaths.CACHE_DIR, 'cloud', 'ec2-pricing-{0}.p'.format(name)
__opts__['cachedir'], 'cloud', 'ec2-pricing-{0}.p'.format(name)
)
if not os.path.isfile(pricefile):

View File

@ -79,7 +79,6 @@ import salt.ext.six as six
import salt.utils.cloud
import salt.config as config
from salt.utils import http
from salt import syspaths
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.exceptions import (
SaltCloudSystemExit,
@ -282,7 +281,7 @@ def show_instance(vm_name, call=None):
)
conn = get_conn()
node = _expand_node(conn.ex_get_node(vm_name))
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
@ -555,7 +554,7 @@ def create_network(kwargs=None, call=None):
cidr = kwargs['cidr']
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create network',
'salt/cloud/net/creating',
@ -569,7 +568,7 @@ def create_network(kwargs=None, call=None):
network = conn.ex_create_network(name, cidr)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created network',
'salt/cloud/net/created',
@ -607,7 +606,7 @@ def delete_network(kwargs=None, call=None):
name = kwargs['name']
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete network',
'salt/cloud/net/deleting',
@ -631,7 +630,7 @@ def delete_network(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted network',
'salt/cloud/net/deleted',
@ -709,7 +708,7 @@ def create_fwrule(kwargs=None, call=None):
dst_tags = dst_tags.split(',')
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create firewall',
'salt/cloud/firewall/creating',
@ -730,7 +729,7 @@ def create_fwrule(kwargs=None, call=None):
target_tags=dst_tags
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created firewall',
'salt/cloud/firewall/created',
@ -769,7 +768,7 @@ def delete_fwrule(kwargs=None, call=None):
name = kwargs['name']
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete firewall',
'salt/cloud/firewall/deleting',
@ -793,7 +792,7 @@ def delete_fwrule(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted firewall',
'salt/cloud/firewall/deleted',
@ -862,7 +861,7 @@ def create_hc(kwargs=None, call=None):
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create health_check',
'salt/cloud/healthcheck/creating',
@ -886,7 +885,7 @@ def create_hc(kwargs=None, call=None):
healthy_threshold=healthy_threshold
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created health_check',
'salt/cloud/healthcheck/created',
@ -930,7 +929,7 @@ def delete_hc(kwargs=None, call=None):
name = kwargs['name']
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete health_check',
'salt/cloud/healthcheck/deleting',
@ -954,7 +953,7 @@ def delete_hc(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted health_check',
'salt/cloud/healthcheck/deleted',
@ -1023,7 +1022,7 @@ def create_address(kwargs=None, call=None):
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create address',
'salt/cloud/address/creating',
@ -1034,7 +1033,7 @@ def create_address(kwargs=None, call=None):
addy = conn.ex_create_address(name, ex_region, ex_address)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created address',
'salt/cloud/address/created',
@ -1080,7 +1079,7 @@ def delete_address(kwargs=None, call=None):
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete address',
'salt/cloud/address/deleting',
@ -1104,7 +1103,7 @@ def delete_address(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted address',
'salt/cloud/address/deleted',
@ -1207,7 +1206,7 @@ def create_lb(kwargs=None, call=None):
if ex_healthchecks:
ex_healthchecks = ex_healthchecks.split(',')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
@ -1222,7 +1221,7 @@ def create_lb(kwargs=None, call=None):
ex_address=ex_address
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
@ -1257,7 +1256,7 @@ def delete_lb(kwargs=None, call=None):
name = kwargs['name']
lb_conn = get_lb_conn(get_conn())
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete load_balancer',
'salt/cloud/loadbalancer/deleting',
@ -1281,7 +1280,7 @@ def delete_lb(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted load_balancer',
'salt/cloud/loadbalancer/deleted',
@ -1350,7 +1349,7 @@ def attach_lb(kwargs=None, call=None):
lb_conn = get_lb_conn(conn)
lb = lb_conn.get_balancer(kwargs['name'])
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attach load_balancer',
'salt/cloud/loadbalancer/attaching',
@ -1361,7 +1360,7 @@ def attach_lb(kwargs=None, call=None):
result = lb_conn.balancer_attach_compute_node(lb, node)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attached load_balancer',
'salt/cloud/loadbalancer/attached',
@ -1417,7 +1416,7 @@ def detach_lb(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detach load_balancer',
'salt/cloud/loadbalancer/detaching',
@ -1428,7 +1427,7 @@ def detach_lb(kwargs=None, call=None):
result = lb_conn.balancer_detach_member(lb, remove_member)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detached load_balancer',
'salt/cloud/loadbalancer/detached',
@ -1463,7 +1462,7 @@ def delete_snapshot(kwargs=None, call=None):
name = kwargs['name']
conn = get_conn()
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete snapshot',
'salt/cloud/snapshot/deleting',
@ -1487,7 +1486,7 @@ def delete_snapshot(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted snapshot',
'salt/cloud/snapshot/deleted',
@ -1525,7 +1524,7 @@ def delete_disk(kwargs=None, call=None):
disk = conn.ex_get_volume(kwargs.get('disk_name'))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete disk',
'salt/cloud/disk/deleting',
@ -1549,7 +1548,7 @@ def delete_disk(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted disk',
'salt/cloud/disk/deleted',
@ -1614,7 +1613,7 @@ def create_disk(kwargs=None, call=None):
location = conn.ex_get_zone(kwargs['location'])
use_existing = True
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create disk',
'salt/cloud/disk/creating',
@ -1632,7 +1631,7 @@ def create_disk(kwargs=None, call=None):
size, name, location, snapshot, image, use_existing
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created disk',
'salt/cloud/disk/created',
@ -1691,7 +1690,7 @@ def create_snapshot(kwargs=None, call=None):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create snapshot',
'salt/cloud/snapshot/creating',
@ -1705,7 +1704,7 @@ def create_snapshot(kwargs=None, call=None):
snapshot = conn.create_volume_snapshot(disk, name)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created snapshot',
'salt/cloud/snapshot/created',
@ -1797,7 +1796,7 @@ def detach_disk(name=None, kwargs=None, call=None):
node = conn.ex_get_node(node_name)
disk = conn.ex_get_volume(disk_name)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detach disk',
'salt/cloud/disk/detaching',
@ -1811,7 +1810,7 @@ def detach_disk(name=None, kwargs=None, call=None):
result = conn.detach_volume(disk, node)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'detached disk',
'salt/cloud/disk/detached',
@ -1870,7 +1869,7 @@ def attach_disk(name=None, kwargs=None, call=None):
node = conn.ex_get_node(node_name)
disk = conn.ex_get_volume(disk_name)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attach disk',
'salt/cloud/disk/attaching',
@ -1886,7 +1885,7 @@ def attach_disk(name=None, kwargs=None, call=None):
result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attached disk',
'salt/cloud/disk/attached',
@ -1955,7 +1954,7 @@ def destroy(vm_name, call=None):
'Could not find instance {0}.'.format(vm_name)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete instance',
'salt/cloud/{0}/deleting'.format(vm_name),
@ -1993,7 +1992,7 @@ def destroy(vm_name, call=None):
raise SaltCloudSystemExit(
'Could not destroy instance {0}.'.format(vm_name)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete instance',
'salt/cloud/{0}/deleted'.format(vm_name),
@ -2007,7 +2006,7 @@ def destroy(vm_name, call=None):
'delete_boot_pd is enabled for the instance profile, '
'attempting to delete disk'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'delete disk',
'salt/cloud/disk/deleting',
@ -2029,7 +2028,7 @@ def destroy(vm_name, call=None):
),
exc_info_on_loglevel=logging.DEBUG
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'deleted disk',
'salt/cloud/disk/deleted',
@ -2039,7 +2038,7 @@ def destroy(vm_name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(vm_name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](vm_name, __active_provider_name__.split(':')[0], __opts__)
return inst_deleted
@ -2120,7 +2119,7 @@ def request_instance(vm_):
)
log.debug('Create instance kwargs {0}'.format(str(kwargs)))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'create instance',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -2175,7 +2174,7 @@ def create(vm_=None, call=None):
ssh_user, ssh_key = __get_ssh_credentials(vm_)
vm_['ssh_host'] = __get_host(node_data, vm_)
vm_['key_filename'] = ssh_key
salt.utils.cloud.bootstrap(vm_, __opts__)
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.trace(
@ -2184,7 +2183,7 @@ def create(vm_=None, call=None):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -2216,7 +2215,7 @@ def update_pricing(kwargs=None, call=None):
price_json = http.query(url, decode=True, decode_type='json')
outfile = os.path.join(
syspaths.CACHE_DIR, 'cloud', 'gce-pricing.p'
__opts__['cachedir'], 'cloud', 'gce-pricing.p'
)
with salt.utils.fopen(outfile, 'w') as fho:
msgpack.dump(price_json['dict'], fho)
@ -2252,7 +2251,7 @@ def show_pricing(kwargs=None, call=None):
size = 'CP-COMPUTEENGINE-VMIMAGE-{0}'.format(profile['size'].upper())
pricefile = os.path.join(
syspaths.CACHE_DIR, 'cloud', 'gce-pricing.p'
__opts__['cachedir'], 'cloud', 'gce-pricing.p'
)
if not os.path.exists(pricefile):
update_pricing()

View File

@ -97,7 +97,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -130,7 +130,7 @@ def create(vm_):
'ip': host_ip,
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -176,7 +176,7 @@ def create(vm_):
)
vm_['ssh_host'] = host_ip
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -186,7 +186,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -410,7 +410,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -421,7 +421,7 @@ def destroy(name, call=None):
response = _query('grid', 'server/delete', args={'name': name})
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -431,7 +431,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return response

View File

@ -178,7 +178,7 @@ def query_instance(vm_=None, call=None):
'The query_instance action must be called with -a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
@ -260,7 +260,7 @@ def create(vm_):
'private_key', vm_, __opts__, search_global=False, default=None
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -293,7 +293,7 @@ def create(vm_):
if 'networks' in vm_:
kwargs['networks'] = vm_.get('networks')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -322,9 +322,9 @@ def create(vm_):
vm_['key_filename'] = key_filename
vm_['ssh_host'] = data[1]['primaryIp']
salt.utils.cloud.bootstrap(vm_, __opts__)
__utils__['cloud.bootstrap'](vm_, __opts__)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -394,7 +394,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -407,7 +407,7 @@ def destroy(name, call=None):
ret = query(command='my/machines/{0}'.format(node['id']),
location=node['location'], method='DELETE')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -417,7 +417,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return ret[0] in VALID_RESPONSE_CODES

View File

@ -346,7 +346,7 @@ def create(vm_):
if _validate_name(name) is False:
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
@ -438,7 +438,7 @@ def create(vm_):
)
return False
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
@ -515,7 +515,7 @@ def create(vm_):
vm_['password'] = get_password(vm_)
# Bootstrap!
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
@ -526,7 +526,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
@ -738,7 +738,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -751,7 +751,7 @@ def destroy(name, call=None):
response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -761,7 +761,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return response

View File

@ -27,7 +27,6 @@ from salt.exceptions import SaltCloudSystemExit
import salt.client
import salt.runner
import salt.syspaths
# Import 3rd-party libs
@ -69,7 +68,7 @@ def _minion_opts(cfg='minion'):
if 'conf_file' in __opts__:
default_dir = os.path.dirname(__opts__['conf_file'])
else:
default_dir = salt.syspaths.CONFIG_DIR,
default_dir = __opts__['config_dir'],
cfg = os.environ.get(
'SALT_MINION_CONFIG', os.path.join(default_dir, cfg))
opts = config.minion_config(cfg)
@ -80,7 +79,7 @@ def _master_opts(cfg='master'):
cfg = os.environ.get(
'SALT_MASTER_CONFIG',
__opts__.get('conf_file',
os.path.join(salt.syspaths.CONFIG_DIR, cfg)))
os.path.join(__opts__['config_dir'], cfg)))
opts = config.master_config(cfg)
return opts
@ -332,7 +331,7 @@ def show_instance(name, call=None):
if not call:
call = 'action'
nodes = list_nodes_full(call=call)
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
@ -393,7 +392,7 @@ def destroy(vm_, call=None):
ret = {'comment': '{0} was not found'.format(vm_),
'result': False}
if _salt('lxc.info', vm_, path=path):
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(vm_),
@ -405,7 +404,7 @@ def destroy(vm_, call=None):
ret['result'] = cret['result']
if ret['result']:
ret['comment'] = '{0} was destroyed'.format(vm_)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(vm_),
@ -414,7 +413,7 @@ def destroy(vm_, call=None):
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(vm_, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](vm_, __active_provider_name__.split(':')[0], __opts__)
return ret
@ -440,7 +439,7 @@ def create(vm_, call=None):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -480,7 +479,7 @@ def create(vm_, call=None):
__opts__['internal_lxc_profile'] = __opts__['profile']
del __opts__['profile']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),

View File

@ -399,7 +399,7 @@ def show_instance(name, call=None):
# Find under which cloud service the name is listed, if any
if name not in nodes:
return {}
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
@ -422,7 +422,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -538,7 +538,7 @@ def create(vm_):
del event_kwargs['vm_kwargs']['system_config']
del event_kwargs['vm_kwargs']['os_virtual_hard_disk']
del event_kwargs['vm_kwargs']['network_config']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -647,14 +647,14 @@ def create(vm_):
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Attaching volumes
volumes = config.get_cloud_config_value(
'volumes', vm_, __opts__, search_global=True
)
if volumes:
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'attaching volumes',
'salt/cloud/{0}/attaching_volumes'.format(vm_['name']),
@ -688,7 +688,7 @@ def create(vm_):
ret.update(data)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -957,7 +957,7 @@ def destroy(name, conn=None, call=None, kwargs=None):
delete_type: {'request_id': result.request_id},
}
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
cleanup_disks = config.get_cloud_config_value(
'cleanup_disks',

View File

@ -360,7 +360,7 @@ def show_instance(name, call=None):
conn = get_conn()
node = conn.show_instance(name).__dict__
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
@ -487,7 +487,7 @@ def destroy(name, conn=None, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -525,7 +525,7 @@ def destroy(name, conn=None, call=None):
if ret:
log.info('Destroyed VM: {0}'.format(name))
# Fire destroy action
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -536,7 +536,7 @@ def destroy(name, conn=None, call=None):
if __opts__.get('delete_sshkeys', False) is True:
salt.utils.cloud.remove_sshkey(getattr(node, __opts__.get('ssh_interface', 'public_ips'))[0])
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
salt.utils.cloud.cachedir_index_del(name)
return True
@ -640,7 +640,7 @@ def request_instance(vm_=None, call=None):
kwargs.update(get_block_mapping_opts(vm_))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -731,7 +731,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -968,7 +968,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['salt_host'] = salt_ip_address
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@ -993,7 +993,7 @@ def create(vm_):
'public_ips': data.public_ips
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -1103,7 +1103,7 @@ def list_nodes_full(call=None, **kwargs):
except IndexError as exc:
ret = {}
salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret

View File

@ -899,7 +899,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -926,7 +926,7 @@ def create(vm_):
)
kwargs['private_networking'] = 'true' if private_networking else 'false'
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -1054,7 +1054,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -1092,7 +1092,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -1106,7 +1106,7 @@ def destroy(name, call=None):
data = show_instance(name, call='action')
node = server.one.vm.action(auth, 'delete', int(data['id']))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),

View File

@ -539,7 +539,7 @@ def request_instance(vm_=None, call=None):
if config_drive is not None:
kwargs['ex_config_drive'] = config_drive
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -614,7 +614,7 @@ def create(vm_):
vm_['key_filename'] = key_filename
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -804,7 +804,7 @@ def create(vm_):
vm_['salt_host'] = salt_ip_address
vm_['ssh_host'] = ip_address
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if hasattr(data, 'extra') and 'password' in data.extra:
@ -817,7 +817,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),

View File

@ -253,7 +253,7 @@ def create_node(vm_):
data = ET.tostring(content, encoding='UTF-8')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -285,7 +285,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -351,7 +351,7 @@ def create(vm_):
public_ip = comps[0]
vm_['ssh_host'] = public_ip
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.debug(
@ -360,7 +360,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -510,7 +510,7 @@ def show_instance(name, call=None):
for child in children:
ret[item.tag][child.tag] = child.attrib
salt.utils.cloud.cache_node(ret, __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](ret, __active_provider_name__, __opts__)
return ret
@ -545,7 +545,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -569,7 +569,7 @@ def destroy(name, call=None):
if 'error' in data:
return data['error']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -579,7 +579,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return {'Destroyed': '{0} was destroyed.'.format(name)}

View File

@ -516,7 +516,7 @@ def create(vm_):
ret = {}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -605,7 +605,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = ssh_password
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Report success!
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -615,7 +615,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -705,7 +705,7 @@ def create_node(vm_, newid):
newnode[prop] = vm_[prop]
# The node is ready. Lets request it to be added
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -739,7 +739,7 @@ def show_instance(name, call=None):
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
@ -824,7 +824,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -850,7 +850,7 @@ def destroy(name, call=None):
query('delete', 'nodes/{0}/{1}'.format(
vmobj['node'], vmobj['id']
))
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -859,7 +859,7 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return {'Destroyed': '{0} was destroyed.'.format(name)}

View File

@ -517,7 +517,7 @@ def list_nodes_full(call=None):
provider = comps[0]
__opts__['update_cachedir'] = True
salt.utils.cloud.cache_node_list(result, provider, __opts__)
__utils__['cloud.cache_node_list'](result, provider, __opts__)
return result
@ -671,7 +671,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -698,7 +698,7 @@ def create(vm_):
'login_keypair': vm_['login_keypair'],
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -737,7 +737,7 @@ def create(vm_):
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
salt.utils.cloud.bootstrap(vm_, __opts__)
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@ -747,7 +747,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
@ -882,7 +882,7 @@ def destroy(instance_id, call=None):
instance_data = show_instance(instance_id, call='action')
name = instance_data['instance_name']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -898,7 +898,7 @@ def destroy(instance_id, call=None):
}
result = query(params)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),

View File

@ -208,7 +208,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -229,7 +229,7 @@ def create(vm_):
'size': get_size(conn, vm_)
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -357,7 +357,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = data.extra['password']
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@ -371,7 +371,7 @@ def create(vm_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),

View File

@ -17,10 +17,6 @@ from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
# Get logging started
@ -64,7 +60,7 @@ def create(vm_):
'''
log.info('Provisioning existing machine {0}'.format(vm_['name']))
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
return ret

View File

@ -220,7 +220,7 @@ def create(server_):
if 'provider' in server_:
server_['driver'] = server_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(server_['name']),
@ -250,7 +250,7 @@ def create(server_):
'commercial_type': commercial_type,
}
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(server_['name']),
@ -304,7 +304,7 @@ def create(server_):
server_['ssh_password'] = config.get_cloud_config_value(
'ssh_password', server_, __opts__
)
ret = salt.utils.cloud.bootstrap(server_, __opts__)
ret = __utils__['cloud.bootstrap'](server_, __opts__)
ret.update(data)
@ -315,7 +315,7 @@ def create(server_):
)
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(server_['name']),
@ -404,7 +404,7 @@ def show_instance(name, call=None):
'The show_instance action must be called with -a or --action.'
)
node = _get_node(name)
salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
@ -438,7 +438,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -453,7 +453,7 @@ def destroy(name, call=None):
args={'action': 'terminate'}, http_method='post'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -463,7 +463,7 @@ def destroy(name, call=None):
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(
__utils__['cloud.delete_minion_cachedir'](
name, __active_provider_name__.split(':')[0], __opts__
)

View File

@ -271,7 +271,7 @@ def create(vm_):
name = '.'.join([name, domain])
vm_['name'] = name
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
@ -377,7 +377,7 @@ def create(vm_):
if post_uri:
kwargs['postInstallScriptUri'] = post_uri
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
@ -486,11 +486,11 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = passwd
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(response)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
@ -521,7 +521,7 @@ def list_nodes_full(mask='mask[id]', call=None):
for node_id in response:
hostname = node_id['hostname'].split('.')[0]
ret[hostname] = node_id
salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret
@ -576,7 +576,7 @@ def show_instance(name, call=None):
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
@ -596,7 +596,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -612,7 +612,7 @@ def destroy(name, call=None):
conn = get_conn()
response = conn.deleteObject(id=node['id'])
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -621,7 +621,7 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return response

View File

@ -242,7 +242,7 @@ def create(vm_):
name = '.'.join([name, domain])
vm_['name'] = name
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
@ -330,7 +330,7 @@ def create(vm_):
if location:
kwargs['location'] = location
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
@ -418,11 +418,11 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = passwd
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(response)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
@ -455,7 +455,7 @@ def list_nodes_full(mask='mask[id, hostname, primaryIpAddress, \
for node in response:
ret[node['hostname']] = node
salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret
@ -508,7 +508,7 @@ def show_instance(name, call=None):
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
@ -528,7 +528,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -552,7 +552,7 @@ def destroy(name, call=None):
}
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -561,7 +561,7 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return response

View File

@ -2092,7 +2092,7 @@ def destroy(name, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -2141,7 +2141,7 @@ def destroy(name, call=None):
)
return 'failed to destroy'
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -2150,7 +2150,7 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return True
@ -2183,7 +2183,7 @@ def create(vm_):
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
@ -2485,7 +2485,7 @@ def create(vm_):
)
try:
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
@ -2555,14 +2555,14 @@ def create(vm_):
vm_['key_filename'] = key_filename
vm_['ssh_host'] = ip
out = salt.utils.cloud.bootstrap(vm_, __opts__)
out = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_name, call='action')
if deploy and out is not None:
data['deploy_kwargs'] = out['deploy_kwargs']
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),

View File

@ -122,7 +122,7 @@ def get_node(conn, name):
nodes = conn.list_nodes()
for node in nodes:
if node.name == name:
salt.utils.cloud.cache_node(salt.utils.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](salt.utils.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
return node
@ -342,7 +342,7 @@ def destroy(name, conn=None, call=None):
'-a or --action.'
)
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -383,7 +383,7 @@ def destroy(name, conn=None, call=None):
if ret:
log.info('Destroyed VM: {0}'.format(name))
# Fire destroy action
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
@ -401,7 +401,7 @@ def destroy(name, conn=None, call=None):
salt.utils.cloud.remove_sshkey(private_ips[0])
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return True
@ -424,7 +424,7 @@ def reboot(name, conn=None):
if ret:
log.info('Rebooted VM: {0}'.format(name))
# Fire reboot action
salt.utils.cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'{0} has been rebooted'.format(name), 'salt-cloud'
'salt/cloud/{0}/rebooting'.format(name),
@ -486,7 +486,7 @@ def list_nodes_full(conn=None, call=None):
ret[node.name] = pairs
del ret[node.name]['driver']
salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret
@ -512,7 +512,7 @@ def show_instance(name, call=None):
)
nodes = list_nodes_full()
salt.utils.cloud.cache_node(nodes[name], __active_provider_name__, __opts__)
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]

View File

@ -894,7 +894,8 @@ def clouds(opts):
int_type='clouds'),
opts,
tag='clouds',
pack={'__active_provider_name__': None},
pack={'__utils__': salt.loader.utils(opts),
'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(

View File

@ -38,6 +38,7 @@ import salt.config
import salt.syspaths
from salt.modules.cmdmod import _parse_env
import salt.utils
import salt.utils.systemd
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
@ -439,6 +440,20 @@ def install(name=None,
reinstall=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package, add refresh=True to update the dpkg database.
name
@ -602,11 +617,14 @@ def install(name=None,
if pkg_params is None or len(pkg_params) == 0:
return {}
use_scope = salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True)
cmd_prefix = ['systemd-run', '--scope'] if use_scope else []
old = list_pkgs()
targets = []
downgrade = []
to_reinstall = {}
cmd_prefix = []
if pkg_type == 'repository':
pkg_params_items = six.iteritems(pkg_params)
# Build command prefix
@ -614,9 +632,9 @@ def install(name=None,
if kwargs.get('force_yes', False):
cmd_prefix.append('--force-yes')
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
cmd_prefix += ['-o', 'DPkg::Options::=--force-confnew']
cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confnew'])
else:
cmd_prefix += ['-o', 'DPkg::Options::=--force-confold']
cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confold'])
cmd_prefix += ['-o', 'DPkg::Options::=--force-confdef']
if 'install_recommends' in kwargs:
if not kwargs['install_recommends']:
@ -800,7 +818,11 @@ def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
targets.extend([x for x in pkg_params if x in old_removed])
if not targets:
return {}
cmd = ['apt-get', '-q', '-y', action]
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['apt-get', '-q', '-y', action])
cmd.extend(targets)
env = _parse_env(kwargs.get('env'))
env.update(DPKG_ENV_VARS.copy())
@ -861,9 +883,13 @@ def autoremove(list_only=False, purge=False):
salt '*' pkg.autoremove list_only=True
salt '*' pkg.autoremove purge=True
'''
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
if list_only:
ret = []
cmd = ['apt-get', '--assume-no']
cmd.extend(['apt-get', '--assume-no'])
if purge:
cmd.append('--purge')
cmd.append('autoremove')
@ -881,7 +907,7 @@ def autoremove(list_only=False, purge=False):
return ret
else:
old = list_pkgs()
cmd = ['apt-get', '--assume-yes']
cmd.extend(['apt-get', '--assume-yes'])
if purge:
cmd.append('--purge')
cmd.append('autoremove')
@ -893,6 +919,20 @@ def autoremove(list_only=False, purge=False):
def remove(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages using ``apt-get remove``.
name
@ -923,6 +963,20 @@ def remove(name=None, pkgs=None, **kwargs):
def purge(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages via ``apt-get purge`` along with all configuration files.
name
@ -953,6 +1007,20 @@ def purge(name=None, pkgs=None, **kwargs):
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
@ -999,12 +1067,16 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
force_conf = '--force-confnew'
else:
force_conf = '--force-confold'
if dist_upgrade:
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef', 'dist-upgrade']
else:
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef', 'upgrade']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['apt-get', '-q', '-y',
'-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef'])
cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
call = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False,

View File

@ -242,15 +242,40 @@ def get(key, default='', delimiter=':', merge=None):
This function traverses these data stores in this order, returning the
first match found:
- Minion config file
- Minion configuration
- Minion's grains
- Minion's pillar data
- Master config file
- Master configuration (requires :conf_minion:`pillar_opts` to be set to
``True`` in Minion config file in order to work)
This means that if there is a value that is going to be the same for the
majority of minions, it can be configured in the Master config file, and
then overridden using the grains, pillar, or Minion config file.
Adding config options to the Master or Minion configuration file is easy:
.. code-block:: yaml
my-config-option: value
cafe-menu:
- egg and bacon
- egg sausage and bacon
- egg and spam
- egg bacon and spam
- egg bacon sausage and spam
- spam bacon sausage and spam
- spam egg spam spam bacon and spam
- spam sausage spam spam bacon spam tomato and spam
.. note::
Minion configuration options built into Salt (like those defined
:ref:`here <configuration-salt-minion>`) will *always* be defined in
the Minion configuration and thus *cannot be overridden by grains or
pillar data*. However, additional (user-defined) configuration options
(as in the above example) will not be in the Minion configuration by
default and thus can be overridden using grains/pillar data by leaving
the option out of the minion config file.
**Arguments**
delimiter

View File

@ -22,6 +22,7 @@ import re
# Import salt libs
import salt.utils
import salt.utils.systemd
from salt.exceptions import CommandExecutionError, MinionError
import salt.ext.six as six
@ -462,6 +463,20 @@ def install(name=None,
binhost=None,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package(s), add refresh=True to sync the portage tree
before package is installed.
@ -657,7 +672,12 @@ def install(name=None,
targets.append(target)
else:
targets = pkg_params
cmd = ['emerge', '--ask', 'n', '--quiet']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['emerge', '--ask', 'n', '--quiet'])
cmd.extend(bin_opts)
cmd.extend(emerge_opts)
cmd.extend(targets)
@ -686,6 +706,20 @@ def install(name=None,
def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Updates the passed package (emerge --update package)
slot
@ -730,8 +764,16 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
bin_opts = []
old = list_pkgs()
cmd = ['emerge', '--ask', 'n', '--quiet', '--update', '--newuse',
'--oneshot']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['emerge',
'--ask', 'n',
'--quiet',
'--update',
'--newuse',
'--oneshot'])
cmd.extend(bin_opts)
cmd.append(full_atom)
call = __salt__['cmd.run_all'](cmd,
@ -757,6 +799,20 @@ def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None):
def upgrade(refresh=True, binhost=None, backtrack=3):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade (emerge -uDN @world)
binhost
@ -797,13 +853,17 @@ def upgrade(refresh=True, binhost=None, backtrack=3):
bin_opts = []
old = list_pkgs()
cmd = ['emerge',
'--ask', 'n',
'--quiet',
'--backtrack', '{0}'.format(backtrack),
'--update',
'--newuse',
'--deep']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['emerge',
'--ask', 'n',
'--quiet',
'--backtrack', '{0}'.format(backtrack),
'--update',
'--newuse',
'--deep'])
if bin_opts:
cmd.extend(bin_opts)
cmd.append('@world')
@ -827,6 +887,20 @@ def upgrade(refresh=True, binhost=None, backtrack=3):
def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages via emerge --unmerge.
name
@ -875,8 +949,17 @@ def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
if not targets:
return {}
cmd = ['emerge', '--ask', 'n', '--quiet', '--unmerge',
'--quiet-unmerge-warn'] + targets
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['emerge',
'--ask', 'n',
'--quiet',
'--unmerge',
'--quiet-unmerge-warn'])
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
@ -903,6 +986,20 @@ def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
def purge(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Portage does not have a purge, this function calls remove followed
by depclean to emulate a purge process

View File

@ -20,6 +20,7 @@ import os.path
# Import salt libs
import salt.utils
import salt.utils.itertools
import salt.utils.systemd
from salt.exceptions import CommandExecutionError, MinionError
# Import 3rd-party libs
@ -438,15 +439,29 @@ def install(name=None,
sources=None,
**kwargs):
'''
Install (pacman -S) the passed package, add refresh=True to install with
-y, add sysupgrade=True to install with -u.
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any pacman commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install (``pacman -S``) the specified package(s). Add ``refresh=True`` to
install with ``-y``, add ``sysupgrade=True`` to install with ``-u``.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
ignored if either ``pkgs`` or ``sources`` is passed. Additionally,
please note that this option can only be used to install packages from
a software repository. To install a package file manually, use the
``sources`` option.
CLI Example:
@ -519,12 +534,24 @@ def install(name=None,
if 'root' in kwargs:
pkg_params['-r'] = kwargs['root']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.append('pacman')
if pkg_type == 'file':
cmd = ['pacman', '-U', '--noprogressbar', '--noconfirm'] + pkg_params
cmd.extend(['-U', '--noprogressbar', '--noconfirm'])
cmd.extend(pkg_params)
elif pkg_type == 'repository':
cmd.append('-S')
if salt.utils.is_true(refresh):
cmd.append('-y')
if salt.utils.is_true(sysupgrade):
cmd.append('-u')
cmd.extend(['--noprogressbar', '--noconfirm', '--needed'])
targets = []
problems = []
options = ['--noprogressbar', '--noconfirm', '--needed']
for param, version_num in six.iteritems(pkg_params):
if version_num is None:
targets.append(param)
@ -538,20 +565,15 @@ def install(name=None,
prefix = prefix or '='
targets.append('{0}{1}{2}'.format(param, prefix, verstr))
else:
msg = 'Invalid version string \'{0}\' for package ' \
'\'{1}\''.format(version_num, name)
msg = ('Invalid version string \'{0}\' for package '
'\'{1}\''.format(version_num, name))
problems.append(msg)
if problems:
for problem in problems:
log.error(problem)
return {}
if salt.utils.is_true(refresh):
options.append('-y')
if salt.utils.is_true(sysupgrade):
options.append('-u')
cmd = ['pacman', '-S'] + options + targets
cmd.extend(targets)
old = list_pkgs()
out = __salt__['cmd.run_all'](
@ -580,6 +602,20 @@ def install(name=None,
def upgrade(refresh=False, root=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any pacman commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade, a pacman -Syu
refresh
@ -601,8 +637,12 @@ def upgrade(refresh=False, root=None, **kwargs):
'comment': ''}
old = list_pkgs()
cmd = ['pacman', '-Su', '--noprogressbar', '--noconfirm']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['pacman', '-Su', '--noprogressbar', '--noconfirm'])
if salt.utils.is_true(refresh):
cmd.append('-y')
@ -641,10 +681,14 @@ def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
if not targets:
return {}
cmd = ['pacman',
'-Rsc' if action == 'purge' else '-R',
'--noprogressbar',
'--noconfirm'] + targets
remove_arg = '-Rsc' if action == 'purge' else '-R'
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['pacman', remove_arg, '--noprogressbar', '--noconfirm'])
cmd.extend(targets)
if 'root' in kwargs:
cmd.extend(('-r', kwargs['root']))
@ -675,6 +719,20 @@ def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
def remove(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any pacman commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages with ``pacman -R``.
name
@ -705,6 +763,20 @@ def remove(name=None, pkgs=None, **kwargs):
def purge(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any pacman commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Recursively remove a package and all dependencies which were installed
with it, this will call a ``pacman -Rsc``

View File

@ -241,12 +241,17 @@ def _runlevel():
return ret
def _systemctl_cmd(action, name=None):
def _systemctl_cmd(action, name=None, systemd_scope=False):
'''
Build a systemctl command line. Treat unit names without one
of the valid suffixes as a service.
'''
ret = ['systemctl']
ret = []
if systemd_scope \
and salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
ret.extend(['systemd-run', '--scope'])
ret.append('systemctl')
if isinstance(action, six.string_types):
action = shlex.split(action)
ret.extend(action)
@ -548,6 +553,16 @@ def missing(name):
def unmask(name):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Unmask the specified service with systemd
@ -564,7 +579,7 @@ def unmask(name):
return True
cmd = 'unmask --runtime' if 'runtime' in mask_status else 'unmask'
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name),
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name, systemd_scope=True),
python_shell=False,
redirect_stderr=True)
@ -577,6 +592,16 @@ def unmask(name):
def mask(name, runtime=False):
'''
.. versionadded:: 2015.5.0
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Mask the specified service with systemd
@ -594,7 +619,7 @@ def mask(name, runtime=False):
_check_for_unit_changes(name)
cmd = 'mask --runtime' if runtime else 'mask'
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name),
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name, systemd_scope=True),
python_shell=False,
redirect_stderr=True)
@ -636,6 +661,17 @@ def masked(name):
def start(name):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Start the specified service with systemd
CLI Example:
@ -646,12 +682,24 @@ def start(name):
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('start', name),
python_shell=False) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('start', name, systemd_scope=True),
python_shell=False) == 0
def stop(name):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Stop the specified service with systemd
CLI Example:
@ -661,12 +709,24 @@ def stop(name):
salt '*' service.stop <service name>
'''
_check_for_unit_changes(name)
return __salt__['cmd.retcode'](_systemctl_cmd('stop', name),
python_shell=False) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('stop', name, systemd_scope=True),
python_shell=False) == 0
def restart(name):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Restart the specified service with systemd
CLI Example:
@ -677,12 +737,24 @@ def restart(name):
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('restart', name),
python_shell=False) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('restart', name, systemd_scope=True),
python_shell=False) == 0
def reload_(name):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Reload the specified service with systemd
CLI Example:
@ -693,12 +765,24 @@ def reload_(name):
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('reload', name),
python_shell=False) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('reload', name, systemd_scope=True),
python_shell=False) == 0
def force_reload(name):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. versionadded:: 0.12.0
Force-reload the specified service with systemd
@ -711,8 +795,9 @@ def force_reload(name):
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('force-reload', name),
python_shell=False) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('force-reload', name, systemd_scope=True),
python_shell=False) == 0
# The unused sig argument is required to maintain consistency with the API
@ -738,6 +823,17 @@ def status(name, sig=None): # pylint: disable=unused-argument
# established by Salt's service management states.
def enable(name, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Enable the named service to start when the system boots
CLI Example:
@ -749,23 +845,39 @@ def enable(name, **kwargs): # pylint: disable=unused-argument
_check_for_unit_changes(name)
unmask(name)
if name in _get_sysv_services():
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
service_exec = _get_service_exec()
if service_exec.endswith('/update-rc.d'):
cmd = [service_exec, '-f', name, 'defaults', '99']
cmd.extend([service_exec, '-f', name, 'defaults', '99'])
elif service_exec.endswith('/chkconfig'):
cmd = [service_exec, name, 'on']
cmd.extend([service_exec, name, 'on'])
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](_systemctl_cmd('enable', name),
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('enable', name, systemd_scope=True),
python_shell=False,
ignore_retcode=True) == 0
# The unused kwargs argument is required to maintain consistency with the API
# established by Salt's service management states.
def disable(name, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands run by this function from the ``salt-minion`` daemon's
control group. This is done to avoid a race condition in cases where
the ``salt-minion`` service is restarted while a service is being
modified. If desired, usage of `systemd-run(1)`_ can be suppressed by
setting a :mod:`config option <salt.modules.config.get>` called
``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
Disable the named service to not start when the system boots
CLI Example:
@ -776,17 +888,22 @@ def disable(name, **kwargs): # pylint: disable=unused-argument
'''
_check_for_unit_changes(name)
if name in _get_sysv_services():
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
service_exec = _get_service_exec()
if service_exec.endswith('/update-rc.d'):
cmd = [service_exec, '-f', name, 'remove']
cmd.extend([service_exec, '-f', name, 'remove'])
elif service_exec.endswith('/chkconfig'):
cmd = [service_exec, name, 'off']
cmd.extend([service_exec, name, 'off'])
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](_systemctl_cmd('disable', name),
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](
_systemctl_cmd('disable', name, systemd_scope=True),
python_shell=False,
ignore_retcode=True) == 0
# The unused kwargs argument is required to maintain consistency with the API

View File

@ -31,6 +31,7 @@ import sys # do not remove, used in imported file.py functions
import fileinput # do not remove, used in imported file.py functions
import fnmatch # do not remove, used in imported file.py functions
import mmap # do not remove, used in imported file.py functions
import glob # do not remove, used in imported file.py functions
# do not remove, used in imported file.py functions
import salt.ext.six as six # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module

View File

@ -42,6 +42,7 @@ except ImportError:
# Import salt libs
import salt.utils
import salt.utils.itertools
import salt.utils.systemd
import salt.utils.decorators as decorators
import salt.utils.pkg.rpm
from salt.exceptions import (
@ -898,6 +899,20 @@ def install(name=None,
update_holds=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package(s), add refresh=True to clean the yum database
before package is installed.
@ -1250,7 +1265,11 @@ def install(name=None,
targets = []
with _temporarily_unhold(to_install, targets):
if targets:
cmd = [_yum(), '-y']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
if _yum() == 'dnf':
cmd.extend(['--best', '--allowerasing'])
_add_common_args(cmd)
@ -1268,7 +1287,11 @@ def install(name=None,
targets = []
with _temporarily_unhold(to_downgrade, targets):
if targets:
cmd = [_yum(), '-y']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
_add_common_args(cmd)
cmd.append('downgrade')
cmd.extend(targets)
@ -1284,7 +1307,11 @@ def install(name=None,
targets = []
with _temporarily_unhold(to_reinstall, targets):
if targets:
cmd = [_yum(), '-y']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
_add_common_args(cmd)
cmd.append('reinstall')
cmd.extend(targets)
@ -1339,6 +1366,21 @@ def upgrade(name=None,
not be installed.
.. versionchanged:: 2014.7.0
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade, a yum upgrade
Return a dict containing the new package names and versions::
@ -1444,7 +1486,11 @@ def upgrade(name=None,
# dictionary's keys.
targets.extend(pkg_params)
cmd = [_yum(), '--quiet', '-y']
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '--quiet', '-y'])
for args in (repo_arg, exclude_arg, branch_arg):
if args:
cmd.extend(args)
@ -1462,6 +1508,20 @@ def upgrade(name=None,
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages
name
@ -1497,6 +1557,12 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
if not targets:
return {}
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y', 'remove'] + targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
@ -1523,6 +1589,20 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
def purge(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Package purges are not supported by yum, this function is identical to
:mod:`pkg.remove <salt.modules.yumpkg.remove>`.

View File

@ -35,6 +35,7 @@ from xml.parsers.expat import ExpatError
# Import salt libs
import salt.utils
import salt.utils.systemd
from salt.exceptions import (
CommandExecutionError, MinionError)
@ -101,6 +102,7 @@ class _Zypper(object):
self.__no_raise = False
self.__refresh = False
self.__ignore_repo_failure = False
self.__systemd_scope = False
def __call__(self, *args, **kwargs):
'''
@ -111,6 +113,8 @@ class _Zypper(object):
# Ignore exit code for 106 (repo is not available)
if 'no_repo_failure' in kwargs:
self.__ignore_repo_failure = kwargs['no_repo_failure']
if 'systemd_scope' in kwargs:
self.__systemd_scope = kwargs['systemd_scope']
return self
def __getattr__(self, item):
@ -252,8 +256,12 @@ class _Zypper(object):
# However, Zypper lock needs to be always respected.
was_blocked = False
while True:
log.debug("Calling Zypper: " + ' '.join(self.__cmd))
self.__call_result = __salt__['cmd.run_all'](self.__cmd, **kwargs)
cmd = []
if self.__systemd_scope:
cmd.extend(['systemd-run', '--scope'])
cmd.extend(self.__cmd)
log.debug("Calling Zypper: " + ' '.join(cmd))
self.__call_result = __salt__['cmd.run_all'](cmd, **kwargs)
if self._check_result():
break
@ -296,6 +304,11 @@ class _Zypper(object):
__zypper__ = _Zypper()
def _systemd_scope():
    '''
    Return a truthy value when package commands should be wrapped in a
    systemd scope: the minion must be systemd-booted with scope support
    (systemd >= 205) and the ``systemd.scope`` config option must not be
    disabled.
    '''
    if not salt.utils.systemd.has_scope(__context__):
        return False
    # Preserve the config value itself as the return (matches "and" chaining)
    return __salt__['config.get']('systemd.scope', True)
def list_upgrades(refresh=True, **kwargs):
'''
List all available package upgrades on this system
@ -883,6 +896,20 @@ def install(name=None,
ignore_repo_failure=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package(s), add refresh=True to force a 'zypper refresh'
before package is installed.
@ -1019,10 +1046,11 @@ def install(name=None,
# Split the targets into batches of 500 packages each, so that
# the maximal length of the command line is not broken
systemd_scope = _systemd_scope()
while targets:
cmd = cmd_install + targets[:500]
targets = targets[500:]
for line in __zypper__(no_repo_failure=ignore_repo_failure).call(*cmd).splitlines():
for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope).call(*cmd).splitlines():
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
if match:
downgrades.append(match.group(1))
@ -1047,6 +1075,20 @@ def install(name=None,
def upgrade(refresh=True, skip_verify=False):
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade, a zypper upgrade
refresh
@ -1084,7 +1126,7 @@ def upgrade(refresh=True, skip_verify=False):
to_append = ''
if skip_verify:
to_append = '--no-gpg-checks'
__zypper__.noraise.call('update', '--auto-agree-with-licenses', to_append)
__zypper__(systemd_scope=_systemd_scope()).noraise.call('update', '--auto-agree-with-licenses', to_append)
if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:
ret['result'] = False
ret['comment'] = (__zypper__.stdout() + os.linesep + __zypper__.stderr()).strip()
@ -1111,9 +1153,11 @@ def _uninstall(name=None, pkgs=None):
if not targets:
return {}
systemd_scope = _systemd_scope()
errors = []
while targets:
__zypper__.call('remove', *targets[:500])
__zypper__(systemd_scope=systemd_scope).call('remove', *targets[:500])
targets = targets[500:]
__context__.pop('pkg.list_pkgs', None)
@ -1130,6 +1174,20 @@ def _uninstall(name=None, pkgs=None):
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages with ``zypper -n remove``
name
@ -1160,6 +1218,20 @@ def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,Carbon
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Recursively remove a package and all dependencies which were installed
with it, this will call a ``zypper -n remove -u``

View File

@ -1577,7 +1577,7 @@ class Login(LowDataAdapter):
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups'] is not False:
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
@ -1585,13 +1585,12 @@ class Login(LowDataAdapter):
perms.extend(eauth['{0}%'.format(group)])
if not perms:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
perms = None
return {'return': [{
'token': cherrypy.session.id,
@ -1599,7 +1598,7 @@ class Login(LowDataAdapter):
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
'perms': perms or {},
}]}

View File

@ -103,6 +103,10 @@ def salt_minion():
'''
import signal
import functools
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
import multiprocessing
if '' in sys.path:
@ -284,6 +288,9 @@ def salt_syndic():
'''
Start the salt syndic.
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
pid = os.getpid()
try:
@ -458,6 +465,9 @@ def salt_api():
'''
The main function for salt-api
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.api
sapi = salt.cli.api.SaltAPI() # pylint: disable=E1120
sapi.start()

View File

@ -3,6 +3,19 @@
Installation of packages using OS package managers such as yum or apt-get
=========================================================================
.. note::
On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and
Carbon, `systemd-run(1)`_ is now used to isolate commands which modify
installed packages from the ``salt-minion`` daemon's control group. This is
done to keep systemd from killing the package manager commands spawned by
Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_
manpage for more information). If desired, usage of `systemd-run(1)`_ can
be suppressed by setting a :mod:`config option <salt.modules.config.get>`
called ``systemd.scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Salt can manage software packages via the pkg state module, packages can be
set up to be installed, latest, removed and purged. Package management
declarations are typically rather simple:
@ -987,8 +1000,6 @@ def installed(
A list of packages to install from a software repository. All packages
listed under ``pkgs`` will be installed via a single command.
Example:
.. code-block:: yaml
mypkgs:
@ -1014,12 +1025,10 @@ def installed(
- bar: 1.2.3-4
- baz
Additionally, :mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>` and
:mod:`zypper <salt.modules.zypper>` support the ``<``, ``<=``, ``>=``, and
``>`` operators for more control over what versions will be installed. For
Example:
Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman
<salt.modules.pacman>` and :mod:`zypper <salt.modules.zypper>` support
the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over
what versions will be installed. For example:
.. code-block:: yaml
@ -1036,9 +1045,7 @@ def installed(
With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a
use flag list and/or if the given packages should be in
package.accept_keywords file and/or the overlay from which you want the
package to be installed.
For example:
package to be installed. For example:
.. code-block:: yaml

View File

@ -56,7 +56,6 @@ import salt.client
import salt.config
import salt.utils
import salt.utils.event
from salt import syspaths
from salt.utils import vt
from salt.utils.nb_popen import NonBlockingPopen
from salt.utils.yamldumper import SafeOrderedDumper
@ -514,7 +513,7 @@ def bootstrap(vm_, opts):
args={'kwargs': event_kwargs},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
@ -1158,7 +1157,7 @@ def deploy_windows(host,
args={'name': name},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
@ -1632,7 +1631,7 @@ def deploy_script(host,
},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
if file_map_fail or file_map_success:
@ -1733,7 +1732,7 @@ def fire_event(key, msg, tag, args=None, sock_dir=None, transport='zeromq'):
'`salt.utils.cloud.fire_event` requires that the `sock_dir`'
'parameter be passed in when calling the function.'
)
sock_dir = os.path.join(syspaths.SOCK_DIR, 'master')
sock_dir = os.path.join(__opts__['sock_dir'], 'master')
event = salt.utils.event.get_event(
'master',
sock_dir,
@ -2497,7 +2496,7 @@ def init_cachedir(base=None):
Initialize the cachedir needed for Salt Cloud to keep track of minions
'''
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
needed_dirs = (base,
os.path.join(base, 'requested'),
os.path.join(base, 'active'))
@ -2528,7 +2527,7 @@ def request_minion_cachedir(
will be set to None.
'''
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
if not fingerprint and pubkey is not None:
fingerprint = salt.utils.pem_finger(key=pubkey, sum_type=(opts and opts.get('hash_type') or 'sha256'))
@ -2570,7 +2569,7 @@ def change_minion_cachedir(
return False
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
fname = '{0}.p'.format(minion_id)
path = os.path.join(base, cachedir, fname)
@ -2591,7 +2590,7 @@ def activate_minion_cachedir(minion_id, base=None):
exists, and should be expected to exist from here on out.
'''
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
fname = '{0}.p'.format(minion_id)
src = os.path.join(base, 'requested', fname)
@ -2609,7 +2608,7 @@ def delete_minion_cachedir(minion_id, provider, opts, base=None):
return
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
driver = next(six.iterkeys(opts['providers'][provider]))
fname = '{0}.p'.format(minion_id)
@ -2629,7 +2628,7 @@ def list_cache_nodes_full(opts, provider=None, base=None):
return
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud', 'active')
base = os.path.join(__opts__['cachedir'], 'cloud', 'active')
minions = {}
# First, get a list of all drivers in use
@ -2660,7 +2659,7 @@ def cache_nodes_ip(opts, base=None):
addresses. Returns a dict.
'''
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
base = os.path.join(__opts__['cachedir'], 'cloud')
minions = list_cache_nodes_full(opts, base=base)
@ -2734,7 +2733,7 @@ def update_bootstrap(config, url=None):
# Compute the search path using the install time defined
# syspaths.CONF_DIR
deploy_d_from_syspaths = os.path.join(
syspaths.CONFIG_DIR,
config['config_dir'],
'cloud.deploy.d'
)
@ -2840,7 +2839,7 @@ def cache_node(node, provider, opts):
if 'update_cachedir' not in opts or not opts['update_cachedir']:
return
base = os.path.join(syspaths.CACHE_DIR, 'cloud', 'active')
base = os.path.join(__opts__['cachedir'], 'cloud', 'active')
if not os.path.exists(base):
init_cachedir()
@ -2882,7 +2881,7 @@ def missing_node_cache(prov_dir, node_list, provider, opts):
args={'missing node': node},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
@ -2918,7 +2917,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
args={'new_data': event_data},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
return
@ -2945,7 +2944,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
},
sock_dir=opts.get(
'sock_dir',
os.path.join(syspaths.SOCK_DIR, 'master')),
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)

View File

@ -15,22 +15,34 @@ import salt.utils
log = logging.getLogger(__name__)
def booted(context):
def booted(context=None):
'''
Return True if the system was booted with systemd, False otherwise.
Pass in the loader context "__context__", this function will set the
systemd.sd_booted key to represent if systemd is running
'''
# We can cache this for as long as the minion runs.
if 'systemd.sd_booted' not in context:
try:
# This check does the same as sd_booted() from libsystemd-daemon:
# http://www.freedesktop.org/software/systemd/man/sd_booted.html
if os.stat('/run/systemd/system'):
context['systemd.sd_booted'] = True
except OSError:
context['systemd.sd_booted'] = False
return context['systemd.sd_booted']
contextkey = 'salt.utils.systemd.booted'
if isinstance(context, dict):
# Can't put this if block on the same line as the above if block,
because it will break the elif below.
if contextkey in context:
return context[contextkey]
elif context is not None:
raise SaltInvocationError('context must be a dictionary if passed')
try:
# This check does the same as sd_booted() from libsystemd-daemon:
# http://www.freedesktop.org/software/systemd/man/sd_booted.html
ret = bool(os.stat('/run/systemd/system'))
except OSError:
ret = False
try:
context[contextkey] = ret
except TypeError:
pass
return ret
def version(context=None):
@ -38,11 +50,14 @@ def version(context=None):
Attempts to run systemctl --version. Returns None if unable to determine
version.
'''
contextkey = 'salt.utils.systemd.version'
if isinstance(context, dict):
if 'systemd.version' in context:
return context['systemd.version']
# Can't put this if block on the same line as the above if block,
because it will break the elif below.
if contextkey in context:
return context[contextkey]
elif context is not None:
raise SaltInvocationError('context must be a dictionary or None')
raise SaltInvocationError('context must be a dictionary if passed')
stdout = subprocess.Popen(
['systemctl', '--version'],
close_fds=True,
@ -58,7 +73,20 @@ def version(context=None):
return None
else:
try:
context['systemd.version'] = ret
context[contextkey] = ret
except TypeError:
pass
return ret
def has_scope(context=None):
    '''
    Scopes were introduced in systemd 205, this function returns a boolean
    which is true when the minion is systemd-booted and running systemd>=205.
    '''
    if booted(context):
        sd_version = version(context)
        # An unparseable version means we cannot confirm scope support
        return sd_version is not None and sd_version >= 205
    return False

View File

@ -8,6 +8,7 @@ from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import MagicMock
# Import Salt Libs
from salt.cloud.clouds import saltify
@ -15,6 +16,8 @@ from salt.cloud.clouds import saltify
# Globals
saltify.__opts__ = {}
saltify.__opts__['providers'] = {}
saltify.__utils__ = {}
saltify.__utils__['cloud.bootstrap'] = MagicMock()
class SaltifyTestCase(TestCase):

View File

@ -43,7 +43,7 @@ class LocalemodTestCase(TestCase):
'''
Test for Get the current system locale
'''
with patch.dict(localemod.__context__, {'systemd.sd_booted': True}):
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
localemod.HAS_DBUS = True
with patch.object(localemod,
'_parse_dbus_locale',
@ -74,7 +74,7 @@ class LocalemodTestCase(TestCase):
'''
Test for Sets the current system locale
'''
with patch.dict(localemod.__context__, {'systemd.sd_booted': True}):
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
with patch.object(localemod, '_localectl_set', return_value=True):
self.assertTrue(localemod.set_locale('l'))

View File

@ -442,15 +442,22 @@ class ZypperTestCase(TestCase):
'stderr': ''
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=cmd_out)}):
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=parsed_targets)}):
with patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}):
with patch('salt.modules.zypper.list_pkgs', ListPackages()):
diff = zypper.remove(name='vim,pico')
for pkg_name in ['vim', 'pico']:
self.assertTrue(diff.get(pkg_name))
self.assertTrue(diff[pkg_name]['old'])
self.assertFalse(diff[pkg_name]['new'])
# If config.get starts being used elsewhere, we'll need to write a
# side_effect function.
patches = {
'cmd.run_all': MagicMock(return_value=cmd_out),
'pkg_resource.parse_targets': MagicMock(return_value=parsed_targets),
'pkg_resource.stringify': MagicMock(),
'config.get': MagicMock(return_value=True)
}
with patch.dict(zypper.__salt__, patches):
with patch('salt.modules.zypper.list_pkgs', ListPackages()):
diff = zypper.remove(name='vim,pico')
for pkg_name in ['vim', 'pico']:
self.assertTrue(diff.get(pkg_name))
self.assertTrue(diff[pkg_name]['old'])
self.assertFalse(diff[pkg_name]['new'])
def test_repo_value_info(self):
'''

View File

@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import errno
import os
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.mock import Mock, patch, NO_MOCK, NO_MOCK_REASON
# Import Salt libs
from salt.exceptions import SaltInvocationError
from salt.utils import systemd as _systemd
def _booted_effect(path):
return True if path == '/run/systemd/system' else os.stat(path)
def _not_booted_effect(path):
if path == '/run/systemd/system':
raise OSError(errno.ENOENT, 'No such file or directory', path)
return os.stat(path)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SystemdTestCase(TestCase):
    '''
    Tests the functions in salt.utils.systemd
    '''
    def test_booted(self):
        '''
        Test that salt.utils.systemd.booted() returns True when minion is
        systemd-booted.
        '''
        # Ensure that os.stat returns True. os.stat doesn't return a bool
        # normally, but the code is doing a simple truth check on the return
        # data, so it is sufficient enough to mock it as True for these tests.
        with patch('os.stat', side_effect=_booted_effect):
            # Test without context dict passed
            self.assertTrue(_systemd.booted())
            # Test that context key is set when context dict is passed
            context = {}
            self.assertTrue(_systemd.booted(context))
            self.assertEqual(context, {'salt.utils.systemd.booted': True})

    def test_not_booted(self):
        '''
        Test that salt.utils.systemd.booted() returns False when minion is not
        systemd-booted.
        '''
        # Ensure that os.stat raises an exception even if test is being run on
        # a systemd-booted host.
        with patch('os.stat', side_effect=_not_booted_effect):
            # Test without context dict passed
            self.assertFalse(_systemd.booted())
            # Test that context key is set when context dict is passed
            context = {}
            self.assertFalse(_systemd.booted(context))
            self.assertEqual(context, {'salt.utils.systemd.booted': False})

    def test_booted_return_from_context(self):
        '''
        Test that the context data is returned when present. To ensure we're
        getting data from the context dict, we use a non-boolean value to
        differentiate it from the True/False return this function normally
        produces.
        '''
        context = {'salt.utils.systemd.booted': 'foo'}
        self.assertEqual(_systemd.booted(context), 'foo')

    def test_booted_invalid_context(self):
        '''
        Test with invalid context data. The context value must be a dict, so
        this should raise a SaltInvocationError.
        '''
        # Test with invalid context data
        with self.assertRaises(SaltInvocationError):
            _systemd.booted(99999)

    @patch('subprocess.Popen')
    def test_version(self, popen_mock):
        '''
        Test that salt.utils.systemd.version() parses the version number out
        of the "systemctl --version" output.
        '''
        _version = 231
        output = 'systemd {0}\n-SYSVINIT'.format(_version)
        # NOTE(review): a real Popen object exposes "returncode", not
        # "retcode" -- presumably version() never inspects the return code;
        # confirm against salt.utils.systemd.
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: (output, None),
            pid=lambda: 12345,
            retcode=0
        )
        # Test without context dict passed
        self.assertEqual(_systemd.version(), _version)
        # Test that context key is set when context dict is passed
        context = {}
        self.assertTrue(_systemd.version(context))
        self.assertEqual(context, {'salt.utils.systemd.version': _version})

    def test_version_return_from_context(self):
        '''
        Test that the context data is returned when present. To ensure we're
        getting data from the context dict, we use a non-integer value to
        differentiate it from the integer return this function normally
        produces.
        '''
        context = {'salt.utils.systemd.version': 'foo'}
        self.assertEqual(_systemd.version(context), 'foo')

    def test_version_invalid_context(self):
        '''
        Test with invalid context data. The context value must be a dict, so
        this should raise a SaltInvocationError.
        '''
        # Test with invalid context data
        with self.assertRaises(SaltInvocationError):
            _systemd.version(99999)

    @patch('subprocess.Popen')
    def test_version_parse_problem(self, popen_mock):
        '''
        Test the case where the "systemctl --version" output cannot be
        parsed: version() should return None and set no context key.
        '''
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: ('invalid', None),
            pid=lambda: 12345,
            retcode=0
        )
        # Test without context dict passed
        self.assertIsNone(_systemd.version())
        # Test that context key is set when context dict is passed. A failure
        # to parse the systemctl output should not set a context key, so it
        # should not be present in the context dict.
        context = {}
        self.assertIsNone(_systemd.version(context))
        self.assertEqual(context, {})

    @patch('subprocess.Popen')
    def test_has_scope_systemd204(self, popen_mock):
        '''
        Scopes are available in systemd>=205. Make sure that this function
        returns the expected boolean. We do three separate unit tests for
        versions 204 through 206 because mock doesn't like us altering the
        return_value in a loop.
        '''
        _expected = False
        _version = 204
        _output = 'systemd {0}\n-SYSVINIT'.format(_version)
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: (_output, None),
            pid=lambda: 12345,
            retcode=0
        )
        # Ensure that os.stat returns True. os.stat doesn't return a bool
        # normally, but the code is doing a simple truth check on the
        # return data, so it is sufficient enough to mock it as True for
        # these tests.
        with patch('os.stat', side_effect=_booted_effect):
            # Test without context dict passed
            self.assertEqual(_systemd.has_scope(), _expected)
            # Test that context key is set when context dict is passed
            context = {}
            self.assertEqual(_systemd.has_scope(context), _expected)
            self.assertEqual(
                context,
                {'salt.utils.systemd.booted': True,
                 'salt.utils.systemd.version': _version},
            )

    @patch('subprocess.Popen')
    def test_has_scope_systemd205(self, popen_mock):
        '''
        Scopes are available in systemd>=205. Make sure that this function
        returns the expected boolean. We do three separate unit tests for
        versions 204 through 206 because mock doesn't like us altering the
        return_value in a loop.
        '''
        _expected = True
        _version = 205
        _output = 'systemd {0}\n-SYSVINIT'.format(_version)
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: (_output, None),
            pid=lambda: 12345,
            retcode=0
        )
        # Ensure that os.stat returns True. os.stat doesn't return a bool
        # normally, but the code is doing a simple truth check on the
        # return data, so it is sufficient enough to mock it as True for
        # these tests.
        with patch('os.stat', side_effect=_booted_effect):
            # Test without context dict passed
            self.assertEqual(_systemd.has_scope(), _expected)
            # Test that context key is set when context dict is passed
            context = {}
            self.assertEqual(_systemd.has_scope(context), _expected)
            self.assertEqual(
                context,
                {'salt.utils.systemd.booted': True,
                 'salt.utils.systemd.version': _version},
            )

    @patch('subprocess.Popen')
    def test_has_scope_systemd206(self, popen_mock):
        '''
        Scopes are available in systemd>=205. Make sure that this function
        returns the expected boolean. We do three separate unit tests for
        versions 204 through 206 because mock doesn't like us altering the
        return_value in a loop.
        '''
        _expected = True
        _version = 206
        _output = 'systemd {0}\n-SYSVINIT'.format(_version)
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: (_output, None),
            pid=lambda: 12345,
            retcode=0
        )
        # Ensure that os.stat returns True. os.stat doesn't return a bool
        # normally, but the code is doing a simple truth check on the
        # return data, so it is sufficient enough to mock it as True for
        # these tests.
        with patch('os.stat', side_effect=_booted_effect):
            # Test without context dict passed
            self.assertEqual(_systemd.has_scope(), _expected)
            # Test that context key is set when context dict is passed
            context = {}
            self.assertEqual(_systemd.has_scope(context), _expected)
            self.assertEqual(
                context,
                {'salt.utils.systemd.booted': True,
                 'salt.utils.systemd.version': _version},
            )

    def test_has_scope_no_systemd(self):
        '''
        Test the case where the system is not systemd-booted. We should not be
        performing a version check in these cases as there is no need.
        '''
        with patch('os.stat', side_effect=_not_booted_effect):
            # Test without context dict passed
            self.assertFalse(_systemd.has_scope())
            # Test that context key is set when context dict is passed.
            # Because we are not systemd-booted, there should be no key in the
            # context dict for the version check, as we shouldn't have
            # performed this check.
            context = {}
            self.assertFalse(_systemd.has_scope(context))
            self.assertEqual(context, {'salt.utils.systemd.booted': False})

    @patch('subprocess.Popen')
    def test_has_scope_version_parse_problem(self, popen_mock):
        '''
        Test the case where the system is systemd-booted, but we failed to
        parse the "systemctl --version" output.
        '''
        popen_mock.return_value = Mock(
            communicate=lambda *args, **kwargs: ('invalid', None),
            pid=lambda: 12345,
            retcode=0
        )
        with patch('os.stat', side_effect=_booted_effect):
            # Test without context dict passed
            self.assertFalse(_systemd.has_scope())
            # Test that context key is set when context dict is passed. A
            # failure to parse the systemctl output should not set a context
            # key, so it should not be present in the context dict.
            context = {}
            self.assertFalse(_systemd.has_scope(context))
            self.assertEqual(context, {'salt.utils.systemd.booted': True})

    def test_has_scope_invalid_context(self):
        '''
        Test with invalid context data. The context value must be a dict, so
        this should raise a SaltInvocationError.
        '''
        # Test with invalid context data
        with self.assertRaises(SaltInvocationError):
            _systemd.has_scope(99999)
# Allow running this test module directly, outside of the full test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(SystemdTestCase, needs_daemon=False)