Merge branch '2017.7' into win_fix_inet_pton

This commit is contained in:
Nicole Thomas 2017-12-11 13:21:56 -05:00 committed by GitHub
commit 849b99eb34
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
48 changed files with 2047 additions and 749 deletions

View File

@ -10,6 +10,7 @@
driver:
name: docker
use_sudo: false
hostname: salt
privileged: true
username: root
volume:

View File

@ -225,15 +225,16 @@ enclosing brackets ``[`` and ``]``:
Default: ``{}``
This can be used to control logging levels more specifically. The example sets
the main salt library at the 'warning' level, but sets ``salt.modules`` to log
at the ``debug`` level:
This can be used to control logging levels more specifically, based on log call name. The example sets
the main salt library at the 'warning' level, sets ``salt.modules`` to log
at the ``debug`` level, and sets a custom module to the ``all`` level:
.. code-block:: yaml
log_granular_levels:
'salt': 'warning'
'salt.modules': 'debug'
'salt.loader.saltmaster.ext.module.custom_module': 'all'
External Logging Handlers
-------------------------

View File

@ -303,6 +303,20 @@ option on the Salt master.
master_port: 4506
.. conf_minion:: publish_port
``publish_port``
----------------
Default: ``4505``
The port of the master publish server. This needs to coincide with the publish_port
option on the Salt master.
.. code-block:: yaml
publish_port: 4505
.. conf_minion:: user
``user``

View File

@ -314,3 +314,9 @@ Syncing grains can be done a number of ways, they are automatically synced when
above) the grains can be manually synced and reloaded by calling the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.
.. note::
When the :conf_minion:`grains_cache` is set to False, the grains dictionary is built
and stored in memory on the minion. Every time the minion restarts or
``saltutil.refresh_grains`` is run, the grain dictionary is rebuilt from scratch.

View File

@ -1526,6 +1526,54 @@ Returns:
.. jinja_ref:: jinja-in-files
Escape filters
--------------
.. jinja_ref:: regex_escape
``regex_escape``
----------------
.. versionadded:: 2017.7.0
Allows escaping of strings so they can be interpreted literally by another function.
Example:
.. code-block:: jinja
regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }}
will be rendered as:
.. code-block:: text
regex_escape = https\:\/\/example\.com\?foo\=bar\%20baz
Set Theory Filters
------------------
.. jinja_ref:: unique
``unique``
----------
.. versionadded:: 2017.7.0
Removes duplicate values from a list.
Example:
.. code-block:: jinja
unique = {{ ['foo', 'foo', 'bar'] | unique }}
will be rendered as:
.. code-block:: text
unique = ['foo', 'bar']
Jinja in Files
==============

View File

@ -186,19 +186,60 @@ class Beacon(object):
else:
self.opts['beacons'][name].append({'enabled': enabled_value})
def list_beacons(self):
def _get_beacons(self,
                 include_opts=True,
                 include_pillar=True):
    '''
    Return the beacons data structure

    Merges the beacon configuration from pillar and from opts (the
    minion config) into a single dict.

    :param include_opts:   Whether to include beacons configured in
                           opts, default is True.
    :param include_pillar: Whether to include beacons configured in
                           pillar, default is True.
    :return: dict mapping beacon names to their configuration.
             Opts entries are applied after pillar entries, so on a
             name collision the opts configuration wins.
    :raises ValueError: if the ``beacons`` value in either source is
                        not a dict.
    '''
    beacons = {}
    if include_pillar:
        pillar_beacons = self.opts.get('pillar', {}).get('beacons', {})
        if not isinstance(pillar_beacons, dict):
            raise ValueError('Beacons must be of type dict.')
        beacons.update(pillar_beacons)
    if include_opts:
        opts_beacons = self.opts.get('beacons', {})
        if not isinstance(opts_beacons, dict):
            raise ValueError('Beacons must be of type dict.')
        beacons.update(opts_beacons)
    return beacons
def list_beacons(self,
                 include_pillar=True,
                 include_opts=True):
    '''
    List the beacon items

    Fires the merged beacon configuration back over the minion event
    bus under the ``minion_beacons_list_complete`` tag.

    :param include_pillar: Whether to include beacons that are
                           configured in pillar, default is True.
    :param include_opts:   Whether to include beacons that are
                           configured in opts, default is True.
    :return: True once the event has been fired.
    '''
    # Pass by keyword: _get_beacons declares its parameters in the
    # opposite order (include_opts first), so positional passing would
    # silently transpose the two flags.
    beacons = self._get_beacons(include_pillar=include_pillar,
                                include_opts=include_opts)

    # Fire the complete event back along with the list of beacons
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'beacons': beacons},
                   tag='/salt/minion/minion_beacons_list_complete')

    return True
def list_available_beacons(self):
    '''
    List the beacon modules currently loadable on this minion.

    Fires the list back over the minion event bus under the
    ``minion_beacons_list_available_complete`` tag and returns True.
    '''
    # Loader keys look like '<module>.beacon'; strip the suffix to get
    # the plain beacon module names.
    available = []
    for loaded_name in self.beacons:
        if '.beacon' in loaded_name:
            available.append('{0}'.format(loaded_name.replace('.beacon', '')))

    # Fire the complete event back along with the list of beacons
    event_bus = salt.utils.event.get_event('minion', opts=self.opts)
    event_bus.fire_event(
        {'complete': True, 'beacons': available},
        tag='/salt/minion/minion_beacons_list_available_complete')
    return True
def add_beacon(self, name, beacon_data):
'''
Add a beacon item
@ -207,16 +248,23 @@ class Beacon(object):
data = {}
data[name] = beacon_data
if name in self.opts['beacons']:
log.info('Updating settings for beacon '
'item: {0}'.format(name))
if name in self._get_beacons(include_opts=False):
comment = 'Cannot update beacon item {0}, ' \
'because it is configured in pillar.'.format(name)
complete = False
else:
log.info('Added new beacon item {0}'.format(name))
self.opts['beacons'].update(data)
if name in self.opts['beacons']:
comment = 'Updating settings for beacon ' \
'item: {0}'.format(name)
else:
comment = 'Added new beacon item: {0}'.format(name)
complete = True
self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_add_complete')
return True
@ -229,15 +277,21 @@ class Beacon(object):
data = {}
data[name] = beacon_data
log.info('Updating settings for beacon '
'item: {0}'.format(name))
self.opts['beacons'].update(data)
if name in self._get_beacons(include_opts=False):
comment = 'Cannot modify beacon item {0}, ' \
'it is configured in pillar.'.format(name)
complete = False
else:
comment = 'Updating settings for beacon ' \
'item: {0}'.format(name)
complete = True
self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_modify_complete')
return True
def delete_beacon(self, name):
@ -245,13 +299,22 @@ class Beacon(object):
Delete a beacon item
'''
if name in self.opts['beacons']:
log.info('Deleting beacon item {0}'.format(name))
del self.opts['beacons'][name]
if name in self._get_beacons(include_opts=False):
comment = 'Cannot delete beacon item {0}, ' \
'it is configured in pillar.'.format(name)
complete = False
else:
if name in self.opts['beacons']:
del self.opts['beacons'][name]
comment = 'Deleting beacon item: {0}'.format(name)
else:
comment = 'Beacon item {0} not found.'.format(name)
complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_delete_complete')
return True
@ -289,11 +352,19 @@ class Beacon(object):
Enable a beacon
'''
self._update_enabled(name, True)
if name in self._get_beacons(include_opts=False):
comment = 'Cannot enable beacon item {0}, ' \
'it is configured in pillar.'.format(name)
complete = False
else:
self._update_enabled(name, True)
comment = 'Enabling beacon item {0}'.format(name)
complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_enabled_complete')
return True
@ -303,11 +374,19 @@ class Beacon(object):
Disable a beacon
'''
self._update_enabled(name, False)
if name in self._get_beacons(include_opts=False):
comment = 'Cannot disable beacon item {0}, ' \
'it is configured in pillar.'.format(name)
complete = False
else:
self._update_enabled(name, False)
comment = 'Disabling beacon item {0}'.format(name)
complete = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_disabled_complete')
return True

View File

@ -240,7 +240,7 @@ class SyncClientMixin(object):
def low(self, fun, low, print_event=True, full_return=False):
'''
Check for deprecated usage and allow until Salt Oxygen.
Check for deprecated usage and allow until Salt Fluorine.
'''
msg = []
if 'args' in low:
@ -251,7 +251,7 @@ class SyncClientMixin(object):
low['kwarg'] = low.pop('kwargs')
if msg:
salt.utils.warn_until('Oxygen', ' '.join(msg))
salt.utils.warn_until('Fluorine', ' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)

View File

@ -723,6 +723,7 @@ class Single(object):
self.thin_dir = kwargs['thin_dir']
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get('ssh_python_env')
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user)
@ -782,6 +783,10 @@ class Single(object):
self.serial = salt.payload.Serial(opts)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd('powershell $ENV:PROCESSOR_ARCHITECTURE')
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir'])
def __arg_comps(self):

View File

@ -48,6 +48,10 @@ log = logging.getLogger(__name__)
# The name salt will identify the lib by
__virtualname__ = 'virtualbox'
#if no clone mode is specified in the virtualbox profile
#then default to 0 which was the old default value
DEFAULT_CLONE_MODE = 0
def __virtual__():
'''
@ -85,6 +89,30 @@ def get_configured_provider():
return configured
def map_clonemode(vm_info):
    """
    Convert the virtualbox config file values for clone_mode into the integers the API requires
    """
    clone_modes = {
        'state': 0,
        'child': 1,
        'all': 2
    }

    # A missing profile dict and a profile without the key both fall
    # back to the module-level default.
    if not vm_info or 'clonemode' not in vm_info:
        return DEFAULT_CLONE_MODE

    requested = vm_info['clonemode']
    if requested not in clone_modes:
        raise SaltCloudSystemExit(
            "Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(clone_modes.keys()))
        )
    return clone_modes[requested]
def create(vm_info):
"""
Creates a virtual machine from the given VM information.
@ -102,6 +130,7 @@ def create(vm_info):
profile: <dict>
driver: <provider>:<profile>
clonefrom: <vm_name>
clonemode: <mode> (default: state, choices: state, child, all)
}
@type vm_info dict
@return dict of resulting vm. !!!Passwords can and should be included!!!
@ -133,6 +162,9 @@ def create(vm_info):
key_filename = config.get_cloud_config_value(
'private_key', vm_info, __opts__, search_global=False, default=None
)
clone_mode = map_clonemode(vm_info)
wait_for_pattern = vm_info['waitforpattern'] if 'waitforpattern' in vm_info.keys() else None
interface_index = vm_info['interfaceindex'] if 'interfaceindex' in vm_info.keys() else 0
log.debug("Going to fire event: starting create")
__utils__['cloud.fire_event'](
@ -147,7 +179,8 @@ def create(vm_info):
# to create the virtual machine.
request_kwargs = {
'name': vm_info['name'],
'clone_from': vm_info['clonefrom']
'clone_from': vm_info['clonefrom'],
'clone_mode': clone_mode
}
__utils__['cloud.fire_event'](
@ -163,17 +196,17 @@ def create(vm_info):
# Booting and deploying if needed
if power:
vb_start_vm(vm_name, timeout=boot_timeout)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)
if len(ips):
ip = ips[0]
ip = ips[interface_index]
log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
# ssh or smb using ip and install salt only if deploy is True
if deploy:
vm_info['key_filename'] = key_filename
vm_info['ssh_host'] = ip
res = __utils__['cloud.bootstrap'](vm_info)
res = __utils__['cloud.bootstrap'](vm_info, __opts__)
vm_result.update(res)
__utils__['cloud.fire_event'](

View File

@ -862,6 +862,10 @@ class MinionManager(MinionBase):
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
@ -936,6 +940,7 @@ class Minion(MinionBase):
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue or []
self.periodic_callbacks = {}
if io_loop is None:
if HAS_ZMQ:
@ -967,6 +972,19 @@ class Minion(MinionBase):
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
@ -1070,19 +1088,22 @@ class Minion(MinionBase):
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
@ -1136,9 +1157,6 @@ class Minion(MinionBase):
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
@ -1896,6 +1914,8 @@ class Minion(MinionBase):
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get(u'include_pillar', None)
include_opts = data.get(u'include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
@ -1912,7 +1932,9 @@ class Minion(MinionBase):
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
self.beacons.list_beacons(include_opts, include_pillar)
elif func == u'list_available':
self.beacons.list_available_beacons()
def environ_setenv(self, tag, data):
'''
@ -2176,6 +2198,118 @@ class Minion(MinionBase):
except (ValueError, NameError):
pass
def _setup_core(self):
    '''
    Set up the core minion attributes.
    This is safe to call multiple times.

    Loads execution modules/returners/executors, the payload
    serializer, mod_opts, the matcher, the proc dir, and seeds the
    grains cache. The ``self.ready`` flag makes the method idempotent:
    all work is skipped on every call after the first.
    '''
    if not self.ready:
        # First call. Initialize.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        # proc_dir must be owned by the configured user so job files
        # can be written later.
        uid = salt.utils.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
        self.grains_cache = self.opts['grains']
        self.ready = True
def setup_beacons(self, before_connect=False):
    '''
    Set up the beacons.
    This is safe to call multiple times.

    Creates the Beacon manager and registers periodic ioloop callbacks
    for beacon processing (and fallback cleanup). Guarding on
    ``self.periodic_callbacks`` keys makes repeated calls no-ops.

    :param before_connect: When True, run one beacon iteration
                           immediately, before the master connection
                           is established.
    '''
    self._setup_core()

    loop_interval = self.opts['loop_interval']
    new_periodic_callbacks = {}

    if 'beacons' not in self.periodic_callbacks:
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

        def handle_beacons():
            # Process Beacons
            beacons = None
            try:
                beacons = self.process_beacons(self.functions)
            except Exception:
                # A failing beacon must not kill the periodic callback.
                log.critical('The beacon errored: ', exc_info=True)
            if beacons and self.connected:
                self._fire_master(events=beacons)

        # loop_interval is in seconds; PeriodicCallback wants ms.
        new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
        if before_connect:
            # Make sure there is a chance for one iteration to occur before connect
            handle_beacons()

    if 'cleanup' not in self.periodic_callbacks:
        new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

    # start all the other callbacks
    for periodic_cb in six.itervalues(new_periodic_callbacks):
        periodic_cb.start()

    self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
    '''
    Set up the scheduler.
    This is safe to call multiple times.

    Creates the Schedule object (if not already present), wires up the
    grains refresher, and registers periodic ioloop callbacks for
    schedule processing and fallback cleanup. Guarding on
    ``self.periodic_callbacks`` keys makes repeated calls no-ops.

    :param before_connect: When True, run one schedule iteration
                           immediately, before the master connection
                           is established.
    '''
    self._setup_core()

    loop_interval = self.opts['loop_interval']

    new_periodic_callbacks = {}

    if 'schedule' not in self.periodic_callbacks:
        if 'schedule' not in self.opts:
            self.opts['schedule'] = {}
        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])

        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                if self.opts['grains_refresh_every'] > 1:
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minutes.'.format(
                            self.opts['grains_refresh_every'])
                    )
                else:  # Clean up minute vs. minutes in log message
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minute.'.format(
                            self.opts['grains_refresh_every'])
                    )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            # Grain refresh is best-effort; log and keep starting up.
            log.error(
                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                    exc)
            )

        # TODO: actually listen to the return and change period
        def handle_schedule():
            self.process_schedule(self, loop_interval)
        # Schedule is polled every second regardless of loop_interval.
        new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)

        if before_connect:
            # Make sure there is a chance for one iteration to occur before connect
            handle_schedule()

    if 'cleanup' not in self.periodic_callbacks:
        new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

    # start all the other callbacks
    for periodic_cb in six.itervalues(new_periodic_callbacks):
        periodic_cb.start()

    self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
@ -2187,6 +2321,10 @@ class Minion(MinionBase):
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
@ -2201,31 +2339,9 @@ class Minion(MinionBase):
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
self.setup_beacons()
self.setup_scheduler()
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
@ -2243,30 +2359,7 @@ class Minion(MinionBase):
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons, sync=False)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:

View File

@ -27,12 +27,22 @@ __func_alias__ = {
}
def list_(return_yaml=True):
def list_(return_yaml=True,
include_pillar=True,
include_opts=True):
'''
List the beacons currently configured on the minion
:param return_yaml: Whether to return YAML formatted output, default True
:return: List of currently configured Beacons.
:param return_yaml: Whether to return YAML formatted output,
default True
:param include_pillar: Whether to include beacons that are
configured in pillar, default is True.
:param include_opts: Whether to include beacons that are
configured in opts, default is True.
:return: List of currently configured Beacons.
CLI Example:
@ -45,7 +55,10 @@ def list_(return_yaml=True):
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'list'}, 'manage_beacons')
res = __salt__['event.fire']({'func': 'list',
'include_pillar': include_pillar,
'include_opts': include_opts},
'manage_beacons')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30)
log.debug('event_ret {0}'.format(event_ret))
@ -69,6 +82,47 @@ def list_(return_yaml=True):
return {'beacons': {}}
def list_available(return_yaml=True):
    '''
    List the beacons currently available on the minion

    :param return_yaml: Whether to return YAML formatted output, default True
    :return: List of beacon modules available on the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' beacons.list_available

    '''
    beacons = None

    try:
        eventer = salt.utils.event.get_event('minion', opts=__opts__)
        res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons')
        if res:
            # Wait up to 30s for the minion to answer on the event bus.
            event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_available_complete', wait=30)
            if event_ret and event_ret['complete']:
                beacons = event_ret['beacons']
    except KeyError:
        # Effectively a no-op, since we can't really return without an event system
        ret = {}
        ret['result'] = False
        # Fixed copy-pasted message: this is list_available, not add.
        ret['comment'] = 'Event module not available. Beacons list_available failed.'
        return ret

    if beacons:
        if return_yaml:
            tmp = {'beacons': beacons}
            yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
            return yaml_out
        else:
            return beacons
    else:
        return {'beacons': {}}
def add(name, beacon_data, **kwargs):
'''
Add a beacon on the minion
@ -91,6 +145,10 @@ def add(name, beacon_data, **kwargs):
ret['comment'] = 'Beacon {0} is already configured.'.format(name)
return ret
if name not in list_available(return_yaml=False):
ret['comment'] = 'Beacon "{0}" is not available.'.format(name)
return ret
if 'test' in kwargs and kwargs['test']:
ret['result'] = True
ret['comment'] = 'Beacon: {0} would be added.'.format(name)
@ -130,7 +188,10 @@ def add(name, beacon_data, **kwargs):
if name in beacons and beacons[name] == beacon_data:
ret['result'] = True
ret['comment'] = 'Added beacon: {0}.'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.'
@ -215,7 +276,10 @@ def modify(name, beacon_data, **kwargs):
if name in beacons and beacons[name] == beacon_data:
ret['result'] = True
ret['comment'] = 'Modified beacon: {0}.'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.'
@ -257,6 +321,9 @@ def delete(name, **kwargs):
ret['result'] = True
ret['comment'] = 'Deleted beacon: {0}.'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon add failed.'
@ -279,7 +346,7 @@ def save():
ret = {'comment': [],
'result': True}
beacons = list_(return_yaml=False)
beacons = list_(return_yaml=False, include_pillar=False)
# move this file into an configurable opt
sfn = '{0}/{1}/beacons.conf'.format(__opts__['config_dir'],
@ -332,7 +399,7 @@ def enable(**kwargs):
else:
ret['result'] = False
ret['comment'] = 'Failed to enable beacons on minion.'
return ret
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacons enable job failed.'
@ -372,7 +439,7 @@ def disable(**kwargs):
else:
ret['result'] = False
ret['comment'] = 'Failed to disable beacons on minion.'
return ret
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacons enable job failed.'
@ -435,7 +502,10 @@ def enable_beacon(name, **kwargs):
else:
ret['result'] = False
ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name)
return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon enable job failed.'
@ -488,7 +558,10 @@ def disable_beacon(name, **kwargs):
else:
ret['result'] = False
ret['comment'] = 'Failed to disable beacon on minion.'
return ret
else:
ret['result'] = False
ret['comment'] = event_ret['comment']
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Beacon disable job failed.'

View File

@ -147,8 +147,24 @@ def _render_tab(lst):
cron['cmd']
)
)
for spec in lst['special']:
ret.append('{0} {1}\n'.format(spec['spec'], spec['cmd']))
for cron in lst['special']:
if cron['comment'] is not None or cron['identifier'] is not None:
comment = '#'
if cron['comment']:
comment += ' {0}'.format(
cron['comment'].rstrip().replace('\n', '\n# '))
if cron['identifier']:
comment += ' {0}:{1}'.format(SALT_CRON_IDENTIFIER,
cron['identifier'])
comment += '\n'
ret.append(comment)
ret.append('{0}{1} {2}\n'.format(
cron['commented'] is True and '#DISABLED#' or '',
cron['spec'],
cron['cmd']
)
)
return ret
@ -317,7 +333,15 @@ def list_tab(user):
continue
dat['spec'] = comps[0]
dat['cmd'] = ' '.join(comps[1:])
dat['identifier'] = identifier
dat['comment'] = comment
dat['commented'] = False
if commented_cron_job:
dat['commented'] = True
ret['special'].append(dat)
identifier = None
comment = None
commented_cron_job = False
elif line.startswith('#'):
# It's a comment! Catch it!
comment_line = line.lstrip('# ')
@ -363,11 +387,17 @@ def list_tab(user):
ret['pre'].append(line)
return ret
# For consistency's sake
ls = salt.utils.alias_function(list_tab, 'ls')
def set_special(user, special, cmd):
def set_special(user,
special,
cmd,
commented=False,
comment=None,
identifier=None):
'''
Set up a special command in the crontab.
@ -379,11 +409,60 @@ def set_special(user, special, cmd):
'''
lst = list_tab(user)
for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']:
cid = _cron_id(cron)
if _cron_matched(cron, cmd, identifier):
test_setted_id = (
cron['identifier'] is None
and SALT_CRON_NO_IDENTIFIER
or cron['identifier'])
tests = [(cron['comment'], comment),
(cron['commented'], commented),
(identifier, test_setted_id),
(cron['spec'], special)]
if cid or identifier:
tests.append((cron['cmd'], cmd))
if any([_needs_change(x, y) for x, y in tests]):
rm_special(user, cmd, identifier=cid)
# Use old values when setting the new job if there was no
# change needed for a given parameter
if not _needs_change(cron['spec'], special):
special = cron['spec']
if not _needs_change(cron['commented'], commented):
commented = cron['commented']
if not _needs_change(cron['comment'], comment):
comment = cron['comment']
if not _needs_change(cron['cmd'], cmd):
cmd = cron['cmd']
if (
cid == SALT_CRON_NO_IDENTIFIER
):
if identifier:
cid = identifier
if (
cid == SALT_CRON_NO_IDENTIFIER
and cron['identifier'] is None
):
cid = None
cron['identifier'] = cid
if not cid or (
cid and not _needs_change(cid, identifier)
):
identifier = cid
jret = set_special(user, special, cmd, commented=commented,
comment=comment, identifier=identifier)
if jret == 'new':
return 'updated'
else:
return jret
return 'present'
spec = {'spec': special,
'cmd': cmd}
lst['special'].append(spec)
cron = {'spec': special,
'cmd': cmd,
'identifier': identifier,
'comment': comment,
'commented': commented}
lst['special'].append(cron)
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
@ -536,7 +615,7 @@ def set_job(user,
return 'new'
def rm_special(user, special, cmd):
def rm_special(user, cmd, special=None, identifier=None):
'''
Remove a special cron job for a specified user.
@ -544,22 +623,28 @@ def rm_special(user, special, cmd):
.. code-block:: bash
salt '*' cron.rm_job root @hourly /usr/bin/foo
salt '*' cron.rm_special root /usr/bin/foo
'''
lst = list_tab(user)
ret = 'absent'
rm_ = None
for ind in range(len(lst['special'])):
if lst['special'][ind]['cmd'] == cmd and \
lst['special'][ind]['spec'] == special:
lst['special'].pop(ind)
rm_ = ind
if rm_ is not None:
break
if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
if special is None:
# No special param was specified
rm_ = ind
else:
if lst['special'][ind]['spec'] == special:
rm_ = ind
if rm_ is not None:
lst['special'].pop(rm_)
ret = 'removed'
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit
return comdat['stderr']
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return ret
@ -610,6 +695,7 @@ def rm_job(user,
return comdat['stderr']
return ret
rm = salt.utils.alias_function(rm_job, 'rm')

View File

@ -1861,14 +1861,14 @@ def line(path, content=None, match=None, mode=None, location=None,
if changed:
if show_changes:
with salt.utils.fopen(path, 'r') as fp_:
path_content = _splitlines_preserving_trailing_newline(
fp_.read())
changes_diff = ''.join(difflib.unified_diff(
path_content, _splitlines_preserving_trailing_newline(body)))
path_content = fp_.read().splitlines(True)
changes_diff = ''.join(difflib.unified_diff(path_content, body.splitlines(True)))
if __opts__['test'] is False:
fh_ = None
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
# Make sure we match the file mode from salt.utils.fopen
mode = 'wb' if six.PY2 and salt.utils.is_windows() else 'w'
fh_ = salt.utils.atomicfile.atomic_open(path, mode)
fh_.write(body)
finally:
if fh_:
@ -3368,7 +3368,11 @@ def stats(path, hash_type=None, follow_symlinks=True):
pstat = os.lstat(path)
except OSError:
# Not a broken symlink, just a nonexistent path
return ret
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('Path not found: {0}'.format(path))
else:
if follow_symlinks:
pstat = os.stat(path)
@ -3832,8 +3836,15 @@ def get_managed(
parsed_scheme = urlparsed_source.scheme
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')
if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
if unix_local_source:
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'
@ -3841,9 +3852,10 @@ def get_managed(
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found'.format(source)
elif not source_hash and parsed_scheme == 'file':
elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
@ -4193,12 +4205,6 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
# Check permissions
perms = {}
cur = stats(name, follow_symlinks=follow_symlinks)
if not cur:
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('{0} does not exist'.format(name))
perms['luser'] = cur['user']
perms['lgroup'] = cur['group']
perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
@ -4498,11 +4504,18 @@ def check_file_meta(
'''
changes = {}
if not source_sum:
source_sum = {}
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
source_sum = dict()
try:
lstats = stats(name, hash_type=source_sum.get('hash_type', None),
follow_symlinks=False)
except CommandExecutionError:
lstats = {}
if not lstats:
changes['newfile'] = name
return changes
if 'hsum' in source_sum:
if source_sum['hsum'] != lstats['sum']:
if not sfn and source:
@ -4741,21 +4754,22 @@ def manage_file(name,
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if source:
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
if _urlparse(source).scheme in ('salt', 'file', ''):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
@ -4785,7 +4799,7 @@ def manage_file(name,
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
@ -4973,8 +4987,6 @@ def manage_file(name,
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
@ -4998,6 +5010,8 @@ def manage_file(name,
)
ret['result'] = False
return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)

View File

@ -585,7 +585,8 @@ def _parse_members(settype, members):
def _parse_member(settype, member, strict=False):
subtypes = settype.split(':')[1].split(',')
parts = member.split(' ')
all_parts = member.split(' ', 1)
parts = all_parts[0].split(',')
parsed_member = []
for i in range(len(subtypes)):
@ -610,8 +611,8 @@ def _parse_member(settype, member, strict=False):
parsed_member.append(part)
if len(parts) > len(subtypes):
parsed_member.append(' '.join(parts[len(subtypes):]))
if len(all_parts) > 1:
parsed_member.append(all_parts[1])
return parsed_member

View File

@ -19,11 +19,12 @@ import logging
import time
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves import range, map # pylint: disable=import-error,redefined-builtin
from salt.ext.six import string_types
# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.decorators as decorators
from salt.utils.locales import sdecode as _sdecode
from salt.exceptions import CommandExecutionError, SaltInvocationError
@ -520,16 +521,72 @@ def get_auto_login():
return False if ret['retcode'] else ret['stdout']
def enable_auto_login(name):
def _kcpassword(password):
'''
Internal function for obfuscating the password used for AutoLogin
This is later written as the contents of the ``/etc/kcpassword`` file
.. versionadded:: 2017.7.3
Adapted from:
https://github.com/timsutton/osx-vm-templates/blob/master/scripts/support/set_kcpassword.py
Args:
password(str):
The password to obfuscate
Returns:
str: The obfuscated password
'''
# The magic 11 bytes - these are just repeated
# 0x7D 0x89 0x52 0x23 0xD2 0xBC 0xDD 0xEA 0xA3 0xB9 0x1F
key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]
key_len = len(key)
# Convert each character to a byte
password = list(map(ord, password))
# pad password length out to an even multiple of key length
remainder = len(password) % key_len
if remainder > 0:
password = password + [0] * (key_len - remainder)
# Break the password into chunks the size of len(key) (11)
for chunk_index in range(0, len(password), len(key)):
# Reset the key_index to 0 for each iteration
key_index = 0
# Do an XOR on each character of that chunk of the password with the
# corresponding item in the key
# The length of the password, or the length of the key, whichever is
# smaller
for password_index in range(chunk_index,
min(chunk_index + len(key), len(password))):
password[password_index] = password[password_index] ^ key[key_index]
key_index += 1
# Convert each byte back to a character
password = list(map(chr, password))
return ''.join(password)
def enable_auto_login(name, password):
'''
.. versionadded:: 2016.3.0
Configures the machine to auto login with the specified user
:param str name: The user account to use for auto login
Args:
:return: True if successful, False if not
:rtype: bool
name (str): The user account to use for auto login
password (str): The password to use for auto login
.. versionadded:: 2017.7.3
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -537,6 +594,7 @@ def enable_auto_login(name):
salt '*' user.enable_auto_login stevej
'''
# Make the entry into the defaults file
cmd = ['defaults',
'write',
'/Library/Preferences/com.apple.loginwindow.plist',
@ -544,6 +602,13 @@ def enable_auto_login(name):
name]
__salt__['cmd.run'](cmd)
current = get_auto_login()
# Create/Update the kcpassword file with an obfuscated password
o_password = _kcpassword(password=password)
with salt.utils.files.set_umask(0o077):
with salt.utils.fopen('/etc/kcpassword', 'w') as fd:
fd.write(o_password)
return current if isinstance(current, bool) else current.lower() == name.lower()
@ -553,8 +618,8 @@ def disable_auto_login():
Disables auto login on the machine
:return: True if successful, False if not
:rtype: bool
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
@ -562,6 +627,11 @@ def disable_auto_login():
salt '*' user.disable_auto_login
'''
# Remove the kcpassword file
cmd = 'rm -f /etc/kcpassword'
__salt__['cmd.run'](cmd)
# Remove the entry from the defaults file
cmd = ['defaults',
'delete',
'/Library/Preferences/com.apple.loginwindow.plist',

View File

@ -24,7 +24,7 @@ Values or Entries
Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs.
:depends: - winreg Python module
:depends: - PyWin32
'''
# When production windows installer is using Python 3, Python 2 code can be removed
@ -35,14 +35,13 @@ from __future__ import unicode_literals
import sys
import logging
from salt.ext.six.moves import range # pylint: disable=W0622,import-error
from salt.ext import six
# Import third party libs
try:
from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module
from win32con import HWND_BROADCAST, WM_SETTINGCHANGE
from win32api import RegCreateKeyEx, RegSetValueEx, RegFlushKey, \
RegCloseKey, error as win32apiError, SendMessage
import win32gui
import win32api
import win32con
import pywintypes
HAS_WINDOWS_MODULES = True
except ImportError:
HAS_WINDOWS_MODULES = False
@ -60,7 +59,7 @@ __virtualname__ = 'reg'
def __virtual__():
'''
Only works on Windows systems with the _winreg python module
Only works on Windows systems with the PyWin32
'''
if not salt.utils.is_windows():
return (False, 'reg execution module failed to load: '
@ -69,106 +68,76 @@ def __virtual__():
if not HAS_WINDOWS_MODULES:
return (False, 'reg execution module failed to load: '
'One of the following libraries did not load: '
+ '_winreg, win32gui, win32con, win32api')
+ 'win32gui, win32con, win32api')
return __virtualname__
# winreg in python 2 is hard coded to use the 'mbcs' codec, which uses
# the encoding that the user has assigned. The functions _to_unicode
# and _to_mbcs help with this.
def _to_mbcs(vdata):
    '''
    Converts unicode to the current user's character encoding. Use this for
    values returned by reg functions
    '''
    return salt.utils.to_unicode(vdata, 'mbcs')
def _to_unicode(vdata):
    '''
    Converts from the current user's character encoding to unicode. Use this
    for parameters being passed to reg functions

    :param vdata: the value to decode (str, bytes, or already-unicode)
    :return: the value as unicode text
    '''
    # utf-8 here (not mbcs): callers supply text that salt itself produced,
    # so decode it with salt's canonical encoding.
    return salt.utils.to_unicode(vdata, 'utf-8')
class Registry(object): # pylint: disable=R0903
'''
Delay '_winreg' usage until this module is used
Delay usage until this module is used
'''
def __init__(self):
self.hkeys = {
'HKEY_CURRENT_USER': _winreg.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': _winreg.HKEY_LOCAL_MACHINE,
'HKEY_USERS': _winreg.HKEY_USERS,
'HKCU': _winreg.HKEY_CURRENT_USER,
'HKLM': _winreg.HKEY_LOCAL_MACHINE,
'HKU': _winreg.HKEY_USERS,
'HKEY_CURRENT_USER': win32con.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': win32con.HKEY_LOCAL_MACHINE,
'HKEY_USERS': win32con.HKEY_USERS,
'HKCU': win32con.HKEY_CURRENT_USER,
'HKLM': win32con.HKEY_LOCAL_MACHINE,
'HKU': win32con.HKEY_USERS,
}
self.vtype = {
'REG_BINARY': _winreg.REG_BINARY,
'REG_DWORD': _winreg.REG_DWORD,
'REG_EXPAND_SZ': _winreg.REG_EXPAND_SZ,
'REG_MULTI_SZ': _winreg.REG_MULTI_SZ,
'REG_SZ': _winreg.REG_SZ
'REG_BINARY': win32con.REG_BINARY,
'REG_DWORD': win32con.REG_DWORD,
'REG_EXPAND_SZ': win32con.REG_EXPAND_SZ,
'REG_MULTI_SZ': win32con.REG_MULTI_SZ,
'REG_SZ': win32con.REG_SZ,
'REG_QWORD': win32con.REG_QWORD
}
self.opttype = {
'REG_OPTION_NON_VOLATILE': _winreg.REG_OPTION_NON_VOLATILE,
'REG_OPTION_VOLATILE': _winreg.REG_OPTION_VOLATILE
'REG_OPTION_NON_VOLATILE': 0,
'REG_OPTION_VOLATILE': 1
}
# Return Unicode due to from __future__ import unicode_literals
self.vtype_reverse = {
_winreg.REG_BINARY: 'REG_BINARY',
_winreg.REG_DWORD: 'REG_DWORD',
_winreg.REG_EXPAND_SZ: 'REG_EXPAND_SZ',
_winreg.REG_MULTI_SZ: 'REG_MULTI_SZ',
_winreg.REG_SZ: 'REG_SZ',
# REG_QWORD isn't in the winreg library
11: 'REG_QWORD'
win32con.REG_BINARY: 'REG_BINARY',
win32con.REG_DWORD: 'REG_DWORD',
win32con.REG_EXPAND_SZ: 'REG_EXPAND_SZ',
win32con.REG_MULTI_SZ: 'REG_MULTI_SZ',
win32con.REG_SZ: 'REG_SZ',
win32con.REG_QWORD: 'REG_QWORD'
}
self.opttype_reverse = {
_winreg.REG_OPTION_NON_VOLATILE: 'REG_OPTION_NON_VOLATILE',
_winreg.REG_OPTION_VOLATILE: 'REG_OPTION_VOLATILE'
0: 'REG_OPTION_NON_VOLATILE',
1: 'REG_OPTION_VOLATILE'
}
# delete_key_recursive uses this to check the subkey contains enough \
# as we do not want to remove all or most of the registry
self.subkey_slash_check = {
_winreg.HKEY_CURRENT_USER: 0,
_winreg.HKEY_LOCAL_MACHINE: 1,
_winreg.HKEY_USERS: 1
win32con.HKEY_CURRENT_USER: 0,
win32con.HKEY_LOCAL_MACHINE: 1,
win32con.HKEY_USERS: 1
}
self.registry_32 = {
True: _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY,
False: _winreg.KEY_READ,
True: win32con.KEY_READ | win32con.KEY_WOW64_32KEY,
False: win32con.KEY_READ,
}
def __getattr__(self, k):
@ -191,21 +160,16 @@ def _key_exists(hive, key, use_32bit_registry=False):
:return: Returns True if found, False if not found
:rtype: bool
'''
if PY2:
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
registry = Registry()
hkey = registry.hkeys[local_hive]
access_mask = registry.registry_32[use_32bit_registry]
try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
_winreg.CloseKey(handle)
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
win32api.RegCloseKey(handle)
return True
except WindowsError: # pylint: disable=E0602
return False
@ -224,7 +188,10 @@ def broadcast_change():
salt '*' reg.broadcast_change
'''
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 0))
_, res = win32gui.SendMessageTimeout(
win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
win32con.SMTO_ABORTIFHUNG, 5000)
return not bool(res)
def list_keys(hive, key=None, use_32bit_registry=False):
@ -253,12 +220,8 @@ def list_keys(hive, key=None, use_32bit_registry=False):
salt '*' reg.list_keys HKLM 'SOFTWARE'
'''
if PY2:
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
registry = Registry()
hkey = registry.hkeys[local_hive]
@ -266,12 +229,12 @@ def list_keys(hive, key=None, use_32bit_registry=False):
subkeys = []
try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
for i in range(_winreg.QueryInfoKey(handle)[0]):
subkey = _winreg.EnumKey(handle, i)
for i in range(win32api.RegQueryInfoKey(handle)[0]):
subkey = win32api.RegEnumKey(handle, i)
if PY2:
subkeys.append(_mbcs_to_unicode(subkey))
subkeys.append(_to_unicode(subkey))
else:
subkeys.append(subkey)
@ -312,13 +275,8 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
'''
if PY2:
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
registry = Registry()
hkey = registry.hkeys[local_hive]
@ -327,37 +285,21 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
values = list()
try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
for i in range(_winreg.QueryInfoKey(handle)[1]):
vname, vdata, vtype = _winreg.EnumValue(handle, i)
for i in range(win32api.RegQueryInfoKey(handle)[1]):
vname, vdata, vtype = win32api.RegEnumValue(handle, i)
if not vname:
vname = "(Default)"
value = {'hive': local_hive,
'key': local_key,
'vname': vname,
'vdata': vdata,
'vname': _to_mbcs(vname),
'vdata': _to_mbcs(vdata),
'vtype': registry.vtype_reverse[vtype],
'success': True}
values.append(value)
if include_default:
# Get the default value for the key
value = {'hive': local_hive,
'key': local_key,
'vname': '(Default)',
'vdata': None,
'success': True}
try:
# QueryValueEx returns unicode data
vdata, vtype = _winreg.QueryValueEx(handle, '(Default)')
if vdata or vdata in [0, '']:
value['vtype'] = registry.vtype_reverse[vtype]
value['vdata'] = vdata
else:
value['comment'] = 'Empty Value'
except WindowsError: # pylint: disable=E0602
value['vdata'] = ('(value not set)')
value['vtype'] = 'REG_SZ'
values.append(value)
except WindowsError as exc: # pylint: disable=E0602
log.debug(exc)
log.debug(r'Cannot find key: {0}\{1}'.format(hive, key))
@ -403,30 +345,19 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
'''
# If no name is passed, the default value of the key will be returned
# The value name is Default
# Setup the return array
if PY2:
ret = {'hive': _mbcs_to_unicode(hive),
'key': _mbcs_to_unicode(key),
'vname': _mbcs_to_unicode(vname),
'vdata': None,
'success': True}
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
local_vname = _unicode_to_mbcs(vname)
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
else:
ret = {'hive': hive,
'key': key,
'vname': vname,
'vdata': None,
'success': True}
local_hive = hive
local_key = key
local_vname = vname
ret = {'hive': local_hive,
'key': local_key,
'vname': local_vname,
'vdata': None,
'success': True}
if not vname:
ret['vname'] = '(Default)'
@ -436,19 +367,22 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
access_mask = registry.registry_32[use_32bit_registry]
try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
try:
# QueryValueEx returns unicode data
vdata, vtype = _winreg.QueryValueEx(handle, local_vname)
# RegQueryValueEx returns and accepts unicode data
vdata, vtype = win32api.RegQueryValueEx(handle, local_vname)
if vdata or vdata in [0, '']:
ret['vtype'] = registry.vtype_reverse[vtype]
ret['vdata'] = vdata
if vtype == 7:
ret['vdata'] = [_to_mbcs(i) for i in vdata]
else:
ret['vdata'] = _to_mbcs(vdata)
else:
ret['comment'] = 'Empty Value'
except WindowsError: # pylint: disable=E0602
ret['vdata'] = ('(value not set)')
ret['vtype'] = 'REG_SZ'
except WindowsError as exc: # pylint: disable=E0602
except pywintypes.error as exc: # pylint: disable=E0602
log.debug(exc)
log.debug('Cannot find key: {0}\\{1}'.format(local_hive, local_key))
ret['comment'] = 'Cannot find key: {0}\\{1}'.format(local_hive, local_key)
@ -555,42 +489,47 @@ def set_value(hive,
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
vtype=REG_LIST vdata='[a,b,c]'
'''
if PY2:
try:
local_hive = _mbcs_to_unicode(hive)
local_key = _mbcs_to_unicode(key)
local_vname = _mbcs_to_unicode(vname)
local_vtype = _mbcs_to_unicode(vtype)
local_vdata = _mbcs_to_unicode_wrap(vdata, local_vtype)
except TypeError as exc: # pylint: disable=E0602
log.error(exc, exc_info=True)
return False
else:
local_hive = hive
local_key = key
local_vname = vname
local_vdata = vdata
local_vtype = vtype
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
local_vtype = _to_unicode(vtype)
registry = Registry()
hkey = registry.hkeys[local_hive]
vtype_value = registry.vtype[local_vtype]
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
# Check data type and cast to expected type
# int will automatically become long on 64bit numbers
# https://www.python.org/dev/peps/pep-0237/
# String Types to Unicode
if vtype_value in [1, 2]:
local_vdata = _to_unicode(vdata)
# Don't touch binary...
elif vtype_value == 3:
local_vdata = vdata
# Make sure REG_MULTI_SZ is a list of strings
elif vtype_value == 7:
local_vdata = [_to_unicode(i) for i in vdata]
# Everything else is int
else:
local_vdata = int(vdata)
if volatile:
create_options = registry.opttype['REG_OPTION_VOLATILE']
else:
create_options = registry.opttype['REG_OPTION_NON_VOLATILE']
try:
handle, _ = RegCreateKeyEx(hkey, local_key, access_mask,
handle, _ = win32api.RegCreateKeyEx(hkey, local_key, access_mask,
Options=create_options)
RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
RegFlushKey(handle)
RegCloseKey(handle)
win32api.RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
win32api.RegFlushKey(handle)
win32api.RegCloseKey(handle)
broadcast_change()
return True
except (win32apiError, SystemError, ValueError, TypeError) as exc: # pylint: disable=E0602
except (win32api.error, SystemError, ValueError, TypeError) as exc: # pylint: disable=E0602
log.error(exc, exc_info=True)
return False
@ -626,18 +565,14 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
'''
if PY2:
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
else:
local_hive = hive
local_key = key
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
# Instantiate the registry object
registry = Registry()
hkey = registry.hkeys[local_hive]
key_path = local_key
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
if not _key_exists(local_hive, local_key, use_32bit_registry):
return False
@ -654,17 +589,17 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
i = 0
while True:
try:
subkey = _winreg.EnumKey(_key, i)
subkey = win32api.RegEnumKey(_key, i)
yield subkey
i += 1
except WindowsError: # pylint: disable=E0602
except pywintypes.error: # pylint: disable=E0602
break
def _traverse_registry_tree(_hkey, _keypath, _ret, _access_mask):
'''
Traverse the registry tree i.e. dive into the tree
'''
_key = _winreg.OpenKey(_hkey, _keypath, 0, _access_mask)
_key = win32api.RegOpenKeyEx(_hkey, _keypath, 0, _access_mask)
for subkeyname in _subkeys(_key):
subkeypath = r'{0}\{1}'.format(_keypath, subkeyname)
_ret = _traverse_registry_tree(_hkey, subkeypath, _ret, access_mask)
@ -683,8 +618,8 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
# Delete all sub_keys
for sub_key_path in key_list:
try:
key_handle = _winreg.OpenKey(hkey, sub_key_path, 0, access_mask)
_winreg.DeleteKey(key_handle, '')
key_handle = win32api.RegOpenKeyEx(hkey, sub_key_path, 0, access_mask)
win32api.RegDeleteKey(key_handle, '')
ret['Deleted'].append(r'{0}\{1}'.format(hive, sub_key_path))
except WindowsError as exc: # pylint: disable=E0602
log.error(exc, exc_info=True)
@ -723,23 +658,18 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
'''
if PY2:
local_hive = _mbcs_to_unicode(hive)
local_key = _unicode_to_mbcs(key)
local_vname = _unicode_to_mbcs(vname)
else:
local_hive = hive
local_key = key
local_vname = vname
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
local_vname = _to_unicode(vname)
registry = Registry()
hkey = registry.hkeys[local_hive]
access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
try:
handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
_winreg.DeleteValue(handle, local_vname)
_winreg.CloseKey(handle)
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
win32api.RegDeleteValue(handle, local_vname)
win32api.RegCloseKey(handle)
broadcast_change()
return True
except WindowsError as exc: # pylint: disable=E0602

View File

@ -1084,8 +1084,8 @@ def build_routes(iface, **settings):
log.debug("IPv4 routes:\n{0}".format(opts4))
log.debug("IPv6 routes:\n{0}".format(opts6))
routecfg = template.render(routes=opts4)
routecfg6 = template.render(routes=opts6)
routecfg = template.render(routes=opts4, iface=iface)
routecfg6 = template.render(routes=opts6, iface=iface)
if settings['test']:
routes = _read_temp(routecfg)

View File

@ -99,17 +99,16 @@ def _set_retcode(ret, highstate=None):
__context__['retcode'] = 2
def _check_pillar(kwargs, pillar=None):
def _get_pillar_errors(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
Checks all pillars (external and internal) for errors.
Return an error message, if anywhere or None.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True
return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None
def _wait(jid):
@ -411,10 +410,10 @@ def template(tem, queue=False, **kwargs):
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
@ -493,6 +492,18 @@ def apply_(mods=None,
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.apply exclude=bar,baz
salt '*' state.apply exclude=foo*
salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
@ -758,6 +769,18 @@ def highstate(test=None, queue=False, **kwargs):
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv
Specify a salt fileserver environment to be used when applying states
@ -872,11 +895,10 @@ def highstate(test=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
ret = {}
@ -935,6 +957,18 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.sls foo,bar,baz exclude=bar,baz
salt '*' state.sls foo,bar,baz exclude=ba*
salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
@ -1071,11 +1105,10 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mocked=kwargs.get('mock', False),
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
umask = os.umask(0o77)
@ -1090,7 +1123,6 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
mods = mods.split(',')
st_.push_active()
ret = {}
try:
high_, errors = st_.render_highstate({opts['environment']: mods})
@ -1197,11 +1229,10 @@ def top(topfn, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc,
context=__context__,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
st_.push_active()
st_.opts['state_top'] = salt.utils.url.create(topfn)
@ -1259,10 +1290,10 @@ def show_highstate(queue=False, **kwargs):
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
@ -1293,10 +1324,10 @@ def show_lowstate(queue=False, **kwargs):
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
st_.push_active()
try:
@ -1394,11 +1425,10 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
return ['Pillar failed to render with the following messages:'] + errors
if isinstance(mods, six.string_types):
split_mods = mods.split(',')
@ -1480,10 +1510,10 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if isinstance(mods, six.string_types):
mods = mods.split(',')
@ -1567,10 +1597,10 @@ def show_sls(mods, test=None, queue=False, **kwargs):
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
if isinstance(mods, six.string_types):
mods = mods.split(',')
@ -1616,10 +1646,10 @@ def show_top(queue=False, **kwargs):
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = 5
raise CommandExecutionError('Pillar failed to render',
info=st_.opts['pillar']['_errors'])
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()

View File

@ -58,7 +58,7 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
lstat, path_exists_glob, write, pardir, join, HASHES, HASHES_REVMAP,
comment, uncomment, _add_flags, comment_line, _regex_to_static,
_get_line_indent, apply_template_on_contents, dirname, basename,
list_backups_dir)
list_backups_dir, _assert_occurrence, _starts_till)
from salt.modules.file import normpath as normpath_
from salt.utils import namespaced_function as _namespaced_function
@ -116,7 +116,7 @@ def __virtual__():
global write, pardir, join, _add_flags, apply_template_on_contents
global path_exists_glob, comment, uncomment, _mkstemp_copy
global _regex_to_static, _get_line_indent, dirname, basename
global list_backups_dir, normpath_
global list_backups_dir, normpath_, _assert_occurrence, _starts_till
replace = _namespaced_function(replace, globals())
search = _namespaced_function(search, globals())
@ -179,6 +179,8 @@ def __virtual__():
basename = _namespaced_function(basename, globals())
list_backups_dir = _namespaced_function(list_backups_dir, globals())
normpath_ = _namespaced_function(normpath_, globals())
_assert_occurrence = _namespaced_function(_assert_occurrence, globals())
_starts_till = _namespaced_function(_starts_till, globals())
else:
return False, 'Module win_file: Missing Win32 modules'
@ -789,7 +791,7 @@ def chgrp(path, group):
def stats(path, hash_type='sha256', follow_symlinks=True):
'''
Return a dict containing the stats for a given file
Return a dict containing the stats about a given file
Under Windows, `gid` will equal `uid` and `group` will equal `user`.
@ -818,6 +820,8 @@ def stats(path, hash_type='sha256', follow_symlinks=True):
salt '*' file.stats /etc/passwd
'''
# This is to mirror the behavior of file.py. `check_file_meta` expects an
# empty dictionary when the file does not exist
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
@ -1225,33 +1229,37 @@ def mkdir(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful
@ -1310,33 +1318,37 @@ def makedirs_(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directly, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
.. note::
@ -1421,36 +1433,40 @@ def makedirs_perms(path,
path (str): The full path to the directory.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful, otherwise raise an error
bool: True if successful, otherwise raises an error
CLI Example:
@ -1503,45 +1519,54 @@ def check_perms(path,
deny_perms=None,
inheritance=True):
'''
Set owner and permissions for each directory created.
Set owner and permissions for each directory created. Used mostly by the
state system.
Args:
path (str): The full path to the directory.
ret (dict): A dictionary to append changes to and return. If not passed,
will create a new dictionary to return.
ret (dict):
A dictionary to append changes to and return. If not passed, will
create a new dictionary to return.
owner (str): The owner of the directory. If not passed, it will be the
account that created the directory, likely SYSTEM
owner (str):
The owner of the directory. If not passed, it will be the account
that created the directory, likely SYSTEM
grant_perms (dict): A dictionary containing the user/group and the basic
permissions to grant, ie: ``{'user': {'perms': 'basic_permission'}}``.
You can also set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to`` setting
like this:
grant_perms (dict):
A dictionary containing the user/group and the basic permissions to
grant, ie: ``{'user': {'perms': 'basic_permission'}}``. You can also
set the ``applies_to`` setting here. The default is
``this_folder_subfolders_files``. Specify another ``applies_to``
setting like this:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
{'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}
To set advanced permissions use a list for the ``perms`` parameter, ie:
To set advanced permissions use a list for the ``perms`` parameter, ie:
.. code-block:: yaml
.. code-block:: yaml
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
{'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}
deny_perms (dict): A dictionary containing the user/group and
permissions to deny along with the ``applies_to`` setting. Use the same
format used for the ``grant_perms`` parameter. Remember, deny
permissions supersede grant permissions.
deny_perms (dict):
A dictionary containing the user/group and permissions to deny along
with the ``applies_to`` setting. Use the same format used for the
``grant_perms`` parameter. Remember, deny permissions supersede
grant permissions.
inheritance (bool): If True the object will inherit permissions from the
parent, if False, inheritance will be disabled. Inheritance setting will
not apply to parent directories if they must be created
inheritance (bool):
If True the object will inherit permissions from the parent, if
False, inheritance will be disabled. Inheritance setting will not
apply to parent directories if they must be created
Returns:
bool: True if successful, otherwise raise an error
dict: A dictionary of changes made to the object
Raises:
CommandExecutionError: If the object does not exist
CLI Example:
@ -1556,6 +1581,9 @@ def check_perms(path,
# Specify advanced attributes with a list
salt '*' file.check_perms C:\\Temp\\ Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
path = os.path.expanduser(path)
if not ret:

View File

@ -619,8 +619,8 @@ class _policy_info(object):
},
},
'RemoteRegistryExactPaths': {
'Policy': 'Network access: Remotely accessible registry '
'paths',
'Policy': 'Network access: Remotely accessible '
'registry paths',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
@ -632,8 +632,8 @@ class _policy_info(object):
},
},
'RemoteRegistryPaths': {
'Policy': 'Network access: Remotely accessible registry '
'paths and sub-paths',
'Policy': 'Network access: Remotely accessible '
'registry paths and sub-paths',
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
@ -644,8 +644,8 @@ class _policy_info(object):
},
},
'RestrictNullSessAccess': {
'Policy': 'Network access: Restrict anonymous access to '
'Named Pipes and Shares',
'Policy': 'Network access: Restrict anonymous access '
'to Named Pipes and Shares',
'lgpo_section': self.security_options_gpedit_path,
'Settings': self.enabled_one_disabled_zero.keys(),
'Registry': {
@ -898,9 +898,9 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'CachedLogonsCount': {
'Policy': 'Interactive logon: Number of previous logons '
'to cache (in case domain controller is not '
'available)',
'Policy': 'Interactive logon: Number of previous '
'logons to cache (in case domain controller '
'is not available)',
'Settings': {
'Function': '_in_range_inclusive',
'Args': {'min': 0, 'max': 50}
@ -915,8 +915,9 @@ class _policy_info(object):
},
},
'ForceUnlockLogon': {
'Policy': 'Interactive logon: Require Domain Controller '
'authentication to unlock workstation',
'Policy': 'Interactive logon: Require Domain '
'Controller authentication to unlock '
'workstation',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -983,8 +984,8 @@ class _policy_info(object):
},
'EnableUIADesktopToggle': {
'Policy': 'User Account Control: Allow UIAccess '
'applications to prompt for elevation without '
'using the secure desktop',
'applications to prompt for elevation '
'without using the secure desktop',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -998,8 +999,8 @@ class _policy_info(object):
},
'ConsentPromptBehaviorAdmin': {
'Policy': 'User Account Control: Behavior of the '
'elevation prompt for administrators in Admin '
'Approval Mode',
'elevation prompt for administrators in '
'Admin Approval Mode',
'Settings': self.uac_admin_prompt_lookup.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1077,7 +1078,7 @@ class _policy_info(object):
},
'EnableSecureUIAPaths': {
'Policy': 'User Account Control: Only elevate UIAccess '
'applicaitons that are installed in secure '
'applications that are installed in secure '
'locations',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
@ -1091,8 +1092,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'EnableLUA': {
'Policy': 'User Account Control: Run all administrators '
'in Admin Approval Mode',
'Policy': 'User Account Control: Run all '
'administrators in Admin Approval Mode',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1354,8 +1355,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'EnableForcedLogoff': {
'Policy': 'Microsoft network server: Disconnect clients '
'when logon hours expire',
'Policy': 'Microsoft network server: Disconnect '
'clients when logon hours expire',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1422,7 +1423,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_transform,
},
'UndockWithoutLogon': {
'Policy': 'Devices: Allow undock without having to log on',
'Policy': 'Devices: Allow undock without having to log '
'on',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1497,8 +1499,8 @@ class _policy_info(object):
},
},
'SubmitControl': {
'Policy': 'Domain controller: Allow server operators to '
'schedule tasks',
'Policy': 'Domain controller: Allow server operators '
'to schedule tasks',
'Settings': self.enabled_one_disabled_zero_strings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -1577,8 +1579,8 @@ class _policy_info(object):
'Transform': self.enabled_one_disabled_zero_strings_transform,
},
'SignSecureChannel': {
'Policy': 'Domain member: Digitally sign secure channel '
'data (when possible)',
'Policy': 'Domain member: Digitally sign secure '
'channel data (when possible)',
'Settings': self.enabled_one_disabled_zero_strings.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -2301,7 +2303,7 @@ class _policy_info(object):
},
'RecoveryConsoleSecurityLevel': {
'Policy': 'Recovery console: Allow automatic '
'adminstrative logon',
'administrative logon',
'Settings': self.enabled_one_disabled_zero.keys(),
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
@ -2433,15 +2435,18 @@ class _policy_info(object):
'''
converts a binary 0/1 to Disabled/Enabled
'''
if val is not None:
if ord(val) == 0:
return 'Disabled'
elif ord(val) == 1:
return 'Enabled'
try:
if val is not None:
if ord(val) == 0:
return 'Disabled'
elif ord(val) == 1:
return 'Enabled'
else:
return 'Invalid Value'
else:
return 'Invalid Value'
else:
return 'Not Defined'
return 'Not Defined'
except TypeError:
return 'Invalid Value'
@classmethod
def _binary_enable_zero_disable_one_reverse_conversion(cls, val, **kwargs):
@ -3502,7 +3507,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]),
six.unichr(len(this_element_value.encode('utf-16-le'))),
six.unichr(len(this_element_value.encode('utf-16-le', '' if six.PY2 else 'surrogatepass'))),
this_element_value)
return expected_string
@ -4242,8 +4247,8 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
for adm_namespace in admtemplate_data:
for adm_policy in admtemplate_data[adm_namespace]:
if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured':
if adm_policy in base_policy_settings[adm_namespace]:
base_policy_settings[adm_namespace].pop(adm_policy)
if base_policy_settings.get(adm_namespace, {}).pop(adm_policy, None) is not None:
log.debug('Policy "{0}" removed'.format(adm_policy))
else:
log.debug('adding {0} to base_policy_settings'.format(adm_policy))
if adm_namespace not in base_policy_settings:

View File

@ -39,10 +39,11 @@ import logging
import os
import re
import time
import sys
from functools import cmp_to_key
# Import third party libs
import salt.ext.six as six
from salt.ext import six
# pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
@ -50,9 +51,12 @@ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
from salt.exceptions import (CommandExecutionError,
SaltInvocationError,
SaltRenderError)
import salt.utils
import salt.utils.pkg
import salt.utils # Can be removed once is_true, get_hash, compare_dicts are moved
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.pkg
import salt.utils.versions
import salt.syspaths
import salt.payload
from salt.exceptions import MinionError
@ -99,7 +103,7 @@ def latest_version(*names, **kwargs):
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
if len(names) == 0:
if not names:
return ''
# Initialize the return dict with empty strings
@ -124,6 +128,8 @@ def latest_version(*names, **kwargs):
if name in installed_pkgs:
log.trace('Determining latest installed version of %s', name)
try:
# installed_pkgs[name] Can be version number or 'Not Found'
# 'Not Found' occurs when version number is not found in the registry
latest_installed = sorted(
installed_pkgs[name],
key=cmp_to_key(_reverse_cmp_pkg_versions)
@ -140,6 +146,8 @@ def latest_version(*names, **kwargs):
# get latest available (from winrepo_dir) version of package
pkg_info = _get_package_info(name, saltenv=saltenv)
log.trace('Raw winrepo pkg_info for {0} is {1}'.format(name, pkg_info))
# latest_available can be version number or 'latest' or even 'Not Found'
latest_available = _get_latest_pkg_version(pkg_info)
if latest_available:
log.debug('Latest available version '
@ -147,9 +155,9 @@ def latest_version(*names, **kwargs):
# check, whether latest available version
# is newer than latest installed version
if salt.utils.compare_versions(ver1=str(latest_available),
oper='>',
ver2=str(latest_installed)):
if compare_versions(ver1=str(latest_available),
oper='>',
ver2=str(latest_installed)):
log.debug('Upgrade of {0} from {1} to {2} '
'is available'.format(name,
latest_installed,
@ -188,10 +196,9 @@ def upgrade_available(name, **kwargs):
# same default as latest_version
refresh = salt.utils.is_true(kwargs.get('refresh', True))
current = version(name, saltenv=saltenv, refresh=refresh).get(name)
latest = latest_version(name, saltenv=saltenv, refresh=False)
return compare_versions(latest, '>', current)
# if latest_version returns blank, the latest version is already installed or
# there is no package definition. This is a salt standard which could be improved.
return latest_version(name, saltenv=saltenv, refresh=refresh) != ''
def list_upgrades(refresh=True, **kwargs):
@ -222,9 +229,13 @@ def list_upgrades(refresh=True, **kwargs):
pkgs = {}
for pkg in installed_pkgs:
if pkg in available_pkgs:
# latest_version() will be blank if the latest version is installed.
# or the package name is wrong. Given we check available_pkgs, this
# should not be the case of wrong package name.
# Note: latest_version() is an expensive way to do this as it
# calls list_pkgs each time.
latest_ver = latest_version(pkg, refresh=False, saltenv=saltenv)
install_ver = installed_pkgs[pkg]
if compare_versions(latest_ver, '>', install_ver):
if latest_ver:
pkgs[pkg] = latest_ver
return pkgs
@ -241,7 +252,7 @@ def list_available(*names, **kwargs):
saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``True``.
refresh (bool): Refresh package metadata. Default ``False``.
return_dict_always (bool):
Default ``False`` dict when a single package name is queried.
@ -264,7 +275,7 @@ def list_available(*names, **kwargs):
return ''
saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.is_true(kwargs.get('refresh', True))
refresh = salt.utils.is_true(kwargs.get('refresh', False))
return_dict_always = \
salt.utils.is_true(kwargs.get('return_dict_always', False))
@ -293,7 +304,9 @@ def list_available(*names, **kwargs):
def version(*names, **kwargs):
'''
Returns a version if the package is installed, else returns an empty string
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
Args:
name (str): One or more package names
@ -303,10 +316,11 @@ def version(*names, **kwargs):
refresh (bool): Refresh package metadata. Default ``False``.
Returns:
str: version string when a single package is specified.
dict: The package name(s) with the installed versions.
.. code-block:: cfg
{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]}
CLI Example:
@ -315,19 +329,25 @@ def version(*names, **kwargs):
salt '*' pkg.version <package name>
salt '*' pkg.version <package name01> <package name02>
'''
saltenv = kwargs.get('saltenv', 'base')
installed_pkgs = list_pkgs(refresh=kwargs.get('refresh', False))
available_pkgs = get_repo_data(saltenv).get('repo')
'''
# The standard is to return an empty string even if the name is not valid
# TODO: Look at returning an error across all platforms with
# CommandExecutionError(msg,info={'errors': errors })
# available_pkgs = get_repo_data(saltenv).get('repo')
# for name in names:
# if name in available_pkgs:
# ret[name] = installed_pkgs.get(name, '')
saltenv = kwargs.get('saltenv', 'base')
installed_pkgs = list_pkgs(saltenv=saltenv, refresh=kwargs.get('refresh', False))
if len(names) == 1:
return installed_pkgs.get(names[0], '')
ret = {}
for name in names:
if name in available_pkgs:
ret[name] = installed_pkgs.get(name, '')
else:
ret[name] = 'not available'
ret[name] = installed_pkgs.get(name, '')
return ret
@ -336,7 +356,6 @@ def list_pkgs(versions_as_list=False, **kwargs):
List the packages currently installed
Args:
version_as_list (bool): Returns the versions as a list
Kwargs:
saltenv (str): The salt environment to use. Default ``base``.
@ -424,7 +443,7 @@ def _get_reg_software():
'(value not set)',
'',
None]
#encoding = locale.getpreferredencoding()
reg_software = {}
hive = 'HKLM'
@ -462,7 +481,7 @@ def _get_reg_software():
def _refresh_db_conditional(saltenv, **kwargs):
'''
Internal use only in this module, has a different set of defaults and
returns True or False. And supports check the age of the existing
returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with
Args:
@ -476,8 +495,7 @@ def _refresh_db_conditional(saltenv, **kwargs):
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
process.
Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue
@ -695,8 +713,8 @@ def genrepo(**kwargs):
verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'
of all sls files and the package names contained within.
Default ``False``.
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed
@ -739,11 +757,13 @@ def genrepo(**kwargs):
successful_verbose
)
serial = salt.payload.Serial(__opts__)
# TODO: 2016.11 has PY2 mode as 'w+b' develop has 'w+' ? PY3 is 'wb+'
# also the reading of this is 'rb' in get_repo_data()
mode = 'w+' if six.PY2 else 'wb+'
with salt.utils.fopen(repo_details.winrepo_file, mode) as repo_cache:
repo_cache.write(serial.dumps(ret))
# save reading it back again. ! this breaks due to utf8 issues
#__context__['winrepo.data'] = ret
# For some reason we cannot save ret into __context__['winrepo.data'] as this breaks due to utf-8 issues
successful_count = len(successful_verbose)
error_count = len(ret['errors'])
if verbose:
@ -778,7 +798,7 @@ def genrepo(**kwargs):
return results
def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
renderers = salt.loader.render(__opts__, __salt__)
def _failed_compile(msg):
@ -788,7 +808,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
try:
config = salt.template.compile_template(
file,
filename,
renderers,
__opts__['renderer'],
__opts__.get('renderer_blacklist', ''),
@ -803,7 +823,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if config:
revmap = {}
errors = []
pkgname_ok_list = []
for pkgname, versions in six.iteritems(config):
if pkgname in ret['repo']:
log.error(
@ -812,12 +831,12 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
)
errors.append('package \'{0}\' already defined'.format(pkgname))
break
for version, repodata in six.iteritems(versions):
for version_str, repodata in six.iteritems(versions):
# Ensure version is a string/unicode
if not isinstance(version, six.string_types):
if not isinstance(version_str, six.string_types):
msg = (
'package \'{0}\'{{0}}, version number {1} '
'is not a string'.format(pkgname, version)
'is not a string'.format(pkgname, version_str)
)
log.error(
msg.format(' within \'{0}\''.format(short_path_name))
@ -829,7 +848,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
msg = (
'package \'{0}\'{{0}}, repo data for '
'version number {1} is not defined as a dictionary '
.format(pkgname, version)
.format(pkgname, version_str)
)
log.error(
msg.format(' within \'{0}\''.format(short_path_name))
@ -840,8 +859,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if errors:
ret.setdefault('errors', {})[short_path_name] = errors
else:
if pkgname not in pkgname_ok_list:
pkgname_ok_list.append(pkgname)
ret.setdefault('repo', {}).update(config)
ret.setdefault('name_map', {}).update(revmap)
successful_verbose[short_path_name] = config.keys()
@ -916,7 +933,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
to install. (no spaces after the commas)
refresh (bool):
Boolean value representing whether or not to refresh the winrepo db
Boolean value representing whether or not to refresh the winrepo db.
Default ``False``.
pkgs (list):
A list of packages to install from a software repository. All
@ -1072,7 +1090,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
for pkg in pkg_params:
pkg_params[pkg] = {'version': pkg_params[pkg]}
if pkg_params is None or len(pkg_params) == 0:
if not pkg_params:
log.error('No package definition found')
return {}
@ -1114,11 +1132,12 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
version_num = str(version_num)
if not version_num:
# following can be version number or latest
version_num = _get_latest_pkg_version(pkginfo)
# Check if the version is already installed
if version_num in old.get(pkg_name, '').split(',') \
or (old.get(pkg_name) == 'Not Found'):
or (old.get(pkg_name, '') == 'Not Found'):
# Desired version number already installed
ret[pkg_name] = {'current': version_num}
continue
@ -1244,32 +1263,32 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
options.get('extra_install_flags', '')
)
#Compute msiexec string
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))
# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
arguments = '"{0}" /I "{1}"'.format(msiexec, cached_pkg)
if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.shlex_split(install_flags, posix=False))
arguments = '{0} ALLUSERS=1'.format(arguments)
else:
cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags, posix=False)
arguments = '"{0}"'.format(cached_pkg)
if install_flags:
arguments = '{0} {1}'.format(arguments, install_flags)
# Install the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
@ -1312,15 +1331,13 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = {'install status': 'failed'}
else:
# Combine cmd and arguments
cmd = [cmd]
cmd.extend(arguments)
# Launch the command
result = __salt__['cmd.run_all'](cmd,
cache_path,
python_shell=False,
redirect_stderr=True)
result = __salt__['cmd.run_all'](
'"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
cache_path,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
ret[pkg_name] = {'install status': 'success'}
changed.append(pkg_name)
@ -1397,14 +1414,17 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
.. versionadded:: 0.16.0
Args:
name (str): The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimted list of packages, no spaces.
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
@ -1541,6 +1561,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# Compare the hash of the cached installer to the source only if
# the file is hosted on salt:
# TODO cp.cache_file does cache and hash checking? So why do it again?
if uninstaller.startswith('salt:'):
if __salt__['cp.hash_file'](uninstaller, saltenv) != \
__salt__['cp.hash_file'](cached_pkg):
@ -1566,6 +1587,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# Get parameters for cmd
expanded_cached_pkg = str(os.path.expandvars(cached_pkg))
expanded_cache_path = str(os.path.expandvars(cache_path))
# Get uninstall flags
uninstall_flags = pkginfo[target].get('uninstall_flags', '')
@ -1574,31 +1596,31 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
#Compute msiexec string
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
cmd = msiexec
arguments = ['/x']
arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False))
arguments = '"{0}" /X "{1}"'.format(msiexec, uninstaller if uninstaller else expanded_cached_pkg)
else:
cmd = expanded_cached_pkg
arguments = salt.utils.shlex_split(uninstall_flags, posix=False)
arguments = '"{0}"'.format(expanded_cached_pkg)
if uninstall_flags:
arguments = '{0} {1}'.format(arguments, uninstall_flags)
# Uninstall the software
# Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False):
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
start_in=cache_path,
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=expanded_cache_path,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00',
@ -1610,13 +1632,11 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
log.error('Scheduled Task failed to run')
ret[pkgname] = {'uninstall status': 'failed'}
else:
# Build the install command
cmd = [cmd]
cmd.extend(arguments)
# Launch the command
result = __salt__['cmd.run_all'](
cmd,
'"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
expanded_cache_path,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
@ -1662,11 +1682,13 @@ def purge(name=None, pkgs=None, version=None, **kwargs):
name (str): The name of the package to be deleted.
version (str): The version of the package to be deleted. If this option
is used in combination with the ``pkgs`` option below, then this
version (str):
The version of the package to be deleted. If this option is
used in combination with the ``pkgs`` option below, then this
version will be applied to all targeted packages.
pkgs (list): A list of packages to delete. Must be passed as a python
pkgs (list):
A list of packages to delete. Must be passed as a python
list. The ``name`` parameter will be ignored if this option is
passed.
@ -1800,4 +1822,20 @@ def compare_versions(ver1='', oper='==', ver2=''):
salt '*' pkg.compare_versions 1.2 >= 1.3
'''
return salt.utils.compare_versions(ver1, oper, ver2)
if not ver1:
raise SaltInvocationError('compare_version, ver1 is blank')
if not ver2:
raise SaltInvocationError('compare_version, ver2 is blank')
# Support version being the special meaning of 'latest'
if ver1 == 'latest':
ver1 = str(sys.maxsize)
if ver2 == 'latest':
ver2 = str(sys.maxsize)
# Support version being the special meaning of 'Not Found'
if ver1 == 'Not Found':
ver1 = '0.0.0.0.0'
if ver2 == 'Not Found':
ver2 = '0.0.0.0.0'
return salt.utils.compare_versions(ver1, oper, ver2, ignore_epoch=True)

View File

@ -143,6 +143,17 @@ def get_printout(out, opts=None, **kwargs):
# See Issue #29796 for more information.
out = opts['output']
# Handle setting the output when --static is passed.
if not out and opts.get('static'):
if opts.get('output'):
out = opts['output']
elif opts.get('fun', '').split('.')[0] == 'state':
# --static doesn't have an output set at this point, but if we're
# running a state function and "out" hasn't already been set, we
# should set the out variable to "highstate". Otherwise state runs
# are set to "nested" below. See Issue #44556 for more information.
out = 'highstate'
if out == 'text':
out = 'txt'
elif out is None or out == '':

View File

@ -254,14 +254,14 @@ def returner(ret):
with _get_serv(ret, commit=True) as cur:
sql = '''INSERT INTO salt_returns
(fun, jid, return, id, success, full_ret, alter_time)
VALUES (%s, %s, %s, %s, %s, %s, %s)'''
VALUES (%s, %s, %s, %s, %s, %s, to_timestamp(%s))'''
cur.execute(sql, (ret['fun'], ret['jid'],
psycopg2.extras.Json(ret['return']),
ret['id'],
ret.get('success', False),
psycopg2.extras.Json(ret),
time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())))
time.time()))
except salt.exceptions.SaltMasterError:
log.critical('Could not store return with pgjsonb returner. PostgreSQL server unavailable.')
@ -278,9 +278,9 @@ def event_return(events):
tag = event.get('tag', '')
data = event.get('data', '')
sql = '''INSERT INTO salt_events (tag, data, master_id, alter_time)
VALUES (%s, %s, %s, %s)'''
VALUES (%s, %s, %s, to_timestamp(%s))'''
cur.execute(sql, (tag, psycopg2.extras.Json(data),
__opts__['id'], time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())))
__opts__['id'], time.time()))
def save_load(jid, load, minions=None):

View File

@ -686,7 +686,7 @@ class State(object):
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
if initial_pillar is not None:
if initial_pillar:
self.opts['pillar'] = initial_pillar
if self._pillar_override:
self.opts['pillar'] = salt.utils.dictupdate.merge(

View File

@ -202,7 +202,14 @@ def _check_cron(user,
return 'present'
else:
for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']:
if _cron_matched(cron, cmd, identifier):
if any([_needs_change(x, y) for x, y in
((cron['spec'], special),
(cron['identifier'], identifier),
(cron['cmd'], cmd),
(cron['comment'], comment),
(cron['commented'], commented))]):
return 'update'
return 'present'
return 'absent'
@ -349,7 +356,12 @@ def present(name,
commented=commented,
identifier=identifier)
else:
data = __salt__['cron.set_special'](user, special, name)
data = __salt__['cron.set_special'](user=user,
special=special,
cmd=name,
comment=comment,
commented=commented,
identifier=identifier)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
@ -418,7 +430,7 @@ def absent(name,
if special is None:
data = __salt__['cron.rm_job'](user, name, identifier=identifier)
else:
data = __salt__['cron.rm_special'](user, special, name)
data = __salt__['cron.rm_special'](user, name, special=special, identifier=identifier)
if data == 'absent':
ret['comment'] = "Cron {0} already absent".format(name)

View File

@ -758,7 +758,7 @@ def _check_directory_win(name,
changes = {}
if not os.path.isdir(name):
changes = {'directory': 'new'}
changes = {name: {'directory': 'new'}}
else:
# Check owner
owner = salt.utils.win_dacl.get_owner(name)
@ -883,7 +883,11 @@ def _check_dir_meta(name,
'''
Check the changes in directory metadata
'''
stats = __salt__['file.stats'](name, None, follow_symlinks)
try:
stats = __salt__['file.stats'](name, None, follow_symlinks)
except CommandExecutionError:
stats = {}
changes = {}
if not stats:
changes['directory'] = 'new'
@ -2087,6 +2091,9 @@ def managed(name,
'name': name,
'result': True}
if not name:
return _error(ret, 'Destination file name is required')
if mode is not None and salt.utils.is_windows():
return _error(ret, 'The \'mode\' option is not supported on Windows')
@ -2237,8 +2244,6 @@ def managed(name,
ret['comment'] = 'Error while applying template on contents'
return ret
if not name:
return _error(ret, 'Must provide name to file.managed')
user = _test_owner(kwargs, user=user)
if salt.utils.is_windows():
@ -2988,7 +2993,7 @@ def directory(name,
ret, _ = __salt__['file.check_perms'](
full, ret, user, group, dir_mode, follow_symlinks)
except CommandExecutionError as exc:
if not exc.strerror.endswith('does not exist'):
if not exc.strerror.startswith('Path not found'):
errors.append(exc.strerror)
if clean:

View File

@ -709,7 +709,7 @@ def edited_conf(name, lxc_conf=None, lxc_conf_unset=None):
# to keep this function around and cannot officially remove it. Progress of
# the new function will be tracked in https://github.com/saltstack/salt/issues/35523
salt.utils.warn_until(
'Oxygen',
'Fluorine',
'This state is unsuitable for setting parameters that appear more '
'than once in an LXC config file, or parameters which must appear in '
'a certain order (such as when configuring more than one network '

View File

@ -59,6 +59,7 @@ from __future__ import absolute_import
# Import python libs
import logging
import salt.utils
log = logging.getLogger(__name__)
@ -186,13 +187,14 @@ def present(name,
use_32bit_registry=use_32bit_registry)
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already configured'.\
format(vname if vname else '(Default)', name)
ret['comment'] = u'{0} in {1} is already configured' \
''.format(salt.utils.to_unicode(vname, 'utf-8') if vname else u'(Default)',
salt.utils.to_unicode(name, 'utf-8'))
return ret
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)'),
'Value': '{0}'.format(vdata)}
'Entry': u'{0}'.format(salt.utils.to_unicode(vname, 'utf-8') if vname else u'(Default)'),
'Value': salt.utils.to_unicode(vdata, 'utf-8')}
# Check for test option
if __opts__['test']:

View File

@ -65,7 +65,8 @@ def exists(name, index=None):
'''
Add the directory to the system PATH at index location
index: where the directory should be placed in the PATH (default: None)
index: where the directory should be placed in the PATH (default: None).
This is 0-indexed, so 0 means to prepend at the very start of the PATH.
[Note: Providing no index will append directory to PATH and
will not enforce its location within the PATH.]
@ -96,7 +97,7 @@ def exists(name, index=None):
try:
currIndex = sysPath.index(path)
if index:
if index is not None:
index = int(index)
if index < 0:
index = len(sysPath) + index + 1
@ -115,7 +116,7 @@ def exists(name, index=None):
except ValueError:
pass
if not index:
if index is None:
index = len(sysPath) # put it at the end
ret['changes']['added'] = '{0} will be added at index {1}'.format(name, index)
if __opts__['test']:

View File

@ -5,5 +5,6 @@
/{{route.netmask}}
{%- endif -%}
{%- if route.gateway %} via {{route.gateway}}
{%- else %} dev {{iface}}
{%- endif %}
{% endfor -%}

View File

@ -1143,10 +1143,10 @@ def format_call(fun,
continue
extra[key] = copy.deepcopy(value)
# We'll be showing errors to the users until Salt Oxygen comes out, after
# We'll be showing errors to the users until Salt Fluorine comes out, after
# which, errors will be raised instead.
warn_until(
'Oxygen',
'Fluorine',
'It\'s time to start raising `SaltInvocationError` instead of '
'returning warnings',
# Let's not show the deprecation warning on the console, there's no
@ -1183,7 +1183,7 @@ def format_call(fun,
'{0}. If you were trying to pass additional data to be used '
'in a template context, please populate \'context\' with '
'\'key: value\' pairs. Your approach will work until Salt '
'Oxygen is out.{1}'.format(
'Fluorine is out.{1}'.format(
msg,
'' if 'full' not in ret else ' Please update your state files.'
)

View File

@ -120,6 +120,8 @@ class _AtomicWFile(object):
self._fh.close()
if os.path.isfile(self._filename):
shutil.copymode(self._filename, self._tmp_filename)
st = os.stat(self._filename)
os.chown(self._tmp_filename, st.st_uid, st.st_gid)
atomic_rename(self._tmp_filename, self._filename)
def __exit__(self, exc_type, exc_value, traceback):

View File

@ -39,6 +39,16 @@ HASHES = {
HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)])
def __clean_tmp(tmp):
'''
Remove temporary files
'''
try:
salt.utils.rm_rf(tmp)
except Exception:
pass
def guess_archive_type(name):
'''
Guess an archive type (tar, zip, or rar) by its file extension
@ -116,7 +126,15 @@ def copyfile(source, dest, backup_mode='', cachedir=''):
fstat = os.stat(dest)
except OSError:
pass
shutil.move(tgt, dest)
# The move could fail if the dest has xattr protections, so delete the
# temp file in this case
try:
shutil.move(tgt, dest)
except Exception:
__clean_tmp(tgt)
raise
if fstat is not None:
os.chown(dest, fstat.st_uid, fstat.st_gid)
os.chmod(dest, fstat.st_mode)
@ -134,10 +152,7 @@ def copyfile(source, dest, backup_mode='', cachedir=''):
subprocess.call(cmd, stdout=dev_null, stderr=dev_null)
if os.path.isfile(tgt):
# The temp file failed to move
try:
os.remove(tgt)
except Exception:
pass
__clean_tmp(tgt)
def rename(src, dst):

View File

@ -334,6 +334,7 @@ import errno
import random
import yaml
import copy
import weakref
# Import Salt libs
import salt.config
@ -845,6 +846,47 @@ class Schedule(object):
if key is not 'kwargs':
kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
# Only include these when running runner modules
if self.opts['__role'] == 'master':
jid = salt.utils.jid.gen_jid()
tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
event = salt.utils.event.get_event(
self.opts['__role'],
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=None
)
func_globals = {
'__jid__': jid,
'__user__': salt.utils.get_user(),
'__tag__': tag,
'__jid_event__': weakref.proxy(namespaced_event),
}
self_functions = copy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, func)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
ret['return'] = self.functions[func](*args, **kwargs)
# runners do not provide retcode

View File

@ -197,7 +197,7 @@ def vb_get_network_adapters(machine_name=None, machine=None):
return network_adapters
def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=None):
def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=None, wait_for_pattern=None):
'''
Wait until a machine has a network address to return or quit after the timeout
@ -209,12 +209,16 @@ def vb_wait_for_network_address(timeout, step=None, machine_name=None, machine=N
@type machine_name: str
@param machine:
@type machine: IMachine
@type wait_for_pattern: str
@param wait_for_pattern:
@type machine: str
@return:
@rtype: list
'''
kwargs = {
'machine_name': machine_name,
'machine': machine
'machine': machine,
'wait_for_pattern': wait_for_pattern
}
return wait_for(vb_get_network_addresses, timeout=timeout, step=step, default=[], func_kwargs=kwargs)
@ -251,7 +255,7 @@ def vb_wait_for_session_state(xp_session, state='Unlocked', timeout=10, step=Non
wait_for(_check_session_state, timeout=timeout, step=step, default=False, func_args=args)
def vb_get_network_addresses(machine_name=None, machine=None):
def vb_get_network_addresses(machine_name=None, machine=None, wait_for_pattern=None):
'''
TODO distinguish between private and public addresses
@ -276,21 +280,38 @@ def vb_get_network_addresses(machine_name=None, machine=None):
machine = vb_get_box().findMachine(machine_name)
ip_addresses = []
# We can't trust virtualbox to give us up to date guest properties if the machine isn't running
# For some reason it may give us outdated (cached?) values
log.debug("checking for power on:")
if machine.state == _virtualboxManager.constants.MachineState_Running:
try:
total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count'))
except ValueError:
total_slots = 0
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(e.message)
log.debug("got power on:")
#wait on an arbitrary named property
#for instance use a dhcp client script to set a property via VBoxControl guestproperty set dhcp_done 1
if wait_for_pattern and not machine.getGuestPropertyValue(wait_for_pattern):
log.debug("waiting for pattern:{}:".format(wait_for_pattern))
return None
_total_slots = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')
#upon dhcp the net count drops to 0 and it takes some seconds for it to be set again
if not _total_slots:
log.debug("waiting for net count:{}:".format(wait_for_pattern))
return None
try:
total_slots = int(_total_slots)
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(e.message)
except ValueError as e:
log.debug(e.message)
return None
log.debug("returning ip_addresses:{}:".format(ip_addresses))
return ip_addresses
@ -339,6 +360,7 @@ def vb_create_machine(name=None):
def vb_clone_vm(
name=None,
clone_from=None,
clone_mode=0,
timeout=10000,
**kwargs
):
@ -370,7 +392,7 @@ def vb_clone_vm(
progress = source_machine.cloneTo(
new_machine,
0, # CloneMode
clone_mode, # CloneMode
None # CloneOptions : None = Full?
)

View File

@ -116,6 +116,41 @@ class EC2Test(ShellCase):
except AssertionError:
raise
def test_instance_rename(self):
'''
Tests creating and renaming an instance on EC2 (classic)
'''
# create the instance
rename = INSTANCE_NAME + '-rename'
instance = self.run_cloud('-p ec2-test {0} --no-deploy'.format(INSTANCE_NAME), timeout=500)
ret_str = '{0}:'.format(INSTANCE_NAME)
# check if instance returned
try:
self.assertIn(ret_str, instance)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
raise
change_name = self.run_cloud('-a rename {0} newname={1} --assume-yes'.format(INSTANCE_NAME, rename), timeout=500)
check_rename = self.run_cloud('-a show_instance {0} --assume-yes'.format(rename), [rename])
exp_results = [' {0}:'.format(rename), ' size:',
' architecture:']
try:
for result in exp_results:
self.assertIn(result, check_rename[0])
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
raise
# delete the instance
delete = self.run_cloud('-d {0} --assume-yes'.format(rename), timeout=500)
ret_str = ' shutting-down'
# check if deletion was performed appropriately
self.assertIn(ret_str, delete)
def tearDown(self):
'''
Clean up after tests

View File

@ -3,12 +3,23 @@
# Import python libs
from __future__ import absolute_import
import getpass
import grp
import pwd
import os
import shutil
import sys
# Posix only
try:
import grp
import pwd
except ImportError:
pass
# Windows only
try:
import win32file
except ImportError:
pass
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
@ -18,6 +29,16 @@ from tests.support.paths import FILES, TMP
import salt.utils
def symlink(source, link_name):
'''
Handle symlinks on Windows with Python < 3.2
'''
if salt.utils.is_windows():
win32file.CreateSymbolicLink(link_name, source)
else:
os.symlink(source, link_name)
class FileModuleTest(ModuleCase):
'''
Validate the file module
@ -25,27 +46,27 @@ class FileModuleTest(ModuleCase):
def setUp(self):
self.myfile = os.path.join(TMP, 'myfile')
with salt.utils.fopen(self.myfile, 'w+') as fp:
fp.write('Hello\n')
fp.write('Hello' + os.linesep)
self.mydir = os.path.join(TMP, 'mydir/isawesome')
if not os.path.isdir(self.mydir):
# left behind... Don't fail because of this!
os.makedirs(self.mydir)
self.mysymlink = os.path.join(TMP, 'mysymlink')
if os.path.islink(self.mysymlink):
if os.path.islink(self.mysymlink) or os.path.isfile(self.mysymlink):
os.remove(self.mysymlink)
os.symlink(self.myfile, self.mysymlink)
symlink(self.myfile, self.mysymlink)
self.mybadsymlink = os.path.join(TMP, 'mybadsymlink')
if os.path.islink(self.mybadsymlink):
if os.path.islink(self.mybadsymlink) or os.path.isfile(self.mybadsymlink):
os.remove(self.mybadsymlink)
os.symlink('/nonexistentpath', self.mybadsymlink)
symlink('/nonexistentpath', self.mybadsymlink)
super(FileModuleTest, self).setUp()
def tearDown(self):
if os.path.isfile(self.myfile):
os.remove(self.myfile)
if os.path.islink(self.mysymlink):
if os.path.islink(self.mysymlink) or os.path.isfile(self.mysymlink):
os.remove(self.mysymlink)
if os.path.islink(self.mybadsymlink):
if os.path.islink(self.mybadsymlink) or os.path.isfile(self.mybadsymlink):
os.remove(self.mybadsymlink)
shutil.rmtree(self.mydir, ignore_errors=True)
super(FileModuleTest, self).tearDown()
@ -173,3 +194,20 @@ class FileModuleTest(ModuleCase):
ret = self.run_function('file.source_list', ['file://' + self.myfile,
'filehash', 'base'])
self.assertEqual(list(ret), ['file://' + self.myfile, 'filehash'])
def test_file_line_changes_format(self):
'''
Test file.line changes output formatting.
Issue #41474
'''
ret = self.minion_run('file.line', self.myfile, 'Goodbye',
mode='insert', after='Hello')
self.assertIn('Hello' + os.linesep + '+Goodbye', ret)
def test_file_line_content(self):
self.minion_run('file.line', self.myfile, 'Goodbye',
mode='insert', after='Hello')
with salt.utils.fopen(self.myfile, 'r') as fp:
content = fp.read()
self.assertEqual(content, 'Hello' + os.linesep + 'Goodbye' + os.linesep)

View File

@ -14,6 +14,7 @@ from salt.ext.six.moves import range
@skip_if_not_root
@destructiveTest
class GroupModuleTest(ModuleCase):
'''
Validate the linux group system module
@ -39,7 +40,6 @@ class GroupModuleTest(ModuleCase):
)
)
@destructiveTest
def tearDown(self):
'''
Reset to original settings
@ -57,33 +57,30 @@ class GroupModuleTest(ModuleCase):
for x in range(size)
)
@destructiveTest
def test_add(self):
'''
Test the add group function
'''
#add a new group
# add a new group
self.assertTrue(self.run_function('group.add', [self._group, self._gid]))
group_info = self.run_function('group.info', [self._group])
self.assertEqual(group_info['name'], self._group)
self.assertEqual(group_info['gid'], self._gid)
#try adding the group again
# try adding the group again
self.assertFalse(self.run_function('group.add', [self._group, self._gid]))
@destructiveTest
def test_delete(self):
'''
Test the delete group function
'''
self.assertTrue(self.run_function('group.add', [self._group]))
#correct functionality
# correct functionality
self.assertTrue(self.run_function('group.delete', [self._group]))
#group does not exist
# group does not exist
self.assertFalse(self.run_function('group.delete', [self._no_group]))
@destructiveTest
def test_info(self):
'''
Test the info group function
@ -97,7 +94,6 @@ class GroupModuleTest(ModuleCase):
self.assertEqual(group_info['gid'], self._gid)
self.assertIn(self._user, group_info['members'])
@destructiveTest
def test_chgid(self):
'''
Test the change gid function
@ -107,7 +103,6 @@ class GroupModuleTest(ModuleCase):
group_info = self.run_function('group.info', [self._group])
self.assertEqual(group_info['gid'], self._new_gid)
@destructiveTest
def test_adduser(self):
'''
Test the add user to group function
@ -117,14 +112,13 @@ class GroupModuleTest(ModuleCase):
self.assertTrue(self.run_function('group.adduser', [self._group, self._user]))
group_info = self.run_function('group.info', [self._group])
self.assertIn(self._user, group_info['members'])
#try add a non existing user
# try add a non existing user
self.assertFalse(self.run_function('group.adduser', [self._group, self._no_user]))
#try add a user to non existing group
# try add a user to non existing group
self.assertFalse(self.run_function('group.adduser', [self._no_group, self._user]))
#try add a non existing user to a non existing group
# try add a non existing user to a non existing group
self.assertFalse(self.run_function('group.adduser', [self._no_group, self._no_user]))
@destructiveTest
def test_deluser(self):
'''
Test the delete user from group function
@ -136,7 +130,6 @@ class GroupModuleTest(ModuleCase):
group_info = self.run_function('group.info', [self._group])
self.assertNotIn(self._user, group_info['members'])
@destructiveTest
def test_members(self):
'''
Test the members function
@ -150,7 +143,6 @@ class GroupModuleTest(ModuleCase):
self.assertIn(self._user, group_info['members'])
self.assertIn(self._user1, group_info['members'])
@destructiveTest
def test_getent(self):
'''
Test the getent function

View File

@ -7,12 +7,14 @@
from __future__ import absolute_import
import random
import string
import os
# Import Salt Testing Libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root
# Import Salt Libs
import salt.utils
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
@ -148,6 +150,86 @@ class MacUserModuleTest(ModuleCase):
self.run_function('user.delete', [CHANGE_USER])
raise
def test_mac_user_enable_auto_login(self):
'''
Tests mac_user functions that enable auto login
'''
# Make sure auto login is disabled before we start
if self.run_function('user.get_auto_login'):
self.skipTest('Auto login already enabled')
try:
# Does enable return True
self.assertTrue(
self.run_function('user.enable_auto_login',
['Spongebob', 'Squarepants']))
# Did it set the user entry in the plist file
self.assertEqual(
self.run_function('user.get_auto_login'),
'Spongebob')
# Did it generate the `/etc/kcpassword` file
self.assertTrue(os.path.exists('/etc/kcpassword'))
# Are the contents of the file correct
test_data = b'.\xf8\'B\xa0\xd9\xad\x8b\xcd\xcdl'
with salt.utils.fopen('/etc/kcpassword', 'rb') as f:
file_data = f.read()
self.assertEqual(test_data, file_data)
# Does disable return True
self.assertTrue(self.run_function('user.disable_auto_login'))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function('user.get_auto_login'))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists('/etc/kcpassword'))
finally:
# Make sure auto_login is disabled
self.assertTrue(self.run_function('user.disable_auto_login'))
# Make sure autologin is disabled
if self.run_function('user.get_auto_login'):
raise Exception('Failed to disable auto login')
def test_mac_user_disable_auto_login(self):
'''
Tests mac_user functions that disable auto login
'''
# Make sure auto login is enabled before we start
# Is there an existing setting
if self.run_function('user.get_auto_login'):
self.skipTest('Auto login already enabled')
try:
# Enable auto login for the test
self.run_function('user.enable_auto_login',
['Spongebob', 'Squarepants'])
# Make sure auto login got set up
if not self.run_function('user.get_auto_login') == 'Spongebob':
raise Exception('Failed to enable auto login')
# Does disable return True
self.assertTrue(self.run_function('user.disable_auto_login'))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function('user.get_auto_login'))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists('/etc/kcpassword'))
finally:
# Make sure auto login is disabled
self.assertTrue(self.run_function('user.disable_auto_login'))
# Make sure auto login is disabled
if self.run_function('user.get_auto_login'):
raise Exception('Failed to disable auto login')
def tearDown(self):
'''
Clean up after tests

View File

@ -109,3 +109,59 @@ class OutputReturnTest(ShellCase):
delattr(self, 'maxDiff')
else:
self.maxDiff = old_max_diff
def test_output_highstate(self):
'''
Regression tests for the highstate outputter. Calls a basic state with various
flags. Each comparison should be identical when successful.
'''
# Test basic highstate output. No frills.
expected = ['minion:', ' ID: simple-ping', ' Function: module.run',
' Name: test.ping', ' Result: True',
' Comment: Module function test.ping executed',
' Changes: ', ' ret:', ' True',
'Summary for minion', 'Succeeded: 1 (changed=1)', 'Failed: 0',
'Total states run: 1']
state_run = self.run_salt('"minion" state.sls simple-ping')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output while also passing --out=highstate.
# This is a regression test for Issue #29796
state_run = self.run_salt('"minion" state.sls simple-ping --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and running a state function.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static')
for expected_item in expected:
self.assertIn(expected_item, state_run)
# Test highstate output when passing --static and --out=highstate.
# See Issue #44556.
state_run = self.run_salt('"minion" state.sls simple-ping --static --out=highstate')
for expected_item in expected:
self.assertIn(expected_item, state_run)
def test_output_highstate_falls_back_nested(self):
    '''
    Tests outputter when passing --out=highstate with a non-state call. This should
    fall back to "nested" output.
    '''
    actual = self.run_salt('"minion" test.ping --out=highstate')
    # Nested output for a simple test.ping: minion id, then the value.
    self.assertEqual(actual, ['minion:', ' True'])
def test_static_simple(self):
    '''
    Tests passing the --static option with a basic test.ping command. This
    should be the "nested" output.
    '''
    actual = self.run_salt('"minion" test.ping --static')
    # --static on a non-state call still yields nested output.
    self.assertEqual(actual, ['minion:', ' True'])

View File

@ -2,12 +2,13 @@
# Import python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.case import ShellCase, SPMCase
class SPMTest(ShellCase):
class SPMTest(ShellCase, SPMCase):
'''
Test spm script
'''
@ -29,3 +30,47 @@ class SPMTest(ShellCase):
output = self.run_spm('doesnotexist')
for arg in expected_args:
self.assertIn(arg, ''.join(output))
def test_spm_assume_yes(self):
    '''
    test spm install with -y arg

    The SPM config is built with ``assume_yes=False`` so that the ``-y``
    flag passed on the command line is what actually confirms the install.
    '''
    config = self._spm_config(assume_yes=False)
    self._spm_build_files(config)
    spm_file = os.path.join(config['spm_build_dir'],
                            'apache-201506-2.spm')
    # Build the package, then install it non-interactively with -y.
    # (The command outputs were previously bound to unused locals.)
    self.run_spm('build {0} -c {1}'.format(self.formula_dir,
                                           self._tmp_spm))
    self.run_spm('install {0} -c {1} -y'.format(spm_file,
                                                self._tmp_spm))
    # The formula file must have been deployed under formula_path.
    self.assertTrue(os.path.exists(os.path.join(config['formula_path'],
                                                'apache', 'apache.sls')))
def test_spm_force(self):
    '''
    test spm install with -f arg

    Installs a package once, then verifies that ``-f`` forces a second
    install even though the package is already present.
    '''
    config = self._spm_config(assume_yes=False)
    self._spm_build_files(config)
    spm_file = os.path.join(config['spm_build_dir'],
                            'apache-201506-2.spm')
    # Build and perform the initial install. (These command outputs were
    # previously bound to unused/overwritten locals.)
    self.run_spm('build {0} -c {1}'.format(self.formula_dir,
                                           self._tmp_spm))
    self.run_spm('install {0} -c {1} -y'.format(spm_file,
                                                self._tmp_spm))
    self.assertTrue(os.path.exists(os.path.join(config['formula_path'],
                                                'apache', 'apache.sls')))
    # check if it forces the install after its already been installed
    install = self.run_spm('install {0} -c {1} -y -f'.format(spm_file,
                                                             self._tmp_spm))
    self.assertEqual(['... installing apache'], install)

View File

@ -67,17 +67,16 @@ def _test_managed_file_mode_keep_helper(testcase, local=False):
'''
DRY helper function to run the same test with a local or remote path
'''
rel_path = 'grail/scene33'
name = os.path.join(TMP, os.path.basename(rel_path))
grail_fs_path = os.path.join(FILES, 'file', 'base', rel_path)
grail = 'salt://' + rel_path if not local else grail_fs_path
name = os.path.join(TMP, 'scene33')
grail_fs_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene33')
grail = 'salt://grail/scene33' if not local else grail_fs_path
# Get the current mode so that we can put the file back the way we
# found it when we're done.
grail_fs_mode = os.stat(grail_fs_path).st_mode
initial_mode = 504 # 0770 octal
new_mode_1 = 384 # 0600 octal
new_mode_2 = 420 # 0644 octal
grail_fs_mode = int(testcase.run_function('file.get_mode', [grail_fs_path]), 8)
initial_mode = 0o770
new_mode_1 = 0o600
new_mode_2 = 0o644
# Set the initial mode, so we can be assured that when we set the mode
# to "keep", we're actually changing the permissions of the file to the
@ -568,6 +567,84 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
if os.path.exists('/tmp/sudoers'):
os.remove('/tmp/sudoers')
def test_managed_local_source_with_source_hash(self):
    '''
    Make sure that we enforce the source_hash even with local files

    Runs file.managed with a deliberately wrong hash (must fail, no
    changes) and then with the correct hash (must succeed), for both the
    ``file://`` and bare-path forms of a local source, with and without a
    pre-existing destination file.
    '''
    name = os.path.join(TMP, 'local_source_with_source_hash')
    local_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene33')
    # SHA1 of the scene33 fixture file.
    actual_hash = '567fd840bf1548edc35c48eb66cdd78bfdfcccff'
    # Reverse the actual hash to build a guaranteed-wrong checksum.
    bad_hash = actual_hash[::-1]

    def remove_file():
        # Best-effort delete of the destination; a missing file is fine,
        # any other OSError is a real problem and is re-raised.
        try:
            os.remove(name)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

    def do_test(clean=False):
        # Exercise both source spellings for the same local path.
        for proto in ('file://', ''):
            source = proto + local_path
            log.debug('Trying source %s', source)
            try:
                # 1) Wrong hash: the state must fail with no changes and
                #    report a checksum mismatch.
                ret = self.run_state(
                    'file.managed',
                    name=name,
                    source=source,
                    source_hash='sha1={0}'.format(bad_hash))
                self.assertSaltFalseReturn(ret)
                ret = ret[next(iter(ret))]
                # Shouldn't be any changes
                self.assertFalse(ret['changes'])
                # Check that we identified a hash mismatch
                self.assertIn(
                    'does not match actual checksum', ret['comment'])
                # 2) Correct hash: the state must succeed.
                ret = self.run_state(
                    'file.managed',
                    name=name,
                    source=source,
                    source_hash='sha1={0}'.format(actual_hash))
                self.assertSaltTrueReturn(ret)
            finally:
                # Optionally reset between protocol iterations.
                if clean:
                    remove_file()

    # Start from a clean slate.
    remove_file()
    log.debug('Trying with nonexistant destination file')
    do_test()
    log.debug('Trying with destination file already present')
    # Pre-create an empty destination so the managed state must replace it.
    with salt.utils.fopen(name, 'w'):
        pass
    try:
        do_test(clean=False)
    finally:
        remove_file()
def test_managed_local_source_does_not_exist(self):
    '''
    Make sure that we exit gracefully when a local source doesn't exist
    '''
    dest = os.path.join(TMP, 'local_source_does_not_exist')
    missing_path = os.path.join(FILES, 'file', 'base', 'grail', 'scene99')
    # Exercise both the file:// and bare-path spellings of the source.
    for prefix in ('file://', ''):
        source = prefix + missing_path
        log.debug('Trying source %s', source)
        ret = self.run_state(
            'file.managed',
            name=dest,
            source=source)
        self.assertSaltFalseReturn(ret)
        state_ret = ret[next(iter(ret))]
        # A missing source must not produce any changes ...
        self.assertFalse(state_ret['changes'])
        # ... and the comment must say the source does not exist.
        self.assertIn('does not exist', state_ret['comment'])
def test_directory(self):
'''
file.directory
@ -585,19 +662,29 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
try:
tmp_dir = os.path.join(TMP, 'pgdata')
sym_dir = os.path.join(TMP, 'pg_data')
os.mkdir(tmp_dir, 0o700)
os.symlink(tmp_dir, sym_dir)
ret = self.run_state(
'file.directory', test=True, name=sym_dir, follow_symlinks=True,
mode=700
)
if IS_WINDOWS:
self.run_function('file.mkdir', [tmp_dir, 'Administrators'])
else:
os.mkdir(tmp_dir, 0o700)
self.run_function('file.symlink', [tmp_dir, sym_dir])
if IS_WINDOWS:
ret = self.run_state(
'file.directory', test=True, name=sym_dir,
follow_symlinks=True, win_owner='Administrators')
else:
ret = self.run_state(
'file.directory', test=True, name=sym_dir,
follow_symlinks=True, mode=700)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
self.run_function('file.remove', [tmp_dir])
if os.path.islink(sym_dir):
os.unlink(sym_dir)
self.run_function('file.remove', [sym_dir])
@skip_if_not_root
@skipIf(IS_WINDOWS, 'Mode not available in Windows')
@ -1592,25 +1679,24 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
'''
fname = 'append_issue_1864_makedirs'
name = os.path.join(TMP, fname)
try:
self.assertFalse(os.path.exists(name))
except AssertionError:
os.remove(name)
# Make sure the file is not there to begin with
if os.path.isfile(name):
self.run_function('file.remove', [name])
try:
# Non existing file get's touched
if os.path.isfile(name):
# left over
os.remove(name)
ret = self.run_state(
'file.append', name=name, text='cheese', makedirs=True
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isfile(name):
os.remove(name)
self.run_function('file.remove', [name])
# Nested directory and file get's touched
name = os.path.join(TMP, 'issue_1864', fname)
try:
ret = self.run_state(
'file.append', name=name, text='cheese', makedirs=True
@ -1618,20 +1704,17 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
finally:
if os.path.isfile(name):
os.remove(name)
self.run_function('file.remove', [name])
# Parent directory exists but file does not and makedirs is False
try:
# Parent directory exists but file does not and makedirs is False
ret = self.run_state(
'file.append', name=name, text='cheese'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(name))
finally:
shutil.rmtree(
os.path.join(TMP, 'issue_1864'),
ignore_errors=True
)
self.run_function('file.remove', [os.path.join(TMP, 'issue_1864')])
def test_prepend_issue_27401_makedirs(self):
'''
@ -1966,19 +2049,21 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_function('state.sls', mods='issue-8343')
for name, step in six.iteritems(ret):
self.assertSaltTrueReturn({name: step})
with salt.utils.fopen(testcase_filedest) as fp_:
contents = fp_.read().split(os.linesep)
self.assertEqual(
['#-- start salt managed zonestart -- PLEASE, DO NOT EDIT',
'foo',
'#-- end salt managed zonestart --',
'#',
'#-- start salt managed zoneend -- PLEASE, DO NOT EDIT',
'bar',
'#-- end salt managed zoneend --',
''],
contents
)
expected = [
'#-- start salt managed zonestart -- PLEASE, DO NOT EDIT',
'foo',
'#-- end salt managed zonestart --',
'#',
'#-- start salt managed zoneend -- PLEASE, DO NOT EDIT',
'bar',
'#-- end salt managed zoneend --',
'']
self.assertEqual(expected, contents)
finally:
if os.path.isdir(testcase_filedest):
os.unlink(testcase_filedest)

View File

@ -628,7 +628,7 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
description: Formula for installing Apache
'''))
def _spm_config(self):
def _spm_config(self, assume_yes=True):
self._tmp_spm = tempfile.mkdtemp()
config = self.get_temp_config('minion', **{
'spm_logfile': os.path.join(self._tmp_spm, 'log'),
@ -641,10 +641,10 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
'spm_db': os.path.join(self._tmp_spm, 'packages.db'),
'extension_modules': os.path.join(self._tmp_spm, 'modules'),
'file_roots': {'base': [self._tmp_spm, ]},
'formula_path': os.path.join(self._tmp_spm, 'spm'),
'formula_path': os.path.join(self._tmp_spm, 'salt'),
'pillar_path': os.path.join(self._tmp_spm, 'pillar'),
'reactor_path': os.path.join(self._tmp_spm, 'reactor'),
'assume_yes': True,
'assume_yes': True if assume_yes else False,
'force': False,
'verbose': False,
'cache': 'localfs',
@ -652,6 +652,16 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
'spm_repo_dups': 'ignore',
'spm_share_dir': os.path.join(self._tmp_spm, 'share'),
})
import salt.utils
import yaml
if not os.path.isdir(config['formula_path']):
os.makedirs(config['formula_path'])
with salt.utils.fopen(os.path.join(self._tmp_spm, 'spm'), 'w') as fp:
fp.write(yaml.dump(config))
return config
def _spm_create_update_repo(self, config):

View File

@ -25,6 +25,16 @@ import salt.grains.core as core
# Import 3rd-party libs
import salt.ext.six as six
# Globals
IPv4Address = salt.ext.ipaddress.IPv4Address
IPv6Address = salt.ext.ipaddress.IPv6Address
IP4_LOCAL = '127.0.0.1'
IP4_ADD1 = '10.0.0.1'
IP4_ADD2 = '10.0.0.2'
IP6_LOCAL = '::1'
IP6_ADD1 = '2001:4860:4860::8844'
IP6_ADD2 = '2001:4860:4860::8888'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
@ -462,3 +472,127 @@ PATCHLEVEL = 3
self.assertEqual(os_grains.get('osrelease'), os_release_map['osrelease'])
self.assertListEqual(list(os_grains.get('osrelease_info')), os_release_map['osrelease_info'])
self.assertEqual(os_grains.get('osmajorrelease'), os_release_map['osmajorrelease'])
def _check_ipaddress(self, value, ip_v):
    '''
    check if ip address in a list is valid
    '''
    # Pick the salt.utils.network validator for this IP family
    # ('4' -> is_ipv4, '6' -> is_ipv6).
    validator_name = 'is_ipv{0}'.format(ip_v)
    for addr in value:
        assert isinstance(addr, six.string_types)
        self.assertTrue(getattr(salt.utils.network, validator_name)(addr))
def _check_empty(self, key, value, empty):
'''
if empty is False and value does not exist assert error
if empty is True and value exists assert error
'''
if not empty and not value:
raise Exception("{0} is empty, expecting a value".format(key))
elif empty and value:
raise Exception("{0} is suppose to be empty. value: {1} \
exists".format(key, value))
@skipIf(not salt.utils.is_linux(), 'System is not Linux')
def test_fqdn_return(self):
    '''
    test ip4 and ip6 return values
    '''
    # Both address families are populated, so neither may come back empty.
    self._run_fqdn_tests(
        [IP4_LOCAL, IP4_ADD1, IP4_ADD2],
        [IP6_LOCAL, IP6_ADD1, IP6_ADD2],
        ip4_empty=False,
        ip6_empty=False)
@skipIf(not salt.utils.is_linux(), 'System is not Linux')
def test_fqdn6_empty(self):
    '''
    test when ip6 is empty
    '''
    # Only IPv4 addresses are reported; the IPv6 side stays empty.
    self._run_fqdn_tests(
        [IP4_LOCAL, IP4_ADD1, IP4_ADD2],
        [],
        ip4_empty=False)
@skipIf(not salt.utils.is_linux(), 'System is not Linux')
def test_fqdn4_empty(self):
    '''
    test when ip4 is empty
    '''
    # Only IPv6 addresses are reported; the IPv4 side stays empty.
    self._run_fqdn_tests(
        [],
        [IP6_LOCAL, IP6_ADD1, IP6_ADD2],
        ip6_empty=False)
@skipIf(not salt.utils.is_linux(), 'System is not Linux')
def test_fqdn_all_empty(self):
    '''
    test when both ip4 and ip6 are empty
    '''
    # No addresses at all; both families are expected to be empty.
    self._run_fqdn_tests([], [])
def _run_fqdn_tests(self, net_ip4_mock, net_ip6_mock,
                    ip6_empty=True, ip4_empty=True):
    # Shared driver for the fqdn grain tests: mocks the network address
    # helpers and socket.getaddrinfo, then validates each key returned
    # by core.ip_fqdn().
    #
    # :param net_ip4_mock: IPv4 address strings that
    #     salt.utils.network.ip_addrs should report
    # :param net_ip6_mock: IPv6 address strings that
    #     salt.utils.network.ip_addrs6 should report
    # :param ip6_empty: whether the IPv6 result keys are expected empty
    # :param ip4_empty: whether the IPv4 result keys are expected empty
    def _check_type(key, value, ip4_empty, ip6_empty):
        '''
        check type and other checks
        '''
        assert isinstance(value, list)
        # Dispatch on the key name: a '4' or '6' in the grain key picks
        # the address-family-specific checks.
        if '4' in key:
            self._check_empty(key, value, ip4_empty)
            self._check_ipaddress(value, ip_v='4')
        elif '6' in key:
            self._check_empty(key, value, ip6_empty)
            self._check_ipaddress(value, ip_v='6')

    # getaddrinfo-style result tuples:
    # (family, type, proto, canonname, sockaddr) — family 2 is AF_INET,
    # 10 is AF_INET6 (Linux values; these tests are Linux-gated).
    ip4_mock = [(2, 1, 6, '', (IP4_ADD1, 0)),
                (2, 3, 0, '', (IP4_ADD2, 0))]
    ip6_mock = [(10, 1, 6, '', (IP6_ADD1, 0, 0, 0)),
                (10, 3, 0, '', (IP6_ADD2, 0, 0, 0))]

    with patch.dict(core.__opts__, {'ipv6': False}):
        with patch.object(salt.utils.network, 'ip_addrs',
                          MagicMock(return_value=net_ip4_mock)):
            with patch.object(salt.utils.network, 'ip_addrs6',
                              MagicMock(return_value=net_ip6_mock)):
                # side_effect list: first call serves the IPv4 lookup,
                # second the IPv6 lookup.
                with patch.object(core.socket, 'getaddrinfo', side_effect=[ip4_mock, ip6_mock]):
                    get_fqdn = core.ip_fqdn()
                    ret_keys = ['fqdn_ip4', 'fqdn_ip6', 'ipv4', 'ipv6']
                    for key in ret_keys:
                        value = get_fqdn[key]
                        _check_type(key, value, ip4_empty, ip6_empty)
@skipIf(not salt.utils.is_linux(), 'System is not Linux')
def test_dns_return(self):
    '''
    test the return for a dns grain. test for issue:
    https://github.com/saltstack/salt/issues/41230
    '''
    # parse_resolv() hands back ipaddress objects ...
    resolv_mock = {
        'domain': '',
        'sortlist': [],
        'nameservers': [IPv4Address(IP4_ADD1), IPv6Address(IP6_ADD1)],
        'ip4_nameservers': [IPv4Address(IP4_ADD1)],
        'search': ['test.saltstack.com'],
        'ip6_nameservers': [IPv6Address(IP6_ADD1)],
        'options': [],
    }
    # ... while the dns grain must contain plain strings.
    expected = {
        'dns': {
            'domain': '',
            'sortlist': [],
            'nameservers': [IP4_ADD1, IP6_ADD1],
            'ip4_nameservers': [IP4_ADD1],
            'search': ['test.saltstack.com'],
            'ip6_nameservers': [IP6_ADD1],
            'options': [],
        }
    }
    self._run_dns_test(resolv_mock, expected)
def _run_dns_test(self, resolv_mock, ret):
    # Driver for the dns grain test: forces a non-Windows, IPv4-only
    # environment, mocks resolv.conf parsing to return ``resolv_mock``,
    # and compares the output of core.dns() against ``ret``.
    with patch.object(salt.utils, 'is_windows',
                      MagicMock(return_value=False)):
        with patch.dict(core.__opts__, {'ipv6': False}):
            with patch.object(salt.utils.dns, 'parse_resolv',
                              MagicMock(return_value=resolv_mock)):
                get_dns = core.dns()
                self.assertEqual(get_dns, ret)

View File

@ -698,9 +698,9 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
with patch.object(state, '_check_queue', mock):
self.assertEqual(state.top("reverse_top.sls"), "A")
mock = MagicMock(side_effect=[False, True, True])
with patch.object(state, '_check_pillar', mock):
with patch.dict(state.__pillar__, {"_errors": "E"}):
mock = MagicMock(side_effect=[['E'], None, None])
with patch.object(state, '_get_pillar_errors', mock):
with patch.dict(state.__pillar__, {"_errors": ['E']}):
self.assertListEqual(state.top("reverse_top.sls"), ret)
with patch.dict(state.__opts__, {"test": "A"}):
@ -857,14 +857,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
True),
["A"])
mock = MagicMock(side_effect=[False,
True,
True,
True,
True])
with patch.object(state, '_check_pillar', mock):
mock = MagicMock(side_effect=[['E', '1'], None, None, None, None])
with patch.object(state, '_get_pillar_errors', mock):
with patch.dict(state.__context__, {"retcode": 5}):
with patch.dict(state.__pillar__, {"_errors": "E1"}):
with patch.dict(state.__pillar__, {"_errors": ['E', '1']}):
self.assertListEqual(state.sls("core,edit.vim dev",
None,
None,
@ -979,3 +975,62 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
MockJson.flag = False
with patch('salt.utils.fopen', mock_open()):
self.assertTrue(state.pkg(tar_file, 0, "md5"))
def test_get_pillar_errors_CC(self):
    '''
    Test _get_pillar_errors function.
    CC: External clean, Internal clean
    :return:
    '''
    # With no errors in either pillar, the result is None no matter
    # how (or whether) 'force' is set.
    pillar_pairs = (
        ({'foo': 'bar'}, {'fred': 'baz'}),
        ({'foo': 'bar'}, None),
        ({}, {'fred': 'baz'}),
    )
    for internal, external in pillar_pairs:
        with patch('salt.modules.state.__pillar__', internal):
            for opts in ({'force': True}, {'force': False}, {}):
                assert state._get_pillar_errors(kwargs=opts, pillar=external) is None
def test_get_pillar_errors_EC(self):
    '''
    Test _get_pillar_errors function.
    EC: External erroneous, Internal clean
    :return:
    '''
    errors = ['failure', 'everywhere']
    pillar_pairs = (
        ({'foo': 'bar'}, {'fred': 'baz', '_errors': errors}),
        ({}, {'fred': 'baz', '_errors': errors}),
    )
    for internal, external in pillar_pairs:
        with patch('salt.modules.state.__pillar__', internal):
            # force=True suppresses the errors; otherwise they surface.
            for opts, expected in (({'force': True}, None),
                                   ({'force': False}, errors),
                                   ({}, errors)):
                assert expected == state._get_pillar_errors(kwargs=opts, pillar=external)
def test_get_pillar_errors_EE(self):
    '''
    Test _get_pillar_errors function.
    EE: External erroneous, Internal erroneous
    :return:
    '''
    errors = ['failure', 'everywhere']
    # Both pillars carry the same '_errors' payload.
    for int_pillar, ext_pillar in [({'foo': 'bar', '_errors': errors}, {'fred': 'baz', '_errors': errors})]:
        with patch('salt.modules.state.__pillar__', int_pillar):
            # force=True suppresses the errors; otherwise they surface.
            for opts, res in [({'force': True}, None),
                              ({'force': False}, errors),
                              ({}, errors)]:
                assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)
def test_get_pillar_errors_CE(self):
    '''
    Test _get_pillar_errors function.
    CE: External clean, Internal erroneous
    :return:
    '''
    errors = ['failure', 'everywhere']
    # Only the internal pillar carries '_errors'; the external pillar is
    # clean or absent.
    for int_pillar, ext_pillar in [({'foo': 'bar', '_errors': errors}, {'fred': 'baz'}),
                                   ({'foo': 'bar', '_errors': errors}, None)]:
        with patch('salt.modules.state.__pillar__', int_pillar):
            # force=True suppresses the errors; otherwise they surface.
            for opts, res in [({'force': True}, None),
                              ({'force': False}, errors),
                              ({}, errors)]:
                assert res == state._get_pillar_errors(kwargs=opts, pillar=ext_pillar)

View File

@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Shane Lee <slee@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
import salt.utils
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinFileTestCase(TestCase):
    '''
    Test cases for salt.modules.win_file
    '''
    # Canned return payload (currently unused by the tests below).
    FAKE_RET = {'fake': 'ret data'}
    # A path guaranteed not to exist, spelled per-platform.
    if salt.utils.is_windows():
        FAKE_PATH = os.sep.join(['C:', 'path', 'does', 'not', 'exist'])
    else:
        FAKE_PATH = os.sep.join(['path', 'does', 'not', 'exist'])

    def test_issue_43328_stats(self):
        '''
        Make sure that a CommandExecutionError is raised if the file does NOT
        exist
        '''
        with patch('os.path.exists', return_value=False):
            with self.assertRaises(CommandExecutionError):
                win_file.stats(self.FAKE_PATH)

    def test_issue_43328_check_perms_no_ret(self):
        '''
        Make sure that a CommandExecutionError is raised if the file does NOT
        exist
        '''
        with patch('os.path.exists', return_value=False):
            with self.assertRaises(CommandExecutionError):
                win_file.check_perms(self.FAKE_PATH)

View File

@ -577,7 +577,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
'file.copy': mock_cp,
'file.manage_file': mock_ex,
'cmd.run_all': mock_cmd_fail}):
comt = ('Must provide name to file.managed')
comt = ('Destination file name is required')
ret.update({'comment': comt, 'name': '', 'pchanges': {}})
self.assertDictEqual(filestate.managed(''), ret)
@ -743,7 +743,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
mock_check = MagicMock(return_value=(
None,
'The directory "{0}" will be changed'.format(name),
{'directory': 'new'}))
{name: {'directory': 'new'}}))
mock_error = CommandExecutionError
with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t,
'file.user_to_uid': mock_uid,
@ -801,16 +801,15 @@ class TestFileState(TestCase, LoaderModuleMockMixin):
group=group),
ret)
with patch.object(os.path, 'isfile', mock_f):
with patch.object(os.path, 'isdir', mock_f):
with patch.dict(filestate.__opts__, {'test': True}):
if salt.utils.is_windows():
comt = 'The directory "{0}" will be changed' \
''.format(name)
p_chg = {'directory': 'new'}
else:
comt = ('The following files will be changed:\n{0}:'
' directory - new\n'.format(name))
p_chg = {'/etc/grub.conf': {'directory': 'new'}}
p_chg = {'/etc/grub.conf': {'directory': 'new'}}
ret.update({
'comment': comt,
'result': None,

View File

@ -131,3 +131,51 @@ class MinionTestCase(TestCase):
self.assertEqual(minion.jid_queue, [456, 789])
finally:
minion.destroy()
def test_beacons_before_connect(self):
    '''
    Tests that the 'beacons_before_connect' option causes the beacons to be initialized before connect.
    '''
    # Stub out the master handshake and process management;
    # sync_connect_master raises RuntimeError so tune_in() stops right
    # after its pre-connect setup phase.
    with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
            patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
            patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
            patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
        mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
        mock_opts['beacons_before_connect'] = True
        try:
            minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
            try:
                minion.tune_in(start=True)
            except RuntimeError:
                # Expected: raised by the mocked sync_connect_master.
                pass
            # Make sure beacons are initialized but the scheduler is not
            self.assertTrue('beacons' in minion.periodic_callbacks)
            self.assertTrue('schedule' not in minion.periodic_callbacks)
        finally:
            minion.destroy()
def test_scheduler_before_connect(self):
    '''
    Tests that the 'scheduler_before_connect' option causes the scheduler to be initialized before connect.
    '''
    # Stub out the master handshake and process management;
    # sync_connect_master raises RuntimeError so tune_in() stops right
    # after its pre-connect setup phase.
    with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
            patch('salt.minion.Minion.sync_connect_master', MagicMock(side_effect=RuntimeError('stop execution'))), \
            patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
            patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)):
        mock_opts = copy.copy(salt.config.DEFAULT_MINION_OPTS)
        mock_opts['scheduler_before_connect'] = True
        try:
            minion = salt.minion.Minion(mock_opts, io_loop=tornado.ioloop.IOLoop())
            try:
                minion.tune_in(start=True)
            except RuntimeError:
                # Expected: raised by the mocked sync_connect_master.
                pass
            # Make sure the scheduler is initialized but the beacons are not
            self.assertTrue('schedule' in minion.periodic_callbacks)
            self.assertTrue('beacons' not in minion.periodic_callbacks)
        finally:
            minion.destroy()