Merge branch 'develop' into virtualenv_mod

This commit is contained in:
Julien Cigar 2015-08-07 14:44:00 +02:00
commit d9b5018700
37 changed files with 2678 additions and 700 deletions

View File

@ -356,12 +356,12 @@
# If this is set to True the first newline after a Jinja block is removed # If this is set to True the first newline after a Jinja block is removed
# (block, not variable tag!). Defaults to False, corresponds to the Jinja # (block, not variable tag!). Defaults to False, corresponds to the Jinja
# environment init variable "trim_blocks". # environment init variable "trim_blocks".
# jinja_trim_blocks: False #jinja_trim_blocks: False
# #
# If this is set to True leading spaces and tabs are stripped from the start # If this is set to True leading spaces and tabs are stripped from the start
# of a line to a block. Defaults to False, corresponds to the Jinja # of a line to a block. Defaults to False, corresponds to the Jinja
# environment init variable "lstrip_blocks". # environment init variable "lstrip_blocks".
# jinja_lstrip_blocks: False #jinja_lstrip_blocks: False
# The failhard option tells the minions to stop immediately after the first # The failhard option tells the minions to stop immediately after the first
# failure detected in the state execution, defaults to False # failure detected in the state execution, defaults to False
@ -381,7 +381,7 @@
#state_output: full #state_output: full
# Automatically aggregate all states that have support for mod_aggregate by # Automatically aggregate all states that have support for mod_aggregate by
# setting to True. Or pass a list of state module names to automatically # setting to 'True'. Or pass a list of state module names to automatically
# aggregate just those types. # aggregate just those types.
# #
# state_aggregate: # state_aggregate:
@ -389,6 +389,11 @@
# #
#state_aggregate: False #state_aggregate: False
# Send progress events as each function in a state run completes execution
# by setting to 'True'. Progress events are in the format
# 'salt/job/<JID>/prog/<MID>/<RUN NUM>'.
#state_events: False
##### File Server settings ##### ##### File Server settings #####
########################################## ##########################################
# Salt runs a lightweight file server written in zeromq to deliver files to # Salt runs a lightweight file server written in zeromq to deliver files to

View File

@ -908,6 +908,41 @@ If set to 'changes', the output will be full unless the state didn't change.
state_output: full state_output: full
.. conf_master:: state_aggregate
``state_aggregate``
-------------------
Default: ``False``
Automatically aggregate all states that have support for mod_aggregate by
setting to ``True``. Or pass a list of state module names to automatically
aggregate just those types.
.. code-block:: yaml
state_aggregate:
- pkg
.. code-block:: yaml
state_aggregate: True
.. conf_master:: state_events
``state_events``
----------------
Default: ``False``
Send progress events as each function in a state run completes execution
by setting to ``True``. Progress events are in the format
``salt/job/<JID>/prog/<MID>/<RUN NUM>``.
.. code-block:: yaml
state_events: True
.. conf_master:: yaml_utf8 .. conf_master:: yaml_utf8
``yaml_utf8`` ``yaml_utf8``
@ -2308,10 +2343,21 @@ master, specify the higher level master with this configuration value.
syndic_master: masterofmasters syndic_master: masterofmasters
You can optionally connect a syndic to multiple higher level masters by
setting the 'syndic_master' value to a list:
.. code-block:: yaml
syndic_master:
- masterofmasters1
- masterofmasters2
Each higher level master must be set up in a multimaster configuration.
.. conf_master:: syndic_master_port .. conf_master:: syndic_master_port
``syndic_master_port`` ``syndic_master_port``
----------------------- ----------------------
Default: ``4506`` Default: ``4506``

View File

@ -553,7 +553,7 @@ be able to execute a certain module. The sys module is built into the minion
and cannot be disabled. and cannot be disabled.
This setting can also tune the minion, as all modules are loaded into ram This setting can also tune the minion, as all modules are loaded into ram
disabling modules will lover the minion's ram footprint. disabling modules will lower the minion's ram footprint.
.. code-block:: yaml .. code-block:: yaml

View File

@ -78,6 +78,11 @@ append a ``%`` to the ID:
- '*': - '*':
- 'pkg.*' - 'pkg.*'
.. warning::
All users that have external authentication privileges are allowed to run
:mod:`saltutil.find_job <salt.modules.saltutil.find_job>`. Be aware
that this could inadvertently expose some data such as minion IDs.
.. _salt-token-generation: .. _salt-token-generation:
Tokens Tokens

View File

@ -86,6 +86,15 @@ Job events
:var fun: The function the minion ran. E.g., ``test.ping``. :var fun: The function the minion ran. E.g., ``test.ping``.
:var return: The data returned from the execution module. :var return: The data returned from the execution module.
.. salt:event:: salt/job/<JID>/prog/<MID>/<RUN NUM>
Fired each time a function in a state run completes execution. Must be
enabled using the :conf_master:`state_events` option.
:var data: The data returned from the state module function.
:var id: The minion ID.
:var jid: The job ID.
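As a rough illustration (not part of the documented change), the sketch below watches the master event bus for these progress events. It assumes ``state_events`` is enabled, a default master config path, and the ``salt.utils.event`` helper API of this release line.

.. code-block:: python

    # Minimal sketch: watch the master event bus for the new progress events.
    # Assumes state_events is enabled and a default master config location.
    import fnmatch

    import salt.config
    import salt.utils.event

    opts = salt.config.client_config('/etc/salt/master')
    event_bus = salt.utils.event.get_event(
        'master', sock_dir=opts['sock_dir'], transport=opts['transport'], opts=opts)

    while True:
        evt = event_bus.get_event(full=True)
        if evt and fnmatch.fnmatch(evt['tag'], 'salt/job/*/prog/*/*'):
            print('{0} -> {1}'.format(evt['tag'], evt['data']))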
.. _event-master_presence: .. _event-master_presence:
Presence events Presence events

View File

@ -63,3 +63,12 @@ of Masters" nodes can control multiple segments underneath them.
Syndics are covered in depth in :doc:`Salt Syndic </topics/topology/syndic>`. Syndics are covered in depth in :doc:`Salt Syndic </topics/topology/syndic>`.
Syndic with Multimaster
=======================
.. versionadded:: 2015.5.0
Syndic with Multimaster lets you connect a syndic to multiple masters to provide
an additional layer of redundancy in a syndic configuration.
Syndics are covered in depth in :doc:`Salt Syndic </topics/topology/syndic>`.

File diff suppressed because it is too large

View File

@ -23,6 +23,14 @@ Salt Cloud Changes
a page was changed from 20 (default) to 200 to reduce the number of API calls a page was changed from 20 (default) to 200 to reduce the number of API calls
to Digital Ocean. to Digital Ocean.
New Docker State/Module
=======================
A new docker :mod:`state <salt.states.dockerng>` and :mod:`execution module
<salt.modules.dockerng>` have been added. They will eventually take the place
of the existing state and execution module, but for now will exist alongside
them.
Git Pillar Rewritten Git Pillar Rewritten
==================== ====================
@ -31,6 +39,25 @@ with :mod:`gitfs <salt.fileserver.gitfs>`. See :mod:`here
<salt.pillar.git_pillar>` for more information on the new git_pillar <salt.pillar.git_pillar>` for more information on the new git_pillar
functionality. functionality.
Windows Software Repo Changes
=============================
The :mod:`winrepo.update_git_repos <salt.runners.winrepo.update_git_repos>`
runner has been updated to use either GitPython_ or pygit2_ to checkout the git
repositories containing repo data. Existing winrepo git checkouts should be
removed before starting up the salt-master after upgrading, if GitPython_ or
pygit2_ is installed, to allow them to be checked out again.
This enhancement also brings new functionality, see the :mod:`winrepo runner
<salt.runners.winrepo>` documentation for more information.
If neither GitPython_ nor pygit2_ are installed, then Salt will fall back to
the pre-existing behavior for :mod:`winrepo.update_git_repos
<salt.runners.winrepo.update_git_repos>`.
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
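A hedged sketch of triggering the re-checkout programmatically after the upgrade, equivalent to running ``salt-run winrepo.update_git_repos`` on the CLI; the master config path is an assumption.

.. code-block:: python

    # Sketch only: call the winrepo runner through RunnerClient.
    # Assumes the master config lives at the default /etc/salt/master path.
    import salt.config
    import salt.runner

    opts = salt.config.master_config('/etc/salt/master')
    runner = salt.runner.RunnerClient(opts)
    result = runner.cmd('winrepo.update_git_repos', [])
    print(result)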
JBoss 7 State JBoss 7 State
============= =============

View File

@ -4,102 +4,207 @@
Salt Syndic Salt Syndic
=========== ===========
The Salt Syndic interface is a powerful tool which allows for the construction The most basic or typical Salt topology consists of a single Master node
of Salt command topologies. A basic Salt setup has a Salt Master commanding a controlling a group of Minion nodes. An intermediate node type, called Syndic,
group of Salt Minions. The Syndic interface is a special passthrough when used offers greater structural flexibility and scalability in the
minion, it is run on a master and connects to another master, then the master construction of Salt topologies than topologies constructed only out of Master
that the Syndic minion is listening to can control the minions attached to and Minion node types.
the master running the syndic.
The intent for supporting many layouts is not presented with the intent of A Syndic node can be thought of as a special passthrough Minion node. A Syndic
supposing the use of any single topology, but to allow a more flexible method node consists of a ``salt-syndic`` daemon and a ``salt-master`` daemon running
of controlling many systems. on the same system. The ``salt-master`` daemon running on the Syndic node
controls a group of lower level Minion nodes and the ``salt-syndic`` daemon
connects to a higher level Master node, sometimes called a Master of Masters.
The ``salt-syndic`` daemon relays publications and events between the Master
node and the local ``salt-master`` daemon. This gives the Master node control
over the Minion nodes attached to the ``salt-master`` daemon running on the
Syndic node.
Configuring the Syndic Configuring the Syndic
====================== ======================
Since the Syndic only needs to be attached to a higher level master the To setup a Salt Syndic you need to tell the Syndic node and its Master node
configuration is very simple. On a master that is running a syndic to connect about each other. If your Master node is located at ``10.10.0.1``, then your
to a higher level master the :conf_master:`syndic_master` option needs to be configurations would be:
set in the master config file. The ``syndic_master`` option contains the
hostname or IP address of the master server that can control the master that
the syndic is running on.
The master that the syndic connects to sees the syndic as an ordinary minion, On the Syndic node:
and treats it as such. the higher level master will need to accept the syndic's
minion key like any other minion. This master will also need to set the
:conf_master:`order_masters` value in the configuration to ``True``. The
``order_masters`` option in the config on the higher level master is very
important, to control a syndic extra information needs to be sent with the
publications, the ``order_masters`` option makes sure that the extra data is
sent out.
To sum up, you have those configuration options available on the master side: .. code-block:: yaml
- :conf_master:`syndic_master`: MasterOfMaster ip/address # /etc/salt/master
- :conf_master:`syndic_master_port`: MasterOfMaster ret_port syndic_master: 10.10.0.1 # may be either an IP address or a hostname
- :conf_master:`syndic_log_file`: path to the logfile (absolute or not)
- :conf_master:`syndic_pidfile`: path to the pidfile (absolute or not)
Each Syndic must provide its own ``file_roots`` directory. Files will not be .. code-block:: yaml
automatically transferred from the master-master.
# /etc/salt/minion
# id is shared by the salt-syndic daemon and a possible salt-minion daemon
# on the Syndic node
id: my_syndic
On the Master node:
.. code-block:: yaml
# /etc/salt/master
order_masters: True
The :conf_master:`syndic_master` option tells the Syndic node where to find the
Master node in the same way that the :conf_minion:`master` option tells a
Minion node where to find a Master node.
The :conf_minion:`id` option is used by the ``salt-syndic`` daemon to identify
with the Master node and if unset will default to the hostname or IP address of
the Syndic just as with a Minion.
The :conf_master:`order_masters` option configures the Master node to send
extra information with its publications that is needed by Syndic nodes
connected directly to it.
.. note::
Each Syndic must provide its own ``file_roots`` directory. Files will not
be automatically transferred from the Master node.
Configuring the Syndic with Multimaster
=======================================
.. versionadded:: 2015.5.0
Syndic with Multimaster lets you connect a syndic to multiple masters to provide
an additional layer of redundancy in a syndic configuration.
Higher level masters should first be configured in a multimaster configuration.
See :doc:`Multimaster Tutorial </topics/tutorials/multimaster>`.
On the syndic, the :conf_master:`syndic_master` option is populated with
a list of the higher level masters.
Since each syndic is connected to each master, jobs sent from any master are
forwarded to minions that are connected to each syndic. If the ``master_id`` value
is set in the master config on the higher level masters, job results are returned
to the master that originated the request in a best effort fashion. Events/jobs
without a ``master_id`` are returned to any available master.
Running the Syndic Running the Syndic
================== ==================
The Syndic is a separate daemon that needs to be started on the master that is The ``salt-syndic`` daemon is a separate process that needs to be started in
controlled by a higher master. Starting the Syndic daemon is the same as addition to the ``salt-master`` daemon running on the Syndic node. Starting
starting the other Salt daemons. the ``salt-syndic`` daemon is the same as starting the other Salt daemons.
The Master node in many ways sees the Syndic as an ordinary Minion node. In
particular, the Master will need to accept the Syndic's Minion key as it would
for any other Minion.
On the Syndic node:
.. code-block:: bash .. code-block:: bash
# salt-syndic # salt-syndic
or
# service salt-syndic start
.. note:: On the Master node:
If you have an exceptionally large infrastructure or many layers of .. code-block:: bash
syndics, you may find that the CLI doesn't wait long enough for the syndics
to return their events. If you think this is the case, you can set the
:conf_master:`syndic_wait` value in the upper master config. The default
value is ``1``, and should work for the majority of deployments.
# salt-key -a my_syndic
The Master node will now be able to control the Minion nodes connected to the
Syndic. Only the Syndic key will be listed in the Master node's key registry
but this also means that key activity between the Syndic's Minions and the
Syndic does not encumber the Master node. In this way, the Syndic's key on the
Master node can be thought of as a placeholder for the keys of all the Minion
and Syndic nodes beneath it, giving the Master node a clear, high level
structural view on the Salt cluster.
On the Master node:
.. code-block:: bash
# salt-key -L
Accepted Keys:
my_syndic
Denied Keys:
Unaccepted Keys:
Rejected Keys:
# salt '*' test.ping
minion_1:
True
minion_2:
True
minion_4:
True
minion_3:
True
Topology Topology
======== ========
The ``salt-syndic`` is little more than a command and event forwarder. When a A Master node (a node which is itself not a Syndic to another higher level
command is issued from a higher-level master, it will be received by the Master node) must run a ``salt-master`` daemon and optionally a ``salt-minion``
configured syndics on lower-level masters, and propagated to to their minions, daemon.
and other syndics that are bound to them further down in the hierarchy. When
events and job return data are generated by minions, they aggregated back,
through the same syndic(s), to the master which issued the command.
The master sitting at the top of the hierarchy (the Master of Masters) will *not* A Syndic node must run ``salt-syndic`` and ``salt-master`` daemons and
be running the ``salt-syndic`` daemon. It will have the ``salt-master`` optionally a ``salt-minion`` daemon.
daemon running, and optionally, the ``salt-minion`` daemon. Each syndic
connected to an upper-level master will have both the ``salt-master`` and the
``salt-syndic`` daemon running, and optionally, the ``salt-minion`` daemon.
Nodes on the lowest points of the hierarchy (minions which do not propagate A Minion node must run a ``salt-minion`` daemon.
data to another level) will only have the ``salt-minion`` daemon running. There
is no need for either ``salt-master`` or ``salt-syndic`` to be running on a
standard minion.
Syndic and the CLI When a ``salt-master`` daemon issues a command, it will be received by the
================== Syndic and Minion nodes directly connected to it. A Minion node will process
the command in the way it ordinarily would. On a Syndic node, the
``salt-syndic`` daemon will relay the command to the ``salt-master`` daemon
running on the Syndic node, which then propagates the command to the Minions
and Syndics connected to it.
In order for the high-level master to return information from minions that are When events and job return data are generated by ``salt-minion`` daemons, they
below the syndic(s), the CLI requires a short wait time in order to allow the are aggregated by the ``salt-master`` daemon they are connected to, which
syndic(s) to gather responses from their minions. This value is defined in the ``salt-master`` daemon then relays the data back through its ``salt-syndic``
``syndic_wait`` and has a default of five seconds. daemon until the data reaches the Master or Syndic node that issued the command.
While it is possible to run a syndic without a minion installed on the same machine, Syndic wait
it is recommended, for a faster CLI response time, to do so. Without a minion ===========
installed on the syndic, the timeout value of ``syndic_wait`` increases
significantly - about three-fold. With a minion installed on the syndic, the CLI
timeout resides at the value defined in ``syndic_wait``.
.. note:: .. note::
To reduce the amount of time the CLI waits for minions to respond, install a minion To reduce the amount of time the CLI waits for Minions to respond, install
on the syndic or tune the value of the ``syndic_wait`` configuration. a Minion on the Syndic or tune the value of the ``syndic_wait``
configuration.
While it is possible to run a Syndic without a Minion installed on the same
system, it is recommended, for a faster CLI response time, to do so. Without a
Minion installed on the Syndic node, the timeout value of ``syndic_wait``
increases significantly - about three-fold. With a Minion installed on the
Syndic, the CLI timeout resides at the value defined in ``syndic_wait``.
.. note::
If you have a very large infrastructure or many layers of Syndics, you may
find that the CLI doesn't wait long enough for the Syndics to return their
events. If you think this is the case, you can set the
:conf_master:`syndic_wait` value in the Master configs on the Master or
Syndic nodes from which commands are executed. The default value is ``5``,
and should work for the majority of deployments.
In order for a Master or Syndic node to return information from Minions that
are below their Syndics, the CLI requires a short wait time in order to allow
the Syndics to gather responses from their Minions. This value is defined in
the :conf_master:`syndic_wait` config option and has a default of five seconds.
Syndic config options
=====================
These are the options that can be used to configure a Syndic node. Note that
other than ``id``, Syndic config options are placed in the Master config on the
Syndic node.
- :conf_minion:`id`: Syndic id (shared by the ``salt-syndic`` daemon with a
potential ``salt-minion`` daemon on the same system)
- :conf_master:`syndic_master`: Master node IP address or hostname
- :conf_master:`syndic_master_port`: Master node ret_port
- :conf_master:`syndic_log_file`: path to the logfile (absolute or not)
- :conf_master:`syndic_pidfile`: path to the pidfile (absolute or not)
- :conf_master:`syndic_wait`: time in seconds to wait on returns from this syndic

View File

@ -170,6 +170,20 @@ Add ``cache_dir: True`` when the installer requires multiple source files. The
directory containing the installer file will be recursively cached on the minion. directory containing the installer file will be recursively cached on the minion.
Only applies to salt: installer URLs. Only applies to salt: installer URLs.
Alternatively, the ``uninstaller`` can also simply repeat the URL of the msi file.
.. code-block:: yaml
7zip:
9.20.00.0:
installer: salt://win/repo/7zip/7z920-x64.msi
full_name: 7-Zip 9.20 (x64 edition)
reboot: False
install_flags: '/qn /norestart'
msiexec: True
uninstaller: salt://win/repo/7zip/7z920-x64.msi
uninstall_flags: '/qn /norestart'
.. code-block:: yaml .. code-block:: yaml
sqlexpress: sqlexpress:

View File

@ -57,7 +57,7 @@ class SPMClient(object):
elif command == 'build': elif command == 'build':
self._build(args) self._build(args)
elif command == 'update_repo': elif command == 'update_repo':
self._download_repo_metadata() self._download_repo_metadata(args)
elif command == 'create_repo': elif command == 'create_repo':
self._create_repo(args) self._create_repo(args)
elif command == 'files': elif command == 'files':
@ -235,7 +235,7 @@ class SPMClient(object):
formula_tar.close() formula_tar.close()
conn.close() conn.close()
def _traverse_repos(self, callback): def _traverse_repos(self, callback, repo_name=None):
''' '''
Traverse through all repo files and apply the functionality provided in Traverse through all repo files and apply the functionality provided in
the callback to them the callback to them
@ -260,9 +260,11 @@ class SPMClient(object):
for repo in repo_data: for repo in repo_data:
if repo_data[repo].get('enabled', True) is False: if repo_data[repo].get('enabled', True) is False:
continue continue
if repo_name is not None and repo != repo_name:
continue
callback(repo, repo_data[repo]) callback(repo, repo_data[repo])
def _download_repo_metadata(self): def _download_repo_metadata(self, args):
''' '''
Connect to all repos and download metadata Connect to all repos and download metadata
''' '''
@ -283,7 +285,8 @@ class SPMClient(object):
with salt.utils.fopen(cache_path, 'w') as cph: with salt.utils.fopen(cache_path, 'w') as cph:
msgpack.dump(metadata, cph) msgpack.dump(metadata, cph)
self._traverse_repos(_update_metadata) repo_name = args[1] if len(args) > 1 else None
self._traverse_repos(_update_metadata, repo_name)
def _get_repo_metadata(self): def _get_repo_metadata(self):
''' '''

View File

@ -1681,11 +1681,11 @@ class Map(Cloud):
return {} return {}
if not os.path.isfile(self.opts['map']): if not os.path.isfile(self.opts['map']):
raise SaltCloudNotFound( log.error(
'The specified map file does not exist: {0}\n'.format( 'The specified map file does not exist: \'{0}\''.format(
self.opts['map'] self.opts['map'])
)
) )
raise SaltCloudNotFound()
try: try:
renderer = self.opts.get('renderer', 'yaml_jinja') renderer = self.opts.get('renderer', 'yaml_jinja')
rend = salt.loader.render(self.opts, {}) rend = salt.loader.render(self.opts, {})

View File

@ -84,7 +84,11 @@ class SaltCloud(parsers.SaltCloudParser):
self.exit(salt.defaults.exitcodes.EX_OK) self.exit(salt.defaults.exitcodes.EX_OK)
log.info('salt-cloud starting') log.info('salt-cloud starting')
mapper = salt.cloud.Map(self.config) try:
mapper = salt.cloud.Map(self.config)
except SaltCloudException as exc:
msg = 'There was an error generating the mapper.'
self.handle_exception(msg, exc)
names = self.config.get('names', None) names = self.config.get('names', None)
if names is not None: if names is not None:
@ -163,12 +167,21 @@ class SaltCloud(parsers.SaltCloudParser):
elif self.options.destroy and (self.config.get('names', None) or elif self.options.destroy and (self.config.get('names', None) or
self.config.get('map', None)): self.config.get('map', None)):
if self.config.get('map', None): map_file = self.config.get('map', None)
log.info('Applying map from {0!r}.'.format(self.config['map'])) names = self.config.get('names', ())
if map_file is not None:
if names != ():
msg = 'Supplying a mapfile, \'{0}\', in addition to instance names {1} ' \
'with the \'--destroy\' or \'-d\' function is not supported. ' \
'Please choose to delete either the entire map file or individual ' \
'instances.'.format(map_file, names)
self.handle_exception(msg, SaltCloudSystemExit)
log.info('Applying map from \'{0}\'.'.format(map_file))
matching = mapper.delete_map(query='list_nodes') matching = mapper.delete_map(query='list_nodes')
else: else:
matching = mapper.get_running_by_names( matching = mapper.get_running_by_names(
self.config.get('names', ()), names,
profile=self.options.profile profile=self.options.profile
) )

File diff suppressed because it is too large

View File

@ -1696,21 +1696,21 @@ class ClearFuncs(object):
try: try:
name = self.loadauth.load_name(clear_load) name = self.loadauth.load_name(clear_load)
groups = self.loadauth.get_groups(clear_load) groups = self.loadauth.get_groups(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) | eauth_config = self.opts['external_auth'][clear_load['eauth']]
('*' in self.opts['external_auth'][clear_load['eauth']])): if '*' not in eauth_config and name not in eauth_config:
found = False found = False
for group in groups: for group in groups:
if "{0}%".format(group) in self.opts['external_auth'][clear_load['eauth']]: if "{0}%".format(group) in eauth_config:
found = True found = True
break break
if not found: if not found:
log.warning('Authentication failure of type "eauth" occurred.') log.warning('Authentication failure of type "eauth" occurred.')
return '' return ''
else:
clear_load['groups'] = groups
if not self.loadauth.time_auth(clear_load): if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.') log.warning('Authentication failure of type "eauth" occurred.')
return '' return ''
clear_load['groups'] = groups
return self.loadauth.mk_token(clear_load) return self.loadauth.mk_token(clear_load)
except Exception as exc: except Exception as exc:
log.error( log.error(
@ -1759,40 +1759,45 @@ class ClearFuncs(object):
) )
) )
return '' return ''
# Bail if the token is empty or if the eauth type specified is not allowed
if not token or token['eauth'] not in self.opts['external_auth']: if not token or token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.') log.warning('Authentication failure of type "token" occurred.')
return '' return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
found = False
for group in token['groups']:
if "{0}%".format(group) in self.opts['external_auth'][token['eauth']]:
found = True
break
if not found:
log.warning('Authentication failure of type "token" occurred.')
return ''
group_perm_keys = [item for item in self.opts['external_auth'][token['eauth']] if item.endswith('%')] # The configured auth groups # Fetch eauth config and collect users and groups configured for access
eauth_config = self.opts['external_auth'][token['eauth']]
eauth_users = []
eauth_groups = []
for entry in eauth_config:
if entry.endswith('%'):
eauth_groups.append(entry.rstrip('%'))
else:
eauth_users.append(entry)
# First we need to know if the user is allowed to proceed via any of their group memberships. # If there are groups in the token, check if any of them are listed in the eauth config
group_auth_match = False group_auth_match = False
for group_config in group_perm_keys: try:
group_config = group_config.rstrip('%')
for group in token['groups']: for group in token['groups']:
if group == group_config: if group in eauth_groups:
group_auth_match = True group_auth_match = True
break
except KeyError:
pass
if '*' not in eauth_users and token['name'] not in eauth_users and not group_auth_match:
log.warning('Authentication failure of type "token" occurred.')
return ''
# Compile list of authorized actions for the user
auth_list = [] auth_list = []
# Add permissions for '*' or user-specific to the auth list
if '*' in self.opts['external_auth'][token['eauth']]: for user_key in ('*', token['name']):
auth_list.extend(self.opts['external_auth'][token['eauth']]['*']) auth_list.extend(eauth_config.get(user_key, []))
if token['name'] in self.opts['external_auth'][token['eauth']]: # Add any add'l permissions allowed by group membership
auth_list.extend(self.opts['external_auth'][token['eauth']][token['name']])
if group_auth_match: if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(self.opts['external_auth'][token['eauth']], token['groups'], auth_list) auth_list = self.ckminions.fill_auth_list_from_groups(eauth_config, token['groups'], auth_list)
log.trace("compiled auth_list: {0}".format(auth_list)) log.trace("Compiled auth_list: {0}".format(auth_list))
good = self.ckminions.auth_check( good = self.ckminions.auth_check(
auth_list, auth_list,
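For orientation, the refactored token check above reduces to the rule sketched below (illustrative data, not code from the patch): access passes if ``'*'`` or the user is listed in the eauth config, or one of the user's groups appears with a trailing ``%``.

.. code-block:: python

    # Illustrative data only; mirrors the refactored token check above.
    eauth_config = {'fred': ['test.*'], 'admins%': ['.*']}

    eauth_users = [k for k in eauth_config if not k.endswith('%')]
    eauth_groups = [k.rstrip('%') for k in eauth_config if k.endswith('%')]

    def token_allowed(name, groups):
        group_auth_match = any(group in eauth_groups for group in groups)
        return '*' in eauth_users or name in eauth_users or group_auth_match

    print(token_allowed('fred', []))            # True: user listed directly
    print(token_allowed('alice', ['admins']))   # True: matched via the admins% group
    print(token_allowed('mallory', ['users']))  # False: no user or group match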

View File

@ -1597,7 +1597,7 @@ def del_repo_key(name=None, **kwargs):
owner_name, ppa_name = name[4:].split('/') owner_name, ppa_name = name[4:].split('/')
ppa_info = _get_ppa_info_from_launchpad( ppa_info = _get_ppa_info_from_launchpad(
owner_name, ppa_name) owner_name, ppa_name)
keyid = ppa_info['signing_key_fingerprint'] keyid = ppa_info['signing_key_fingerprint'][-8:]
else: else:
raise SaltInvocationError( raise SaltInvocationError(
'keyid_ppa requires that a PPA be passed' 'keyid_ppa requires that a PPA be passed'
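The change above trims the full fingerprint returned by Launchpad down to its last 8 hex characters, i.e. the short key ID used when removing the key; a tiny illustration with a made-up fingerprint:

.. code-block:: python

    # Hypothetical fingerprint value; only its last 8 hex characters (the
    # short key ID) are passed along when deleting a PPA signing key.
    fingerprint = '0123456789ABCDEF0123456789ABCDEF01234567'
    keyid = fingerprint[-8:]
    print(keyid)  # 01234567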

View File

@ -19,11 +19,10 @@ option. This will give users a couple release cycles to modify their scripts,
SLS files, etc. to use the new functionality, rather than forcing users to SLS files, etc. to use the new functionality, rather than forcing users to
change everything immediately. change everything immediately.
In the **Carbon** release of Salt (slated for late summer/early fall 2015), In the **Carbon** release of Salt (due early 2016), this execution module will
this execution module will take the place of the default Docker execution take the place of the default Docker execution module, and backwards-compatible
module, and backwards-compatible naming will be maintained for a couple naming will be maintained for a couple releases after that to allow users time
releases after that to allow users time to replace references to ``dockerng`` to replace references to ``dockerng`` with ``docker``.
with ``docker``.
Installation Prerequisites Installation Prerequisites

View File

@ -66,7 +66,7 @@ def system_info():
.. code-block:: bash .. code-block:: bash
salt dell drac.getsysinfo salt dell drac.system_info
''' '''
cmd = __salt__['cmd.run_all']('racadm getsysinfo') cmd = __salt__['cmd.run_all']('racadm getsysinfo')
@ -84,7 +84,7 @@ def network_info():
.. code-block:: bash .. code-block:: bash
salt dell drac.getniccfg salt dell drac.network_info
''' '''
cmd = __salt__['cmd.run_all']('racadm getniccfg') cmd = __salt__['cmd.run_all']('racadm getniccfg')

View File

@ -7,9 +7,9 @@ from __future__ import absolute_import
# Import Python libs # Import Python libs
import collections import collections
import logging import logging
import os
import sys import sys
import traceback import traceback
import os
# Import salt libs # Import salt libs
import salt.crypt import salt.crypt

View File

@ -725,7 +725,7 @@ def install(name=None,
opts += 'U' opts += 'U'
if salt.utils.is_true(dryrun): if salt.utils.is_true(dryrun):
opts += 'n' opts += 'n'
if not salt.utils.is_true(dryrun) and pkg_type != 'file': if not salt.utils.is_true(dryrun):
opts += 'y' opts += 'y'
if salt.utils.is_true(quiet): if salt.utils.is_true(quiet):
opts += 'q' opts += 'q'

View File

@ -40,7 +40,7 @@ def __virtual__():
''' '''
Provides imgadm only on SmartOS Provides imgadm only on SmartOS
''' '''
if __grains__['os'] == "SmartOS" and _check_imgadm(): if salt.utils.is_smartos_globalzone() and _check_imgadm():
return __virtualname__ return __virtualname__
return False return False

View File

@ -41,7 +41,7 @@ def __virtual__():
''' '''
Provides virt on SmartOS Provides virt on SmartOS
''' '''
if __grains__['os'] == "SmartOS" and _check_vmadm(): if salt.utils.is_smartos_globalzone() and _check_vmadm():
return __virtualname__ return __virtualname__
return False return False

View File

@ -104,9 +104,9 @@ def _wait(jid):
def running(concurrent=False): def running(concurrent=False):
''' '''
Return a dict of state return data if a state function is already running. Return a list of strings that contain state return data if a state function is
This function is used to prevent multiple state calls from being run at already running. This function is used to prevent multiple state calls from being
the same time. run at the same time.
CLI Example: CLI Example:

View File

@ -437,12 +437,28 @@ def genrepo(saltenv='base'):
def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs): def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
''' '''
Install the passed package Install the passed package from the winrepo
Return a dict containing the new package names and versions:: :param name: The name of the package to install
:type name: str or None
{'<package>': {'old': '<old-version>', :param bool refresh: Boolean value representing whether or not to refresh
'new': '<new-version>'}} the winrepo db
:param pkgs: A list of packages to install from a software repository.
All packages listed under ``pkgs`` will be installed via a single
command.
:type pkgs: list or None
:param str saltenv: The salt environment to use. Default is ``base``.
:param dict kwargs: Any additional argument that may be passed from the
state module. If they don't apply, they are ignored.
:return: Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example: CLI Example:
@ -507,6 +523,9 @@ def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
if not cached_pkg: if not cached_pkg:
# It's not cached. Cache it, mate. # It's not cached. Cache it, mate.
cached_pkg = __salt__['cp.cache_file'](installer, saltenv) cached_pkg = __salt__['cp.cache_file'](installer, saltenv)
if not cached_pkg:
return 'Unable to cache file {0} from saltenv: {1}'\
.format(installer, saltenv)
if __salt__['cp.hash_file'](installer, saltenv) != \ if __salt__['cp.hash_file'](installer, saltenv) != \
__salt__['cp.hash_file'](cached_pkg): __salt__['cp.hash_file'](cached_pkg):
cached_pkg = __salt__['cp.cache_file'](installer, saltenv) cached_pkg = __salt__['cp.cache_file'](installer, saltenv)
@ -516,6 +535,9 @@ def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
cached_pkg = cached_pkg.replace('/', '\\') cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg) cache_path, _ = os.path.split(cached_pkg)
msiexec = pkginfo[version_num].get('msiexec') msiexec = pkginfo[version_num].get('msiexec')
allusers = pkginfo[version_num].get('allusers')
if allusers is None:
allusers = True
install_flags = '{0} {1}'.format(pkginfo[version_num]['install_flags'], options and options.get('extra_install_flags') or "") install_flags = '{0} {1}'.format(pkginfo[version_num]['install_flags'], options and options.get('extra_install_flags') or "")
cmd = [] cmd = []
@ -523,6 +545,8 @@ def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
cmd.extend(['msiexec', '/i']) cmd.extend(['msiexec', '/i'])
cmd.append(cached_pkg) cmd.append(cached_pkg)
cmd.extend(install_flags.split()) cmd.extend(install_flags.split())
if msiexec and allusers:
cmd.append('ALLUSERS="1"')
__salt__['cmd.run'](cmd, cache_path, output_loglevel='trace', python_shell=False) __salt__['cmd.run'](cmd, cache_path, output_loglevel='trace', python_shell=False)
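Putting the additions together, a sketch of how the final command list can look for an MSI package when ``allusers`` is left unset in the package definition (the path and flags below are illustrative, not from the patch):

.. code-block:: python

    # Illustrative values only; mirrors the allusers/ALLUSERS handling above.
    cached_pkg = 'C:\\salt\\var\\cache\\salt\\minion\\files\\base\\win\\repo\\7zip\\7z920-x64.msi'
    install_flags = '/qn /norestart'
    msiexec = True
    allusers = None          # not set in the package definition
    if allusers is None:
        allusers = True      # new default added by this change

    cmd = []
    if msiexec:
        cmd.extend(['msiexec', '/i'])
    cmd.append(cached_pkg)
    cmd.extend(install_flags.split())
    if msiexec and allusers:
        cmd.append('ALLUSERS="1"')
    # cmd -> ['msiexec', '/i', '...7z920-x64.msi', '/qn', '/norestart', 'ALLUSERS="1"']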

View File

@ -362,6 +362,8 @@ def salt_api_acl_tool(username, request):
- 1.1.1.2 - 1.1.1.2
foo: foo:
- 8.8.4.4 - 8.8.4.4
bar:
- '*'
:param username: Username to check against the API. :param username: Username to check against the API.
:type username: str :type username: str
@ -390,14 +392,14 @@ def salt_api_acl_tool(username, request):
users = acl.get('users', {}) users = acl.get('users', {})
if users: if users:
if username in users: if username in users:
if ip in users[username]: if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip)) logger.info(success_str.format(username, ip))
return True return True
else: else:
logger.info(failure_str.format(username, ip)) logger.info(failure_str.format(username, ip))
return False return False
elif username not in users and '*' in users: elif username not in users and '*' in users:
if ip in users['*']: if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip)) logger.info(success_str.format(username, ip))
return True return True
else: else:
@ -1521,19 +1523,18 @@ class Login(LowDataAdapter):
try: try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {}) eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token: if 'groups' in token:
user_groups = set(token['groups']) user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')]) eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
perms = []
for group in user_groups & eauth_groups: for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)]) perms.extend(eauth['{0}%'.format(group)])
perms = perms or None if not perms:
else:
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.") raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError): except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for " logger.debug("Configuration for external_auth malformed for "

View File

@ -23,6 +23,17 @@ the pickle protocol, set ``carbon.mode`` to ``pickle``:
carbon.mode: pickle carbon.mode: pickle
You can also specify the pattern used for the metric base path (except for virt modules metrics):
carbon.metric_base_pattern: carbon.[minion_id].[module].[function]
The following tokens can be used:
[module]: salt module
[function]: salt function
[minion_id]: minion id
The default is:
carbon.metric_base_pattern: [module].[function].[minion_id]
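A rough sketch of how such a pattern could be expanded; the helper below is hypothetical, the returner performs an equivalent substitution internally.

.. code-block:: python

    # Hypothetical helper showing how the pattern tokens expand.
    def expand_metric_base(pattern, module, function, minion_id):
        return (pattern
                .replace('[module]', module)
                .replace('[function]', function)
                .replace('[minion_id]', minion_id))

    print(expand_metric_base('carbon.[minion_id].[module].[function]',
                             'ps', 'get_users', 'web01'))
    # carbon.web01.ps.get_users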
Carbon settings may also be configured as: Carbon settings may also be configured as:
.. code-block:: yaml .. code-block:: yaml
@ -32,6 +43,7 @@ Carbon settings may also be configured as:
port: <carbon port> port: <carbon port>
skip_on_error: True skip_on_error: True
mode: (pickle|text) mode: (pickle|text)
metric_base_pattern: <pattern> | [module].[function].[minion_id]
Alternative configuration values can be used by prefacing the configuration. Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from Any values not found in the alternative configuration will be pulled from
@ -208,6 +220,7 @@ def _send(saltdata, metric_base, opts):
host = opts.get('host') host = opts.get('host')
port = opts.get('port') port = opts.get('port')
skip = opts.get('skip') skip = opts.get('skip')
metric_base_pattern = opts.get('carbon.metric_base_pattern')
if 'mode' in opts: if 'mode' in opts:
mode = opts.get('mode').lower() mode = opts.get('mode').lower()

View File

@ -217,8 +217,8 @@ def format_log(ret):
new = chg[pkg]['new'] new = chg[pkg]['new']
if not new and new not in (False, None): if not new and new not in (False, None):
new = 'absent' new = 'absent'
msg += '{0} changed from {1} to ' \ msg += '{0!r} changed from {1!r} to ' \
'{2}\n'.format(pkg, old, new) '{2!r}\n'.format(pkg, old, new)
if not msg: if not msg:
msg = str(ret['changes']) msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None: if ret['result'] is True or ret['result'] is None:
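The switch to the ``!r`` conversion quotes each value in the log line; a small illustration with made-up values:

.. code-block:: python

    # The '!r' conversion applies repr() to each value before formatting,
    # quoting the package name and versions in the resulting message.
    msg = '{0!r} changed from {1!r} to {2!r}'.format('vim', '7.4.1689', 'absent')
    print(msg)  # 'vim' changed from '7.4.1689' to 'absent'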
@ -257,9 +257,9 @@ class Compiler(object):
''' '''
Class used to compile and manage the High Data structure Class used to compile and manage the High Data structure
''' '''
def __init__(self, opts): def __init__(self, opts, renderers):
self.opts = opts self.opts = opts
self.rend = salt.loader.render(self.opts, {}) self.rend = renderers
# We need __setstate__ and __getstate__ to avoid pickling errors since # We need __setstate__ and __getstate__ to avoid pickling errors since
# 'self.rend' contains a function reference which is not picklable. # 'self.rend' contains a function reference which is not picklable.

View File

@ -1505,7 +1505,8 @@ def managed(name,
except Exception as exc: except Exception as exc:
ret['changes'] = {} ret['changes'] = {}
log.debug(traceback.format_exc()) log.debug(traceback.format_exc())
os.remove(tmp_filename) if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
# file being updated to verify using check_cmd # file being updated to verify using check_cmd
@ -1523,7 +1524,8 @@ def managed(name,
cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts) cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
if isinstance(cret, dict): if isinstance(cret, dict):
ret.update(cret) ret.update(cret)
os.remove(tmp_filename) if os.path.isfile(tmp_filename):
os.remove(tmp_filename)
return ret return ret
# Since we generated a new tempfile and we are not returning here # Since we generated a new tempfile and we are not returning here
# lets change the original sfn to the new tempfile or else we will # lets change the original sfn to the new tempfile or else we will
@ -1561,7 +1563,7 @@ def managed(name,
log.debug(traceback.format_exc()) log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc)) return _error(ret, 'Unable to manage file: {0}'.format(exc))
finally: finally:
if tmp_filename: if tmp_filename and os.path.isfile(tmp_filename):
os.remove(tmp_filename) os.remove(tmp_filename)

View File

@ -81,9 +81,6 @@ def user_exists(name, password=None, htpasswd_file=None, options='',
ret['comment'] = useradd_ret['stderr'] ret['comment'] = useradd_ret['stderr']
return ret return ret
if __opts__['test']: ret['result'] = True
ret['result'] = None
else:
ret['result'] = True
ret['comment'] = 'User already known' ret['comment'] = 'User already known'
return ret return ret

View File

@ -198,8 +198,6 @@ def _find_remove_targets(name=None,
__salt__.get('pkg.normalize_name', lambda pkgname: pkgname) __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
to_remove = {_normalize_name(name): version} to_remove = {_normalize_name(name): version}
cver = cur_pkgs.get(name, [])
version_spec = False version_spec = False
# Find out which packages will be targeted in the call to pkg.remove # Find out which packages will be targeted in the call to pkg.remove
# Check current versions against specified versions # Check current versions against specified versions
@ -557,14 +555,61 @@ def installed(
Ensure that the package is installed, and that it is the correct version Ensure that the package is installed, and that it is the correct version
(if specified). (if specified).
name :param str name:
The name of the package to be installed. This parameter is ignored if The name of the package to be installed. This parameter is ignored if
either "pkgs" or "sources" is used. Additionally, please note that this either "pkgs" or "sources" is used. Additionally, please note that this
option can only be used to install packages from a software repository. option can only be used to install packages from a software repository.
To install a package file manually, use the "sources" option detailed To install a package file manually, use the "sources" option detailed
below. below.
fromrepo :param str version:
Install a specific version of a package. This option is ignored if
either "pkgs" or "sources" is used. Currently, this option is supported
for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`,
:mod:`yumpkg <salt.modules.yumpkg>`, and
:mod:`zypper <salt.modules.zypper>`. The version number includes the
release designation where applicable, to allow Salt to target a
specific release of a given version. When in doubt, using the
``pkg.latest_version`` function for an uninstalled package will tell
you the version available.
.. code-block:: bash
# salt myminion pkg.latest_version httpd
myminion:
2.2.15-30.el6.centos
Also, while this function is not yet implemented for all pkg frontends,
:mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will
show all versions available in the various repositories for a given
package, irrespective of whether or not it is installed.
.. code-block:: bash
# salt myminion pkg.list_repo_pkgs httpd
myminion:
----------
base:
|_
----------
httpd:
2.2.15-29.el6.centos
updates:
|_
----------
httpd:
2.2.15-30.el6.centos
The version strings returned by either of these functions can be used
as version specifiers in pkg states.
:param bool refresh:
Update the repo database of available packages prior to installing the
requested package.
:param str fromrepo:
Specify a repository from which to install Specify a repository from which to install
.. note:: .. note::
@ -618,68 +663,101 @@ def installed(
**4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or
``precise-security`` could be used for the ``fromrepo`` value. ``precise-security`` could be used for the ``fromrepo`` value.
skip_verify :param bool skip_verify:
Skip the GPG verification check for the package to be installed Skip the GPG verification check for the package to be installed
skip_suggestions :param bool skip_suggestions:
Force strict package naming. Disables lookup of package alternatives. Force strict package naming. Disables lookup of package alternatives.
.. versionadded:: 2014.1.1 .. versionadded:: 2014.1.1
version :param list pkgs:
Install a specific version of a package. This option is ignored if A list of packages to install from a software repository. All packages
either "pkgs" or "sources" is used. Currently, this option is supported listed under ``pkgs`` will be installed via a single command.
for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`,
Example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar
- baz
- hold: True
``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`, :mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`, :mod:`pacman <salt.modules.pacman>`, :mod:`yumpkg <salt.modules.yumpkg>`,
:mod:`yumpkg <salt.modules.yumpkg>`, and and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified
:mod:`zypper <salt.modules.zypper>`. The version number includes the in the ``pkgs`` argument. For example:
release designation where applicable, to allow Salt to target a
specific release of a given version. When in doubt, using the
``pkg.latest_version`` function for an uninstalled package will tell
you the version available.
.. code-block:: bash .. code-block:: yaml
# salt myminion pkg.latest_version httpd mypkgs:
myminion: pkg.installed:
2.2.15-30.el6.centos - pkgs:
- foo
- bar: 1.2.3-4
- baz
Also, while this function is not yet implemented for all pkg frontends, Additionally, :mod:`ebuild <salt.modules.ebuild>`,
:mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will :mod:`pacman <salt.modules.pacman>` and
show all versions available in the various repositories for a given :mod:`zypper <salt.modules.zypper>` support the ``<``, ``<=``, ``>=``, and
package, irrespective of whether or not it is installed. ``>`` operators for more control over what versions will be installed. For
.. code-block:: bash Example:
# salt myminion pkg.list_repo_pkgs httpd .. code-block:: yaml
myminion:
----------
base:
|_
----------
httpd:
2.2.15-29.el6.centos
updates:
|_
----------
httpd:
2.2.15-30.el6.centos
The version strings returned by either of these functions can be used mypkgs:
as version specifiers in pkg states. pkg.installed:
- pkgs:
- foo
- bar: '>=1.2.3-4'
- baz
refresh ``NOTE:`` When using comparison operators, the expression must be enclosed
Update the repo database of available packages prior to installing the in quotes to avoid a YAML render error.
requested package.
hold With :mod:`ebuild <salt.modules.ebuild>` it is also possible to specify a
Force the package to be held at the current installed version. use flag list and/or if the given packages should be in
Currently works with YUM & APT-based systems. package.accept_keywords file and/or the overlay from which you want the
package to be installed.
.. versionadded:: 2014.7.0 For example:
allow_updates .. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo: '~'
- bar: '~>=1.2:slot::overlay[use,-otheruse]'
- baz
**Multiple Package Installation Options: (not supported in Windows or
pkgng)**
:param list sources:
A list of packages to install, along with the source URI or local path
from which to install each package. In the example below, ``foo``,
``bar``, ``baz``, etc. refer to the name of the package, as it would
appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt
CLI commands.
.. code-block:: yaml
mypkgs:
pkg.installed:
- sources:
- foo: salt://rpms/foo.rpm
- bar: http://somesite.org/bar.rpm
- baz: ftp://someothersite.org/baz.rpm
- qux: /minion/path/to/qux.rpm
:param bool allow_updates:
Allow the package to be updated outside Salt's control (e.g. auto Allow the package to be updated outside Salt's control (e.g. auto
updates on Windows). This means a package on the Minion can have a updates on Windows). This means a package on the Minion can have a
newer version than the latest available in the repository without newer version than the latest available in the repository without
@ -687,20 +765,22 @@ def installed(
.. versionadded:: 2014.7.0 .. versionadded:: 2014.7.0
Example: Example:
.. code-block:: yaml .. code-block:: yaml
httpd: httpd:
pkg.installed: pkg.installed:
- fromrepo: mycustomrepo - fromrepo: mycustomrepo
- skip_verify: True - skip_verify: True
- skip_suggestions: True - skip_suggestions: True
- version: 2.0.6~ubuntu3 - version: 2.0.6~ubuntu3
- refresh: True - refresh: True
- hold: False - allow_updates: True
- hold: False
:param bool pkg_verify:
pkg_verify
.. versionadded:: 2014.7.0 .. versionadded:: 2014.7.0
For requested packages that are already installed and would not be For requested packages that are already installed and would not be
@ -711,27 +791,27 @@ def installed(
below). Currently, this option is supported for the following pkg below). Currently, this option is supported for the following pkg
providers: :mod:`yumpkg <salt.modules.yumpkg>`. providers: :mod:`yumpkg <salt.modules.yumpkg>`.
Examples: Examples:
.. code-block:: yaml .. code-block:: yaml
httpd: httpd:
pkg.installed: pkg.installed:
- version: 2.2.15-30.el6.centos - version: 2.2.15-30.el6.centos
- pkg_verify: True - pkg_verify: True
.. code-block:: yaml .. code-block:: yaml
mypkgs: mypkgs:
pkg.installed: pkg.installed:
- pkgs: - pkgs:
- foo - foo
- bar: 1.2.3-4 - bar: 1.2.3-4
- baz - baz
- pkg_verify: - pkg_verify:
- ignore_types: [config,doc] - ignore_types: [config,doc]
normalize : True :param bool normalize:
Normalize the package name by removing the architecture, if the Normalize the package name by removing the architecture, if the
architecture of the package is different from the architecture of the architecture of the package is different from the architecture of the
operating system. The ability to disable this behavior is useful for operating system. The ability to disable this behavior is useful for
@ -741,133 +821,64 @@ def installed(
.. versionadded:: 2014.7.0 .. versionadded:: 2014.7.0
Example: Example:
.. code-block:: yaml .. code-block:: yaml
gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: gpfs.gplbin-2.6.32-279.31.1.el6.x86_64:
pkg.installed: pkg.installed:
- normalize: False - normalize: False
**Multiple Package Installation Options: (not supported in Windows or :param kwargs:
pkgng)** These are specific to each OS. If it does not apply to the execution
module for your OS, it is ignored.
pkgs :param bool hold:
A list of packages to install from a software repository. All packages Force the package to be held at the current installed version.
listed under ``pkgs`` will be installed via a single command. Currently works with YUM & APT based systems.
Example: .. versionadded:: 2014.7.0
.. code-block:: yaml :param list names:
A list of packages to install from a software repository. Each package
will be installed individually by the package manager.
mypkgs: .. warning::
pkg.installed:
- pkgs:
- foo
- bar
- baz
- hold: True
``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, Unlike ``pkgs``, the ``names`` parameter cannot specify a version.
:mod:`ebuild <salt.modules.ebuild>`, In addition, it makes a separate call to the package management
:mod:`pacman <salt.modules.pacman>`, :mod:`yumpkg <salt.modules.yumpkg>`, frontend to install each package, whereas ``pkgs`` makes just a
and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified single call. It is therefore recommended to use ``pkgs`` instead of
in the ``pkgs`` argument. For example: ``names`` to install multiple packages, both for the additional
features and the performance improvement that it brings.
.. code-block:: yaml :param bool install_recommends:
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
mypkgs: .. versionadded:: 2015.5.0
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
Additionally, :mod:`ebuild <salt.modules.ebuild>`, .. code-block:: yaml
:mod:`pacman <salt.modules.pacman>` and
:mod:`zypper <salt.modules.zypper>` support the ``<``, ``<=``, ``>=``, and
``>`` operators for more control over what versions will be installed. For
example:
.. code-block:: yaml httpd:
pkg.installed:
- install_recommends: False
mypkgs: :param bool only_upgrade:
pkg.installed: Only upgrade the packages, if they are already installed. Default is
- pkgs: ``False``. Currently only works with APT-based systems.
- foo
- bar: '>=1.2.3-4'
- baz
``NOTE:`` When using comparison operators, the expression must be enclosed .. versionadded:: 2015.5.0
in quotes to avoid a YAML render error.
With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use .. code-block:: yaml
flag list and/or if the given packages should be in package.accept_keywords
file and/or the overlay from which you want the package to be installed.
For example:
.. code-block:: yaml httpd:
pkg.installed:
- only_upgrade: True
mypkgs: :return:
pkg.installed: A dictionary containing the state of the software installation
- pkgs: :rtype dict:
- foo: '~'
- bar: '~>=1.2:slot::overlay[use,-otheruse]'
- baz
names
A list of packages to install from a software repository. Each package
will be installed individually by the package manager.
.. warning::
Unlike ``pkgs``, the ``names`` parameter cannot specify a version.
In addition, it makes a separate call to the package management
frontend to install each package, whereas ``pkgs`` makes just a
single call. It is therefore recommended to use ``pkgs`` instead of
``names`` to install multiple packages, both for the additional
features and the performance improvement that it brings.
sources
A list of packages to install, along with the source URI or local path
from which to install each package. In the example below, ``foo``,
``bar``, ``baz``, etc. refer to the name of the package, as it would
appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt
CLI commands.
.. code-block:: yaml
mypkgs:
pkg.installed:
- sources:
- foo: salt://rpms/foo.rpm
- bar: http://somesite.org/bar.rpm
- baz: ftp://someothersite.org/baz.rpm
- qux: /minion/path/to/qux.rpm
install_recommends
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.installed:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is
``False``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.installed:
- only_upgrade: True
''' '''
if isinstance(pkgs, list) and len(pkgs) == 0: if isinstance(pkgs, list) and len(pkgs) == 0:

View File

@ -299,7 +299,7 @@ def running(name, enable=None, sig=None, init_delay=None, **kwargs):
if before_toggle_status: if before_toggle_status:
ret['comment'] = 'The service {0} is already running'.format(name) ret['comment'] = 'The service {0} is already running'.format(name)
if __opts__['test']: if __opts__['test']:
ret['result'] = None ret['result'] = True
return ret return ret
if enable is True and not before_toggle_enable_status: if enable is True and not before_toggle_enable_status:
ret.update(_enable(name, None, **kwargs)) ret.update(_enable(name, None, **kwargs))
@ -392,7 +392,7 @@ def dead(name, enable=None, sig=None, **kwargs):
ret.update(_disable(name, None, **kwargs)) ret.update(_disable(name, None, **kwargs))
return ret return ret
else: else:
ret['result'] = None ret['result'] = True
return ret return ret
if __opts__['test']: if __opts__['test']:

View File

@ -82,6 +82,8 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
@classmethod @classmethod
def __key(cls, opts, **kwargs): def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID opts['id'], # minion ID
opts['master_uri'], # master ID opts['master_uri'], # master ID
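A short sketch of why the kwarg is folded into ``opts`` before the key is built (illustrative values): the channel cache key includes ``opts['master_uri']``, so a ``master_uri`` passed only as a keyword argument would otherwise be ignored and two different masters could end up sharing one cached channel.

.. code-block:: python

    # Illustrative values; mirrors the __key handling above.
    opts = {'pki_dir': '/etc/salt/pki/minion', 'id': 'minion1',
            'master_uri': 'tcp://10.0.0.1:4506'}
    kwargs = {'master_uri': 'tcp://10.0.0.2:4506'}

    if 'master_uri' in kwargs:
        opts['master_uri'] = kwargs['master_uri']

    key = (opts['pki_dir'], opts['id'], opts['master_uri'])
    print(key)  # ('/etc/salt/pki/minion', 'minion1', 'tcp://10.0.0.2:4506')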

View File

@ -30,11 +30,10 @@ class Reactor(multiprocessing.Process, salt.state.Compiler):
''' '''
def __init__(self, opts): def __init__(self, opts):
multiprocessing.Process.__init__(self) multiprocessing.Process.__init__(self)
salt.state.Compiler.__init__(self, opts) local_minion_opts = opts.copy()
local_minion_opts = self.opts.copy()
local_minion_opts['file_client'] = 'local' local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts) self.minion = salt.minion.MasterMinion(local_minion_opts)
salt.state.Compiler.__init__(self, opts, self.minion.rend)
def render_reaction(self, glob_ref, tag, data): def render_reaction(self, glob_ref, tag, data):
''' '''

View File

@ -359,6 +359,7 @@ class Schedule(object):
if name in self.opts['pillar']['schedule']: if name in self.opts['pillar']['schedule']:
del self.opts['pillar']['schedule'][name] del self.opts['pillar']['schedule'][name]
schedule = self.opts['pillar']['schedule'] schedule = self.opts['pillar']['schedule']
log.warn('Pillar schedule deleted. Pillar refresh recommended. Run saltutil.refresh_pillar.')
# Fire the complete event back along with updated list of schedule # Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)

View File

@ -552,6 +552,7 @@ def dependency_information(include_salt_cloud=False):
('ZMQ', 'zmq', 'zmq_version'), ('ZMQ', 'zmq', 'zmq_version'),
('Mako', 'mako', '__version__'), ('Mako', 'mako', '__version__'),
('Tornado', 'tornado', 'version'), ('Tornado', 'tornado', 'version'),
('timelib', 'timelib', 'version'),
] ]
if include_salt_cloud: if include_salt_cloud:

View File

@ -387,6 +387,98 @@ class OpenNebulaTestCase(TestCase):
self.assertEqual(opennebula.get_vn_id(mock_kwargs, 'foo'), self.assertEqual(opennebula.get_vn_id(mock_kwargs, 'foo'),
mock_id) mock_id)
# TODO: Write tests for create function
def test_destroy_function_error(self):
'''
Tests that a SaltCloudSystemExit is raised when --function or -f is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.destroy, 'my-vm', 'function')
def test_image_allocate_function_error(self):
'''
Tests that a SaltCloudSystemExit is raised when something other than
--function or -f is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_allocate, 'foo')
def test_image_allocate_no_name_or_datastore_id(self):
'''
Tests that a SaltCloudSystemExit is raised when neither a datastore_id
nor a datastore_name is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_allocate, 'function')
def test_image_allocate_no_path_or_data(self):
'''
Tests that a SaltCloudSystemExit is raised when neither the path nor data args
are provided.
'''
self.assertRaises(SaltCloudSystemExit,
opennebula.image_allocate,
'function',
kwargs={'datastore_id': '5'})
def test_image_clone_function_error(self):
'''
Tests that a SaltCloudSystemExit is raised when something other than
--function or -f is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_clone, 'foo')
def test_image_clone_no_name(self):
'''
Tests that a SaltCloudSystemExit is raised when a name isn't provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_clone, 'function')
def test_image_clone_no_image_id_or_image_name(self):
'''
Tests that a SaltCloudSystemExit is raised when neither the image_id nor
the image_name args are provided.
'''
self.assertRaises(SaltCloudSystemExit,
opennebula.image_clone,
'function',
kwargs={'name': 'test'})
def test_image_delete_function_error(self):
'''
Tests that a SaltCloudSystemExit is raised when something other than
--function or -f is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_delete, 'foo')
def test_image_delete_no_name_or_image_id(self):
'''
Tests that a SaltCloudSystemExit is raised when neither an image_id
nor a name is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_delete, 'function')
def test_image_persist_function_error(self):
'''
Tests that a SaltCloudSystemExit is raised when something other than
--function or -f is provided.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_persistent, 'foo')
def test_image_persist_no_persist(self):
'''
Tests that a SaltCloudSystemExit is raised when the persist kwarg is missing.
'''
self.assertRaises(SaltCloudSystemExit, opennebula.image_persistent, 'function')
def test_image_persist_no_name_or_image_id(self):
'''
Tests that a SaltCloudSystemExit is raised when neither an image_id
nor a name is provided.
'''
self.assertRaises(SaltCloudSystemExit,
opennebula.image_delete,
'function',
kwargs={'persist': False})
if __name__ == '__main__': if __name__ == '__main__':
from integration import run_tests from integration import run_tests

View File

@ -58,7 +58,7 @@ class ServiceTestCase(TestCase):
'result': True}, 'result': True},
{'changes': {}, {'changes': {},
'comment': 'The service salt is already running', 'comment': 'The service salt is already running',
'name': 'salt', 'result': None}] 'name': 'salt', 'result': True}]
tmock = MagicMock(return_value=True) tmock = MagicMock(return_value=True)
fmock = MagicMock(return_value=False) fmock = MagicMock(return_value=False)
@ -153,7 +153,7 @@ class ServiceTestCase(TestCase):
'result': True}, 'result': True},
{'changes': {}, {'changes': {},
'comment': 'The service salt is already dead', 'name': 'salt', 'comment': 'The service salt is already dead', 'name': 'salt',
'result': None}] 'result': True}]
mock = MagicMock(return_value="salt") mock = MagicMock(return_value="salt")
with patch.object(service, '_enabled_used_error', mock): with patch.object(service, '_enabled_used_error', mock):