diff --git a/.gitignore b/.gitignore index e2997f32e4..b6dab4c220 100644 --- a/.gitignore +++ b/.gitignore @@ -70,3 +70,6 @@ tests/integration/files/conf/grains # ignore the local root /root/** + +# Ignore file cache created by jinja tests +tests/unit/templates/roots diff --git a/.pylintrc b/.pylintrc index 89b08d2458..7feb6b9e52 100644 --- a/.pylintrc +++ b/.pylintrc @@ -27,10 +27,10 @@ persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. -load-plugins=salttesting.pylintplugins.pep8, - salttesting.pylintplugins.pep263, - salttesting.pylintplugins.strings, - salttesting.pylintplugins.fileperms +load-plugins=saltpylint.pep8, + saltpylint.pep263, + saltpylint.strings, + saltpylint.fileperms # Fileperms Lint Plugin Settings diff --git a/.testing.pylintrc b/.testing.pylintrc index 5b2fb3dd7a..70feb01036 100644 --- a/.testing.pylintrc +++ b/.testing.pylintrc @@ -14,10 +14,10 @@ init-hook=" # Pickle collected data for later comparisons. persistent=no -load-plugins=salttesting.pylintplugins.pep8, - salttesting.pylintplugins.pep263, - salttesting.pylintplugins.strings, - salttesting.pylintplugins.fileperms +load-plugins=saltpylint.pep8, + saltpylint.pep263, + saltpylint.strings, + saltpylint.fileperms # Fileperms Lint Plugin Settings fileperms-default=0644 diff --git a/conf/minion b/conf/minion index 07d3474f00..bac89f5dca 100644 --- a/conf/minion +++ b/conf/minion @@ -151,7 +151,6 @@ # auth_tries: 10 # auth_safemode: False # ping_interval: 90 -# restart_on_error: True # # Minions won't know master is missing until a ping fails. After the ping fail, # the minion will attempt authentication and likely fails out and cause a restart. diff --git a/doc/ref/beacons/all/index.rst b/doc/ref/beacons/all/index.rst new file mode 100644 index 0000000000..47d2acfc52 --- /dev/null +++ b/doc/ref/beacons/all/index.rst @@ -0,0 +1,13 @@ +.. _all-salt.beacons: + +=================================== +Full list of builtin beacon modules +=================================== + +.. currentmodule:: salt.beacons + +.. autosummary:: + :toctree: + :template: autosummary.rst.tmpl + + inotify diff --git a/doc/ref/beacons/all/salt.beacons.inotify.rst b/doc/ref/beacons/all/salt.beacons.inotify.rst new file mode 100644 index 0000000000..5a2a45bdfa --- /dev/null +++ b/doc/ref/beacons/all/salt.beacons.inotify.rst @@ -0,0 +1,6 @@ +==================== +salt.beacons.inotify +==================== + +.. automodule:: salt.beacons.inotify + :members: \ No newline at end of file diff --git a/doc/ref/configuration/logging/index.rst b/doc/ref/configuration/logging/index.rst index b368bf5ccf..dd8ae4bf31 100644 --- a/doc/ref/configuration/logging/index.rst +++ b/doc/ref/configuration/logging/index.rst @@ -163,7 +163,7 @@ at the ``debug`` level: .. code-block:: yaml log_granular_levels: - 'salt': 'warning', + 'salt': 'warning' 'salt.modules': 'debug' @@ -172,4 +172,4 @@ External Logging Handlers Besides the internal logging handlers used by salt, there are some external which can be used, see the :doc:`external logging handlers` -document. \ No newline at end of file +document. 
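The corrected ``log_granular_levels`` mapping above pairs logger names with the level applied to each logger. As a rough, simplified sketch of what that setting amounts to — standard-library calls only, not Salt's actual logging setup code, and with the mapping taken from the documentation example:

.. code-block:: python

    import logging

    # Mapping mirroring the documentation example above: quiet the
    # top-level 'salt' logger, keep execution modules verbose.
    granular_levels = {
        'salt': 'warning',
        'salt.modules': 'debug',
    }

    for logger_name, level_name in granular_levels.items():
        level = getattr(logging, level_name.upper(), logging.INFO)
        logging.getLogger(logger_name).setLevel(level)
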
diff --git a/doc/ref/index.rst b/doc/ref/index.rst index c7adb54d45..eebe1779b1 100644 --- a/doc/ref/index.rst +++ b/doc/ref/index.rst @@ -29,4 +29,5 @@ Reference modules/index tops/index tops/all/index - wheel/all/index \ No newline at end of file + wheel/all/index + beacons/all/index diff --git a/doc/topics/beacons/index.rst b/doc/topics/beacons/index.rst new file mode 100644 index 0000000000..91e1a2faf0 --- /dev/null +++ b/doc/topics/beacons/index.rst @@ -0,0 +1,42 @@ +.. _beacons: + +======= +Beacons +======= + +The beacon system allows the minion to hook into system processes and +continually translate external events into the salt event bus. The +primary example of this is the :py:mod:`~salt.beacons.inotify` beacon. This +beacon uses inotify to watch configured files or directories on the minion for +changes, creation, deletion etc. + +This allows for the changes to be sent up to the master where the +reactor can respond to changes. + +Configuring The Beacons +======================= + +The beacon system, like many others in Salt, can be configured via the +minion pillar, grains, or local config file: + +.. code-block:: yaml + + beacons: + inotify: + /etc/httpd/conf.d: {} + /opt: {} + +Writing Beacon Plugins +====================== + +Beacon plugins use the standard Salt loader system, meaning that many of the +constructs from other plugin systems holds true, such as the ``__virtual__`` +function. + +The important function in the Beacon Plugin is the ``beacon`` function. When +the beacon is configured to run, this function will be executed repeatedly +by the minion. The ``beacon`` function therefore cannot block and should be +as lightweight as possible. The ``beacon`` also must return a list of dicts, +each dict in the list will be translated into an event on the master. + +Please see the :py:mod:`~salt.beacons.inotify` beacon as an example. diff --git a/doc/topics/installation/windows.rst b/doc/topics/installation/windows.rst index 93241e5a5f..375fa1d29f 100644 --- a/doc/topics/installation/windows.rst +++ b/doc/topics/installation/windows.rst @@ -28,7 +28,7 @@ minion exe>` should match the contents of the corresponding md5 file. * Salt-Minion-2014.7.0-1-win32-Setup.exe | md5 * Salt-Minion-2014.7.0-AMD64-Setup.exe | md5 .. note:: - The 2014.7.0 exe's have been removed because of a regression. Please use the 2014.7.1 release instead. + The 2014.7.0 installers have been removed because of a regression. Please use the 2014.7.1 release instead. * 2014.1.13 * `Salt-Minion-2014.1.13-x86-Setup.exe `__ | `md5 `__ @@ -380,4 +380,4 @@ this, salt-minion can't report some installed packages. .. _pywin32: http://sourceforge.net/projects/pywin32/files/pywin32 .. _Cython: http://www.lfd.uci.edu/~gohlke/pythonlibs/#cython .. _jinja2: http://www.lfd.uci.edu/~gohlke/pythonlibs/#jinja2 -.. _msgpack: http://www.lfd.uci.edu/~gohlke/pythonlibs/#msgpack \ No newline at end of file +.. _msgpack: http://www.lfd.uci.edu/~gohlke/pythonlibs/#msgpack diff --git a/doc/topics/releases/2015.2.0.rst b/doc/topics/releases/2015.2.0.rst index 2547c5d643..355991e073 100644 --- a/doc/topics/releases/2015.2.0.rst +++ b/doc/topics/releases/2015.2.0.rst @@ -2,6 +2,18 @@ Salt 2014.12.0 Release Notes - Codename Lithium =============================================== +Beacons +======= + +The beacon system allows the minion to hook into system processes and +continually translate external events into the salt event bus. The +primary example of this is the :py:mod:`~salt.beacons.inotify` beacon. 
This +beacon uses inotify to watch configured files or directories on the minion for +changes, creation, deletion etc. + +This allows for the changes to be sent up to the master where the +reactor can respond to changes. + Salt SSH ======== diff --git a/doc/topics/tutorials/http.rst b/doc/topics/tutorials/http.rst index 083bf164d5..bdfed93be5 100644 --- a/doc/topics/tutorials/http.rst +++ b/doc/topics/tutorials/http.rst @@ -275,6 +275,35 @@ as required. However, each must individually be turned on. The return from these will be found in the return dict as ``status``, ``headers`` and ``text``, respectively. +Writing Return Data to Files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +It is possible to write either the return data or headers to files, as soon as +the response is received from the server, but specifying file locations via the +``text_out`` or ``headers_out`` arguments. ``text`` and ``headers`` do not need +to be returned to the user in order to do this. + +.. code-block:: python + + salt.utils.http.query( + 'http://example.com', + text=False, + headers=False, + text_out='/path/to/url_download.txt', + headers_out='/path/to/headers_download.txt', + ) + +SSL Verification +~~~~~~~~~~~~~~~~ +By default, this function will verify SSL certificates. However, for testing or +debugging purposes, SSL verification can be turned off. + +.. code-block:: python + + salt.utils.http.query( + 'https://example.com', + ssl_verify=False, + ) + CA Bundles ~~~~~~~~~~ The ``requests`` library has its own method of detecting which CA (certficate @@ -292,19 +321,58 @@ using the ``ca_bundle`` variable. ca_bundle='/path/to/ca_bundle.pem', ) -SSL Verification -~~~~~~~~~~~~~~~~ +Updating CA Bundles ++++++++++++++++++++ +The ``update_ca_bundle()`` function can be used to update the bundle file at a +specified location. If the target location is not specified, then it will +attempt to auto-detect the location of the bundle file. If the URL to download +the bundle from does not exist, a bundle will be downloaded from the cURL +website. -By default, this function will verify SSL certificates. However, for testing or -debugging purposes, SSL verification can be turned off. +CAUTION: The ``target`` and the ``source`` should always be specified! Failure +to specify the ``target`` may result in the file being written to the wrong +location on the local system. Failure to specify the ``source`` may cause the +upstream URL to receive excess unnecessary traffic, and may cause a file to be +download which is hazardous or does not meet the needs of the user. .. code-block:: python - salt.utils.http.query( - 'https://example.com', - ssl_verify=False, + salt.utils.http.update_ca_bundle( + target='/path/to/ca-bundle.crt', + source='https://example.com/path/to/ca-bundle.crt', + opts=__opts__, ) +The ``opts`` parameter should also always be specified. If it is, then the +``target`` and the ``source`` may be specified in the relevant configuration +file (master or minion) as ``ca_bundle`` and ``ca_bundle_url``, respectively. + +.. code-block:: yaml + + ca_bundle: /path/to/ca-bundle.crt + ca_bundle_url: https://example.com/path/to/ca-bundle.crt + +If Salt is unable to auto-detect the location of the CA bundle, it will raise +an error. + +The ``update_ca_bundle()`` function can also be passed a string or a list of +strings which represent files on the local system, which should be appended (in +the specified order) to the end of the CA bundle file. 
This is useful in +environments where private certs need to be made available, and are not +otherwise reasonable to add to the bundle file. + +.. code-block:: python + + salt.utils.http.update_ca_bundle( + opts=__opts__, + merge_files=[ + '/etc/ssl/private_cert_1.pem', + '/etc/ssl/private_cert_2.pem', + '/etc/ssl/private_cert_3.pem', + ] + ) + + Test Mode ~~~~~~~~~ diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py new file mode 100644 index 0000000000..15a08873d5 --- /dev/null +++ b/salt/beacons/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +''' +This package contains the loader modules for the salt streams system +''' +# Import salt libs +import salt.loader + + +class Beacon(object): + ''' + This class is used to eveluate and execute on the beacon system + ''' + def __init__(self, opts): + self.opts = opts + self.beacons = salt.loader.beacons(opts) + + def process(self, config): + ''' + Process the configured beacons + + The config must be a dict and looks like this in yaml + + code_block:: yaml + + beacons: + inotify: + - /etc/fstab + - /var/cache/foo/* + ''' + ret = [] + for mod in config: + fun_str = '{0}.beacon'.format(mod) + if fun_str in self.beacons: + tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod) + raw = self.beacons[fun_str](config[mod]) + for data in raw: + if 'tag' in data: + tag += data.pop('tag') + ret.append({'tag': tag, 'data': data}) + return ret diff --git a/salt/beacons/inotify.py b/salt/beacons/inotify.py new file mode 100644 index 0000000000..306620ce42 --- /dev/null +++ b/salt/beacons/inotify.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +''' +Watch files and translate the changes into salt events +''' +# Import python libs +import collections + +# Import third party libs +try: + import pyinotify + HAS_PYINOTIFY = True + DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY +except ImportError: + HAS_PYINOTIFY = False + DEFAULT_MASK = None + +__virtualname__ = 'inotify' + + +def __virtual__(): + if HAS_PYINOTIFY: + return __virtualname__ + return False + + +def _enqueue(revent): + ''' + Enqueue the event + ''' + __context__['inotify.que'].append(revent) + + +def _get_notifier(): + ''' + Check the context for the notifier and construct it if not present + ''' + if 'inotify.notifier' in __context__: + return __context__['inotify.notifier'] + __context__['inotify.que'] = collections.deque() + wm = pyinotify.WatchManager() + __context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue) + return __context__['inotify.notifier'] + + +def beacon(config): + ''' + Watch the configured files + ''' + ret = [] + notifier = _get_notifier() + wm = notifier._watch_manager + # Read in existing events + # remove watcher files that are not in the config + # update all existing files with watcher settings + # return original data + if notifier.check_events(1): + notifier.read_events() + notifier.process_events() + while __context__['inotify.que']: + sub = {} + event = __context__['inotify.que'].popleft() + sub['tag'] = event.path + sub['path'] = event.pathname + sub['change'] = event.maskname + ret.append(sub) + + current = set() + for wd in wm.watches: + current.add(wm.watches[wd].path) + need = set(config) + for path in current.difference(need): + # These need to be removed + for wd in wm.watches: + if path == wm.watches[wd].path: + wm.rm_watch(wd) + for path in config: + if isinstance(config[path], dict): + mask = config[path].get('mask', DEFAULT_MASK) + rec = config[path].get('rec', False) + auto_add = 
config[path].get('auto_add', False) + else: + mask = DEFAULT_MASK + rec = False + auto_add = False + # TODO: make the config handle more options + if path not in current: + wm.add_watch( + path, + mask, + rec=rec, + auto_add=auto_add) + else: + for wd in wm.watches: + if path == wm.watches[wd].path: + update = False + if wm.watches[wd].mask != mask: + update = True + if wm.watches[wd].auto_add != auto_add: + update = True + if update: + wm.update_watch( + wd, + mask=mask, + rec=rec, + auto_add=auto_add) + return ret diff --git a/salt/beacons/journald.py b/salt/beacons/journald.py new file mode 100644 index 0000000000..9f02c900ce --- /dev/null +++ b/salt/beacons/journald.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +''' +A simple beacon to watch journald for specific entries +''' +# Import salt libs +import salt.utils +import salt.utils.cloud +import salt.ext.six + +# Import third party libs +try: + import systemd.journal + HAS_SYSTEMD = True +except ImportError: + HAS_SYSTEMD = False + +__virtualname__ = 'journald' + + +def __virtual__(): + if HAS_SYSTEMD: + return __virtualname__ + return False + + +def _get_journal(): + ''' + Return the active running journal object + ''' + if 'systemd.journald' in __context__: + return __context__['systemd.journald'] + __context__['systemd.journald'] = systemd.journal.Reader() + # get to the end of the journal + __context__['systemd.journald'].seek_tail() + __context__['systemd.journald'].get_previous() + return __context__['systemd.journald'] + + +def beacon(config): + ''' + The journald beacon allows for the systemd jornal to be parsed and linked + objects to be turned into events. + + This beacons config will return all sshd jornal entries + + .. code-block:: yaml + + beacons: + journald: + sshd: + SYSLOG_IDENTIFIER: sshd + PRIORITY: 6 + ''' + ret = [] + journal = _get_journal() + while True: + cur = journal.get_next() + if not cur: + break + for name in config: + n_flag = 0 + for key in config[name]: + if isinstance(key, salt.ext.six.string_types): + key = salt.utils.sdecode(key) + if key in cur: + if config[name][key] == cur[key]: + n_flag += 1 + if n_flag == len(config[name]): + # Match! + ret.append(salt.utils.cloud.simple_types_filter(cur)) + return ret diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py new file mode 100644 index 0000000000..8c579cbb0f --- /dev/null +++ b/salt/beacons/wtmp.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +''' +Beacon to fire events at login of users as registered in the wtmp file +''' +# Import python libs +import os +import struct + +__virtualname__ = 'wtmp' +WTMP = '/var/log/wtmp' +FMT = '': {'old': '', @@ -530,6 +535,8 @@ def install(name=None, cmd = cmd + ['-o', 'DPkg::Options::=--force-confdef'] if 'install_recommends' in kwargs and not kwargs['install_recommends']: cmd.append('--no-install-recommends') + if 'only_upgrade' in kwargs and kwargs['only_upgrade']: + cmd.append('--only-upgrade') if skip_verify: cmd.append('--allow-unauthenticated') if fromrepo: diff --git a/salt/modules/boto_dynamodb.py b/salt/modules/boto_dynamodb.py new file mode 100644 index 0000000000..68dfd8dffd --- /dev/null +++ b/salt/modules/boto_dynamodb.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +''' +Connection module for Amazon DynamoDB + +.. versionadded:: 2015.2 + +:configuration: This module accepts explicit DynamoDB credentials but can also + utilize IAM roles assigned to the instance trough Instance Profiles. + Dynamic credentials are then automatically obtained from AWS API and no + further configuration is necessary. 
More Information available at:: + + http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + + If IAM roles are not used you need to specify them either in a pillar or + in the minion's config file:: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + + A region may also be specified in the configuration:: + + region: us-east-1 + + If a region is not specified, the default is us-east-1. + + It's also possible to specify key, keyid and region via a profile, either + as a passed in dict, or as a string to pull from pillars or minion config: + + myprofile: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 + +:depends: boto +''' + +# Import Python libs +import logging +import time + +logger = logging.getLogger(__name__) +logging.getLogger('boto').setLevel(logging.INFO) + +# Import third party libs +try: + import boto + import boto.dynamodb2 + from boto.dynamodb2.fields import HashKey, RangeKey + from boto.dynamodb2.fields import AllIndex, GlobalAllIndex + from boto.dynamodb2.table import Table + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from salt._compat import string_types + + +def __virtual__(): + ''' + Only load if boto libraries exist. + ''' + if not HAS_BOTO: + return False + return True + + +def _create_connection(region=None, key=None, keyid=None, profile=None): + ''' + Get a boto connection to DynamoDB. + ''' + if profile: + if isinstance(profile, string_types): + _profile = __salt__['config.option'](profile) + elif isinstance(profile, dict): + _profile = profile + key = _profile.get('key', None) + keyid = _profile.get('keyid', None) + region = _profile.get('region', None) + + if not region and __salt__['config.option']('dynamodb.region'): + region = __salt__['config.option']('dynamodb.region') + + if not region: + region = 'us-east-1' + + if not key and __salt__['config.option']('dynamodb.key'): + key = __salt__['config.option']('dynamodb.key') + if not keyid and __salt__['config.option']('dynamodb.keyid'): + keyid = __salt__['config.option']('dynamodb.keyid') + + try: + conn = boto.dynamodb2.connect_to_region( + region, + aws_access_key_id=keyid, + aws_secret_access_key=key + ) + except boto.exception.NoAuthHandlerFound: + logger.error('No authentication credentials found when attempting to' + ' make boto dynamodb connection.') + return None + return conn + + +def create_table(table_name, region=None, key=None, keyid=None, profile=None, + read_capacity_units=None, write_capacity_units=None, + hash_key=None, hash_key_data_type=None, range_key=None, + range_key_data_type=None, local_indexes=None, + global_indexes=None): + ''' + Creates a DynamoDB table. 
+ + CLI example:: + + salt myminion boto_dynamodb.create_table table_name / + region=us-east-1 / + hash_key=id / + hash_key_data_type=N / + range_key=created_at / + range_key_data_type=N / + read_capacity_units=1 / + write_capacity_units=1 + ''' + schema = [] + primary_index_fields = [] + primary_index_name = '' + if hash_key: + hash_key_obj = HashKey(hash_key, data_type=hash_key_data_type) + schema.append(hash_key_obj) + primary_index_fields.append(hash_key_obj) + primary_index_name += hash_key + if range_key: + range_key_obj = RangeKey(range_key, data_type=range_key_data_type) + schema.append(range_key_obj) + primary_index_fields.append(range_key_obj) + primary_index_name += '_' + primary_index_name += range_key + primary_index_name += '_index' + throughput = { + 'read': read_capacity_units, + 'write': write_capacity_units + } + local_table_indexes = [] + # Add the table's key + local_table_indexes.append( + AllIndex(primary_index_name, parts=primary_index_fields) + ) + if local_indexes: + for index in local_indexes: + local_table_indexes.append(_extract_index(index)) + global_table_indexes = [] + if global_indexes: + for index in global_indexes: + global_table_indexes.append( + _extract_index(index, global_index=True) + ) + + conn = _create_connection(region, key, keyid, profile) + Table.create( + table_name, + schema=schema, + throughput=throughput, + indexes=local_table_indexes, + global_indexes=global_table_indexes, + connection=conn + ) + + # Table creation can take several seconds to propagate. + # We will check MAX_ATTEMPTS times. + MAX_ATTEMPTS = 30 + for i in range(MAX_ATTEMPTS): + if exists( + table_name, + region, + key, + keyid, + profile + ): + return True + else: + time.sleep(1) # sleep for one second and try again + return False + + +def exists(table_name, region=None, key=None, keyid=None, profile=None): + ''' + Check to see if a table exists. + + CLI example:: + + salt myminion boto_dynamodb.exists table_name region=us-east-1 + ''' + conn = _create_connection(region, key, keyid, profile) + tables = conn.list_tables() + return tables and table_name in tables['TableNames'] + + +def delete(table_name, region=None, key=None, keyid=None, profile=None): + ''' + Delete a DynamoDB table. + + CLI example:: + + salt myminion boto_dynamodb.delete table_name region=us-east-1 + ''' + conn = _create_connection(region, key, keyid, profile) + table = Table(table_name, connection=conn) + table.delete() + + # Table deletion can take several seconds to propagate. + # We will retry MAX_ATTEMPTS times. 
+ MAX_ATTEMPTS = 30 + for i in range(MAX_ATTEMPTS): + if not exists(table_name, region, key, keyid, profile): + return True + else: + time.sleep(1) # sleep for one second and try again + return False + + +def _extract_index(index_data, global_index=False): + ''' + Instantiates and returns an AllIndex object given a valid index + configuration + ''' + parsed_data = {} + keys = [] + + for key, value in index_data.iteritems(): + for item in value: + for field, data in item.iteritems(): + if field == 'hash_key': + parsed_data['hash_key'] = data + elif field == 'hash_key_data_type': + parsed_data['hash_key_data_type'] = data + elif field == 'range_key': + parsed_data['range_key'] = data + elif field == 'range_key_data_type': + parsed_data['range_key_data_type'] = data + elif field == 'name': + parsed_data['name'] = data + elif field == 'read_capacity_units': + parsed_data['read_capacity_units'] = data + elif field == 'write_capacity_units': + parsed_data['write_capacity_units'] = data + + if parsed_data['hash_key']: + keys.append( + HashKey( + parsed_data['hash_key'], + data_type=parsed_data['hash_key_data_type'] + ) + ) + if parsed_data['range_key']: + keys.append( + RangeKey( + parsed_data['range_key'], + data_type=parsed_data['range_key_data_type'] + ) + ) + if ( + global_index and + parsed_data['read_capacity_units'] and + parsed_data['write_capacity_units']): + parsed_data['throughput'] = { + 'read': parsed_data['read_capacity_units'], + 'write': parsed_data['write_capacity_units'] + } + if parsed_data['name'] and len(keys) > 0: + if global_index: + return GlobalAllIndex( + parsed_data['name'], + parts=keys, + throughput=parsed_data['throughput'] + ) + else: + return AllIndex( + parsed_data['name'], + parts=keys + ) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 92eec79bb1..ffae981375 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -163,7 +163,7 @@ def _run(cmd, output_loglevel='debug', runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=False, env=None, clean_env=False, rstrip=True, @@ -313,6 +313,9 @@ def _run(cmd, run_env = os.environ.copy() run_env.update(env) + if python_shell is None: + python_shell = False + kwargs = {'cwd': cwd, 'shell': python_shell, 'env': run_env, @@ -471,7 +474,7 @@ def _run_quiet(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=False, env=None, template=None, umask=None, @@ -502,7 +505,7 @@ def _run_all_quiet(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=False, env=None, template=None, umask=None, @@ -533,7 +536,7 @@ def run(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, clean_env=False, template=None, @@ -552,6 +555,19 @@ def run(cmd, Note that ``env`` represents the environment variables for the command, and should be formatted as a dict, or a YAML string which resolves to a dict. + ************************************************************************* + WARNING: This function does not process commands through a shell + unless the python_shell flag is set to True. This means that any + shell-specific functionality such as 'echo' or the use of pipes, + redirection or &&, should either be migrated to cmd.shell or + have the python_shell=True flag set here. + + The use of python_shell=True means that the shell will accept _any_ input + including potentially malicious commands such as 'good_command;rm -rf /'. 
+ Be absolutely certain that you have sanitized your input prior to using + python_shell=True + ************************************************************************* + CLI Example: .. code-block:: bash @@ -589,6 +605,12 @@ def run(cmd, salt '*' cmd.run cmd='sed -e s/=/:/g' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass ret = _run(cmd, runas=runas, shell=shell, @@ -637,12 +659,101 @@ def run(cmd, return ret['stdout'] +def shell(cmd, + cwd=None, + stdin=None, + runas=None, + shell=DEFAULT_SHELL, + env=None, + clean_env=False, + template=None, + rstrip=True, + umask=None, + output_loglevel='debug', + quiet=False, + timeout=None, + reset_system_locale=True, + ignore_retcode=False, + saltenv='base', + use_vt=False, + **kwargs): + ''' + Execute the passed command and return the output as a string. + + ************************************************************ + WARNING: This passes the cmd argument directly to the shell + without any further processing! Be absolutely sure that you + have properly santized the command passed to this function + and do not use untrusted inputs. + ************************************************************ + + Note that ``env`` represents the environment variables for the command, and + should be formatted as a dict, or a YAML string which resolves to a dict. + + CLI Example: + + .. code-block:: bash + + salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'" + + The template arg can be set to 'jinja' or another supported template + engine to render the command arguments before execution. + For example: + + .. code-block:: bash + + salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'" + + Specify an alternate shell with the shell parameter: + + .. code-block:: bash + + salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell' + + A string of standard input can be specified for the command to be run using + the ``stdin`` parameter. This can be useful in cases where sensitive + information must be read from standard input.: + + .. code-block:: bash + + salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' + + If an equal sign (``=``) appears in an argument to a Salt command it is + interpreted as a keyword argument in the format ``key=val``. That + processing can be bypassed in order to pass an equal sign through to the + remote shell command by manually specifying the kwarg: + + .. 
code-block:: bash + + salt '*' cmd.run cmd='sed -e s/=/:/g' + ''' + run(cmd, + cwd=cwd, + stdin=stdin, + runas=runas, + shell=shell, + env=env, + clean_env=clean_env, + template=template, + rstrip=rstrip, + umask=umask, + output_loglevel=output_loglevel, + quiet=quiet, + timeout=timeout, + reset_system_locale=reset_system_locale, + ignore_retcode=ignore_retcode, + saltenv=saltenv, + use_vt=use_vt, + python_shell=True, + **kwargs) + + def run_stdout(cmd, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, clean_env=False, template=None, @@ -683,6 +794,12 @@ def run_stdout(cmd, salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass ret = _run(cmd, runas=runas, cwd=cwd, @@ -723,7 +840,7 @@ def run_stderr(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, clean_env=False, template=None, @@ -764,6 +881,12 @@ def run_stderr(cmd, salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass ret = _run(cmd, runas=runas, cwd=cwd, @@ -804,7 +927,7 @@ def run_all(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, clean_env=False, template=None, @@ -845,6 +968,12 @@ def run_all(cmd, salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass ret = _run(cmd, runas=runas, cwd=cwd, @@ -885,7 +1014,7 @@ def retcode(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, clean_env=False, template=None, @@ -964,7 +1093,7 @@ def _retcode_quiet(cmd, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=False, env=None, clean_env=False, template=None, @@ -1005,7 +1134,7 @@ def script(source, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, template=None, umask=None, @@ -1044,6 +1173,13 @@ def script(source, salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass + def _cleanup_tempfile(path): try: os.remove(path) @@ -1110,7 +1246,7 @@ def script_retcode(source, stdin=None, runas=None, shell=DEFAULT_SHELL, - python_shell=True, + python_shell=None, env=None, template='jinja', umask=None, @@ -1147,6 +1283,12 @@ def script_retcode(source, salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' + try: + if __opts__.get('cmd_safe', True) is False and python_shell is None: + # Override-switch for python_shell + python_shell = True + except NameError: + pass if isinstance(__env__, string_types): salt.utils.warn_until( 'Boron', diff --git a/salt/modules/http.py b/salt/modules/http.py index 5ed1be3f19..635f4725eb 100644 --- a/salt/modules/http.py +++ b/salt/modules/http.py @@ -24,3 +24,45 @@ def query(url, **kwargs): data='somecontent' ''' return 
salt.utils.http.query(url=url, opts=__opts__, **kwargs) + + +def update_ca_bundle(target=None, source=None, merge_files=None): + ''' + Update the local CA bundle file from a URL + + CLI Example: + + .. code-block:: bash + + salt '*' http.update_ca_bundle + salt '*' http.update_ca_bundle target=/path/to/cacerts.pem + salt '*' http.update_ca_bundle source=https://example.com/cacerts.pem + + If the ``target`` is not specified, it will be pulled from the ``ca_cert`` + configuration variable available to the minion. If it cannot be found there, + it will be placed at ``<>/cacerts.pem``. + + If the ``source`` is not specified, it will be pulled from the + ``ca_cert_url`` configuration variable available to the minion. If it cannot + be found, it will be downloaded from the cURL website, using an http (not + https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED! + + ``merge_files`` may also be specified, which includes a string or list of + strings representing a file or files to be appended to the end of the CA + bundle, once it is downloaded. + + CLI Example: + + .. code-block:: bash + + salt '*' http.update_ca_bundle merge_files=/path/to/mycert.pem + ''' + if target is None: + target = __salt__['config.get']('ca_bundle', None) + + if source is None: + source = __salt__['config.get']('ca_bundle_url', None) + + return salt.utils.http.update_ca_bundle( + target, source, __opts__, merge_files + ) diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py index cd151a9633..255dab4911 100644 --- a/salt/modules/localemod.py +++ b/salt/modules/localemod.py @@ -7,10 +7,12 @@ from __future__ import absolute_import # Import python libs import logging import re +import os # Import salt libs import salt.utils import salt.ext.six as six +import salt.utils.decorators as decorators log = logging.getLogger(__name__) @@ -173,41 +175,62 @@ def avail(locale): salt '*' locale.avail 'en_US.UTF-8' ''' - normalized_locale = _normalize_locale(locale) + try: + normalized_locale = _normalize_locale(locale) + except IndexError: + log.error('Unable to validate locale "{0}"'.format(locale)) + return False avail_locales = __salt__['locale.list_avail']() locale_exists = next((True for x in avail_locales if _normalize_locale(x.strip()) == normalized_locale), False) return locale_exists -def gen_locale(locale): +@decorators.which('locale-gen') +def gen_locale(locale, charmap=None): ''' Generate a locale. .. versionadded:: 2014.7.0 + :param locale: Any locale listed in /usr/share/i18n/locales or + /usr/share/i18n/SUPPORTED for debian and gentoo based distros + + :param charmap: debian and gentoo based systems require the charmap to be + specified independently of the locale. + CLI Example: .. 
code-block:: bash - salt '*' locale.gen_locale 'en_US.UTF-8' + salt '*' locale.gen_locale en_US.UTF-8 + salt '*' locale.gen_locale en_US.UTF-8 UTF-8 # debian and gentoo only ''' - # validate the supplied locale - valid = __salt__['file.replace']( - '/usr/share/i18n/SUPPORTED', - '^{0}$'.format(locale), - '^{0}$'.format(locale), - search_only=True - ) + on_debian = __grains__.get('os') == 'Debian' + on_gentoo = __grains__.get('os_family') == 'Gentoo' + + if on_debian or on_gentoo: + if not charmap: + log.error('On debian and gentoo systems you must provide a charmap') + return False + + search = '/usr/share/i18n/SUPPORTED' + locale_format = '{0} {1}'.format(locale, charmap) + valid = __salt__['file.search'](search, '^{0}$'.format(locale_format)) + else: + search = '/usr/share/i18n/locales' + locale_format = locale + valid = locale_format in os.listdir(search) + if not valid: - log.error('The provided locale "{0}" is invalid'.format(locale)) + log.error('The provided locale "{0}" is not found in {1}'.format(locale, search)) return False - if __grains__.get('os') == 'Debian' or __grains__.get('os_family') == 'Gentoo': + if on_debian or on_gentoo: __salt__['file.replace']( '/etc/locale.gen', - '# {0} '.format(locale), - '{0} '.format(locale), + r'^#\s*{0}$'.format(locale_format), + '{0}'.format(locale_format), append_if_not_found=True ) elif __grains__.get('os') == 'Ubuntu': @@ -222,13 +245,9 @@ def gen_locale(locale): 'locale-gen' ) - if __grains__.get('os_family') == 'Gentoo': - return __salt__['cmd.retcode']( - 'locale-gen --generate "{0}"'.format(locale), - python_shell=False - ) - else: - return __salt__['cmd.retcode']( - 'locale-gen "{0}"'.format(locale), - python_shell=False - ) + cmd = ['locale-gen'] + if on_gentoo: + cmd.append('--generate') + cmd.append(locale_format) + + return __salt__['cmd.retcode'](cmd, python_shell=False) diff --git a/salt/modules/status.py b/salt/modules/status.py index c1bdd62bb6..0174dc12ff 100644 --- a/salt/modules/status.py +++ b/salt/modules/status.py @@ -156,33 +156,65 @@ def cpustats(): salt '*' status.cpustats ''' - procf = '/proc/stat' - if not os.path.isfile(procf): - return {} - stats = salt.utils.fopen(procf, 'r').read().splitlines() - ret = {} - for line in stats: - if not line: - continue - comps = line.split() - if comps[0] == 'cpu': - ret[comps[0]] = {'idle': _number(comps[4]), - 'iowait': _number(comps[5]), - 'irq': _number(comps[6]), - 'nice': _number(comps[2]), - 'softirq': _number(comps[7]), - 'steal': _number(comps[8]), - 'system': _number(comps[3]), - 'user': _number(comps[1])} - elif comps[0] == 'intr': - ret[comps[0]] = {'total': _number(comps[1]), - 'irqs': [_number(x) for x in comps[2:]]} - elif comps[0] == 'softirq': - ret[comps[0]] = {'total': _number(comps[1]), - 'softirqs': [_number(x) for x in comps[2:]]} - else: - ret[comps[0]] = _number(comps[1]) - return ret + def linux_cpustats(): + ''' + linux specific implementation of cpustats + ''' + procf = '/proc/stat' + if not os.path.isfile(procf): + return {} + stats = salt.utils.fopen(procf, 'r').read().splitlines() + ret = {} + for line in stats: + if not line: + continue + comps = line.split() + if comps[0] == 'cpu': + ret[comps[0]] = {'idle': _number(comps[4]), + 'iowait': _number(comps[5]), + 'irq': _number(comps[6]), + 'nice': _number(comps[2]), + 'softirq': _number(comps[7]), + 'steal': _number(comps[8]), + 'system': _number(comps[3]), + 'user': _number(comps[1])} + elif comps[0] == 'intr': + ret[comps[0]] = {'total': _number(comps[1]), + 'irqs': [_number(x) for x in 
comps[2:]]} + elif comps[0] == 'softirq': + ret[comps[0]] = {'total': _number(comps[1]), + 'softirqs': [_number(x) for x in comps[2:]]} + else: + ret[comps[0]] = _number(comps[1]) + return ret + + def freebsd_cpustats(): + ''' + freebsd specific implementation of cpustats + ''' + vmstat = __salt__['cmd.run']('vmstat -P').splitlines() + vm0 = vmstat[0].split() + cpu0loc = vm0.index('cpu0') + vm1 = vmstat[1].split() + usloc = vm1.index('us') + vm2 = vmstat[2].split() + cpuctr = 0 + ret = {} + for cpu in vm0[cpu0loc:]: + ret[cpu] = {'us': _number(vm2[usloc + 3 * cpuctr]), + 'sy': _number(vm2[usloc + 1 + 3 * cpuctr]), + 'id': _number(vm2[usloc + 2 + 3 * cpuctr]), } + cpuctr += 1 + return ret + + # dict that return a function that does the right thing per platform + get_version = { + 'Linux': linux_cpustats, + 'FreeBSD': freebsd_cpustats, + } + + errmsg = 'This method is unsupported on the current operating system!' + return get_version.get(__grains__['kernel'], lambda: errmsg)() def meminfo(): @@ -195,22 +227,53 @@ def meminfo(): salt '*' status.meminfo ''' - procf = '/proc/meminfo' - if not os.path.isfile(procf): - return {} - stats = salt.utils.fopen(procf, 'r').read().splitlines() - ret = {} - for line in stats: - if not line: - continue - comps = line.split() - comps[0] = comps[0].replace(':', '') - ret[comps[0]] = { - 'value': comps[1], - } - if len(comps) > 2: - ret[comps[0]]['unit'] = comps[2] - return ret + def linux_meminfo(): + ''' + linux specific implementation of meminfo + ''' + procf = '/proc/meminfo' + if not os.path.isfile(procf): + return {} + stats = salt.utils.fopen(procf, 'r').read().splitlines() + ret = {} + for line in stats: + if not line: + continue + comps = line.split() + comps[0] = comps[0].replace(':', '') + ret[comps[0]] = { + 'value': comps[1], + } + if len(comps) > 2: + ret[comps[0]]['unit'] = comps[2] + return ret + + def freebsd_meminfo(): + ''' + freebsd specific implementation of meminfo + ''' + sysctlvm = __salt__['cmd.run']('sysctl vm').splitlines() + sysctlvm = [x for x in sysctlvm if x.startswith('vm')] + sysctlvm = [x.split(':') for x in sysctlvm] + sysctlvm = [[y.strip() for y in x] for x in sysctlvm] + sysctlvm = [x for x in sysctlvm if x[1]] # If x[1] not empty + + ret = {} + for line in sysctlvm: + ret[line[0]] = line[1] + # Special handling for vm.total as it's especially important + sysctlvmtot = __salt__['cmd.run']('sysctl -n vm.vmtotal').splitlines() + sysctlvmtot = [x for x in sysctlvmtot if x] + ret['vm.vmtotal'] = sysctlvmtot + return ret + # dict that return a function that does the right thing per platform + get_version = { + 'Linux': linux_meminfo, + 'FreeBSD': freebsd_meminfo, + } + + errmsg = 'This method is unsupported on the current operating system!' 
+ return get_version.get(__grains__['kernel'], lambda: errmsg)() def cpuinfo(): @@ -223,21 +286,48 @@ def cpuinfo(): salt '*' status.cpuinfo ''' - procf = '/proc/cpuinfo' - if not os.path.isfile(procf): - return {} - stats = salt.utils.fopen(procf, 'r').read().splitlines() - ret = {} - for line in stats: - if not line: - continue - comps = line.split(':') - comps[0] = comps[0].strip() - if comps[0] == 'flags': - ret[comps[0]] = comps[1].split() - else: + def linux_cpuinfo(): + ''' + linux specific cpuinfo implementation + ''' + procf = '/proc/cpuinfo' + if not os.path.isfile(procf): + return {} + stats = salt.utils.fopen(procf, 'r').read().splitlines() + ret = {} + for line in stats: + if not line: + continue + comps = line.split(':') + comps[0] = comps[0].strip() + if comps[0] == 'flags': + ret[comps[0]] = comps[1].split() + else: + ret[comps[0]] = comps[1].strip() + return ret + + def freebsd_cpuinfo(): + ''' + freebds specific cpuinfo implementation + ''' + freebsd_cmd = 'sysctl hw.model hw.ncpu' + ret = {} + for line in __salt__['cmd.run'](freebsd_cmd).splitlines(): + if not line: + continue + comps = line.split(':') + comps[0] = comps[0].strip() ret[comps[0]] = comps[1].strip() - return ret + return ret + + # dict that returns a function that does the right thing per platform + get_version = { + 'Linux': linux_cpuinfo, + 'FreeBSD': freebsd_cpuinfo, + } + + errmsg = 'This method is unsupported on the current operating system!' + return get_version.get(__grains__['kernel'], lambda: errmsg)() def diskstats(): @@ -370,12 +460,7 @@ def nproc(): salt '*' status.nproc ''' - data = __salt__['cmd.run']('nproc') - try: - ret = int(data.strip()) - except Exception: - return 0 - return ret + return __grains__.get('num_cpus', 0) def netstats(): @@ -436,7 +521,7 @@ def netdev(): comps = line.split() # Fix lines like eth0:9999..' comps[0] = line.split(':')[0].strip() - #Support lines both like eth0:999 and eth0: 9999 + # Support lines both like eth0:999 and eth0: 9999 comps.insert(1, line.split(':')[1].strip().split()[0]) ret[comps[0]] = {'iface': comps[0], 'rx_bytes': _number(comps[1]), @@ -548,12 +633,23 @@ def version(): salt '*' status.version ''' - procf = '/proc/version' - if not os.path.isfile(procf): - return {} - ret = salt.utils.fopen(procf, 'r').read().strip() + def linux_version(): + ''' + linux specific implementation of version + ''' + procf = '/proc/version' + if not os.path.isfile(procf): + return {} + return salt.utils.fopen(procf, 'r').read().strip() - return ret + # dict that returns a function that does the right thing per platform + get_version = { + 'Linux': linux_version, + 'FreeBSD': lambda: __salt__['cmd.run']('sysctl -n kern.version'), + } + + errmsg = 'This method is unsupported on the current operating system!' 
+ return get_version.get(__grains__['kernel'], lambda: errmsg)() def master(master=None, connected=True): diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py index ad309f4f9c..0c78fc2452 100644 --- a/salt/netapi/rest_cherrypy/app.py +++ b/salt/netapi/rest_cherrypy/app.py @@ -1473,47 +1473,30 @@ class Events(object): def __init__(self): self.opts = cherrypy.config['saltopts'] - self.auth = salt.auth.LoadAuth(self.opts) self.resolver = salt.auth.Resolver(self.opts) - def _is_valid_salt_token(self, salt_token): + def _is_valid_token(self, auth_token): ''' - Check if this is a valid salt master token - More on salt master token generation can - be found at - http://docs.saltstack.com/en/latest/topics/eauth/index.html#tokens + Check if this is a valid salt-api token or valid Salt token - Returns - True if this token is a valid salt token - False otherwise - ''' - if salt_token and self.resolver.get_token(salt_token): - return True - return False + salt-api tokens are regular session tokens that tie back to a real Salt + token. Salt tokens are tokens generated by Salt's eauth system. - def _is_valid_salt_api_token(self, salt_api_token): + :return bool: True if valid, False if not valid. ''' - Check if this is a valid salt api token - Salt API tokens are generated on Login - - Returns - True if this token is a valid salt api token - False otherwise - ''' - if not salt_api_token: + if auth_token is None: return False - # Pulling the session token from an URL param is a workaround for - # browsers not supporting CORS in the EventSource API. - if salt_api_token: - orig_sesion, _ = cherrypy.session.cache.get(salt_api_token, - ({}, None)) - salt_token = orig_sesion.get('token') - else: - salt_token = cherrypy.session.get('token') + # First check if the given token is in our session table; if so it's a + # salt-api token and we need to get the Salt token from there. + orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None)) + # If it's not in the session table, assume it's a regular Salt token. + salt_token = orig_sesion.get('token', auth_token) - # Manually verify the token - if salt_token and self.auth.get_tok(salt_token): + # The eauth system does not currently support perms for the event + # stream, so we're just checking if the token exists not if the token + # allows access. + if salt_token and self.resolver.get_token(salt_token): return True return False @@ -1530,21 +1513,21 @@ class Events(object): :status 200: |200| :status 401: |401| :status 406: |406| + :query token: **optional** parameter containing the token + ordinarily supplied via the X-Auth-Token header in order to + allow cross-domain requests in browsers that do not include + CORS support in the EventSource API. E.g., + ``curl -NsS localhost:8000/events?token=308650d`` + :query salt_token: **optional** parameter containing a raw Salt + *eauth token* (not to be confused with the token returned from + the /login URL). E.g., + ``curl -NsS localhost:8000/events?salt_token=30742765`` **Example request:** .. code-block:: bash - curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 - - Or you can pass the token sent by cherrypy's - `/login` endpoint (these are different tokens). - :ref:`salt-token-generation` describes the process of obtaining a - Salt token. - - .. code-block:: bash - - curl -NsS localhost:8000/events?token=308650dbd728d8405a32ac9c2b2c1ed7705222bc + curl -NsS localhost:8000/events .. code-block:: http @@ -1569,9 +1552,7 @@ class Events(object): .. 
code-block:: javascript - var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75'); - // Salt token works as well! - // var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035'); + var source = new EventSource('/events'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug(e.data) }; @@ -1581,16 +1562,6 @@ class Events(object): .. code-block:: javascript var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true}); - // You can supply the salt token as well - var source = new EventSource('/events?salt_token=307427657b16a70aed360a46c5370035', {withCredentials: true}); - - Some browser clients lack CORS support for the ``EventSource()`` API. Such - clients may instead pass the :mailheader:`X-Auth-Token` value as an URL - parameter: - - .. code-block:: bash - - curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. @@ -1605,7 +1576,7 @@ class Events(object): .. code-block:: bash - curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\ + curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done @@ -1614,7 +1585,7 @@ class Events(object): .. code-block:: bash - curl -NsS localhost:8000/events?salt_token=307427657b16a70aed360a46c5370035 |\ + curl -NsS localhost:8000/events |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } @@ -1624,9 +1595,11 @@ class Events(object): tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}} ''' - if (not (self._is_valid_salt_api_token(token) or - self._is_valid_salt_token(salt_token))): + cookies = cherrypy.request.cookie + auth_token = token or salt_token or ( + cookies['session_id'].value if 'session_id' in cookies else None) + if not self._is_valid_token(auth_token): raise cherrypy.HTTPError(401) # Release the session lock before starting the long-running response diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index c0d33e411d..0de25ab81a 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -165,7 +165,6 @@ class Pillar(object): saltenv = env opts = dict(opts_in) opts['file_roots'] = opts['pillar_roots'] - opts['__pillar'] = True opts['file_client'] = 'local' if not grains: opts['grains'] = {} diff --git a/salt/runners/doc.py b/salt/runners/doc.py index 930c4d566b..8dfedf4703 100644 --- a/salt/runners/doc.py +++ b/salt/runners/doc.py @@ -72,3 +72,25 @@ def execution(): ret = dict(list(i)) return ret + + +# Still need to modify some of the backend for auth checks to make this work +def __list_functions(user=None): + ''' + List all of the functions, optionally pass in a user to evaluate + permissions on + ''' + client = salt.client.get_local_client(__opts__['conf_file']) + funcs = {} + gener = client.cmd_iter( + '*', + 'sys.list_functions', + timeout=__opts__['timeout']) + for ret in gener: + funcs.update(ret) + if not user: + __jid_event__.fire_event({'message': funcs}, 'progress') + return funcs + for _, val in __opts__['external_auth'].items(): + if user in val: + pass diff --git a/salt/runners/http.py b/salt/runners/http.py index 3c1185dab7..415f25a8d8 100644 --- a/salt/runners/http.py +++ 
b/salt/runners/http.py @@ -34,3 +34,39 @@ def query(url, output=True, **kwargs): ret = salt.utils.http.query(url=url, opts=__opts__, **kwargs) return ret + + +def update_ca_bundle(target=None, source=None, merge_files=None): + ''' + Update the local CA bundle file from a URL + + CLI Example: + + .. code-block:: bash + + salt-run http.update_ca_bundle + salt-run http.update_ca_bundle target=/path/to/cacerts.pem + salt-run http.update_ca_bundle source=https://example.com/cacerts.pem + + If the ``target`` is not specified, it will be pulled from the ``ca_cert`` + configuration variable available to the master. If it cannot be found there, + it will be placed at ``<>/cacerts.pem``. + + If the ``source`` is not specified, it will be pulled from the + ``ca_cert_url`` configuration variable available to the master. If it cannot + be found, it will be downloaded from the cURL website, using an http (not + https) URL. USING THE DEFAULT URL SHOULD BE AVOIDED! + + ``merge_files`` may also be specified, which includes a string or list of + strings representing a file or files to be appended to the end of the CA + bundle, once it is downloaded. + + CLI Example: + + .. code-block:: bash + + salt-run http.update_ca_bundle merge_files=/path/to/mycert.pem + ''' + return salt.utils.http.update_ca_bundle( + target, source, __opts__, merge_files + ) diff --git a/salt/state.py b/salt/state.py index bf4fc509c1..d3ef1691aa 100644 --- a/salt/state.py +++ b/salt/state.py @@ -572,6 +572,7 @@ class State(object): self.pre = {} self.__run_num = 0 self.jid = jid + self.instance_id = str(id(self)) def _gather_pillar(self): ''' @@ -1505,6 +1506,7 @@ class State(object): # state module to change these at runtime. '__low__': immutabletypes.freeze(low), '__running__': immutabletypes.freeze(running) if running else {}, + '__instance_id__': self.instance_id, '__lowstate__': immutabletypes.freeze(chunks) if chunks else {} } @@ -2046,6 +2048,23 @@ class State(object): return errors ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items())) ret = self.call_listen(chunks, ret) + + def _cleanup_accumulator_data(): + accum_data_path = os.path.join( + salt.utils.get_accumulator_dir(self.opts['cachedir']), + self.instance_id + ) + try: + os.remove(accum_data_path) + log.debug('Deleted accumulator data file {0}'.format( + accum_data_path) + ) + except OSError: + log.debug('File {0} does not exist, no need to cleanup.'.format( + accum_data_path) + ) + _cleanup_accumulator_data() + return ret def render_template(self, high, template): diff --git a/salt/states/boto_dynamodb.py b/salt/states/boto_dynamodb.py new file mode 100644 index 0000000000..1b8f04d9eb --- /dev/null +++ b/salt/states/boto_dynamodb.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +''' +Manage DynamoDB Tables +================= + +.. versionadded:: 2015.2 + +Create and destroy DynamoDB tables. Be aware that this interacts with Amazon's +services, and so may incur charges. + +This module uses ``boto``, which can be installed via package, or pip. + +This module accepts explicit DynamoDB credentials but can also utilize +IAM roles assigned to the instance through Instance Profiles. Dynamic +credentials are then automatically obtained from AWS API and no further +configuration is necessary. More information available `here +`_. + +If IAM roles are not used you need to specify them either in a pillar file or +in the minion's config file: + +.. 
code-block:: yaml + + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 + +It's also possible to specify ``key``, ``keyid`` and ``region`` via a +profile, either passed in as a dict, or as a string to pull from +pillars or minion config: + +.. code-block:: yaml + + myprofile: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 + +.. code-block:: yaml + +Ensure DynamoDB table does not exist: + boto_dynamodb.absent: + - table_name: new_table + - keyid: GKTADJGHEIQSXMKKRBJ08H + - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + - region: us-east-1 + +Ensure DynamoDB table exists: + boto_dynamodb.present: + - table_name: new_table + - read_capacity_units: 1 + - write_capacity_units: 2 + - hash_key: primary_id + - hash_key_data_type: N + - range_key: start_timestamp + - range_key_data_type: N + - keyid: GKTADJGHEIQSXMKKRBJ08H + - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + - region: us-east-1 + - local_indexes: + - index: + - name: "primary_id_end_timestamp_index" + - hash_key: primary_id + - hash_key_data_type: N + - range_key: end_timestamp + - range_key_data_type: N + - global_indexes: + - index: + - name: "name_end_timestamp_index" + - hash_key: name + - hash_key_data_type: S + - range_key: end_timestamp + - range_key_data_type: N + - read_capacity_units: 3 + - write_capacity_units: 4 + + +''' +# Import Python libs +import sys +import logging + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(name)s %(levelname)s %(message)s', + stream=sys.stdout +) +log = logging.getLogger() + + +def __virtual__(): + ''' + Only load if boto_dynamodb is available. + ''' + ret = 'boto_dynamodb' if 'boto_dynamodb.exists' in __salt__ else False + return ret + + +def present(table_name, + region=None, + key=None, + keyid=None, + profile=None, + read_capacity_units=None, + write_capacity_units=None, + hash_key=None, + hash_key_data_type=None, + range_key=None, + range_key_data_type=None, + local_indexes=None, + global_indexes=None): + ''' + Ensure the DynamoDB table exists. Note: all properties of the table + can only be set during table creation. Adding or changing + indexes or key schema cannot be done after table creation + + table_name + Name of the DynamoDB table + + region + Region to connect to. + + key + Secret key to be used. + + keyid + Access key to be used. + + profile + A dict with region, key and keyid, or a pillar key (string) + that contains a dict with region, key and keyid. + + read_capacity_units + The read throughput for this table + + write_capacity_units + The write throughput for this table + + hash_key + The name of the attribute that will be used as the hash key + for this table + + hash_key_data_type + The DynamoDB datatype of the hash key + + range_key + The name of the attribute that will be used as the range key + for this table + + range_key_data_type + The DynamoDB datatype of the range key + + local_indexes + The local indexes you would like to create + + global_indexes + The local indexes you would like to create + ''' + ret = {'name': table_name, 'result': None, 'comment': '', 'changes': {}} + exists = __salt__['boto_dynamodb.exists']( + table_name, + region, + key, + keyid, + profile + ) + if exists: + ret['comment'] = 'DynamoDB table {0} already exists. 
\ + Nothing to change.'.format(table_name) + return ret + + if __opts__['test']: + ret['comment'] = 'DynamoDB table {0} is set to be created \ + '.format(table_name) + return ret + + is_created = __salt__['boto_dynamodb.create_table']( + table_name, + region, + key, + keyid, + profile, + read_capacity_units, + write_capacity_units, + hash_key, + hash_key_data_type, + range_key, + range_key_data_type, + local_indexes, + global_indexes + ) + ret['result'] = is_created + + if is_created: + ret['comment'] = 'DynamoDB table {0} created successfully \ + '.format(table_name) + ret['changes'].setdefault('old', None) + changes = {} + changes['table'] = table_name + changes['read_capacity_units'] = read_capacity_units + changes['write_capacity_units'] = write_capacity_units + changes['hash_key'] = hash_key + changes['hash_key_data_type'] = hash_key_data_type + changes['range_key'] = range_key + changes['range_key_data_type'] = range_key_data_type + changes['local_indexes'] = local_indexes + changes['global_indexes'] = global_indexes + ret['changes']['new'] = changes + else: + ret['comment'] = 'Failed to create table {0}'.format(table_name) + return ret + + +def absent(table_name, + region=None, + key=None, + keyid=None, + profile=None): + ''' + Ensure the DynamoDB table does not exist. + + table_name + Name of the DynamoDB table. + + region + Region to connect to. + + key + Secret key to be used. + + keyid + Access key to be used. + + profile + A dict with region, key and keyid, or a pillar key (string) + that contains a dict with region, key and keyid. + ''' + ret = {'name': table_name, 'result': None, 'comment': '', 'changes': {}} + exists = __salt__['boto_dynamodb.exists']( + table_name, + region, + key, + keyid, + profile + ) + if not exists: + ret['comment'] = 'DynamoDB table {0} does not exist'.format(table_name) + ret['result'] = True + return ret + + if __opts__['test']: + ret['comment'] = 'DynamoDB table {0} is set to be deleted \ + '.format(table_name) + ret['result'] = None + return ret + + is_deleted = __salt__['boto_dynamodb.delete'](table_name, region, key, keyid, profile) + if is_deleted: + ret['comment'] = 'Deleted DynamoDB table {0}'.format(table_name) + ret['changes'].setdefault('old', 'Table {0} exists'.format(table_name)) + ret['changes'].setdefault('new', 'Table {0} deleted'.format(table_name)) + ret['result'] = True + else: + ret['comment'] = 'Failed to delete DynamoDB table {0} \ + '.format(table_name) + ret['result'] = False + return ret diff --git a/salt/states/file.py b/salt/states/file.py index 6f9aa86745..cdc2ecb8b2 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -245,6 +245,7 @@ import traceback import yaml # Import salt libs +import salt.payload import salt.utils import salt.utils.templates from salt.exceptions import CommandExecutionError @@ -258,8 +259,39 @@ log = logging.getLogger(__name__) COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?' -_ACCUMULATORS = {} -_ACCUMULATORS_DEPS = {} + +def _get_accumulator_filepath(): + ''' + Return accumulator data path. 
+ ''' + return os.path.join(salt.utils.get_accumulator_dir(__opts__['cachedir']), + __instance_id__) + + +def _load_accumulators(): + def _deserialize(path): + serial = salt.payload.Serial(__opts__) + ret = {'accumulators': {}, 'accumulators_deps': {}} + try: + with open(path, 'rb') as f: + loaded = serial.load(f) + return loaded if loaded else ret + except IOError: + return ret + + loaded = _deserialize(_get_accumulator_filepath()) + + return loaded['accumulators'], loaded['accumulators_deps'] + + +def _persist_accummulators(accumulators, accumulators_deps): + + accumm_data = {'accumulators': accumulators, + 'accumulators_deps': accumulators_deps} + + serial = salt.payload.Serial(__opts__) + with open(_get_accumulator_filepath(), 'w+b') as f: + serial.dump(accumm_data, f) def _check_user(user, group): @@ -961,7 +993,7 @@ def exists(name): if not name: return _error(ret, 'Must provide name to file.exists') if not os.path.exists(name): - return _error(ret, ('Specified path {0} does not exist').format(name)) + return _error(ret, 'Specified path {0} does not exist'.format(name)) ret['comment'] = 'Path {0} exists'.format(name) return ret @@ -984,7 +1016,7 @@ def missing(name): if not name: return _error(ret, 'Must provide name to file.missing') if os.path.exists(name): - return _error(ret, ('Specified path {0} exists').format(name)) + return _error(ret, 'Specified path {0} exists'.format(name)) ret['comment'] = 'Path {0} is missing'.format(name) return ret @@ -1348,10 +1380,11 @@ def managed(name, 'No changes made.'.format(name)) return ret - if name in _ACCUMULATORS: + accum_data, _ = _load_accumulators() + if name in accum_data: if not context: context = {} - context['accumulator'] = _ACCUMULATORS[name] + context['accumulator'] = accum_data[name] try: if __opts__['test']: @@ -2381,13 +2414,15 @@ def replace(name, if changes: ret['changes'] = {'diff': changes} - ret['comment'] = ('Changes were made' - if not __opts__['test'] else 'Changes would have been made') if __opts__['test']: ret['result'] = None + ret['comment'] = 'Changes would have been made' + else: + ret['result'] = True + ret['comment'] = 'Changes were made' else: - ret['comment'] = ('No changes were made' - if not __opts__['test'] else 'No changes would have been made') + ret['result'] = True + ret['comment'] = 'No changes needed to be made' return ret @@ -2507,11 +2542,12 @@ def blockreplace( if not check_res: return _error(ret, check_msg) - if name in _ACCUMULATORS: - accumulator = _ACCUMULATORS[name] + accum_data, accum_deps = _load_accumulators() + if name in accum_data: + accumulator = accum_data[name] # if we have multiple accumulators for a file, only apply the one # required at a time - deps = _ACCUMULATORS_DEPS.get(name, []) + deps = accum_deps.get(name, []) filtered = [a for a in deps if __low__['__id__'] in deps[a] and a in accumulator] if not filtered: @@ -3728,21 +3764,23 @@ def accumulated(name, filename, text, **kwargs): return ret if isinstance(text, string_types): text = (text,) - if filename not in _ACCUMULATORS: - _ACCUMULATORS[filename] = {} - if filename not in _ACCUMULATORS_DEPS: - _ACCUMULATORS_DEPS[filename] = {} - if name not in _ACCUMULATORS_DEPS[filename]: - _ACCUMULATORS_DEPS[filename][name] = [] + accum_data, accum_deps = _load_accumulators() + if filename not in accum_data: + accum_data[filename] = {} + if filename not in accum_deps: + accum_deps[filename] = {} + if name not in accum_deps[filename]: + accum_deps[filename][name] = [] for accumulator in deps: - 
_ACCUMULATORS_DEPS[filename][name].extend(six.itervalues(accumulator)) - if name not in _ACCUMULATORS[filename]: - _ACCUMULATORS[filename][name] = [] + accum_deps[filename][name].extend(six.itervalues(accumulator)) + if name not in accum_data[filename]: + accum_data[filename][name] = [] for chunk in text: - if chunk not in _ACCUMULATORS[filename][name]: - _ACCUMULATORS[filename][name].append(chunk) + if chunk not in accum_data[filename][name]: + accum_data[filename][name].append(chunk) ret['comment'] = ('Accumulator {0} for file {1} ' 'was charged by text'.format(name, filename)) + _persist_accummulators(accum_data, accum_deps) return ret diff --git a/salt/states/pkg.py b/salt/states/pkg.py index 121a39dd50..a751c361fe 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -782,6 +782,17 @@ def installed( pkg.installed: - install_recommends: False + only_upgrade + Only upgrade the packages, if they are already installed. Default is False. + Currently only works with APT based systems. + + .. versionadded:: Lithium + + .. code-block:: yaml + + httpd: + pkg.installed: + - only_upgrade: True ''' if isinstance(pkgs, list) and len(pkgs) == 0: @@ -1187,6 +1198,18 @@ def latest( pkg.latest: - install_recommends: False + only_upgrade + Only upgrade the packages, if they are already installed. Default is False. + Currently only works with APT based systems. + + .. versionadded:: Lithium + + .. code-block:: yaml + + httpd: + pkg.latest: + - only_upgrade: True + ''' rtag = __gen_rtag() refresh = bool( diff --git a/salt/states/sysctl.py b/salt/states/sysctl.py index c8355dbbf3..6791bd2331 100644 --- a/salt/states/sysctl.py +++ b/salt/states/sysctl.py @@ -80,7 +80,7 @@ def present(name, value, config=None): 'The value {0} is set to be changed to {1} ' return ret elif name in configured and name in current: - if str(value) == __salt__['sysctl.get'](name): + if str(value).split() == __salt__['sysctl.get'](name).split(): ret['result'] = True ret['comment'] = 'Sysctl value {0} = {1} is already set'.format( name, diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 9ad3ddcdac..4bd6dbc2a9 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -550,6 +550,18 @@ def required_modules_error(name, docstring): return msg.format(filename, ', '.join(modules)) +def get_accumulator_dir(cachedir): + ''' + Return the directory that accumulator data is stored in, creating it if it + doesn't exist. 
+ ''' + fn_ = os.path.join(cachedir, 'accumulator') + if not os.path.isdir(fn_): + # accumulator_dir is not present, create it + os.makedirs(fn_) + return fn_ + + def check_or_die(command): ''' Simple convenience function for modules to use for gracefully blowing up diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 447ccf2d13..ea6f208d2b 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -2025,7 +2025,7 @@ def simple_types_filter(data): simpletypes_keys = (str, six.text_type, int, long, float, bool) simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple]) - if isinstance(data, list): + if isinstance(data, (list, tuple)): simplearray = [] for value in data: if value is not None: @@ -2041,7 +2041,7 @@ def simple_types_filter(data): for key, value in data.items(): if key is not None and not isinstance(key, simpletypes_keys): key = repr(key) - if value is not None and isinstance(value, (dict, list)): + if value is not None and isinstance(value, (dict, list, tuple)): value = simple_types_filter(value) elif value is not None and not isinstance(value, simpletypes_values): value = repr(value) diff --git a/salt/utils/event.py b/salt/utils/event.py index 6ac615c933..27a2dbf098 100644 --- a/salt/utils/event.py +++ b/salt/utils/event.py @@ -130,7 +130,8 @@ def get_master_event(opts, sock_dir, listen=True): elif opts['transport'] == 'raet': import salt.utils.raetevent return salt.utils.raetevent.MasterEvent( - opts=opts, sock_dir=sock_dir, listen=listen) + opts=opts, sock_dir=sock_dir, listen=listen + ) def tagify(suffix='', prefix='', base=SALT): diff --git a/salt/utils/github.py b/salt/utils/github.py index 1055fb26f5..74c7d2805c 100644 --- a/salt/utils/github.py +++ b/salt/utils/github.py @@ -1,14 +1,12 @@ # -*- coding: utf-8 -*- ''' Connection library for GitHub - -:depends: requests ''' from __future__ import absolute_import # Import Python libs import json -import requests +import salt.utils.http import logging log = logging.getLogger(__name__) @@ -48,8 +46,14 @@ def get_user_pubkeys(users): user = tmp_user url = 'https://api.github.com/users/{0}/keys'.format(user) - result = requests.request('GET', url) - keys = json.loads(result.text) + result = salt.utils.http.query( + url, + 'GET', + decode=False, + text=True, + ) + + keys = json.loads(result['text']) ret[user] = {} for key in keys: diff --git a/salt/utils/http.py b/salt/utils/http.py index 6e55228202..19c9b7803e 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -11,6 +11,7 @@ import os.path import json import logging import salt.ext.six.moves.http_cookiejar # pylint: disable=E0611 +from salt.ext.six import string_types from salt._compat import ElementTree as ET import ssl @@ -82,6 +83,9 @@ def query(url, requests_lib=None, ca_bundle=None, verify_ssl=None, + text_out=None, + headers_out=None, + decode_out=None, **kwargs): ''' Query a resource, and decode the return data @@ -123,7 +127,10 @@ def query(url, data = _render( data_file, data_render, data_renderer, template_dict, opts ) - log.trace('POST Data: {0}'.format(pprint.pformat(data))) + + log.debug('Using {0} Method'.format(method)) + if method == 'POST': + log.trace('POST Data: {0}'.format(pprint.pformat(data))) if header_file is not None: header_tpl = _render( @@ -205,7 +212,7 @@ def query(url, if url.startswith('https') or port == 443: if not HAS_MATCHHOSTNAME: - log.warn(('match_hostname() not available, SSL hostname checking' + log.warn(('match_hostname() not available, SSL hostname checking ' 'not available. 
THIS CONNECTION MAY NOT BE SECURE!')) elif verify_ssl is False: log.warn(('SSL certificate verification has been explicitly ' @@ -243,6 +250,13 @@ def query(url, result_headers = result.headers.headers result_text = result.read() + if isinstance(result_headers, list): + result_headers_dict = {} + for header in result_headers: + comps = header.split(':') + result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip() + result_headers = result_headers_dict + log.debug('Response Status Code: {0}'.format(result_status_code)) log.trace('Response Headers: {0}'.format(result_headers)) log.trace('Response Cookies: {0}'.format(sess_cookies)) @@ -252,6 +266,14 @@ def query(url, log.trace(('Cannot Trace Log Response Text: {0}. This may be due to ' 'incompatibilities between requests and logging.').format(exc)) + if os.path.exists(text_out): + with salt.utils.fopen(text_out, 'w') as tof: + tof.write(result_text) + + if os.path.exists(headers_out): + with salt.utils.fopen(headers_out, 'w') as hof: + hof.write(result_headers) + if cookies is not None: sess_cookies.save() @@ -304,13 +326,17 @@ def query(url, else: text = True + if os.path.exists(decode_out): + with salt.utils.fopen(decode_out, 'w') as dof: + dof.write(result_text) + if text is True: ret['text'] = result_text return ret -def get_ca_bundle(opts): +def get_ca_bundle(opts=None): ''' Return the location of the ca bundle file. See the following article: @@ -319,16 +345,28 @@ def get_ca_bundle(opts): if hasattr(get_ca_bundle, '__return_value__'): return get_ca_bundle.__return_value__ + if opts is None: + opts = {} + opts_bundle = opts.get('ca_bundle', None) if opts_bundle is not None and os.path.exists(opts_bundle): return opts_bundle + file_roots = opts.get('file_roots', '/srv/salt') + + # Please do not change the order without good reason for path in ( - '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', + # Check Salt first + os.path.join(file_roots, 'cacert.pem'), + os.path.join(file_roots, 'ca-bundle.crt'), + # Debian has paths that often exist on other distros + '/etc/ssl/certs/ca-certificates.crt', + # RedHat is also very common '/etc/pki/tls/certs/ca-bundle.crt', '/etc/pki/tls/certs/ca-bundle.trust.crt', + # RedHat's link for Debian compatability '/etc/ssl/certs/ca-bundle.crt', - '/etc/ssl/certs/ca-certificates.crt', + # Suse has an unusual path '/var/lib/ca-certificates/ca-bundle.pem', ): if os.path.exists(path): @@ -337,6 +375,97 @@ def get_ca_bundle(opts): return None +def update_ca_bundle( + target=None, + source=None, + opts=None, + merge_files=None, + ): + ''' + Attempt to update the CA bundle file from a URL + + If not specified, the local location on disk (``target``) will be + auto-detected, if possible. If it is not found, then a new location on disk + will be created and updated. + + The default ``source`` is: + + http://curl.haxx.se/ca/cacert.pem + + This is based on the information at: + + http://curl.haxx.se/docs/caextract.html + + A string or list of strings representing files to be appended to the end of + the CA bundle file may also be passed through as ``merge_files``. 
+ ''' + if opts is None: + opts = {} + + file_roots = opts.get('file_roots', '/srv/salt') + + if target is None: + target = get_ca_bundle(opts) + + if target is None: + log.error('Unable to detect location to write CA bundle to') + return + + if source is None: + source = opts.get('ca_bundle_url', 'http://curl.haxx.se/ca/cacert.pem') + + log.debug('Attempting to download {0} to {1}'.format(source, target)) + query( + source, + text=True, + decode=False, + headers=False, + status=False, + text_out=target + ) + + if merge_files is not None: + if isinstance(merge_files, string_types): + merge_files = [merge_files] + + if not isinstance(merge_files, list): + log.error('A value was passed as merge_files which was not either ' + 'a string or a list') + return + + merge_content = '' + + for cert_file in merge_files: + if os.path.exists(cert_file): + log.debug( + 'Queueing up {0} to be appended to {1}'.format( + cert_file, target + ) + ) + try: + with salt.utils.fopen(cert_file, 'r') as fcf: + merge_content = '\n'.join((merge_content, fcf.read())) + except IOError as exc: + log.error( + 'Reading from {0} caused the following error: {1}'.format( + cert_file, exc + ) + ) + + if merge_content: + log.debug('Appending merge_files to {0}'.format(target)) + try: + with salt.utils.fopen(target, 'a') as tfp: + tfp.write('\n') + tfp.write(merge_content) + except IOError as exc: + log.error( + 'Writing to {0} caused the following error: {1}'.format( + target, exc + ) + ) + + def _render(template, render, renderer, template_dict, opts): ''' Render a template diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py index 19a80febdf..638fa9d472 100644 --- a/salt/utils/jinja.py +++ b/salt/utils/jinja.py @@ -65,7 +65,7 @@ class SaltCacheLoader(BaseLoader): self.opts = opts self.saltenv = saltenv self.encoding = encoding - if self.opts.get('__pillar', False): + if self.opts['file_roots'] is self.opts['pillar_roots']: self.searchpath = opts['file_roots'][saltenv] else: self.searchpath = [path.join(opts['cachedir'], 'files', saltenv)] diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 484d7a7e3c..c8e8708d4c 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -380,16 +380,17 @@ class CkMinions(object): matcher_args = ['@'.join(comps[1:])] if comps[0] in ('G', 'P', 'I'): matcher_args.append(delimiter) + matcher_args.append(True) if not matcher: # If an unknown matcher is called at any time, fail out return [] if unmatched and unmatched[-1] == '-': - results.append(str(set(matcher('@'.join(comps[1:]), True)))) + results.append(str(set(matcher(*matcher_args)))) results.append(')') unmatched.pop() else: - results.append(str(set(matcher('@'.join(comps[1:]), True)))) + results.append(str(set(matcher(*matcher_args)))) elif match in opers: # We didn't match a target, so append a boolean operator or # subexpression diff --git a/salt/utils/network.py b/salt/utils/network.py index 84e83de1dd..09e2576ad8 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -7,6 +7,7 @@ from __future__ import absolute_import # Import python libs import socket import subprocess +import shlex import re import logging import os @@ -1011,7 +1012,8 @@ def _freebsd_remotes_on(port, which_end): remotes = set() try: - data = subprocess.check_output(['sockstat', '-4', '-c', '-p {0}'.format(port)]) + cmd = shlex.split('sockstat -4 -c -p {0}'.format(port)) + data = subprocess.check_output(cmd) except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode)) raise 
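The ``update_ca_bundle`` helper added above can also be driven directly from Python rather than through the ``http.update_ca_bundle`` runner. The sketch below is a minimal illustration only: the config path, target, source URL, and merge file are assumed values, not ones taken from this patch.

.. code-block:: python

    # Minimal sketch: call salt.utils.http.update_ca_bundle() directly.
    # All paths and URLs here are illustrative assumptions.
    import salt.config
    import salt.utils.http

    # Load master opts so any ca_bundle / ca_bundle_url settings are honoured.
    opts = salt.config.master_config('/etc/salt/master')

    salt.utils.http.update_ca_bundle(
        target='/etc/ssl/certs/cacerts.pem',        # where the bundle is written
        source='https://example.com/cacert.pem',    # prefer https over the plain-http default
        opts=opts,
        merge_files=['/srv/salt/internal-ca.pem'],  # extra PEM(s) appended after download
    )

Passing an explicit https ``source`` (or setting ``ca_bundle_url`` in the master config) avoids the plain-http default that the docstring above warns against.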
diff --git a/salt/utils/pagerduty.py b/salt/utils/pagerduty.py index bca6d2fdf5..078105e969 100644 --- a/salt/utils/pagerduty.py +++ b/salt/utils/pagerduty.py @@ -19,8 +19,8 @@ Library for interacting with PagerDuty API from __future__ import absolute_import import json -import requests import logging +import salt.utils.http from salt.version import __version__ log = logging.getLogger(__name__) @@ -90,16 +90,18 @@ def query(method='GET', profile=None, url=None, path='api/v1', else: headers['Content-type'] = 'application/json' - result = requests.request( - method, + result = salt.utils.http.query( url, - headers=headers, + method, params=params, + header_dict=headers, data=json.dumps(data), - verify=verify_ssl + decode=False, + text=True, + opts=opts, ) - return result.text + return result['text'] def list_items(action, key, profile=None, api_key=None, opts=None): diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index bc2d2e1e74..a545c26caf 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -233,6 +233,7 @@ try: _CRON_SUPPORTED = True except ImportError: _CRON_SUPPORTED = False +import yaml # Import Salt libs import salt.utils @@ -241,6 +242,7 @@ import salt.utils.process from salt.utils.odict import OrderedDict from salt.utils.process import os_is_running import salt.payload +import salt.syspaths log = logging.getLogger(__name__) @@ -260,6 +262,7 @@ class Schedule(object): self.returners = returners else: self.returners = returners.loader.gen_functions() + self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')() self.schedule_returner = self.option('schedule_returner') # Keep track of the lowest loop interval needed in this variable self.loop_interval = sys.maxint @@ -273,11 +276,24 @@ class Schedule(object): return self.functions['config.merge'](opt, {}, omit_master=True) return self.opts.get(opt, {}) + def persist(self): + ''' + Persist the modified schedule into <>/minion.d/_schedule.conf + ''' + schedule_conf = os.path.join( + salt.syspaths.CONFIG_DIR, + 'minion.d', + '_schedule.conf') + try: + with salt.utils.fopen(schedule_conf, 'w+') as fp_: + fp_.write(yaml.dump({'schedule': self.opts['schedule']})) + except (IOError, OSError): + log.error('Failed to persist the updated schedule') + def delete_job(self, name, where=None): ''' Deletes a job from the scheduler. ''' - if where is None or where != 'pillar': # ensure job exists, then delete it if name in self.opts['schedule']: @@ -314,6 +330,7 @@ class Schedule(object): else: log.info('Added new job {0} to scheduler'.format(new_job)) self.opts['schedule'].update(data) + self.persist() def enable_job(self, name, where=None): ''' @@ -429,6 +446,8 @@ class Schedule(object): if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] + ret['metadata']['_TOS'] = self.time_offset + ret['metadata']['_TS'] = time.ctime() else: log.warning('schedule: The metadata parameter must be ' 'specified as a dictionary. 
Ignoring.') diff --git a/tests/integration/files/conf/cloud.profiles.d/rackspace.conf b/tests/integration/files/conf/cloud.profiles.d/rackspace.conf index 2ea8791772..45c33fcf3e 100644 --- a/tests/integration/files/conf/cloud.profiles.d/rackspace.conf +++ b/tests/integration/files/conf/cloud.profiles.d/rackspace.conf @@ -1,4 +1,4 @@ rackspace-test: provider: rackspace-config size: 2 GB Performance - image: Ubuntu 14.04 LTS (Trusty Tahr) + image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) \ No newline at end of file diff --git a/tests/unit/minion_test.py b/tests/unit/minion_test.py index 182c385df8..82e7e2d33f 100644 --- a/tests/unit/minion_test.py +++ b/tests/unit/minion_test.py @@ -39,7 +39,8 @@ class MinionTestCase(TestCase): opts = { 'id': 'salt-testing', 'hash_type': 'sha512', - 'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion') + 'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'), + 'extension_modules': '' } with patch.dict(__opts__, opts): testminion = minion.MinionBase(__opts__) diff --git a/tests/unit/modules/cmdmod_test.py b/tests/unit/modules/cmdmod_test.py index 60feab6746..1128b7cc97 100644 --- a/tests/unit/modules/cmdmod_test.py +++ b/tests/unit/modules/cmdmod_test.py @@ -215,7 +215,7 @@ class CMDMODTestCase(TestCase): ''' Tests end result when a command is not found ''' - ret = cmdmod._run('foo').get('stderr') + ret = cmdmod._run('foo', use_vt=True).get('stderr') self.assertIn('foo', ret) @patch('salt.utils.is_windows', MagicMock(return_value=True)) diff --git a/tests/unit/states/file_test.py b/tests/unit/states/file_test.py index b992f34ba9..fbe1bb96b6 100644 --- a/tests/unit/states/file_test.py +++ b/tests/unit/states/file_test.py @@ -18,7 +18,8 @@ import salt.states.file as filestate filestate.__env__ = 'base' filestate.__salt__ = {'file.manage_file': False} -filestate.__opts__ = {'test': False} +filestate.__opts__ = {'test': False, 'cachedir': ''} +filestate.__instance_id__ = '' @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/templates/jinja_test.py b/tests/unit/templates/jinja_test.py index e31d8446ce..bd01bc49df 100644 --- a/tests/unit/templates/jinja_test.py +++ b/tests/unit/templates/jinja_test.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Import python libs +import copy import os import tempfile import json @@ -57,12 +58,26 @@ class MockFileClient(object): class TestSaltCacheLoader(TestCase): + def __init__(self, *args, **kws): + TestCase.__init__(self, *args, **kws) + self.opts = { + 'cachedir': TEMPLATES_DIR, + 'file_roots': { + 'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')] + }, + 'pillar_roots': { + 'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')] + } + } + def test_searchpath(self): ''' The searchpath is based on the cachedir option and the saltenv parameter ''' tmp = tempfile.gettempdir() - loader = SaltCacheLoader({'cachedir': tmp}, saltenv='test') + opts = copy.deepcopy(self.opts) + opts.update({'cachedir': tmp}) + loader = SaltCacheLoader(opts, saltenv='test') assert loader.searchpath == [os.path.join(tmp, 'files', 'test')] def test_mockclient(self): @@ -70,7 +85,7 @@ class TestSaltCacheLoader(TestCase): A MockFileClient is used that records all file requests normally sent to the master. 
''' - loader = SaltCacheLoader({'cachedir': TEMPLATES_DIR}, 'test') + loader = SaltCacheLoader(self.opts, 'test') fc = MockFileClient(loader) res = loader.get_source(None, 'hello_simple') assert len(res) == 3 @@ -86,7 +101,7 @@ class TestSaltCacheLoader(TestCase): ''' Setup a simple jinja test environment ''' - loader = SaltCacheLoader({'cachedir': TEMPLATES_DIR}, 'test') + loader = SaltCacheLoader(self.opts, 'test') fc = MockFileClient(loader) jinja = Environment(loader=loader) return fc, jinja @@ -134,6 +149,9 @@ class TestGetTemplate(TestCase): 'file_roots': { 'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')] }, + 'pillar_roots': { + 'test': [os.path.join(TEMPLATES_DIR, 'files', 'test')] + }, 'fileserver_backend': ['roots'], 'hash_type': 'md5', 'extension_modules': os.path.join( @@ -178,7 +196,9 @@ class TestGetTemplate(TestCase): filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'hello_import') out = render_jinja_tmpl( salt.utils.fopen(filename).read(), - dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'}, + dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote', + 'file_roots': self.local_opts['file_roots'], + 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Salt', saltenv='test')) self.assertEqual(out, 'Hey world !Hi Salt !\n') self.assertEqual(fc.requests[0]['path'], 'salt://macro') @@ -267,7 +287,9 @@ class TestGetTemplate(TestCase): filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'hello_import') out = render_jinja_tmpl( salt.utils.fopen(filename).read(), - dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'}, + dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote', + 'file_roots': self.local_opts['file_roots'], + 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Sàlt', saltenv='test')) self.assertEqual(out, u'Hey world !Hi Sàlt !\n') self.assertEqual(fc.requests[0]['path'], 'salt://macro') @@ -278,7 +300,9 @@ class TestGetTemplate(TestCase): filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii') out = render_jinja_tmpl( salt.utils.fopen(filename).read(), - dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote'}, + dict(opts={'cachedir': TEMPLATES_DIR, 'file_client': 'remote', + 'file_roots': self.local_opts['file_roots'], + 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Sàlt', saltenv='test')) self.assertEqual(u'Assunção\n', out) self.assertEqual(fc.requests[0]['path'], 'salt://macro')
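The jinja test changes above reflect a new requirement in ``SaltCacheLoader``: it now consults both ``file_roots`` and ``pillar_roots`` (compared by identity) when choosing its template search path, so code constructing the loader must supply both keys. A minimal sketch follows; the cachedir and root paths are illustrative, not taken from the patch.

.. code-block:: python

    # Minimal sketch: build a jinja Environment around SaltCacheLoader with
    # the opts keys the loader now expects. Paths are illustrative.
    import os
    from jinja2 import Environment
    from salt.utils.jinja import SaltCacheLoader

    opts = {
        'cachedir': '/var/cache/salt/minion',
        'file_roots': {'base': [os.path.join('/srv', 'salt')]},
        'pillar_roots': {'base': [os.path.join('/srv', 'pillar')]},
    }

    # file_roots and pillar_roots are distinct objects here, so the loader
    # falls back to <cachedir>/files/<saltenv> as its search path, exactly
    # as the updated unit tests exercise it.
    loader = SaltCacheLoader(opts, saltenv='base')
    env = Environment(loader=loader)

Rendering a template through this environment still requires a file client; the unit tests attach a ``MockFileClient`` for that purpose.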