Merge branch 'develop' into macports

This commit is contained in:
Jeremy McMillan 2017-12-18 21:31:09 -06:00 committed by GitHub
commit d1bc6fc3f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
512 changed files with 16334 additions and 10423 deletions

View File

@ -10,6 +10,7 @@
driver:
name: docker
use_sudo: false
hostname: salt
privileged: true
username: root
volume:
@ -34,7 +35,7 @@ provisioner:
require_chef: false
remote_states:
name: git://github.com/saltstack/salt-jenkins.git
branch: master
branch: oxygen
repo: git
testingdir: /testing
salt_copy_filter:

View File

@ -1,10 +0,0 @@
#my-openstack-hp-config:
# driver: openstack
# identity_url: 'https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/'
# compute_name: Compute
# compute_region: 'az-1.region-a.geo-1'
# tenant: myuser-tenant1
# user: myuser
# ssh_key_name: mykey
# ssh_key_file: '/etc/salt/hpcloud/mykey.pem'
# password: mypass

View File

@ -1,10 +0,0 @@
#my-openstack-rackspace-config:
# driver: openstack
# identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
# compute_name: cloudServersOpenStack
# protocol: ipv4
# compute_region: DFW
# protocol: ipv4
# user: myuser
# tenant: 5555555
# apikey: 901d3f579h23c8v73q9

View File

@ -36,7 +36,7 @@
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
# key_logfile, pidfile, autosign_grains_dir:
#root_dir: /
# The path to the master's configuration file.
@ -351,6 +351,11 @@
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
@ -1297,4 +1302,3 @@
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1

View File

@ -666,6 +666,12 @@
# certfile: <path_to_certfile>
# ssl_version: PROTOCOL_TLSv1_2
# Grains to be sent to the master on authentication to check if the minion's key
# will be accepted automatically. Needs to be configured on the master.
#autosign_grains:
# - uuid
# - server_id
###### Reactor Settings #####
###########################################

View File

@ -37,7 +37,7 @@ syndic_user: salt
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
# key_logfile, pidfile, autosign_grains_dir:
#root_dir: /
# The path to the master's configuration file.
@ -320,6 +320,11 @@ syndic_user: salt
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains
# Enable permissive access to the salt keys. This allows you to run the
# master or minion as root, but have a non-root group be given access to
# your pki_dir. To make the access explicit, root must belong to the group
@ -1248,4 +1253,3 @@ syndic_user: salt
# use OS defaults, typically 75 seconds on Linux, see
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
#tcp_keepalive_intvl: -1

View File

@ -3,4 +3,4 @@ salt.cloud.clouds.openstack
===========================
.. automodule:: salt.cloud.clouds.openstack
:members:
:members:

View File

@ -225,15 +225,16 @@ enclosing brackets ``[`` and ``]``:
Default: ``{}``
This can be used to control logging levels more specifically. The example sets
the main salt library at the 'warning' level, but sets ``salt.modules`` to log
at the ``debug`` level:
This can be used to control logging levels more specifically, based on log call name. The example sets
the main salt library at the 'warning' level, sets ``salt.modules`` to log
at the ``debug`` level, and sets a custom module to the ``all`` level:
.. code-block:: yaml
log_granular_levels:
'salt': 'warning'
'salt.modules': 'debug'
'salt.loader.saltmaster.ext.module.custom_module': 'all'
External Logging Handlers
-------------------------

View File

@ -140,7 +140,8 @@ an alternative root.
This directory is prepended to the following options:
:conf_master:`pki_dir`, :conf_master:`cachedir`, :conf_master:`sock_dir`,
:conf_master:`log_file`, :conf_master:`autosign_file`,
:conf_master:`autoreject_file`, :conf_master:`pidfile`.
:conf_master:`autoreject_file`, :conf_master:`pidfile`,
:conf_master:`autosign_grains_dir`.
.. conf_master:: conf_file
@ -407,6 +408,19 @@ to False.
color: False
.. conf_master:: color_theme
``color_theme``
---------------
Default: ``""``
Specifies a path to the color theme to use for colored command line output.
.. code-block:: yaml
color_theme: /etc/salt/color_theme
.. conf_master:: cli_summary
``cli_summary``
@ -1321,6 +1335,32 @@ minion IDs for which keys will automatically be rejected. Will override both
membership in the :conf_master:`autosign_file` and the
:conf_master:`auto_accept` setting.
.. conf_master:: autosign_grains_dir
``autosign_grains_dir``
-----------------------
.. versionadded:: Oxygen
Default: ``not defined``
If the ``autosign_grains_dir`` is specified, incoming keys from minions with
grain values that match those defined in files in the autosign_grains_dir
will be accepted automatically. Grain values that should be accepted automatically
can be defined by creating a file named like the corresponding grain in the
autosign_grains_dir and writing the values into that file, one value per line.
Lines starting with a ``#`` will be ignored.
Minion must be configured to send the corresponding grains on authentication.
This should still be considered a less than secure option, due to the fact
that trust is based on just the requesting minion.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more information.
.. code-block:: yaml
autosign_grains_dir: /etc/salt/autosign_grains
.. conf_master:: permissive_pki_access
``permissive_pki_access``

View File

@ -321,6 +321,20 @@ option on the Salt master.
master_port: 4506
.. conf_minion:: publish_port
``publish_port``
----------------
Default: ``4505``
The port of the master publish server, this needs to coincide with the publish_port
option on the Salt master.
.. code-block:: yaml
publish_port: 4505
.. conf_minion:: source_interface_name
``source_interface_name``
@ -606,6 +620,19 @@ This directory may contain sensitive data and should be protected accordingly.
cachedir: /var/cache/salt/minion
.. conf_master:: color_theme
``color_theme``
---------------
Default: ``""``
Specifies a path to the color theme to use for colored command line output.
.. code-block:: yaml
color_theme: /etc/salt/color_theme
.. conf_minion:: append_minionid_config_dirs
``append_minionid_config_dirs``
@ -2423,6 +2450,27 @@ minion's pki directory.
master_sign_key_name: <filename_without_suffix>
.. conf_minion:: autosign_grains
``autosign_grains``
-------------------
.. versionadded:: Oxygen
Default: ``not defined``
The grains that should be sent to the master on authentication to decide if
the minion's key should be accepted automatically.
Please see the :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`
documentation for more information.
.. code-block:: yaml
autosign_grains:
- uuid
- server_id
.. conf_minion:: always_verify_signature
``always_verify_signature``

View File

@ -147,7 +147,7 @@ must implement the following functions:
``prep_jid``
Ensures that job ids (jid) don't collide, unless passed_jid is provided.
``nochache`` is an optional boolean that indicates if return data
``nocache`` is an optional boolean that indicates if return data
should be cached. ``passed_jid`` is a caller provided jid which should be
returned unconditionally.

View File

@ -184,37 +184,18 @@ minion. In your pillar file, you would use something like this:
ssh_key_file: /root/.ssh/id_rsa
update_cachedir: True
diff_cache_events: True
change_password: True
providers:
my-nova:
identity_url: https://identity.api.rackspacecloud.com/v2.0/
compute_region: IAD
user: myuser
api_key: apikey
tenant: 123456
driver: nova
my-openstack:
identity_url: https://identity.api.rackspacecloud.com/v2.0/tokens
user: user2
apikey: apikey2
tenant: 654321
compute_region: DFW
driver: openstack
compute_name: cloudServersOpenStack
region_name: ORD
cloud: mycloud
profiles:
ubuntu-nova:
provider: my-nova
size: performance1-8
image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
script_args: git develop
ubuntu-openstack:
provider: my-openstack
size: performance1-8
image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
size: ds512M
image: CentOS 7
script_args: git develop
@ -363,76 +344,7 @@ be set in the configuration file to enable interfacing with GoGrid:
OpenStack
---------
OpenStack configuration differs between providers, and at the moment several
options need to be specified. This module has been officially tested against
the HP and the Rackspace implementations, and some examples are provided for
both.
.. code-block:: yaml
# For HP
my-openstack-hp-config:
identity_url:
'https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/'
compute_name: Compute
compute_region: 'az-1.region-a.geo-1'
tenant: myuser-tenant1
user: myuser
ssh_key_name: mykey
ssh_key_file: '/etc/salt/hpcloud/mykey.pem'
password: mypass
driver: openstack
# For Rackspace
my-openstack-rackspace-config:
identity_url: 'https://identity.api.rackspacecloud.com/v2.0/tokens'
compute_name: cloudServersOpenStack
protocol: ipv4
compute_region: DFW
user: myuser
tenant: 5555555
password: mypass
driver: openstack
If you have an API key for your provider, it may be specified instead of a
password:
.. code-block:: yaml
my-openstack-hp-config:
apikey: 901d3f579h23c8v73q9
my-openstack-rackspace-config:
apikey: 901d3f579h23c8v73q9
.. note::
In the cloud profile that uses this provider configuration, the syntax for the
``provider`` required field would be either ``provider: my-openstack-hp-config``
or ``provider: my-openstack-rackspace-config``.
You will certainly need to configure the ``user``, ``tenant``, and either
``password`` or ``apikey``.
If your OpenStack instances only have private IP addresses and a CIDR range of
private addresses are not reachable from the salt-master, you may set your
preference to have Salt ignore it:
.. code-block:: yaml
my-openstack-config:
ignore_cidr: 192.168.0.0/16
For in-house OpenStack Essex installation, libcloud needs the service_type :
.. code-block:: yaml
my-openstack-config:
identity_url: 'http://control.openstack.example.org:5000/v2.0/'
compute_name : Compute Service
service_type : compute
.. automodule:: salt.cloud.clouds.openstack
DigitalOcean
------------

View File

@ -1,185 +0,0 @@
==============================
Getting Started With OpenStack
==============================
OpenStack is one of the most popular cloud projects. It's an open source project
to build public and/or private clouds. You can use Salt Cloud to launch
OpenStack instances.
Dependencies
============
* Libcloud >= 0.13.2
Configuration
=============
* Using the new format, set up the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/openstack.conf``:
.. code-block:: yaml
my-openstack-config:
# Set the location of the salt-master
#
minion:
master: saltmaster.example.com
# Configure the OpenStack driver
#
identity_url: http://identity.youopenstack.com/v2.0/tokens
compute_name: nova
protocol: ipv4
compute_region: RegionOne
# Configure Openstack authentication credentials
#
user: myname
password: 123456
# tenant is the project name
tenant: myproject
driver: openstack
# skip SSL certificate validation (default false)
insecure: false
.. note::
.. versionchanged:: 2015.8.0
The ``provider`` parameter in cloud provider definitions was renamed to ``driver``. This
change was made to avoid confusion with the ``provider`` parameter that is used in cloud profile
definitions. Cloud provider definitions now use ``driver`` to refer to the Salt cloud module that
provides the underlying functionality to connect to a cloud host, while cloud profiles continue
to use ``provider`` to refer to provider configurations that you define.
Using nova client to get information from OpenStack
===================================================
One of the best ways to get information about OpenStack is using the novaclient
python package (available in pypi as python-novaclient). The client
configuration is a set of environment variables that you can get from the
Dashboard. Log in and then go to Project -> Access & security -> API Access and
download the "OpenStack RC file". Then:
.. code-block:: yaml
source /path/to/your/rcfile
nova credentials
nova endpoints
In the ``nova endpoints`` output you can see the information about
``compute_region`` and ``compute_name``.
Compute Region
==============
It depends on the OpenStack cluster that you are using. Please, have a look at
the previous sections.
Authentication
==============
The ``user`` and ``password`` is the same user as is used to log into the
OpenStack Dashboard.
Profiles
========
Here is an example of a profile:
.. code-block:: yaml
openstack_512:
provider: my-openstack-config
size: m1.tiny
image: cirros-0.3.1-x86_64-uec
ssh_key_file: /tmp/test.pem
ssh_key_name: test
ssh_interface: private_ips
The following list explains some of the important properties.
size
can be one of the options listed in the output of ``nova flavor-list``.
image
can be one of the options listed in the output of ``nova image-list``.
ssh_key_file
    The SSH private key that the salt-cloud uses to SSH into the VM after it is
first booted in order to execute a command or script. This private key's
*public key* must be the openstack public key inserted into the
    authorized_keys file of the VM's root user account.
ssh_key_name
The name of the openstack SSH public key that is inserted into the
authorized_keys file of the VM's root user account. Prior to using this
public key, you must use openstack commands or the horizon web UI to load
that key into the tenant's account. Note that this openstack tenant must be
the one you defined in the cloud provider.
ssh_interface
This option allows you to create a VM without a public IP. If this option
is omitted and the VM does not have a public IP, then the salt-cloud waits
    for a certain period of time and then destroys the VM. With the nova driver,
private cloud networks can be defined here.
For more information concerning cloud profiles, see :ref:`here
<salt-cloud-profiles>`.
change_password
~~~~~~~~~~~~~~~
If no ssh_key_file is provided, and the server already exists, change_password
will use the api to change the root password of the server so that it can be
bootstrapped.
.. code-block:: yaml
change_password: True
userdata_file
~~~~~~~~~~~~~
Use `userdata_file` to specify the userdata file to upload for use with
cloud-init if available.
.. code-block:: yaml
my-openstack-config:
# Pass userdata to the instance to be created
userdata_file: /etc/salt/cloud-init/packages.yml
.. note::
As of the 2016.11.4 release, this file can be templated. To use templating,
simply specify a ``userdata_template`` option in the cloud profile:
.. code-block:: yaml
my-openstack-config:
# Pass userdata to the instance to be created
userdata_file: /etc/salt/cloud-init/packages.yml
userdata_template: jinja
If no ``userdata_template`` is set in the cloud profile, then the master
configuration will be checked for a :conf_master:`userdata_template` value.
If this is not set, then no templating will be performed on the
userdata_file.
To disable templating in a cloud profile when a
:conf_master:`userdata_template` has been set in the master configuration
file, simply set ``userdata_template`` to ``False`` in the cloud profile:
.. code-block:: yaml
my-openstack-config:
# Pass userdata to the instance to be created
userdata_file: /etc/salt/cloud-init/packages.yml
userdata_template: False

View File

@ -80,12 +80,21 @@ same way as in the above example, only without a top-level ``grains:`` key:
.. note::
The content of ``/etc/salt/grains`` is ignored if you specify grains in the minion config.
Grains in ``/etc/salt/grains`` are ignored if you specify the same grains in the minion config.
.. note::
Grains are static, and since they are not often changed, they will need a grains refresh when they are updated. You can do this by calling: ``salt minion saltutil.refresh_modules``
.. note::
You can equally configure static grains for Proxy Minions.
As multiple Proxy Minion processes can run on the same machine, you need
to index the files using the Minion ID, under ``/etc/salt/proxy.d/<minion ID>/grains``.
For example, the grains for the Proxy Minion ``router1`` can be defined
under ``/etc/salt/proxy.d/router1/grains``, while the grains for the
Proxy Minion ``switch7`` can be put in ``/etc/salt/proxy.d/switch7/grains``.
Matching Grains in the Top File
===============================
@ -278,3 +287,9 @@ Syncing grains can be done a number of ways, they are automatically synced when
above) the grains can be manually synced and reloaded by calling the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.
.. note::
When the :conf_minion:`grains_cache` is set to False, the grains dictionary is built
and stored in memory on the minion. Every time the minion restarts or
``saltutil.refresh_grains`` is run, the grain dictionary is rebuilt from scratch.

View File

@ -869,26 +869,33 @@ Example:
.. note::
This option may have adverse effects when using the default renderer, ``yaml_jinja``.
This is due to the fact that YAML requires proper handling in regard to special
characters. Please see the section on :ref:`YAML ASCII support <yaml_plain_ascii>`
in the :ref:`YAML Idiosyncracies <yaml-idiosyncrasies>` documentation for more
information.
This option may have adverse effects when using the default renderer,
``yaml_jinja``. This is due to the fact that YAML requires proper handling
in regard to special characters. Please see the section on :ref:`YAML ASCII
support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncracies
<yaml-idiosyncrasies>` documentation for more information.
.. jinja_ref:: json_decode_list
.. jinja_ref:: json_encode_list
``json_decode_list``
``json_encode_list``
--------------------
.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
Renamed from ``json_decode_list`` to ``json_encode_list``. When you encode
something you get bytes, and when you decode, you get your locale's
encoding (usually a ``unicode`` type). This filter was incorrectly-named
when it was added. ``json_decode_list`` will be supported until the Neon
release.
JSON decodes as unicode, Jinja needs bytes.
Recursively encodes all string elements of the list to bytes.
Example:
.. code-block:: jinja
{{ [1, 2, 3] | json_decode_list }}
{{ [1, 2, 3] | json_encode_list }}
Returns:
@ -898,25 +905,35 @@ Returns:
.. jinja_ref:: json_decode_dict
.. jinja_ref:: json_encode_dict
``json_decode_dict``
``json_encode_dict``
--------------------
.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
Renamed from ``json_decode_dict`` to ``json_encode_dict``. When you encode
something you get bytes, and when you decode, you get your locale's
encoding (usually a ``unicode`` type). This filter was incorrectly-named
when it was added. ``json_decode_dict`` will be supported until the Neon
release.
JSON decodes as unicode, Jinja needs bytes.
Recursively encodes all string items in the dictionary to bytes.
Example:
Assuming that ``pillar['foo']`` contains ``{u'a': u'\u0414'}``, and your locale
is ``en_US.UTF-8``:
.. code-block:: jinja
{{ {'a': 'b'} | json_decode_dict }}
{{ pillar['foo'] | json_encode_dict }}
Returns:
.. code-block:: python
{'a': 'b'}
{'a': '\xd0\x94'}
.. jinja_ref:: random_hash
@ -927,7 +944,8 @@ Returns:
.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
Renamed from ``rand_str`` to ``random_hash`` to more accurately describe
what the filter does.
what the filter does. ``rand_str`` will be supported until the Neon
release.
Generates a random number between 1 and the number passed to the filter, and
then hashes it. The default hash type is the one specified by the minion's
@ -1612,6 +1630,54 @@ Returns:
Test supports additional optional arguments: ``ignorecase``, ``multiline``
Escape filters
--------------
.. jinja_ref:: regex_escape
``regex_escape``
----------------
.. versionadded:: 2017.7.0
Allows escaping of strings so they can be interpreted literally by another function.
Example:
.. code-block:: jinja
regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }}
will be rendered as:
.. code-block:: text
regex_escape = https\:\/\/example\.com\?foo\=bar\%20baz
Set Theory Filters
------------------
.. jinja_ref:: unique
``unique``
----------
.. versionadded:: 2017.7.0
Performs set math using Jinja filters.
Example:
.. code-block:: jinja
unique = {{ ['foo', 'foo', 'bar'] | unique }}
will be rendered as:
.. code-block:: text
unique = ['foo', 'bar']
Jinja in Files
==============

View File

@ -211,6 +211,28 @@ localtime.
This will schedule the command: ``state.sls httpd test=True`` at 5:00 PM on
Monday, Wednesday and Friday, and 3:00 PM on Tuesday and Thursday.
.. code-block:: yaml
schedule:
job1:
function: state.sls
args:
- httpd
kwargs:
test: True
when:
- 'tea time'
.. code-block:: yaml
whens:
tea time: 1:40pm
deployment time: Friday 5:00pm
The Salt scheduler also allows custom phrases to be used for the `when`
parameter. These `whens` can be stored as either pillar values or
grain values.
.. code-block:: yaml
schedule:

View File

@ -128,6 +128,17 @@ by any master tops matches that are not matched via a top file.
To make master tops matches execute first, followed by top file matches, set
the new :conf_minion:`master_tops_first` minion config option to ``True``.
Several Jinja Filters Renamed
-----------------------------
The following Jinja filters (originally added in 2017.7.0) have been renamed
due to the fact that they were inaccurately named when initially added. The
original names will be supported until the Neon release of Salt.
- :jinja_ref:`rand_str` renamed to :jinja_ref:`random_hash`
- :jinja_ref:`json_decode_dict` renamed to :jinja_ref:`json_encode_dict`
- :jinja_ref:`json_decode_list` renamed to :jinja_ref:`json_encode_list`
Return Codes for Runner/Wheel Functions
---------------------------------------
@ -222,6 +233,63 @@ The new grains added are:
* ``iscsi_iqn``: Show the iSCSI IQN name for a host
* ``swap_total``: Show the configured swap_total for Linux, *BSD, OS X and Solaris/SunOS
Salt Minion Autodiscovery
-------------------------
Salt Minions no longer need to be configured against a specific DNS name or IP address of a Master.
For this feature Salt Master now requires port 4520 for UDP broadcast packets to be opened
and the Salt Minion be able to send UDP packets to the same port.
Connection to a type instead of DNS
===================================
Until now each Minion connected to a Master by DNS or IP address. From now on it is possible
also to connect to a _type_ of a Master. For example, in a network there are three different
Masters, each corresponds for a particular niche or environment or specific role etc. The Minion
is supposed to connect only to one of those Masters that is described appropriately.
To achieve such an effect, each `/etc/salt/master` configuration should have a `discovery` option,
which should have a `mapping` element with arbitrary key/value pairs. The same configuration should
be on the Minion, so that when the mapping matches, the Minion recognises the Master as its connection target.
Example for Master configuration (`/etc/salt/master`):
.. code-block:: yaml
discovery:
mapping:
description: SES 5.0
node: 1
The example above describes a system that is running a particular product, where `description` is
an arbitrary key and `SES 5.0` is just a string. In order to match exactly this Master, the
following configuration at Minion should be present:
.. code-block:: yaml
discovery:
match: all # Can be "all" or "any"
mapping:
description: SES 5.0
node: 1
Notice `match` criteria is set to `all`. This would mean that from all found Masters select only
that, which `description` is set to `SES 5.0` _and_ `node` is set to `1`. All other Masters will
be ignored.
Limitations
===========
This feature has a couple of _temporary_ limitations that are subject to change in the future:
- Only one Master on the network is supported. Currently the Minion cannot select which Master
  to choose out of several identical ones. This will change to choosing the Master that is least loaded.
- Minions will accept _any_ master that matches connection criteria without any particular
security applied (priv/pub key check, signature, fingerprint etc). That implies that administrator
is expected to know his network and make sure it is clean.
Grains Changes
--------------
@ -244,6 +312,12 @@ New support for Cisco UCS Chassis
The salt proxy minion now allows for control of Cisco USC chassis. See
the ``cimc`` modules for details.
New support for Cassandra v3
----------------------------
The ``cassandra_cql`` module now supports Cassandra v3 which has changed
its internal schema to define keyspaces and columns.
New salt-ssh roster
-------------------

View File

@ -67,6 +67,13 @@ This would explicitly set the roots to the default:
base:
- /srv/thorium
Example ``thorium_roots`` configuration:
.. code-block:: yaml
thorium_roots:
base:
- /etc/salt/thorium
The Thorium top.sls File

View File

@ -0,0 +1,44 @@
.. _tutorial-autoaccept-grains:
==============================
Autoaccept minions from Grains
==============================
.. versionadded:: Oxygen
To automatically accept minions based on certain characteristics, e.g. the ``uuid``
you can specify certain grain values on the salt master. Minions with matching grains
will have their keys automatically accepted.
1. Configure the autosign_grains_dir in the master config file:
.. code-block:: yaml
autosign_grains_dir: /etc/salt/autosign_grains
2. Configure the grain values to be accepted
Place a file named like the grain in the autosign_grains_dir and write the values that
should be accepted automatically inside that file. For example to automatically
accept minions based on their ``uuid`` create a file named ``/etc/salt/autosign_grains/uuid``:
.. code-block:: none
8f7d68e2-30c5-40c6-b84a-df7e978a03ee
1d3c5473-1fbc-479e-b0c7-877705a0730f
The master is now set up to accept minions with either of the two specified uuids.
Multiple values must always be written into separate lines.
Lines starting with a ``#`` are ignored.
3. Configure the minion to send the specific grains to the master in the minion config file:
.. code-block:: yaml
autosign_grains:
- uuid
Now you should be able to start salt-minion and run ``salt-call
state.apply`` or any other salt commands that require master authentication.

View File

@ -35,3 +35,4 @@ Tutorials Index
* :ref:`Multi-cloud orchestration with Apache Libcloud <tutorial-libcloud>`
* :ref:`Running Salt States and Commands in Docker Containers <docker-sls>`
* :ref:`Preseed Minion with Accepted Key <tutorial-preseed-key>`
* :ref:`Autoaccept Minions from Grains <tutorial-autoaccept-grains>`

View File

@ -31,11 +31,20 @@
# ./build.sh v2015.8.3 2 /tmp/custom_pkg
#
############################################################################
echo -n -e "\033]0;Build: Variables\007"
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Check passed parameters, set defaults
############################################################################
echo -n -e "\033]0;Build: Variables\007"
if [ "$1" == "" ]; then
VERSION=`git describe`
else
@ -80,24 +89,24 @@ fi
# Create the Build Environment
############################################################################
echo -n -e "\033]0;Build: Build Environment\007"
sudo $PKGRESOURCES/build_env.sh $PYVER
$PKGRESOURCES/build_env.sh $PYVER
############################################################################
# Install Salt
############################################################################
echo -n -e "\033]0;Build: Install Salt\007"
sudo rm -rf $SRCDIR/build
sudo rm -rf $SRCDIR/dist
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
sudo $PYTHON $SRCDIR/setup.py install
rm -rf $SRCDIR/build
rm -rf $SRCDIR/dist
$PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
$PYTHON $SRCDIR/setup.py install
############################################################################
# Build Package
############################################################################
echo -n -e "\033]0;Build: Package Salt\007"
sudo $PKGRESOURCES/build_pkg.sh $VERSION $PYVER $PKGDIR
$PKGRESOURCES/build_pkg.sh $VERSION $PYVER $PKGDIR
############################################################################
# Sign Package
############################################################################
sudo $PKGRESOURCES/build_sig.sh salt-$VERSION-py$PYVER-$CPUARCH.pkg salt-$VERSION-py$PYVER-$CPUARCH-signed.pkg
$PKGRESOURCES/build_sig.sh salt-$VERSION-py$PYVER-$CPUARCH.pkg salt-$VERSION-py$PYVER-$CPUARCH-signed.pkg

View File

@ -24,6 +24,14 @@
#
############################################################################
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Set to Exit on all Errors
############################################################################
@ -54,17 +62,18 @@ ulimit -n 1200
SRCDIR=`git rev-parse --show-toplevel`
SCRIPTDIR=`pwd`
SHADIR=$SCRIPTDIR/shasums
PKG_CONFIG_PATH=/opt/salt/lib/pkgconfig
CFLAGS="-I/opt/salt/include"
LDFLAGS="-L/opt/salt/lib"
INSTALL_DIR=/opt/salt
PKG_CONFIG_PATH=$INSTALL_DIR/lib/pkgconfig
CFLAGS="-I$INSTALL_DIR/include"
LDFLAGS="-L$INSTALL_DIR/lib"
if [ "$PYVER" == "2" ]; then
PYDIR=/opt/salt/lib/python2.7
PYTHON=/opt/salt/bin/python
PIP=/opt/salt/bin/pip
PYDIR=$INSTALL_DIR/lib/python2.7
PYTHON=$INSTALL_DIR/bin/python
PIP=$INSTALL_DIR/bin/pip
else
PYDIR=/opt/salt/lib/python3.5
PYTHON=/opt/salt/bin/python3
PIP=/opt/salt/bin/pip3
PYDIR=$INSTALL_DIR/lib/python3.5
PYTHON=$INSTALL_DIR/bin/python3
PIP=$INSTALL_DIR/bin/pip3
fi
############################################################################
@ -74,10 +83,10 @@ fi
# Fink, Brew)
# Check for Xcode Command Line Tools first
if [ -d '/Library/Developer/CommandLineTools/usr/bin' ]; then
PATH=/Library/Developer/CommandLineTools/usr/bin:/opt/salt/bin:$PATH
PATH=/Library/Developer/CommandLineTools/usr/bin:$INSTALL_DIR/bin:$PATH
MAKE=/Library/Developer/CommandLineTools/usr/bin/make
elif [ -d '/Applications/Xcode.app/Contents/Developer/usr/bin' ]; then
PATH=/Applications/Xcode.app/Contents/Developer/usr/bin:/opt/salt/bin:$PATH
PATH=/Applications/Xcode.app/Contents/Developer/usr/bin:$INSTALL_DIR/bin:$PATH
MAKE=/Applications/Xcode.app/Contents/Developer/usr/bin/make
else
echo "No installation of XCode found. This script requires XCode."
@ -125,12 +134,15 @@ download(){
############################################################################
# Ensure Paths are present and clean
############################################################################
echo "################################################################################"
echo "Ensure Paths are present and clean"
echo "################################################################################"
echo -n -e "\033]0;Build_Env: Clean\007"
# Make sure /opt/salt is clean
sudo rm -rf /opt/salt
sudo mkdir -p /opt/salt
sudo chown $USER:staff /opt/salt
# Make sure $INSTALL_DIR is clean
rm -rf $INSTALL_DIR
mkdir -p $INSTALL_DIR
chown $USER:staff $INSTALL_DIR
# Make sure build staging is clean
rm -rf build
@ -140,7 +152,7 @@ BUILDDIR=$SCRIPTDIR/build
############################################################################
# Download and install pkg-config
############################################################################
echo -n -e "\033]0;Build_Env: pkg-config\007"
echo -n -e "\033]0;Build_Env: pkg-config: download\007"
PKGURL="http://pkgconfig.freedesktop.org/releases/pkg-config-0.29.2.tar.gz"
PKGDIR="pkg-config-0.29.2"
@ -151,18 +163,22 @@ echo "##########################################################################
echo "Building pkg-config"
echo "################################################################################"
cd $PKGDIR
env LDFLAGS="-framework CoreFoundation -framework Carbon" ./configure --prefix=/opt/salt --with-internal-glib
echo -n -e "\033]0;Build_Env: pkg-config: configure\007"
env LDFLAGS="-framework CoreFoundation -framework Carbon" ./configure --prefix=$INSTALL_DIR --with-internal-glib
echo -n -e "\033]0;Build_Env: pkg-config: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: pkg-config: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: pkg-config: make install\007"
$MAKE install
############################################################################
# Download and install libsodium
############################################################################
echo -n -e "\033]0;Build_Env: libsodium\007"
echo -n -e "\033]0;Build_Env: libsodium: download\007"
PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.13.tar.gz"
PKGDIR="libsodium-1.0.13"
PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.15.tar.gz"
PKGDIR="libsodium-1.0.15"
download $PKGURL
@ -170,15 +186,19 @@ echo "##########################################################################
echo "Building libsodium"
echo "################################################################################"
cd $PKGDIR
./configure --prefix=/opt/salt
echo -n -e "\033]0;Build_Env: libsodium: configure\007"
./configure --prefix=$INSTALL_DIR
echo -n -e "\033]0;Build_Env: libsodium: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: libsodium: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: libsodium: make install\007"
$MAKE install
############################################################################
# Download and install zeromq
############################################################################
echo -n -e "\033]0;Build_Env: zeromq\007"
echo -n -e "\033]0;Build_Env: zeromq: download\007"
PKGURL="http://download.zeromq.org/zeromq-4.1.4.tar.gz"
PKGDIR="zeromq-4.1.4"
@ -189,18 +209,22 @@ echo "##########################################################################
echo "Building zeromq"
echo "################################################################################"
cd $PKGDIR
./configure --prefix=/opt/salt
echo -n -e "\033]0;Build_Env: zeromq: configure\007"
./configure --prefix=$INSTALL_DIR
echo -n -e "\033]0;Build_Env: zeromq: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: zeromq: make check\007"
$MAKE check
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: zeromq: make install\007"
$MAKE install
############################################################################
# Download and install OpenSSL
############################################################################
echo -n -e "\033]0;Build_Env: OpenSSL\007"
echo -n -e "\033]0;Build_Env: OpenSSL: download\007"
PKGURL="http://openssl.org/source/openssl-1.0.2l.tar.gz"
PKGDIR="openssl-1.0.2l"
PKGURL="http://openssl.org/source/openssl-1.0.2n.tar.gz"
PKGDIR="openssl-1.0.2n"
download $PKGURL
@ -208,19 +232,23 @@ echo "##########################################################################
echo "Building OpenSSL"
echo "################################################################################"
cd $PKGDIR
./Configure darwin64-x86_64-cc --prefix=/opt/salt --openssldir=/opt/salt/openssl
echo -n -e "\033]0;Build_Env: OpenSSL: configure\007"
./Configure darwin64-x86_64-cc --prefix=$INSTALL_DIR --openssldir=$INSTALL_DIR/openssl
echo -n -e "\033]0;Build_Env: OpenSSL: make\007"
$MAKE
echo -n -e "\033]0;Build_Env: OpenSSL: make test\007"
$MAKE test
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: OpenSSL: make install\007"
$MAKE install
############################################################################
# Download and install Python
############################################################################
echo -n -e "\033]0;Build_Env: Python\007"
echo -n -e "\033]0;Build_Env: Python: download\007"
if [ "$PYVER" == "2" ]; then
PKGURL="https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tar.xz"
PKGDIR="Python-2.7.13"
PKGURL="https://www.python.org/ftp/python/2.7.14/Python-2.7.14.tar.xz"
PKGDIR="Python-2.7.14"
else
PKGURL="https://www.python.org/ftp/python/3.5.3/Python-3.5.3.tar.xz"
PKGDIR="Python-3.5.3"
@ -233,15 +261,17 @@ echo "Building Python"
echo "################################################################################"
echo "Note there are some test failures"
cd $PKGDIR
./configure --prefix=/opt/salt --enable-shared --enable-toolbox-glue --with-ensurepip=install
echo -n -e "\033]0;Build_Env: Python: configure\007"
./configure --prefix=$INSTALL_DIR --enable-shared --enable-toolbox-glue --with-ensurepip=install
echo -n -e "\033]0;Build_Env: Python: make\007"
$MAKE
# $MAKE test
sudo -H $MAKE install
echo -n -e "\033]0;Build_Env: Python: make install\007"
$MAKE install
############################################################################
# upgrade pip
############################################################################
sudo -H $PIP install --upgrade pip
$PIP install --upgrade pip
############################################################################
# Download and install salt python dependencies
@ -253,23 +283,21 @@ cd $BUILDDIR
echo "################################################################################"
echo "Installing Salt Dependencies with pip (normal)"
echo "################################################################################"
sudo -H $PIP install \
-r $SRCDIR/pkg/osx/req.txt \
--no-cache-dir
$PIP install -r $SRCDIR/pkg/osx/req.txt \
--no-cache-dir
echo "################################################################################"
echo "Installing Salt Dependencies with pip (build_ext)"
echo "################################################################################"
sudo -H $PIP install \
-r $SRCDIR/pkg/osx/req_ext.txt \
--global-option=build_ext \
--global-option="-I/opt/salt/include" \
--no-cache-dir
$PIP install -r $SRCDIR/pkg/osx/req_ext.txt \
--global-option=build_ext \
--global-option="-I$INSTALL_DIR/include" \
--no-cache-dir
echo "--------------------------------------------------------------------------------"
echo "Create Symlink to certifi for openssl"
echo "--------------------------------------------------------------------------------"
sudo ln -s $PYDIR/site-packages/certifi/cacert.pem /opt/salt/openssl/cert.pem
ln -s $PYDIR/site-packages/certifi/cacert.pem $INSTALL_DIR/openssl/cert.pem
echo -n -e "\033]0;Build_Env: Finished\007"

View File

@ -28,6 +28,14 @@
#
############################################################################
############################################################################
# Make sure the script is launched with sudo
############################################################################
if [[ $(id -u) -ne 0 ]]
then
exec sudo /bin/bash -c "$(printf '%q ' "$BASH_SOURCE" "$@")"
fi
############################################################################
# Set to Exit on all Errors
############################################################################
@ -96,8 +104,8 @@ mkdir -p $PKGDIR
############################################################################
echo -n -e "\033]0;Build_Pkg: Copy Start Scripts\007"
sudo cp $PKGRESOURCES/scripts/start-*.sh /opt/salt/bin/
sudo cp $PKGRESOURCES/scripts/salt-config.sh /opt/salt/bin
cp $PKGRESOURCES/scripts/start-*.sh /opt/salt/bin/
cp $PKGRESOURCES/scripts/salt-config.sh /opt/salt/bin
############################################################################
# Copy Service Definitions from Salt Repo to the Package Directory
@ -118,20 +126,20 @@ cp $PKGRESOURCES/scripts/com.saltstack.salt.api.plist $PKGDIR/Library/LaunchDaem
############################################################################
echo -n -e "\033]0;Build_Pkg: Trim unneeded files\007"
sudo rm -rdf $PKGDIR/opt/salt/bin/pkg-config
sudo rm -rdf $PKGDIR/opt/salt/lib/pkgconfig
sudo rm -rdf $PKGDIR/opt/salt/lib/engines
sudo rm -rdf $PKGDIR/opt/salt/share/aclocal
sudo rm -rdf $PKGDIR/opt/salt/share/doc
sudo rm -rdf $PKGDIR/opt/salt/share/man/man1/pkg-config.1
rm -rdf $PKGDIR/opt/salt/bin/pkg-config
rm -rdf $PKGDIR/opt/salt/lib/pkgconfig
rm -rdf $PKGDIR/opt/salt/lib/engines
rm -rdf $PKGDIR/opt/salt/share/aclocal
rm -rdf $PKGDIR/opt/salt/share/doc
rm -rdf $PKGDIR/opt/salt/share/man/man1/pkg-config.1
if [ "$PYVER" == "2" ]; then
sudo rm -rdf $PKGDIR/opt/salt/lib/python2.7/test
rm -rdf $PKGDIR/opt/salt/lib/python2.7/test
else
sudo rm -rdf $PKGDIR/opt/salt/lib/python3.5/test
rm -rdf $PKGDIR/opt/salt/lib/python3.5/test
fi
echo -n -e "\033]0;Build_Pkg: Remove compiled python files\007"
sudo find $PKGDIR/opt/salt -name '*.pyc' -type f -delete
find $PKGDIR/opt/salt -name '*.pyc' -type f -delete
############################################################################
# Copy Config Files from Salt Repo to the Package Directory

View File

@ -1,31 +1,31 @@
apache-libcloud==2.1.0
apache-libcloud==2.2.1
backports.ssl_match_hostname==3.5.0.1
backports_abc==0.5
certifi
cffi==1.10.0
CherryPy==11.0.0
cffi==1.11.2
CherryPy==13.0.0
click==6.7
enum34==1.1.6
gitdb==0.6.4
GitPython==2.1.1
idna==2.5
GitPython==2.1.7
idna==2.6
ipaddress==1.0.18
Jinja2==2.9.6
Jinja2==2.10
linode-python==1.1.1
Mako==1.0.7
MarkupSafe==1.0
msgpack-python==0.4.8
pyasn1==0.2.3
pyasn1==0.4.2
pycparser==2.18
pycrypto==2.6.1
python-dateutil==2.6.1
python-gnupg==0.4.1
PyYAML==3.12
pyzmq==16.0.2
requests==2.18.1
pyzmq==17.0.0b3
requests==2.18.4
singledispatch==3.4.0.3
six==1.10.0
six==1.11.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.1
tornado==4.5.2
vultr==1.0rc1

View File

@ -1,2 +1,2 @@
cryptography==2.0
pyOpenSSL==17.1.0
cryptography==2.1.4
pyOpenSSL==17.5.0

View File

@ -1 +0,0 @@
f37c9a28ce129d01e63c84d7db627a06402854578f62d17927334ea21ede318e04bbf66e890e3f47c85333e6b19f6e5581fb3f3e27efd24be27017d1b6529c4b ./Python-2.7.13.tar.xz

View File

@ -0,0 +1 @@
78310b0be6388ffa15f29a80afb9ab3c03a572cb094e9da00cfe391afadb51696e41f592eb658d6a31a2f422fdac8a55214a382cbb8cfb43d4a127d5b35ea7f9 ./Python-2.7.14.tar.xz

View File

@ -1 +0,0 @@
c619b12fdf0b2e59174b6e383a62d5499ebcd720fdbb2c1a41a98a46c285df075202423454b294fefee185432441e943805397d7656f7cd7837de425da623929 ./libsodium-1.0.13.tar.gz

View File

@ -0,0 +1 @@
299a208f8342793d13498e95b23f1749f5b5b13ec276db3ec401130615e837ef475b6a1283b6e87a5f8227d23e70e38ca721073dadd5dc88fe4aff342aa64adc ./libsodium-1.0.15.tar.gz

View File

@ -1 +0,0 @@
047d964508ad6025c79caabd8965efd2416dc026a56183d0ef4de7a0a6769ce8e0b4608a3f8393d326f6d03b26a2b067e6e0c750f35b20be190e595e8290c0e3 ./openssl-1.0.2l.tar.gz

View File

@ -0,0 +1 @@
144bf0d6aa27b4af01df0b7b734c39962649e1711554247d42e05e14d8945742b18745aefdba162e2dfc762b941fd7d3b2d5dc6a781ae4ba10a6f5a3cadb0687 ./openssl-1.0.2n.tar.gz

View File

@ -8,6 +8,7 @@
}
/var/log/salt/minion {
su root root
weekly
missingok
rotate 7

View File

@ -218,24 +218,40 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req_2.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_2.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip download"
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
# Move PyWin32 DLL's to site-packages\win32
# Install PyWin32 from wheel file
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyWin32 . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyWin322'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
# Create gen_py directory
New-Item -Path "$($ini['Settings']['SitePkgs2Dir'])\win32com\gen_py" -ItemType Directory -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
@ -248,37 +264,6 @@ Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
#==============================================================================
# Install PyYAML with CLoader
# This has to be a compiled binary to get the CLoader
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyYAML . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyYAML2'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\easy_install.exe" "-Z $file " "easy_install PyYAML"
#==============================================================================
# Install PyCrypto from wheel file
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing PyCrypto . . ."
Write-Output " ----------------------------------------------------------------"
# Download
$file = "$($ini[$bitPrograms]['PyCrypto2'])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "install --no-index --find-links=$($ini['Settings']['DownloadDir']) $file " "pip install PyCrypto"
#==============================================================================
# Copy DLLs to Python Directory
#==============================================================================

View File

@ -218,16 +218,16 @@ Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
Write-Output " ----------------------------------------------------------------"
if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req_3.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "--no-cache-dir install -r $($script_path)\req.txt" "pip install"
} else {
if ( (Get-ChildItem $Env:SALT_REQ_LOCAL_CACHE | Measure-Object).Count -eq 0 ) {
# folder empty
Write-Output " pip download from req_3.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_3.txt" "pip download"
Write-Output " pip download from req.txt into empty local cache SALT_REQ $Env:SALT_REQ_LOCAL_CACHE"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip download --dest $Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip download"
}
Write-Output " reading from local pip cache $Env:SALT_REQ_LOCAL_CACHE"
Write-Output " If a (new) ressource is missing, please delete all files in this cache, go online and repeat"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_3.txt" "pip install"
Start_Process_and_test_exitcode "$($ini['Settings']['Python3Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req.txt" "pip install"
}
#==============================================================================
@ -243,12 +243,15 @@ $file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
# Install
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install --no-index --find-links=$($ini['Settings']['DownloadDir']) $file " "pip install PyWin32"
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
# Create gen_py directory
New-Item -Path "$($ini['Settings']['SitePkgs3Dir'])\win32com\gen_py" -ItemType Directory -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"

View File

@ -33,6 +33,10 @@ goto CheckPython3
MsiExec.exe /X {4A656C6C-D24A-473F-9747-3A8D00907A03} /QN
echo %0 :: - 2.7.13 (64 bit)
MsiExec.exe /X {4A656C6C-D24A-473F-9747-3A8D00907A04} /QN
echo %0 :: - 2.7.14 (32 bit)
MsiExec.exe /X {0398A685-FD8D-46B3-9816-C47319B0CF5E} /QN
echo %0 :: - 2.7.14 (64 bit)
MsiExec.exe /X {0398A685-FD8D-46B3-9816-C47319B0CF5F} /QN
echo.

View File

@ -362,7 +362,7 @@ Section -Prerequisites
# /qb! used by 2008 installer
# It just ignores the unrecognized switches...
ClearErrors
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
ExecWait '"$INSTDIR\vcredist.exe" /qb! /quiet /norestart' $0
IfErrors 0 CheckVcRedistErrorCode
MessageBox MB_OK \
"$VcRedistName failed to install. Try installing the package manually." \

View File

@ -60,20 +60,20 @@ Function Get-Settings {
# Filenames for 64 bit Windows
$64bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win_amd64.whl"
"Python2" = "python-2.7.13.amd64.msi"
"PyYAML2" = "PyYAML-3.11.win-amd64-py2.7.exe"
"Python2" = "python-2.7.14.amd64.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win_amd64.whl"
"Python3" = "python-3.5.3-amd64.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win_amd64.whl"
"PyWin323" = "pywin32-221-cp35-cp35m-win_amd64.whl"
}
$ini.Add("64bitPrograms", $64bitPrograms)
# Filenames for 32 bit Windows
$32bitPrograms = @{
"PyCrypto2" = "pycrypto-2.6.1-cp27-none-win32.whl"
"Python2" = "python-2.7.13.msi"
"PyYAML2" = "PyYAML-3.11.win32-py2.7.exe"
"Python2" = "python-2.7.14.msi"
"PyWin322" = "pywin32-221-cp27-cp27m-win32.whl"
"Python3" = "python-3.5.3.exe"
"PyWin323" = "pywin32-220.1-cp35-cp35m-win32.whl"
"PyWin323" = "pywin32-221-cp35-cp35m-win32.whl"
}
$ini.Add("32bitPrograms", $32bitPrograms)

36
pkg/windows/req.txt Normal file
View File

@ -0,0 +1,36 @@
backports-abc==0.5
backports.ssl-match-hostname==3.5.0.1
certifi
cffi==1.11.2
cryptography==2.1.4
enum34==1.1.6
futures==3.1.1
gitdb==0.6.4
GitPython==2.1.7
idna==2.6
ioloop==0.1a0
ipaddress==1.0.18
Jinja2==2.10
lxml==4.1.1
Mako==1.0.7
MarkupSafe==1.0
msgpack-python==0.4.8
psutil==5.4.1
pyasn1==0.4.2
pycparser==2.18
pycrypto==2.6.1
pycurl==7.43.0
PyMySQL==0.7.11
pyOpenSSL==17.5.0
python-dateutil==2.6.1
python-gnupg==0.4.1
pyyaml==3.12
pyzmq==17.0.0b3
requests==2.18.4
singledispatch==3.4.0.3
six==1.11.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.2
wheel==0.30.0
WMI==1.4.9

View File

@ -1,4 +0,0 @@
-r req_base.txt
lxml==3.6.0
pypiwin32==219

View File

@ -1,5 +0,0 @@
-r req_base.txt
lxml==3.7.3
pycrypto==2.6.1
PyYAML==3.12

View File

@ -1,34 +0,0 @@
backports-abc==0.5
backports.ssl-match-hostname==3.5.0.1
certifi
cffi==1.10.0
CherryPy==10.2.1
cryptography==1.8.1
enum34==1.1.6
futures==3.1.1
gitdb==0.6.4
GitPython==2.1.3
idna==2.5
ioloop==0.1a0
ipaddress==1.0.18
Jinja2==2.9.6
Mako==1.0.6
MarkupSafe==1.0
msgpack-python==0.4.8
psutil==5.2.2
pyasn1==0.2.3
pycparser==2.17
pycurl==7.43.0
PyMySQL==0.7.11
pyOpenSSL==17.0.0
python-dateutil==2.6.0
python-gnupg==0.4.0
pyzmq==16.0.2
requests==2.13.0
singledispatch==3.4.0.3
six==1.10.0
smmap==0.9.0
timelib==0.2.4
tornado==4.5.1
wheel==0.30.0a0
WMI==1.4.9

View File

@ -1,2 +1,2 @@
pip==9.0.1
setuptools==35.0.2
setuptools==38.2.4

View File

@ -2,7 +2,7 @@ mock
boto
boto3
moto
SaltPyLint>=v2017.3.6
SaltPyLint>=v2017.6.22
apache-libcloud
virtualenv

View File

@ -4,10 +4,9 @@ Salt package
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import warnings
# future lint: disable=non-unicode-string
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
'once', # Show once
@ -15,19 +14,18 @@ warnings.filterwarnings(
DeprecationWarning, # This filter is for DeprecationWarnings
r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.<whatever>'
)
# future lint: enable=non-unicode-string
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
u'ignore',
u'With-statements now directly support multiple context managers',
'ignore',
'With-statements now directly support multiple context managers',
DeprecationWarning
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
u'ignore',
u'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
'ignore',
'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
UserWarning
)
@ -39,7 +37,7 @@ def __define_global_system_encoding_variable__():
# and reset to None
encoding = None
if not sys.platform.startswith(u'win') and sys.stdin is not None:
if not sys.platform.startswith('win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -65,16 +63,16 @@ def __define_global_system_encoding_variable__():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith(u'darwin'):
if sys.platform.startswith('darwin'):
# Mac OS X uses UTF-8
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
encoding = 'utf-8'
elif sys.platform.startswith('win'):
# Windows uses a configurable encoding; on Windows, Python uses the name “mbcs”
# to refer to whatever the currently configured encoding is.
encoding = u'mbcs'
encoding = 'mbcs'
else:
# On linux default to ascii as a last resort
encoding = u'ascii'
encoding = 'ascii'
# We can't use six.moves.builtins because these builtins get deleted sooner
# than expected. See:
@ -85,7 +83,7 @@ def __define_global_system_encoding_variable__():
import builtins # pylint: disable=import-error
# Define the detected encoding as a built-in variable for ease of use
setattr(builtins, u'__salt_system_encoding__', encoding)
setattr(builtins, '__salt_system_encoding__', encoding)
# This is now garbage collectable
del sys

View File

@ -46,7 +46,7 @@ else:
if HAS_XML:
if not hasattr(ElementTree, u'ParseError'):
if not hasattr(ElementTree, 'ParseError'):
class ParseError(Exception):
'''
older versions of ElementTree do not have ParseError
@ -56,7 +56,7 @@ if HAS_XML:
ElementTree.ParseError = ParseError
def text_(s, encoding=u'latin-1', errors=u'strict'):
def text_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
@ -66,7 +66,7 @@ def text_(s, encoding=u'latin-1', errors=u'strict'):
return s
def bytes_(s, encoding=u'latin-1', errors=u'strict'):
def bytes_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
@ -79,25 +79,25 @@ def bytes_(s, encoding=u'latin-1', errors=u'strict'):
if PY3:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode(u'ascii')
return str(s, u'ascii', u'strict')
s = s.encode('ascii')
return str(s, 'ascii', 'strict')
else:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode(u'ascii')
s = s.encode('ascii')
return str(s)
ascii_native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode(u'ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(u'ascii')``, otherwise return ``str(s)``
''' # future lint: disable=non-unicode-string
``s.encode('ascii')``, otherwise return ``str(s)``
'''
if PY3:
def native_(s, encoding=u'latin-1', errors=u'strict'):
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s``, otherwise return ``str(s, encoding, errors)``
@ -106,7 +106,7 @@ if PY3:
return s
return str(s, encoding, errors)
else:
def native_(s, encoding=u'latin-1', errors=u'strict'):
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
@ -121,7 +121,7 @@ return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
''' # future lint: disable=non-unicode-string
'''
def string_io(data=None): # cStringIO can't handle unicode

View File

@ -330,7 +330,6 @@ class Beacon(object):
evt.fire_event({'complete': complete, 'comment': comment,
'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_modify_complete')
return True
def delete_beacon(self, name):

View File

@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
'''
Beacon to fire an event when we notice that an AIX user is locked due to many failed login attempts.
.. versionadded:: Oxygen
:depends: none
'''
# Import Python libs
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
__virtualname__ = 'aix_account'
def __virtual__():
    '''
    Make this beacon available only when the minion reports an AIX kernel.

    Returns the virtual module name on AIX, otherwise a (False, reason)
    tuple explaining why the beacon was not loaded.
    '''
    kernel = __grains__['kernel']
    if kernel != 'AIX':
        return (False, 'The aix_account beacon module failed to load: '
                       'only available on AIX systems.')
    return __virtualname__
def __validate__(config):
'''
Validate the beacon configuration
'''
# Configuration for aix_account beacon should be a dictionary
if not isinstance(config, dict):
return False, ('Configuration for aix_account beacon must be a dict.')
if 'user' not in config:
return False, ('Configuration for aix_account beacon must '
'include a user or ALL for all users.')
return True, 'Valid beacon configuration'
def beacon(config):
    '''
    Check for accounts locked out by too many (3 or more) invalid login
    attempts, as reported by the ``shadow.login_failures`` execution module.

    Returns a single-entry list whose dict maps ``accounts`` to the locked
    account list.

    .. code-block:: yaml

        beacons:
          aix_account:
            user: ALL
            interval: 120
    '''
    # ``user`` may be a specific account name or ALL for every account.
    locked = __salt__['shadow.login_failures'](config['user'])
    return [{'accounts': locked}]

View File

@ -145,8 +145,10 @@ class BaseCaller(object):
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
out,
self.opts)
out=out,
opts=self.opts,
_retcode=ret.get('retcode', 0))
# _retcode will be available in the kwargs of the outputter function
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
except SaltInvocationError as err:
@ -372,8 +374,10 @@ class RAETCaller(BaseCaller):
self.process.terminate()
salt.output.display_output(
{'local': print_ret},
ret.get('out', 'nested'),
self.opts)
out=ret.get('out', 'nested'),
opts=self.opts,
_retcode=ret.get('retcode', 0))
# _retcode will be available in the kwargs of the outputter function
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])

View File

@ -181,7 +181,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
for full_ret in self.local_client.cmd_cli(**kwargs):
ret_, out, retcode = self._format_ret(full_ret)
ret.update(ret_)
self._output_ret(ret, out)
self._output_ret(ret, out, retcode=retcode)
else:
if self.options.verbose:
kwargs['verbose'] = True
@ -190,7 +190,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
try:
ret_, out, retcode = self._format_ret(full_ret)
retcodes.append(retcode)
self._output_ret(ret_, out)
self._output_ret(ret_, out, retcode=retcode)
ret.update(full_ret)
except KeyError:
errors.append(full_ret)
@ -212,7 +212,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
except (SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc:
ret = str(exc)
self._output_ret(ret, '')
self._output_ret(ret, '', retcode=1)
def _preview_target(self):
'''
@ -352,7 +352,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
'Requested job was still run but output cannot be displayed.\n')
salt.output.update_progress(self.config, progress, self.progress_bar, out)
def _output_ret(self, ret, out):
def _output_ret(self, ret, out, retcode=0):
'''
Print the output from a single return to the terminal
'''
@ -362,7 +362,10 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
self._print_docs(ret)
else:
# Determine the proper output method and run it
salt.output.display_output(ret, out, self.config)
salt.output.display_output(ret,
out=out,
opts=self.config,
_retcode=retcode)
if not ret:
sys.stderr.write('ERROR: No return received\n')
sys.exit(2)

File diff suppressed because it is too large Load Diff

View File

@ -16,7 +16,7 @@ client applications.
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import os
# Import Salt libs
@ -38,7 +38,7 @@ def tokenify(cmd, token=None):
Otherwise return cmd
'''
if token is not None:
cmd[u'token'] = token
cmd['token'] = token
return cmd
@ -51,19 +51,19 @@ class APIClient(object):
if not opts:
opts = salt.config.client_config(
os.environ.get(
u'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, u'master')
'SALT_MASTER_CONFIG',
os.path.join(syspaths.CONFIG_DIR, 'master')
)
)
self.opts = opts
self.localClient = salt.client.get_local_client(self.opts[u'conf_file'])
self.localClient = salt.client.get_local_client(self.opts['conf_file'])
self.runnerClient = salt.runner.RunnerClient(self.opts)
self.wheelClient = salt.wheel.Wheel(self.opts)
self.resolver = salt.auth.Resolver(self.opts)
self.event = salt.utils.event.get_event(
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=listen)
@ -119,20 +119,20 @@ class APIClient(object):
'''
cmd = dict(cmd) # make copy
client = u'minion' # default to local minion client
mode = cmd.get(u'mode', u'async') # default to 'async'
client = 'minion' # default to local minion client
mode = cmd.get('mode', 'async') # default to 'async'
# check for wheel or runner prefix to fun name to use wheel or runner client
funparts = cmd.get(u'fun', u'').split(u'.')
if len(funparts) > 2 and funparts[0] in [u'wheel', u'runner']: # master
funparts = cmd.get('fun', '').split('.')
if len(funparts) > 2 and funparts[0] in ['wheel', 'runner']: # master
client = funparts[0]
cmd[u'fun'] = u'.'.join(funparts[1:]) # strip prefix
cmd['fun'] = '.'.join(funparts[1:]) # strip prefix
if not (u'token' in cmd or
(u'eauth' in cmd and u'password' in cmd and u'username' in cmd)):
raise EauthAuthenticationError(u'No authentication credentials given')
if not ('token' in cmd or
('eauth' in cmd and 'password' in cmd and 'username' in cmd)):
raise EauthAuthenticationError('No authentication credentials given')
executor = getattr(self, u'{0}_{1}'.format(client, mode))
executor = getattr(self, '{0}_{1}'.format(client, mode))
result = executor(**cmd)
return result
@ -205,9 +205,9 @@ class APIClient(object):
Adds client per the command.
'''
cmd[u'client'] = u'minion'
if len(cmd[u'module'].split(u'.')) > 2 and cmd[u'module'].split(u'.')[0] in [u'runner', u'wheel']:
cmd[u'client'] = u'master'
cmd['client'] = 'minion'
if len(cmd['module'].split('.')) > 2 and cmd['module'].split('.')[0] in ['runner', 'wheel']:
cmd['client'] = 'master'
return self._signature(cmd)
def _signature(self, cmd):
@ -217,20 +217,20 @@ class APIClient(object):
'''
result = {}
client = cmd.get(u'client', u'minion')
if client == u'minion':
cmd[u'fun'] = u'sys.argspec'
cmd[u'kwarg'] = dict(module=cmd[u'module'])
client = cmd.get('client', 'minion')
if client == 'minion':
cmd['fun'] = 'sys.argspec'
cmd['kwarg'] = dict(module=cmd['module'])
result = self.run(cmd)
elif client == u'master':
parts = cmd[u'module'].split(u'.')
elif client == 'master':
parts = cmd['module'].split('.')
client = parts[0]
module = u'.'.join(parts[1:]) # strip prefix
if client == u'wheel':
module = '.'.join(parts[1:]) # strip prefix
if client == 'wheel':
functions = self.wheelClient.functions
elif client == u'runner':
elif client == 'runner':
functions = self.runnerClient.functions
result = {u'master': salt.utils.args.argspec_report(functions, module)}
result = {'master': salt.utils.args.argspec_report(functions, module)}
return result
def create_token(self, creds):
@ -275,20 +275,20 @@ class APIClient(object):
tokenage = self.resolver.mk_token(creds)
except Exception as ex:
raise EauthAuthenticationError(
u"Authentication failed with {0}.".format(repr(ex)))
"Authentication failed with {0}.".format(repr(ex)))
if u'token' not in tokenage:
raise EauthAuthenticationError(u"Authentication failed with provided credentials.")
if 'token' not in tokenage:
raise EauthAuthenticationError("Authentication failed with provided credentials.")
# Grab eauth config for the current backend for the current user
tokenage_eauth = self.opts[u'external_auth'][tokenage[u'eauth']]
if tokenage[u'name'] in tokenage_eauth:
tokenage[u'perms'] = tokenage_eauth[tokenage[u'name']]
tokenage_eauth = self.opts['external_auth'][tokenage['eauth']]
if tokenage['name'] in tokenage_eauth:
tokenage['perms'] = tokenage_eauth[tokenage['name']]
else:
tokenage[u'perms'] = tokenage_eauth[u'*']
tokenage['perms'] = tokenage_eauth['*']
tokenage[u'user'] = tokenage[u'name']
tokenage[u'username'] = tokenage[u'name']
tokenage['user'] = tokenage['name']
tokenage['username'] = tokenage['name']
return tokenage
@ -301,11 +301,11 @@ class APIClient(object):
result = self.resolver.get_token(token)
except Exception as ex:
raise EauthAuthenticationError(
u"Token validation failed with {0}.".format(repr(ex)))
"Token validation failed with {0}.".format(repr(ex)))
return result
def get_event(self, wait=0.25, tag=u'', full=False):
def get_event(self, wait=0.25, tag='', full=False):
'''
Get a single salt event.
If no events are available, then block for up to ``wait`` seconds.
@ -323,4 +323,4 @@ class APIClient(object):
Need to convert this to a master call with appropriate authentication
'''
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
return self.event.fire_event(data, salt.utils.event.tagify(tag, 'wui'))

View File

@ -4,7 +4,7 @@ A collection of mixins useful for the various *Client interfaces
'''
# Import Python libs
from __future__ import absolute_import, print_function, with_statement
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import fnmatch
import signal
import logging
@ -38,18 +38,18 @@ import tornado.stack_context
log = logging.getLogger(__name__)
CLIENT_INTERNAL_KEYWORDS = frozenset([
u'client',
u'cmd',
u'eauth',
u'fun',
u'kwarg',
u'match',
u'token',
u'__jid__',
u'__tag__',
u'__user__',
u'username',
u'password'
'client',
'cmd',
'eauth',
'fun',
'kwarg',
'match',
'token',
'__jid__',
'__tag__',
'__user__',
'username',
'password'
])
@ -81,9 +81,9 @@ class ClientFuncsDict(collections.MutableMapping):
raise KeyError
def wrapper(*args, **kwargs):
low = {u'fun': key,
u'args': args,
u'kwargs': kwargs,
low = {'fun': key,
'args': args,
'kwargs': kwargs,
}
pub_data = {}
# Copy kwargs keys so we can iterate over and pop the pub data
@ -91,18 +91,18 @@ class ClientFuncsDict(collections.MutableMapping):
# pull out pub_data if you have it
for kwargs_key in kwargs_keys:
if kwargs_key.startswith(u'__pub_'):
if kwargs_key.startswith('__pub_'):
pub_data[kwargs_key] = kwargs.pop(kwargs_key)
async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid'))
async_pub = self.client._gen_async_pub(pub_data.get('__pub_jid'))
user = salt.utils.user.get_specific_user()
return self.client._proc_function(
key,
low,
user,
async_pub[u'tag'], # TODO: fix
async_pub[u'jid'], # TODO: fix
async_pub['tag'], # TODO: fix
async_pub['jid'], # TODO: fix
False, # Don't daemonize
)
return wrapper
@ -133,14 +133,14 @@ class SyncClientMixin(object):
Execute a function through the master network interface.
'''
load = kwargs
load[u'cmd'] = self.client
load['cmd'] = self.client
channel = salt.transport.Channel.factory(self.opts,
crypt=u'clear',
usage=u'master_call')
crypt='clear',
usage='master_call')
ret = channel.send(load)
if isinstance(ret, collections.Mapping):
if u'error' in ret:
salt.utils.error.raise_error(**ret[u'error'])
if 'error' in ret:
salt.utils.error.raise_error(**ret['error'])
return ret
def cmd_sync(self, low, timeout=None, full_return=False):
@ -159,19 +159,19 @@ class SyncClientMixin(object):
'eauth': 'pam',
})
'''
event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=True)
event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True)
job = self.master_call(**low)
ret_tag = salt.utils.event.tagify(u'ret', base=job[u'tag'])
ret_tag = salt.utils.event.tagify('ret', base=job['tag'])
if timeout is None:
timeout = self.opts.get(u'rest_timeout', 300)
timeout = self.opts.get('rest_timeout', 300)
ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True)
if ret is None:
raise salt.exceptions.SaltClientTimeout(
u"RunnerClient job '{0}' timed out".format(job[u'jid']),
jid=job[u'jid'])
"RunnerClient job '{0}' timed out".format(job['jid']),
jid=job['jid'])
return ret if full_return else ret[u'data'][u'return']
return ret if full_return else ret['data']['return']
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
@ -206,57 +206,57 @@ class SyncClientMixin(object):
arg = tuple()
if not isinstance(arg, list) and not isinstance(arg, tuple):
raise salt.exceptions.SaltInvocationError(
u'arg must be formatted as a list/tuple'
'arg must be formatted as a list/tuple'
)
if pub_data is None:
pub_data = {}
if not isinstance(pub_data, dict):
raise salt.exceptions.SaltInvocationError(
u'pub_data must be formatted as a dictionary'
'pub_data must be formatted as a dictionary'
)
if kwarg is None:
kwarg = {}
if not isinstance(kwarg, dict):
raise salt.exceptions.SaltInvocationError(
u'kwarg must be formatted as a dictionary'
'kwarg must be formatted as a dictionary'
)
arglist = salt.utils.args.parse_input(
arg,
no_parse=self.opts.get(u'no_parse', []))
no_parse=self.opts.get('no_parse', []))
# if you were passed kwarg, add it to arglist
if kwarg:
kwarg[u'__kwarg__'] = True
kwarg['__kwarg__'] = True
arglist.append(kwarg)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun], arglist, pub_data
)
low = {u'fun': fun,
u'arg': args,
u'kwarg': kwargs}
low = {'fun': fun,
'arg': args,
'kwarg': kwargs}
return self.low(fun, low, print_event=print_event, full_return=full_return)
@property
def mminion(self):
if not hasattr(self, u'_mminion'):
if not hasattr(self, '_mminion'):
self._mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False)
return self._mminion
def low(self, fun, low, print_event=True, full_return=False):
'''
Check for deprecated usage and allow until Salt Oxygen.
Check for deprecated usage and allow until Salt Fluorine.
'''
msg = []
if u'args' in low:
msg.append(u'call with arg instead')
low[u'arg'] = low.pop(u'args')
if u'kwargs' in low:
msg.append(u'call with kwarg instead')
low[u'kwarg'] = low.pop(u'kwargs')
if 'args' in low:
msg.append('call with arg instead')
low['arg'] = low.pop('args')
if 'kwargs' in low:
msg.append('call with kwarg instead')
low['kwarg'] = low.pop('kwargs')
if msg:
salt.utils.versions.warn_until(u'Oxygen', u' '.join(msg))
salt.utils.versions.warn_until('Fluorine', ' '.join(msg))
return self._low(fun, low, print_event=print_event, full_return=full_return)
@ -270,13 +270,13 @@ class SyncClientMixin(object):
class_name = self.__class__.__name__.lower()
except AttributeError:
log.warning(
u'Unable to determine class name',
'Unable to determine class name',
exc_info_on_loglevel=logging.DEBUG
)
return True
try:
return self.opts[u'{0}_returns'.format(class_name)]
return self.opts['{0}_returns'.format(class_name)]
except KeyError:
# No such option, assume this isn't one we care about gating and
# just return True.
@ -299,24 +299,24 @@ class SyncClientMixin(object):
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
jid = low.get('__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
u'jid': jid,
u'user': low.get(u'__user__', u'UNKNOWN'),
data = {'fun': '{0}.{1}'.format(self.client, fun),
'jid': jid,
'user': low.get('__user__', 'UNKNOWN'),
}
event = salt.utils.event.get_event(
u'master',
self.opts[u'sock_dir'],
self.opts[u'transport'],
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, u'print_async_event') \
if hasattr(self, 'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
@ -331,12 +331,12 @@ class SyncClientMixin(object):
# TODO: document these, and test that they exist
# TODO: Other things to inject??
func_globals = {u'__jid__': jid,
u'__user__': data[u'user'],
u'__tag__': tag,
func_globals = {'__jid__': jid,
'__user__': data['user'],
'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
u'__jid_event__': weakref.proxy(namespaced_event),
'__jid_event__': weakref.proxy(namespaced_event),
}
try:
@ -348,9 +348,9 @@ class SyncClientMixin(object):
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if u'.' not in mod_name:
if '.' not in mod_name:
continue
mod, _ = mod_name.split(u'.', 1)
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
@ -366,81 +366,81 @@ class SyncClientMixin(object):
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in.
if u'arg' in low and u'kwarg' in low:
args = low[u'arg']
kwargs = low[u'kwarg']
if 'arg' in low and 'kwarg' in low:
args = low['arg']
kwargs = low['kwarg']
else:
f_call = salt.utils.args.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get(u'args', ())
kwargs = f_call.get(u'kwargs', {})
args = f_call.get('args', ())
kwargs = f_call.get('kwargs', {})
# Update the event data with loaded args and kwargs
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals[u'__jid_event__'].fire_event(data, u'new')
data['fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals['__jid_event__'].fire_event(data, 'new')
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
data[u'return'] = self.functions[fun](*args, **kwargs)
data['return'] = self.functions[fun](*args, **kwargs)
try:
data[u'success'] = self.context.get(u'retcode', 0) == 0
data['success'] = self.context.get('retcode', 0) == 0
except AttributeError:
# Assume a True result if no context attribute
data[u'success'] = True
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
data['success'] = True
if isinstance(data['return'], dict) and 'data' in data['return']:
# some functions can return boolean values
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
data['success'] = salt.utils.state.check_result(data['return']['data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data[u'return'] = str(ex)
data['return'] = six.text_type(ex)
else:
data[u'return'] = u'Exception occurred in {0} {1}: {2}'.format(
data['return'] = 'Exception occurred in {0} {1}: {2}'.format(
self.client,
fun,
traceback.format_exc(),
)
data[u'success'] = False
data['success'] = False
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
u'id': self.opts[u'id'],
u'tgt': self.opts[u'id'],
u'jid': data[u'jid'],
u'return': data,
'id': self.opts['id'],
'tgt': self.opts['id'],
'jid': data['jid'],
'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error(u'Could not store job cache info. '
u'Job details for this run may be unavailable.')
log.error('Could not store job cache info. '
'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, u'ret')
namespaced_event.fire_event(data, 'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info(u'Runner completed: %s', data[u'jid'])
log.info('Runner completed: %s', data['jid'])
del event
del namespaced_event
return data if full_return else data[u'return']
return data if full_return else data['return']
def get_docs(self, arg=None):
'''
Return a dictionary of functions and the inline documentation for each
'''
if arg:
if u'*' in arg:
if '*' in arg:
target_mod = arg
_use_fnmatch = True
else:
target_mod = arg + u'.' if not arg.endswith(u'.') else arg
target_mod = arg + '.' if not arg.endswith('.') else arg
_use_fnmatch = False
if _use_fnmatch:
docs = [(fun, self.functions[fun].__doc__)
@ -478,9 +478,9 @@ class AsyncClientMixin(object):
salt.log.setup.setup_multiprocessing_logging()
# pack a few things into low
low[u'__jid__'] = jid
low[u'__user__'] = user
low[u'__tag__'] = tag
low['__jid__'] = jid
low['__user__'] = user
low['__tag__'] = tag
return self.low(fun, low, full_return=False)
@ -508,9 +508,9 @@ class AsyncClientMixin(object):
if jid is None:
jid = salt.utils.jid.gen_jid(self.opts)
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
return {u'tag': tag, u'jid': jid}
return {'tag': tag, 'jid': jid}
def async(self, fun, low, user=u'UNKNOWN', pub=None):
def async(self, fun, low, user='UNKNOWN', pub=None):
'''
Execute the function in a multiprocess and return the event tag to use
to watch for the return
@ -519,7 +519,7 @@ class AsyncClientMixin(object):
proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._proc_function,
args=(fun, low, user, async_pub[u'tag'], async_pub[u'jid']))
args=(fun, low, user, async_pub['tag'], async_pub['jid']))
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
@ -535,29 +535,29 @@ class AsyncClientMixin(object):
return
# if we are "quiet", don't print
if self.opts.get(u'quiet', False):
if self.opts.get('quiet', False):
return
# some suffixes we don't want to print
if suffix in (u'new',):
if suffix in ('new',):
return
try:
outputter = self.opts.get(u'output', event.get(u'outputter', None) or event.get(u'return').get(u'outputter'))
outputter = self.opts.get('output', event.get('outputter', None) or event.get('return').get('outputter'))
except AttributeError:
outputter = None
# if this is a ret, we have our own set of rules
if suffix == u'ret':
if suffix == 'ret':
# Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict two keys: 'data' and 'outputter'
if isinstance(event.get(u'return'), dict) \
and set(event[u'return']) == set((u'data', u'outputter')):
event_data = event[u'return'][u'data']
outputter = event[u'return'][u'outputter']
if isinstance(event.get('return'), dict) \
and set(event['return']) == set(('data', 'outputter')):
event_data = event['return']['data']
outputter = event['return']['outputter']
else:
event_data = event[u'return']
event_data = event['return']
else:
event_data = {u'suffix': suffix, u'event': event}
event_data = {'suffix': suffix, 'event': event}
salt.output.display_output(event_data, outputter, self.opts)

View File

@ -3,7 +3,7 @@
The main entry point for salt-api
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import signal
import logging
@ -20,7 +20,7 @@ class NetapiClient(object):
'''
def __init__(self, opts):
self.opts = opts
self.process_manager = salt.utils.process.ProcessManager(name=u'NetAPIProcessManager')
self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager')
self.netapi = salt.loader.netapi(self.opts)
def run(self):
@ -28,11 +28,11 @@ class NetapiClient(object):
Load and start all available api modules
'''
if not len(self.netapi):
log.error(u"Did not find any netapi configurations, nothing to start")
log.error("Did not find any netapi configurations, nothing to start")
for fun in self.netapi:
if fun.endswith(u'.start'):
log.info(u'Starting %s netapi module', fun)
if fun.endswith('.start'):
log.info('Starting %s netapi module', fun)
self.process_manager.add_process(self.netapi[fun])
# Install the SIGINT/SIGTERM handlers if not done so far

View File

@ -2,7 +2,7 @@
'''
The client libs to communicate with the salt master when running raet
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import os
@ -32,7 +32,7 @@ class LocalClient(salt.client.LocalClient):
The RAET LocalClient
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None):
salt.client.LocalClient.__init__(self, c_path, mopts)
@ -41,22 +41,22 @@ class LocalClient(salt.client.LocalClient):
tgt,
fun,
arg=(),
tgt_type=u'glob',
ret=u'',
jid=u'',
tgt_type='glob',
ret='',
jid='',
timeout=5,
**kwargs):
'''
Publish the command!
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
payload_kwargs = self._prep_pub(
tgt,
@ -68,21 +68,21 @@ class LocalClient(salt.client.LocalClient):
timeout=timeout,
**kwargs)
kind = self.opts[u'__role']
kind = self.opts['__role']
if kind not in kinds.APPL_KINDS:
emsg = (u"Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u"\n")
emsg = ("Invalid application kind = '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
lanename = u'master'
lanename = 'master'
else:
emsg = (u"Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + u'\n')
emsg = ("Unsupported application kind '{0}' for Raet LocalClient.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
sockdirpath = self.opts[u'sock_dir']
name = u'client' + nacling.uuid(size=18)
sockdirpath = self.opts['sock_dir']
name = 'client' + nacling.uuid(size=18)
stack = LaneStack(
name=name,
lanename=lanename,
@ -91,12 +91,12 @@ class LocalClient(salt.client.LocalClient):
manor_yard = RemoteYard(
stack=stack,
lanename=lanename,
name=u'manor',
name='manor',
dirpath=sockdirpath)
stack.addRemote(manor_yard)
route = {u'dst': (None, manor_yard.name, u'local_cmd'),
u'src': (None, stack.local.name, None)}
msg = {u'route': route, u'load': payload_kwargs}
route = {'dst': (None, manor_yard.name, 'local_cmd'),
'src': (None, stack.local.name, None)}
msg = {'route': route, 'load': payload_kwargs}
stack.transmit(msg)
stack.serviceAll()
while True:
@ -104,9 +104,9 @@ class LocalClient(salt.client.LocalClient):
stack.serviceAll()
while stack.rxMsgs:
msg, sender = stack.rxMsgs.popleft()
ret = msg.get(u'return', {})
if u'ret' in ret:
ret = msg.get('return', {})
if 'ret' in ret:
stack.server.close()
return ret[u'ret']
return ret['ret']
stack.server.close()
return ret

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import os
import copy
import logging
@ -23,7 +23,7 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
def __init__(self,
c_path=os.path.join(syspaths.CONFIG_DIR, u'master'),
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None,
disable_custom_roster=False):
if mopts:
@ -31,14 +31,14 @@ class SSHClient(object):
else:
if os.path.isdir(c_path):
log.warning(
u'%s expects a file path not a directory path(%s) to '
u'its \'c_path\' keyword argument',
'%s expects a file path not a directory path(%s) to '
'its \'c_path\' keyword argument',
self.__class__.__name__, c_path
)
self.opts = salt.config.client_config(c_path)
# Salt API should never offer a custom roster!
self.opts[u'__disable_custom_roster'] = disable_custom_roster
self.opts['__disable_custom_roster'] = disable_custom_roster
def _prep_ssh(
self,
@ -46,30 +46,30 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
tgt_type='glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts[u'timeout'] = timeout
opts['timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
return salt.client.ssh.SSH(opts)
def cmd_iter(
@ -78,8 +78,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
ret=u'',
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
@ -88,14 +88,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
@ -105,7 +105,7 @@ class SSHClient(object):
tgt_type,
kwarg,
**kwargs)
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
yield ret
def cmd(self,
@ -113,7 +113,7 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
tgt_type='glob',
kwarg=None,
**kwargs):
'''
@ -122,14 +122,14 @@ class SSHClient(object):
.. versionadded:: 2015.5.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
ssh = self._prep_ssh(
tgt,
@ -140,7 +140,7 @@ class SSHClient(object):
kwarg,
**kwargs)
final = {}
for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)):
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
final.update(ret)
return final
@ -166,16 +166,16 @@ class SSHClient(object):
kwargs = copy.deepcopy(low)
for ignore in [u'tgt', u'fun', u'arg', u'timeout', u'tgt_type', u'kwarg']:
for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
if ignore in kwargs:
del kwargs[ignore]
return self.cmd(low[u'tgt'],
low[u'fun'],
low.get(u'arg', []),
low.get(u'timeout'),
low.get(u'tgt_type'),
low.get(u'kwarg'),
return self.cmd(low['tgt'],
low['fun'],
low.get('arg', []),
low.get('timeout'),
low.get('tgt_type'),
low.get('kwarg'),
**kwargs)
def cmd_async(self, low, timeout=None):
@ -204,8 +204,8 @@ class SSHClient(object):
fun,
arg=(),
timeout=None,
tgt_type=u'glob',
ret=u'',
tgt_type='glob',
ret='',
kwarg=None,
sub=3,
**kwargs):
@ -226,24 +226,24 @@ class SSHClient(object):
.. versionadded:: 2017.7.0
'''
if u'expr_form' in kwargs:
if 'expr_form' in kwargs:
salt.utils.versions.warn_until(
u'Fluorine',
u'The target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'The target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = kwargs.pop(u'expr_form')
tgt_type = kwargs.pop('expr_form')
minion_ret = self.cmd(tgt,
u'sys.list_functions',
'sys.list_functions',
tgt_type=tgt_type,
**kwargs)
minions = list(minion_ret)
random.shuffle(minions)
f_tgt = []
for minion in minions:
if fun in minion_ret[minion][u'return']:
if fun in minion_ret[minion]['return']:
f_tgt.append(minion)
if len(f_tgt) >= sub:
break
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type=u'list', ret=ret, kwarg=kwarg, **kwargs)
return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type='list', ret=ret, kwarg=kwarg, **kwargs)

View File

@ -2,7 +2,7 @@
'''
Manage transport commands via ssh
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import re
@ -18,14 +18,16 @@ import salt.defaults.exitcodes
import salt.utils.nb_popen
import salt.utils.vt
from salt.ext import six
log = logging.getLogger(__name__)
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M) # future lint: disable=non-unicode-string
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*') # future lint: disable=non-unicode-string
SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M)
KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*')
# Keep these in sync with ./__init__.py
RSTR = u'_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') # future lint: disable=non-unicode-string
RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)')
class NoPasswdError(Exception):
@ -40,7 +42,7 @@ def gen_key(path):
'''
Generate a key for use with salt-ssh
'''
cmd = u'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
subprocess.call(cmd, shell=True)
@ -50,12 +52,12 @@ def gen_shell(opts, **kwargs):
'''
Return the correct shell interface for the target system
'''
if kwargs[u'winrm']:
if kwargs['winrm']:
try:
import saltwinshell
shell = saltwinshell.Shell(opts, **kwargs)
except ImportError:
log.error(u'The saltwinshell library is not available')
log.error('The saltwinshell library is not available')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
shell = Shell(opts, **kwargs)
@ -85,10 +87,10 @@ class Shell(object):
ssh_options=None):
self.opts = opts
# ssh <ipv6>, but scp [<ipv6]:/path
self.host = host.strip(u'[]')
self.host = host.strip('[]')
self.user = user
self.port = port
self.passwd = str(passwd) if passwd else passwd
self.passwd = six.text_type(passwd) if passwd else passwd
self.priv = priv
self.timeout = timeout
self.sudo = sudo
@ -96,18 +98,18 @@ class Shell(object):
self.mods = mods
self.identities_only = identities_only
self.remote_port_forwards = remote_port_forwards
self.ssh_options = u'' if ssh_options is None else ssh_options
self.ssh_options = '' if ssh_options is None else ssh_options
def get_error(self, errstr):
'''
Parse out an error and return a targeted error string
'''
for line in errstr.split(u'\n'):
if line.startswith(u'ssh:'):
for line in errstr.split('\n'):
if line.startswith('ssh:'):
return line
if line.startswith(u'Pseudo-terminal'):
if line.startswith('Pseudo-terminal'):
continue
if u'to the list of known hosts.' in line:
if 'to the list of known hosts.' in line:
continue
return line
return errstr
@ -117,36 +119,36 @@ class Shell(object):
Return options for the ssh command base for Salt to call
'''
options = [
u'KbdInteractiveAuthentication=no',
'KbdInteractiveAuthentication=no',
]
if self.passwd:
options.append(u'PasswordAuthentication=yes')
options.append('PasswordAuthentication=yes')
else:
options.append(u'PasswordAuthentication=no')
if self.opts.get(u'_ssh_version', (0,)) > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get(u'known_hosts_file')
options.append('PasswordAuthentication=no')
if self.opts.get('_ssh_version', (0,)) > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
known_hosts = self.opts.get('known_hosts_file')
if known_hosts and os.path.isfile(known_hosts):
options.append(u'UserKnownHostsFile={0}'.format(known_hosts))
options.append('UserKnownHostsFile={0}'.format(known_hosts))
if self.port:
options.append(u'Port={0}'.format(self.port))
options.append('Port={0}'.format(self.port))
if self.priv:
options.append(u'IdentityFile={0}'.format(self.priv))
options.append('IdentityFile={0}'.format(self.priv))
if self.user:
options.append(u'User={0}'.format(self.user))
options.append('User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
options.append('IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
ret.append('-o {0} '.format(option))
return ''.join(ret)
def _passwd_opts(self):
'''
@ -155,41 +157,41 @@ class Shell(object):
# TODO ControlMaster does not work without ControlPath
# user could take advantage of it if they set ControlPath in their
# ssh config. Also, ControlPersist not widely available.
options = [u'ControlMaster=auto',
u'StrictHostKeyChecking=no',
options = ['ControlMaster=auto',
'StrictHostKeyChecking=no',
]
if self.opts[u'_ssh_version'] > (4, 9):
options.append(u'GSSAPIAuthentication=no')
options.append(u'ConnectTimeout={0}'.format(self.timeout))
if self.opts.get(u'ignore_host_keys'):
options.append(u'StrictHostKeyChecking=no')
if self.opts.get(u'no_host_keys'):
options.extend([u'StrictHostKeyChecking=no',
u'UserKnownHostsFile=/dev/null'])
if self.opts['_ssh_version'] > (4, 9):
options.append('GSSAPIAuthentication=no')
options.append('ConnectTimeout={0}'.format(self.timeout))
if self.opts.get('ignore_host_keys'):
options.append('StrictHostKeyChecking=no')
if self.opts.get('no_host_keys'):
options.extend(['StrictHostKeyChecking=no',
'UserKnownHostsFile=/dev/null'])
if self.passwd:
options.extend([u'PasswordAuthentication=yes',
u'PubkeyAuthentication=yes'])
options.extend(['PasswordAuthentication=yes',
'PubkeyAuthentication=yes'])
else:
options.extend([u'PasswordAuthentication=no',
u'PubkeyAuthentication=yes',
u'KbdInteractiveAuthentication=no',
u'ChallengeResponseAuthentication=no',
u'BatchMode=yes'])
options.extend(['PasswordAuthentication=no',
'PubkeyAuthentication=yes',
'KbdInteractiveAuthentication=no',
'ChallengeResponseAuthentication=no',
'BatchMode=yes'])
if self.port:
options.append(u'Port={0}'.format(self.port))
options.append('Port={0}'.format(self.port))
if self.user:
options.append(u'User={0}'.format(self.user))
options.append('User={0}'.format(self.user))
if self.identities_only:
options.append(u'IdentitiesOnly=yes')
options.append('IdentitiesOnly=yes')
ret = []
for option in options:
ret.append(u'-o {0} '.format(option))
return u''.join(ret)
ret.append('-o {0} '.format(option))
return ''.join(ret)
def _ssh_opts(self):
return u' '.join([u'-o {0}'.format(opt)
return ' '.join(['-o {0}'.format(opt)
for opt in self.ssh_options])
def _copy_id_str_old(self):
@ -199,9 +201,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return u"{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -217,9 +219,9 @@ class Shell(object):
if self.passwd:
# Using single quotes prevents shell expansion and
# passwords containing '$'
return u"{0} {1} {2} -p {3} {4} {5}@{6}".format(
u'ssh-copy-id',
u'-i {0}.pub'.format(self.priv),
return "{0} {1} {2} -p {3} {4} {5}@{6}".format(
'ssh-copy-id',
'-i {0}.pub'.format(self.priv),
self._passwd_opts(),
self.port,
self._ssh_opts(),
@ -232,11 +234,11 @@ class Shell(object):
Execute ssh-copy-id to plant the id file on the target
'''
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old())
if salt.defaults.exitcodes.EX_OK != retcode and u'Usage' in stderr:
if salt.defaults.exitcodes.EX_OK != retcode and 'Usage' in stderr:
stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new())
return stdout, stderr, retcode
def _cmd_str(self, cmd, ssh=u'ssh'):
def _cmd_str(self, cmd, ssh='ssh'):
'''
Return the cmd string to execute
'''
@ -245,21 +247,21 @@ class Shell(object):
# need to deliver the SHIM to the remote host and execute it there
command = [ssh]
if ssh != u'scp':
if ssh != 'scp':
command.append(self.host)
if self.tty and ssh == u'ssh':
command.append(u'-t -t')
if self.tty and ssh == 'ssh':
command.append('-t -t')
if self.passwd or self.priv:
command.append(self.priv and self._key_opts() or self._passwd_opts())
if ssh != u'scp' and self.remote_port_forwards:
command.append(u' '.join([u'-R {0}'.format(item)
for item in self.remote_port_forwards.split(u',')]))
if ssh != 'scp' and self.remote_port_forwards:
command.append(' '.join(['-R {0}'.format(item)
for item in self.remote_port_forwards.split(',')]))
if self.ssh_options:
command.append(self._ssh_opts())
command.append(cmd)
return u' '.join(command)
return ' '.join(command)
def _old_run_cmd(self, cmd):
'''
@ -276,7 +278,7 @@ class Shell(object):
data = proc.communicate()
return data[0], data[1], proc.returncode
except Exception:
return (u'local', u'Unknown Error', None)
return ('local', 'Unknown Error', None)
def _run_nb_cmd(self, cmd):
'''
@ -300,7 +302,7 @@ class Shell(object):
err = self.get_error(err)
yield out, err, rcode
except Exception:
yield (u'', u'Unknown Error', None)
yield ('', 'Unknown Error', None)
def exec_nb_cmd(self, cmd):
'''
@ -311,9 +313,9 @@ class Shell(object):
rcode = None
cmd = self._cmd_str(cmd)
logmsg = u'Executing non-blocking command: {0}'.format(cmd)
logmsg = 'Executing non-blocking command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
logmsg = logmsg.replace(self.passwd, ('*' * 6))
log.debug(logmsg)
for out, err, rcode in self._run_nb_cmd(cmd):
@ -322,7 +324,7 @@ class Shell(object):
if err is not None:
r_err.append(err)
yield None, None, None
yield u''.join(r_out), u''.join(r_err), rcode
yield ''.join(r_out), ''.join(r_err), rcode
def exec_cmd(self, cmd):
'''
@ -330,11 +332,11 @@ class Shell(object):
'''
cmd = self._cmd_str(cmd)
logmsg = u'Executing command: {0}'.format(cmd)
logmsg = 'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
if u'decode("base64")' in logmsg or u'base64.b64decode(' in logmsg:
log.debug(u'Executed SHIM command. Command logged to TRACE')
logmsg = logmsg.replace(self.passwd, ('*' * 6))
if 'decode("base64")' in logmsg or 'base64.b64decode(' in logmsg:
log.debug('Executed SHIM command. Command logged to TRACE')
log.trace(logmsg)
else:
log.debug(logmsg)
@ -347,19 +349,19 @@ class Shell(object):
scp a file or files to a remote system
'''
if makedirs:
self.exec_cmd(u'mkdir -p {0}'.format(os.path.dirname(remote)))
self.exec_cmd('mkdir -p {0}'.format(os.path.dirname(remote)))
# scp needs [<ipv6}
host = self.host
if u':' in host:
host = u'[{0}]'.format(host)
if ':' in host:
host = '[{0}]'.format(host)
cmd = u'{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh=u'scp')
cmd = '{0} {1}:{2}'.format(local, host, remote)
cmd = self._cmd_str(cmd, ssh='scp')
logmsg = u'Executing command: {0}'.format(cmd)
logmsg = 'Executing command: {0}'.format(cmd)
if self.passwd:
logmsg = logmsg.replace(self.passwd, (u'*' * 6))
logmsg = logmsg.replace(self.passwd, ('*' * 6))
log.debug(logmsg)
return self._run_cmd(cmd)
@ -373,16 +375,16 @@ class Shell(object):
cmd,
shell=True,
log_stdout=True,
log_stdout_level=u'trace',
log_stdout_level='trace',
log_stderr=True,
log_stderr_level=u'trace',
log_stderr_level='trace',
stream_stdout=False,
stream_stderr=False)
sent_passwd = 0
send_password = True
ret_stdout = u''
ret_stderr = u''
old_stdout = u''
ret_stdout = ''
ret_stderr = ''
old_stdout = ''
try:
while term.has_unread_data:
@ -399,26 +401,26 @@ class Shell(object):
send_password = False
if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password:
if not self.passwd:
return u'', u'Permission denied, no authentication information', 254
return '', 'Permission denied, no authentication information', 254
if sent_passwd < passwd_retries:
term.sendline(self.passwd)
sent_passwd += 1
continue
else:
# asking for a password, and we can't seem to send it
return u'', u'Password authentication failed', 254
return '', 'Password authentication failed', 254
elif buff and KEY_VALID_RE.search(buff):
if key_accept:
term.sendline(u'yes')
term.sendline('yes')
continue
else:
term.sendline(u'no')
ret_stdout = (u'The host key needs to be accepted, to '
u'auto accept run salt-ssh with the -i '
u'flag:\n{0}').format(stdout)
return ret_stdout, u'', 254
elif buff and buff.endswith(u'_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(u',', u':')) + u'|_E|0|'
term.sendline('no')
ret_stdout = ('The host key needs to be accepted, to '
'auto accept run salt-ssh with the -i '
'flag:\n{0}').format(stdout)
return ret_stdout, '', 254
elif buff and buff.endswith('_||ext_mods||_'):
mods_raw = json.dumps(self.mods, separators=(',', ':')) + '|_E|0|'
term.sendline(mods_raw)
if stdout:
old_stdout = stdout

View File

@ -8,7 +8,7 @@ helper script used by salt.client.ssh.Single. It is here, in a
separate file, for convenience of development.
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import hashlib
import tarfile
@ -18,8 +18,8 @@ import os
import stat
import subprocess
THIN_ARCHIVE = u'salt-thin.tgz'
EXT_ARCHIVE = u'salt-ext_mods.tgz'
THIN_ARCHIVE = 'salt-thin.tgz'
EXT_ARCHIVE = 'salt-ext_mods.tgz'
# Keep these in sync with salt/defaults/exitcodes.py
EX_THIN_DEPLOY = 11
@ -54,7 +54,7 @@ def get_system_encoding():
# and reset to None
encoding = None
if not sys.platform.startswith(u'win') and sys.stdin is not None:
if not sys.platform.startswith('win') and sys.stdin is not None:
# On linux we can rely on sys.stdin for the encoding since it
# most commonly matches the filesystem encoding. This however
# does not apply to windows
@ -80,16 +80,16 @@ def get_system_encoding():
# the way back to ascii
encoding = sys.getdefaultencoding()
if not encoding:
if sys.platform.startswith(u'darwin'):
if sys.platform.startswith('darwin'):
# Mac OS X uses UTF-8
encoding = u'utf-8'
elif sys.platform.startswith(u'win'):
encoding = 'utf-8'
elif sys.platform.startswith('win'):
# Windows uses a configurable encoding; on Windows, Python uses the name "mbcs"
# to refer to whatever the currently configured encoding is.
encoding = u'mbcs'
encoding = 'mbcs'
else:
# On linux default to ascii as a last resort
encoding = u'ascii'
encoding = 'ascii'
return encoding
@ -97,7 +97,7 @@ def is_windows():
'''
Simple function to return if a host is Windows or not
'''
return sys.platform.startswith(u'win')
return sys.platform.startswith('win')
def need_deployment():
@ -121,35 +121,35 @@ def need_deployment():
# Attack detected
need_deployment()
# If SUDOing then also give the super user group write permissions
sudo_gid = os.environ.get(u'SUDO_GID')
sudo_gid = os.environ.get('SUDO_GID')
if sudo_gid:
try:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
stt = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
except OSError:
sys.stdout.write(u'\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
u'and is not root, be certain the user is in the same group\nas the login user')
sys.stdout.write('\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
'and is not root, be certain the user is in the same group\nas the login user')
sys.exit(1)
# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write(u"{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.exit(EX_THIN_DEPLOY)
# Adapted from salt.utils.hashutils.get_hash()
def get_hash(path, form=u'sha1', chunk_size=4096):
def get_hash(path, form='sha1', chunk_size=4096):
'''
Generate a hash digest string for a file.
'''
try:
hash_type = getattr(hashlib, form)
except AttributeError:
raise ValueError(u'Invalid hash type: {0}'.format(form))
with open(path, u'rb') as ifile:
raise ValueError('Invalid hash type: {0}'.format(form))
with open(path, 'rb') as ifile:
hash_obj = hash_type()
# read the file in in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b''): # future lint: disable=non-unicode-string
for chunk in iter(lambda: ifile.read(chunk_size), b''):
hash_obj.update(chunk)
return hash_obj.hexdigest()
@ -170,7 +170,7 @@ def need_ext():
'''
Signal that external modules need to be deployed.
'''
sys.stdout.write(u"{0}\next_mods\n".format(OPTIONS.delimiter))
sys.stdout.write("{0}\next_mods\n".format(OPTIONS.delimiter))
sys.exit(EX_MOD_DEPLOY)
@ -180,20 +180,20 @@ def unpack_ext(ext_path):
'''
modcache = os.path.join(
OPTIONS.saltdir,
u'running_data',
u'var',
u'cache',
u'salt',
u'minion',
u'extmods')
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077)
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask)
os.unlink(ext_path)
ver_path = os.path.join(modcache, u'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, u'ext_version')
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
shutil.move(ver_path, ver_dst)
@ -208,8 +208,8 @@ def main(argv): # pylint: disable=W0613
unpack_thin(thin_path)
# Salt thin now is available to use
else:
if not sys.platform.startswith(u'win'):
scpstat = subprocess.Popen([u'/bin/sh', u'-c', u'command -v scp']).wait()
if not sys.platform.startswith('win'):
scpstat = subprocess.Popen(['/bin/sh', '-c', 'command -v scp']).wait()
if scpstat != 0:
sys.exit(EX_SCP_NOT_FOUND)
@ -218,46 +218,46 @@ def main(argv): # pylint: disable=W0613
if not os.path.isdir(OPTIONS.saltdir):
sys.stderr.write(
u'ERROR: salt path "{0}" exists but is'
u' not a directory\n'.format(OPTIONS.saltdir)
'ERROR: salt path "{0}" exists but is'
' not a directory\n'.format(OPTIONS.saltdir)
)
sys.exit(EX_CANTCREAT)
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, u'version'))
version_path = os.path.normpath(os.path.join(OPTIONS.saltdir, 'version'))
if not os.path.exists(version_path) or not os.path.isfile(version_path):
sys.stderr.write(
u'WARNING: Unable to locate current thin '
u' version: {0}.\n'.format(version_path)
'WARNING: Unable to locate current thin '
' version: {0}.\n'.format(version_path)
)
need_deployment()
with open(version_path, u'r') as vpo:
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.version:
sys.stderr.write(
u'WARNING: current thin version {0}'
u' is not up-to-date with {1}.\n'.format(
'WARNING: current thin version {0}'
' is not up-to-date with {1}.\n'.format(
cur_version, OPTIONS.version
)
)
need_deployment()
# Salt thin exists and is up-to-date - fall through and use it
salt_call_path = os.path.join(OPTIONS.saltdir, u'salt-call')
salt_call_path = os.path.join(OPTIONS.saltdir, 'salt-call')
if not os.path.isfile(salt_call_path):
sys.stderr.write(u'ERROR: thin is missing "{0}"\n'.format(salt_call_path))
sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
need_deployment()
with open(os.path.join(OPTIONS.saltdir, u'minion'), u'w') as config:
config.write(OPTIONS.config + u'\n')
with open(os.path.join(OPTIONS.saltdir, 'minion'), 'w') as config:
config.write(OPTIONS.config + '\n')
if OPTIONS.ext_mods:
ext_path = os.path.join(OPTIONS.saltdir, EXT_ARCHIVE)
if os.path.exists(ext_path):
unpack_ext(ext_path)
else:
version_path = os.path.join(OPTIONS.saltdir, u'ext_version')
version_path = os.path.join(OPTIONS.saltdir, 'ext_version')
if not os.path.exists(version_path) or not os.path.isfile(version_path):
need_ext()
with open(version_path, u'r') as vpo:
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.ext_mods:
need_ext()
@ -270,38 +270,38 @@ def main(argv): # pylint: disable=W0613
salt_argv = [
sys.executable,
salt_call_path,
u'--retcode-passthrough',
u'--local',
u'--metadata',
u'--out', u'json',
u'-l', u'quiet',
u'-c', OPTIONS.saltdir
'--retcode-passthrough',
'--local',
'--metadata',
'--out', 'json',
'-l', 'quiet',
'-c', OPTIONS.saltdir
]
try:
if argv_prepared[-1].startswith(u'--no-parse='):
if argv_prepared[-1].startswith('--no-parse='):
salt_argv.append(argv_prepared.pop(-1))
except (IndexError, TypeError):
pass
salt_argv.append(u'--')
salt_argv.append('--')
salt_argv.extend(argv_prepared)
sys.stderr.write(u'SALT_ARGV: {0}\n'.format(salt_argv))
sys.stderr.write('SALT_ARGV: {0}\n'.format(salt_argv))
# Only emit the delimiter on *both* stdout and stderr when completely successful.
# Yes, the flush() is necessary.
sys.stdout.write(OPTIONS.delimiter + u'\n')
sys.stdout.write(OPTIONS.delimiter + '\n')
sys.stdout.flush()
if not OPTIONS.tty:
sys.stderr.write(OPTIONS.delimiter + u'\n')
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
old_umask = os.umask(OPTIONS.cmd_umask)
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors=u"replace"))
sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors="replace"))
sys.stdout.flush()
if OPTIONS.wipe:
shutil.rmtree(OPTIONS.saltdir)
@ -313,5 +313,5 @@ def main(argv): # pylint: disable=W0613
if OPTIONS.cmd_umask is not None:
os.umask(old_umask)
if __name__ == u'__main__':
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -2,7 +2,7 @@
'''
Create ssh executor system
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import logging
import os
@ -16,6 +16,8 @@ from contextlib import closing
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
@ -85,33 +87,33 @@ class SSHHighState(salt.state.BaseHighState):
'''
Evaluate master_tops locally
'''
if u'id' not in self.opts:
log.error(u'Received call for external nodes without an id')
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts[u'id']):
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if u'grains' in self.opts:
grains = self.opts[u'grains']
if 'grains' in self.opts:
grains = self.opts['grains']
for fun in self.tops:
if fun not in self.opts.get(u'master_tops', {}):
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
u'Top function %s failed with error %s for minion %s',
fun, exc, self.opts[u'id']
'Top function %s failed with error %s for minion %s',
fun, exc, self.opts['id']
)
return ret
def lowstate_file_refs(chunks, extras=u''):
def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
@ -119,12 +121,12 @@ def lowstate_file_refs(chunks, extras=u''):
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = u'base'
saltenv = 'base'
crefs = []
for state in chunk:
if state == u'__env__':
if state == '__env__':
saltenv = chunk[state]
elif state.startswith(u'__'):
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
@ -132,7 +134,7 @@ def lowstate_file_refs(chunks, extras=u''):
refs[saltenv] = []
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(u',')
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
@ -144,7 +146,7 @@ def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = u'salt://'
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, six.string_types):
@ -166,38 +168,38 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, u'lowstate.json')
pillarfn = os.path.join(gendir, u'pillar.json')
roster_grainsfn = os.path.join(gendir, u'roster_grains.json')
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
sync_refs = [
[salt.utils.url.create(u'_modules')],
[salt.utils.url.create(u'_states')],
[salt.utils.url.create(u'_grains')],
[salt.utils.url.create(u'_renderers')],
[salt.utils.url.create(u'_returners')],
[salt.utils.url.create(u'_output')],
[salt.utils.url.create(u'_utils')],
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.files.fopen(lowfn, u'w+') as fp_:
fp_.write(json.dumps(chunks))
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(json.dumps(chunks)))
if pillar:
with salt.utils.files.fopen(pillarfn, u'w+') as fp_:
fp_.write(json.dumps(pillar))
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(json.dumps(pillar)))
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, u'w+') as fp_:
fp_.write(json.dumps(roster_grains))
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(json.dumps(roster_grains)))
if id_ is None:
id_ = u''
id_ = ''
try:
cachedir = os.path.join(u'salt-ssh', id_).rstrip(os.sep)
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join(u'salt-ssh', str(id_)).rstrip(os.sep)
cachedir = os.path.join('salt-ssh', six.string_types(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, u'files', saltenv)
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
@ -209,7 +211,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = u''
path = ''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
@ -220,10 +222,10 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = u''
files = ''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip(u'/')
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
tgt = os.path.join(
env_root,
short,
@ -240,8 +242,8 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, u'w:gz')) as tfp:
for root, dirs, files in os.walk(gendir):
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in salt.utils.path.os_walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))

View File

@ -7,7 +7,7 @@ as ZeroMQ salt, but via ssh.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import json
import copy
@ -42,8 +42,8 @@ class FunctionWrapper(object):
self.wfuncs = wfuncs if isinstance(wfuncs, dict) else {}
self.opts = opts
self.mods = mods if isinstance(mods, dict) else {}
self.kwargs = {u'id_': id_,
u'host': host}
self.kwargs = {'id_': id_,
'host': host}
self.fsclient = fsclient
self.kwargs.update(kwargs)
self.aliases = aliases
@ -67,14 +67,14 @@ class FunctionWrapper(object):
'''
Return the function call to simulate the salt local lookup system
'''
if u'.' not in cmd and not self.cmd_prefix:
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. Create a new
# FunctionWrapper which contains the prefix 'cmd' (again, for the
# salt.cmd.run example)
kwargs = copy.deepcopy(self.kwargs)
id_ = kwargs.pop(u'id_')
host = kwargs.pop(u'host')
id_ = kwargs.pop('id_')
host = kwargs.pop('host')
return FunctionWrapper(self.opts,
id_,
host,
@ -90,7 +90,7 @@ class FunctionWrapper(object):
# We're in an inner FunctionWrapper as created by the code block
# above. Reconstruct the original cmd in the form 'cmd.run' and
# then evaluate as normal
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
return self.wfuncs[cmd]
@ -104,7 +104,7 @@ class FunctionWrapper(object):
'''
argv = [cmd]
argv.extend([json.dumps(arg) for arg in args])
argv.extend([u'{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
argv.extend(['{0}={1}'.format(key, json.dumps(val)) for key, val in six.iteritems(kwargs)])
single = salt.client.ssh.Single(
self.opts,
argv,
@ -115,21 +115,21 @@ class FunctionWrapper(object):
**self.kwargs
)
stdout, stderr, retcode = single.cmd_block()
if stderr.count(u'Permission Denied'):
return {u'_error': u'Permission Denied',
u'stdout': stdout,
u'stderr': stderr,
u'retcode': retcode}
if stderr.count('Permission Denied'):
return {'_error': 'Permission Denied',
'stdout': stdout,
'stderr': stderr,
'retcode': retcode}
try:
ret = json.loads(stdout, object_hook=salt.utils.data.decode_dict)
if len(ret) < 2 and u'local' in ret:
ret = ret[u'local']
ret = ret.get(u'return', {})
if len(ret) < 2 and 'local' in ret:
ret = ret['local']
ret = ret.get('return', {})
except ValueError:
ret = {u'_error': u'Failed to return clean data',
u'stderr': stderr,
u'stdout': stdout,
u'retcode': retcode}
ret = {'_error': 'Failed to return clean data',
'stderr': stderr,
'stdout': stdout,
'retcode': retcode}
return ret
return caller
@ -137,18 +137,18 @@ class FunctionWrapper(object):
'''
Set aliases for functions
'''
if u'.' not in cmd and not self.cmd_prefix:
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. We don't
# support assigning directly to prefixes in this way
raise KeyError(u'Cannot assign to module key {0} in the '
u'FunctionWrapper'.format(cmd))
raise KeyError('Cannot assign to module key {0} in the '
'FunctionWrapper'.format(cmd))
if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the first code
# block in __getitem__. Reconstruct the original cmd in the form
# 'cmd.run' and then evaluate as normal
cmd = u'{0}.{1}'.format(self.cmd_prefix, cmd)
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
self.wfuncs[cmd] = value

View File

@ -4,7 +4,7 @@ Return config information
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import re
import os
@ -17,44 +17,44 @@ import salt.syspaths as syspaths
from salt.ext import six
# Set up the default values for all systems
DEFAULTS = {u'mongo.db': u'salt',
u'mongo.host': u'salt',
u'mongo.password': u'',
u'mongo.port': 27017,
u'mongo.user': u'',
u'redis.db': u'0',
u'redis.host': u'salt',
u'redis.port': 6379,
u'test.foo': u'unconfigured',
u'ca.cert_base_path': u'/etc/pki',
u'solr.cores': [],
u'solr.host': u'localhost',
u'solr.port': u'8983',
u'solr.baseurl': u'/solr',
u'solr.type': u'master',
u'solr.request_timeout': None,
u'solr.init_script': u'/etc/rc.d/solr',
u'solr.dih.import_options': {u'clean': False, u'optimize': True,
u'commit': True, u'verbose': False},
u'solr.backup_path': None,
u'solr.num_backups': 1,
u'poudriere.config': u'/usr/local/etc/poudriere.conf',
u'poudriere.config_dir': u'/usr/local/etc/poudriere.d',
u'ldap.server': u'localhost',
u'ldap.port': u'389',
u'ldap.tls': False,
u'ldap.scope': 2,
u'ldap.attrs': None,
u'ldap.binddn': u'',
u'ldap.bindpw': u'',
u'hosts.file': u'/etc/hosts',
u'aliases.file': u'/etc/aliases',
u'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, u'salt-images'),
u'virt.tunnel': False,
DEFAULTS = {'mongo.db': 'salt',
'mongo.host': 'salt',
'mongo.password': '',
'mongo.port': 27017,
'mongo.user': '',
'redis.db': '0',
'redis.host': 'salt',
'redis.port': 6379,
'test.foo': 'unconfigured',
'ca.cert_base_path': '/etc/pki',
'solr.cores': [],
'solr.host': 'localhost',
'solr.port': '8983',
'solr.baseurl': '/solr',
'solr.type': 'master',
'solr.request_timeout': None,
'solr.init_script': '/etc/rc.d/solr',
'solr.dih.import_options': {'clean': False, 'optimize': True,
'commit': True, 'verbose': False},
'solr.backup_path': None,
'solr.num_backups': 1,
'poudriere.config': '/usr/local/etc/poudriere.conf',
'poudriere.config_dir': '/usr/local/etc/poudriere.d',
'ldap.server': 'localhost',
'ldap.port': '389',
'ldap.tls': False,
'ldap.scope': 2,
'ldap.attrs': None,
'ldap.binddn': '',
'ldap.bindpw': '',
'hosts.file': '/etc/hosts',
'aliases.file': '/etc/aliases',
'virt.images': os.path.join(syspaths.SRV_ROOT_DIR, 'salt-images'),
'virt.tunnel': False,
}
def backup_mode(backup=u''):
def backup_mode(backup=''):
'''
Return the backup mode
@ -66,7 +66,7 @@ def backup_mode(backup=u''):
'''
if backup:
return backup
return option(u'backup_mode')
return option('backup_mode')
def manage_mode(mode):
@ -97,14 +97,14 @@ def valid_fileproto(uri):
salt '*' config.valid_fileproto salt://path/to/file
'''
try:
return bool(re.match(u'^(?:salt|https?|ftp)://', uri))
return bool(re.match('^(?:salt|https?|ftp)://', uri))
except Exception:
return False
def option(
value,
default=u'',
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -121,8 +121,8 @@ def option(
if value in __opts__:
return __opts__[value]
if not omit_master:
if value in __pillar__.get(u'master', {}):
return __pillar__[u'master'][value]
if value in __pillar__.get('master', {}):
return __pillar__['master'][value]
if not omit_pillar:
if value in __pillar__:
return __pillar__[value]
@ -132,7 +132,7 @@ def option(
def merge(value,
default=u'',
default='',
omit_opts=False,
omit_master=False,
omit_pillar=False):
@ -155,8 +155,8 @@ def merge(value,
if isinstance(ret, six.string_types):
return ret
if not omit_master:
if value in __pillar__.get(u'master', {}):
tmp = __pillar__[u'master'][value]
if value in __pillar__.get('master', {}):
tmp = __pillar__['master'][value]
if ret is None:
ret = tmp
if isinstance(ret, six.string_types):
@ -185,7 +185,7 @@ def merge(value,
return ret or default
def get(key, default=u''):
def get(key, default=''):
'''
.. versionadded: 0.14.0
@ -216,17 +216,17 @@ def get(key, default=u''):
salt '*' config.get pkg:apache
'''
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, '_|-')
if ret != '_|-':
return ret
ret = salt.utils.data.traverse_dict_and_list(__pillar__.get(u'master', {}), key, u'_|-')
if ret != u'_|-':
ret = salt.utils.data.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-')
if ret != '_|-':
return ret
return default
@ -243,10 +243,10 @@ def dot_vals(value):
salt '*' config.dot_vals host
'''
ret = {}
for key, val in six.iteritems(__pillar__.get(u'master', {})):
if key.startswith(u'{0}.'.format(value)):
for key, val in six.iteritems(__pillar__.get('master', {})):
if key.startswith('{0}.'.format(value)):
ret[key] = val
for key, val in six.iteritems(__opts__):
if key.startswith(u'{0}.'.format(value)):
if key.startswith('{0}.'.format(value)):
ret[key] = val
return ret

View File

@ -3,13 +3,14 @@
Wrap the cp module allowing for managed ssh file transfers
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import logging
import os
# Import salt libs
import salt.client.ssh
import salt.utils.files
import salt.utils.stringutils
import salt.utils.templates
from salt.exceptions import CommandExecutionError
@ -18,7 +19,7 @@ log = logging.getLogger(__name__)
def get_file(path,
dest,
saltenv=u'base',
saltenv='base',
makedirs=False,
template=None,
gzip=None):
@ -31,83 +32,83 @@ def get_file(path,
cp.get_file. The argument is only accepted for interface compatibility.
'''
if gzip is not None:
log.warning(u'The gzip argument to cp.get_file in salt-ssh is '
u'unsupported')
log.warning('The gzip argument to cp.get_file in salt-ssh is '
'unsupported')
if template is not None:
(path, dest) = _render_filenames(path, dest, saltenv, template)
src = __context__[u'fileclient'].cache_file(
src = __context__['fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest, makedirs)
return not ret[2]
def get_dir(path, dest, saltenv=u'base'):
def get_dir(path, dest, saltenv='base'):
'''
Transfer a directory down
'''
src = __context__[u'fileclient'].cache_dir(
src = __context__['fileclient'].cache_dir(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
src = u' '.join(src)
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
src = ' '.join(src)
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def get_url(path, dest, saltenv=u'base'):
def get_url(path, dest, saltenv='base'):
'''
retrieve a URL
'''
src = __context__[u'fileclient'].cache_file(
src = __context__['fileclient'].cache_file(
path,
saltenv,
cachedir=os.path.join(u'salt-ssh', __salt__.kwargs[u'id_']))
cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
single = salt.client.ssh.Single(
__opts__,
u'',
'',
**__salt__.kwargs)
ret = single.shell.send(src, dest)
return not ret[2]
def list_states(saltenv=u'base'):
def list_states(saltenv='base'):
'''
List all the available state modules in an environment
'''
return __context__[u'fileclient'].list_states(saltenv)
return __context__['fileclient'].list_states(saltenv)
def list_master(saltenv=u'base', prefix=u''):
def list_master(saltenv='base', prefix=''):
'''
List all of the files stored on the master
'''
return __context__[u'fileclient'].file_list(saltenv, prefix)
return __context__['fileclient'].file_list(saltenv, prefix)
def list_master_dirs(saltenv=u'base', prefix=u''):
def list_master_dirs(saltenv='base', prefix=''):
'''
List all of the directories stored on the master
'''
return __context__[u'fileclient'].dir_list(saltenv, prefix)
return __context__['fileclient'].dir_list(saltenv, prefix)
def list_master_symlinks(saltenv=u'base', prefix=u''):
def list_master_symlinks(saltenv='base', prefix=''):
'''
List all of the symlinks stored on the master
'''
return __context__[u'fileclient'].symlink_list(saltenv, prefix)
return __context__['fileclient'].symlink_list(saltenv, prefix)
def _render_filenames(path, dest, saltenv, template):
@ -122,16 +123,16 @@ def _render_filenames(path, dest, saltenv, template):
# render the path as a template using path_template_engine as the engine
if template not in salt.utils.templates.TEMPLATE_REGISTRY:
raise CommandExecutionError(
u'Attempted to render file paths with unavailable engine '
u'{0}'.format(template)
'Attempted to render file paths with unavailable engine '
'{0}'.format(template)
)
kwargs = {}
kwargs[u'salt'] = __salt__
kwargs[u'pillar'] = __pillar__
kwargs[u'grains'] = __grains__
kwargs[u'opts'] = __opts__
kwargs[u'saltenv'] = saltenv
kwargs['salt'] = __salt__
kwargs['pillar'] = __pillar__
kwargs['grains'] = __grains__
kwargs['opts'] = __opts__
kwargs['saltenv'] = saltenv
def _render(contents):
'''
@ -140,23 +141,23 @@ def _render_filenames(path, dest, saltenv, template):
'''
# write out path to temp file
tmp_path_fn = salt.utils.files.mkstemp()
with salt.utils.files.fopen(tmp_path_fn, u'w+') as fp_:
fp_.write(contents)
with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(contents))
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
tmp_path_fn,
to_str=True,
**kwargs
)
salt.utils.files.safe_rm(tmp_path_fn)
if not data[u'result']:
if not data['result']:
# Failed to render the template
raise CommandExecutionError(
u'Failed to render file path with error: {0}'.format(
data[u'data']
'Failed to render file path with error: {0}'.format(
data['data']
)
)
else:
return data[u'data']
return data['data']
path = _render(path)
dest = _render(dest)

View File

@ -4,7 +4,7 @@ Return/control aspects of the grains data
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import collections
import copy
import math
@ -29,28 +29,28 @@ def _serial_sanitizer(instr):
'''
length = len(instr)
index = int(math.floor(length * .75))
return u'{0}{1}'.format(instr[:index], u'X' * (length - index))
return '{0}{1}'.format(instr[:index], 'X' * (length - index))
_FQDN_SANITIZER = lambda x: u'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: u'MINION'
_DOMAINNAME_SANITIZER = lambda x: u'DOMAINNAME'
_FQDN_SANITIZER = lambda x: 'MINION.DOMAINNAME'
_HOSTNAME_SANITIZER = lambda x: 'MINION'
_DOMAINNAME_SANITIZER = lambda x: 'DOMAINNAME'
# A dictionary of grain -> function mappings for sanitizing grain output. This
# is used when the 'sanitize' flag is given.
_SANITIZERS = {
u'serialnumber': _serial_sanitizer,
u'domain': _DOMAINNAME_SANITIZER,
u'fqdn': _FQDN_SANITIZER,
u'id': _FQDN_SANITIZER,
u'host': _HOSTNAME_SANITIZER,
u'localhost': _HOSTNAME_SANITIZER,
u'nodename': _HOSTNAME_SANITIZER,
'serialnumber': _serial_sanitizer,
'domain': _DOMAINNAME_SANITIZER,
'fqdn': _FQDN_SANITIZER,
'id': _FQDN_SANITIZER,
'host': _HOSTNAME_SANITIZER,
'localhost': _HOSTNAME_SANITIZER,
'nodename': _HOSTNAME_SANITIZER,
}
def get(key, default=u'', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True):
'''
Attempt to retrieve the named value from grains, if the named value is not
available return the passed default. The default return is an empty string.
@ -154,7 +154,7 @@ def item(*args, **kwargs):
ret[arg] = __grains__[arg]
except KeyError:
pass
if salt.utils.data.is_true(kwargs.get(u'sanitize')):
if salt.utils.data.is_true(kwargs.get('sanitize')):
for arg, func in six.iteritems(_SANITIZERS):
if arg in ret:
ret[arg] = func(ret[arg])
@ -175,9 +175,9 @@ def ls(): # pylint: disable=C0103
def filter_by(lookup_dict,
grain=u'os_family',
grain='os_family',
merge=None,
default=u'default',
default='default',
base=None):
'''
.. versionadded:: 0.17.0
@ -271,12 +271,12 @@ def filter_by(lookup_dict,
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException(u'filter_by default and look-up values must both be dictionaries.')
raise SaltException('filter_by default and look-up values must both be dictionaries.')
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException(u'filter_by merge argument must be a dictionary.')
raise SaltException('filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge

View File

@ -7,14 +7,14 @@ Wrapper function for mine operations for salt-ssh
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import copy
# Import salt libs
import salt.client.ssh
def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
def get(tgt, fun, tgt_type='glob', roster='flat'):
'''
Get data from the mine based on the target, function and tgt_type
@ -36,15 +36,15 @@ def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
salt-ssh '*' mine.get '192.168.5.0' network.ipaddrs roster=scan
'''
# Set up opts for the SSH object
opts = copy.deepcopy(__context__[u'master_opts'])
opts = copy.deepcopy(__context__['master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts[u'roster'] = roster
opts[u'argv'] = [fun]
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = []
opts['roster'] = roster
opts['argv'] = [fun]
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = []
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -56,8 +56,8 @@ def get(tgt, fun, tgt_type=u'glob', roster=u'flat'):
cret = {}
for host in rets:
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
if 'return' in rets[host]:
cret[host] = rets[host]['return']
else:
cret[host] = rets[host]
return cret

View File

@ -2,7 +2,7 @@
'''
Extract the pillar data for this minion
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import collections
@ -14,7 +14,7 @@ import salt.utils.dictupdate
from salt.defaults import DEFAULT_TARGET_DELIM
def get(key, default=u'', merge=False, delimiter=DEFAULT_TARGET_DELIM):
def get(key, default='', merge=False, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 0.14
@ -132,10 +132,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
__pillar__, key, KeyError, delimiter)
if ret is KeyError:
raise KeyError(u"Pillar key not found: {0}".format(key))
raise KeyError("Pillar key not found: {0}".format(key))
if not isinstance(ret, dict):
raise ValueError(u"Pillar value in key {0} is not a dict".format(key))
raise ValueError("Pillar value in key {0} is not a dict".format(key))
return ret.keys()

View File

@ -10,7 +10,7 @@ salt-ssh calls and return the data from them.
No access control is needed because calls cannot originate from the minions.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import copy
import logging
@ -26,10 +26,10 @@ log = logging.getLogger(__name__)
def _publish(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=None,
form=u'clean',
form='clean',
roster=None):
'''
Publish a command "from the minion out to other minions". In reality, the
@ -55,13 +55,13 @@ def _publish(tgt,
salt-ssh system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
'''
if fun.startswith(u'publish.'):
log.info(u'Cannot publish publish calls. Returning {}')
if fun.startswith('publish.'):
log.info('Cannot publish publish calls. Returning {}')
return {}
# TODO: implement returners? Do they make sense for salt-ssh calls?
if returner:
log.warning(u'Returners currently not supported in salt-ssh publish')
log.warning('Returners currently not supported in salt-ssh publish')
# Make sure args have been processed
if arg is None:
@ -74,17 +74,17 @@ def _publish(tgt,
arg = []
# Set up opts for the SSH object
opts = copy.deepcopy(__context__[u'master_opts'])
opts = copy.deepcopy(__context__['master_opts'])
minopts = copy.deepcopy(__opts__)
opts.update(minopts)
if roster:
opts[u'roster'] = roster
opts['roster'] = roster
if timeout:
opts[u'timeout'] = timeout
opts[u'argv'] = [fun] + arg
opts[u'selected_target_option'] = tgt_type
opts[u'tgt'] = tgt
opts[u'arg'] = arg
opts['timeout'] = timeout
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
# Create the SSH object to handle the actual call
ssh = salt.client.ssh.SSH(opts)
@ -94,11 +94,11 @@ def _publish(tgt,
for ret in ssh.run_iter():
rets.update(ret)
if form == u'clean':
if form == 'clean':
cret = {}
for host in rets:
if u'return' in rets[host]:
cret[host] = rets[host][u'return']
if 'return' in rets[host]:
cret[host] = rets[host]['return']
else:
cret[host] = rets[host]
return cret
@ -109,8 +109,8 @@ def _publish(tgt,
def publish(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
@ -176,10 +176,10 @@ def publish(tgt,
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -189,15 +189,15 @@ def publish(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form=u'clean',
form='clean',
roster=roster)
def full_data(tgt,
fun,
arg=None,
tgt_type=u'glob',
returner=u'',
tgt_type='glob',
returner='',
timeout=5,
roster=None,
expr_form=None):
@ -226,10 +226,10 @@ def full_data(tgt,
# performing the cleanup on this deprecation.
if expr_form is not None:
salt.utils.versions.warn_until(
u'Fluorine',
u'the target type should be passed using the \'tgt_type\' '
u'argument instead of \'expr_form\'. Support for using '
u'\'expr_form\' will be removed in Salt Fluorine.'
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
@ -239,7 +239,7 @@ def full_data(tgt,
tgt_type=tgt_type,
returner=returner,
timeout=timeout,
form=u'full',
form='full',
roster=roster)
@ -262,5 +262,5 @@ def runner(fun, arg=None, timeout=5):
arg = []
# Create and run the runner
runner = salt.runner.RunnerClient(__opts__[u'__master_opts__'])
runner = salt.runner.RunnerClient(__opts__['__master_opts__'])
return runner.cmd(fun, arg)

View File

@ -3,21 +3,25 @@
Create ssh executor system
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import os
import time
import copy
import json
import logging
# Import salt libs
from salt.exceptions import SaltInvocationError
import salt.client.ssh.shell
import salt.client.ssh.state
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.hashutils
import salt.utils.jid
import salt.utils.platform
import salt.utils.state
import salt.utils.thin
import salt.roster
import salt.state
@ -29,11 +33,52 @@ import salt.log
from salt.ext import six
__func_alias__ = {
u'apply_': u'apply'
'apply_': 'apply'
}
log = logging.getLogger(__name__)
def _set_retcode(ret, highstate=None):
'''
Set the return code based on the data back from the state system
'''
# Set default retcode to 0
__context__['retcode'] = 0
if isinstance(ret, list):
__context__['retcode'] = 1
return
if not salt.utils.state.check_result(ret, highstate=highstate):
__context__['retcode'] = 2
def _check_pillar(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True
def _wait(jid):
'''
Wait for all previously started state jobs to finish running
'''
if jid is None:
jid = salt.utils.jid.gen_jid(__opts__)
states = _prior_running_states(jid)
while states:
time.sleep(1)
states = _prior_running_states(jid)
def _merge_extra_filerefs(*args):
'''
Takes a list of filerefs and returns a merged list
@ -42,35 +87,35 @@ def _merge_extra_filerefs(*args):
for arg in args:
if isinstance(arg, six.string_types):
if arg:
ret.extend(arg.split(u','))
ret.extend(arg.split(','))
elif isinstance(arg, list):
if arg:
ret.extend(arg)
return u','.join(ret)
return ','.join(ret)
def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(u',')
if u'__exclude__' in high_data:
high_data[u'__exclude__'].extend(exclude)
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
else:
high_data[u'__exclude__'] = exclude
high_data['__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
@ -87,38 +132,38 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -131,13 +176,107 @@ def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout
def running(concurrent=False):
'''
Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running
'''
ret = []
if concurrent:
return ret
active = __salt__['saltutil.is_running']('state.*')
for data in active:
err = (
'The function "{0}" is running as PID {1} and was started at '
'{2} with jid {3}'
).format(
data['fun'],
data['pid'],
salt.utils.jid.jid_to_time(data['jid']),
data['jid'],
)
ret.append(err)
return ret
def _prior_running_states(jid):
'''
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
'''
ret = []
active = __salt__['saltutil.is_running']('state.*')
for data in active:
try:
data_jid = int(data['jid'])
except ValueError:
continue
if data_jid < int(jid):
ret.append(data)
return ret
def _check_queue(queue, kwargs):
'''
Utility function to queue the state run if requested
and to check for conflicts in currently running states
'''
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent=kwargs.get('concurrent', False))
if conflict:
__context__['retcode'] = 1
return conflict
def _get_opts(**kwargs):
'''
Return a copy of the opts for use, optionally load a local config on top
'''
opts = copy.deepcopy(__opts__)
if 'localconfig' in kwargs:
return salt.config.minion_config(kwargs['localconfig'], defaults=opts)
if 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
if saltenv is not None and not isinstance(saltenv, six.string_types):
opts['environment'] = str(kwargs['saltenv'])
else:
opts['environment'] = kwargs['saltenv']
if 'pillarenv' in kwargs:
pillarenv = kwargs['pillarenv']
if pillarenv is not None and not isinstance(pillarenv, six.string_types):
opts['pillarenv'] = str(kwargs['pillarenv'])
else:
opts['pillarenv'] = kwargs['pillarenv']
return opts
def _get_initial_pillar(opts):
return __pillar__ if __opts__['__cli'] == 'salt-call' \
and opts['pillarenv'] == __opts__['pillarenv'] \
else None
def low(data, **kwargs):
'''
Execute a single low data call
@ -150,51 +289,51 @@ def low(data, **kwargs):
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
for chunk in chunks:
chunk[u'__id__'] = chunk[u'name'] if not chunk.get(u'__id__') else chunk[u'__id__']
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -207,13 +346,28 @@ def low(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout
def _get_test_value(test=None, **kwargs):
'''
Determine the correct value for the test flag.
'''
ret = True
if test is None:
if salt.utils.args.test_mode(test=test, **kwargs):
ret = True
else:
ret = __opts__.get('test', None)
else:
ret = test
return ret
def high(data, **kwargs):
'''
Execute the compound calls stored in a single set of high data
@ -225,49 +379,49 @@ def high(data, **kwargs):
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
chunks = st_.state.compile_high_data(data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -280,8 +434,8 @@ def high(data, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout
@ -367,6 +521,8 @@ def check_request(name=None):
serial = salt.payload.Serial(__opts__)
if os.path.isfile(notify_path):
with salt.utils.files.fopen(notify_path, 'rb') as fp_:
# Not sure if this needs to be decoded since it is being returned,
# and msgpack serialization will encode it to bytes anyway.
req = serial.load(fp_)
if name:
return req[name]
@ -459,56 +615,56 @@ def highstate(test=None, **kwargs):
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
# Check for errors
for chunk in chunks:
if not isinstance(chunk, dict):
__context__[u'retcode'] = 1
__context__['retcode'] = 1
return chunks
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -521,8 +677,8 @@ def highstate(test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout
@ -540,55 +696,55 @@ def top(topfn, test=None, **kwargs):
salt '*' state.top reverse_top.sls exclude=sls_to_exclude
salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
if salt.utils.args.test_mode(test=test, **kwargs):
__opts__[u'test'] = True
__opts__['test'] = True
else:
__opts__[u'test'] = __opts__.get(u'test', None)
__opts__['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
st_.opts[u'state_top'] = os.path.join(u'salt://', topfn)
__context__['fileclient'])
st_.opts['state_top'] = os.path.join('salt://', topfn)
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
@ -601,8 +757,8 @@ def top(topfn, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout
@ -618,12 +774,12 @@ def show_highstate():
salt '*' state.show_highstate
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
return st_.compile_highstate()
@ -637,16 +793,109 @@ def show_lowstate():
salt '*' state.show_lowstate
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
return st_.compile_low_chunks()
def show_sls(mods, saltenv=u'base', test=None, **kwargs):
def sls_id(id_, mods, test=None, queue=False, **kwargs):
'''
Call a single ID from the named module(s) and handle all requisites
The state ID comes *before* the module ID(s) on the command line.
id
ID to call
mods
Comma-delimited list of modules to search for given id and its requisites
.. versionadded:: 2017.7.3
saltenv : base
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.sls_id my_state my_module
salt '*' state.sls_id my_state my_module,a_common_module
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(**kwargs)
opts['test'] = _get_test_value(test, **kwargs)
# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts['environment'] is None:
opts['environment'] = 'base'
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
initial_pillar=_get_initial_pillar(opts))
if not _check_pillar(kwargs, st_.opts['pillar']):
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
if isinstance(mods, six.string_types):
split_mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({opts['environment']: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Apply requisites to high data
high_, req_in_errors = st_.state.requisite_in(high_)
if req_in_errors:
# This if statement should not be necessary if there were no errors,
# but it is required to get the unit tests to pass.
errors.extend(req_in_errors)
if errors:
__context__['retcode'] = 1
return errors
chunks = st_.state.compile_high_data(high_)
ret = {}
for chunk in chunks:
if chunk.get('__id__', '') == id_:
ret.update(st_.state.call_chunk(chunk, {}, chunks))
_set_retcode(ret, highstate=highstate)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
if not ret:
raise SaltInvocationError(
'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
'\'{2}\''.format(id_, mods, opts['environment'])
)
return ret
def show_sls(mods, saltenv='base', test=None, **kwargs):
'''
Display the state data from a specific sls or list of sls files on the
master
@ -657,20 +906,20 @@ def show_sls(mods, saltenv=u'base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -686,7 +935,7 @@ def show_sls(mods, saltenv=u'base', test=None, **kwargs):
return high_data
def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
def show_low_sls(mods, saltenv='base', test=None, **kwargs):
'''
Display the low state data from a specific sls or list of sls files on the
master.
@ -699,21 +948,21 @@ def show_low_sls(mods, saltenv=u'base', test=None, **kwargs):
salt '*' state.show_sls core,edit.vim dev
'''
__pillar__.update(kwargs.get(u'pillar', {}))
__opts__[u'grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
__opts__['grains'] = __grains__
opts = copy.copy(__opts__)
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
if isinstance(mods, six.string_types):
mods = mods.split(u',')
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
@ -740,12 +989,12 @@ def show_top():
salt '*' state.show_top
'''
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__[u'fileclient'])
__context__['fileclient'])
top_data = st_.get_top()
errors = []
errors += st_.verify_tops(top_data)
@ -775,30 +1024,30 @@ def single(fun, name, test=None, **kwargs):
'''
st_kwargs = __salt__.kwargs
__opts__[u'grains'] = __grains__
__opts__['grains'] = __grains__
# state.fun -> [state, fun]
comps = fun.split(u'.')
comps = fun.split('.')
if len(comps) < 2:
__context__[u'retcode'] = 1
return u'Invalid function passed'
__context__['retcode'] = 1
return 'Invalid function passed'
# Create the low chunk, using kwargs as a base
kwargs.update({u'state': comps[0],
u'fun': comps[1],
u'__id__': name,
u'name': name})
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
opts = copy.deepcopy(__opts__)
# Set test mode
if salt.utils.args.test_mode(test=test, **kwargs):
opts[u'test'] = True
opts['test'] = True
else:
opts[u'test'] = __opts__.get(u'test', None)
opts['test'] = __opts__.get('test', None)
# Get the override pillar data
__pillar__.update(kwargs.get(u'pillar', {}))
__pillar__.update(kwargs.get('pillar', {}))
# Create the State environment
st_ = salt.client.ssh.state.SSHState(__opts__, __pillar__)
@ -806,7 +1055,7 @@ def single(fun, name, test=None, **kwargs):
# Verify the low chunk
err = st_.verify_data(kwargs)
if err:
__context__[u'retcode'] = 1
__context__['retcode'] = 1
return err
# Must be a list of low-chunks
@ -817,46 +1066,46 @@ def single(fun, name, test=None, **kwargs):
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get(u'extra_filerefs', u''),
__opts__.get(u'extra_filerefs', u'')
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat'))
roster_grains = roster.opts[u'grains']
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__opts__,
__context__[u'fileclient'],
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs[u'id_'],
st_kwargs['id_'],
roster_grains)
# Create a hash so we can verify the tar on the target system
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__[u'hash_type'])
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
# We use state.pkg to execute the "state package"
cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__[u'thin_dir'],
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__[u'hash_type'])
__opts__['hash_type'])
# Create a salt-ssh Single object to actually do the ssh work
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__[u'fileclient'],
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
# Copy the tar down
single.shell.send(
trans_tar,
u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir']))
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
# Run the state.pkg command on the target
stdout, stderr, _ = single.cmd_block()
@ -871,8 +1120,8 @@ def single(fun, name, test=None, **kwargs):
try:
return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
except Exception as e:
log.error(u"JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout

View File

@ -36,6 +36,7 @@ import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.verify
import salt.syspaths
from salt.template import compile_template
@ -185,6 +186,10 @@ class CloudClient(object):
else:
self.opts = salt.config.cloud_config(path)
# Check the cache-dir exists. If not, create it.
v_dirs = [self.opts['cachedir']]
salt.utils.verify.verify_env(v_dirs, salt.utils.get_user())
if pillars:
for name, provider in six.iteritems(pillars.pop('providers', {})):
driver = provider['driver']
@ -666,7 +671,7 @@ class Cloud(object):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if 'selected_query_option' not in opts and '{0}.list_nodes_min'.format(driver) in self.clouds:
if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)

View File

@ -824,7 +824,7 @@ def query(params=None):
content = request.text
result = json.loads(content, object_hook=salt.utils.data.decode_dict)
result = json.loads(content, object_hook=salt.utils.data.encode_dict)
if 'Code' in result:
raise SaltCloudSystemExit(
pprint.pformat(result.get('Message', {}))

View File

@ -49,6 +49,7 @@ Example Provider Configuration
# Import python libs
from __future__ import absolute_import
import os
import sys
import re
import pprint
import logging
@ -58,6 +59,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
# pylint: disable=import-error
LIBCLOUD_IMPORT_ERROR = None
try:
import libcloud
from libcloud.compute.types import Provider
@ -78,6 +80,7 @@ try:
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
LIBCLOUD_IMPORT_ERROR = sys.exc_info()
HAS_LIBCLOUD = False
# pylint: enable=import-error
@ -155,6 +158,9 @@ def get_dependencies():
'''
Warn if dependencies aren't met.
'''
if LIBCLOUD_IMPORT_ERROR:
log.error("Failure when importing LibCloud: ", exc_info=LIBCLOUD_IMPORT_ERROR)
log.error("Note: The libcloud dependency is called 'apache-libcloud' on PyPi/pip.")
return config.check_driver_dependencies(
__virtualname__,
{'libcloud': HAS_LIBCLOUD}

View File

@ -821,7 +821,7 @@ def avail_images(call=None):
.. code-block:: yaml
image_url: images.joyent.com/image
image_url: images.joyent.com/images
'''
if call == 'action':
raise SaltCloudSystemExit(

File diff suppressed because it is too large Load Diff

View File

@ -188,7 +188,7 @@ def query(params=None):
log.debug(request.url)
content = request.text
result = json.loads(content, object_hook=salt.utils.data.decode_dict)
result = json.loads(content, object_hook=salt.utils.data.encode_dict)
# print('response:')
# pprint.pprint(result)

View File

@ -48,6 +48,10 @@ log = logging.getLogger(__name__)
# The name salt will identify the lib by
__virtualname__ = 'virtualbox'
#if no clone mode is specified in the virtualbox profile
#then default to 0 which was the old default value
DEFAULT_CLONE_MODE = 0
def __virtual__():
'''
@ -85,6 +89,30 @@ def get_configured_provider():
return configured
def map_clonemode(vm_info):
"""
Convert the virtualbox config file values for clone_mode into the integers the API requires
"""
mode_map = {
'state': 0,
'child': 1,
'all': 2
}
if not vm_info:
return DEFAULT_CLONE_MODE
if 'clonemode' not in vm_info:
return DEFAULT_CLONE_MODE
if vm_info['clonemode'] in mode_map:
return mode_map[vm_info['clonemode']]
else:
raise SaltCloudSystemExit(
"Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(mode_map.keys()))
)
def create(vm_info):
"""
Creates a virtual machine from the given VM information.
@ -102,6 +130,7 @@ def create(vm_info):
profile: <dict>
driver: <provider>:<profile>
clonefrom: <vm_name>
clonemode: <mode> (default: state, choices: state, child, all)
}
@type vm_info dict
@return dict of resulting vm. !!!Passwords can and should be included!!!
@ -133,6 +162,9 @@ def create(vm_info):
key_filename = config.get_cloud_config_value(
'private_key', vm_info, __opts__, search_global=False, default=None
)
clone_mode = map_clonemode(vm_info)
wait_for_pattern = vm_info['waitforpattern'] if 'waitforpattern' in vm_info.keys() else None
interface_index = vm_info['interfaceindex'] if 'interfaceindex' in vm_info.keys() else 0
log.debug("Going to fire event: starting create")
__utils__['cloud.fire_event'](
@ -147,7 +179,8 @@ def create(vm_info):
# to create the virtual machine.
request_kwargs = {
'name': vm_info['name'],
'clone_from': vm_info['clonefrom']
'clone_from': vm_info['clonefrom'],
'clone_mode': clone_mode
}
__utils__['cloud.fire_event'](
@ -163,17 +196,17 @@ def create(vm_info):
# Booting and deploying if needed
if power:
vb_start_vm(vm_name, timeout=boot_timeout)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name)
ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)
if len(ips):
ip = ips[0]
ip = ips[interface_index]
log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
# ssh or smb using ip and install salt only if deploy is True
if deploy:
vm_info['key_filename'] = key_filename
vm_info['ssh_host'] = ip
res = __utils__['cloud.bootstrap'](vm_info)
res = __utils__['cloud.bootstrap'](vm_info, __opts__)
vm_result.update(res)
__utils__['cloud.fire_event'](

View File

@ -292,12 +292,36 @@ def _add_new_hard_disk_helper(disk_label, size_gb, unit_number, controller_key=1
disk_spec.device.backing.diskMode = 'persistent'
if datastore:
ds_ref = salt.utils.vmware.get_datastore_ref(_get_si(), datastore)
if not ds_ref:
raise SaltCloudSystemExit('Requested {0} disk in datastore {1}, but no such datastore found.'.format(disk_label, datastore))
datastore_path = '[' + str(ds_ref.name) + '] ' + vm_name
datastore_ref = salt.utils.vmware.get_mor_using_container_view(_get_si(), vim.Datastore, datastore)
if not datastore_ref:
# check if it is a datastore cluster instead
datastore_cluster_ref = salt.utils.vmware.get_mor_using_container_view(_get_si(), vim.StoragePod, datastore)
if not datastore_cluster_ref:
# datastore/datastore cluster specified does not exist
raise SaltCloudSystemExit("Specified datastore/datastore cluster ({0}) for disk ({1}) does not exist".format(datastore, disk_label))
# datastore cluster has been specified
# find datastore with most free space available
#
# TODO: Get DRS Recommendations instead of finding datastore with most free space
datastore_list = salt.utils.vmware.get_datastores(_get_si(), datastore_cluster_ref, get_all_datastores=True)
datastore_free_space = 0
for ds_ref in datastore_list:
log.trace("Found datastore ({0}) with free space ({1}) in datastore cluster ({2})".format(ds_ref.name, ds_ref.summary.freeSpace, datastore))
if ds_ref.summary.accessible and ds_ref.summary.freeSpace > datastore_free_space:
datastore_free_space = ds_ref.summary.freeSpace
datastore_ref = ds_ref
if not datastore_ref:
# datastore cluster specified does not have any accessible datastores
raise SaltCloudSystemExit("Specified datastore cluster ({0}) for disk ({1}) does not have any accessible datastores available".format(datastore, disk_label))
datastore_path = '[' + str(datastore_ref.name) + '] ' + vm_name
disk_spec.device.backing.fileName = datastore_path + '/' + disk_label + '.vmdk'
disk_spec.device.backing.datastore = ds_ref
disk_spec.device.backing.datastore = datastore_ref
log.trace("Using datastore ({0}) for disk ({1}), vm_name ({2})".format(datastore_ref.name, disk_label, vm_name))
disk_spec.device.controllerKey = controller_key
disk_spec.device.unitNumber = unit_number

View File

@ -1156,6 +1156,30 @@ VALID_OPTS = {
# Used by salt.modules.dockermod.compare_container_networks to specify which keys are compared
'docker.compare_container_networks': dict,
# SSDP discovery publisher description.
# Contains publisher configuration and minion mapping.
# Setting it to False disables discovery
'discovery': (dict, bool),
# SSDP discovery mapping
# Defines arbitrary data for description and grouping minions across various types of masters,
# especially when masters are not related to each other.
'mapping': dict,
# SSDP discovery mapping matcher policy
# Values: "any" where at least one key/value pair should be found or
# "all", where every key/value should be identical
'match': str,
# Port definition.
'port': int,
# SSDP discovery attempts to send query to the Universe
'attempts': int,
# SSDP discovery pause between the attempts
'pause': int,
}
# default configurations
@ -1440,6 +1464,13 @@ DEFAULT_MINION_OPTS = {
'automatic': ['IPAddress', 'Gateway',
'GlobalIPv6Address', 'IPv6Gateway'],
},
'discovery': {
'attempts': 3,
'pause': 5,
'port': 4520,
'match': 'any',
'mapping': {},
},
}
DEFAULT_MASTER_OPTS = {
@ -1758,6 +1789,10 @@ DEFAULT_MASTER_OPTS = {
'salt_cp_chunk_size': 98304,
'require_minion_sign_messages': False,
'drop_messages_signature_fail': False,
'discovery': {
'port': 4520,
'mapping': {},
},
}
@ -2444,7 +2479,7 @@ def syndic_config(master_config_path,
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir'
'autosign_file', 'autoreject_file', 'token_dir', 'autosign_grains_dir'
]
for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
# If this is not a URI and instead a local path
@ -3265,7 +3300,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
return value
def is_provider_configured(opts, provider, required_keys=()):
def is_provider_configured(opts, provider, required_keys=(), log_message=True):
'''
Check and return the first matching and fully configured cloud provider
configuration.
@ -3278,13 +3313,14 @@ def is_provider_configured(opts, provider, required_keys=()):
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
# There's at least one require configuration key which is not
# set.
log.warning(
"The required '{0}' configuration setting is missing "
"from the '{1}' driver, which is configured under the "
"'{2}' alias.".format(key, provider, alias)
)
if log_message is True:
# There's at least one require configuration key which is not
# set.
log.warning(
"The required '{0}' configuration setting is missing "
"from the '{1}' driver, which is configured under the "
"'{2}' alias.".format(key, provider, alias)
)
return False
# If we reached this far, there's a properly configured provider.
# Return it!
@ -3300,15 +3336,16 @@ def is_provider_configured(opts, provider, required_keys=()):
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
log.warning(
"The required '{0}' configuration setting is "
"missing from the '{1}' driver, which is configured "
"under the '{2}' alias.".format(
key, provider, alias
if log_message is True:
# This provider does not include all necessary keys,
# continue to next one.
log.warning(
"The required '{0}' configuration setting is "
"missing from the '{1}' driver, which is configured "
"under the '{2}' alias.".format(
key, provider, alias
)
)
)
skip_provider = True
break
@ -3622,23 +3659,23 @@ def apply_minion_config(overrides=None,
if overrides:
opts.update(overrides)
if u'environment' in opts:
if u'saltenv' in opts:
if 'environment' in opts:
if 'saltenv' in opts:
log.warning(
u'The \'saltenv\' and \'environment\' minion config options '
u'cannot both be used. Ignoring \'environment\' in favor of '
u'\'saltenv\'.',
'The \'saltenv\' and \'environment\' minion config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom module is
# refrencing __opts__['environment']
opts[u'environment'] = opts[u'saltenv']
opts['environment'] = opts['saltenv']
else:
log.warning(
u'The \'environment\' minion config option has been renamed '
u'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts[u'environment']
'The \'environment\' minion config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts[u'saltenv'] = opts[u'environment']
opts['saltenv'] = opts['environment']
opts['__cli'] = os.path.basename(sys.argv[0])
@ -3792,23 +3829,23 @@ def apply_master_config(overrides=None, defaults=None):
if overrides:
opts.update(overrides)
if u'environment' in opts:
if u'saltenv' in opts:
if 'environment' in opts:
if 'saltenv' in opts:
log.warning(
u'The \'saltenv\' and \'environment\' master config options '
u'cannot both be used. Ignoring \'environment\' in favor of '
u'\'saltenv\'.',
'The \'saltenv\' and \'environment\' master config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom runner is
# refrencing __opts__['environment']
opts[u'environment'] = opts[u'saltenv']
opts['environment'] = opts['saltenv']
else:
log.warning(
u'The \'environment\' master config option has been renamed '
u'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts[u'environment']
'The \'environment\' master config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts[u'saltenv'] = opts[u'environment']
opts['saltenv'] = opts['environment']
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
@ -3852,7 +3889,7 @@ def apply_master_config(overrides=None, defaults=None):
prepend_root_dirs = [
'pki_dir', 'key_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir',
'sqlite_queue_dir'
'sqlite_queue_dir', 'autosign_grains_dir'
]
# These can be set to syslog, so, not actual paths on the system

File diff suppressed because it is too large Load Diff

View File

@ -62,7 +62,7 @@ def jobber_check(self):
rms.append(jid)
data = self.shells.value[jid]
stdout, stderr = data['proc'].communicate()
ret = json.loads(salt.utils.stringutils.to_str(stdout), object_hook=salt.utils.data.decode_dict)['local']
ret = json.loads(salt.utils.stringutils.to_str(stdout), object_hook=salt.utils.data.encode_dict)['local']
route = {'src': (self.stack.value.local.name, 'manor', 'jid_ret'),
'dst': (data['msg']['route']['src'][0], None, 'remote_cmd')}
ret['cmd'] = '_return'

View File

@ -38,6 +38,7 @@ import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.minions
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
@ -144,7 +145,7 @@ def clean_pub_auth(opts):
if not os.path.exists(auth_cache):
return
else:
for (dirpath, dirnames, filenames) in os.walk(auth_cache):
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(auth_cache):
for auth_file in filenames:
auth_file_path = os.path.join(dirpath, auth_file)
if not os.path.isfile(auth_file_path):
@ -334,7 +335,7 @@ class AutoKey(object):
expire_minutes = self.opts.get('autosign_timeout', 120)
if expire_minutes > 0:
min_time = time.time() - (60 * int(expire_minutes))
for root, dirs, filenames in os.walk(autosign_dir):
for root, dirs, filenames in salt.utils.path.os_walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
@ -348,6 +349,33 @@ class AutoKey(object):
os.remove(stub_file)
return True
def check_autosign_grains(self, autosign_grains):
'''
Check for matching grains in the autosign_grains_dir.
'''
if not autosign_grains or u'autosign_grains_dir' not in self.opts:
return False
autosign_grains_dir = self.opts[u'autosign_grains_dir']
for root, dirs, filenames in os.walk(autosign_grains_dir):
for grain in filenames:
if grain in autosign_grains:
grain_file = os.path.join(autosign_grains_dir, grain)
if not self.check_permissions(grain_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warning(message.format(grain_file))
continue
with salt.utils.files.fopen(grain_file, u'r') as f:
for line in f:
line = line.strip()
if line.startswith(u'#'):
continue
if autosign_grains[grain] == line:
return True
return False
def check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
@ -357,7 +385,7 @@ class AutoKey(object):
self.opts.get('autoreject_file', None)
)
def check_autosign(self, keyid):
def check_autosign(self, keyid, autosign_grains=None):
'''
Checks if the specified keyid should automatically be signed.
'''
@ -367,6 +395,8 @@ class AutoKey(object):
return True
if self.check_autosign_dir(keyid):
return True
if self.check_autosign_grains(autosign_grains):
return True
return False

View File

@ -2,7 +2,7 @@
'''
This module is a central location for all salt exceptions
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import copy
@ -31,16 +31,34 @@ def get_error_message(error):
'''
Get human readable message from Python Exception
'''
return error.args[0] if error.args else u''
return error.args[0] if error.args else ''
class SaltException(Exception):
'''
Base exception class; all Salt-specific exceptions should subclass this
'''
def __init__(self, message=u''):
super(SaltException, self).__init__(message)
self.strerror = message
def __init__(self, message=''):
# Avoid circular import
import salt.utils.stringutils
if six.PY3 or isinstance(message, unicode): # pylint: disable=incompatible-py3-code
super(SaltException, self).__init__(
salt.utils.stringutils.to_str(message)
)
self.message = self.strerror = message
elif isinstance(message, str):
super(SaltException, self).__init__(message)
self.message = self.strerror = \
salt.utils.stringutils.to_unicode(message)
else:
# Some non-string input was passed. Run the parent dunder init with
# a str version, and convert the passed value to unicode for the
# message/strerror attributes.
super(SaltException, self).__init__(str(message)) # future lint: blacklisted-function
self.message = self.strerror = unicode(message) # pylint: disable=incompatible-py3-code
def __unicode__(self):
return self.strerror
def pack(self):
'''
@ -48,8 +66,7 @@ class SaltException(Exception):
transport via msgpack
'''
if six.PY3:
# The message should be a str type, not a unicode
return {u'message': str(self), u'args': self.args}
return {'message': six.text_type(self), 'args': self.args}
return dict(message=self.__unicode__(), args=self.args)
@ -100,16 +117,18 @@ class CommandExecutionError(SaltException):
Used when a module runs a command which returns an error and wants
to show the user the output gracefully instead of dying
'''
def __init__(self, message=u'', info=None):
self.error = exc_str_prefix = message
def __init__(self, message='', info=None):
# Avoid circular import
import salt.utils.stringutils
self.error = exc_str_prefix = salt.utils.stringutils.to_unicode(message)
self.info = info
if self.info:
if exc_str_prefix:
if exc_str_prefix[-1] not in u'.?!':
exc_str_prefix += u'.'
exc_str_prefix += u' '
if exc_str_prefix[-1] not in '.?!':
exc_str_prefix += '.'
exc_str_prefix += ' '
exc_str_prefix += u'Additional info follows:\n\n'
exc_str_prefix += 'Additional info follows:\n\n'
# NOTE: exc_str will be passed to the parent class' constructor and
# become self.strerror.
exc_str = exc_str_prefix + _nested_output(self.info)
@ -120,7 +139,7 @@ class CommandExecutionError(SaltException):
# this information would be redundant).
if isinstance(self.info, dict):
info_without_changes = copy.deepcopy(self.info)
info_without_changes.pop(u'changes', None)
info_without_changes.pop('changes', None)
if info_without_changes:
self.strerror_without_changes = \
exc_str_prefix + _nested_output(info_without_changes)
@ -134,6 +153,9 @@ class CommandExecutionError(SaltException):
else:
self.strerror_without_changes = exc_str = self.error
# We call the parent __init__ last instead of first because we need the
# logic above to derive the message string to use for the exception
# message.
super(CommandExecutionError, self).__init__(exc_str)
@ -165,13 +187,13 @@ class FileLockError(SaltException):
'''
Used when an error occurs obtaining a file lock
'''
def __init__(self, msg, time_start=None, *args, **kwargs):
super(FileLockError, self).__init__(msg, *args, **kwargs)
def __init__(self, message, time_start=None, *args, **kwargs):
super(FileLockError, self).__init__(message, *args, **kwargs)
if time_start is None:
log.warning(
u'time_start should be provided when raising a FileLockError. '
u'Defaulting to current time as a fallback, but this may '
u'result in an inaccurate timeout.'
'time_start should be provided when raising a FileLockError. '
'Defaulting to current time as a fallback, but this may '
'result in an inaccurate timeout.'
)
self.time_start = time.time()
else:
@ -188,10 +210,9 @@ class GitLockError(SaltException):
this exception class can be caught in a try/except without being caught as
an OSError.
'''
def __init__(self, errno, strerror, *args, **kwargs):
super(GitLockError, self).__init__(strerror, *args, **kwargs)
def __init__(self, errno, message, *args, **kwargs):
super(GitLockError, self).__init__(message, *args, **kwargs)
self.errno = errno
self.strerror = strerror
class GitRemoteError(SaltException):
@ -224,28 +245,29 @@ class SaltRenderError(SaltException):
def __init__(self,
message,
line_num=None,
buf=u'',
marker=u' <======================',
buf='',
marker=' <======================',
trace=None):
# Avoid circular import
import salt.utils.stringutils
self.error = message
exc_str = copy.deepcopy(message)
exc_str = salt.utils.stringutils.to_unicode(message)
self.line_num = line_num
self.buffer = buf
self.context = u''
self.context = ''
if trace:
exc_str += u'\n{0}\n'.format(trace)
exc_str += '\n{0}\n'.format(trace)
if self.line_num and self.buffer:
# Avoid circular import
import salt.utils.stringutils
import salt.utils.templates
self.context = salt.utils.templates.get_context(
self.buffer,
self.line_num,
marker=marker
)
exc_str += '; line {0}\n\n{1}'.format( # future lint: disable=non-unicode-string
exc_str += '; line {0}\n\n{1}'.format(
self.line_num,
salt.utils.stringutils.to_str(self.context),
salt.utils.stringutils.to_unicode(self.context),
)
super(SaltRenderError, self).__init__(exc_str)
@ -256,8 +278,8 @@ class SaltClientTimeout(SaltException):
Takes the ``jid`` as a parameter
'''
def __init__(self, msg, jid=None, *args, **kwargs):
super(SaltClientTimeout, self).__init__(msg, *args, **kwargs)
def __init__(self, message, jid=None, *args, **kwargs):
super(SaltClientTimeout, self).__init__(message, *args, **kwargs)
self.jid = jid

View File

@ -10,6 +10,7 @@ import socket
import ctypes
import os
import ipaddress
import salt.ext.six as six
class sockaddr(ctypes.Structure):
@ -36,7 +37,7 @@ def inet_pton(address_family, ip_string):
# This will catch IP Addresses such as 10.1.2
if address_family == socket.AF_INET:
try:
ipaddress.ip_address(ip_string.decode())
ipaddress.ip_address(six.u(ip_string))
except ValueError:
raise socket.error('illegal IP address string passed to inet_pton')
return socket.inet_aton(ip_string)

File diff suppressed because it is too large Load Diff

View File

@ -17,6 +17,7 @@ import time
import salt.loader
import salt.utils.files
import salt.utils.locales
import salt.utils.path
import salt.utils.url
import salt.utils.versions
from salt.utils.args import get_function_argspec as _argspec
@ -182,7 +183,7 @@ def generate_mtime_map(opts, path_map):
file_map = {}
for saltenv, path_list in six.iteritems(path_map):
for path in path_list:
for directory, dirnames, filenames in os.walk(path):
for directory, dirnames, filenames in salt.utils.path.os_walk(path):
# Don't walk any directories that match file_ignore_regex or glob
dirnames[:] = [d for d in dirnames if not is_file_ignored(opts, d)]
for item in filenames:
@ -225,7 +226,7 @@ def reap_fileserver_cache_dir(cache_base, find_func):
'''
for saltenv in os.listdir(cache_base):
env_base = os.path.join(cache_base, saltenv)
for root, dirs, files in os.walk(env_base):
for root, dirs, files in salt.utils.path.os_walk(env_base):
# if we have an empty directory, lets cleanup
# This will only remove the directory on the second time
# "_reap_cache" is called (which is intentional)

View File

@ -207,7 +207,7 @@ def update():
# Walk the cache directory searching for deletions
blob_names = [blob.name for blob in blob_list]
blob_set = set(blob_names)
for root, dirs, files in os.walk(path):
for root, dirs, files in salt.utils.path.os_walk(path):
for f in files:
fname = os.path.join(root, f)
relpath = os.path.relpath(fname, path)

View File

@ -34,6 +34,7 @@ import salt.fileserver
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
@ -275,7 +276,7 @@ def file_list(load):
continue
walk_dir = os.path.join(minion_files_dir, prefix)
# Do not follow links for security reasons
for root, _, files in os.walk(walk_dir, followlinks=False):
for root, _, files in salt.utils.path.os_walk(walk_dir, followlinks=False):
for fname in files:
# Ignore links for security reasons
if os.path.islink(os.path.join(root, fname)):
@ -354,7 +355,7 @@ def dir_list(load):
continue
walk_dir = os.path.join(minion_files_dir, prefix)
# Do not follow links for security reasons
for root, _, _ in os.walk(walk_dir, followlinks=False):
for root, _, _ in salt.utils.path.os_walk(walk_dir, followlinks=False):
relpath = os.path.relpath(root, minion_files_dir)
# Ensure that the current directory and directories outside of
# the minion dir do not end up in return list

View File

@ -15,7 +15,7 @@ be in the :conf_master:`fileserver_backend` list to enable this backend.
Fileserver environments are defined using the :conf_master:`file_roots`
configuration option.
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import os
@ -30,6 +30,7 @@ import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
@ -229,7 +230,7 @@ def file_hash(load, fnd):
cache_path = os.path.join(__opts__['cachedir'],
'roots/hash',
load['saltenv'],
u'{0}.hash.{1}'.format(fnd['rel'],
'{0}.hash.{1}'.format(fnd['rel'],
__opts__['hash_type']))
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
@ -385,7 +386,7 @@ def _file_lists(load, form):
ret['links'][rel_path] = link_dest
for path in __opts__['file_roots'][load['saltenv']]:
for root, dirs, files in os.walk(
for root, dirs, files in salt.utils.path.os_walk(
path,
followlinks=__opts__['fileserver_followsymlinks']):
_add_to(ret['dirs'], path, root, dirs)

View File

@ -58,6 +58,7 @@ import salt.utils.data
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
@ -754,7 +755,7 @@ def _file_lists(load, form):
# svnfs root (global or per-remote) does not exist in env
continue
for root, dirs, files in os.walk(env_root):
for root, dirs, files in salt.utils.path.os_walk(env_root):
relpath = os.path.relpath(root, env_root)
dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
if relpath != '.':

View File

@ -425,14 +425,14 @@ def _osx_memdata():
sysctl = salt.utils.path.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
swap_total = __salt__['cmd.run']('{0} -n vm.swapusage').split()[2]
swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2]
if swap_total.endswith('K'):
_power = 2**10
elif swap_total.endswith('M'):
_power = 2**20
elif swap_total.endswith('G'):
_power = 2**30
swap_total = swap_total[:-1] * _power
swap_total = float(swap_total[:-1]) * _power
grains['mem_total'] = int(mem) // 1024 // 1024
grains['swap_total'] = int(swap_total) // 1024 // 1024

View File

@ -13,6 +13,7 @@ import logging
import salt.utils.files
import salt.utils.platform
__proxyenabled__ = ['*']
log = logging.getLogger(__name__)
@ -39,16 +40,33 @@ def config():
if 'conf_file' not in __opts__:
return {}
if os.path.isdir(__opts__['conf_file']):
gfn = os.path.join(
__opts__['conf_file'],
'grains'
)
if salt.utils.platform.is_proxy():
gfn = os.path.join(
__opts__['conf_file'],
'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
__opts__['conf_file'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
if os.path.isfile(gfn):
log.debug('Loading static grains from %s', gfn)
with salt.utils.files.fopen(gfn, 'rb') as fp_:
try:
return yaml.safe_load(fp_.read())

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
'''
Manage account locks on AIX systems
.. versionadded:: Oxygen
:depends: none
'''
from __future__ import absolute_import
# Import Python libraries
import logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'shadow'
def __virtual__():
    '''
    Only load this module on AIX systems; otherwise report why it was skipped.
    '''
    if __grains__['kernel'] != 'AIX':
        return (False, 'The aix_shadow execution module failed to load: '
                'only available on AIX systems.')
    return __virtualname__
def login_failures(user):
    '''
    Query for all accounts which have 3 or more login failures.

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.login_failures ALL
    '''
    # NOTE(review): ``user`` is interpolated into a shell command run with
    # python_shell=True -- assumes it comes from a trusted caller.
    command = ('lsuser -a unsuccessful_login_count {0}'
               " | grep -E 'unsuccessful_login_count=([3-9]|[0-9][0-9]+)'").format(user)
    result = __salt__['cmd.run_all'](command, output_loglevel='trace', python_shell=True)
    # First whitespace-delimited field of each matching line is the user name.
    return [entry.split()[0] for entry in result['stdout'].splitlines()]
def locked(user):
    '''
    Query for all accounts which are flagged as locked.

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.locked ALL
    '''
    # NOTE(review): ``user`` is interpolated into a shell command run with
    # python_shell=True -- assumes it comes from a trusted caller.
    command = ('lsuser -a account_locked {0}'
               ' | grep "account_locked=true"').format(user)
    result = __salt__['cmd.run_all'](command, output_loglevel='trace', python_shell=True)
    # First whitespace-delimited field of each matching line is the user name.
    return [entry.split()[0] for entry in result['stdout'].splitlines()]
def unlock(user):
    '''
    Unlock a locked user account and reset its unsuccessful login counter.

    Returns the full ``cmd.run_all`` result dict (retcode, stdout, stderr, ...).

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.unlock user
    '''
    # NOTE(review): the two commands are joined with a shell pipe ('|') even
    # though chsec does not consume chuser's output.  The pipe does run both
    # commands, but the reported retcode reflects only chsec and any chuser
    # failure is masked -- confirm whether '&&' was intended.
    cmd = 'chuser account_locked=false {0} | ' \
        'chsec -f /etc/security/lastlog -a "unsuccessful_login_count=0" -s {0}'.format(user)
    ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)

    return ret

View File

@ -797,7 +797,7 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
else:
rel_root = cwd if cwd is not None else '/'
if os.path.isdir(src):
for dir_name, sub_dirs, files in os.walk(src):
for dir_name, sub_dirs, files in salt.utils.path.os_walk(src):
if cwd and dir_name.startswith(cwd):
arc_dir = os.path.relpath(dir_name, cwd)
else:

View File

@ -170,6 +170,9 @@ def atrm(*args):
if not args:
return {'jobs': {'removed': [], 'tag': None}}
# Convert all to strings
args = [str(arg) for arg in args]
if args[0] == 'all':
if len(args) > 1:
opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']])))
@ -179,7 +182,7 @@ def atrm(*args):
ret = {'jobs': {'removed': opts, 'tag': None}}
else:
opts = list(list(map(str, [i['job'] for i in atq()['jobs']
if i['job'] in args])))
if str(i['job']) in args])))
ret = {'jobs': {'removed': opts, 'tag': None}}
# Shim to produce output similar to what __virtual__() should do

View File

@ -71,7 +71,7 @@ def uuid(dev=None):
try:
if dev is None:
# take the only directory in /sys/fs/bcache and return it's basename
return list(os.walk('/sys/fs/bcache/'))[0][1][0]
return list(salt.utils.path.os_walk('/sys/fs/bcache/'))[0][1][0]
else:
# basename of the /sys/block/{dev}/bcache/cache symlink target
return os.path.basename(_bcsys(dev, 'cache'))
@ -425,12 +425,12 @@ def status(stats=False, config=False, internals=False, superblock=False, alldevs
:param superblock: include superblock
'''
bdevs = []
for _, links, _ in os.walk('/sys/block/'):
for _, links, _ in salt.utils.path.os_walk('/sys/block/'):
for block in links:
if 'bcache' in block:
continue
for spath, sdirs, _ in os.walk('/sys/block/{0}'.format(block), followlinks=False):
for spath, sdirs, _ in salt.utils.path.os_walk('/sys/block/{0}'.format(block), followlinks=False):
if 'bcache' in sdirs:
bdevs.append(os.path.basename(spath))
statii = {}

View File

@ -398,7 +398,7 @@ def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerRef
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
retries -= 1
tries -= 1
continue
log.error('Failed to create hosted zone {0}: {1}'.format(Name, str(e)))
return []
@ -448,7 +448,7 @@ def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=Non
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
retries -= 1
tries -= 1
continue
log.error('Failed to update comment on hosted zone {0}: {1}'.format(
Name or Id, str(e)))
@ -547,7 +547,7 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
retries -= 1
tries -= 1
continue
log.error('Failed to associate VPC {0} with hosted zone {1}: {2}'.format(
VPCName or VPCId, Name or HostedZoneId, str(e)))
@ -639,7 +639,7 @@ def diassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
retries -= 1
tries -= 1
continue
log.error('Failed to associate VPC {0} with hosted zone {1}: {2}'.format(
VPCName or VPCId, Name or HostedZoneId, str(e)))
@ -877,7 +877,7 @@ def change_resource_record_sets(HostedZoneId=None, Name=None,
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
retries -= 1
tries -= 1
continue
log.error('Failed to apply requested changes to the hosted zone {0}: {1}'.format(
Name or HostedZoneId, str(e)))

View File

@ -66,6 +66,7 @@ try:
#pylint: disable=unused-import
import boto
import boto.route53
import boto.route53.healthcheck
from boto.route53.exception import DNSServerError
#pylint: enable=unused-import
# create_zone params were changed in boto 2.35+
@ -340,6 +341,108 @@ def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
return True
def create_healthcheck(ip_addr=None, fqdn=None, region=None, key=None, keyid=None, profile=None,
                       port=53, hc_type='TCP', resource_path='', string_match=None, request_interval=30,
                       failure_threshold=3, retry_on_errors=True, error_retries=5):
    '''
    Create a Route53 healthcheck

    .. versionadded:: Oxygen

    ip_addr
        IP address to check.  ip_addr or fqdn is required.

    fqdn
        Domain name of the endpoint to check.  ip_addr or fqdn is required

    port
        Port to check

    hc_type
        Healthcheck type.  HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP

    resource_path
        Path to check

    string_match
        If hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the
        response body from the specified resource

    request_interval
        The number of seconds between the time that Amazon Route 53 gets a response from
        your endpoint and the time that it sends the next health-check request.

    failure_threshold
        The number of consecutive health checks that an endpoint must pass or fail for
        Amazon Route 53 to change the current status of the endpoint from unhealthy to
        healthy or vice versa.

    retry_on_errors
        If True, sleep 3 seconds and retry on a DNSServerError, up to error_retries
        attempts.

    error_retries
        Number of attempts to make before giving up; when exhausted the function
        returns False.

    region
        Region endpoint to connect to

    key
        AWS key

    keyid
        AWS keyid

    profile
        AWS pillar profile

    CLI Example::

        salt myminion boto_route53.create_healthcheck 192.168.0.1
        salt myminion boto_route53.create_healthcheck 192.168.0.1 port=443 hc_type=HTTPS \
                resource_path=/ fqdn=blog.saltstack.furniture
    '''
    # A healthcheck needs a target: at least one of ip_addr or fqdn.
    if fqdn is None and ip_addr is None:
        msg = 'One of the following must be specified: fqdn or ip_addr'
        log.error(msg)
        return {'error': msg}
    hc_ = boto.route53.healthcheck.HealthCheck(ip_addr,
                                               port,
                                               hc_type,
                                               resource_path,
                                               fqdn=fqdn,
                                               string_match=string_match,
                                               request_interval=request_interval,
                                               failure_threshold=failure_threshold)
    if region is None:
        # Route53 is a global service; boto addresses it via the
        # 'universal' pseudo-region.
        region = 'universal'
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    while error_retries > 0:
        try:
            return {'result': conn.create_health_check(hc_)}
        except DNSServerError as exc:
            log.debug(exc)
            if retry_on_errors:
                # NOTE(review): the sleep/retry below runs for *any*
                # DNSServerError when retry_on_errors is set, not only the
                # two codes logged here -- confirm that is intended.
                if 'Throttling' == exc.code:
                    log.debug('Throttled by AWS API.')
                elif 'PriorRequestNotComplete' == exc.code:
                    log.debug('The request was rejected by AWS API.\
                Route 53 was still processing a prior request')
                time.sleep(3)
                error_retries -= 1
                continue
            return {'error': __utils__['boto.get_error'](exc)}
    # NOTE(review): retries exhausted returns bare False, while a non-retried
    # failure returns an {'error': ...} dict -- callers must handle both.
    return False
def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
'''
Delete a Route53 hosted zone.
@ -651,44 +754,71 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
raise e
def _wait_for_sync(status, conn, wait_for_sync):
if not wait_for_sync:
def _try_func(conn, func, **args):
tries = 30
while True:
try:
return getattr(conn, func)(**args)
except AttributeError as e:
# Don't include **args in log messages - security concern.
log.error('Function `{0}()` not found for AWS connection object '
'{1}'.format(func, conn))
return None
except DNSServerError as e:
if tries and e.code == 'Throttling':
log.debug('Throttled by AWS API. Will retry in 5 seconds')
time.sleep(5)
tries -= 1
continue
log.error('Failed calling {0}(): {1}'.format(func, str(e)))
return None
def _wait_for_sync(status, conn, wait=True):
### Wait should be a bool or an integer
if wait is True:
wait = 600
if not wait:
return True
retry = 10
i = 0
while i < retry:
log.info('Getting route53 status (attempt {0})'.format(i + 1))
orig_wait = wait
log.info('Waiting up to {0} seconds for Route53 changes to synchronize'.format(orig_wait))
while wait > 0:
change = conn.get_change(status)
log.debug(change.GetChangeResponse.ChangeInfo.Status)
if change.GetChangeResponse.ChangeInfo.Status == 'INSYNC':
current = change.GetChangeResponse.ChangeInfo.Status
if current == 'INSYNC':
return True
i = i + 1
time.sleep(20)
log.error('Timed out waiting for Route53 status update.')
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping {0} seconds waiting for changes to synch (current status {1})'.format(
sleep, current))
time.sleep(sleep)
wait -= sleep
continue
log.error('Route53 changes not synced after {0} seconds.'.format(orig_wait))
return False
def create_hosted_zone(domain_name, caller_ref=None, comment='',
private_zone=False, vpc_id=None, vpc_name=None,
vpc_region=None, region=None, key=None, keyid=None,
def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=False, vpc_id=None,
vpc_name=None, vpc_region=None, region=None, key=None, keyid=None,
profile=None):
'''
Create a new Route53 Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
newly created Hosted Zone.
domain_name
The name of the domain. This should be a fully-specified domain, and
should terminate with a period. This is the name you have registered
with your DNS registrar. It is also the name you will delegate from your
registrar to the Amazon Route 53 delegation servers returned in response
The name of the domain. This must be fully-qualified, terminating with a period. This is
the name you have registered with your domain registrar. It is also the name you will
delegate from your registrar to the Amazon Route 53 delegation servers returned in response
to this request.
caller_ref
A unique string that identifies the request and that allows
create_hosted_zone() calls to be retried without the risk of executing
the operation twice. You want to provide this where possible, since
additional calls while the first is in PENDING status will be accepted
and can lead to multiple copies of the zone being created in Route53.
A unique string that identifies the request and that allows create_hosted_zone() calls to
be retried without the risk of executing the operation twice. It can take several minutes
for the change to replicate globally, and change from PENDING to INSYNC status. Thus it's
best to provide some value for this where possible, since duplicate calls while the first
is in PENDING status will be accepted and can lead to multiple copies of the zone being
created. On the other hand, if a zone is created with a given caller_ref, then deleted,
a second attempt to create a zone with the same caller_ref will fail until that caller_ref
is flushed from the Route53 system, which can take upwards of 24 hours.
comment
Any comments you want to include about the hosted zone.
@ -697,33 +827,30 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='',
Set True if creating a private hosted zone.
vpc_id
When creating a private hosted zone, either the VPC ID or VPC Name to
associate with is required. Exclusive with vpe_name. Ignored if passed
for a non-private zone.
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with vpe_name. Ignored when creating a non-private zone.
vpc_name
When creating a private hosted zone, either the VPC ID or VPC Name to
associate with is required. Exclusive with vpe_id. Ignored if passed
for a non-private zone.
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with vpe_id. Ignored when creating a non-private zone.
vpc_region
When creating a private hosted zone, the region of the associated VPC is
required. If not provided, an effort will be made to determine it from
vpc_id or vpc_name, if possible. If this fails, you'll need to provide
an explicit value for this option. Ignored if passed for a non-private
zone.
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from vpc_id or vpc_name, where possible.
If this fails, you'll need to provide an explicit value for this option. Ignored when
creating a non-private zone.
region
Region endpoint to connect to
Region endpoint to connect to.
key
AWS key to bind with
AWS key to bind with.
keyid
AWS keyid to bind with
AWS keyid to bind with.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example::
@ -776,24 +903,15 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='',
log.info('Options vpc_id, vpc_name, and vpc_region are ignored '
'when creating non-private zones.')
retries = 10
while retries:
try:
# Crazy layers of dereference...
r = conn.create_hosted_zone(**args)
r = r.CreateHostedZoneResponse.__dict__ if hasattr(r,
'CreateHostedZoneResponse') else {}
return r.get('parent', {}).get('CreateHostedZoneResponse')
except DNSServerError as e:
if retries:
if 'Throttling' == e.code:
log.debug('Throttled by AWS API.')
elif 'PriorRequestNotComplete' == e.code:
log.debug('The request was rejected by AWS API.\
Route 53 was still processing a prior request')
time.sleep(3)
retries -= 1
continue
log.error('Failed to create hosted zone {0}: {1}'.format(
domain_name, e.message))
return None
r = _try_func(conn, 'create_hosted_zone', **args)
if r is None:
log.error('Failed to create hosted zone {0}'.format(domain_name))
return None
r = r.get('CreateHostedZoneResponse', {})
# Pop it since it'll be irrelevant by the time we return
status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '')
synced = _wait_for_sync(status, conn, wait=600)
if not synced:
log.error('Hosted zone {0} not synced after 600 seconds.'.format(domain_name))
return None
return r

View File

@ -4,6 +4,9 @@ Cassandra Database Module
.. versionadded:: 2015.5.0
This module works with Cassandra v2 and v3 and hence generates
queries based on the internal schema of said version.
:depends: DataStax Python Driver for Apache Cassandra
https://github.com/datastax/python-driver
pip install cassandra-driver
@ -81,6 +84,7 @@ Cassandra Database Module
from __future__ import absolute_import
import logging
import json
import re
import ssl
# Import Salt Libs
@ -300,7 +304,7 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
.. code-block:: bash
salt '*' cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
salt 'cassandra-server' cassandra_cql.cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
'''
try:
cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass)
@ -314,6 +318,26 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
session.row_factory = dict_factory
ret = []
# Cassandra changed their internal schema from v2 to v3
# If the query contains a dictionary sorted by versions
# Find the query for the current cluster version.
# https://issues.apache.org/jira/browse/CASSANDRA-6717
if isinstance(query, dict):
cluster_version = version(contact_points=contact_points,
port=port,
cql_user=cql_user,
cql_pass=cql_pass)
match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version)
major, minor, point = match.groups()
# try to find the specific version in the query dictionary
# then try the major version
# otherwise default to the highest version number
try:
query = query[cluster_version]
except KeyError:
query = query.get(major, max(query))
log.debug('New query is: {0}'.format(query))
try:
results = session.execute(query)
except BaseException as e:
@ -548,8 +572,10 @@ def list_keyspaces(contact_points=None, port=None, cql_user=None, cql_pass=None)
salt 'minion1' cassandra_cql.list_keyspaces contact_points=minion1 port=9000
'''
query = '''select keyspace_name
from system.schema_keyspaces;'''
query = {
'2': 'select keyspace_name from system.schema_keyspaces;',
'3': 'select keyspace_name from system_schema.keyspaces;',
}
ret = {}
@ -594,9 +620,12 @@ def list_column_families(keyspace=None, contact_points=None, port=None, cql_user
'''
where_clause = "where keyspace_name = '{0}'".format(keyspace) if keyspace else ""
query = '''select columnfamily_name
from system.schema_columnfamilies
{0};'''.format(where_clause)
query = {
'2': '''select columnfamily_name from system.schema_columnfamilies
{0};'''.format(where_clause),
'3': '''select column_name from system_schema.columns
{0};'''.format(where_clause),
}
ret = {}
@ -634,14 +663,13 @@ def keyspace_exists(keyspace, contact_points=None, port=None, cql_user=None, cql
.. code-block:: bash
salt 'minion1' cassandra_cql.keyspace_exists keyspace=system
salt 'minion1' cassandra_cql.list_keyspaces keyspace=system contact_points=minion1
'''
# Only project the keyspace_name to make the query efficien.
# Like an echo
query = '''select keyspace_name
from system.schema_keyspaces
where keyspace_name = '{0}';'''.format(keyspace)
query = {
'2': '''select keyspace_name from system.schema_keyspaces
where keyspace_name = '{0}';'''.format(keyspace),
'3': '''select keyspace_name from system_schema.keyspaces
where keyspace_name = '{0}';'''.format(keyspace),
}
try:
ret = cql_query(query, contact_points, port, cql_user, cql_pass)

Some files were not shown because too many files have changed in this diff Show More