Merge pull request #25902 from basepi/merge-forward-2015.8
[2015.8] Merge forward from 2015.5 to 2015.8
Commit 7b50807a12
@ -25,7 +25,7 @@
# Minions can connect to multiple masters simultaneously (all masters
# are "hot"), or can be configured to failover if a master becomes
# unavailable. Multiple hot masters are configured by setting this
# value to "standard". Failover masters can be requested by setting
# value to "str". Failover masters can be requested by setting
# to "failover". MAKE SURE TO SET master_alive_interval if you are
# using failover.
# master_type: str

@ -206,7 +206,8 @@ using the Python \fBpprint\fP standard library module.
.INDENT 7.0
.INDENT 3.5
If using \fB\-\-out=json\fP, you will probably want \fB\-\-static\fP as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use \fB\-\-static\fP as well.
.UNINDENT

@ -255,7 +255,8 @@ using the Python \fBpprint\fP standard library module.
.INDENT 7.0
.INDENT 3.5
If using \fB\-\-out=json\fP, you will probably want \fB\-\-static\fP as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use \fB\-\-static\fP as well.
.UNINDENT

@ -135,7 +135,8 @@ using the Python \fBpprint\fP standard library module.
.INDENT 7.0
.INDENT 3.5
If using \fB\-\-out=json\fP, you will probably want \fB\-\-static\fP as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use \fB\-\-static\fP as well.
.UNINDENT

@ -221,7 +221,8 @@ using the Python \fBpprint\fP standard library module.
.INDENT 7.0
.INDENT 3.5
If using \fB\-\-out=json\fP, you will probably want \fB\-\-static\fP as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use \fB\-\-static\fP as well.
.UNINDENT

@ -82,8 +82,10 @@ query the minions and check on running jobs. Default: 5
.B \-s, \-\-static
By default as of version 0.9.8 the salt command returns data to the
console as it is received from minions, but previous releases would return
data only after all data was received. To only return the data with a hard
timeout and after all minions have returned then use the static option.
data only after all data was received. Use the static option to only return
the data with a hard timeout and after all minions have returned.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
.UNINDENT
.INDENT 0.0
.TP

@ -280,7 +282,8 @@ using the Python \fBpprint\fP standard library module.
.INDENT 7.0
.INDENT 3.5
If using \fB\-\-out=json\fP, you will probably want \fB\-\-static\fP as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use \fB\-\-static\fP as well.
.UNINDENT
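
A concrete illustration of the note above (a sketch; ``test.ping`` and the ``'*'`` target are placeholders for whatever you actually run):

.. code-block:: bash

    # Aggregate every minion's return into one valid JSON document
    salt '*' test.ping --out=json --static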
@ -18,7 +18,8 @@ Output Options

.. note::
If using ``--out=json``, you will probably want ``--static`` as well.
Without the static option, you will get a JSON string for each minion.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.
This is due to using an iterative outputter. So if you want to feed it
to a JSON parser, use ``--static`` as well.

@ -34,8 +34,10 @@ Options

By default as of version 0.9.8 the salt command returns data to the
console as it is received from minions, but previous releases would return
data only after all data was received. To only return the data with a hard
timeout and after all minions have returned then use the static option.
data only after all data was received. Use the static option to only return
the data with a hard timeout and after all minions have returned.
Without the static option, you will get a separate JSON string per minion
which makes JSON output invalid as a whole.

.. option:: --async

@ -82,9 +82,9 @@ The option can also be set to a list of masters, enabling

.. versionadded:: 2014.7.0

Default: ``standard``
Default: ``str``

The type of the :conf_minion:`master` variable. Can be ``standard``, ``failover`` or
The type of the :conf_minion:`master` variable. Can be ``str``, ``failover`` or
``func``.

.. code-block:: yaml
@ -10,24 +10,24 @@ Then, once there's a new release, users complain about functionality which was
removed and they were using it, etc. This should, at all costs, be avoided,
and, in these cases, *that* specific code should be deprecated.

Depending on the complexity and usage of a specific piece of code, the
deprecation time frame should be properly evaluated. As an example, a
deprecation warning which is shown for 2 major releases, for example `0.17.0`
and `2014.1.0`, gives users enough time to stop using the deprecated code and
adapt to the new one.
In order to give users enough time to migrate from the old code behavior to the
new behavior, the deprecation time frame should be carefully determined based
on the significance and complexity of the changes required by the user.

For example, if you're deprecating the usage of a keyword argument to a
function, that specific keyword argument should remain in place for the full
deprecation time frame and if that keyword argument is used, a deprecation
warning should be shown to the user.
A deprecation warning should be in place for at least two major releases before
the deprecated code and its accompanying deprecation warning are removed. More
time should be given for more complex changes. For example, if the current
release under development is ``Sodium``, the deprecated code and associated
warnings should remain in place and warn for at least ``Aluminum``.

To help in this deprecation task, salt provides :func:`salt.utils.warn_until
<salt.utils.warn_until>`. The idea behind this helper function is to show the
deprecation warning until salt reaches the provided version. Once that provided
version is equaled :func:`salt.utils.warn_until <salt.utils.warn_until>` will
raise a :py:exc:`RuntimeError` making salt stop its execution. This stoppage
is unpleasant and will remind the developer that the deprecation limit has been
reached and that the code can then be safely removed.
deprecation warning to the user until salt reaches the provided version. Once
that provided version is equaled :func:`salt.utils.warn_until
<salt.utils.warn_until>` will raise a :py:exc:`RuntimeError` making salt stop
its execution. This stoppage is unpleasant and will remind the developer that
the deprecation limit has been reached and that the code can then be safely
removed.

Consider the following example:

@ -36,14 +36,13 @@ Consider the following example:
    def some_function(bar=False, foo=None):
        if foo is not None:
            salt.utils.warn_until(
                (0, 18),
                'Aluminum',
                'The \'foo\' argument has been deprecated and its '
                'functionality removed, as such, its usage is no longer '
                'required.'
            )

Consider that the current salt release is ``0.16.0``. Whenever ``foo`` is
passed a value different from ``None`` that warning will be shown to the user.
This will happen in versions ``0.16.2`` to ``2014.1.0``, after which a
:py:exc:`RuntimeError` will be raised making us aware that the deprecated code
should now be removed.
Development begins on the ``Aluminum`` release when the ``Magnesium`` branch is
forked from the develop branch. Once this occurs, all uses of the
``warn_until`` function targeting ``Aluminum``, along with the code they are
warning about should be removed from the code.
@ -28,7 +28,7 @@ A tutorial on setting up multimaster with "hot" masters is here:
Multimaster with Failover
=========================

Changing the ``master_type`` parameter from ``standard`` to ``failover`` will
Changing the ``master_type`` parameter from ``str`` to ``failover`` will
cause minions to connect to the first responding master in the list of masters.
Every ``master_alive_check`` seconds the minions will check to make sure
the current master is still responding. If the master does not respond,
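
A minimal sketch of the failover setup described above, in the minion configuration (hostnames and the interval are illustrative values, not recommendations):

.. code-block:: yaml

    master:
      - master1.example.com
      - master2.example.com
    master_type: failover
    master_alive_interval: 30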
@ -364,17 +364,17 @@ can be created like this:
Master Config In Pillar
=======================

For convenience the data stored in the master configuration file is made
For convenience the data stored in the master configuration file can be made
available in all minions' pillars. This makes global configuration of services
and systems very easy but may not be desired if sensitive data is stored in the
master configuration.
master configuration. This option is disabled by default.

To disable the master config from being added to the pillar set ``pillar_opts``
to ``False``:
To enable the master config to be added to the pillar, set ``pillar_opts``
to ``True``:

.. code-block:: yaml

    pillar_opts: False
    pillar_opts: True
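
Once ``pillar_opts`` is set to ``True``, the master's configuration shows up under the ``master`` pillar key on every minion; a quick way to inspect it (a sketch, assuming the option is enabled) is:

.. code-block:: bash

    salt '*' pillar.item master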

Minion Config in Pillar
@ -16,7 +16,7 @@ The easiest way to install a release candidate of Salt is using

.. code-block:: bash

    curl -o install_salt.sh -L https://bootstrap.saltstack.com
    sudo sh install_salt.sh git v2015.8.0rc1
    sudo sh install_salt.sh git v2015.8.0rc2

If you want to also install a master using `Salt Bootstrap`_, use the ``-M``
flag:

@ -24,7 +24,7 @@ flag:

.. code-block:: bash

    curl -o install_salt.sh -L https://bootstrap.saltstack.com
    sudo sh install_salt.sh -M git v2015.8.0rc1
    sudo sh install_salt.sh -M git v2015.8.0rc2

If you want to install only a master and not a minion using `Salt Bootstrap`_,
use the ``-M`` and ``-N`` flags:

@ -32,7 +32,7 @@ use the ``-M`` and ``-N`` flags:

.. code-block:: bash

    curl -o install_salt.sh -L https://bootstrap.saltstack.com
    sudo sh install_salt.sh -M -N git v2015.8.0rc1
    sudo sh install_salt.sh -M -N git v2015.8.0rc2


Installation from Source Tarball

@ -44,9 +44,9 @@ installation docs <_installation>`. Then install salt using the following:

.. code-block:: bash

    curl -O https://pypi.python.org/packages/source/s/salt/salt-2015.8.0rc1.tar.gz
    tar -xzvf salt-2015.8.0rc1.tar.gz
    cd salt-2015.8.0rc1
    curl -O https://pypi.python.org/packages/source/s/salt/salt-2015.8.0rc2.tar.gz
    tar -xzvf salt-2015.8.0rc2.tar.gz
    cd salt-2015.8.0rc2
    sudo python setup.py install
@ -1,11 +1,11 @@
===================
Using salt at scale
Using Salt at scale
===================

The focus of this tutorial will be building a Salt infrastructure for handling
large numbers of minions. This will include tuning, topology, and best practices.

For how to install the saltmaster please
For how to install the Salt Master please
go here: `Installing saltstack <http://docs.saltstack.com/topics/installation/index.html>`_

.. note::

@ -17,12 +17,12 @@ go here: `Installing saltstack <http://docs.saltstack.com/topics/installation/in
and 'a few' always means 500.

For simplicity, this tutorial will default to the standard ports
used by salt.
used by Salt.

The Master
==========

The most common problems on the salt-master are:
The most common problems on the Salt Master are:

1. too many minions authing at once
2. too many minions re-authing at once

@ -31,7 +31,7 @@ The most common problems on the salt-master are:
5. too few resources (CPU/HDD)

The first three are all "thundering herd" problems. To mitigate these issues
we must configure the minions to back-off appropriately when the master is
we must configure the minions to back-off appropriately when the Master is
under heavy load.

The fourth is caused by masters with little hardware resources in combination

@ -40,44 +40,51 @@ with a possible bug in ZeroMQ. At least that's what it looks like till today
`Issue 5948 <https://github.com/saltstack/salt/issues/5948>`_,
`Mail thread <https://groups.google.com/forum/#!searchin/salt-users/lots$20of$20minions/salt-users/WxothArv2Do/t12MigMQDFAJ>`_)

To fully understand each problem, it is important to understand how salt works.
To fully understand each problem, it is important to understand how Salt works.

Very briefly, the saltmaster offers two services to the minions.
Very briefly, the Salt Master offers two services to the minions.

- a job publisher on port 4505
- an open port 4506 to receive the minions' returns

All minions are always connected to the publisher on port 4505 and only connect
to the open return port 4506 if necessary. On an idle master, there will only
to the open return port 4506 if necessary. On an idle Master, there will only
be connections on port 4505.

Too many minions authing
------------------------
When the minion service is first started up, it will connect to its master's publisher

When the Minion service is first started up, it will connect to its Master's publisher
on port 4505. If too many minions are started at once, this can cause a "thundering herd".
This can be avoided by not starting too many minions at once.

The connection itself usually isn't the culprit; the more likely cause of master-side
issues is the authentication that the minion must do with the master. If the master
is too heavily loaded to handle the auth request it will time it out. The minion
issues is the authentication that the Minion must do with the Master. If the Master
is too heavily loaded to handle the auth request it will time it out. The Minion
will then wait `acceptance_wait_time` to retry. If `acceptance_wait_time_max` is
set then the minion will increase its wait time by the `acceptance_wait_time` each
set then the Minion will increase its wait time by the `acceptance_wait_time` each
subsequent retry until reaching `acceptance_wait_time_max`.
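
A hedged sketch of those back-off settings in the minion configuration (the numbers are illustrative, not recommendations):

.. code-block:: yaml

    acceptance_wait_time: 10
    acceptance_wait_time_max: 60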

Too many minions re-authing
---------------------------
This is most likely to happen in the testing phase, when all minion keys have
already been accepted, the framework is being tested and parameters change
frequently in the master's configuration file.

In a few cases (master restart, remove minion key, etc.) the salt-master generates
a new AES-key to encrypt its publications with. The minions aren't notified of
this but will realize this on the next pub job they receive. When the minion
receives such a job it will then re-auth with the master. Since Salt does minion-side
filtering this means that all the minions will re-auth on the next command published
on the master-- causing another "thundering herd". This can be avoided by
setting the
This is most likely to happen in the testing phase of a Salt deployment, when
all Minion keys have already been accepted, but the framework is being tested
and parameters are frequently changed in the Salt Master's configuration
file(s).

The Salt Master generates a new AES key to encrypt its publications at certain
events such as a Master restart or the removal of a Minion key. If you are
encountering this problem of too many minions re-authing against the Master,
you will need to recalibrate your setup to reduce the rate of events like a
Master restart or Minion key removal (``salt-key -d``).

When the Master generates a new AES key, the minions aren't notified of this
but will discover it on the next pub job they receive. When the Minion
receives such a job it will then re-auth with the Master. Since Salt does
minion-side filtering this means that all the minions will re-auth on the next
command published on the master-- causing another "thundering herd". This can
be avoided by setting the

.. code-block:: yaml

@ -85,11 +92,11 @@ setting the

in the minion's configuration file to a higher value and stagger the amount
of re-auth attempts. Increasing this value will of course increase the time
it takes until all minions are reachable via salt commands.
it takes until all minions are reachable via Salt commands.
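
The option name itself is cut off by the hunk boundary above; in the upstream minion configuration the setting being described is ``random_reauth_delay`` (stated here as an assumption, since the diff truncates the example), for example:

.. code-block:: yaml

    random_reauth_delay: 60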

Too many minions re-connecting
------------------------------

By default the zmq socket will re-connect every 100ms which for some larger
installations may be too quick. This will control how quickly the TCP session is
re-established, but has no bearing on the auth load.

@ -111,15 +118,15 @@ the sample configuration file (default values)
To tune these values to an existing environment, a few decisions have to be made.

1. How long can one wait before the minions should be online and reachable via salt?
1. How long can one wait before the minions should be online and reachable via Salt?

2. How many reconnects can the master handle without a syn flood?
2. How many reconnects can the Master handle without a syn flood?

These questions cannot be answered generally. Their answers depend on the
hardware and the administrator's requirements.

Here is an example scenario with the goal of having all minions reconnect
within a 60 second time-frame on a salt-master service restart.
within a 60 second time-frame on a Salt Master service restart.

.. code-block:: yaml

@ -127,7 +134,7 @@ within a 60 second time-frame on a Salt Master service restart.
    recon_max: 59000
    recon_randomize: True

Each minion will have a randomized reconnect value between 'recon_default'
Each Minion will have a randomized reconnect value between 'recon_default'
and 'recon_default + recon_max', which in this example means between 1000ms
and 60000ms (or between 1 and 60 seconds). The generated random-value will
be doubled after each attempt to reconnect (ZeroMQ default behavior).
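
As a rough sanity check of that window: with, say, 1000 minions spread evenly over the 1000-60000 ms range, roughly 1000 / 60 ≈ 17 initial reconnect attempts land in any given second, which is the same order of magnitude as the "round about 16 connection attempts a second" figure quoted in the next hunk.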

@ -157,7 +164,6 @@ round about 16 connection attempts a second. These values should be altered to
values that match your environment. Keep in mind, though, that it may grow over
time and that more minions might raise the problem again.


Too many minions returning at once
----------------------------------

@ -168,11 +174,11 @@ once with

    $ salt * test.ping

it may cause thousands of minions trying to return their data to the salt-master
open port 4506. Also causing a syn flood if the master can't handle that many
it may cause thousands of minions trying to return their data to the Salt Master
open port 4506. Also causing a syn flood if the Master can't handle that many
returns at once.

This can be easily avoided with salts batch mode:
This can be easily avoided with Salt's batch mode:

.. code-block:: bash

@ -181,19 +187,18 @@ This can be easily avoided with Salt's batch mode:
This will only address 50 minions at once while looping through all addressed
minions.
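
The batch invocation itself falls outside the hunk; a typical command matching the description (shown here as an assumed example) is:

.. code-block:: bash

    salt '*' test.ping -b 50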

Too few resources
=================

The master's resources always have to match the environment. There is no way
to give good advice without knowing the environment the master is supposed to
to give good advice without knowing the environment the Master is supposed to
run in. But here are some general tuning tips for different situations:

The master is CPU bound
The Master is CPU bound
-----------------------

Salt uses RSA-Key-Pairs on the master's and minions' end. Both generate 4096
bit key-pairs on first start. While the key-size for the master is currently
bit key-pairs on first start. While the key-size for the Master is currently
not configurable, the minion's keysize can be configured with different
key-sizes. For example with a 2048 bit key:
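
The example that follows is truncated by the hunk boundary; in the upstream minion configuration the relevant option is ``keysize`` (assumed here), e.g.:

.. code-block:: yaml

    keysize: 2048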

@ -206,13 +211,13 @@ masters end should not be neglected. See here for reference:
`Pull Request 9235 <https://github.com/saltstack/salt/pull/9235>`_ how much
influence the key-size can have.

Downsizing the salt-masters key is not that important, because the minions
do not encrypt as many messages as the master does.
Downsizing the Salt Master's key is not that important, because the minions
do not encrypt as many messages as the Master does.

The master is disk IO bound
The Master is disk IO bound
---------------------------

By default, the master saves every minion's return for every job in its
By default, the Master saves every Minion's return for every job in its
job-cache. The cache can then be used later to look up results for previous
jobs. The default directory for this is:

@ -222,7 +227,7 @@ jobs. The default directory for this is:

and then in the ``/proc`` directory.

Each job return for every minion is saved in a single file. Over time this
Each job return for every Minion is saved in a single file. Over time this
directory can grow quite large, depending on the number of published jobs. The
amount of files and directories will scale with the number of jobs published and
the retention time defined by

@ -245,6 +250,6 @@ If no job history is needed, the job cache can be disabled:
If the job cache is necessary there are (currently) 2 options:

- ext_job_cache: this will have the minions store their return data directly
into a returner (not sent through the master)
- master_job_cache (New in `2014.7.0`): this will make the master store the job
data using a returner (instead of the local job cache on disk).
into a returner (not sent through the Master)
- master_job_cache (New in `2014.7.0`): this will make the Master store the job
data using a returner (instead of the local job cache on disk).
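
A minimal sketch of the two options in the configuration (``mysql`` is only an illustrative returner choice):

.. code-block:: yaml

    # minions write returns directly to a returner, bypassing the master's disk cache
    ext_job_cache: mysql

    # or: keep the master-side job cache, but back it with a returner
    master_job_cache: mysql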
|
||||
|
@ -18,13 +18,9 @@ hash -r
|
||||
|
||||
rm -rf /var/db/pkgin/
|
||||
pkgin -y up
|
||||
pkgin -y in build-essential salt swig py27-pip unzip py27-mysqldb libsodium mysql-client
|
||||
pkgin -y in build-essential salt swig py27-pip unzip py27-mysqldb libsodium mysql-client patchelf
|
||||
pkgin -y rm salt py27-zmq
|
||||
|
||||
cd /opt/local/bin
|
||||
curl -kO 'https://us-east.manta.joyent.com/nahamu/public/smartos/bins/patchelf'
|
||||
chmod +x patchelf
|
||||
|
||||
pip install --egg esky bbfreeze
|
||||
|
||||
cd $HERE
|
||||
|
@ -172,7 +172,10 @@ class SaltCMD(parsers.SaltCMDOptionParser):
|
||||
ret = {}
|
||||
for progress in cmd_func(**kwargs):
|
||||
out = 'progress'
|
||||
self._progress_ret(progress, out)
|
||||
try:
|
||||
self._progress_ret(progress, out)
|
||||
except salt.exceptions.LoaderError as exc:
|
||||
raise salt.exceptions.SaltSystemExit(exc)
|
||||
if 'return_count' not in progress:
|
||||
ret.update(progress)
|
||||
self._progress_end(out)
|
||||
@ -274,7 +277,11 @@ class SaltCMD(parsers.SaltCMDOptionParser):
|
||||
import salt.output
|
||||
# Get the progress bar
|
||||
if not hasattr(self, 'progress_bar'):
|
||||
self.progress_bar = salt.output.get_progress(self.config, out, progress)
|
||||
try:
|
||||
self.progress_bar = salt.output.get_progress(self.config, out, progress)
|
||||
except Exception as exc:
|
||||
raise salt.exceptions.LoaderError('\nWARNING: Install the `progressbar` python package. '
|
||||
'Requested job was still run but output cannot be displayed.\n')
|
||||
salt.output.update_progress(self.config, progress, self.progress_bar, out)
|
||||
|
||||
def _output_ret(self, ret, out):
|
||||
|
@ -653,6 +653,7 @@ class Single(object):
|
||||
'root_dir': os.path.join(self.thin_dir, 'running_data'),
|
||||
'id': self.id,
|
||||
'sock_dir': '/',
|
||||
'log_file': 'salt-call.log'
|
||||
})
|
||||
self.minion_config = yaml.dump(self.minion_opts)
|
||||
self.target = kwargs
|
||||
@ -1075,6 +1076,11 @@ ARGS = {9}\n'''.format(self.minion_config,
|
||||
'checksum mismatched',
|
||||
'The salt thin transfer was corrupted'
|
||||
),
|
||||
(
|
||||
(salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
|
||||
'scp not found',
|
||||
'No scp binary. openssh-clients package required'
|
||||
),
|
||||
(
|
||||
(salt.defaults.exitcodes.EX_CANTCREAT,),
|
||||
'salt path .* exists but is not a directory',
|
||||
|
@ -16,6 +16,7 @@ import shutil
|
||||
import sys
|
||||
import os
|
||||
import stat
|
||||
import subprocess
|
||||
|
||||
THIN_ARCHIVE = 'salt-thin.tgz'
|
||||
EXT_ARCHIVE = 'salt-ext_mods.tgz'
|
||||
@ -24,6 +25,7 @@ EXT_ARCHIVE = 'salt-ext_mods.tgz'
|
||||
EX_THIN_DEPLOY = 11
|
||||
EX_THIN_CHECKSUM = 12
|
||||
EX_MOD_DEPLOY = 13
|
||||
EX_SCP_NOT_FOUND = 14
|
||||
|
||||
|
||||
class OBJ(object):
|
||||
@ -132,6 +134,10 @@ def main(argv): # pylint: disable=W0613
|
||||
unpack_thin(thin_path)
|
||||
# Salt thin now is available to use
|
||||
else:
|
||||
scpstat = subprocess.Popen(['/bin/bash', '-c', 'command -v scp']).wait()
|
||||
if not scpstat == 0:
|
||||
sys.exit(EX_SCP_NOT_FOUND)
|
||||
|
||||
if not os.path.exists(OPTIONS.saltdir):
|
||||
need_deployment()
|
||||
|
||||
@ -206,14 +212,12 @@ def main(argv): # pylint: disable=W0613
|
||||
sys.stderr.write(OPTIONS.delimiter + '\n')
|
||||
sys.stderr.flush()
|
||||
if OPTIONS.tty:
|
||||
import subprocess
|
||||
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
|
||||
sys.stdout.write(stdout)
|
||||
sys.stdout.flush()
|
||||
if OPTIONS.wipe:
|
||||
shutil.rmtree(OPTIONS.saltdir)
|
||||
elif OPTIONS.wipe:
|
||||
import subprocess
|
||||
subprocess.call(salt_argv)
|
||||
shutil.rmtree(OPTIONS.saltdir)
|
||||
else:
|
||||
|
@ -1658,11 +1658,18 @@ class Map(Cloud):
|
||||
for alias, drivers in six.iteritems(query_map):
|
||||
for driver, vms in six.iteritems(drivers):
|
||||
for vm_name, vm_details in six.iteritems(vms):
|
||||
if (vm_details != 'Absent') and \
|
||||
(
|
||||
vm_details['state'].lower() in
|
||||
matching_states[action]
|
||||
):
|
||||
# Only certain actions are supported for use in this case. Those actions are the
|
||||
# "Global" salt-cloud actions defined in the "matching_states" dictionary above.
|
||||
# If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
|
||||
try:
|
||||
state_action = matching_states[action]
|
||||
except KeyError:
|
||||
log.error(
|
||||
'The use of \'{0}\' as an action is not supported in this context. '
|
||||
'Only \'start\', \'stop\', and \'reboot\' are supported options.'.format(action)
|
||||
)
|
||||
raise SaltCloudException()
|
||||
if (vm_details != 'Absent') and (vm_details['state'].lower() in state_action):
|
||||
vm_names.append(vm_name)
|
||||
return vm_names
|
||||
|
||||
|
@ -196,7 +196,11 @@ class SaltCloud(parsers.SaltCloudParser):
|
||||
self.config.get('map', None)):
|
||||
if self.config.get('map', None):
|
||||
log.info('Applying map from {0!r}.'.format(self.config['map']))
|
||||
names = mapper.get_vmnames_by_action(self.options.action)
|
||||
try:
|
||||
names = mapper.get_vmnames_by_action(self.options.action)
|
||||
except SaltCloudException as exc:
|
||||
msg = 'There was an error actioning virtual machines.'
|
||||
self.handle_exception(msg, exc)
|
||||
else:
|
||||
names = self.config.get('names', None)
|
||||
|
||||
|
@ -19,6 +19,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
|
||||
password: mypassword
|
||||
url: hypervisor.domain.tld
|
||||
driver: proxmox
|
||||
verify_ssl: True
|
||||
|
||||
:maintainer: Frank Klaassen <frank@cloudright.nl>
|
||||
:maturity: new
|
||||
@ -90,13 +91,14 @@ def get_configured_provider():
|
||||
url = None
|
||||
ticket = None
|
||||
csrf = None
|
||||
verify_ssl = None
|
||||
|
||||
|
||||
def _authenticate():
|
||||
'''
|
||||
Retrieve CSRF and API tickets for the Proxmox API
|
||||
'''
|
||||
global url, ticket, csrf
|
||||
global url, ticket, csrf, verify_ssl
|
||||
url = config.get_cloud_config_value(
|
||||
'url', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
@ -106,12 +108,17 @@ def _authenticate():
|
||||
passwd = config.get_cloud_config_value(
|
||||
'password', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
verify_ssl = config.get_cloud_config_value(
|
||||
'verify_ssl', get_configured_provider(), __opts__, search_global=False
|
||||
)
|
||||
if verify_ssl is None:
|
||||
verify_ssl = True
|
||||
|
||||
connect_data = {'username': username, 'password': passwd}
|
||||
full_url = 'https://{0}:8006/api2/json/access/ticket'.format(url)
|
||||
|
||||
returned_data = requests.post(
|
||||
full_url, verify=True, data=connect_data).json()
|
||||
full_url, verify=verify_ssl, data=connect_data).json()
|
||||
|
||||
ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
|
||||
csrf = str(returned_data['data']['CSRFPreventionToken'])
|
||||
@ -135,24 +142,24 @@ def query(conn_type, option, post_data=None):
|
||||
|
||||
if conn_type == 'post':
|
||||
httpheaders['CSRFPreventionToken'] = csrf
|
||||
response = requests.post(full_url, verify=True,
|
||||
response = requests.post(full_url, verify=verify_ssl,
|
||||
data=post_data,
|
||||
cookies=ticket,
|
||||
headers=httpheaders)
|
||||
elif conn_type == 'put':
|
||||
httpheaders['CSRFPreventionToken'] = csrf
|
||||
response = requests.put(full_url, verify=True,
|
||||
response = requests.put(full_url, verify=verify_ssl,
|
||||
data=post_data,
|
||||
cookies=ticket,
|
||||
headers=httpheaders)
|
||||
elif conn_type == 'delete':
|
||||
httpheaders['CSRFPreventionToken'] = csrf
|
||||
response = requests.delete(full_url, verify=True,
|
||||
response = requests.delete(full_url, verify=verify_ssl,
|
||||
data=post_data,
|
||||
cookies=ticket,
|
||||
headers=httpheaders)
|
||||
elif conn_type == 'get':
|
||||
response = requests.get(full_url, verify=True,
|
||||
response = requests.get(full_url, verify=verify_ssl,
|
||||
cookies=ticket)
|
||||
|
||||
response.raise_for_status()
|
||||
|
@ -843,22 +843,24 @@ def _format_instance_info_select(vm, selection):
|
||||
vm_select_info['id'] = vm["name"]
|
||||
|
||||
if 'image' in selection:
|
||||
vm_select_info['image'] = "{0} (Detected)".format(vm["config.guestFullName"])
|
||||
vm_select_info['image'] = "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A"
|
||||
|
||||
if 'size' in selection:
|
||||
vm_select_info['size'] = u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"])
|
||||
cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A"
|
||||
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A"
|
||||
vm_select_info['size'] = u"cpu: {0}\nram: {1}".format(cpu, ram)
|
||||
|
||||
if 'state' in selection:
|
||||
vm_select_info['state'] = str(vm["summary.runtime.powerState"])
|
||||
vm_select_info['state'] = str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A"
|
||||
|
||||
if 'guest_id' in selection:
|
||||
vm_select_info['guest_id'] = vm["config.guestId"]
|
||||
vm_select_info['guest_id'] = vm["config.guestId"] if "config.guestId" in vm else "N/A"
|
||||
|
||||
if 'hostname' in selection:
|
||||
vm_select_info['hostname'] = vm["object"].guest.hostName
|
||||
|
||||
if 'path' in selection:
|
||||
vm_select_info['path'] = vm["config.files.vmPathName"]
|
||||
vm_select_info['path'] = vm["config.files.vmPathName"] if "config.files.vmPathName" in vm else "N/A"
|
||||
|
||||
if 'tools_status' in selection:
|
||||
vm_select_info['tools_status'] = str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"
|
||||
@ -868,14 +870,15 @@ def _format_instance_info_select(vm, selection):
|
||||
ip_addresses = []
|
||||
mac_addresses = []
|
||||
|
||||
for net in vm["guest.net"]:
|
||||
network_full_info[net.network] = {
|
||||
'connected': net.connected,
|
||||
'ip_addresses': net.ipAddress,
|
||||
'mac_address': net.macAddress
|
||||
}
|
||||
ip_addresses.extend(net.ipAddress)
|
||||
mac_addresses.append(net.macAddress)
|
||||
if "guest.net" in vm:
|
||||
for net in vm["guest.net"]:
|
||||
network_full_info[net.network] = {
|
||||
'connected': net.connected,
|
||||
'ip_addresses': net.ipAddress,
|
||||
'mac_address': net.macAddress
|
||||
}
|
||||
ip_addresses.extend(net.ipAddress)
|
||||
mac_addresses.append(net.macAddress)
|
||||
|
||||
if 'private_ips' in selection:
|
||||
vm_select_info['private_ips'] = ip_addresses
|
||||
@ -888,6 +891,63 @@ def _format_instance_info_select(vm, selection):
|
||||
|
||||
if 'devices' in selection:
|
||||
device_full_info = {}
|
||||
if "config.hardware.device" in vm:
|
||||
for device in vm["config.hardware.device"]:
|
||||
device_full_info[device.deviceInfo.label] = {
|
||||
'key': device.key,
|
||||
'label': device.deviceInfo.label,
|
||||
'summary': device.deviceInfo.summary,
|
||||
'type': type(device).__name__.rsplit(".", 1)[1],
|
||||
'unitNumber': device.unitNumber
|
||||
}
|
||||
|
||||
if hasattr(device.backing, 'network'):
|
||||
device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
|
||||
device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress
|
||||
|
||||
if hasattr(device, 'busNumber'):
|
||||
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
|
||||
|
||||
if hasattr(device, 'device'):
|
||||
device_full_info[device.deviceInfo.label]['devices'] = device.device
|
||||
|
||||
if hasattr(device, 'videoRamSizeInKB'):
|
||||
device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
|
||||
|
||||
if isinstance(device, vim.vm.device.VirtualDisk):
|
||||
device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB
|
||||
device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
|
||||
device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
|
||||
|
||||
vm_select_info['devices'] = device_full_info
|
||||
|
||||
if 'storage' in selection:
|
||||
storage_full_info = {
|
||||
'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A",
|
||||
'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A",
|
||||
'unshared': int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"
|
||||
}
|
||||
vm_select_info['storage'] = storage_full_info
|
||||
|
||||
if 'files' in selection:
|
||||
file_full_info = {}
|
||||
if "layoutEx.file" in file:
|
||||
for file in vm["layoutEx.file"]:
|
||||
file_full_info[file.key] = {
|
||||
'key': file.key,
|
||||
'name': file.name,
|
||||
'size': file.size,
|
||||
'type': file.type
|
||||
}
|
||||
vm_select_info['files'] = file_full_info
|
||||
|
||||
return vm_select_info
|
||||
|
||||
|
||||
def _format_instance_info(vm):
|
||||
device_full_info = {}
|
||||
|
||||
if "config.hardware.device" in vm:
|
||||
for device in vm["config.hardware.device"]:
|
||||
device_full_info[device.deviceInfo.label] = {
|
||||
'key': device.key,
|
||||
@ -915,18 +975,14 @@ def _format_instance_info_select(vm, selection):
|
||||
device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
|
||||
device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
|
||||
|
||||
vm_select_info['devices'] = device_full_info
|
||||
storage_full_info = {
|
||||
'committed': int(vm["summary.storage.committed"]) if "summary.storage.committed" in vm else "N/A",
|
||||
'uncommitted': int(vm["summary.storage.uncommitted"]) if "summary.storage.uncommitted" in vm else "N/A",
|
||||
'unshared': int(vm["summary.storage.unshared"]) if "summary.storage.unshared" in vm else "N/A"
|
||||
}
|
||||
|
||||
if 'storage' in selection:
|
||||
storage_full_info = {
|
||||
'committed': vm["summary.storage.committed"],
|
||||
'uncommitted': vm["summary.storage.uncommitted"],
|
||||
'unshared': vm["summary.storage.unshared"]
|
||||
}
|
||||
vm_select_info['storage'] = storage_full_info
|
||||
|
||||
if 'files' in selection:
|
||||
file_full_info = {}
|
||||
file_full_info = {}
|
||||
if "layoutEx.file" in vm:
|
||||
for file in vm["layoutEx.file"]:
|
||||
file_full_info[file.key] = {
|
||||
'key': file.key,
|
||||
@ -934,83 +990,38 @@ def _format_instance_info_select(vm, selection):
|
||||
'size': file.size,
|
||||
'type': file.type
|
||||
}
|
||||
vm_select_info['files'] = file_full_info
|
||||
|
||||
return vm_select_info
|
||||
|
||||
|
||||
def _format_instance_info(vm):
|
||||
device_full_info = {}
|
||||
for device in vm["config.hardware.device"]:
|
||||
device_full_info[device.deviceInfo.label] = {
|
||||
'key': device.key,
|
||||
'label': device.deviceInfo.label,
|
||||
'summary': device.deviceInfo.summary,
|
||||
'type': type(device).__name__.rsplit(".", 1)[1],
|
||||
'unitNumber': device.unitNumber
|
||||
}
|
||||
|
||||
if hasattr(device.backing, 'network'):
|
||||
device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
|
||||
device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress
|
||||
|
||||
if hasattr(device, 'busNumber'):
|
||||
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
|
||||
|
||||
if hasattr(device, 'device'):
|
||||
device_full_info[device.deviceInfo.label]['devices'] = device.device
|
||||
|
||||
if hasattr(device, 'videoRamSizeInKB'):
|
||||
device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
|
||||
|
||||
if isinstance(device, vim.vm.device.VirtualDisk):
|
||||
device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB
|
||||
device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
|
||||
device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
|
||||
|
||||
storage_full_info = {
|
||||
'committed': int(vm["summary.storage.committed"]),
|
||||
'uncommitted': int(vm["summary.storage.uncommitted"]),
|
||||
'unshared': int(vm["summary.storage.unshared"])
|
||||
}
|
||||
|
||||
file_full_info = {}
|
||||
for file in vm["layoutEx.file"]:
|
||||
file_full_info[file.key] = {
|
||||
'key': file.key,
|
||||
'name': file.name,
|
||||
'size': file.size,
|
||||
'type': file.type
|
||||
}
|
||||
|
||||
network_full_info = {}
|
||||
ip_addresses = []
|
||||
mac_addresses = []
|
||||
for net in vm["guest.net"]:
|
||||
network_full_info[net.network] = {
|
||||
'connected': net.connected,
|
||||
'ip_addresses': net.ipAddress,
|
||||
'mac_address': net.macAddress
|
||||
}
|
||||
ip_addresses.extend(net.ipAddress)
|
||||
mac_addresses.append(net.macAddress)
|
||||
if "guest.net" in vm:
|
||||
for net in vm["guest.net"]:
|
||||
network_full_info[net.network] = {
|
||||
'connected': net.connected,
|
||||
'ip_addresses': net.ipAddress,
|
||||
'mac_address': net.macAddress
|
||||
}
|
||||
ip_addresses.extend(net.ipAddress)
|
||||
mac_addresses.append(net.macAddress)
|
||||
|
||||
cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A"
|
||||
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A"
|
||||
vm_full_info = {
|
||||
'id': str(vm['name']),
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]),
|
||||
'size': u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"]),
|
||||
'state': str(vm["summary.runtime.powerState"]),
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A",
|
||||
'size': u"cpu: {0}\nram: {1}".format(cpu, ram),
|
||||
'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A",
|
||||
'private_ips': ip_addresses,
|
||||
'public_ips': [],
|
||||
'devices': device_full_info,
|
||||
'storage': storage_full_info,
|
||||
'files': file_full_info,
|
||||
'guest_id': str(vm["config.guestId"]),
|
||||
'guest_id': str(vm["config.guestId"]) if "config.guestId" in vm else "N/A",
|
||||
'hostname': str(vm["object"].guest.hostName),
|
||||
'mac_address': mac_addresses,
|
||||
'networks': network_full_info,
|
||||
'path': str(vm["config.files.vmPathName"]),
|
||||
'tools_status': str(vm["guest.toolsStatus"]),
|
||||
'path': str(vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A",
|
||||
'tools_status': str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"
|
||||
}
|
||||
|
||||
return vm_full_info
|
||||
@ -1407,11 +1418,13 @@ def list_nodes(kwargs=None, call=None):
|
||||
vm_list = _get_mors_with_properties(vim.VirtualMachine, vm_properties)
|
||||
|
||||
for vm in vm_list:
|
||||
cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A"
|
||||
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A"
|
||||
vm_info = {
|
||||
'id': vm["name"],
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]),
|
||||
'size': u"cpu: {0}\nram: {1}MB".format(vm["config.hardware.numCPU"], vm["config.hardware.memoryMB"]),
|
||||
'state': str(vm["summary.runtime.powerState"]),
|
||||
'image': "{0} (Detected)".format(vm["config.guestFullName"]) if "config.guestFullName" in vm else "N/A",
|
||||
'size': u"cpu: {0}\nram: {1}".format(cpu, ram),
|
||||
'state': str(vm["summary.runtime.powerState"]) if "summary.runtime.powerState" in vm else "N/A",
|
||||
'private_ips': [vm["guest.ipAddress"]] if "guest.ipAddress" in vm else [],
|
||||
'public_ips': []
|
||||
}
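# (Illustrative aside, not part of this change.) The repeated
# ``vm[key] if key in vm else "N/A"`` guards used above could be collapsed into
# a small helper such as:
#   def _prop(vm, key, default="N/A"):
#       return vm[key] if key in vm else default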
|
||||
|
@ -162,6 +162,8 @@ VALID_OPTS = {
|
||||
# Allows a user to provide an alternate name for top.sls
|
||||
'state_top': str,
|
||||
|
||||
'state_top_saltenv': str,
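# (Added option; illustrative usage: ``state_top_saltenv: base`` pins top.sls
# evaluation to that one saltenv instead of merging every environment.)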
|
||||
|
||||
# States to run when a minion starts up
|
||||
'startup_states': str,
|
||||
|
||||
@ -738,6 +740,7 @@ DEFAULT_MINION_OPTS = {
|
||||
'pillarenv': None,
|
||||
'extension_modules': '',
|
||||
'state_top': 'top.sls',
|
||||
'state_top_saltenv': None,
|
||||
'startup_states': '',
|
||||
'sls_list': [],
|
||||
'top_file': '',
|
||||
@ -986,6 +989,7 @@ DEFAULT_MASTER_OPTS = {
|
||||
'renderer': 'yaml_jinja',
|
||||
'failhard': False,
|
||||
'state_top': 'top.sls',
|
||||
'state_top_saltenv': None,
|
||||
'master_tops': {},
|
||||
'order_masters': False,
|
||||
'job_cache': True,
|
||||
|
@ -480,6 +480,7 @@ class RemoteFuncs(object):
|
||||
mopts['renderer'] = self.opts['renderer']
|
||||
mopts['failhard'] = self.opts['failhard']
|
||||
mopts['state_top'] = self.opts['state_top']
|
||||
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
|
||||
mopts['nodegroups'] = self.opts['nodegroups']
|
||||
mopts['state_auto_order'] = self.opts['state_auto_order']
|
||||
mopts['state_events'] = self.opts['state_events']
|
||||
|
@ -14,6 +14,7 @@ EX_THIN_PYTHON_INVALID = 10
|
||||
EX_THIN_DEPLOY = 11
|
||||
EX_THIN_CHECKSUM = 12
|
||||
EX_MOD_DEPLOY = 13
|
||||
EX_SCP_NOT_FOUND = 14
|
||||
|
||||
# One of a collection failed
|
||||
EX_AGGREGATE = 20
|
||||
|
@ -563,7 +563,9 @@ class Client(object):
|
||||
service_url=self.opts.get('s3.service_url',
|
||||
None),
|
||||
verify_ssl=self.opts.get('s3.verify_ssl',
|
||||
True))
|
||||
True),
|
||||
location=self.opts.get('s3.location',
|
||||
None))
|
||||
return dest
|
||||
except Exception:
|
||||
raise MinionError('Could not fetch from {0}'.format(url))
|
||||
|
@ -320,8 +320,11 @@ def _get_s3_key():
|
||||
verify_ssl = __opts__['s3.verify_ssl'] \
|
||||
if 's3.verify_ssl' in __opts__ \
|
||||
else None
|
||||
location = __opts__['s3.location'] \
|
||||
if 's3.location' in __opts__ \
|
||||
else None
|
||||
|
||||
return key, keyid, service_url, verify_ssl
|
||||
return key, keyid, service_url, verify_ssl, location
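# (Illustrative aside, not part of this change.) The new ``location`` value is
# driven by an ``s3.location`` entry alongside the existing settings, e.g. in
# the config:
#   s3.keyid: <access key id>
#   s3.key: <secret key>
#   s3.service_url: s3.amazonaws.com
#   s3.verify_ssl: True
#   s3.location: eu-west-1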
|
||||
|
||||
|
||||
def _init():
|
||||
@ -391,7 +394,7 @@ def _refresh_buckets_cache_file(cache_file):
|
||||
|
||||
log.debug('Refreshing buckets cache file')
|
||||
|
||||
key, keyid, service_url, verify_ssl = _get_s3_key()
|
||||
key, keyid, service_url, verify_ssl, location = _get_s3_key()
|
||||
metadata = {}
|
||||
|
||||
# helper s3 query function
|
||||
@ -402,6 +405,7 @@ def _refresh_buckets_cache_file(cache_file):
|
||||
bucket=bucket,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl,
|
||||
location=location,
|
||||
return_bin=False)
|
||||
|
||||
if _is_env_per_bucket():
|
||||
@ -582,7 +586,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
|
||||
Checks the local cache for the file, if it's old or missing go grab the
|
||||
file from S3 and update the cache
|
||||
'''
|
||||
key, keyid, service_url, verify_ssl = _get_s3_key()
|
||||
key, keyid, service_url, verify_ssl, location = _get_s3_key()
|
||||
|
||||
# check the local cache...
|
||||
if os.path.isfile(cached_file_path):
|
||||
@ -617,6 +621,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
|
||||
bucket=bucket_name,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl,
|
||||
location=location,
|
||||
path=_quote(path),
|
||||
local_file=cached_file_path,
|
||||
full_headers=True
|
||||
@ -645,6 +650,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
|
||||
bucket=bucket_name,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl,
|
||||
location=location,
|
||||
path=_quote(path),
|
||||
local_file=cached_file_path
|
||||
)
|
||||
|
@ -208,14 +208,14 @@ class SaltLoggingClass(six.with_metaclass(LoggingMixInMeta, LOGGING_LOGGER_CLASS
|
||||
LOGGING_TEMP_HANDLER):
|
||||
continue
|
||||
|
||||
if not handler.lock:
|
||||
handler.createLock()
|
||||
handler.acquire()
|
||||
|
||||
formatter = handler.formatter
|
||||
if not formatter:
|
||||
continue
|
||||
|
||||
if not handler.lock:
|
||||
handler.createLock()
|
||||
handler.acquire()
|
||||
|
||||
fmt = formatter._fmt.replace('%', '%%')
|
||||
|
||||
match = MODNAME_PATTERN.search(fmt)
|
||||
|
@ -969,6 +969,7 @@ class AESFuncs(object):
|
||||
mopts['renderer'] = self.opts['renderer']
|
||||
mopts['failhard'] = self.opts['failhard']
|
||||
mopts['state_top'] = self.opts['state_top']
|
||||
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
|
||||
mopts['nodegroups'] = self.opts['nodegroups']
|
||||
mopts['state_auto_order'] = self.opts['state_auto_order']
|
||||
mopts['state_events'] = self.opts['state_events']
|
||||
|
@ -713,7 +713,7 @@ class Minion(MinionBase):
|
||||
(possibly failed) master will then be removed from the list of masters.
|
||||
'''
|
||||
# check if master_type was altered from its default
|
||||
if opts['master_type'] != 'str':
|
||||
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
|
||||
# check for a valid keyword
|
||||
if opts['master_type'] == 'func':
|
||||
# split module and function and try loading the module
|
||||
|
@ -104,6 +104,7 @@ def execute(context=None, lens=None, commands=()):
|
||||
|
||||
method_map = {
|
||||
'set': 'set',
|
||||
'setm': 'setm',
|
||||
'mv': 'move',
|
||||
'move': 'move',
|
||||
'ins': 'insert',
|
||||
@ -136,6 +137,13 @@ def execute(context=None, lens=None, commands=()):
|
||||
path = os.path.join(context.rstrip('/'), path.lstrip('/'))
|
||||
value = value.strip('"').strip("'")
|
||||
args = {'path': path, 'value': value}
|
||||
elif method == 'setm':
|
||||
base, sub, value = re.findall('([^\'" ]+|"[^"]*"|\'[^\']*\')', arg)
|
||||
base = base.rstrip()
|
||||
if context:
|
||||
base = os.path.join(context.rstrip('/'), base.lstrip('/'))
|
||||
value = value.strip('"').strip("'")
|
||||
args = {'base': base, 'sub': sub, 'value': value}
|
||||
elif method == 'move':
|
||||
path, dst = arg.split(' ', 1)
|
||||
if context:
|
||||
|
@ -35,56 +35,56 @@ def __virtual__():
|
||||
for simulating UAC forces a GUI prompt, and is not compatible with
|
||||
salt-minion running as SYSTEM.
|
||||
'''
|
||||
if __grains__['os_family'] != 'Windows':
|
||||
if not salt.utils.is_windows():
|
||||
return False
|
||||
elif __grains__['osrelease'] in ('XP', '2003Server'):
|
||||
return False
|
||||
return 'chocolatey'
|
||||
|
||||
|
||||
def _clear_context():
|
||||
def _clear_context(context):
|
||||
'''
|
||||
Clear variables stored in __context__. Run this function when a new version
|
||||
of chocolatey is installed.
|
||||
'''
|
||||
for var in (x for x in __context__ if x.startswith('chocolatey.')):
|
||||
__context__.pop(var)
|
||||
context.pop(var)
|
||||
|
||||
|
||||
def _yes():
|
||||
def _yes(context):
|
||||
'''
|
||||
Returns ['--yes'] if on v0.9.9.0 or later, otherwise returns an empty list
|
||||
'''
|
||||
if 'chocolatey._yes' in __context__:
|
||||
return __context__['chocolatey._yes']
|
||||
return context['chocolatey._yes']
|
||||
if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.9'):
|
||||
answer = ['--yes']
|
||||
else:
|
||||
answer = []
|
||||
__context__['chocolatey._yes'] = answer
|
||||
context['chocolatey._yes'] = answer
|
||||
return answer
|
||||
|
||||
|
||||
def _find_chocolatey():
|
||||
def _find_chocolatey(context, salt):
|
||||
'''
|
||||
Returns the full path to chocolatey.bat on the host.
|
||||
'''
|
||||
if 'chocolatey._path' in __context__:
|
||||
return __context__['chocolatey._path']
|
||||
if 'chocolatey._path' in context:
|
||||
return context['chocolatey._path']
|
||||
choc_defaults = ['C:\\Chocolatey\\bin\\chocolatey.bat',
|
||||
'C:\\ProgramData\\Chocolatey\\bin\\chocolatey.exe', ]
|
||||
|
||||
choc_path = __salt__['cmd.which']('chocolatey.exe')
|
||||
choc_path = salt['cmd.which']('chocolatey.exe')
|
||||
if not choc_path:
|
||||
for choc_dir in choc_defaults:
|
||||
if __salt__['cmd.has_exec'](choc_dir):
|
||||
if salt['cmd.has_exec'](choc_dir):
|
||||
choc_path = choc_dir
|
||||
if not choc_path:
|
||||
err = ('Chocolatey not installed. Use chocolatey.bootstrap to '
|
||||
'install the Chocolatey package manager.')
|
||||
log.error(err)
|
||||
raise CommandExecutionError(err)
|
||||
__context__['chocolatey._path'] = choc_path
|
||||
context['chocolatey._path'] = choc_path
|
||||
return choc_path
|
||||
|
||||
|
||||
@ -102,7 +102,7 @@ def chocolatey_version():
|
||||
'''
|
||||
if 'chocolatey._version' in __context__:
|
||||
return __context__['chocolatey._version']
|
||||
cmd = [_find_chocolatey(), 'help']
|
||||
cmd = [_find_chocolatey(__context__, __salt__), 'help']
|
||||
out = __salt__['cmd.run'](cmd, python_shell=False)
|
||||
for line in out.splitlines():
|
||||
line = line.lower()
|
||||
@ -144,7 +144,7 @@ def bootstrap(force=False):
|
||||
'''
|
||||
# Check if Chocolatey is already present in the path
|
||||
try:
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
except CommandExecutionError:
|
||||
choc_path = None
|
||||
if choc_path and not force:
|
||||
@ -249,7 +249,7 @@ def list_(narrow=None,
|
||||
salt '*' chocolatey.list <narrow>
|
||||
salt '*' chocolatey.list <narrow> all_versions=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'list']
|
||||
if narrow:
|
||||
cmd.append(narrow)
|
||||
@ -293,7 +293,7 @@ def list_webpi():
|
||||
|
||||
salt '*' chocolatey.list_webpi
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'list', '-Source', 'webpi']
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
@ -316,7 +316,7 @@ def list_windowsfeatures():
|
||||
|
||||
salt '*' chocolatey.list_windowsfeatures
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'list', '-Source', 'windowsfeatures']
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
@ -371,7 +371,7 @@ def install(name,
|
||||
salt '*' chocolatey.install <package name> version=<package version>
|
||||
salt '*' chocolatey.install <package name> install_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
# chocolatey helpfully only supports a single package argument
|
||||
cmd = [choc_path, 'install', name]
|
||||
if version:
|
||||
@ -386,7 +386,7 @@ def install(name,
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
if force_x86:
|
||||
cmd.extend(['-forcex86'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -394,7 +394,7 @@ def install(name,
|
||||
log.error(err)
|
||||
raise CommandExecutionError(err)
|
||||
elif name == 'chocolatey':
|
||||
_clear_context()
|
||||
_clear_context(__context__)
|
||||
|
||||
return result['stdout']
|
||||
|
||||
@ -422,13 +422,13 @@ def install_cygwin(name, install_args=None, override_args=False):
|
||||
salt '*' chocolatey.install_cygwin <package name>
|
||||
salt '*' chocolatey.install_cygwin <package name> install_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'cygwin', name]
|
||||
if install_args:
|
||||
cmd.extend(['-InstallArguments', install_args])
|
||||
if override_args:
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -468,7 +468,7 @@ def install_gem(name, version=None, install_args=None, override_args=False):
|
||||
salt '*' chocolatey.install_gem <package name> version=<package version>
|
||||
salt '*' chocolatey.install_gem <package name> install_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'gem', name]
|
||||
if version:
|
||||
cmd.extend(['-Version', version])
|
||||
@ -476,7 +476,7 @@ def install_gem(name, version=None, install_args=None, override_args=False):
|
||||
cmd.extend(['-InstallArguments', install_args])
|
||||
if override_args:
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -515,7 +515,7 @@ def install_missing(name, version=None, source=None):
|
||||
salt '*' chocolatey.install_missing <package name>
|
||||
salt '*' chocolatey.install_missing <package name> version=<package version>
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.8.24'):
|
||||
log.warning('installmissing is deprecated, using install')
|
||||
return install(name, version=version)
|
||||
@ -527,7 +527,7 @@ def install_missing(name, version=None, source=None):
|
||||
if source:
|
||||
cmd.extend(['-Source', source])
|
||||
# Shouldn't need this as this code should never run on v0.9.9 and newer
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -566,7 +566,7 @@ def install_python(name, version=None, install_args=None, override_args=False):
|
||||
salt '*' chocolatey.install_python <package name> version=<package version>
|
||||
salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'python', name]
|
||||
if version:
|
||||
cmd.extend(['-Version', version])
|
||||
@ -574,7 +574,7 @@ def install_python(name, version=None, install_args=None, override_args=False):
|
||||
cmd.extend(['-InstallArguments', install_args])
|
||||
if override_args:
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -599,9 +599,9 @@ def install_windowsfeatures(name):
|
||||
|
||||
salt '*' chocolatey.install_windowsfeatures <package name>
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'windowsfeatures', name]
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -635,13 +635,13 @@ def install_webpi(name, install_args=None, override_args=False):
|
||||
salt '*' chocolatey.install_webpi <package name>
|
||||
salt '*' chocolatey.install_webpi <package name> install_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'webpi', name]
|
||||
if install_args:
|
||||
cmd.extend(['-InstallArguments', install_args])
|
||||
if override_args:
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -680,7 +680,7 @@ def uninstall(name, version=None, uninstall_args=None, override_args=False):
|
||||
salt '*' chocolatey.uninstall <package name> version=<package version>
|
||||
salt '*' chocolatey.uninstall <package name> version=<package version> uninstall_args=<args> override_args=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
# chocolatey helpfully only supports a single package argument
|
||||
cmd = [choc_path, 'uninstall', name]
|
||||
if version:
|
||||
@ -689,7 +689,7 @@ def uninstall(name, version=None, uninstall_args=None, override_args=False):
|
||||
cmd.extend(['-UninstallArguments', uninstall_args])
|
||||
if override_args:
|
||||
cmd.extend(['-OverrideArguments'])
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -723,13 +723,13 @@ def update(name, source=None, pre_versions=False):
|
||||
salt "*" chocolatey.update <package name> pre_versions=True
|
||||
'''
|
||||
# chocolatey helpfully only supports a single package argument
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
cmd = [choc_path, 'update', name]
|
||||
if source:
|
||||
cmd.extend(['-Source', source])
|
||||
if salt.utils.is_true(pre_versions):
|
||||
cmd.append('-PreRelease')
|
||||
cmd.extend(_yes())
|
||||
cmd.extend(_yes(__context__))
|
||||
result = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if result['retcode'] != 0:
|
||||
@ -766,7 +766,7 @@ def version(name, check_remote=False, source=None, pre_versions=False):
|
||||
salt "*" chocolatey.version <package name>
|
||||
salt "*" chocolatey.version <package name> check_remote=True
|
||||
'''
|
||||
choc_path = _find_chocolatey()
|
||||
choc_path = _find_chocolatey(__context__, __salt__)
|
||||
if not choc_path:
|
||||
err = 'Chocolatey not installed. Use chocolatey.bootstrap to install the Chocolatey package manager.'
|
||||
log.error(err)
|
||||
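Throughout the hunks above the zero-argument helpers are replaced by versions that receive ``__context__`` (and, for ``_find_chocolatey``, ``__salt__``) explicitly. A minimal, self-contained sketch of that pattern; the names and the stubbed lookup below are hypothetical, not the module's real implementation:

.. code-block:: python

    def find_chocolatey(context, funcs):
        '''Locate choco.exe once and cache the result in the shared context.'''
        if 'chocolatey._path' not in context:
            context['chocolatey._path'] = funcs['which']('choco.exe')
        return context['chocolatey._path']

    # Stand-ins for __context__ and __salt__ so the sketch runs anywhere.
    context = {}
    funcs = {'which': lambda exe: 'C:\\ProgramData\\chocolatey\\bin\\' + exe}
    print(find_chocolatey(context, funcs))  # resolved through the stub once
    print(find_chocolatey(context, funcs))  # served from the cached context entry

Passing the dicts in keeps the helpers usable and testable without relying on the loader-injected globals.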
|
@ -710,10 +710,11 @@ def push(path, keep_symlinks=False, upload_path=None):
|
||||
'tok': auth.gen_token('salt')}
|
||||
channel = salt.transport.Channel.factory(__opts__)
|
||||
with salt.utils.fopen(path, 'rb') as fp_:
|
||||
init_send = False
|
||||
while True:
|
||||
load['loc'] = fp_.tell()
|
||||
load['data'] = fp_.read(__opts__['file_buffer_size'])
|
||||
if not load['data']:
|
||||
if not load['data'] and init_send:
|
||||
return True
|
||||
ret = channel.send(load)
|
||||
if not ret:
|
||||
@ -721,6 +722,7 @@ def push(path, keep_symlinks=False, upload_path=None):
|
||||
'\'file_recv\' set to \'True\' and that the file is not '
|
||||
'larger than the \'file_recv_size_max\' setting on the master.')
|
||||
return ret
|
||||
init_send = True
|
||||
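The ``init_send`` flag added above ensures the loop sends at least one payload before it can return, so pushing a zero-byte file still reaches the master. A self-contained sketch of that loop shape with the transport channel stubbed out (illustrative only, not the module's code):

.. code-block:: python

    import io

    def push_chunks(fileobj, send, bufsize=4):
        # Mirrors the guard: stop only once data is exhausted AND we sent at least once.
        init_send = False
        while True:
            data = fileobj.read(bufsize)
            if not data and init_send:
                return True
            if not send(data):
                return False
            init_send = True

    sent = []
    print(push_chunks(io.BytesIO(b''), lambda chunk: sent.append(chunk) or True))  # True
    print(sent)  # [b''] -- even an empty file produces one send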
|
||||
|
||||
def push_dir(path, glob=None, upload_path=None):
|
||||
|
@ -1793,7 +1793,7 @@ def get_routes(iface):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' ip.get_interface eth0
|
||||
salt '*' ip.get_routes eth0
|
||||
'''
|
||||
|
||||
filename = os.path.join(_DEB_NETWORK_UP_DIR, 'route-{0}'.format(iface))
|
||||
|
@ -984,12 +984,11 @@ def uncomment(path,
|
||||
|
||||
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
|
||||
'''
|
||||
pattern = '^{0}{1}'.format(char, regex.lstrip('^').rstrip('$'))
|
||||
repl = "{0}".format(regex.lstrip('^').rstrip('$'))
|
||||
return replace(path=path,
|
||||
pattern=pattern,
|
||||
repl=repl,
|
||||
backup=backup)
|
||||
return comment_line(path=path,
|
||||
regex=regex,
|
||||
char=char,
|
||||
cmnt=False,
|
||||
backup=backup)
|
||||
|
||||
|
||||
def comment(path,
|
||||
@ -1027,11 +1026,174 @@ def comment(path,
|
||||
|
||||
salt '*' file.comment /etc/modules pcspkr
|
||||
'''
|
||||
repl = "{0}{1}".format(char, regex.lstrip('^').rstrip('$'))
|
||||
return replace(path=path,
|
||||
pattern=regex,
|
||||
repl=repl,
|
||||
backup=backup)
|
||||
return comment_line(path=path,
|
||||
regex=regex,
|
||||
char=char,
|
||||
cmnt=True,
|
||||
backup=backup)
|
||||
|
||||
|
||||
def comment_line(path,
|
||||
regex,
|
||||
char='#',
|
||||
cmnt=True,
|
||||
backup='.bak'):
|
||||
r'''
|
||||
Comment or Uncomment a line in a text file.
|
||||
|
||||
:param path: string
|
||||
The full path to the text file.
|
||||
|
||||
:param regex: string
|
||||
A regular expression, anchored with ``^``, that matches the line you wish
to comment. Can be as simple as ``^color =``
|
||||
|
||||
:param char: string
|
||||
The character used to comment a line in the type of file you're referencing.
|
||||
Default is ``#``
|
||||
|
||||
:param cmnt: boolean
|
||||
True to comment the line. False to uncomment the line. Default is True.
|
||||
|
||||
:param backup: string
|
||||
The file extension to give the backup file. Default is ``.bak``
|
||||
|
||||
:return: boolean
|
||||
Returns True if successful, False if not
|
||||
|
||||
CLI Example:
|
||||
|
||||
The following example will comment out the ``pcspkr`` line in the
|
||||
``/etc/modules`` file using the default ``#`` character and create a backup
|
||||
file named ``modules.bak``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.comment_line '/etc/modules' '^pcspkr'
|
||||
|
||||
|
||||
CLI Example:
|
||||
|
||||
The following example will uncomment the ``log_level`` setting in the
``minion`` config file if it is set to either ``warning``, ``info``, or
``debug``, using the ``#`` character, and create a backup file named
``minion.bk``
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
|
||||
'''
|
||||
# Get the regex for comment or uncomment
|
||||
if cmnt:
|
||||
regex = '{0}({1}){2}'.format(
|
||||
'^' if regex.startswith('^') else '',
|
||||
regex.lstrip('^').rstrip('$'),
|
||||
'$' if regex.endswith('$') else '')
|
||||
else:
|
||||
regex = '^{0}({1}){2}'.format(
|
||||
char,
|
||||
regex.lstrip('^').rstrip('$'),
|
||||
'$' if regex.endswith('$') else '')
|
||||
|
||||
# Load the real path to the file
|
||||
path = os.path.realpath(os.path.expanduser(path))
|
||||
|
||||
# Make sure the file exists
|
||||
if not os.path.exists(path):
|
||||
raise SaltInvocationError('File not found: {0}'.format(path))
|
||||
|
||||
# Make sure it is a text file
|
||||
if not salt.utils.istextfile(path):
|
||||
raise SaltInvocationError(
|
||||
'Cannot perform string replacements on a binary file: {0}'.format(path))
|
||||
|
||||
# First check the whole file, determine whether to make the replacement
|
||||
# Searching first avoids modifying the time stamp if there are no changes
|
||||
found = False
|
||||
# Lists for comparing changes
|
||||
orig_file = []
|
||||
new_file = []
|
||||
# Buffer size for fopen
|
||||
bufsize = os.path.getsize(path)
|
||||
try:
|
||||
# Use a read-only handle to open the file
|
||||
with salt.utils.fopen(path,
|
||||
mode='rb',
|
||||
buffering=bufsize) as r_file:
|
||||
# Loop through each line of the file and look for a match
|
||||
for line in r_file:
|
||||
# Is it in this line
|
||||
if re.match(regex, line):
|
||||
# Load lines into dictionaries, set found to True
|
||||
orig_file.append(line)
|
||||
if cmnt:
|
||||
new_file.append('{0}{1}'.format(char, line))
|
||||
else:
|
||||
new_file.append(line.lstrip(char))
|
||||
found = True
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError(
|
||||
"Unable to open file '{0}'. "
|
||||
"Exception: {1}".format(path, exc)
|
||||
)
|
||||
|
||||
# We've searched the whole file. If we didn't find anything, return False
|
||||
if not found:
|
||||
return False
|
||||
|
||||
# Create a copy to read from and to use as a backup later
|
||||
try:
|
||||
temp_file = _mkstemp_copy(path=path, preserve_inode=False)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError("Exception: {0}".format(exc))
|
||||
|
||||
try:
|
||||
# Open the file in write mode
|
||||
with salt.utils.fopen(path,
|
||||
mode='wb',
|
||||
buffering=bufsize) as w_file:
|
||||
try:
|
||||
# Open the temp file in read mode
|
||||
with salt.utils.fopen(temp_file,
|
||||
mode='rb',
|
||||
buffering=bufsize) as r_file:
|
||||
# Loop through each line of the file and look for a match
|
||||
for line in r_file:
|
||||
try:
|
||||
# Is it in this line
|
||||
if re.match(regex, line):
|
||||
# Write the new line
|
||||
if cmnt:
|
||||
w_file.write('{0}{1}'.format(char, line))
|
||||
else:
|
||||
w_file.write(line.lstrip(char))
|
||||
else:
|
||||
# Write the existing line (no change)
|
||||
w_file.write(line)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError(
|
||||
"Unable to write file '{0}'. Contents may "
|
||||
"be truncated. Temporary file contains copy "
|
||||
"at '{1}'. "
|
||||
"Exception: {2}".format(path, temp_file, exc)
|
||||
)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError("Exception: {0}".format(exc))
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError("Exception: {0}".format(exc))
|
||||
|
||||
# Move the backup file to the original directory
|
||||
backup_name = '{0}{1}'.format(path, backup)
|
||||
try:
|
||||
shutil.move(temp_file, backup_name)
|
||||
except (OSError, IOError) as exc:
|
||||
raise CommandExecutionError(
|
||||
"Unable to move the temp file '{0}' to the "
|
||||
"backup file '{1}'. "
|
||||
"Exception: {2}".format(path, temp_file, exc)
|
||||
)
|
||||
|
||||
# Return a diff using the two lists
|
||||
return ''.join(difflib.unified_diff(orig_file, new_file))
|
||||
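The regex rewriting at the top of ``comment_line`` is easiest to see with concrete input. A small, stand-alone illustration (not taken from the commit) of the pattern built for comment mode versus uncomment mode:

.. code-block:: python

    import re

    regex = '^pcspkr'
    char = '#'

    # Comment mode: keep the caller's anchors and group the body; a match means
    # the line is currently uncommented and should gain the comment character.
    cmnt_re = '{0}({1}){2}'.format('^' if regex.startswith('^') else '',
                                   regex.lstrip('^').rstrip('$'),
                                   '$' if regex.endswith('$') else '')

    # Uncomment mode: additionally require the comment character up front.
    uncmnt_re = '^{0}({1}){2}'.format(char,
                                      regex.lstrip('^').rstrip('$'),
                                      '$' if regex.endswith('$') else '')

    print(cmnt_re, bool(re.match(cmnt_re, 'pcspkr')))       # ^(pcspkr) True
    print(uncmnt_re, bool(re.match(uncmnt_re, '#pcspkr')))  # ^#(pcspkr) True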
|
||||
|
||||
def _get_flags(flags):
|
||||
@ -3826,6 +3988,8 @@ def manage_file(name,
|
||||
if contents is not None:
|
||||
# Write the static contents to a temporary file
|
||||
tmp = salt.utils.mkstemp(text=True)
|
||||
if salt.utils.is_windows():
|
||||
contents = os.linesep.join(contents.splitlines())
|
||||
with salt.utils.fopen(tmp, 'w') as tmp_:
|
||||
tmp_.write(str(contents))
|
||||
|
||||
@ -3998,6 +4162,8 @@ def manage_file(name,
|
||||
if contents is not None:
|
||||
# Write the static contents to a temporary file
|
||||
tmp = salt.utils.mkstemp(text=True)
|
||||
if salt.utils.is_windows():
|
||||
contents = os.linesep.join(contents.splitlines())
|
||||
with salt.utils.fopen(tmp, 'w') as tmp_:
|
||||
tmp_.write(str(contents))
|
||||
# Copy into place
|
||||
|
@ -44,17 +44,30 @@ def _git_run(cmd, cwd=None, runas=None, identity=None, **kwargs):
|
||||
# try each of the identities, independently
|
||||
for id_file in identity:
|
||||
env = {
|
||||
'GIT_SSH': os.path.join(utils.templates.TEMPLATE_DIRNAME,
|
||||
'git/ssh-id-wrapper'),
|
||||
'GIT_IDENTITY': id_file
|
||||
}
|
||||
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=cwd,
|
||||
runas=runas,
|
||||
env=env,
|
||||
python_shell=False,
|
||||
**kwargs)
|
||||
# copy wrapper to area accessible by ``runas`` user
|
||||
# currently no support in Windows for wrapping git ssh
|
||||
if not utils.is_windows():
|
||||
ssh_id_wrapper = os.path.join(utils.templates.TEMPLATE_DIRNAME,
|
||||
'git/ssh-id-wrapper')
|
||||
tmp_file = utils.mkstemp()
|
||||
utils.files.copyfile(ssh_id_wrapper, tmp_file)
|
||||
os.chmod(tmp_file, 0o500)
|
||||
os.chown(tmp_file, __salt__['file.user_to_uid'](runas), -1)
|
||||
env['GIT_SSH'] = tmp_file
|
||||
|
||||
try:
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cwd=cwd,
|
||||
runas=runas,
|
||||
env=env,
|
||||
python_shell=False,
|
||||
**kwargs)
|
||||
finally:
|
||||
if 'GIT_SSH' in env:
|
||||
os.remove(env['GIT_SSH'])
|
||||
|
||||
# if the command was successful, no need to try additional IDs
|
||||
if result['retcode'] == 0:
|
||||
|
@ -208,12 +208,18 @@ def gen_locale(locale, **kwargs):
|
||||
|
||||
if on_debian or on_gentoo: # file-based search
|
||||
search = '/usr/share/i18n/SUPPORTED'
|
||||
valid = __salt__['file.search'](search, '^{0}$'.format(locale))
|
||||
|
||||
def search_locale():
|
||||
return __salt__['file.search'](search,
|
||||
'^{0}$'.format(locale),
|
||||
flags=re.MULTILINE)
|
||||
|
||||
valid = search_locale()
|
||||
if not valid and not locale_info['charmap']:
|
||||
# charmap was not supplied, so try copying the codeset
|
||||
locale_info['charmap'] = locale_info['codeset']
|
||||
locale = salt.utils.locales.join_locale(locale_info)
|
||||
valid = __salt__['file.search'](search, '^{0}$'.format(locale))
|
||||
valid = search_locale()
|
||||
else: # directory-based search
|
||||
if on_suse:
|
||||
search = '/usr/share/locale'
|
||||
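Wrapping the lookup in ``search_locale`` also threads ``re.MULTILINE`` through to ``file.search``, which matters because the pattern is anchored with ``^``/``$``. A stand-alone illustration (the SUPPORTED contents here are made up):

.. code-block:: python

    import re

    supported = 'de_DE.UTF-8 UTF-8\nen_US.UTF-8 UTF-8\n'
    pattern = '^en_US.UTF-8 UTF-8$'

    print(bool(re.search(pattern, supported)))                # False: anchors bind to the whole text
    print(bool(re.search(pattern, supported, re.MULTILINE)))  # True: anchors bind to each line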
|
@ -44,7 +44,8 @@ def _list_mounts():
|
||||
|
||||
for line in mounts.split('\n'):
|
||||
comps = re.sub(r"\s+", " ", line).split()
|
||||
ret[comps[2]] = comps[0]
|
||||
if len(comps) >= 3:
|
||||
ret[comps[2]] = comps[0]
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -144,11 +144,9 @@ def list_upgrades(refresh=False):
|
||||
else:
|
||||
out = call['stdout']
|
||||
|
||||
output = iter(out.splitlines())
|
||||
next(output) # Skip informational output line
|
||||
for line in output:
|
||||
for line in iter(out.splitlines()):
|
||||
comps = line.split(' ')
|
||||
if len(comps) < 2:
|
||||
if len(comps) != 2:
|
||||
continue
|
||||
upgrades[comps[0]] = comps[1]
|
||||
return upgrades
|
||||
|
@ -746,6 +746,8 @@ def install(name=None,
|
||||
|
||||
if pkg_type == 'file':
|
||||
pkg_cmd = 'add'
|
||||
# pkg add has a smaller set of options (i.e. no -y or -n), filter below
|
||||
opts = ''.join([opt for opt in opts if opt in 'AfIMq'])
|
||||
targets = pkg_params
|
||||
elif pkg_type == 'repository':
|
||||
pkg_cmd = 'install'
|
||||
@ -763,6 +765,11 @@ def install(name=None,
|
||||
cmd = '{0} {1} {2} {3} {4}'.format(
|
||||
_pkg(jail, chroot), pkg_cmd, repo_opts, opts, ' '.join(targets)
|
||||
)
|
||||
|
||||
if pkg_cmd == 'add' and salt.utils.is_true(dryrun):
|
||||
# pkg add doesn't have a dryrun mode, so echo out what will be run
|
||||
return cmd
|
||||
|
||||
__salt__['cmd.run'](cmd, python_shell=False, output_loglevel='trace')
|
||||
__context__.pop(_contextkey(jail, chroot), None)
|
||||
__context__.pop(_contextkey(jail, chroot, prefix='pkg.origin'), None)
|
||||
|
@ -1,6 +1,23 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage the registry on Windows
|
||||
Manage the registry on Windows.
|
||||
|
||||
The read_key and set_key functions will be updated in Boron to reflect proper
|
||||
registry usage. The registry has three main components: hives, keys, and values.
|
||||
|
||||
### Hives
|
||||
Hives are the main sections of the registry and all begin with the word HKEY.
|
||||
- HKEY_LOCAL_MACHINE
|
||||
- HKEY_CURRENT_USER
|
||||
- HKEY_USERS
|
||||
|
||||
### Keys
|
||||
Keys are the folders in the registry. Keys can have many nested subkeys. Keys
can have a value assigned to them under the (Default) value name.
|
||||
|
||||
### Values
|
||||
Values are name/data pairs. There can be many values in a key. The (Default)
|
||||
value corresponds to the key itself; the rest are their own name/data pairs.
|
||||
|
||||
:depends: - winreg Python module
|
||||
'''
|
||||
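The hive/key/value split described above can be shown with plain string handling; the path below is just an example and the snippet is not part of the module:

.. code-block:: python

    full_path = r'HKEY_LOCAL_MACHINE\SOFTWARE\Salt\version'

    parts = full_path.split('\\')
    hive = parts[0]               # HKEY_LOCAL_MACHINE -> the hive
    vname = parts[-1]             # version            -> the value name
    key = '\\'.join(parts[1:-1])  # SOFTWARE\Salt      -> the key (folder path)

    print(hive, key, vname)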
@ -35,9 +52,12 @@ class Registry(object):
|
||||
'''
|
||||
def __init__(self):
|
||||
self.hkeys = {
|
||||
"HKEY_USERS": _winreg.HKEY_USERS,
|
||||
"HKEY_CURRENT_USER": _winreg.HKEY_CURRENT_USER,
|
||||
"HKEY_LOCAL_MACHINE": _winreg.HKEY_LOCAL_MACHINE,
|
||||
"HKEY_USERS": _winreg.HKEY_USERS,
|
||||
"HKCU": _winreg.HKEY_CURRENT_USER,
|
||||
"HKLM": _winreg.HKEY_LOCAL_MACHINE,
|
||||
"HKU": _winreg.HKEY_USERS,
|
||||
}
|
||||
|
||||
self.reflection_mask = {
|
||||
@ -45,6 +65,22 @@ class Registry(object):
|
||||
False: _winreg.KEY_ALL_ACCESS | _winreg.KEY_WOW64_64KEY,
|
||||
}
|
||||
|
||||
self.vtype = {
|
||||
"REG_BINARY": _winreg.REG_BINARY,
|
||||
"REG_DWORD": _winreg.REG_DWORD,
|
||||
"REG_EXPAND_SZ": _winreg.REG_EXPAND_SZ,
|
||||
"REG_MULTI_SZ": _winreg.REG_MULTI_SZ,
|
||||
"REG_SZ": _winreg.REG_SZ
|
||||
}
|
||||
|
||||
self.vtype_reverse = {
|
||||
_winreg.REG_BINARY: "REG_BINARY",
|
||||
_winreg.REG_DWORD: "REG_DWORD",
|
||||
_winreg.REG_EXPAND_SZ: "REG_EXPAND_SZ",
|
||||
_winreg.REG_MULTI_SZ: "REG_MULTI_SZ",
|
||||
_winreg.REG_SZ: "REG_SZ"
|
||||
}
|
||||
|
||||
def __getattr__(self, k):
|
||||
try:
|
||||
return self.hkeys[k]
|
||||
@ -63,10 +99,29 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
|
||||
def read_key(hkey, path, key, reflection=True):
|
||||
def read_key(hkey, path, key=None):
|
||||
'''
|
||||
*** Incorrect Usage ***
|
||||
The name of this function is misleading and will be changed to reflect
|
||||
proper usage in the Boron release of Salt. The path option will be removed
|
||||
and the key will be the actual key. See the following issue:
|
||||
|
||||
https://github.com/saltstack/salt/issues/25618
|
||||
|
||||
In order to not break existing state files this function will call the
|
||||
read_value function if a key is passed. Key will be passed as the value
|
||||
name. If key is not passed, this function will return the default value for
|
||||
the key.
|
||||
|
||||
In the Boron release this function will be removed in favor of read_value.
|
||||
***
|
||||
|
||||
Read registry key value
|
||||
|
||||
Returns the first unnamed value (Default) as a string.
|
||||
Returns None if the first unnamed value is empty.
Returns False if the key is not found.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
@ -74,19 +129,115 @@ def read_key(hkey, path, key, reflection=True):
|
||||
salt '*' reg.read_key HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version'
|
||||
'''
|
||||
|
||||
ret = {'hive': hkey,
|
||||
'key': path,
|
||||
'vdata': None,
|
||||
'success': True}
|
||||
|
||||
if key: # This if statement will be removed in Boron
|
||||
salt.utils.warn_until('Boron', 'Use reg.read_value to read a registry '
'value. This functionality will be '
'removed in Salt Boron')
|
||||
return read_value(hive=hkey,
|
||||
key=path,
|
||||
vname=key)
|
||||
|
||||
registry = Registry()
|
||||
hkey2 = getattr(registry, hkey)
|
||||
access_mask = registry.reflection_mask[reflection]
|
||||
hive = registry.hkeys[hkey]
|
||||
|
||||
try:
|
||||
handle = _winreg.OpenKeyEx(hkey2, path, 0, access_mask)
|
||||
return _winreg.QueryValueEx(handle, key)[0]
|
||||
except Exception:
|
||||
return None
|
||||
value = _winreg.QueryValue(hive, path)
|
||||
if value:
|
||||
ret['vdata'] = value
|
||||
else:
|
||||
ret['vdata'] = None
|
||||
ret['comment'] = 'Empty Value'
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.debug(exc)
|
||||
ret['comment'] = '{0}'.format(exc)
|
||||
ret['success'] = False
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def set_key(hkey, path, key, value, vtype='REG_DWORD', reflection=True):
|
||||
def read_value(hive, key, vname=None):
|
||||
r'''
|
||||
Reads a registry value or the default value for a key.
|
||||
|
||||
:param hive: string
|
||||
The name of the hive. Can be one of the following
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USER or HKU
|
||||
|
||||
:param key: string
|
||||
The key (looks like a path) to the value name.
|
||||
|
||||
:param vname: string
|
||||
The value name. These are the individual name/data pairs under the key. If
|
||||
not passed, the key (Default) value will be returned
|
||||
|
||||
:return: dict
|
||||
A dictionary containing the passed settings as well as the value_data if
|
||||
successful. If unsuccessful, sets success to False
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
|
||||
'''
|
||||
|
||||
# Setup the return array
|
||||
ret = {'hive': hive,
|
||||
'key': key,
|
||||
'vname': vname,
|
||||
'vdata': None,
|
||||
'success': True}
|
||||
|
||||
# If no name is passed, the default value of the key will be returned
|
||||
# The value name is Default
|
||||
if not vname:
|
||||
ret['vname'] = '(Default)'
|
||||
|
||||
registry = Registry()
|
||||
hive = registry.hkeys[hive]
|
||||
|
||||
try:
|
||||
handle = _winreg.OpenKey(hive, key)
|
||||
value, vtype = _winreg.QueryValueEx(handle, vname)
|
||||
if value:
|
||||
ret['vdata'] = value
|
||||
ret['vtype'] = registry.vtype_reverse[vtype]
|
||||
else:
|
||||
ret['comment'] = 'Empty Value'
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.debug(exc)
|
||||
ret['comment'] = '{0}'.format(exc)
|
||||
ret['success'] = False
|
||||
|
||||
return ret
|
||||
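For reference, the ``OpenKey``/``QueryValueEx`` pattern that ``read_value`` is built on can be exercised directly. A Windows-only sketch against a value that normally exists, not a call into the module:

.. code-block:: python

    import winreg  # exposed as _winreg on Python 2

    handle = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                            r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
    try:
        vdata, vtype = winreg.QueryValueEx(handle, 'ProductName')
        print(vdata, vtype)  # e.g. Windows Server 2012 R2 Standard 1  (1 == winreg.REG_SZ)
    finally:
        winreg.CloseKey(handle)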
|
||||
|
||||
def set_key(hkey, path, value, key=None, vtype='REG_DWORD', reflection=True):
|
||||
'''
|
||||
*** Incorrect Usage ***
|
||||
The name of this function is misleading and will be changed to reflect
|
||||
proper usage in the Boron release of Salt. The path option will be removed
|
||||
and the key will be the actual key. See the following issue:
|
||||
|
||||
https://github.com/saltstack/salt/issues/25618
|
||||
|
||||
In order to not break existing state files this function will call the
|
||||
set_value function if a key is passed. Key will be passed as the value
|
||||
name. If key is not passed, the (Default) value of the key will be set.
|
||||
|
||||
In the Boron release this function will be removed in favor of set_value.
|
||||
***
|
||||
|
||||
Set a registry key
|
||||
|
||||
vtype: http://docs.python.org/2/library/_winreg.html#value-types
|
||||
|
||||
CLI Example:
|
||||
@ -95,29 +246,104 @@ def set_key(hkey, path, key, value, vtype='REG_DWORD', reflection=True):
|
||||
|
||||
salt '*' reg.set_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' '0.97' REG_DWORD
|
||||
'''
|
||||
|
||||
if key: # This if statement will be removed in Boron
|
||||
salt.utils.warn_until('Boron', 'Use reg.set_value to set a registry '
'value. This functionality will be '
'removed in Salt Boron')
|
||||
return set_value(hive=hkey,
|
||||
key=path,
|
||||
vname=key,
|
||||
vdata=value,
|
||||
vtype=vtype)
|
||||
|
||||
registry = Registry()
|
||||
hkey2 = getattr(registry, hkey)
|
||||
hive = registry.hkeys[hkey]
|
||||
vtype = registry.vtype['REG_SZ']
|
||||
|
||||
try:
|
||||
_winreg.SetValue(hive, path, vtype, value)
|
||||
return True
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.error(exc)
|
||||
return False
|
||||
|
||||
|
||||
def set_value(hive, key, vname=None, vdata=None, vtype='REG_SZ', reflection=True):
|
||||
'''
|
||||
Sets a registry value.
|
||||
|
||||
:param hive: string
|
||||
The name of the hive. Can be one of the following
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USERS or HKU
|
||||
|
||||
:param key: string
|
||||
The key (looks like a path) to the value name.
|
||||
|
||||
:param vname: string
|
||||
The value name. These are the individual name/data pairs under the key. If
|
||||
not passed, the key (Default) value will be set.
|
||||
|
||||
:param vdata: string
|
||||
The value data to be set.
|
||||
|
||||
:param vtype: string
|
||||
The value type. Can be one of the following:
|
||||
- REG_BINARY
|
||||
- REG_DWORD
|
||||
- REG_EXPAND_SZ
|
||||
- REG_MULTI_SZ
|
||||
- REG_SZ
|
||||
|
||||
:param reflection: boolean
|
||||
A boolean value indicating that the value should also be set in the
|
||||
Wow6432Node portion of the registry. Only applies to 64 bit Windows. This
|
||||
setting is ignored for 32 bit Windows.
|
||||
|
||||
:return: boolean
|
||||
Returns True if successful, False if not
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2'
|
||||
'''
|
||||
registry = Registry()
|
||||
hive = registry.hkeys[hive]
|
||||
vtype = registry.vtype[vtype]
|
||||
access_mask = registry.reflection_mask[reflection]
|
||||
|
||||
try:
|
||||
_type = getattr(_winreg, vtype)
|
||||
except AttributeError:
|
||||
return False
|
||||
|
||||
try:
|
||||
handle = _winreg.OpenKey(hkey2, path, 0, access_mask)
|
||||
_winreg.SetValueEx(handle, key, 0, _type, value)
|
||||
handle = _winreg.CreateKeyEx(hive, key, 0, access_mask)
|
||||
_winreg.SetValueEx(handle, vname, 0, vtype, vdata)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except Exception:
|
||||
handle = _winreg.CreateKeyEx(hkey2, path, 0, access_mask)
|
||||
_winreg.SetValueEx(handle, key, 0, _type, value)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.error(exc)
|
||||
return False
|
||||
|
||||
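The write path follows the same shape: ``CreateKeyEx`` for a handle, then ``SetValueEx`` with one of the ``REG_*`` type constants. A Windows-only sketch using a hypothetical ``SaltExample`` key (note that it creates that key as a side effect):

.. code-block:: python

    import winreg

    handle = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, r'SOFTWARE\SaltExample',
                                0, winreg.KEY_ALL_ACCESS)
    try:
        winreg.SetValueEx(handle, 'version', 0, winreg.REG_SZ, '2015.5.2')
        print(winreg.QueryValueEx(handle, 'version'))  # ('2015.5.2', 1); 1 == winreg.REG_SZ
    finally:
        winreg.CloseKey(handle)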
|
||||
def create_key(hkey, path, key, value=None, reflection=True):
|
||||
def create_key(hkey, path, key=None, value=None, reflection=True):
|
||||
'''
|
||||
*** Incorrect Usage ***
|
||||
The name of this function is misleading and will be changed to reflect
|
||||
proper usage in the Boron release of Salt. The path option will be removed
|
||||
and the key will be the actual key. See the following issue:
|
||||
|
||||
https://github.com/saltstack/salt/issues/25618
|
||||
|
||||
In order to not break existing state files this function will call the
|
||||
set_value function if key is passed. Key will be passed as the value name.
|
||||
If key is not passed, this function will return the default value for the
|
||||
key.
|
||||
|
||||
In the Boron release path will be removed and key will be the path. You will
|
||||
not pass value.
|
||||
***
|
||||
|
||||
Create a registry key
|
||||
|
||||
CLI Example:
|
||||
@ -126,24 +352,48 @@ def create_key(hkey, path, key, value=None, reflection=True):
|
||||
|
||||
salt '*' reg.create_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' '0.97'
|
||||
'''
|
||||
if key: # This if statement will be removed in Boron
|
||||
salt.utils.warn_until('Boron', 'Use reg.set_value to set a registry '
'value. This functionality will be '
'removed in Salt Boron')
|
||||
return set_value(hive=hkey,
|
||||
key=path,
|
||||
vname=key,
|
||||
vdata=value,
|
||||
vtype='REG_SZ')
|
||||
|
||||
registry = Registry()
|
||||
hkey2 = getattr(registry, hkey)
|
||||
hive = registry.hkeys[hkey]
|
||||
key = path
|
||||
access_mask = registry.reflection_mask[reflection]
|
||||
|
||||
try:
|
||||
handle = _winreg.OpenKey(hkey2, path, 0, access_mask)
|
||||
handle = _winreg.CreateKeyEx(hive, key, 0, access_mask)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except Exception:
|
||||
handle = _winreg.CreateKeyEx(hkey2, path, 0, access_mask)
|
||||
if value:
|
||||
_winreg.SetValueEx(handle, key, 0, _winreg.REG_DWORD, value)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.error(exc)
|
||||
return False
|
||||
|
||||
|
||||
def delete_key(hkey, path, key, reflection=True):
|
||||
def delete_key(hkey, path, key=None, reflection=True):
|
||||
'''
|
||||
*** Incorrect Usage ***
|
||||
The name of this function is misleading and will be changed to reflect
|
||||
proper usage in the Boron release of Salt. The path option will be removed
|
||||
and the key will be the actual key. See the following issue:
|
||||
|
||||
https://github.com/saltstack/salt/issues/25618
|
||||
|
||||
In order to not break existing state files this function will call the
|
||||
delete_value function if a key is passed. Key will be passed as the value
|
||||
name. If key is not passed, the key itself will be deleted.
|
||||
|
||||
In the Boron release path will be removed and key will be the path.
|
||||
reflection will also be removed.
|
||||
***
|
||||
|
||||
Delete a registry key
|
||||
|
||||
Note: This cannot delete a key with subkeys
|
||||
@ -152,24 +402,71 @@ def delete_key(hkey, path, key, reflection=True):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' reg.delete_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
|
||||
salt '*' reg.delete_key HKEY_CURRENT_USER 'SOFTWARE\\Salt'
|
||||
'''
|
||||
|
||||
if key: # This if statement will be removed in Boron
|
||||
salt.utils.warn_until('Boron', 'Use reg.delete_value to delete a registry '
'value. This functionality will be '
'removed in Salt Boron')
|
||||
return delete_value(hive=hkey,
|
||||
key=path,
|
||||
vname=key,
|
||||
reflection=reflection)
|
||||
|
||||
registry = Registry()
|
||||
hive = registry.hkeys[hkey]
|
||||
key = path
|
||||
|
||||
try:
|
||||
_winreg.DeleteKey(hive, key)
|
||||
return True
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
log.error(exc)
|
||||
return False
|
||||
|
||||
|
||||
def delete_value(hive, key, vname=None, reflection=True):
|
||||
'''
|
||||
Deletes a registry value.
|
||||
|
||||
:param hive: string
|
||||
The name of the hive. Can be one of the following
|
||||
- HKEY_LOCAL_MACHINE or HKLM
|
||||
- HKEY_CURRENT_USER or HKCU
|
||||
- HKEY_USERS or HKU
|
||||
|
||||
:param key: string
|
||||
The key (looks like a path) to the value name.
|
||||
|
||||
:param vname: string
|
||||
The value name. These are the individual name/data pairs under the key. If
|
||||
not passed, the key (Default) value will be deleted.
|
||||
|
||||
:param reflection: boolean
|
||||
A boolean value indicating that the value should also be set in the
|
||||
Wow6432Node portion of the registry. Only applies to 64 bit Windows. This
|
||||
setting is ignored for 32 bit Windows.
|
||||
|
||||
:return: boolean
|
||||
Returns True if successful, False if not
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
|
||||
'''
|
||||
registry = Registry()
|
||||
hkey2 = getattr(registry, hkey)
|
||||
hive = registry.hkeys[hive]
|
||||
access_mask = registry.reflection_mask[reflection]
|
||||
|
||||
try:
|
||||
handle = _winreg.OpenKey(hkey2, path, 0, access_mask)
|
||||
_winreg.DeleteKeyEx(handle, key)
|
||||
handle = _winreg.OpenKey(hive, key, 0, access_mask)
|
||||
_winreg.DeleteValue(handle, vname)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
_winreg.DeleteValue(handle, key)
|
||||
_winreg.CloseKey(handle)
|
||||
return True
|
||||
except Exception:
|
||||
except WindowsError as exc: # pylint: disable=E0602
|
||||
_winreg.CloseKey(handle)
|
||||
log.error(exc)
|
||||
return False
|
||||
|
@ -30,12 +30,18 @@ Connection module for Amazon S3
|
||||
|
||||
SSL verification may also be turned off in the configuration:
|
||||
|
||||
s3.verify_ssl: False
|
||||
s3.verify_ssl: False
|
||||
|
||||
This is required if using S3 bucket names that contain a period, as
|
||||
these will not match Amazon's S3 wildcard certificates. Certificate
|
||||
verification is enabled by default.
|
||||
|
||||
AWS region may be specified in the configuration:
|
||||
|
||||
s3.location: eu-central-1
|
||||
|
||||
Default is us-east-1.
|
||||
|
||||
This module should be usable to query other S3-like services, such as
|
||||
Eucalyptus.
|
||||
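The new ``location`` handling in this module follows the same precedence as the other credentials: an explicit argument wins, then the ``s3.location`` config option, and when both are unset the S3 utility falls back to the documented ``us-east-1`` default. A minimal stand-alone sketch of that fallback, with the config lookup stubbed:

.. code-block:: python

    def resolve_location(location, configured):
        # Mirrors _get_key: only consult the s3.location option when no explicit
        # location was passed; None defers to the utility's default region.
        if location is None and configured is not None:
            location = configured
        return location

    print(resolve_location(None, 'eu-central-1'))         # 'eu-central-1'
    print(resolve_location('us-west-2', 'eu-central-1'))  # 'us-west-2'
    print(resolve_location(None, None))                   # None -> default applies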
|
||||
@ -61,7 +67,7 @@ def __virtual__():
|
||||
|
||||
|
||||
def delete(bucket, path=None, action=None, key=None, keyid=None,
|
||||
service_url=None, verify_ssl=None):
|
||||
service_url=None, verify_ssl=None, location=None):
|
||||
'''
|
||||
Delete a bucket, or delete an object from a bucket.
|
||||
|
||||
@ -73,8 +79,8 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
|
||||
|
||||
salt myminion s3.delete mybucket remoteobject
|
||||
'''
|
||||
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
|
||||
verify_ssl)
|
||||
key, keyid, service_url, verify_ssl, location = _get_key(
|
||||
key, keyid, service_url, verify_ssl, location)
|
||||
|
||||
return salt.utils.s3.query(method='DELETE',
|
||||
bucket=bucket,
|
||||
@ -83,12 +89,13 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl)
|
||||
verify_ssl=verify_ssl,
|
||||
location=location)
|
||||
|
||||
|
||||
def get(bucket=None, path=None, return_bin=False, action=None,
|
||||
local_file=None, key=None, keyid=None, service_url=None,
|
||||
verify_ssl=None):
|
||||
verify_ssl=None, location=None):
|
||||
'''
|
||||
List the contents of a bucket, or return an object from a bucket. Set
|
||||
return_bin to True in order to retrieve an object wholesale. Otherwise,
|
||||
@ -140,8 +147,8 @@ def get(bucket=None, path=None, return_bin=False, action=None,
|
||||
|
||||
salt myminion s3.get mybucket myfile.png action=acl
|
||||
'''
|
||||
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
|
||||
verify_ssl)
|
||||
key, keyid, service_url, verify_ssl, location = _get_key(
|
||||
key, keyid, service_url, verify_ssl, location)
|
||||
|
||||
return salt.utils.s3.query(method='GET',
|
||||
bucket=bucket,
|
||||
@ -152,11 +159,12 @@ def get(bucket=None, path=None, return_bin=False, action=None,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl)
|
||||
verify_ssl=verify_ssl,
|
||||
location=location)
|
||||
|
||||
|
||||
def head(bucket, path=None, key=None, keyid=None, service_url=None,
|
||||
verify_ssl=None):
|
||||
verify_ssl=None, location=None):
|
||||
'''
|
||||
Return the metadata for a bucket, or an object in a bucket.
|
||||
|
||||
@ -167,8 +175,8 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
|
||||
salt myminion s3.head mybucket
|
||||
salt myminion s3.head mybucket myfile.png
|
||||
'''
|
||||
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
|
||||
verify_ssl)
|
||||
key, keyid, service_url, verify_ssl, location = _get_key(
|
||||
key, keyid, service_url, verify_ssl, location)
|
||||
|
||||
return salt.utils.s3.query(method='HEAD',
|
||||
bucket=bucket,
|
||||
@ -177,11 +185,12 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
|
||||
keyid=keyid,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl,
|
||||
location=location,
|
||||
full_headers=True)
|
||||
|
||||
|
||||
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
|
||||
key=None, keyid=None, service_url=None, verify_ssl=None):
|
||||
key=None, keyid=None, service_url=None, verify_ssl=None, location=None):
|
||||
'''
|
||||
Create a new bucket, or upload an object to a bucket.
|
||||
|
||||
@ -197,8 +206,8 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
|
||||
|
||||
salt myminion s3.put mybucket remotepath local_file=/path/to/file
|
||||
'''
|
||||
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
|
||||
verify_ssl)
|
||||
key, keyid, service_url, verify_ssl, location = _get_key(
|
||||
key, keyid, service_url, verify_ssl, location)
|
||||
|
||||
return salt.utils.s3.query(method='PUT',
|
||||
bucket=bucket,
|
||||
@ -209,10 +218,11 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
service_url=service_url,
|
||||
verify_ssl=verify_ssl)
|
||||
verify_ssl=verify_ssl,
|
||||
location=location)
|
||||
|
||||
|
||||
def _get_key(key, keyid, service_url, verify_ssl):
|
||||
def _get_key(key, keyid, service_url, verify_ssl, location):
|
||||
'''
|
||||
Examine the keys, and populate as necessary
|
||||
'''
|
||||
@ -234,4 +244,7 @@ def _get_key(key, keyid, service_url, verify_ssl):
|
||||
if verify_ssl is None:
|
||||
verify_ssl = True
|
||||
|
||||
return key, keyid, service_url, verify_ssl
|
||||
if location is None and __salt__['config.option']('s3.location') is not None:
|
||||
location = __salt__['config.option']('s3.location')
|
||||
|
||||
return key, keyid, service_url, verify_ssl, location
|
||||
|
@ -733,6 +733,7 @@ def sls(mods,
|
||||
def top(topfn,
|
||||
test=None,
|
||||
queue=False,
|
||||
saltenv=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Execute a specific top file instead of the default
|
||||
@ -769,6 +770,8 @@ def top(topfn,
|
||||
st_ = salt.state.HighState(opts, pillar)
|
||||
st_.push_active()
|
||||
st_.opts['state_top'] = salt.utils.url.create(topfn)
|
||||
if saltenv:
|
||||
st_.opts['state_top_saltenv'] = saltenv
|
||||
try:
|
||||
ret = st_.call_highstate(
|
||||
exclude=kwargs.get('exclude', []),
|
||||
|
@ -48,7 +48,6 @@ def list_():
|
||||
'HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run /reg:64',
|
||||
'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
|
||||
]
|
||||
winver = __grains__['osfullname']
|
||||
for key in keys:
|
||||
autoruns[key] = []
|
||||
cmd = ['reg', 'query', key]
|
||||
@ -57,12 +56,12 @@ def list_():
|
||||
autoruns[key].append(line)
|
||||
|
||||
# Find autoruns in user's startup folder
|
||||
if '7' in winver:
|
||||
user_dir = 'C:\\Users\\'
|
||||
startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
|
||||
else:
|
||||
if os.path.exists('C:\\Documents and Settings\\'):
|
||||
user_dir = 'C:\\Documents and Settings\\'
|
||||
startup_dir = '\\Start Menu\\Programs\\Startup'
|
||||
else:
|
||||
user_dir = 'C:\\Users\\'
|
||||
startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
|
||||
|
||||
for user in os.listdir(user_dir):
|
||||
try:
|
||||
|
@ -61,7 +61,7 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
|
||||
search, _get_flags, extract_hash, _error, _sed_esc, _psed,
|
||||
RE_FLAG_TABLE, blockreplace, prepend, seek_read, seek_write, rename,
|
||||
lstat, path_exists_glob, write, pardir, join, HASHES, comment,
|
||||
uncomment, _add_flags)
|
||||
uncomment, _add_flags, comment_line)
|
||||
|
||||
from salt.utils import namespaced_function as _namespaced_function
|
||||
|
||||
@ -83,7 +83,7 @@ def __virtual__():
|
||||
global remove, append, _error, directory_exists, touch, contains
|
||||
global contains_regex, contains_regex_multiline, contains_glob
|
||||
global find, psed, get_sum, check_hash, get_hash, delete_backup
|
||||
global get_diff, _get_flags, extract_hash
|
||||
global get_diff, _get_flags, extract_hash, comment_line
|
||||
global access, copy, readdir, rmdir, truncate, replace, search
|
||||
global _binary_replace, _get_bkroot, list_backups, restore_backup
|
||||
global blockreplace, prepend, seek_read, seek_write, rename, lstat
|
||||
@ -143,6 +143,7 @@ def __virtual__():
|
||||
join = _namespaced_function(join, globals())
|
||||
comment = _namespaced_function(comment, globals())
|
||||
uncomment = _namespaced_function(uncomment, globals())
|
||||
comment_line = _namespaced_function(comment_line, globals())
|
||||
_mkstemp_copy = _namespaced_function(_mkstemp_copy, globals())
|
||||
_add_flags = _namespaced_function(_add_flags, globals())
|
||||
|
||||
|
@ -3,7 +3,7 @@
|
||||
Manage Windows features via the ServerManager powershell module
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
|
||||
# Import python libs
|
||||
try:
|
||||
@ -14,6 +14,8 @@ except ImportError:
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
@ -68,7 +70,8 @@ def list_available():
|
||||
|
||||
def list_installed():
|
||||
'''
|
||||
List installed features
|
||||
List installed features. Supported on Windows Server 2008 and Windows 8 and
|
||||
newer.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -83,10 +86,12 @@ def list_installed():
|
||||
name = splt.pop(-1)
|
||||
display_name = ' '.join(splt)
|
||||
ret[name] = display_name
|
||||
state = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select InstallState,Name')
|
||||
state = _srvmgr('Get-WindowsFeature -erroraction silentlycontinue -warningaction silentlycontinue | Select Installed,Name')
|
||||
for line in state.splitlines()[2:]:
|
||||
splt = line.split()
|
||||
if splt[0] != 'Installed' and splt[1] in ret:
|
||||
if splt[0] == 'False' and splt[1] in ret:
|
||||
del ret[splt[1]]
|
||||
if '----' in splt[0]:
|
||||
del ret[splt[1]]
|
||||
return ret
|
||||
|
||||
|
@ -15,6 +15,11 @@ NOTE: This currently only works with local user accounts, not domain accounts
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
try:
|
||||
from shlex import quote as _cmd_quote # pylint: disable=E0611
|
||||
except: # pylint: disable=W0702
|
||||
from pipes import quote as _cmd_quote
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
from salt.ext.six import string_types
|
||||
@ -355,14 +360,18 @@ def addgroup(name, group):
|
||||
|
||||
salt '*' user.addgroup username groupname
|
||||
'''
|
||||
name = _cmd_quote(name)
|
||||
group = _cmd_quote(group).lstrip('\'').rstrip('\'')
|
||||
|
||||
user = info(name)
|
||||
if not user:
|
||||
return False
|
||||
if group in user['groups']:
|
||||
return True
|
||||
ret = __salt__['cmd.run_all'](
|
||||
'net localgroup {0} {1} /add'.format(group, name)
|
||||
)
|
||||
|
||||
cmd = 'net localgroup "{0}" {1} /add'.format(group, name)
|
||||
ret = __salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
|
||||
return ret['retcode'] == 0
|
||||
|
||||
|
||||
@ -376,6 +385,9 @@ def removegroup(name, group):
|
||||
|
||||
salt '*' user.removegroup username groupname
|
||||
'''
|
||||
name = _cmd_quote(name)
|
||||
group = _cmd_quote(group).lstrip('\'').rstrip('\'')
|
||||
|
||||
user = info(name)
|
||||
|
||||
if not user:
|
||||
@ -384,9 +396,9 @@ def removegroup(name, group):
|
||||
if group not in user['groups']:
|
||||
return True
|
||||
|
||||
ret = __salt__['cmd.run_all'](
|
||||
'net localgroup {0} {1} /delete'.format(group, name)
|
||||
)
|
||||
cmd = 'net localgroup "{0}" {1} /delete'.format(group, name)
|
||||
ret = __salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
|
||||
return ret['retcode'] == 0
|
||||
|
||||
|
||||
@ -450,10 +462,10 @@ def chfullname(name, fullname):
|
||||
return update(name=name, fullname=fullname)
|
||||
|
||||
|
||||
def chgroups(name, groups, append=False):
|
||||
def chgroups(name, groups, append=True):
|
||||
'''
|
||||
Change the groups this user belongs to, add append to append the specified
|
||||
groups
|
||||
Change the groups this user belongs to. Pass append=False to make the user a
member of only the specified groups.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -469,17 +481,22 @@ def chgroups(name, groups, append=False):
|
||||
if ugrps == set(groups):
|
||||
return True
|
||||
|
||||
name = _cmd_quote(name)
|
||||
|
||||
if not append:
|
||||
for group in ugrps:
|
||||
group = _cmd_quote(group).lstrip('\'').rstrip('\'')
|
||||
if group not in groups:
|
||||
__salt__['cmd.retcode'](
|
||||
'net localgroup {0} {1} /delete'.format(group, name))
|
||||
cmd = 'net localgroup "{0}" {1} /delete'.format(group, name)
|
||||
__salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
|
||||
for group in groups:
|
||||
if group in ugrps:
|
||||
continue
|
||||
__salt__['cmd.retcode'](
|
||||
'net localgroup {0} {1} /add'.format(group, name))
|
||||
group = _cmd_quote(group).lstrip('\'').rstrip('\'')
|
||||
cmd = 'net localgroup "{0}" {1} /add'.format(group, name)
|
||||
__salt__['cmd.run_all'](cmd, python_shell=True)
|
||||
|
||||
agrps = set(list_groups(name))
|
||||
return len(ugrps - agrps) == 0
|
||||
|
||||
|
@ -238,9 +238,9 @@ def create(pool_name, *vdevs, **kwargs):
|
||||
ret[vdev] = '{0} not present on filesystem'.format(vdev)
|
||||
return ret
|
||||
mode = os.stat(vdev).st_mode
|
||||
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode):
|
||||
# Not a block device or file vdev so error and return
|
||||
ret[vdev] = '{0} is not a block device or a file vdev'.format(vdev)
|
||||
if not stat.S_ISBLK(mode) and not stat.S_ISREG(mode) and not stat.S_ISCHR(mode):
|
||||
# Not a block device, file vdev, or character special device so error and return
|
||||
ret[vdev] = '{0} is not a block device, a file vdev, or character special device'.format(vdev)
|
||||
return ret
|
||||
dlist.append(vdev)
|
||||
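The broadened check above now accepts character special devices alongside block devices and regular files. A stand-alone version of the same ``stat`` test (the path is only a convenient example):

.. code-block:: python

    import os
    import stat

    def usable_vdev(path):
        mode = os.stat(path).st_mode
        return stat.S_ISBLK(mode) or stat.S_ISREG(mode) or stat.S_ISCHR(mode)

    print(usable_vdev('/dev/null'))  # True on Unix: /dev/null is a character device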
|
||||
|
@ -98,12 +98,14 @@ _s3_sync_on_update = True # sync cache on update rather than jit
|
||||
|
||||
|
||||
class S3Credentials(object):
|
||||
def __init__(self, key, keyid, bucket, service_url, verify_ssl=True):
|
||||
def __init__(self, key, keyid, bucket, service_url, verify_ssl,
|
||||
location):
|
||||
self.key = key
|
||||
self.keyid = keyid
|
||||
self.bucket = bucket
|
||||
self.service_url = service_url
|
||||
self.verify_ssl = verify_ssl
|
||||
self.location = location
|
||||
|
||||
|
||||
def ext_pillar(minion_id,
|
||||
@ -112,6 +114,7 @@ def ext_pillar(minion_id,
|
||||
key=None,
|
||||
keyid=None,
|
||||
verify_ssl=True,
|
||||
location=None,
|
||||
multiple_env=False,
|
||||
environment='base',
|
||||
prefix='',
|
||||
@ -120,7 +123,8 @@ def ext_pillar(minion_id,
|
||||
Execute a command and read the output as YAML
|
||||
'''
|
||||
|
||||
s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl)
|
||||
s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
|
||||
location)
|
||||
|
||||
# normpath is needed to remove appended '/' if root is empty string.
|
||||
pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
|
||||
@ -235,6 +239,7 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
|
||||
bucket=creds.bucket,
|
||||
service_url=creds.service_url,
|
||||
verify_ssl=creds.verify_ssl,
|
||||
location=creds.location,
|
||||
return_bin=False,
|
||||
params={'prefix': prefix})
|
||||
|
||||
@ -373,5 +378,6 @@ def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
|
||||
service_url=creds.service_url,
|
||||
path=_quote(path),
|
||||
local_file=cached_file_path,
|
||||
verify_ssl=creds.verify_ssl
|
||||
verify_ssl=creds.verify_ssl,
|
||||
location=creds.location
|
||||
)
|
||||
|
@ -382,7 +382,7 @@ class Compiler(object):
|
||||
# Add the requires to the reqs dict and check them
|
||||
# all for recursive requisites.
|
||||
argfirst = next(iter(arg))
|
||||
if argfirst in ('require', 'watch', 'prereq'):
|
||||
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
|
||||
if not isinstance(arg[argfirst], list):
|
||||
errors.append(('The {0}'
|
||||
' statement in state {1!r} in SLS {2!r} '
|
||||
@ -995,7 +995,7 @@ class State(object):
|
||||
'formed as a list'
|
||||
.format(name, body['__sls__'])
|
||||
)
|
||||
if argfirst in ('require', 'watch', 'prereq'):
|
||||
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
|
||||
if not isinstance(arg[argfirst], list):
|
||||
errors.append(
|
||||
'The {0} statement in state {1!r} in '
|
||||
@ -2299,6 +2299,7 @@ class BaseHighState(object):
|
||||
opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:])
|
||||
else:
|
||||
opts['state_top'] = salt.utils.url.create(mopts['state_top'])
|
||||
opts['state_top_saltenv'] = mopts.get('state_top_saltenv', None)
|
||||
opts['nodegroups'] = mopts.get('nodegroups', {})
|
||||
opts['state_auto_order'] = mopts.get(
|
||||
'state_auto_order',
|
||||
@ -2345,7 +2346,8 @@ class BaseHighState(object):
|
||||
]
|
||||
else:
|
||||
found = 0
|
||||
for saltenv in self._get_envs():
|
||||
if self.opts.get('state_top_saltenv', False):
|
||||
saltenv = self.opts['state_top_saltenv']
|
||||
contents = self.client.cache_file(
|
||||
self.opts['state_top'],
|
||||
saltenv
|
||||
@ -2363,6 +2365,25 @@ class BaseHighState(object):
|
||||
saltenv=saltenv
|
||||
)
|
||||
)
|
||||
else:
|
||||
for saltenv in self._get_envs():
|
||||
contents = self.client.cache_file(
|
||||
self.opts['state_top'],
|
||||
saltenv
|
||||
)
|
||||
if contents:
|
||||
found = found + 1
|
||||
else:
|
||||
log.debug('No contents loaded for env: {0}'.format(saltenv))
|
||||
|
||||
tops[saltenv].append(
|
||||
compile_template(
|
||||
contents,
|
||||
self.state.rend,
|
||||
self.state.opts['renderer'],
|
||||
saltenv=saltenv
|
||||
)
|
||||
)
|
||||
|
||||
if found == 0:
|
||||
log.error('No contents found in top file')
|
||||
|
@ -76,7 +76,7 @@ def change(name, context=None, changes=None, lens=None, **kwargs):
|
||||
|
||||
changes
|
||||
List of changes that are issued to Augeas. Available commands are
|
||||
``set``, ``mv``/``move``, ``ins``/``insert``, and ``rm``/``remove``.
|
||||
``set``, ``setm``, ``mv``/``move``, ``ins``/``insert``, and ``rm``/``remove``.
|
||||
|
||||
lens
|
||||
The lens to use, needs to be suffixed with `.lns`, e.g.: `Nginx.lns`. See
|
||||
|
@ -291,7 +291,6 @@ def _load_accumulators():
|
||||
|
||||
|
||||
def _persist_accummulators(accumulators, accumulators_deps):
|
||||
|
||||
accumm_data = {'accumulators': accumulators,
|
||||
'accumulators_deps': accumulators_deps}
|
||||
|
||||
@ -895,17 +894,17 @@ def symlink(
|
||||
if os.path.lexists(backupname):
|
||||
if not force:
|
||||
return _error(ret, ((
|
||||
'File exists where the backup target {0} should go'
|
||||
).format(backupname)))
|
||||
'File exists where the backup target {0} should go'
|
||||
).format(backupname)))
|
||||
elif os.path.isfile(backupname):
|
||||
os.remove(backupname)
|
||||
elif os.path.isdir(backupname):
|
||||
shutil.rmtree(backupname)
|
||||
else:
|
||||
return _error(ret, ((
|
||||
'Something exists where the backup target {0}'
|
||||
'should go'
|
||||
).format(backupname)))
|
||||
'Something exists where the backup target {0}'
|
||||
'should go'
|
||||
).format(backupname)))
|
||||
os.rename(name, backupname)
|
||||
elif force:
|
||||
# Remove whatever is in the way
|
||||
@ -922,8 +921,8 @@ def symlink(
|
||||
.format(name)))
|
||||
else:
|
||||
return _error(ret, ((
|
||||
'Directory exists where the symlink {0} should be'
|
||||
).format(name)))
|
||||
'Directory exists where the symlink {0} should be'
|
||||
).format(name)))
|
||||
|
||||
if not os.path.exists(name):
|
||||
# The link is not present, make it
|
||||
@ -1401,7 +1400,8 @@ def managed(name,
|
||||
|
||||
if not replace and os.path.exists(name):
|
||||
# Check and set the permissions if necessary
|
||||
ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode, follow_symlinks)
|
||||
ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode,
|
||||
follow_symlinks)
|
||||
if __opts__['test']:
|
||||
ret['comment'] = 'File {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
@ -1747,8 +1747,8 @@ def directory(name,
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
'File exists where the backup target {0} should go'
).format(backupname)))
elif os.path.isfile(backupname):
os.remove(backupname)
elif os.path.islink(backupname):
@ -1757,9 +1757,9 @@ def directory(name,
shutil.rmtree(backupname)
else:
return _error(ret, ((
'Something exists where the backup target {0}'
'should go'
).format(backupname)))
'Something exists where the backup target {0}'
'should go'
).format(backupname)))
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
@ -1774,10 +1774,12 @@ def directory(name,
else:
if os.path.isfile(name):
return _error(
ret, 'Specified location {0} exists and is a file'.format(name))
ret,
'Specified location {0} exists and is a file'.format(name))
elif os.path.islink(name):
return _error(
ret, 'Specified location {0} exists and is a symlink'.format(name))
ret,
'Specified location {0} exists and is a symlink'.format(name))

if __opts__['test']:
ret['result'], ret['comment'] = _check_directory(
@ -1838,7 +1840,8 @@ def directory(name,
if not isinstance(recurse, list):
ret['result'] = False
ret['comment'] = '"recurse" must be formed as a list of strings'
elif not set(['user', 'group', 'mode', 'ignore_files', 'ignore_dirs']) >= set(recurse):
elif not set(['user', 'group', 'mode', 'ignore_files',
'ignore_dirs']) >= set(recurse):
ret['result'] = False
ret['comment'] = 'Types for "recurse" limited to "user", ' \
'"group", "mode", "ignore_files, and "ignore_dirs"'
@ -2883,7 +2886,10 @@ def comment(name, regex, char='#', backup='.bak'):
'''
name = os.path.expanduser(name)

ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.comment')

@ -2908,8 +2914,9 @@ def comment(name, regex, char='#', backup='.bak'):
return ret
with salt.utils.fopen(name, 'rb') as fp_:
slines = fp_.readlines()

# Perform the edit
__salt__['file.comment'](name, regex, char, backup)
__salt__['file.comment_line'](name, regex, char, True, backup)

with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
@ -2970,7 +2977,10 @@ def uncomment(name, regex, char='#', backup='.bak'):
'''
name = os.path.expanduser(name)

ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.uncomment')

@ -3004,7 +3014,7 @@ def uncomment(name, regex, char='#', backup='.bak'):
slines = fp_.readlines()

# Perform the edit
__salt__['file.uncomment'](name, regex, char, backup)
__salt__['file.comment_line'](name, regex, char, False, backup)

with salt.utils.fopen(name, 'rb') as fp_:
nlines = fp_.readlines()
@ -4285,7 +4295,8 @@ def serialize(name,

if ret['changes']:
ret['result'] = None
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(name)
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
name)
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
@ -4418,7 +4429,7 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
ret['comment'] = (
'Character device {0} exists and has a different '
'major/minor {1}/{2}. Cowardly refusing to continue'
.format(name, devmaj, devmin)
.format(name, devmaj, devmin)
)
# Check the perms
else:

@ -358,6 +358,13 @@ def latest(name,
if os.path.isdir(target):
# git clone is required, target exists but force is turned on
if force:
if __opts__['test']:
return _neutral_test(
ret,
'Repository {0} is about to be cloned to {1}.'
'Since force option is in use, deleting.'.format(
name, target))

log.debug(('target {0} found, but not a git repository. Since '
'force option is in use, deleting.').format(target))
if os.path.islink(target):

@ -205,6 +205,7 @@ def mounted(name,
'comment',
'defaults',
'delay_connect',
'direct-io-mode',
'intr',
'loop',
'nointr',
@ -226,6 +227,7 @@ def mounted(name,
mount_invisible_keys = [
'actimeo',
'comment',
'direct-io-mode',
'password',
'retry',
'port',

@ -182,7 +182,8 @@ def managed(name, **kwargs):
keyid option must also be set for this option to work.

key_url
URL to retrieve a GPG key from.
URL to retrieve a GPG key from. Allows the usage of ``http://``,
``https://`` as well as ``salt://``.

consolidate
If set to true, this will consolidate all sources definitions to

@ -2,6 +2,11 @@
'''
Manage the registry on Windows
'''
from __future__ import absolute_import

import logging

log = logging.getLogger(__name__)


def __virtual__():
@ -11,20 +16,30 @@ def __virtual__():
return 'reg' if 'reg.read_key' in __salt__ else False


def _parse_key(key):
def _parse_key_value(key):
'''
split the full path in the registry to the key and the rest
'''
splt = key.split("\\")
hive = splt.pop(0)
key = splt.pop(-1)
path = r'\\'.join(splt)
return hive, path, key
vname = splt.pop(-1)
key = r'\\'.join(splt)
return hive, key, vname


def present(name, value, vtype='REG_DWORD', reflection=True):
def _parse_key(key):
'''
Set a registry entry
split the hive from the key
'''
splt = key.split("\\")
hive = splt.pop(0)
key = r'\\'.join(splt)
return hive, key


def present(name, value, vtype='REG_SZ', reflection=True):
'''
Set a registry value

Optionally set ``reflection`` to ``False`` to disable reflection.
``reflection`` has no effect on a 32-bit OS.
@ -42,27 +57,37 @@ def present(name, value, vtype='REG_DWORD', reflection=True):
- value: 0.15.3
- vtype: REG_SZ
- reflection: False

In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``version`` is the value name
So ``version`` will be created in the ``SOFTWARE\\Salt`` key in the
``HKEY_CURRENT_USER`` hive and given the ``REG_SZ`` value of ``0.15.3``.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}

# determine what to do
hive, path, key = _parse_key(name)
if value == __salt__['reg.read_key'](hive, path, key, reflection):
hive, key, vname = _parse_key_value(name)

# Determine what to do
if value == __salt__['reg.read_value'](hive, key, vname)['vdata']:
ret['comment'] = '{0} is already configured'.format(name)
return ret
else:
ret['changes'] = {'reg': 'configured to {0}'.format(value)}

# Check for test option
if __opts__['test']:
ret['result'] = None
return ret

# configure the key
ret['result'] = __salt__['reg.set_key'](hive, path, key, value, vtype,
reflection)
# Configure the value
ret['result'] = __salt__['reg.set_value'](hive, key, vname, value, vtype,
reflection)

if not ret:
ret['changes'] = {}
ret['comment'] = 'could not configure the registry key'
@ -72,7 +97,7 @@ def present(name, value, vtype='REG_DWORD', reflection=True):

def absent(name):
'''
Remove a registry key
Remove a registry value

Example:

@ -80,24 +105,35 @@ def absent(name):

'HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version':
reg.absent

In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``version`` is the value name
So the value ``version`` will be deleted from the ``SOFTWARE\\Salt`` key in
the ``HKEY_CURRENT_USER`` hive.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}

hive, path, key = _parse_key(name)
if not __salt__['reg.read_key'](hive, path, key):
hive, key, vname = _parse_key_value(name)

# Determine what to do
if not __salt__['reg.read_value'](hive, key, vname)['success']:
ret['comment'] = '{0} is already absent'.format(name)
return ret
else:
ret['changes'] = {'reg': 'Removed {0}'.format(name)}

# Check for test option
if __opts__['test']:
ret['result'] = None
return ret

ret['result'] = __salt__['reg.delete_key'](hive, path, key)
# Delete the value
ret['result'] = __salt__['reg.delete_value'](hive, key, vname)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = 'failed to remove registry key {0}'.format(name)

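The new _parse_key_value helper above splits a full registry path into hive, key, and value name, as the docstrings describe. A minimal illustrative sketch of that split (not part of this changeset; parse_key_value is a stand-in name, and a single backslash is used as the join character where the patched code uses the raw string r'\\'):

    def parse_key_value(full_path):
        # 'HKEY_CURRENT_USER\SOFTWARE\Salt\version' -> hive, key, value name
        parts = full_path.split('\\')
        hive = parts.pop(0)        # 'HKEY_CURRENT_USER'
        vname = parts.pop(-1)      # 'version'
        key = '\\'.join(parts)     # 'SOFTWARE\Salt'
        return hive, key, vname

    print(parse_key_value('HKEY_CURRENT_USER\\SOFTWARE\\Salt\\version'))
    # ('HKEY_CURRENT_USER', 'SOFTWARE\\Salt', 'version')
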
@ -95,7 +95,7 @@ def state(
The default salt environment to pull sls files from

ssh
Set to `True` to use the ssh client instaed of the standard salt client
Set to `True` to use the ssh client instead of the standard salt client

roster
In the event of using salt-ssh, a roster system can be set
@ -333,7 +333,7 @@ def function(
based on the returned data dict for individual minions

ssh
Set to `True` to use the ssh client instaed of the standard salt client
Set to `True` to use the ssh client instead of the standard salt client
'''
ret = {'name': name,
'changes': {},
@ -554,6 +554,12 @@ def runner(name, **kwargs):
The name of the function to run
kwargs
Any keyword arguments to pass to the runner function

.. code-block:: yaml

run-manage-up:
salt.runner:
- name: manage.up
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
out = __salt__['saltutil.runner'](name, **kwargs)

@ -1,6 +1,11 @@
# -*- coding: utf-8 -*-
'''
Manage Windows Package Repository

.. note::

This state only loads on minions that have the ``roles: salt-master`` grain
set.
'''
from __future__ import absolute_import

@ -30,8 +35,14 @@ def genrepo(name, force=False, allow_empty=False):
'''
Refresh the winrepo.p file of the repository (salt-run winrepo.genrepo)

if force is True no checks will be made and the repository will be generated
if allow_empty is True then the state will not return an error if there are 0 packages
If ``force`` is ``True`` no checks will be made and the repository will be
generated if ``allow_empty`` is ``True`` then the state will not return an
error if there are 0 packages,

.. note::

This state only loads on minions that have the ``roles: salt-master``
grain set.

Example:

@ -114,7 +114,7 @@ def yamlify_arg(arg):
# Only yamlify if it parses into a non-string type, to prevent
# loss of content due to # as comment character
parsed_arg = yamlloader.load(arg, Loader=yamlloader.SaltYamlSafeLoader)
if isinstance(parsed_arg, six.string_types):
if isinstance(parsed_arg, six.string_types) or parsed_arg is None:
return arg
return parsed_arg
if arg == 'None':

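The yamlify_arg change above stops treating a None parse result as usable data: several plain strings YAML-load to None, so returning the parsed value would silently drop the user's original argument. A small sketch of the inputs in question (not part of this changeset; plain PyYAML's safe_load stands in for the SaltYamlSafeLoader used by the patched code):

    import yaml  # assumes PyYAML is installed

    for raw in ('null', '~', '', '# just a comment'):
        print(repr(raw), '->', repr(yaml.safe_load(raw)))
    # every one of these loads as None, so yamlify_arg now returns
    # the raw string instead of the parsed value
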
@ -63,8 +63,25 @@ def _convert_key_to_str(key):
return key


def get_iam_region(version='latest', url='http://169.254.169.254',
timeout=None, num_retries=5):
'''
Gets instance identity document and returns region
'''
instance_identity_url = '{0}/{1}/latest/dynamic/instance-identity/document'.format(url, version)

region = None
try:
document = _retry_get_url(instance_identity_url, num_retries, timeout)
region = json.loads(document)['region']
except (ValueError, TypeError, KeyError):
# JSON failed to decode
log.error('Failed to read region from instance metadata. Giving up.')
return region


def get_iam_metadata(version='latest', url='http://169.254.169.254',
timeout=None, num_retries=5):
timeout=None, num_retries=5):
'''
Grabs the first IAM role from this instances metadata if it exists.
'''

@ -24,7 +24,7 @@ def running(opts):
data = _read_proc_file(path, opts)
if data is not None:
ret.append(data)
except IOError:
except (IOError, OSError):
# proc files may be removed at any time during this process by
# the minion process that is executing the JID in question, so
# we must ignore ENOENT during this process

@ -1542,7 +1542,7 @@ class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
'-p', '--progress',
default=False,
action='store_true',
help=('Display a progress graph')
help=('Display a progress graph. [Requires `progressbar` python package.]')
)
self.add_option(
'--failhard',

@ -30,7 +30,7 @@ DEFAULT_LOCATION = 'us-east-1'
def query(key, keyid, method='GET', params=None, headers=None,
requesturl=None, return_url=False, bucket=None, service_url=None,
path='', return_bin=False, action=None, local_file=None,
verify_ssl=True, location=DEFAULT_LOCATION, full_headers=False):
verify_ssl=True, location=None, full_headers=False):
'''
Perform a query against an S3-like API. This function requires that a
secret key and the id for that key are passed in. For instance:
@ -38,7 +38,10 @@ def query(key, keyid, method='GET', params=None, headers=None,
s3.keyid: GKTADJGHEIQSXMKKRBJ08H
s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

A service_url may also be specified in the configuration::
If keyid or key is not specified, an attempt to fetch them from EC2 IAM
metadata service will be made.

A service_url may also be specified in the configuration:

s3.service_url: s3.amazonaws.com

@ -58,6 +61,13 @@ def query(key, keyid, method='GET', params=None, headers=None,
This is required if using S3 bucket names that contain a period, as
these will not match Amazon's S3 wildcard certificates. Certificate
verification is enabled by default.

A region may be specified:

s3.location: eu-central-1

If region is not specified, an attempt to fetch the region from EC2 IAM
metadata service will be made. Failing that, default is us-east-1
'''
if not HAS_REQUESTS:
log.error('There was an error: requests is required for s3 access')
@ -77,12 +87,15 @@ def query(key, keyid, method='GET', params=None, headers=None,
endpoint = service_url

# Try grabbing the credentials from the EC2 instance IAM metadata if available
token = None
if not key or not keyid:
iam_creds = iam.get_iam_metadata()
key = iam_creds['secret_key']
keyid = iam_creds['access_key']
token = iam_creds['security_token']

if not location:
location = iam.get_iam_region()
if not location:
location = DEFAULT_LOCATION

data = ''
if method == 'PUT':

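The s3 query changes above make the bucket location optional: an explicit location wins, otherwise the EC2 instance metadata is consulted, and only then does the hard-coded default apply. A minimal sketch of that fallback order (not part of this changeset; resolve_location and get_region_from_metadata are illustrative stand-ins for the patched query() logic and iam.get_iam_region()):

    DEFAULT_LOCATION = 'us-east-1'

    def resolve_location(explicit=None, get_region_from_metadata=lambda: None):
        location = explicit
        if not location:
            # ask the instance metadata service for the region
            location = get_region_from_metadata()
        if not location:
            # metadata unavailable: fall back to the default region
            location = DEFAULT_LOCATION
        return location

    print(resolve_location())                         # 'us-east-1'
    print(resolve_location(explicit='eu-central-1'))  # 'eu-central-1'
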
@ -42,6 +42,68 @@ TEMPLATE_DIRNAME = os.path.join(saltpath[0], 'templates')
SLS_ENCODING = 'utf-8' # this one has no BOM.
SLS_ENCODER = codecs.getencoder(SLS_ENCODING)

ALIAS_WARN = (
'Starting in 2015.5, cmd.run uses python_shell=False by default, '
'which doesn\'t support shellisms (pipes, env variables, etc). '
'cmd.run is currently aliased to cmd.shell to prevent breakage. '
'Please switch to cmd.shell or set python_shell=True to avoid '
'breakage in the future, when this aliasing is removed.'
)
ALIASES = {
'cmd.run': 'cmd.shell',
'cmd': {'run': 'shell'},
}


class AliasedLoader(object):
'''
Light wrapper around the LazyLoader to redirect 'cmd.run' calls to
'cmd.shell', for easy use of shellisms during templating calls

Dotted aliases ('cmd.run') must resolve to another dotted alias
(e.g. 'cmd.shell')

Non-dotted aliases ('cmd') must resolve to a dictionary of function
aliases for that module (e.g. {'run': 'shell'})
'''

def __init__(self, wrapped):
self.wrapped = wrapped

def __getitem__(self, name):
if name in ALIASES:
salt.utils.warn_until('Nitrogen', ALIAS_WARN)
return self.wrapped[ALIASES[name]]
else:
return self.wrapped[name]

def __getattr__(self, name):
if name in ALIASES:
salt.utils.warn_until('Nitrogen', ALIAS_WARN)
return AliasedModule(getattr(self.wrapped, name), ALIASES[name])
else:
return getattr(self.wrapped, name)


class AliasedModule(object):
'''
Light wrapper around module objects returned by the LazyLoader's getattr
for the purposes of `salt.cmd.run()` syntax in templates

Allows for aliasing specific functions, such as `run` to `shell` for easy
use of shellisms during templating calls
'''
def __init__(self, wrapped, aliases):
self.aliases = aliases
self.wrapped = wrapped

def __getattr__(self, name):
if name in self.aliases:
salt.utils.warn_until('Nitrogen', ALIAS_WARN)
return getattr(self.wrapped, self.aliases[name])
else:
return getattr(self.wrapped, name)


def wrap_tmpl_func(render_str):

@ -58,11 +120,7 @@ def wrap_tmpl_func(render_str):
# Alias cmd.run to cmd.shell to make python_shell=True the default for
# templated calls
if 'salt' in kws:
if 'cmd.run' in kws['salt'] and 'cmd.shell' in kws['salt']:
kws['salt']['cmd.run'] = kws['salt']['cmd.shell']
if 'run' in kws['salt'].get('cmd', {}) \
and 'shell' in kws['salt'].get('cmd', {}):
kws['salt']['cmd']['run'] = kws['salt']['cmd']['shell']
kws['salt'] = AliasedLoader(kws['salt'])

# We want explicit context to overwrite the **kws
kws.update(context)

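The AliasedLoader added above is a thin redirection layer: dictionary-style lookups of aliased names ('cmd.run') are transparently routed to their replacements ('cmd.shell'), and everything else passes straight through. A stripped-down sketch of the same pattern (not part of this changeset; AliasedDict and the sample funcs table are illustrative only):

    ALIASES = {'cmd.run': 'cmd.shell'}

    class AliasedDict(object):
        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __getitem__(self, name):
            # redirect aliased names, pass everything else through unchanged
            return self.wrapped[ALIASES.get(name, name)]

    funcs = {'cmd.shell': lambda: 'ran via cmd.shell'}
    print(AliasedDict(funcs)['cmd.run']())  # 'ran via cmd.shell'
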
@ -1766,6 +1766,23 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
finally:
os.remove(source)

def test_issue_25250_force_copy_deletes(self):
'''
ensure force option in copy state does not delete target file
'''
dest = os.path.join(integration.TMP, 'dest')
source = os.path.join(integration.TMP, 'source')
shutil.copyfile(os.path.join(integration.FILES, 'hosts'), source)
shutil.copyfile(os.path.join(integration.FILES, 'file/base/cheese'), dest)

self.run_state('file.copy', name=dest, source=source, force=True)
self.assertTrue(os.path.exists(dest))
self.assertTrue(filecmp.cmp(source, dest))

os.remove(source)
os.remove(dest)


if __name__ == '__main__':
from integration import run_tests
run_tests(FileTest)

@ -114,7 +114,7 @@ class LocalemodTestCase(TestCase):
'''
Tests the return of successful gen_locale on Debian system without a charmap
'''
def file_search(search, pattern):
def file_search(search, pattern, flags):
'''
mock file.search
'''
@ -164,7 +164,7 @@ class LocalemodTestCase(TestCase):
'''
Tests the return of successful gen_locale on Gentoo system without a charmap
'''
def file_search(search, pattern):
def file_search(search, pattern, flags):
'''
mock file.search
'''

@ -1,202 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''

# Import Python libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON
)

ensure_in_syspath('../../')

# Import Salt Libs
from salt.modules import reg


class MockWinReg(object):
'''
Mock class of winreg
'''
HKEY_USERS = "HKEY_USERS"
HKEY_CURRENT_USER = "HKEY_CURRENT_USER"
HKEY_LOCAL_MACHINE = "HKEY_LOCAL_MACHINE"
KEY_ALL_ACCESS = True
KEY_WOW64_64KEY = False
flag = None
flag1 = None
flag2 = None

def __init__(self):
pass

def OpenKeyEx(self, hkey2, path, bol, access_mask):
'''
Mock openKeyEx method
'''
if self.flag:
return hkey2, path, bol, access_mask
else:
raise Exception("Error")

@staticmethod
def QueryValueEx(handle, key):
'''
Mock QueryValueEx method
'''
return [handle, key]

def OpenKey(self, hkey2, path, bol, access_mask):
'''
Mock OpenKey Mothod
'''
if self.flag:
return hkey2, path, bol, access_mask
else:
raise Exception("Error")

@staticmethod
def SetValueEx(handle, key, bol, _type, value):
'''
Mock SetValueEx method
'''
return handle, key, bol, _type, value

@staticmethod
def CloseKey(handle):
'''
Mock CloseKey method
'''
return handle

@staticmethod
def CreateKeyEx(hkey2, path, bol, access_mask):
'''
Mock CreateKeyEx method
'''
return hkey2, path, bol, access_mask

def DeleteKeyEx(self, handle, key):
'''
Mock DeleteKeyEx method
'''
if self.flag1:
return handle, key
else:
raise Exception("Error")

def DeleteValue(self, handle, key):
'''
Mock DeleteValue
'''
if self.flag2:
return handle, key
else:
raise Exception("Error")

reg._winreg = MockWinReg()


@skipIf(NO_MOCK, NO_MOCK_REASON)
class RegTestCase(TestCase):
'''
Test cases for salt.modules.reg
'''
def test_read_key(self):
'''
Test to read registry key value
'''
MockWinReg.flag = False
self.assertEqual(reg.read_key("HKEY_LOCAL_MACHINE",
"SOFTWARE\\Salt",
"2014.7.0"),
None)

MockWinReg.flag = True
self.assertTrue(reg.read_key("HKEY_LOCAL_MACHINE",
"SOFTWARE\\Salt",
"2014.7.0"))

def test_set_key(self):
'''
Test to set a registry key
'''
self.assertFalse(reg.set_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0",
"0.97")
)

MockWinReg.flag = True
self.assertTrue(reg.set_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0",
"0.97",
"OpenKey")
)

MockWinReg.flag = False
self.assertTrue(reg.set_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0",
"0.97",
"OpenKey")
)

def test_create_key(self):
'''
Test to Create a registry key
'''
MockWinReg.flag = True
self.assertTrue(reg.create_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0"
)
)

MockWinReg.flag = False
self.assertTrue(reg.create_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0"
)
)

def test_delete_key(self):
'''
Test to delete key
'''
MockWinReg.flag = True
MockWinReg.flag1 = True
self.assertTrue(reg.delete_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0"
)
)

MockWinReg.flag = True
MockWinReg.flag1 = False
MockWinReg.flag2 = False
self.assertFalse(reg.delete_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0"
)
)

MockWinReg.flag = True
MockWinReg.flag1 = False
MockWinReg.flag2 = True
self.assertTrue(reg.delete_key("HKEY_CURRENT_USER",
"SOFTWARE\\Salt",
"2014.7.0"
)
)

if __name__ == '__main__':
from integration import run_tests
run_tests(RegTestCase, needs_daemon=False)

@ -33,7 +33,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'location')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.delete('bucket'), 'A')

@ -44,7 +44,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'location')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.get(), 'A')

@ -54,7 +54,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'location')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.head('bucket'), 'A')

@ -64,7 +64,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'location')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.put('bucket'), 'A')

@ -877,7 +877,8 @@ class FileTestCase(TestCase):
with patch.dict(filestate.__salt__,
{'file.contains_regex_multiline': mock_t,
'file.search': mock_t,
'file.comment': mock_t}):
'file.comment': mock_t,
'file.comment_line': mock_t}):
with patch.dict(filestate.__opts__, {'test': True}):
comt = ('File {0} is set to be updated'.format(name))
ret.update({'comment': comt, 'result': None})
@ -922,7 +923,8 @@ class FileTestCase(TestCase):
with patch.dict(filestate.__salt__,
{'file.contains_regex_multiline': mock,
'file.search': mock,
'file.uncomment': mock_t}):
'file.uncomment': mock_t,
'file.comment_line': mock_t}):
comt = ('Pattern already uncommented')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(filestate.uncomment(name, regex), ret)

@ -44,10 +44,10 @@ class RegTestCase(TestCase):
'result': True,
'comment': '{0} is already configured'.format(name)}

mock = MagicMock(side_effect=[value, 'a', 'a'])
mock = MagicMock(side_effect=[{'vdata': value}, {'vdata': 'a'}, {'vdata': 'a'}])
mock_t = MagicMock(return_value=True)
with patch.dict(reg.__salt__, {'reg.read_key': mock,
'reg.set_key': mock_t}):
with patch.dict(reg.__salt__, {'reg.read_value': mock,
'reg.set_value': mock_t}):
self.assertDictEqual(reg.present(name, value), ret)

with patch.dict(reg.__opts__, {'test': True}):
@ -72,10 +72,10 @@ class RegTestCase(TestCase):
'result': True,
'comment': '{0} is already absent'.format(name)}

mock = MagicMock(side_effect=[False, True, True])
mock = MagicMock(side_effect=[{'success': False}, {'success': True}, {'success': True}])
mock_t = MagicMock(return_value=True)
with patch.dict(reg.__salt__, {'reg.read_key': mock,
'reg.delete_key': mock_t}):
with patch.dict(reg.__salt__, {'reg.read_value': mock,
'reg.delete_value': mock_t}):
self.assertDictEqual(reg.absent(name), ret)

with patch.dict(reg.__opts__, {'test': True}):