Merge branch 'sam_raet_50' into ephemeral

Conflicts:
	salt/daemons/flo/core.py
This commit is contained in:
Samuel M Smith 2014-07-08 15:17:09 -06:00
commit 995813d535
78 changed files with 1904 additions and 578 deletions

View File

@ -4592,3 +4592,9 @@ source_file = _build/locale/topics/releases/version_numbers.pot
source_lang = en
source_name = topics/releases/version_numbers.rst
[salt.security--index]
file_filter = locale/<lang>/LC_MESSAGES/security/index.po
source_file = _build/locale/security/index.pot
source_lang = en
source_name = security/index.rst

View File

@ -179,6 +179,9 @@ autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. |salt-users| replace:: `salt-users <https://groups.google.com/forum/#!forum/salt-users>`_
.. |salt-announce| replace:: `salt-announce <https://groups.google.com/forum/#!forum/salt-announce>`_
.. |salt-packagers| replace:: `salt-packagers <https://groups.google.com/forum/#!forum/salt-packagers>`_
"""
# A shortcut for linking to tickets on the GitHub issue tracker

View File

@ -31,5 +31,6 @@ Salt Table of Contents
topics/development/index
topics/releases/index
topics/projects/index
security/index
faq
glossary

View File

@ -276,5 +276,8 @@ More information about the project
:doc:`Translate Documentation </topics/development/translating>`
How to help out translating Salt to your language.
:ref:`Security disclosures <disclosure>`
The SaltStack security disclosure policy
.. _`salt-contrib`: https://github.com/saltstack/salt-contrib
.. _`salt-states`: https://github.com/saltstack/salt-states

doc/security/index.rst Normal file
View File

@ -0,0 +1,108 @@
.. _disclosure:
==========================
Security disclosure policy
==========================
:email: security@saltstack.com
:gpg key ID: 4EA0793D
:gpg key fingerprint: ``8ABE 4EFC F0F4 B24B FF2A AF90 D570 F2D3 4EA0 793D``
**gpg public key**:
.. code-block:: text
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG/MacGPG2 v2.0.22 (Darwin)
mQINBFO15mMBEADa3CfQwk5ED9wAQ8fFDku277CegG3U1hVGdcxqKNvucblwoKCb
hRK6u9ihgaO9V9duV2glwgjytiBI/z6lyWqdaD37YXG/gTL+9Md+qdSDeaOa/9eg
7y+g4P+FvU9HWUlujRVlofUn5Dj/IZgUywbxwEybutuzvvFVTzsn+DFVwTH34Qoh
QIuNzQCSEz3Lhh8zq9LqkNy91ZZQO1ZIUrypafspH6GBHHcE8msBFgYiNBnVcUFH
u0r4j1Rav+621EtD5GZsOt05+NJI8pkaC/dDKjURcuiV6bhmeSpNzLaXUhwx6f29
Vhag5JhVGGNQxlRTxNEM86HEFp+4zJQ8m/wRDrGX5IAHsdESdhP+ljDVlAAX/ttP
/Ucl2fgpTnDKVHOA00E515Q87ZHv6awJ3GL1veqi8zfsLaag7rw1TuuHyGLOPkDt
t5PAjsS9R3KI7pGnhqI6bTOi591odUdgzUhZChWUUX1VStiIDi2jCvyoOOLMOGS5
AEYXuWYP7KgujZCDRaTNqRDdgPd93Mh9JI8UmkzXDUgijdzVpzPjYgFaWtyK8lsc
Fizqe3/Yzf9RCVX/lmRbiEH+ql/zSxcWlBQd17PKaL+TisQFXcmQzccYgAxFbj2r
QHp5ABEu9YjFme2Jzun7Mv9V4qo3JF5dmnUk31yupZeAOGZkirIsaWC3hwARAQAB
tDBTYWx0U3RhY2sgU2VjdXJpdHkgVGVhbSA8c2VjdXJpdHlAc2FsdHN0YWNrLmNv
bT6JAj4EEwECACgFAlO15mMCGwMFCQeGH4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJENVw8tNOoHk9z/MP/2vzY27fmVxU5X8joiiturjlgEqQw41IYEmWv1Bw
4WVXYCHP1yu/1MC1uuvOmOd5BlI8YO2C2oyW7d1B0NorguPtz55b7jabCElekVCh
h/H4ZVThiwqgPpthRv/2npXjIm7SLSs/kuaXo6Qy2JpszwDVFw+xCRVL0tH9KJxz
HuNBeVq7abWD5fzIWkmGM9hicG/R2D0RIlco1Q0VNKy8klG+pOFOW886KnwkSPc7
JUYp1oUlHsSlhTmkLEG54cyVzrTP/XuZuyMTdtyTc3mfgW0adneAL6MARtC5UB/h
q+v9dqMf4iD3wY6ctu8KWE8Vo5MUEsNNO9EA2dUR88LwFZ3ZnnXdQkizgR/Aa515
dm17vlNkSoomYCo84eN7GOTfxWcq+iXYSWcKWT4X+h/ra+LmNndQWQBRebVUtbKE
ZDwKmiQz/5LY5EhlWcuU4lVmMSFpWXt5FR/PtzgTdZAo9QKkBjcv97LYbXvsPI69
El1BLAg+m+1UpE1L7zJT1il6PqVyEFAWBxW46wXCCkGssFsvz2yRp0PDX8A6u4yq
rTkt09uYht1is61joLDJ/kq3+6k8gJWkDOW+2NMrmf+/qcdYCMYXmrtOpg/wF27W
GMNAkbdyzgeX/MbUBCGCMdzhevRuivOI5bu4vT5s3KdshG+yhzV45bapKRd5VN+1
mZRquQINBFO15mMBEAC5UuLii9ZLz6qHfIJp35IOW9U8SOf7QFhzXR7NZ3DmJsd3
f6Nb/habQFIHjm3K9wbpj+FvaW2oWRlFVvYdzjUq6c82GUUjW1dnqgUvFwdmM835
1n0YQ2TonmyaF882RvsRZrbJ65uvy7SQxlouXaAYOdqwLsPxBEOyOnMPSktW5V2U
IWyxsNP3sADchWIGq9p5D3Y/loyIMsS1dj+TjoQZOKSj7CuRT98+8yhGAY8YBEXu
9r3I9o6mDkuPpAljuMc8r09Im6az2egtK/szKt4Hy1bpSSBZU4W/XR7XwQNywmb3
wxjmYT6Od3Mwj0jtzc3gQiH8hcEy3+BO+NNmyzFVyIwOLziwjmEcw62S57wYKUVn
HD2nglMsQa8Ve0e6ABBMEY7zGEGStva59rfgeh0jUMJiccGiUDTMs0tdkC6knYKb
u/fdRqNYFoNuDcSeLEw4DdCuP01l2W4yY+fiK6hAcL25amjzc+yYo9eaaqTn6RAT
bzdhHQZdpAMxY+vNT0+NhP1Zo5gYBMR65Zp/VhFsf67ijb03FUtdw9N8dHwiR2m8
vVA8kO/gCD6wS2p9RdXqrJ9JhnHYWjiVuXR+f755ZAndyQfRtowMdQIoiXuJEXYw
6XN+/BX81gJaynJYc0uw0MnxWQX+A5m8HqEsbIFUXBYXPgbwXTm7c4IHGgXXdwAR
AQABiQIlBBgBAgAPBQJTteZjAhsMBQkHhh+AAAoJENVw8tNOoHk91rcQAIhxLv4g
duF/J1Cyf6Wixz4rqslBQ7DgNztdIUMjCThg3eB6pvIzY5d3DNROmwU5JvGP1rEw
hNiJhgBDFaB0J/y28uSci+orhKDTHb/cn30IxfuAuqrv9dujvmlgM7JUswOtLZhs
5FYGa6v1RORRWhUx2PQsF6ORg22QAaagc7OlaO3BXBoiE/FWsnEQCUsc7GnnPqi7
um45OJl/pJntsBUKvivEU20fj7j1UpjmeWz56NcjXoKtEvGh99gM5W2nSMLE3aPw
vcKhS4yRyLjOe19NfYbtID8m8oshUDji0XjQ1z5NdGcf2V1YNGHU5xyK6zwyGxgV
xZqaWnbhDTu1UnYBna8BiUobkuqclb4T9k2WjbrUSmTwKixokCOirFDZvqISkgmN
r6/g3w2TRi11/LtbUciF0FN2pd7rj5mWrOBPEFYJmrB6SQeswWNhr5RIsXrQd/Ho
zvNm0HnUNEe6w5YBfA6sXQy8B0Zs6pcgLogkFB15TuHIIIpxIsVRv5z8SlEnB7HQ
Io9hZT58yjhekJuzVQB9loU0C/W0lzci/pXTt6fd9puYQe1DG37pSifRG6kfHxrR
if6nRyrfdTlawqbqdkoqFDmEybAM9/hv3BqriGahGGH/hgplNQbYoXfNwYMYaHuB
aSkJvrOQW8bpuAzgVyd7TyNFv+t1kLlfaRYJ
=wBTJ
-----END PGP PUBLIC KEY BLOCK-----
The SaltStack Security Team is available at security@saltstack.com for
security-related bug reports or questions.
We request that any security-related bugs or issues be reported non-publicly
until the issue can be resolved and a security-fix release can be prepared. At
that time we will release the fix and make a public announcement with upgrade
instructions and download locations.
Security response procedure
============================
SaltStack takes security and the trust of our customers and users very
seriously. Our disclosure policy is intended to resolve security issues as
quickly and safely as is possible.
1. A security report sent to security@saltstack.com is assigned to a team
member. This person is the primary contact for questions and will
coordinate the fix, release, and announcement.
2. The reported issue is reproduced and confirmed. A list of affected projects
and releases is made.
3. Fixes are implemented for all affected projects and releases that are
actively supported. Back-ports of the fix are made to any old releases that
are actively supported.
4. Packagers are notified via the |salt-packagers| mailing list that an issue
was reported and resolved, and that an announcement is incoming.
5. A new release is created and pushed to all affected repositories. The
release documentation provides a full description of the issue, plus any
upgrade instructions or other relevant details.
6. An announcement is made to the |salt-users| and |salt-announce| mailing
lists. The announcement contains a description of the issue and a link to
the full release documentation and download locations.
Receiving security announcements
===============================
The fastest place to receive security announcements is via the |salt-announce|
mailing list. This list is low-traffic.

View File

@ -175,7 +175,9 @@ do this, add ``-l debug`` to the calls to ``salt-master`` and ``salt-minion``.
If you would like to log to the console instead of to the log file, remove the
``-d``.
Once the minion starts, you may see an error like the following::
Once the minion starts, you may see an error like the following:
.. code-block:: bash
zmq.core.error.ZMQError: ipc path "/path/to/your/virtualenv/var/run/salt/minion/minion_event_7824dcbcfd7a8f6755939af70b96249f_pub.ipc" is longer than 107 characters (sizeof(sockaddr_un.sun_path)).

View File

@ -159,7 +159,9 @@ what the master does in response to that event, and it will also include the
rendered SLS file (or any errors generated while rendering the SLS file).
1. Stop the master.
2. Start the master manually::
2. Start the master manually:
.. code-block:: bash
salt-master -l debug

View File

@ -2,7 +2,7 @@
Salt 2014.1.6 Release Notes
===========================
:release: 2014-07-03
:release: 2014-07-08
Version 2014.1.6 is another bugfix release for :doc:`2014.1.0
</topics/releases/2014.1.0>`. Changes include:

View File

@ -62,3 +62,12 @@ Halite
halite
Using Salt at scale
===================
.. toctree::
:maxdepth: 2
intro_scale
the_master
the_minion

View File

@ -0,0 +1,291 @@
===================================
Introduction to using salt at scale
===================================
Using salt at scale can be quite a tricky task. If saltstack is planned to
serve thousands of minions from one or more masters, this tutorial gives
advice on tuning the master and minion settings, general tips on what can or
should be enabled/disabled, and some insight into which errors may be caused
by which situation. It will not go into the details of any required setup
procedure.
For how to install the saltmaster and get everything up and running, please
go here: `Installing saltstack <http://docs.saltstack.com/topics/installation/index.html>`_
Note
This tutorial is mainly intended for environments with at least a thousand
minions, although it cannot hurt to tune a few of the settings mentioned
here in smaller environments.
When used with minions, the term 'many' always means at least a thousand
and 'a few' always means 500.
For simplicity, this tutorial defaults to the standard ports used by salt.
The Master
==========
The most common problems on the salt-master that can occur with many minions
are:
1. too many minions connecting at once
2. too many minions re-connecting at once
3. too many minions returning at once
4. too few resources (CPU/HDD)
The first three have the same cause. It's usually TCP SYN floods that can occur
in different situations when doing certain things without knowing what actually
happens under the hood.
The fourth is caused by masters with few hardware resources in combination
with a possible bug in ZeroMQ. At least that's what it looks like so far
(`Issue 11865 <https://github.com/saltstack/salt/issues/11865>`_,
`Issue 5948 <https://github.com/saltstack/salt/issues/5948>`_,
`Mail thread <https://groups.google.com/forum/#!searchin/salt-users/lots$20of$20minions/salt-users/WxothArv2Do/t12MigMQDFAJ>`_)
None of these problems is actually caused by salt itself. Salt as well as
ZeroMQ can easily handle several thousand minions per master. It's usually
misconfigurations in a few places that can be easily fixed.
To fully understand each problem, it is important to understand how salt works.
Very briefly, the saltmaster offers two services to the minions.
- a job publisher on port 4505
- an open port 4506 to receive the minions returns
All minions are always connected to the publisher on port 4505 and only connect
to the open return port 4506 if necessary. On an idle master, there will only
be connections on port 4505.
Too many minions connecting
===========================
When the minion service is first started up on all machines, the minions
connect to their master's publisher on port 4505. If too many minion services
are started at once, this alone can cause a TCP SYN flood on the master. It can
be easily avoided by not starting too many minions at once, and is rarely a
problem in practice.
It is much more likely that many minions have already made their first
connection to the master and are waiting for their key to be accepted,
checking in every 10 seconds (:conf_minion:`acceptance_wait_time`). With the
default of 10 seconds and a thousand minions, that's about 100 minions
checking in every second. If all keys are now accepted at once with
.. code-block:: bash
$ salt-key -A -y
the master may run into a bug where it consumes 100% CPU and growing amounts
of memory. This has been reported on the mailing list and the issue-tracker
on github a few times (
`Issue 11865 <https://github.com/saltstack/salt/issues/11865>`_,
`Issue 5948 <https://github.com/saltstack/salt/issues/5948>`_,
`Mail thread <https://groups.google.com/forum/#!searchin/salt-users/lots$20of$20minions/salt-users/WxothArv2Do/t12MigMQDFAJ>`_),
but the root cause has not yet been found.
The easiest way around this is to not accept too many minions at once; see the
sketch below. Acceptance only has to be done once, so there is no need to rush.
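If minion IDs follow a predictable naming scheme, acceptance can be staggered
with a small wrapper around ``salt-key``. A minimal sketch, assuming IDs like
``web0*`` through ``web9*`` and a 10-second pause between batches (both are
assumptions, not part of salt):
.. code-block:: python

    # Hedged sketch: accept keys in batches instead of all at once.
    import subprocess
    import time

    for prefix in range(10):
        # the glob below is a hypothetical naming scheme; adjust to yours
        subprocess.call(['salt-key', '-y', '-a', 'web{0}*'.format(prefix)])
        time.sleep(10)  # give the master room to breathe between batches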
Too many minions re-connecting
==============================
This is most likely to happen in the testing phase, when all minion keys have
already been accepted, the framework is being tested, and parameters change
frequently in the master's configuration file.
Upon a service restart, the salt-master generates a new AES key to encrypt
its publications with, but the minions don't yet know about the master's new
AES key. When the first job after the master's restart is published, the
minions realize that they have received a publication they cannot decrypt
and try to re-authenticate on the master.
Because all minions always receive all publications, every single minion that
cannot decrypt the publication will try to re-auth immediately, causing
thousands of minions trying to re-auth at once. This can be avoided by
setting
.. code-block:: yaml
random_reauth_delay: 60
in the minion's configuration file to a higher value to stagger the re-auth
attempts. Increasing this value will of course increase the time it takes
until all minions are reachable again via salt commands.
But salt is not the only part that requires tuning. The ZeroMQ socket
settings on the minion side should also be tweaked.
As described before, the master and the minions are permanently connected
with each other through the publisher on port 4505. Restarting the salt-master
service shuts down the publishing socket on the master only to bring it
back up within seconds.
This change is detected by the ZeroMQ socket on the minion's end. Not being
connected does not really matter to the minion pull socket or the minion.
The pull socket just waits and tries to reconnect, while the minion simply
does not receive publications while disconnected.
In this situation, it's the pull socket's reconnect value (default 100ms)
that might be too low. With each and every minion's pull socket trying to
reconnect within 100ms as soon as the master's publisher port comes back up,
it's a piece of cake to cause a SYN flood on the master's publishing port.
To tune the minion sockets' reconnect attempts, there are a few values in
the sample configuration file (default values):
.. code-block:: yaml
recon_default: 100ms
recon_max: 5000
recon_randomize: True
- recon_default: the default value the socket should use, i.e. 100ms
- recon_max: the max value that the socket should use as a delay before trying to reconnect
- recon_randomize: enables randomization between recon_default and recon_max
To tune these values for an existing environment, a few decisions have to be made:
1. How long can one wait before the minions should be back online and reachable with salt?
2. How many reconnects can the master handle without detecting a SYN flood?
These questions cannot be answered generally. Their answers highly depend
on the hardware and the administrator's requirements.
Here is an example scenario with the goal of having all minions reconnect
within a 60-second time frame after a salt-master service restart.
.. code-block:: yaml
recon_default: 1000
recon_max: 59000
recon_randomize: True
Each minion will have a randomized reconnect value between 'recon_default'
and 'recon_default + recon_max', which in this example means between 1000ms
and 60000ms (or between 1 and 60 seconds). The generated random value will
be doubled after each attempt to reconnect (ZeroMQ default behaviour).
Let's say the generated random value is 11 seconds (or 11000ms).
reconnect 1: wait 11 seconds
reconnect 2: wait 22 seconds
reconnect 3: wait 33 seconds
reconnect 4: wait 44 seconds
reconnect 5: wait 55 seconds
reconnect 6: wait time is bigger than 60 seconds (recon_default + recon_max)
reconnect 7: wait 11 seconds
reconnect 8: wait 22 seconds
reconnect 9: wait 33 seconds
reconnect x: etc.
With a thousand minions this will mean
.. code-block:: math
1000/60 = ~16
roughly 16 connection attempts per second. These values should be altered to
match your environment. Keep in mind, though, that the environment may grow
over time and that more minions might raise the problem again.
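To get a feeling for how these three values interact before rolling them out,
the doubling back-off can be simulated offline. A minimal sketch in plain
Python, assuming the behaviour described above:
.. code-block:: python

    # Hedged simulation of the randomized, doubling reconnect back-off.
    import random

    recon_default = 1000   # ms
    recon_max = 59000      # ms

    delay = random.randint(recon_default, recon_default + recon_max)
    wait = delay
    for attempt in range(1, 10):
        print('reconnect {0}: wait {1:.0f} seconds'.format(attempt, wait / 1000.0))
        wait *= 2  # ZeroMQ doubles the delay after each attempt
        if wait > recon_default + recon_max:
            wait = delay  # start over, as in the listing above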
Too many minions returning at once
==================================
This can also happen during the testing phase, if all minions are addressed at
once with
.. code-block:: bash
$ salt '*' test.ping
it may cause thousands of minions to try to return their data to the
salt-master's open port 4506, also causing a SYN flood if the master can't
handle that many returns at once.
This can be easily avoided with salt's batch mode:
.. code-block:: bash
$ salt '*' test.ping -b 50
This will only address 50 minions at a time while looping through all targeted
minions.
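The same batching is available from the Python API. A hedged sketch, assuming
``salt.client.LocalClient.cmd_batch`` exists in your salt version:
.. code-block:: python

    # Iterate over returns as each batch of 50 minions completes.
    import salt.client

    local = salt.client.LocalClient()
    for ret in local.cmd_batch('*', 'test.ping', batch='50'):
        print(ret)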
Too little resources
=====================
The master's resources always have to match the environment. There is no way
to give good advice without knowing the environment the master is supposed to
run in. But here are some general tuning tips for different situations:
The master has little CPU-Power
-------------------------------
Salt uses RSA key pairs on both the master's and the minion's end. Both
generate 4096-bit key pairs on first start. While the key size for the master
is currently not configurable, the minion's key size can be set to a different
value. For example, with a 2048-bit key:
.. code-block:: yaml
keysize: 2048
With thousands of decryptions, the amount of time that can be saved on the
master's end should not be neglected. See
`Pull Request 9235 <https://github.com/saltstack/salt/pull/9235>`_ for a
reference on how much influence the key size can have.
Downsizing the salt-master's key is not that important, because the minions
do not encrypt as many messages as the master does.
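To quantify the effect for a concrete setup, the primitive salt uses can be
timed directly. A hedged micro-benchmark, assuming M2Crypto is installed; the
iteration count is arbitrary:
.. code-block:: python

    # Rough timing of RSA private-key operations at two key sizes.
    import time
    from M2Crypto import RSA

    for bits in (2048, 4096):
        key = RSA.gen_key(bits, 65537, callback=lambda *args: None)
        start = time.time()
        for _ in range(100):
            key.private_encrypt('salty bacon', RSA.pkcs1_padding)
        print('{0} bits: {1:.2f}s for 100 private-key ops'.format(
            bits, time.time() - start))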
The master has slow disks
-------------------------
By default, the master saves every minion's return for every job in its
job cache. The cache can then be used later to look up results for previous
jobs. The default directory for this is:
.. code-block:: yaml
cachedir: /var/cache/salt
and then in the ``/proc`` directory below it.
Each job's return for every minion is saved in a single file. Over time this
directory can grow immensely, depending on the number of published jobs, and
whether
.. code-block:: yaml
keep_jobs: 24
was raised to keep a job history longer than 24 hours. Saving the files is
not that expensive, but cleaning up can become expensive over time.
.. code-block:: math
250 jobs/day * 2000 minion returns = 500,000 files a day
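To check whether cleanup cost is already an issue, the cache can be inspected
directly. A hedged sketch that counts files older than ``keep_jobs``; the
``/var/cache/salt/master/jobs`` path is an assumption, check your ``cachedir``:
.. code-block:: python

    # Count job-cache files that are old enough to be cleaned up.
    import os
    import time

    jobs_dir = '/var/cache/salt/master/jobs'  # assumed default layout
    keep_jobs = 24  # hours, mirroring the master config value above
    cutoff = time.time() - keep_jobs * 3600

    stale = 0
    for root, dirs, files in os.walk(jobs_dir):
        for fname in files:
            if os.path.getmtime(os.path.join(root, fname)) < cutoff:
                stale += 1
    print('{0} files eligible for cleanup'.format(stale))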
If no job history is needed, the job cache can be disabled:
.. code-block:: yaml
job_cache: False
For legal reasons, it might be required that a permanent job cache exists
for a certain amount of time. If that's the case, there are currently only
two alternatives:
- use returners and disable the job-cache
- use salt-eventsd and disable the job-cache
The first one has the disadvantage of losing the encryption used by salt,
unless the returner implements it.
The second one is not part of the official salt environment and is therefore
not broadly known on the mailing list or by the core salt developers.
`salt-eventsd on github <https://github.com/felskrone/salt/salt-eventsd>`_

View File

@ -87,6 +87,7 @@ class Master(parsers.MasterOptionParser):
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
verify_env(
v_dirs,
self.config['user'],
@ -195,6 +196,7 @@ class Minion(parsers.MinionOptionParser):
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
verify_env(
v_dirs,
self.config['user'],
@ -238,6 +240,8 @@ class Minion(parsers.MinionOptionParser):
self.minion = salt.minion.Minion(self.config)
else:
import salt.daemons.flo
self.daemonize_if_required()
self.set_pidfile()
self.minion = salt.daemons.flo.IofloMinion(self.config)
def start(self):

View File

@ -6,6 +6,7 @@ The main entry point for salt-api
import logging
import multiprocessing
import signal
import os
# Import salt-api libs
import salt.loader
@ -19,27 +20,50 @@ class NetapiClient(object):
'''
def __init__(self, opts):
self.opts = opts
self.processes = []
# pid -> {fun: foo, Process: object}
self.pid_map = {}
self.netapi = salt.loader.netapi(self.opts)
def add_process(self, fun):
'''
Start a netapi child process of "fun"
'''
p = multiprocessing.Process(target=self.netapi[fun])
p.start()
logger.info("Started '{0}' api module with pid {1}".format(fun, p.pid))
self.pid_map[p.pid] = {'fun': fun,
'Process': p}
def run(self):
'''
Load and start all available api modules
'''
netapi = salt.loader.netapi(self.opts)
for fun in netapi:
for fun in self.netapi:
if fun.endswith('.start'):
logger.info("Starting '{0}' api module".format(fun))
p = multiprocessing.Process(target=netapi[fun])
p.start()
self.processes.append(p)
self.add_process(fun)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
pid, exit_status = os.wait()
if pid not in self.pid_map:
logger.info(('Process of pid {0} died, not a known netapi'
' process, will not restart').format(pid))
continue
logger.info(('Process {0} ({1}) died with exit status {2},'
' restarting...').format(self.pid_map[pid]['fun'],
pid,
exit_status))
self.pid_map[pid]['Process'].join(1)
self.add_process(self.pid_map[pid]['fun'])
del self.pid_map[pid]
def kill_children(self, *args):
'''
Kill all of the children
'''
for p in self.processes:
p.terminate()
p.join()
for pid, p_map in self.pid_map.items():
p_map['Process'].terminate()
p_map['Process'].join()
del self.pid_map[pid]

View File

@ -276,6 +276,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
attempts = 5
while attempts > 0:
params_with_headers = params.copy()
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if not location:
@ -294,13 +295,13 @@ def query(params=None, setname=None, requesturl=None, location=None,
DEFAULT_EC2_API_VERSION
)
params['AWSAccessKeyId'] = provider['id']
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = '{0}'.format(timestamp)
params['Version'] = ec2_api_version
keys = sorted(params.keys())
values = map(params.get, keys)
params_with_headers['AWSAccessKeyId'] = provider['id']
params_with_headers['SignatureVersion'] = '2'
params_with_headers['SignatureMethod'] = 'HmacSHA256'
params_with_headers['Timestamp'] = '{0}'.format(timestamp)
params_with_headers['Version'] = ec2_api_version
keys = sorted(params_with_headers.keys())
values = map(params_with_headers.get, keys)
querystring = urllib.urlencode(list(zip(keys, values)))
uri = '{0}\n{1}\n/\n{2}'.format(method.encode('utf-8'),
@ -309,13 +310,13 @@ def query(params=None, setname=None, requesturl=None, location=None,
hashed = hmac.new(provider['key'], uri, hashlib.sha256)
sig = binascii.b2a_base64(hashed.digest())
params['Signature'] = sig.strip()
params_with_headers['Signature'] = sig.strip()
requesturl = 'https://{0}/'.format(endpoint)
log.debug('EC2 Request: {0}'.format(requesturl))
try:
result = requests.get(requesturl, params=params)
result = requests.get(requesturl, params=params_with_headers)
log.debug(
'EC2 Response Status Code: {0}'.format(
# result.getcode()
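The fix above makes the signature cover the same parameter set that is
actually sent. A hedged, self-contained sketch of the Signature Version 2
construction this hunk performs (``endpoint`` and the key are placeholders):

# Standalone sketch of the V2 string-to-sign mirrored from the code above.
import binascii
import hashlib
import hmac
import urllib

def sign_v2(method, endpoint, params, secret_key):
    # Sort the parameters, build the query string, then HMAC-SHA256 it.
    keys = sorted(params.keys())
    querystring = urllib.urlencode([(k, params[k]) for k in keys])
    uri = '{0}\n{1}\n/\n{2}'.format(method, endpoint, querystring)
    hashed = hmac.new(secret_key, uri, hashlib.sha256)
    return binascii.b2a_base64(hashed.digest()).strip()

# Hypothetical usage mirroring the hunk:
# params_with_headers['Signature'] = sign_v2(
#     'GET', 'ec2.us-east-1.amazonaws.com', params_with_headers, provider_key)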

View File

@ -58,6 +58,12 @@ VALID_OPTS = {
'master_finger': str,
'master_shuffle': bool,
'master_alive_interval': int,
'master_sign_key_name': str,
'master_sign_pubkey': bool,
'verify_master_pubkey_sign': bool,
'always_verify_signature': bool,
'master_pubkey_signature': str,
'master_use_pubkey_signature': bool,
'syndic_finger': str,
'user': str,
'root_dir': str,
@ -254,6 +260,9 @@ DEFAULT_MINION_OPTS = {
'master_finger': '',
'master_shuffle': False,
'master_alive_interval': 0,
'verify_master_pubkey_sign': False,
'always_verify_signature': False,
'master_sign_key_name': 'master_sign',
'syndic_finger': '',
'user': 'root',
'root_dir': salt.syspaths.ROOT_DIR,
@ -357,7 +366,7 @@ DEFAULT_MINION_OPTS = {
'random_master': False,
'minion_floscript': os.path.join(FLO_DIR, 'minion.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.01,
'ioflo_period': 0.1,
'ioflo_realtime': True,
'raet_port': 4510,
'restart_on_error': False,
@ -516,6 +525,10 @@ DEFAULT_MASTER_OPTS = {
'queue_dirs': [],
'cli_summary': False,
'max_minions': 0,
'master_sign_key_name': 'master_sign',
'master_sign_pubkey': False,
'master_pubkey_signature': 'master_pubkey_signature',
'master_use_pubkey_signature': False,
}
# ----- Salt Cloud Configuration Defaults ----------------------------------->

View File

@ -14,6 +14,7 @@ import shutil
import hashlib
import logging
import traceback
import binascii
# Import third party libs
try:
@ -146,26 +147,61 @@ class MasterKeys(dict):
'''
The Master Keys class is used to manage the public key pair used for
authentication by the master.
It also generates a signing key-pair if enabled with master_sign_key_name.
'''
def __init__(self, opts):
super(MasterKeys, self).__init__()
self.opts = opts
self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub')
self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem')
self.key = self.__get_keys()
self.token = self.__gen_token()
self.pub_signature = None
def __get_keys(self):
'''
Returns a key objects for the master
'''
if os.path.exists(self.rsa_path):
key = RSA.load_key(self.rsa_path)
log.debug('Loaded master key: {0}'.format(self.rsa_path))
# set names for the signing key-pairs
if opts['master_sign_pubkey']:
# if only the signature is available, use that
if opts['master_use_pubkey_signature']:
self.sig_path = os.path.join(self.opts['pki_dir'],
opts['master_pubkey_signature'])
if os.path.isfile(self.sig_path):
self.pub_signature = salt.utils.fopen(self.sig_path).read()
log.info('Read {0}\'s signature from {1}'
''.format(os.path.basename(self.pub_path),
self.opts['master_pubkey_signature']))
else:
log.info('Generating keys: {0}'.format(self.opts['pki_dir']))
log.error('Signing the master.pub key with a signature is enabled '
'but no signature file found at the defined location '
'{0}'.format(self.sig_path))
log.error('The signature file may either be named differently '
'or may need to be created with \'salt-key --gen-signature\'')
sys.exit(1)
# create a new signing key-pair to sign the master's
# auth-replies when a minion tries to connect
else:
self.pub_sign_path = os.path.join(self.opts['pki_dir'],
opts['master_sign_key_name'] + '.pub')
self.rsa_sign_path = os.path.join(self.opts['pki_dir'],
opts['master_sign_key_name'] + '.pem')
self.sign_key = self.__get_keys(name=opts['master_sign_key_name'])
def __get_keys(self, name='master'):
'''
Returns a key object for a key in the pki-dir
'''
path = os.path.join(self.opts['pki_dir'],
name + '.pem')
if os.path.exists(path):
key = RSA.load_key(path)
log.debug('Loaded {0} key: {1}'.format(name, path))
else:
log.info('Generating {0} keys: {1}'.format(name, self.opts['pki_dir']))
gen_keys(self.opts['pki_dir'],
'master',
name,
self.opts['keysize'],
self.opts.get('user'))
key = RSA.load_key(self.rsa_path)
@ -177,14 +213,30 @@ class MasterKeys(dict):
'''
return self.key.private_encrypt('salty bacon', 5)
def get_pub_str(self):
def get_pub_str(self, name='master'):
'''
Return the string representation of the public key
Return the string representation of a public key
in the pki-directory
'''
if not os.path.isfile(self.pub_path):
path = os.path.join(self.opts['pki_dir'],
name + '.pub')
if not os.path.isfile(path):
key = self.__get_keys()
key.save_pub_key(self.pub_path)
return salt.utils.fopen(self.pub_path, 'r').read()
key.save_pub_key(path)
return salt.utils.fopen(path, 'r').read()
def get_mkey_paths(self):
return self.pub_path, self.rsa_path
def get_sign_paths(self):
return self.pub_sign_path, self.rsa_sign_path
def pubkey_signature(self):
'''
returns the base64 encoded signature from the signature file
or None if the master has its own signing keys
'''
return self.pub_signature
class Auth(object):
@ -296,22 +348,90 @@ class Auth(object):
return key_str, ''
return '', ''
def verify_master(self, payload):
def verify_pubkey_sig(self, message, sig):
'''
Verify that the master is the same one that was previously accepted.
wraps the verify_signature method to run additional
checks and return a bool
'''
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
if os.path.isfile(m_pub_fn) and not self.opts['open_mode']:
local_master_pub = salt.utils.fopen(m_pub_fn).read()
if payload['pub_key'] != local_master_pub:
if self.opts['master_sign_key_name']:
path = os.path.join(self.opts['pki_dir'],
self.opts['master_sign_key_name'] + '.pub')
# This is not the last master we connected to
log.error('The master key has changed, the salt master could '
'have been subverted, verify salt master\'s public '
'key')
return ''
if os.path.isfile(path):
res = verify_signature(path,
message,
binascii.a2b_base64(sig))
else:
log.error('Verification public key {0} does not exist. You '
'need to copy it from the master to the minion\'s '
'pki directory'.format(os.path.basename(path)))
return False
if res:
log.debug('Successfully verified signature of master '
'public key with verification public key '
'{0}'.format(self.opts['master_sign_key_name'] + '.pub'))
return True
else:
log.debug('Failed to verify signature of public key')
return False
else:
log.error('Failed to verify the signature of the message because '
'the verification key-pair\'s name is not defined. Please '
'make sure that master_sign_key_name is defined.')
return False
def verify_signing_master(self, payload):
try:
aes, token = self.decrypt_aes(payload)
if self.verify_pubkey_sig(payload['pub_key'],
payload['pub_sig']):
log.info('Received signed and verified master pubkey '
'from master {0}'.format(self.opts['master']))
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
salt.utils.fopen(m_pub_fn, 'w+').write(payload['pub_key'])
return True
else:
log.error('Received signed public-key from master {0} '
'but signature verification failed!'.format(self.opts['master']))
return False
except Exception as sign_exc:
log.error('There was an error while verifying the masters public-key signature')
raise Exception(sign_exc)
def check_auth_deps(self, payload):
'''
checks that signing on the master and verification on the minion
are enabled consistently. If only one side is enabled, it should fail
'''
# master and minion sign and verify
if 'pub_sig' in payload and self.opts['verify_master_pubkey_sign']:
return True
# master and minion do NOT sign and do NOT verify
elif 'pub_sig' not in payload and not self.opts['verify_master_pubkey_sign']:
return True
# master signs, but minion does NOT verify
elif 'pub_sig' in payload and not self.opts['verify_master_pubkey_sign']:
log.error('The master sent its public-key signature, but signature '
'verification is not enabled on the minion. Either enable '
'signature verification on the minion or disable signing '
'the public key on the master!')
return False
# master does NOT sign but minion wants to verify
elif 'pub_sig' not in payload and self.opts['verify_master_pubkey_sign']:
log.error('The master did not send its public-key signature, but '
'signature verification is enabled on the minion. Either '
'disable signature verification on the minion or enable '
'signing the public key on the master!')
return False
def extract_aes(self, payload, master_pub=True):
'''
return the aes key received from the master
when the minion has been successfully authed
'''
if master_pub:
try:
aes, token = self.decrypt_aes(payload, master_pub)
if token != self.token:
log.error(
'The master failed to decrypt the random minion token'
@ -324,10 +444,66 @@ class Auth(object):
return ''
return aes
else:
salt.utils.fopen(m_pub_fn, 'w+').write(payload['pub_key'])
aes, token = self.decrypt_aes(payload, False)
aes, token = self.decrypt_aes(payload, master_pub)
return aes
def verify_master(self, payload):
'''
Verify that the master is the same one that was previously accepted.
'''
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
if os.path.isfile(m_pub_fn) and not self.opts['open_mode']:
local_master_pub = salt.utils.fopen(m_pub_fn).read()
if payload['pub_key'] != local_master_pub:
if not self.check_auth_deps(payload):
return ''
if self.opts['verify_master_pubkey_sign']:
if self.verify_signing_master(payload):
return self.extract_aes(payload, master_pub=False)
else:
return ''
else:
# This is not the last master we connected to
log.error('The master key has changed, the salt master could '
'have been subverted, verify salt master\'s public '
'key')
return ''
else:
if not self.check_auth_deps(payload):
return ''
# verify the signature of the pubkey even if it has
# not changed compared with the one we already have
if self.opts['always_verify_signature']:
if self.verify_signing_master(payload):
return self.extract_aes(payload)
else:
log.error('The master\'s public key could not be verified. Is the '
'verification pubkey {0} up to date?'
''.format(self.opts['master_sign_key_name'] + '.pub'))
return ''
else:
return self.extract_aes(payload)
else:
if not self.check_auth_deps(payload):
return ''
# verify the master's pubkey signature if the minion
# has not received any master pubkey before
if self.opts['verify_master_pubkey_sign']:
if self.verify_signing_master(payload):
return self.extract_aes(payload, master_pub=False)
else:
return ''
# the minion has not received any master pubkey yet, write
# the newly received pubkey to minion_master.pub
else:
salt.utils.fopen(m_pub_fn, 'w+').write(payload['pub_key'])
return self.extract_aes(payload, master_pub=False)
def sign_in(self, timeout=60, safe=True, tries=1):
'''
Send a sign in request to the master, sets the key information and

View File

@ -421,7 +421,7 @@ class SaltRaetRoadStackPrinter(ioflo.base.deeding.Deed):
'''
rxMsgs = self.rxmsgs.value
while rxMsgs:
msg = rxMsgs.popleft()
msg, name = rxMsgs.popleft()
console.terse("\nReceived....\n{0}\n".format(msg))
@ -513,7 +513,7 @@ class LoadPillar(ioflo.base.deeding.Deed):
while True:
time.sleep(0.01)
if self.udp_stack.value.rxMsgs:
for msg in self.udp_stack.value.rxMsgs:
for msg, rnmid in self.udp_stack.value.rxMsgs:
self.pillar.value = msg.get('return', {})
self.opts.value['pillar'] = self.pillar.value
return
@ -720,9 +720,11 @@ class Router(ioflo.base.deeding.Deed):
'uxd_stack': '.salt.uxd.stack.stack',
'udp_stack': '.raet.udp.stack.stack'}
def _process_udp_rxmsg(self, msg):
def _process_udp_rxmsg(self, msg, rnmid):
'''
Send to the right queue
msg is the message body dict
rnmid is the unique name identifier of the remote estate that sent the message
'''
try:
d_estate = msg['route']['dst'][0]
@ -804,7 +806,8 @@ class Router(ioflo.base.deeding.Deed):
Process the messages!
'''
while self.udp_stack.value.rxMsgs:
self._process_udp_rxmsg(self.udp_stack.value.rxMsgs.popleft())
msg, name = self.udp_stack.value.rxMsgs.popleft()
self._process_udp_rxmsg(msg=msg, rnmid=name)
while self.uxd_stack.value.rxMsgs:
self._process_uxd_rxmsg(self.uxd_stack.value.rxMsgs.popleft())
@ -947,8 +950,14 @@ class NixExecutor(ioflo.base.deeding.Deed):
ret_stack = LaneStack(
name=stackname,
lanename=mid,
yid=yid, # jid
sockdirpath=self.opts['sock_dir'],
basedirpath=dirpath)
ret_stack.Pk = raeting.packKinds.pack
main_yard = RemoteYard(

View File

@ -10,6 +10,7 @@ import logging
import os
import re
import time
import stat
try:
import pwd
except ImportError:
@ -216,6 +217,130 @@ def fileserver_update(fileserver):
)
class AutoKey(object):
'''
Implement the methods to run auto key acceptance and rejection
'''
def __init__(self, opts):
self.opts = opts
def check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(signing_file))
return False
with salt.utils.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
else:
if salt.utils.expr_match(keyid, line):
return True
return False
def check_autosign_dir(self, keyid):
'''
Check a keyid for membership in an autosign directory.
'''
autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
# cleanup expired files
expire_minutes = self.opts.get('autosign_expire_minutes', 10)
if expire_minutes > 0:
min_time = time.time() - (60 * int(expire_minutes))
for root, dirs, filenames in os.walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if mtime < min_time:
log.warn('Autosign keyid expired {0}'.format(stub_file))
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
if not os.path.exists(stub_file):
return False
os.remove(stub_file)
return True
def check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
'''
return self.check_signing_file(
keyid,
self.opts.get('autoreject_file', None)
)
def check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
return False
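Both the ZeroMQ and RAET key paths consume this helper the same way. A hedged
usage sketch; the config path and minion id are placeholders:

# Driving the shared AutoKey helper as its call sites in this changeset do.
import salt.config
import salt.daemons.masterapi

opts = salt.config.master_config('/etc/salt/master')  # assumed default path
auto_key = salt.daemons.masterapi.AutoKey(opts)

keyid = 'minion-01'  # hypothetical minion id
if auto_key.check_autoreject(keyid):
    print('{0} would be auto-rejected'.format(keyid))
elif auto_key.check_autosign(keyid):
    print('{0} would be auto-accepted'.format(keyid))
else:
    print('{0} stays pending'.format(keyid))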
class RemoteFuncs(object):
'''
Functions made available to minions; this class includes the raw routines

View File

@ -1043,6 +1043,14 @@ def os_data():
os=grains['osfullname'],
ver=grains['osrelease'])
if grains.get('osrelease', ''):
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
return grains
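A quick illustration of what the loop above produces; the release string is
hypothetical:

# '14.04.1' -> (14, 4, 1); non-numeric parts are left as strings.
osrelease = '14.04.1'
osrelease_info = osrelease.split('.')
for idx, value in enumerate(osrelease_info):
    if not value.isdigit():
        continue
    osrelease_info[idx] = int(value)
assert tuple(osrelease_info) == (14, 4, 1)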

View File

@ -18,6 +18,7 @@ import json
import salt.crypt
import salt.utils
import salt.utils.event
import salt.daemons.masterapi
from salt.utils.event import tagify
@ -708,6 +709,7 @@ class RaetKey(Key):
'''
def __init__(self, opts):
Key.__init__(self, opts)
self.auto_key = salt.daemons.masterapi.AutoKey(self.opts)
self.serial = salt.payload.Serial(self.opts)
def _check_minions_directories(self):
@ -816,16 +818,33 @@ class RaetKey(Key):
else:
return 'rejected'
elif os.path.isfile(pre_path):
auto_reject = self.auto_key.check_autoreject(minion_id)
auto_sign = self.auto_key.check_autosign(minion_id)
with salt.utils.fopen(pre_path, 'rb') as fp_:
keydata = self.serial.loads(fp_.read())
if keydata['pub'] == pub and keydata['verify'] == verify:
if auto_reject:
self.reject(minion_id)
return 'rejected'
elif auto_sign:
self.accept(minion_id)
return 'accepted'
return 'pending'
else:
return 'rejected'
# This is a new key, place it in pending
# This is a new key, evaluate auto accept/reject files and place
# accordingly
auto_reject = self.auto_key.check_autoreject(minion_id)
auto_sign = self.auto_key.check_autosign(minion_id)
if self.opts['auto_accept']:
w_path = acc_path
ret = 'accepted'
elif auto_sign:
w_path = acc_path
ret = 'accepted'
elif auto_reject:
w_path = rej_path
ret = 'rejected'
else:
w_path = pre_path
ret = 'pending'

View File

@ -28,7 +28,7 @@ __salt__ = {
}
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.dirname(salt.__file__)
SALT_BASE_PATH = os.path.abspath(os.path.dirname(salt.__file__))
LOADED_BASE_NAME = 'salt.loaded'
# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`

View File

@ -11,13 +11,8 @@ import time
import errno
import signal
import shutil
import stat
import logging
import hashlib
try:
import pwd
except ImportError: # This is in case windows minion is importing
pass
import resource
import multiprocessing
import sys
@ -50,6 +45,7 @@ import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
from salt.exceptions import MasterExit
from salt.utils.event import tagify
import binascii
# Import halite libs
try:
@ -1008,6 +1004,7 @@ class AESFuncs(object):
for mod in mods:
sys.modules[mod].__grains__ = load['grains']
pillar_dirs = {}
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
@ -1015,7 +1012,7 @@ class AESFuncs(object):
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions)
data = pillar.compile_pillar()
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
@ -1277,122 +1274,7 @@ class ClearFuncs(object):
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def __check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def __check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.__check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(signing_file))
return False
with salt.utils.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
else:
if salt.utils.expr_match(keyid, line):
return True
return False
def __check_autosign_dir(self, keyid):
'''
Check a keyid for membership in a autosign directory.
'''
autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
# cleanup expired files
expire_minutes = self.opts.get('autosign_expire_minutes', 10)
if expire_minutes > 0:
min_time = time.time() - (60 * int(expire_minutes))
for root, dirs, filenames in os.walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if mtime < min_time:
log.warn('Autosign keyid expired {0}'.format(stub_file))
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
if not os.path.exists(stub_file):
return False
os.remove(stub_file)
return True
def __check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
'''
return self.__check_signing_file(
keyid,
self.opts.get('autoreject_file', None)
)
def __check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.__check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.__check_autosign_dir(keyid):
return True
return False
self.auto_key = salt.daemons.masterapi.AutoKey(opts)
def _auth(self, load):
'''
@ -1441,8 +1323,8 @@ class ClearFuncs(object):
'load': {'ret': 'full'}}
# Check if key is configured to be auto-rejected/signed
auto_reject = self.__check_autoreject(load['id'])
auto_sign = self.__check_autosign(load['id'])
auto_reject = self.auto_key.check_autoreject(load['id'])
auto_sign = self.auto_key.check_autosign(load['id'])
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
@ -1642,6 +1524,23 @@ class ClearFuncs(object):
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port']}
# sign the master's pubkey (if enabled) before it is
# sent to the minion that was just authenticated
if self.opts['master_sign_pubkey']:
# append the pre-computed signature to the auth-reply
if self.master_key.pubkey_signature():
log.debug('Adding pubkey signature to auth-reply')
log.debug(self.master_key.pubkey_signature())
ret.update({'pub_sig': self.master_key.pubkey_signature()})
else:
# the master has its own signing-keypair, compute the master.pub's
# signature and append that to the auth-reply
log.debug("Signing master public key before sending")
pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1],
ret['pub_key'])
ret.update({'pub_sig': binascii.b2a_base64(pub_sign)})
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:

View File

@ -617,7 +617,8 @@ class Minion(MinionBase):
'seconds': opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'args': [True]
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
})
@ -721,7 +722,9 @@ class Minion(MinionBase):
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
# make a backup of the master list for later use
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in self.opts:
self.opts['master_list'] = local_masters
try:
@ -735,10 +738,12 @@ class Minion(MinionBase):
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minion\'s connection attempt.')
log.error(msg)
else:
self.connected = True
return opts['master']
# single master sign in
@ -746,11 +751,13 @@ class Minion(MinionBase):
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
if self.authenticate(timeout, safe) == 'full':
self.connected = False
msg = ('master {0} rejected the minion\'s connection because too '
'many minions are already connected.'.format(opts['master']))
log.error(msg)
sys.exit(salt.exitcodes.EX_GENERIC)
else:
self.connected = True
return opts['master']
def _prep_mod_opts(self):
@ -1527,7 +1534,7 @@ class Minion(MinionBase):
ping_interval = self.opts.get('ping_interval', 0) * 60
ping_at = None
self.connected = True
while self._running is True:
loop_interval = self.process_schedule(self, loop_interval)
try:
@ -1565,26 +1572,58 @@ class Minion(MinionBase):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
# handle this event only once. otherwise it will pollute the log
if self.connected:
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
self.eval_master(opts=self.opts,
failed=True)
# modify the __master_alive job to only fire,
# once the connection was re-established
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'connected': False}
'kwargs': {'master_ip': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.socket
del self.context
del self.poller
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._set_reconnect_ivl()
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.epull_sock, zmq.POLLIN)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
self.connected = False
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
@ -1598,12 +1637,12 @@ class Minion(MinionBase):
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'connected': True}
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
self.connected = True
self.epub_sock.send(package)
except Exception:
log.debug('Exception while handling events', exc_info=True)

View File

@ -159,6 +159,7 @@ def latest_version(*names, **kwargs):
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
refresh = salt.utils.is_true(kwargs.pop('refresh', True))
show_installed = salt.utils.is_true(kwargs.pop('show_installed', False))
if 'repo' in kwargs:
# Remember to kill _get_repo() too when removing this warning.
@ -216,6 +217,8 @@ def latest_version(*names, **kwargs):
installed = pkgs.get(name, [])
if not installed:
ret[name] = candidate
elif installed and show_installed:
ret[name] = candidate
elif candidate:
# If there are no installed versions that are greater than or equal
# to the install candidate, then the candidate is an upgrade, so
@ -380,8 +383,30 @@ def install(name=None,
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
refreshdb = False
if salt.utils.is_true(refresh):
refresh_db()
refreshdb = True
if 'version' in kwargs and kwargs['version']:
refreshdb = False
_latest_version = latest_version(name, refresh=False, show_installed=True)
_version = kwargs.get('version')
# If the versions don't match, refresh is True, otherwise no need to refresh
if not _latest_version == _version:
refreshdb = True
if pkgs:
refreshdb = False
for pkg in pkgs:
if isinstance(pkg, dict):
_name = pkg.keys()[0]
_latest_version = latest_version(_name, refresh=False, show_installed=True)
_version = pkg[_name]
# If the versions don't match, refresh is True, otherwise no need to refresh
if not _latest_version == _version:
refreshdb = True
else:
# No version specified, so refresh should be True
refreshdb = True
if debconf:
__salt__['debconf.set_file'](debconf)
@ -442,6 +467,9 @@ def install(name=None,
cmd.append('install')
cmd.extend(targets)
if refreshdb:
refresh_db()
__salt__['cmd.run'](cmd, env=kwargs.get('env'), python_shell=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()

View File

@ -509,8 +509,8 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
elb = conn.get_all_load_balancers(name)[0]
return elb.register_instances(instances)
load_balancer = conn.get_all_load_balancers(name)[0]
return load_balancer.register_instances(instances)
def deregister_instances(name, instances, region=None, key=None, keyid=None,
@ -527,8 +527,8 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
conn = _get_conn(region, key, keyid, profile)
if not conn:
return False
elb = conn.get_all_load_balancers(name)[0]
return elb.deregister_instances(instances)
load_balancer = conn.get_all_load_balancers(name)[0]
return load_balancer.deregister_instances(instances)
def _get_conn(region, key, keyid, profile):

View File

@ -43,9 +43,9 @@ def _find_chocolatey():
Returns the full path to chocolatey.bat on the host.
'''
choc_defaults = ['C:\\Chocolatey\\bin\\chocolatey.bat',
'C:\\ProgramData\\Chocolatey\\bin\\chocolatey.bat', ]
'C:\\ProgramData\\Chocolatey\\bin\\chocolatey.exe', ]
choc_path = __salt__['cmd.which']('chocolatey.bat')
choc_path = __salt__['cmd.which']('chocolatey.exe')
if not choc_path:
for choc_dir in choc_defaults:
if __salt__['cmd.has_exec'](choc_dir):

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
'''
Management of dockers
Management of Dockers
=====================
.. versionadded:: 2014.1.0
@ -9,7 +9,7 @@ Management of dockers
The DockerIO integration is still in beta; the API is subject to change
General notes
General Notes
-------------
- As we use states, we don't want to be continuously popping dockers, so we
@ -17,24 +17,29 @@ General notes
- As a corollary, we will resolve a container id either directly by the id
or try to find a container id matching something stocked in grain.
Installation prerequisites
Installation Prerequisites
--------------------------
- You will need the 'docker-py' python package in your python installation
running salt. The version of docker-py should support `version 1.12 of docker
remote API.
<http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_.
- For now, you need docker-py 0.3.2
- You will need the ``docker-py`` python package in your python installation
path that is running salt. Its version should support `Docker Remote API
v1.12 <http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_.
Currently, ``docker-py 0.3.2`` is known to support `Docker Remote API v1.12
<http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_
.. code-block:: bash
pip install docker-py==0.3.2
Prerequisite pillar configuration for authentication
Prerequisite Pillar Configuration for Authentication
----------------------------------------------------
- To push or pull you will need to be authenticated as the docker-py bindings
- To push or pull you will need to be authenticated as the ``docker-py`` bindings
require it
- For this to happen, you will need to configure a mapping in the pillar
representing your per URL authentication bits::
representing your per URL authentication bits:
.. code-block:: yaml
docker-registries:
registry_url:
@ -42,7 +47,9 @@ Prerequisite pillar configuration for authentication
password: s3cr3t
username: foo
- You need at least an entry to the default docker index::
- You need at least an entry to the default docker index:
.. code-block:: yaml
docker-registries:
https://index.docker.io/v1:
@ -50,8 +57,10 @@ Prerequisite pillar configuration for authentication
password: s3cr3t
username: foo
you can define multiple registries blocks for them to be aggregated, their id
just must finish with -docker-registries::
- You can define multiple registry blocks for them to be aggregated. The only thing to keep
in mind is that their ID must finish with ``-docker-registries``:
.. code-block:: yaml
ac-docker-registries:
https://index.bar.io/v1:
@ -65,7 +74,9 @@ just must finish with -docker-registries::
password: s3cr3t
username: foo
Would be the equivalent to::
This could be also written as:
.. code-block:: yaml
docker-registries:
https://index.bar.io/v1:
@ -77,24 +88,22 @@ Would be the equivalent to::
password: s3cr3t
username: foo
Registry dialog methods
Registry Dialog Methods
-----------------------
- login
- push
- pull
Docker management
Docker Management
-----------------
- version
- info
Image management
Image Management
----------------
You have those methods:
- search
- inspect_image
- get_images
@ -103,11 +112,9 @@ You have those methods:
- build
- tag
Container management
Container Management
--------------------
These are the available methods:
- start
- stop
- restart
@ -126,15 +133,15 @@ You have those methods:
- export
- get_container_root
Runtime execution within a specific already existing and running container
Runtime Execution within a specific, already existing/running container
--------------------------------------------------------------------------
- Idea is to use lxc-attach to execute inside the container context.
- We do not use a "docker run command" but want to execute something inside a
running container.
Idea is to use `lxc-attach <http://linux.die.net/man/1/lxc-attach>`_ to execute
inside the container context.
We do not want to use ``docker run`` but want to execute something inside a
running container.
You have those methods:
These are the available methods:
- retcode
- run
@ -173,7 +180,7 @@ HAS_NSENTER = bool(salt.utils.which('nsenter'))
log = logging.getLogger(__name__)
INVALID_RESPONSE = 'We did not get any expectable answer from docker'
INVALID_RESPONSE = 'We did not get any expected answer from docker'
VALID_RESPONSE = ''
NOTSET = object()
base_status = {
@ -1800,7 +1807,7 @@ def _run_wrapper(status, container, func, cmd, *args, **kwargs):
' {cmd}'.format(pid=container_pid, cmd=cmd))
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t found command'
'Unknown docker ExecutionDriver {0!r}, or didn\'t find a command'
' to attach to the container'.format(driver))
# now execute the command

View File

@ -439,7 +439,7 @@ def service_delete(service_id=None, name=None, profile=None, **connection_args):
salt '*' keystone.service_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.service_delete name=nova
'''
kstone = auth(profile)
kstone = auth(profile, **connection_args)
if name:
service_id = service_get(name=name, profile=profile,
**connection_args)[name]['id']

View File

@ -120,6 +120,7 @@ def install(pkgs=None,
cwd=None,
activate=False,
pre_releases=False,
cert=None,
__env__=None,
saltenv='base'):
'''
@ -221,7 +222,8 @@ def install(pkgs=None,
before running install.
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
CLI Example:
@ -448,6 +450,9 @@ def install(pkgs=None,
if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='1.4'):
cmd.append('--pre')
if cert:
cmd.append('--cert={0}'.format(cert))
if global_options:
if isinstance(global_options, string_types):
global_options = [go.strip() for go in global_options.split(',')]
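A simplified sketch of how the new ``cert`` argument reaches the pip command line; the real function assembles many more flags, and the helper name here is hypothetical.

.. code-block:: python

    def build_pip_install_cmd(pkgs, cert=None):
        cmd = ['pip', 'install']
        if cert:
            # --cert points pip at an alternate CA bundle
            cmd.append('--cert={0}'.format(cert))
        cmd.extend(pkgs)
        return cmd

    # build_pip_install_cmd(['requests'], cert='/etc/ssl/my-ca.pem')
    # -> ['pip', 'install', '--cert=/etc/ssl/my-ca.pem', 'requests']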

View File

@ -381,7 +381,10 @@ def get_flags_from_package_conf(conf, atom):
package_file = '{0}/{1}'.format(BASE_PATH.format(conf), _p_to_cp(atom))
if '/' not in atom:
atom = _p_to_cp(atom)
try:
match_list = set(_porttree().dbapi.xmatch("match-all", atom))
except AttributeError:
return []
flags = []
try:
file_handler = salt.utils.fopen(package_file)
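The guard added above, restated as a self-contained sketch: when the porttree API is unavailable, ``xmatch`` raises ``AttributeError`` and the lookup degrades to an empty result instead of crashing.

.. code-block:: python

    def safe_matches(porttree, atom):
        try:
            return set(porttree.dbapi.xmatch('match-all', atom))
        except AttributeError:
            # dbapi is missing or incomplete; report no matches rather
            # than aborting the whole flags lookup
            return set()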

View File

@ -89,12 +89,20 @@ def available(name):
Returns ``True`` if the specified service is available, otherwise returns
``False``.
On SmartOS, ``svcs`` is used to resolve the SMF service name from the
package name.
CLI Example:
.. code-block:: bash
salt '*' service.available net-snmp
'''
if 'SmartOS' in __grains__['os']:
cmd = '/usr/bin/svcs -H -o SVC {0}'.format(name)
name = __salt__['cmd.run'](cmd)
return name in get_all()
else:
return name in get_all()
@ -110,6 +118,11 @@ def missing(name):
salt '*' service.missing net-snmp
'''
if 'SmartOS' in __grains__['os']:
cmd = '/usr/bin/svcs -H -o SVC {0}'.format(name)
name = __salt__['cmd.run'](cmd)
return name not in get_all()
else:
return name not in get_all()

View File

@ -545,7 +545,7 @@ def version():
return ret
def master(connected=True):
def master(master_ip=None, connected=True):
'''
.. versionadded:: Helium
@ -558,15 +558,14 @@ def master(connected=True):
salt '*' status.master
'''
ip = __salt__['config.option']('master')
port = int(__salt__['config.option']('publish_port'))
ips = _remote_port_tcp(port)
if connected:
if ip not in ips:
if master_ip not in ips:
event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
event.fire_event({'master': ip}, '__master_disconnected')
event.fire_event({'master': master_ip}, '__master_disconnected')
else:
if ip in ips:
if master_ip in ips:
event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
event.fire_event({'master': ip}, '__master_connected')
event.fire_event({'master': master_ip}, '__master_connected')
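A hedged sketch of the new connectivity check; ``fire_event`` stands in for the ``salt.utils.event`` plumbing shown above, and ``open_ips`` for the result of the remote-port scan.

.. code-block:: python

    def check_master(master_ip, open_ips, fire_event, connected=True):
        # open_ips: remote addresses currently holding the publish port open
        if connected and master_ip not in open_ips:
            fire_event({'master': master_ip}, '__master_disconnected')
        elif not connected and master_ip in open_ips:
            fire_event({'master': master_ip}, '__master_connected')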

View File

@ -82,7 +82,7 @@ def _get_all_units():
r')\s+loaded\s+(?P<active>[^\s]+)')
out = __salt__['cmd.run_stdout'](
'systemctl --full --no-legend --no-pager list-units | col -b'
'systemctl --all --full --no-legend --no-pager list-units | col -b'
)
ret = {}

View File

@ -43,11 +43,34 @@ def __virtual__():
return False
def _cert_base_path():
def cert_base_path(cacert_path=None):
'''
Return the base path for certs
Return the base path for certs from CLI or from options
cacert_path
absolute path to ca certificates root directory
'''
return __salt__['config.option']('ca.cert_base_path')
if not cacert_path:
cacert_path = __salt__['config.option']('ca.contextual_cert_base_path')
if not cacert_path:
cacert_path = __salt__['config.option']('ca.cert_base_path')
return cacert_path
def _cert_base_path(cacert_path=None):
'''
Backwards-compatible wrapper
'''
return cert_base_path(cacert_path)
def set_ca_path(cacert_path):
'''
If provided, store the given cacert_path in context
to be used as the base path for further operations
'''
if cacert_path:
__opts__['ca.contextual_cert_base_path'] = cacert_path
return cert_base_path()
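The lookup order the reworked ``cert_base_path`` follows, condensed into a sketch; ``options`` stands in for the ``config.option`` lookup.

.. code-block:: python

    def resolve_cert_base_path(cacert_path, options):
        # Order: explicit argument, then the contextual override stored
        # by set_ca_path, then the static ca.cert_base_path option.
        if cacert_path:
            return cacert_path
        return (options.get('ca.contextual_cert_base_path')
                or options.get('ca.cert_base_path'))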
def _new_serial(ca_name, CN):
@ -83,7 +106,7 @@ def _new_serial(ca_name, CN):
return hashnum
def _write_cert_to_database(ca_name, cert):
def _write_cert_to_database(ca_name, cert, cacert_path=None):
'''
write out the index.txt database file in the appropriate directory to
track certificates
@ -92,8 +115,12 @@ def _write_cert_to_database(ca_name, cert):
name of the CA
cert
certificate to be recorded
cacert_path
absolute path to ca certificates root directory
'''
index_file = "{0}/{1}/index.txt".format(_cert_base_path(), ca_name)
set_ca_path(cacert_path)
index_file = "{0}/{1}/index.txt".format(cert_base_path(),
ca_name)
expire_date = cert.get_notAfter()
serial_number = cert.get_serial_number()
@ -119,7 +146,7 @@ def _write_cert_to_database(ca_name, cert):
ofile.write(index_data)
def maybe_fix_ssl_version(ca_name):
def maybe_fix_ssl_version(ca_name, cacert_path=None):
'''
Check that the X509 version is correct
(it was incorrectly set in previous salt versions).
@ -127,13 +154,16 @@ def maybe_fix_ssl_version(ca_name):
ca_name
ca authority name
cacert_path
absolute path to ca certificates root directory
'''
set_ca_path(cacert_path)
certp = '{0}/{1}/{2}_ca_cert.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
ca_name)
ca_keyp = '{0}/{1}/{2}_ca_cert.key'.format(
_cert_base_path(), ca_name, ca_name)
cert_base_path(), ca_name, ca_name)
with open(certp) as fic:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
fic.read())
@ -170,15 +200,16 @@ def maybe_fix_ssl_version(ca_name):
fixmode=True)
def _ca_exists(ca_name):
def ca_exists(ca_name, cacert_path=None):
'''
Verify whether a Certificate Authority (CA) already exists
ca_name
name of the CA
'''
set_ca_path(cacert_path)
certp = '{0}/{1}/{2}_ca_cert.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
ca_name)
if os.path.exists(certp):
@ -187,8 +218,37 @@ def _ca_exists(ca_name):
return False
def create_ca(
def _ca_exists(ca_name, cacert_path=None):
'''Backwards-compatible wrapper'''
return ca_exists(ca_name, cacert_path)
def get_ca(ca_name, as_text=False, cacert_path=None):
'''
Get the certificate path or content
ca_name
name of the CA
as_text
if true, return the certificate content instead of the path
cacert_path
absolute path to ca certificates root directory
'''
set_ca_path(cacert_path)
certp = '{0}/{1}/{2}_ca_cert.crt'.format(
cert_base_path(),
ca_name,
ca_name)
if not os.path.exists(certp):
raise ValueError('Certificate does not exist for {0}'.format(ca_name))
else:
if as_text:
with open(certp) as fic:
certp = fic.read()
return certp
def create_ca(ca_name,
bits=2048,
days=365,
CN='localhost',
@ -198,7 +258,8 @@ def create_ca(
O='SaltStack',
OU=None,
emailAddress='xyz@pdq.net',
fixmode=False):
fixmode=False,
cacert_path=None):
'''
Create a Certificate Authority (CA)
@ -222,6 +283,8 @@ def create_ca(
organizational unit, default is None
emailAddress
email address for the CA owner, default is 'xyz@pdq.net'
cacert_path
absolute path to ca certificates root directory
Writes out a CA certificate based upon defined config values. If the file
already exists, the function just returns assuming the CA certificate
@ -243,11 +306,12 @@ def create_ca(
salt '*' tls.create_ca test_ca
'''
set_ca_path(cacert_path)
certp = '{0}/{1}/{2}_ca_cert.crt'.format(
_cert_base_path(), ca_name, ca_name)
cert_base_path(), ca_name, ca_name)
ca_keyp = '{0}/{1}/{2}_ca_cert.key'.format(
_cert_base_path(), ca_name, ca_name)
if (not fixmode) and _ca_exists(ca_name):
cert_base_path(), ca_name, ca_name)
if (not fixmode) and ca_exists(ca_name):
return (
'Certificate for CA named "{0}" '
'already exists').format(ca_name)
@ -255,8 +319,11 @@ def create_ca(
if fixmode and not os.path.exists(certp):
raise ValueError('{0} does not exist, can\'t fix'.format(certp))
if not os.path.exists('{0}/{1}'.format(_cert_base_path(), ca_name)):
os.makedirs('{0}/{1}'.format(_cert_base_path(), ca_name))
if not os.path.exists('{0}/{1}'.format(
cert_base_path(), ca_name)
):
os.makedirs('{0}/{1}'.format(cert_base_path(),
ca_name))
# try to reuse existing ssl key
key = None
@ -331,15 +398,14 @@ def create_ca(
_write_cert_to_database(ca_name, ca)
ret = ('Created Private Key: "{1}/{2}/{3}_ca_cert.key." ').format(
ca_name, _cert_base_path(), ca_name, ca_name)
ca_name, cert_base_path(), ca_name, ca_name)
ret += ('Created CA "{0}": "{1}/{2}/{3}_ca_cert.crt."').format(
ca_name, _cert_base_path(), ca_name, ca_name)
ca_name, cert_base_path(), ca_name, ca_name)
return ret
def create_csr(
ca_name,
def create_csr(ca_name,
bits=2048,
CN='localhost',
C='US',
@ -348,7 +414,8 @@ def create_csr(
O='SaltStack',
OU=None,
emailAddress='xyz@pdq.net',
subjectAltName=None):
subjectAltName=None,
cacert_path=None):
'''
Create a Certificate Signing Request (CSR) for a
particular Certificate Authority (CA)
@ -375,6 +442,8 @@ def create_csr(
subjectAltName
valid subjectAltNames in full form, eg to add DNS entry you would call
this function with this value: **['DNS:myapp.foo.comm']**
cacert_path
absolute path to ca certificates root directory
Writes out a Certificate Signing Request (CSR) If the file already
exists, the function just returns assuming the CSR already exists.
@ -397,15 +466,21 @@ def create_csr(
salt '*' tls.create_csr test
'''
set_ca_path(cacert_path)
if not _ca_exists(ca_name):
if not ca_exists(ca_name):
return ('Certificate for CA named "{0}" does not exist, please create '
'it first.').format(ca_name)
if not os.path.exists('{0}/{1}/certs/'.format(_cert_base_path(), ca_name)):
os.makedirs("{0}/{1}/certs/".format(_cert_base_path(), ca_name))
if not os.path.exists('{0}/{1}/certs/'.format(
cert_base_path(),
ca_name)
):
os.makedirs("{0}/{1}/certs/".format(cert_base_path(),
ca_name))
csr_f = '{0}/{1}/certs/{2}.csr'.format(_cert_base_path(), ca_name, CN)
csr_f = '{0}/{1}/certs/{2}.csr'.format(cert_base_path(),
ca_name, CN)
if os.path.exists(csr_f):
return 'Certificate Request "{0}" already exists'.format(csr_f)
@ -432,7 +507,8 @@ def create_csr(
# Write private key and request
priv_key = salt.utils.fopen(
'{0}/{1}/certs/{2}.key'.format(_cert_base_path(), ca_name, CN),
'{0}/{1}/certs/{2}.key'.format(cert_base_path(),
ca_name, CN),
'w+'
)
priv_key.write(
@ -450,13 +526,13 @@ def create_csr(
csr.close()
ret = 'Created Private Key: "{0}/{1}/certs/{2}.key." '.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)
ret += 'Created CSR for "{0}": "{1}/{2}/certs/{3}.csr."'.format(
ca_name,
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)
@ -464,8 +540,7 @@ def create_csr(
return ret
def create_self_signed_cert(
tls_dir='tls',
def create_self_signed_cert(tls_dir='tls',
bits=2048,
days=365,
CN='localhost',
@ -474,8 +549,8 @@ def create_self_signed_cert(
L='Salt Lake City',
O='SaltStack',
OU=None,
emailAddress='xyz@pdq.net'):
emailAddress='xyz@pdq.net',
cacert_path=None):
'''
Create a Self-Signed Certificate (CERT)
@ -498,6 +573,8 @@ def create_self_signed_cert(
organizational unit, default is None
emailAddress
email address for the request, default is 'xyz@pdq.net'
cacert_path
absolute path to ca certificates root directory
Writes out a Self-Signed Certificate (CERT). If the file already
exists, the function just returns.
@ -526,12 +603,15 @@ def create_self_signed_cert(
salt 'minion' tls.create_self_signed_cert CN='test.mysite.org'
'''
set_ca_path(cacert_path)
if not os.path.exists('{0}/{1}/certs/'.format(_cert_base_path(), tls_dir)):
os.makedirs("{0}/{1}/certs/".format(_cert_base_path(), tls_dir))
if not os.path.exists('{0}/{1}/certs/'.format(cert_base_path(), tls_dir)):
os.makedirs("{0}/{1}/certs/".format(cert_base_path(),
tls_dir))
if os.path.exists(
'{0}/{1}/certs/{2}.crt'.format(_cert_base_path(), tls_dir, CN)
'{0}/{1}/certs/{2}.crt'.format(cert_base_path(),
tls_dir, CN)
):
return 'Certificate "{0}" already exists'.format(CN)
@ -540,7 +620,7 @@ def create_self_signed_cert(
# create certificate
cert = OpenSSL.crypto.X509()
cert.set_version(3)
cert.set_version(2)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
@ -561,7 +641,8 @@ def create_self_signed_cert(
# Write private key and cert
priv_key = salt.utils.fopen(
'{0}/{1}/certs/{2}.key'.format(_cert_base_path(), tls_dir, CN),
'{0}/{1}/certs/{2}.key'.format(cert_base_path(),
tls_dir, CN),
'w+'
)
priv_key.write(
@ -570,7 +651,7 @@ def create_self_signed_cert(
priv_key.close()
crt = salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(
_cert_base_path(),
cert_base_path(),
tls_dir,
CN
), 'w+')
@ -585,12 +666,12 @@ def create_self_signed_cert(
_write_cert_to_database(tls_dir, cert)
ret = 'Created Private Key: "{0}/{1}/certs/{2}.key." '.format(
_cert_base_path(),
cert_base_path(),
tls_dir,
CN
)
ret += 'Created Certificate: "{0}/{1}/certs/{2}.crt."'.format(
_cert_base_path(),
cert_base_path(),
tls_dir,
CN
)
@ -598,7 +679,7 @@ def create_self_signed_cert(
return ret
def create_ca_signed_cert(ca_name, CN, days=365):
def create_ca_signed_cert(ca_name, CN, days=365, cacert_path=None):
'''
Create a Certificate (CERT) signed by a
named Certificate Authority (CA)
@ -615,6 +696,8 @@ def create_ca_signed_cert(ca_name, CN, days=365):
The CN *must* match an existing CSR generated by create_csr. If it
does not, this method does nothing.
cacert_path
absolute path to ca certificates root directory
If the following values were set::
@ -633,8 +716,10 @@ def create_ca_signed_cert(ca_name, CN, days=365):
salt '*' tls.create_ca_signed_cert test localhost
'''
set_ca_path(cacert_path)
if os.path.exists(
'{0}/{1}/{2}.crt'.format(_cert_base_path(), ca_name, CN)
'{0}/{1}/{2}.crt'.format(cert_base_path(),
ca_name, CN)
):
return 'Certificate "{0}" already exists'.format(ca_name)
@ -643,14 +728,14 @@ def create_ca_signed_cert(ca_name, CN, days=365):
ca_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/{2}_ca_cert.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name, ca_name
)).read()
)
ca_key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/{2}_ca_cert.key'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
ca_name
)).read()
@ -662,7 +747,7 @@ def create_ca_signed_cert(ca_name, CN, days=365):
req = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/certs/{2}.csr'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)).read()
@ -685,6 +770,7 @@ def create_ca_signed_cert(ca_name, CN, days=365):
log.error('Support for extensions is not available, upgrade PyOpenSSL')
cert = OpenSSL.crypto.X509()
cert.set_version(2)
cert.set_subject(req.get_subject())
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
@ -696,7 +782,7 @@ def create_ca_signed_cert(ca_name, CN, days=365):
cert.sign(ca_key, 'sha1')
crt = salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
), 'w+')
@ -713,13 +799,13 @@ def create_ca_signed_cert(ca_name, CN, days=365):
return ('Created Certificate for "{0}": '
'"{1}/{2}/certs/{3}.crt"').format(
ca_name,
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)
def create_pkcs12(ca_name, CN, passphrase=''):
def create_pkcs12(ca_name, CN, passphrase='', cacert_path=None):
'''
Create a PKCS#12 browser certificate for a particular Certificate (CN)
@ -729,6 +815,8 @@ def create_pkcs12(ca_name, CN, passphrase=''):
common name matching the certificate signing request
passphrase
used to unlock the PKCS#12 certificate when loaded into the browser
cacert_path
absolute path to ca certificates root directory
If the following values were set::
@ -747,9 +835,10 @@ def create_pkcs12(ca_name, CN, passphrase=''):
salt '*' tls.create_pkcs12 test localhost
'''
set_ca_path(cacert_path)
if os.path.exists(
'{0}/{1}/certs/{2}.p12'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN)
):
@ -759,7 +848,7 @@ def create_pkcs12(ca_name, CN, passphrase=''):
ca_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/{2}_ca_cert.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
ca_name
)).read()
@ -771,7 +860,7 @@ def create_pkcs12(ca_name, CN, passphrase=''):
cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)).read()
@ -779,7 +868,7 @@ def create_pkcs12(ca_name, CN, passphrase=''):
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
salt.utils.fopen('{0}/{1}/certs/{2}.key'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)).read()
@ -794,7 +883,7 @@ def create_pkcs12(ca_name, CN, passphrase=''):
pkcs12.set_privatekey(key)
with salt.utils.fopen('{0}/{1}/certs/{2}.p12'.format(
_cert_base_path(),
cert_base_path(),
ca_name,
CN
), 'w') as ofile:
@ -803,7 +892,7 @@ def create_pkcs12(ca_name, CN, passphrase=''):
return ('Created PKCS#12 Certificate for "{0}": '
'"{1}/{2}/certs/{3}.p12"').format(
CN,
_cert_base_path(),
cert_base_path(),
ca_name,
CN
)

View File

@ -343,7 +343,13 @@ def get_pgid(path, follow_symlinks=True):
# can't load a file descriptor for the file, we default
# to "Everyone" - http://support.microsoft.com/kb/243330
except MemoryError:
# generic memory error (win2k3+)
return 'S-1-1-0'
except pywinerror as exc:
# Incorrect function error (win2k8+)
if exc.winerror == 1:
return 'S-1-1-0'
raise
group_sid = secdesc.GetSecurityDescriptorGroup()
return win32security.ConvertSidToStringSid(group_sid)
@ -533,7 +539,13 @@ def get_uid(path, follow_symlinks=True):
path, win32security.OWNER_SECURITY_INFORMATION
)
except MemoryError:
# generic memory error (win2k3+)
return 'S-1-1-0'
except pywinerror as exc:
# Incorrect function error (win2k8+)
if exc.winerror == 1:
return 'S-1-1-0'
raise
owner_sid = secdesc.GetSecurityDescriptorOwner()
return win32security.ConvertSidToStringSid(owner_sid)
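A self-contained sketch of the fallback shared by ``get_pgid`` and ``get_uid``; the pywin32 error type is stubbed here so the pattern runs without Windows.

.. code-block:: python

    class FakeWinError(Exception):
        '''Stand-in for pywintypes.error so the sketch is portable.'''
        def __init__(self, winerror):
            self.winerror = winerror

    def sid_with_fallback(read_sid):
        try:
            return read_sid()
        except MemoryError:
            # generic memory error (win2k3+)
            return 'S-1-1-0'
        except FakeWinError as exc:
            # "incorrect function" error (win2k8+)
            if exc.winerror == 1:
                return 'S-1-1-0'
            raise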

View File

@ -1262,7 +1262,11 @@ class Events(object):
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event('master', opts=self.opts)
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
@ -1427,7 +1431,11 @@ class WebsocketEndpoint(object):
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event('master', opts=self.opts)
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
@ -1509,7 +1517,12 @@ class Webhook(object):
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event('master', opts=self.opts)
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False

View File

@ -125,7 +125,6 @@ class Pillar(object):
# location of file_roots. Issue 5951
ext_pillar_opts = dict(self.opts)
ext_pillar_opts['file_roots'] = self.actual_file_roots
self.merge_strategy = 'smart'
if opts.get('pillar_source_merging_strategy'):
self.merge_strategy = opts['pillar_source_merging_strategy']
@ -458,7 +457,37 @@ class Pillar(object):
return pillar, errors
def ext_pillar(self, pillar):
def _external_pillar_data(self,
pillar,
val,
pillar_dirs,
key):
'''
Build the actual pillar data structure and update
the ``pillar`` variable
'''
ext = None
# try the new interface, which includes the minion ID
# as first argument
if isinstance(val, dict):
ext = self.ext_pillars[key](self.opts['id'], pillar, **val)
elif isinstance(val, list):
ext = self.ext_pillars[key](self.opts['id'], pillar, *val)
else:
if key == 'git':
ext = self.ext_pillars[key](self.opts['id'],
val,
pillar_dirs)
else:
ext = self.ext_pillars[key](self.opts['id'],
pillar,
val)
return ext
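The dispatch rules ``_external_pillar_data`` applies, condensed into a sketch; ``func`` stands in for the looked-up ext_pillar callable.

.. code-block:: python

    def dispatch(func, minion_id, pillar, val, pillar_dirs, key):
        if isinstance(val, dict):
            return func(minion_id, pillar, **val)   # keyword-style config
        if isinstance(val, list):
            return func(minion_id, pillar, *val)    # positional-style config
        if key == 'git':
            # git receives the shared pillar_dirs mapping instead of pillar
            return func(minion_id, val, pillar_dirs)
        return func(minion_id, pillar, val)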
def ext_pillar(self, pillar, pillar_dirs):
'''
Render the external pillar data
'''
@ -467,6 +496,7 @@ class Pillar(object):
if not isinstance(self.opts['ext_pillar'], list):
log.critical('The "ext_pillar" option is malformed')
return pillar
ext = None
for run in self.opts['ext_pillar']:
if not isinstance(run, dict):
log.critical('The "ext_pillar" option is malformed')
@ -479,16 +509,10 @@ class Pillar(object):
continue
try:
try:
# try the new interface, which includes the minion ID
# as first argument
if isinstance(val, dict):
ext = self.ext_pillars[key](self.opts['id'], pillar, **val)
elif isinstance(val, list):
ext = self.ext_pillars[key](self.opts['id'], pillar, *val)
else:
ext = self.ext_pillars[key](self.opts['id'], pillar, val)
pillar = self.merge_sources(pillar, ext)
ext = self._external_pillar_data(pillar,
val,
pillar_dirs,
key)
except TypeError as exc:
if exc.message.startswith('ext_pillar() takes exactly '):
log.warning('Deprecation warning: ext_pillar "{0}"'
@ -497,14 +521,10 @@ class Pillar(object):
else:
raise
if isinstance(val, dict):
ext = self.ext_pillars[key](pillar, **val)
elif isinstance(val, list):
ext = self.ext_pillars[key](pillar, *val)
else:
ext = self.ext_pillars[key](pillar, val)
pillar = self.merge_sources(pillar, ext)
ext = self._external_pillar_data(pillar,
val,
pillar_dirs,
key)
except Exception as exc:
log.exception(
'Failed to load ext_pillar {0}: {1}'.format(
@ -512,6 +532,9 @@ class Pillar(object):
exc
)
)
if ext:
pillar = self.merge_sources(pillar, ext)
ext = None
return pillar
def merge_sources(self, obj_a, obj_b):
@ -536,7 +559,7 @@ class Pillar(object):
return merged
def compile_pillar(self, ext=True):
def compile_pillar(self, ext=True, pillar_dirs=None):
'''
Render the pillar data and return
'''
@ -544,7 +567,7 @@ class Pillar(object):
matches = self.top_matches(top)
pillar, errors = self.render_pillar(matches)
if ext:
pillar = self.ext_pillar(pillar)
pillar = self.ext_pillar(pillar, pillar_dirs)
errors.extend(terrors)
if self.opts.get('pillar_opts', True):
mopts = dict(self.opts)

View File

@ -221,10 +221,14 @@ def _extract_key_val(kv, delim='='):
return key, val
def ext_pillar(minion_id, pillar, repo_string):
def ext_pillar(minion_id,
repo_string,
pillar_dirs):
'''
Checkout the git repository and use its contents as external pillar data
'''
if pillar_dirs is None:
return
# split the branch, repo name and optional extra (key=val) parameters.
options = repo_string.strip().split()
branch = options[0]
@ -251,6 +255,13 @@ def ext_pillar(minion_id, pillar, repo_string):
# normpath is needed to remove appended '/' if root is empty string.
pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))
pillar_dirs.setdefault(pillar_dir, {})
if pillar_dirs[pillar_dir].get(branch, False):
return {} # we've already seen this combo
pillar_dirs[pillar_dir].setdefault(branch, True)
# Don't recurse forever-- the Pillar object will re-call the ext_pillar
# function
if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
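The recursion guard above, restated as a sketch: each (pillar_dir, branch) pair is recorded in the shared ``pillar_dirs`` mapping, and repeats short-circuit.

.. code-block:: python

    def seen_before(pillar_dirs, pillar_dir, branch):
        branches = pillar_dirs.setdefault(pillar_dir, {})
        if branches.get(branch, False):
            return True   # this (pillar_dir, branch) combo was already handled
        branches[branch] = True
        return False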

View File

@ -71,9 +71,12 @@ Now you can include your ciphers in your pillar data like so:
'''
import re
import salt.utils
try:
import gnupg
HAS_GPG = True
if salt.utils.which('gpg') is None:
HAS_GPG = False
except ImportError:
HAS_GPG = False
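The availability rule as a sketch: the Python bindings alone are not enough, the ``gpg`` binary must also be resolvable. ``shutil.which`` stands in for ``salt.utils.which`` here.

.. code-block:: python

    from shutil import which  # stands in for salt.utils.which

    def gpg_renderer_available():
        try:
            import gnupg  # noqa: F401
        except ImportError:
            return False
        # the python bindings alone are not enough; the binary must exist
        return which('gpg') is not None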
import logging

View File

@ -174,9 +174,9 @@ def change(name, context=None, changes=None, lens=None, **kwargs):
if context:
filename = re.sub('^/files|/$', '', context)
if os.path.isfile(filename):
file = open(filename, 'r')
old_file = file.readlines()
file.close()
file_ = open(filename, 'r')
old_file = file_.readlines()
file_.close()
result = __salt__['augeas.execute'](context=context, lens=lens, commands=changes)
ret['result'] = result['retval']
@ -186,9 +186,9 @@ def change(name, context=None, changes=None, lens=None, **kwargs):
return ret
if old_file:
file = open(filename, 'r')
diff = ''.join(difflib.unified_diff(old_file, file.readlines(), n=0))
file.close()
file_ = open(filename, 'r')
diff = ''.join(difflib.unified_diff(old_file, file_.readlines(), n=0))
file_.close()
if diff:
ret['comment'] = 'Changes have been saved'
@ -220,12 +220,12 @@ def setvalue(name, prefix=None, changes=None, **kwargs):
if not isinstance(changes, list):
ret['comment'] = '\'changes\' must be formatted as a list'
return ret
for change in changes:
if not isinstance(change, dict) or len(change) > 1:
for change_ in changes:
if not isinstance(change_, dict) or len(change_) > 1:
ret['comment'] = 'Improperly formatted change'
return ret
key = next(iter(change))
args.extend([key, change[key]])
key = next(iter(change_))
args.extend([key, change_[key]])
if prefix is not None:
args.insert(0, 'prefix={0}'.format(prefix))
@ -244,6 +244,6 @@ def setvalue(name, prefix=None, changes=None, **kwargs):
return ret
ret['comment'] = 'Success'
for change in changes:
ret['changes'].update(change)
for change_ in changes:
ret['changes'].update(change_)
return ret
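The renames above (``file`` to ``file_``, ``change`` to ``change_``) follow the trailing-underscore convention for avoiding builtin shadowing; a toy illustration:

.. code-block:: python

    def shadowed():
        file = '/tmp/example'    # hides the Python 2 builtin ``file`` type
        return file

    def not_shadowed():
        file_ = '/tmp/example'   # trailing underscore keeps the builtin usable
        return file_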

View File

@ -8,23 +8,26 @@ Manage Autoscale Groups
Create and destroy autoscale groups. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit autoscale credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
asg.keyid: GKTADJGHEIQSXMKKRBJ08H
asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H

View File

@ -12,23 +12,26 @@ Note: This module currently only supports creation and deletion of
elasticache resources and will not modify clusters when their configuration
changes in your state files.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit elasticache credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H
elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H

View File

@ -8,23 +8,24 @@ Manage ELBs
Create and destroy ELBs. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit elb credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
elb.keyid: GKTADJGHEIQSXMKKRBJ08H
elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml

View File

@ -5,23 +5,24 @@ Manage IAM roles.
.. versionadded:: Helium
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit IAM credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
iam.keyid: GKTADJGHEIQSXMKKRBJ08H
iam.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml

View File

@ -19,23 +19,26 @@ Also note that a launch configuration that's in use by an autoscale group can
not be deleted until the autoscale group is no longer using it. This may affect
the way in which you want to order your states.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit autoscale credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
asg.keyid: GKTADJGHEIQSXMKKRBJ08H
asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H

View File

@ -8,23 +8,24 @@ Manage Route53 records
Create and delete Route53 records. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit route53 credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
route53.keyid: GKTADJGHEIQSXMKKRBJ08H
route53.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml

View File

@ -8,23 +8,26 @@ Manage Security Groups
Create and destroy Security Groups. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit ec2 credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
This module accepts explicit EC2 credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
secgroup.keyid: GKTADJGHEIQSXMKKRBJ08H
secgroup.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H

View File

@ -8,23 +8,24 @@ Manage SQS Queues
Create and destroy SQS queues. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses boto, which can be installed via package, or pip.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance trough Instance Profiles. Dynamic
This module accepts explicit SQS credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml

View File

@ -83,7 +83,9 @@ a simple protocol described below:
state.`
Here's an example of how one might write a shell script for use with a
stateful command::
stateful command:
.. code-block:: bash
#!/bin/bash
#
@ -93,7 +95,9 @@ a simple protocol described below:
echo # an empty line here so the next line will be the last.
echo "changed=yes comment='something has changed' whatever=123"
And an example SLS file using this module::
And an example SLS file using this module:
.. code-block:: yaml
Run myscript:
cmd.run:

View File

@ -50,7 +50,7 @@ then a new cron job will be added to the user's crontab.
The current behavior still relies on that mechanism, but you can also
specify an identifier to identify your crontabs:
.. versionadded:: 2014.2
.. code-block:: yaml
date > /tmp/crontest:
@ -60,8 +60,10 @@ specify an identifier to identify your crontabs:
- minute: 7
- hour: 2
.. versionadded:: 2014.1.2
And, some months later, you modify it:
.. versionadded:: 2014.2
.. code-block:: yaml
superscript > /tmp/crontest:
@ -71,6 +73,8 @@ And, some months later, you modify it:
- minute: 3
- hour: 4
.. versionadded:: 2014.1.2
The old **date > /tmp/crontest** will be replaced by
**superscript > /tmp/crontest**.
@ -376,7 +380,7 @@ def file(name,
hosted on either the salt master server, or on an HTTP or FTP server.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs.
string is ``salt://spam/eggs``
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required
@ -385,7 +389,7 @@ def file(name,
This can be either a file which contains a source hash string for
the source, or a source hash string. The source hash string is the
hash algorithm followed by the hash of the file:
md5=e138491e9d5b97023cea823fe17bac22
``md5=e138491e9d5b97023cea823fe17bac22``
user
The user to whom the crontab should be assigned. This defaults to

View File

@ -248,6 +248,14 @@ def pulled(name, tag=None, force=False, *args, **kwargs):
return _valid(
name=name,
comment='Image already pulled: {0}'.format(name))
if __opts__['test'] and force:
comment = 'Image {0} will be pulled'.format(name)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
previous_id = image_infos['out']['Id'] if image_infos['status'] else None
pull = __salt__['docker.pull']
returned = pull(name, tag=tag)
@ -278,6 +286,14 @@ def pushed(name):
name
Name of the image
'''
if __opts__['test']:
comment = 'Image {0} will be pushed'.format(name)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
push = __salt__['docker.push']
returned = push(name)
log.debug("Returned: "+str(returned))
@ -314,6 +330,14 @@ def built(name,
name=name,
comment='Image already built: {0}, id: {1}'.format(
name, image_infos['out']['Id']))
if __opts__['test'] and force:
comment = 'Image {0} will be built'.format(name)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
previous_id = image_infos['out']['Id'] if image_infos['status'] else None
build = __salt__['docker.build']
kw = dict(tag=name,
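The ``test=True`` early return these docker states now share, as a sketch; result ``None`` is Salt's convention for "would change, changed nothing".

.. code-block:: python

    def test_mode_return(name, verb):
        return {'name': name,
                'changes': {},
                'result': None,   # None means "would have made changes"
                'comment': 'Image {0} will be {1}'.format(name, verb)}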

View File

@ -1015,7 +1015,9 @@ def managed(name,
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure::
least secure:
.. code-block:: text
Type Length
====== ======
@ -1033,7 +1035,9 @@ def managed(name,
Debian file type ``*.dsc`` is supported.
Examples::
Examples:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha256c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
@ -2261,7 +2265,9 @@ def blockreplace(
:rtype: bool or str
Example of usage with an accumulator and with a variable::
Example of usage with an accumulator and with a variable:
.. code-block:: yaml
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
@ -2292,7 +2298,9 @@ def blockreplace(
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``::
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
@ -2391,7 +2399,9 @@ def sed(name,
.. versionadded:: 0.17.0
Usage::
Usage:
.. code-block:: yaml
# Disable the epel repo by default
/etc/yum.repos.d/epel.repo:
@ -2504,7 +2514,9 @@ def comment(name, regex, char='#', backup='.bak'):
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Usage::
Usage:
.. code-block:: yaml
/etc/fstab:
file.comment:
@ -2582,7 +2594,9 @@ def uncomment(name, regex, char='#', backup='.bak'):
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
Usage::
Usage:
.. code-block:: yaml
/etc/adduser.conf:
file.uncomment:
@ -2697,7 +2711,9 @@ def append(name,
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure::
least secure:
.. code-block:: text
Type Length
====== ======
@ -2715,7 +2731,9 @@ def append(name,
Debian file type ``*.dsc`` is supported.
Examples::
Examples:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha256c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
@ -2753,7 +2771,9 @@ def append(name,
context
Overrides default context variables passed to the template.
Multi-line example::
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.append:
@ -2762,7 +2782,9 @@ def append(name,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text::
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.append:
@ -2770,7 +2792,9 @@ def append(name,
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files::
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
@ -2925,7 +2949,9 @@ def prepend(name,
The text will not be prepended again if it already exists in the file. You
may specify a single line of text or a list of lines to append.
Multi-line example::
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.prepend:
@ -2934,7 +2960,9 @@ def prepend(name,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text::
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.prepend:
@ -2942,7 +2970,9 @@ def prepend(name,
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files::
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
@ -3109,7 +3139,9 @@ def patch(name,
by the ``source`` parameter. If not provided, this defaults to the
environment from which the state is being executed.
Usage::
Usage:
.. code-block:: yaml
# Equivalent to ``patch --forward /opt/file.txt file.patch``
/opt/file.txt:
@ -3203,7 +3235,9 @@ def touch(name, atime=None, mtime=None, makedirs=False):
whether we should create the parent directory/directories in order to
touch the file
Usage::
Usage:
.. code-block:: yaml
/var/log/httpd/logrotate.empty:
file.touch
@ -3442,7 +3476,9 @@ def accumulated(name, filename, text, **kwargs):
Example:
Given the following::
Given the following:
.. code-block:: yaml
animals_doing_things:
file.accumulated:
@ -3457,11 +3493,15 @@ def accumulated(name, filename, text, **kwargs):
- source: salt://animal_file.txt
- template: jinja
One might write a template for animal_file.txt like the following::
One might write a template for ``animal_file.txt`` like the following:
.. code-block:: jinja
The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %}
Collectively, the above states and template file will produce::
Collectively, the above states and template file will produce:
.. code-block:: text
The quick brown fox jumps over the lazy dog.
@ -3608,7 +3648,9 @@ def serialize(name,
.. versionadded:: Helium
For example, this state::
For example, this state:
.. code-block:: yaml
/etc/dummy/package.json:
file.serialize:
@ -3622,7 +3664,9 @@ def serialize(name,
engine: node 0.4.1
- formatter: json
will manage the file ``/etc/dummy/package.json``::
will manage the file ``/etc/dummy/package.json``:
.. code-block:: json
{
"author": "A confused individual <iam@confused.com>",
@ -3631,8 +3675,8 @@ def serialize(name,
"optimist": ">= 0.1.0"
},
"description": "A package using naive versioning",
"engine": "node 0.4.1"
"name": "naive",
"engine": "node 0.4.1",
"name": "naive"
}
'''
@ -3765,7 +3809,9 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
mode
permissions on the device/pipe
Usage::
Usage:
.. code-block:: yaml
/dev/chr:
file.mknod:

View File

@ -3,7 +3,7 @@
Management of addresses and names in hosts file
===============================================
The /etc/hosts file can be managed to contain definitions for specific hosts:
The ``/etc/hosts`` file can be managed to contain definitions for specific hosts:
.. code-block:: yaml
@ -11,7 +11,7 @@ The /etc/hosts file can be managed to contain definitions for specific hosts:
host.present:
- ip: 192.168.0.42
Or using the "names:" directive, you can put several names for the same IP.
Or using the ``names`` directive, you can put several names for the same IP.
(Do not try to combine several space-separated names in a single entry.)
.. code-block:: yaml
@ -23,8 +23,12 @@ Or using the "names:" directive, you can put several names for the same IP.
- server1
- florida
NOTE: changing the name(s) in the present() function does not cause an
update to remove the old entry.
.. note::
Changing the ``names`` in ``host.present`` does not cause an
update to remove the old entry.
.. code-block:: yaml
server1:
host.present:

View File

@ -31,7 +31,7 @@ def __virtual__():
def user_exists(name, password=None, htpasswd_file=None, options='',
force=False, **kwargs):
'''
Make sure the user is inside the /etc/nginx/htpasswd
Make sure the user is inside the ``/etc/nginx/htpasswd``
``name``
username

View File

@ -6,10 +6,10 @@ Management of incron, the inotify cron
The incron state module allows for user incrontabs to be cleanly managed.
Incron declarations require a number of parameters. The parameters that need
to be declared: path, mask, and cmd. The user whose incrontab is to be edited
to be declared are ``path``, ``mask``, and ``cmd``. The ``user`` whose incrontab is to be edited
also needs to be defined.
When making changes to an existing incron job, the path declaration is the unique
When making changes to an existing incron job, the ``path`` declaration is the unique
factor, so if an existing cron that looks like this:
.. code-block:: yaml

View File

@ -308,7 +308,7 @@ def append(name, family='ipv4', **kwargs):
Network family, ipv4 or ipv6.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
@ -379,7 +379,7 @@ def insert(name, family='ipv4', **kwargs):
Networking family, either ipv4 or ipv6
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
@ -446,7 +446,7 @@ def delete(name, family='ipv4', **kwargs):
Networking family, either ipv4 or ipv6
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''

View File

@ -30,7 +30,7 @@ def present(name, persist=False):
The name of the kernel module to verify is loaded
persist
Also add module to /etc/modules
Also add module to ``/etc/modules``
'''
ret = {'name': name,
'result': True,
@ -75,10 +75,10 @@ def absent(name, persist=False, comment=True):
The name of the kernel module to verify is not loaded
persist
Delete module from /etc/modules
Delete module from ``/etc/modules``
comment
Don't remove module from /etc/modules, only comment it
Don't remove module from ``/etc/modules``, only comment it
'''
ret = {'name': name,
'result': True,

View File

@ -27,7 +27,7 @@ def keys(name, basepath='/etc/pki'):
The name variable used to track the execution
basepath
Defaults to `/etc/pki`, this is the root location used for libvirt
Defaults to ``/etc/pki``, this is the root location used for libvirt
keys on the hypervisor
'''
#libvirt.serverkey.pem

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
'''
Management of languages/locales
==============================+
==============================
The locale can be managed for the system:

View File

@ -3,7 +3,7 @@
Management of Gentoo make.conf
==============================
A state module to manage Gentoo's make.conf file
A state module to manage Gentoo's ``make.conf`` file
.. code-block:: yaml
@ -36,12 +36,12 @@ def _make_set(var):
def present(name, value=None, contains=None, excludes=None):
'''
Verify that the variable is in the make.conf and has the provided
Verify that the variable is in the ``make.conf`` and has the provided
settings. If value is set, contains and excludes will be ignored.
name
The variable name. This will automatically be converted to all Upper
Case since variables in make.conf are Upper Case
The variable name. This will automatically be converted to upper
case since variables in ``make.conf`` are in upper case
value
Enforce that the value of the variable is set to the provided value
@ -160,11 +160,11 @@ def present(name, value=None, contains=None, excludes=None):
def absent(name):
'''
Verify that the variable is not in the make.conf.
Verify that the variable is not in the ``make.conf``.
name
The variable name. This will automatically be converted to all Upper
Case since variables in make.conf are Upper Case
The variable name. This will automatically be converted to upper
case since variables in ``make.conf`` are in upper case
'''
ret = {'changes': {},
'comment': '',

View File

@ -51,34 +51,34 @@ def mounted(name,
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as /dev/sdb1
or UUID=066e0200-2867-4ebe-b9e6-f30026ca2314
The device name, typically the device node, such as ``/dev/sdb1``
or ``UUID=066e0200-2867-4ebe-b9e6-f30026ca2314``
fstype
The filesystem type, this will be xfs, ext2/3/4 in the case of classic
filesystems, and fuse in the case of fuse mounts
The filesystem type, this will be ``xfs``, ``ext2/3/4`` in the case of classic
filesystems, and ``fuse`` in the case of fuse mounts
mkmnt
If the mount point is not present then the state will fail, set mkmnt
to True to create the mount point if it is otherwise not present
If the mount point is not present then the state will fail. Set ``mkmnt: True``
to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, default to 0
The dump value to be passed into the fstab. Default is ``0``
pass_num
The pass value to be passed into the fstab, default to 0
The pass value to be passed into the fstab. Default is ``0``
config
Set an alternative location for the fstab, default to /etc/fstab
Set an alternative location for the fstab. Default is ``/etc/fstab``
persist
Set if the mount should be saved in the fstab, default to True
Set if the mount should be saved in the fstab. Default is ``True``
mount
Set if the mount should be mounted immediately, default to True
Set if the mount should be mounted immediately. Default is ``True``
'''
ret = {'name': name,
'changes': {},
@ -333,10 +333,10 @@ def unmounted(name,
The path to the location where the device is to be unmounted from
config
Set an alternative location for the fstab, default to /etc/fstab
Set an alternative location for the fstab. Default is ``/etc/fstab``
persist
Set if the mount should be purged from the fstab, default to False
Set if the mount should be purged from the fstab. Default is ``False``
'''
ret = {'name': name,
'changes': {},

View File

@ -91,10 +91,10 @@ def present(name,
The network/host that the grant should apply to
grant_option
Adds the WITH GRANT OPTION to the defined grant. default: False
Adds the WITH GRANT OPTION to the defined grant. Default is ``False``
escape
Defines if the database value gets escaped or not. default: True
Defines if the database value gets escaped or not. Default is ``True``
revoke_first
By default, MySQL will not do anything if you issue a command to grant
@ -111,7 +111,7 @@ def present(name,
unknown and potentially dangerous state.
Use with caution!
default: False
Default is ``False``
ssl_option
Adds the specified ssl options for the connecting user as requirements for
@ -120,6 +120,8 @@ def present(name,
Possible key/value pairings for the dicts in the value:
.. code-block:: text
- SSL: True
- X509: True
- SUBJECT: <subject>
@ -130,7 +132,7 @@ def present(name,
be an appropriate value as specified by the MySQL documentation for these
options.
default: False (no ssl options will be used)
Default is ``False`` (no ssl options will be used)
'''
comment = 'Grant {0} on {1} to {2}@{3} is already present'
ret = {'name': name,

View File

@ -123,7 +123,8 @@ def installed(name,
no_chown=False,
cwd=None,
activate=False,
pre_releases=False):
pre_releases=False,
cert=None):
'''
Make sure the package is installed
@ -422,6 +423,7 @@ def installed(name,
cwd=cwd,
activate=activate,
pre_releases=pre_releases,
cert=cert,
saltenv=__env__
)

View File

@ -409,7 +409,9 @@ def installed(
Distros which use APT (Debian, Ubuntu, etc.) do not have a concept
of repositories in the same way as YUM-based distros do. When a
source is added, it is assigned to a given release. Consider the
following source configuration::
following source configuration:
.. code-block:: text
deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main
@@ -649,7 +651,7 @@ def installed(
appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt
CLI commands.
Usage::
.. code-block:: yaml
mypkgs:
pkg.installed:
@@ -1030,7 +1032,7 @@ def latest(
pkgs
A list of packages to maintain at the latest available version.
Usage::
.. code-block:: yaml
mypkgs:
pkg.latest:

View File

@@ -12,6 +12,10 @@ from collections import defaultdict
import salt.payload
import salt.auth
import salt.utils
import logging
log = logging.getLogger(__name__)
try:
from raet import raeting, nacling
from raet.road.stacking import RoadStack
@@ -24,6 +28,9 @@ except ImportError:
class Channel(object):
'''
Factory class to create communication channels for different transports
'''
@staticmethod
def factory(opts, **kwargs):
# Default to ZeroMQ for now
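A minimal, self-contained model of the factory dispatch (not salt code; the stub class is invented for illustration):

class _StubZeroMQChannel(object):
    def send(self, load):
        return {'pong': load}

def channel_factory(opts):
    # Pick the transport implementation from opts; default to ZeroMQ,
    # mirroring the comment above.
    ttype = opts.get('transport', 'zeromq')
    if ttype == 'zeromq':
        return _StubZeroMQChannel()
    raise Exception('unknown transport: {0}'.format(ttype))

channel = channel_factory({'transport': 'zeromq'})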
@@ -137,7 +144,18 @@ class ZeroMQChannel(Channel):
@property
def sreq(self):
key = self.sreq_key
if key not in ZeroMQChannel.sreq_cache:
master_type = self.opts.get('master_type', None)
if master_type == 'failover':
# remove all cached sreqs to the old master to prevent
# zeromq from reconnecting to old masters automagically
for check_key in self.sreq_cache.keys():
if self.opts['master_uri'] != check_key[0]:
del self.sreq_cache[check_key]
log.debug('Removed obsolete sreq-object from '
'sreq_cache for master {0}'.format(check_key[0]))
ZeroMQChannel.sreq_cache[key] = salt.payload.SREQ(self.master_uri)
return ZeroMQChannel.sreq_cache[key]
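A simplified, hypothetical model of the eviction above: on a failover minion, every cached connection whose key does not start with the current master_uri is dropped before the new one is cached (the key tuples here are illustrative):

sreq_cache = {
    ('tcp://10.0.0.1:4506', 1234, 'main'): object(),
    ('tcp://10.0.0.2:4506', 1234, 'main'): object(),
}
master_uri = 'tcp://10.0.0.2:4506'

for check_key in list(sreq_cache):  # copy the keys before deleting
    if check_key[0] != master_uri:
        del sreq_cache[check_key]

assert list(sreq_cache) == [('tcp://10.0.0.2:4506', 1234, 'main')]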

View File

@@ -1870,7 +1870,7 @@ def request_minion_cachedir(
'provider': provider,
}
fname = '{0}.pp'.format(minion_id)
fname = '{0}.p'.format(minion_id)
path = os.path.join(base, 'requested', fname)
with salt.utils.fopen(path, 'w') as fh_:
msgpack.dump(data, fh_)
@@ -1901,7 +1901,7 @@ def change_minion_cachedir(
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
fname = '{0}.pp'.format(minion_id)
fname = '{0}.p'.format(minion_id)
path = os.path.join(base, cachedir, fname)
with salt.utils.fopen(path, 'r') as fh_:
@@ -1922,7 +1922,7 @@ def activate_minion_cachedir(minion_id, base=None):
if base is None:
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
fname = '{0}.pp'.format(minion_id)
fname = '{0}.p'.format(minion_id)
src = os.path.join(base, 'requested', fname)
dst = os.path.join(base, 'active')
shutil.move(src, dst)
@@ -1941,7 +1941,7 @@ def delete_minion_cachedir(minion_id, provider, opts, base=None):
base = os.path.join(syspaths.CACHE_DIR, 'cloud')
driver = opts['providers'][provider].keys()[0]
fname = '{0}.pp'.format(minion_id)
fname = '{0}.p'.format(minion_id)
for cachedir in ('requested', 'active'):
path = os.path.join(base, cachedir, driver, provider, fname)
log.debug('path: {0}'.format(path))
@@ -2113,7 +2113,7 @@ def cache_node_list(nodes, provider, opts):
for node in nodes:
diff_node_cache(prov_dir, node, nodes[node], opts)
path = os.path.join(prov_dir, '{0}.pp'.format(node))
path = os.path.join(prov_dir, '{0}.p'.format(node))
with salt.utils.fopen(path, 'w') as fh_:
msgpack.dump(nodes[node], fh_)
@@ -2135,7 +2135,7 @@ def cache_node(node, provider, opts):
prov_dir = os.path.join(base, driver, provider)
if not os.path.exists(prov_dir):
os.makedirs(prov_dir)
path = os.path.join(prov_dir, '{0}.pp'.format(node['name']))
path = os.path.join(prov_dir, '{0}.p'.format(node['name']))
with salt.utils.fopen(path, 'w') as fh_:
msgpack.dump(node, fh_)
@@ -2156,7 +2156,7 @@ def missing_node_cache(prov_dir, node_list, provider, opts):
'''
cached_nodes = []
for node in os.listdir(prov_dir):
cached_nodes.append(node.replace('.pp', ''))
cached_nodes.append(node.replace('.p', ''))
log.debug(sorted(cached_nodes))
log.debug(sorted(node_list))
@@ -2191,7 +2191,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
return
path = os.path.join(prov_dir, node)
path = '{0}.pp'.format(path)
path = '{0}.p'.format(path)
if not os.path.exists(path):
event_data = _strip_cache_events(new_data, opts)
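The rename above standardizes the cloud cache-file suffix on ``.p``. A minimal sketch of the convention, assuming the target directories already exist and using plain ``open`` where the real code uses ``salt.utils.fopen``:

import os
import msgpack

def write_minion_cache(base, minion_id, data):
    # Entries are msgpack-serialized and stored as <minion_id>.p
    path = os.path.join(base, 'requested', '{0}.p'.format(minion_id))
    with open(path, 'w') as fh_:
        msgpack.dump(data, fh_)

def read_minion_cache(base, cachedir, minion_id):
    path = os.path.join(base, cachedir, '{0}.p'.format(minion_id))
    with open(path, 'r') as fh_:
        return msgpack.load(fh_)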

View File

@@ -327,8 +327,10 @@ class Schedule(object):
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in self.opts:
if 'schedule' in self.opts and 'schedule' in schedule:
self.opts['schedule'].update(schedule['schedule'])
elif 'schedule' in self.opts:
self.opts['schedule'].update(schedule)
else:
self.opts['schedule'] = schedule
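A hedged walk-through of the three merge cases this change distinguishes (a standalone model, not the method itself; the unit tests added later in this commit exercise exactly these cases):

def reload_schedule(opts, saved):
    # saved may carry its payload under a 'schedule' key, or be flat.
    if 'schedule' in opts and 'schedule' in saved:
        opts['schedule'].update(saved['schedule'])
    elif 'schedule' in opts:
        opts['schedule'].update(saved)
    else:
        opts['schedule'] = saved
    return opts

assert reload_schedule({'schedule': {'hello': 'world'}},
                       {'schedule': {'foo': 'bar'}}) == \
    {'schedule': {'hello': 'world', 'foo': 'bar'}}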

View File

@@ -40,7 +40,6 @@ SALT_LIBS = os.path.dirname(CODE_DIR)
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.helpers import skip_if_binaries_missing
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
@@ -57,6 +56,7 @@ import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
@@ -544,23 +544,28 @@ class TestDaemon(object):
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['cachedir'], 'raet'),
os.path.join(syndic_master_opts['pki_dir'], 'minions'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(syndic_master_opts['cachedir'], 'jobs'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
os.path.join(syndic_master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['cachedir'], 'raet'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['cachedir'], 'raet'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
@@ -586,19 +591,19 @@ class TestDaemon(object):
'''
Kill the minion and master processes
'''
salt.master.clean_proc(self.sub_minion_process, wait_for_kill=50)
salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
self.sub_minion_process.join()
salt.master.clean_proc(self.minion_process, wait_for_kill=50)
salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
self.minion_process.join()
salt.master.clean_proc(self.master_process, wait_for_kill=50)
salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
self.master_process.join()
try:
salt.master.clean_proc(self.syndic_process, wait_for_kill=50)
salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
self.syndic_process.join()
except AttributeError:
pass
try:
salt.master.clean_proc(self.smaster_process, wait_for_kill=50)
salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
self.smaster_process.join()
except AttributeError:
pass

View File

@@ -20,6 +20,8 @@ token_file: /tmp/ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk
file_buffer_size: 8192
ext_pillar:
- git: master https://github.com/saltstack/pillar1.git
- git: master https://github.com/saltstack/pillar2.git
- test_ext_pillar_opts:
- test_issue_5951_actual_file_roots_in_opts

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@satlstack.com>`
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Python Libs

View File

@@ -25,6 +25,26 @@ class PillarModuleTest(integration.ModuleCase):
else:
self.assertEqual(pillar['class'], 'other')
def test_two_ext_pillar_sources_override(self):
'''
https://github.com/saltstack/salt/issues/12647
'''
self.assertEqual(
self.run_function('pillar.data')['info'],
'bar'
)
def test_two_ext_pillar_sources(self):
'''
https://github.com/saltstack/salt/issues/12647
'''
self.assertEqual(
self.run_function('pillar.data')['abc'],
'def'
)
def test_issue_5449_report_actual_file_roots_in_pillar(self):
'''
pillar['master']['file_roots'] is overwritten by the master

View File

@@ -6,6 +6,8 @@
# Import python libs
import os
import pwd
import random
# Import Salt Testing libs
from salttesting.helpers import (
@@ -18,9 +20,6 @@ import integration
from salttesting import skipIf
import random
import pwd
class AuthTest(integration.ShellCase):
'''
@@ -84,6 +83,7 @@ class AuthTest(integration.ShellCase):
if pwd.getpwnam('saltdev'):
self.run_call('user.delete saltdev')
if __name__ == '__main__':
from integration import run_tests
run_tests(AuthTest)

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@satlstack.com>`
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Libs

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@satlstack.com>`
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Testing Libs

View File

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@satlstack.com>`
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Libs

View File

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-

View File

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Libs
from salt.output import grains
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
grains.__opts__ = {}
colors = {'LIGHT_GREEN': '\x1b[1;32m',
'ENDC': '\x1b[0m',
'CYAN': '\x1b[0;36m',
'GREEN': '\x1b[0;32m'}
class GrainsTestCase(TestCase):
'''
TestCase for salt.output.grains module
'''
def test_output_unicode(self):
'''
Tests grains output when using unicode characters like ®
'''
test_grains = {'locale_info': {'defaultencoding': 'unknown'},
'test': {'bad_string': 'Windows®'}}
ret = u'\x1b[0;32mtest\x1b[0m:\n \x1b' \
u'[0;36mbad_string\x1b[0m: \x1b[1;32mWindows\xae\x1b' \
u'[0m\n\x1b[0;32mlocale_info\x1b[0m:\n \x1b' \
u'[0;36mdefaultencoding\x1b[0m: \x1b[1;32munknown\x1b[0m\n'
with patch.dict(grains.__opts__, {'color': colors}):
self.assertEqual(grains.output(test_grains), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(GrainsTestCase, needs_daemon=False)

View File

@@ -2,7 +2,6 @@
# Import Python libs
import os
from collections import OrderedDict
from imp import find_module
# Import Salt Testing libs
@@ -15,9 +14,16 @@ ensure_in_syspath('../../')
# Import Salt libs
import salt.loader
import salt.config
import salt.utils
from salt.state import HighState
from integration import TMP
try:
from collections import OrderedDict
OD_AVAILABLE = True
except ImportError:
OD_AVAILABLE = False
GPG_KEYDIR = os.path.join(TMP, 'gpg-keydir')
# The keyring library uses `getcwd()`, so let's make sure we are in a good directory
@@ -50,6 +56,9 @@ try:
except ImportError:
SKIP = True
if salt.utils.which('gpg') is None:
SKIP = True
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(SKIP, "GPG must be installed")
@@ -73,6 +82,7 @@ class GPGTestCase(TestCase):
decrypted_data_mock.__str__ = lambda x: DECRYPTED_STRING
return decrypted_data_mock
@skipIf(not OD_AVAILABLE, 'OrderedDict not available. Skipping.')
def make_nested_object(self, s):
return OrderedDict([
('array_key', [1, False, s]),

View File

@@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Salt Libs
from salt.utils.schedule import Schedule
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import MagicMock, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
class ScheduleTestCase(TestCase):
'''
Unit tests for salt.utils.schedule module
'''
def setUp(self):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
self.schedule = Schedule({}, {}, returners={})
# delete_job tests
def test_delete_job_exists(self):
'''
Tests ensuring the job exists and deleting it
'''
self.schedule.opts = {'schedule': {'foo': 'bar'}, 'pillar': ''}
self.schedule.delete_job('foo')
self.assertNotIn('foo', self.schedule.opts)
def test_delete_job_in_pillar(self):
'''
Tests deleting job in pillar
'''
self.schedule.opts = {'pillar': {'schedule': {'foo': 'bar'}}, 'schedule': ''}
self.schedule.delete_job('foo')
self.assertNotIn('foo', self.schedule.opts)
def test_delete_job_intervals(self):
'''
Tests removing job from intervals
'''
self.schedule.opts = {'pillar': '', 'schedule': ''}
self.schedule.intervals = {'foo': 'bar'}
self.schedule.delete_job('foo')
self.assertNotIn('foo', self.schedule.intervals)
# add_job tests
def test_add_job_data_not_dict(self):
'''
Tests if data is a dictionary
'''
data = 'foo'
self.assertRaises(ValueError, Schedule.add_job, self.schedule, data)
def test_add_job_multiple_jobs(self):
'''
Tests if more than one job is scheduled at a time
'''
data = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(ValueError, Schedule.add_job, self.schedule, data)
# enable_job tests
def test_enable_job(self):
'''
Tests enabling a job
'''
self.schedule.opts = {'schedule': {'name': {'enabled': 'foo'}}}
Schedule.enable_job(self.schedule, 'name')
self.assertTrue(self.schedule.opts['schedule']['name']['enabled'])
def test_enable_job_pillar(self):
'''
Tests enabling a job in pillar
'''
self.schedule.opts = {'pillar': {'schedule': {'name': {'enabled': 'foo'}}}}
Schedule.enable_job(self.schedule, 'name', where='pillar')
self.assertTrue(self.schedule.opts['pillar']['schedule']['name']['enabled'])
# disable_job tests
def test_disable_job(self):
'''
Tests disabling a job
'''
self.schedule.opts = {'schedule': {'name': {'enabled': 'foo'}}}
Schedule.disable_job(self.schedule, 'name')
self.assertFalse(self.schedule.opts['schedule']['name']['enabled'])
def test_disable_job_pillar(self):
'''
Tests disabling a job in pillar
'''
self.schedule.opts = {'pillar': {'schedule': {'name': {'enabled': 'foo'}}}}
Schedule.disable_job(self.schedule, 'name', where='pillar')
self.assertFalse(self.schedule.opts['pillar']['schedule']['name']['enabled'])
# enable_schedule tests
def test_enable_schedule(self):
'''
Tests enabling the scheduler
'''
self.schedule.opts = {'schedule': {'enabled': 'foo'}}
Schedule.enable_schedule(self.schedule)
self.assertTrue(self.schedule.opts['schedule']['enabled'])
# disable_schedule tests
def test_disable_schedule(self):
'''
Tests disabling the scheduler
'''
self.schedule.opts = {'schedule': {'enabled': 'foo'}}
Schedule.disable_schedule(self.schedule)
self.assertFalse(self.schedule.opts['schedule']['enabled'])
# reload tests
def test_reload_update_schedule_key(self):
'''
Tests reloading the schedule from a saved schedule where both the
saved schedule and self.schedule.opts contain a schedule key
'''
saved = {'schedule': {'foo': 'bar'}}
ret = {'schedule': {'foo': 'bar', 'hello': 'world'}}
self.schedule.opts = {'schedule': {'hello': 'world'}}
Schedule.reload(self.schedule, saved)
self.assertEqual(self.schedule.opts, ret)
def test_reload_update_schedule_no_key(self):
'''
Tests reloading the schedule from a saved schedule that does not
contain a schedule key, while self.schedule.opts does
'''
saved = {'foo': 'bar'}
ret = {'schedule': {'foo': 'bar', 'hello': 'world'}}
self.schedule.opts = {'schedule': {'hello': 'world'}}
Schedule.reload(self.schedule, saved)
self.assertEqual(self.schedule.opts, ret)
def test_reload_no_schedule_in_opts(self):
'''
Tests reloading the schedule from a saved schedule that does not
contain a schedule key and neither does self.schedule.opts
'''
saved = {'foo': 'bar'}
ret = {'schedule': {'foo': 'bar'}}
Schedule.reload(self.schedule, saved)
self.assertEqual(self.schedule.opts, ret)
def test_reload_schedule_in_saved_but_not_opts(self):
'''
Tests reloading the schedule from a saved schedule that contains
a schedule key, but self.schedule.opts does not
'''
saved = {'schedule': {'foo': 'bar'}}
ret = {'schedule': {'schedule': {'foo': 'bar'}}}
Schedule.reload(self.schedule, saved)
self.assertEqual(self.schedule.opts, ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(ScheduleTestCase, needs_daemon=False)