Merge branch 'develop' of https://github.com/saltstack/salt into feature-ext_pillar-file_tree-nodegroups

commit 066d054780
@ -456,6 +456,11 @@
# - hiera: /etc/hiera.yaml
# - cmd_yaml: cat /etc/salt/yaml

# The ext_pillar_first option allows for external pillar sources to populate
# before file system pillar. This allows for targeting file system pillar from
# ext_pillar.
#ext_pillar_first: False

# The pillar_gitfs_ssl_verify option specifies whether to ignore ssl certificate
# errors when contacting the pillar gitfs backend. You might want to set this to
# false if you're using a git backend that uses a self-signed certificate but
@ -1759,6 +1759,21 @@ Default: ``None``

There are additional details at :ref:`salt-pillars`

.. conf_master:: ext_pillar_first

``ext_pillar_first``
--------------------

The ext_pillar_first option allows for external pillar sources to populate
before file system pillar. This allows for targeting file system pillar from
ext_pillar.

Default: ``False``

.. code-block:: yaml

    ext_pillar_first: False

.. conf_master:: pillar_source_merging_strategy

``pillar_source_merging_strategy``
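To make the ordering concrete, here is a minimal Python sketch (not Salt's actual implementation) of what ``ext_pillar_first`` changes, namely which source is rendered first and which is merged on top:

.. code-block:: python

    # Minimal sketch, assuming a plain dict.update() merge; Salt's real
    # behaviour also honours pillar_source_merging_strategy.
    def compile_pillar(fs_pillar, ext_pillar, ext_pillar_first=False):
        merged = {}
        if ext_pillar_first:
            merged.update(ext_pillar)  # ext_pillar populates first...
            merged.update(fs_pillar)   # ...so file system pillar wins conflicts
        else:
            merged.update(fs_pillar)
            merged.update(ext_pillar)  # default: ext_pillar wins conflicts
        return merged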
@ -353,6 +353,8 @@ be set:
    my-linode-config:
      apikey: asldkgfakl;sdfjsjaslfjaklsdjf;askldjfaaklsjdfhasldsadfghdkf
      password: F00barbaz
      ssh_pubkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEOLLbeXgaqRQT9NBAopVz366SdYc0KKX33vAnq+2R user@host
      ssh_key_file: ~/.ssh/id_ed25519
      provider: linode
@ -23,6 +23,8 @@ instances also needs to be set:
    my-linode-config:
      apikey: asldkgfakl;sdfjsjaslfjaklsdjf;askldjfaaklsjdfhasldsadfghdkf
      password: F00barbaz
      ssh_pubkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEOLLbeXgaqRQT9NBAopVz366SdYc0KKX33vAnq+2R user@host
      ssh_key_file: ~/.ssh/id_ed25519
      provider: linode

The password needs to be 8 characters and contain lowercase, uppercase and
@ -52,16 +52,17 @@ and has the same structure:
    '*':
      - packages

In the above top file, it is declared that in the 'base' environment, the glob
matching all minions will have the pillar data found in the 'packages' pillar
available to it. Assuming the 'pillar_roots' value of '/srv/salt' taken from
above, the 'packages' pillar would be located at '/srv/salt/packages.sls'.
In the above top file, it is declared that in the ``base`` environment, the
glob matching all minions will have the pillar data found in the ``packages``
pillar available to it. Assuming the ``pillar_roots`` value of ``/srv/salt``
taken from above, the ``packages`` pillar would be located at
``/srv/salt/packages.sls``.

Another example shows how to use other standard top matching types
to deliver specific salt pillar data to minions with different properties.

Here is an example using the 'grains' matcher to target pillars to minions
by their 'os' grain:
Here is an example using the ``grains`` matcher to target pillars to minions
by their ``os`` grain:

.. code-block:: yaml

@ -85,13 +86,14 @@ by their 'os' grain:
    company: Foo Industries

The above pillar sets two key/value pairs. If a minion is running RedHat, then
the 'apache' key is set to 'httpd' and the 'git' key is set to the value of
'git'. If the minion is running Debian, those values are changed to 'apache2'
and 'git-core' respectively. All minions that have this pillar targeting to them
via a top file will have the key of 'company' with a value of 'Foo Industries'.
the ``apache`` key is set to ``httpd`` and the ``git`` key is set to the value
of ``git``. If the minion is running Debian, those values are changed to
``apache2`` and ``git-core`` respectively. All minions that have this pillar
targeting to them via a top file will have the key of ``company`` with a value
of ``Foo Industries``.

Consequently this data can be used from within modules, renderers, State SLS files, and
more via the shared pillar :ref:`dict <python2:typesmapping>`:
Consequently this data can be used from within modules, renderers, State SLS
files, and more via the shared pillar :ref:`dict <python2:typesmapping>`:

.. code-block:: yaml

@ -153,9 +155,9 @@ and a ``services.sls`` file of:

    bind: named

Then a request for the ``bind`` pillar will only return 'named'; the 'bind9'
value is not available. It is better to structure your pillar files with more
hierarchy. For example your ``package.sls`` file could look like:
Then a request for the ``bind`` pillar will only return ``named``; the
``bind9`` value is not available. It is better to structure your pillar files
with more hierarchy. For example your ``package.sls`` file could look like:

.. code-block:: yaml

@ -181,6 +183,7 @@ merged below a single key:
And a ``packages.sls`` file like:

.. code-block:: yaml

    bind:
      package-name: bind9
      version: 9.9.5

@ -188,6 +191,7 @@ And a ``packages.sls`` file like:
And a ``services.sls`` file like:

.. code-block:: yaml

    bind:
      port: 53
      listen-on: any

@ -195,6 +199,7 @@ And a ``services.sls`` file like:
The resulting pillar will be as follows:

.. code-block:: bash

    $ salt-call pillar.get bind
    local:
        ----------
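A rough Python illustration (plain dict updates, not Salt's actual merge logic) of how the ``bind`` keys from ``packages.sls`` and ``services.sls`` end up combined under a single key:

.. code-block:: python

    # Illustrative only; the effect on these two files is equivalent to:
    packages = {'bind': {'package-name': 'bind9', 'version': '9.9.5'}}
    services = {'bind': {'port': 53, 'listen-on': 'any'}}

    merged = dict(packages['bind'])
    merged.update(services['bind'])
    # -> {'package-name': 'bind9', 'version': '9.9.5',
    #     'port': 53, 'listen-on': 'any'}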
@ -15,7 +15,7 @@ Learn more about range here:
https://github.com/ytoolshed/range/wiki/

Prerequisites
============
=============

To utilize range support in Salt, a range server is required. Setting up a
range server is outside the scope of this document. Apache modules are included

@ -31,7 +31,9 @@ Additionally, the Python seco range libraries must be installed on the salt
master. One can verify that they have been installed correctly via the
following command:

`python -c 'import seco.range'`
.. code-block:: bash

    python -c 'import seco.range'

If no errors are returned, range is installed successfully on the salt master.

@ -41,7 +43,9 @@ Preparing Salt
Range support must be enabled on the salt master by setting the hostname and
port of the range server inside the master configuration file:

range_server: my.range.server.com:80
.. code-block:: yaml

    range_server: my.range.server.com:80

Following this, the master must be restarted for the change to have an effect.

@ -49,24 +53,30 @@ Targeting with Range
====================

Once a cluster has been defined, it can be targeted with a salt command by
using the '-R' or '--range' flags.
using the ``-R`` or ``--range`` flags.

For example, given the following range YAML file being served from a range
server:

$ cat /etc/range/test.yaml
CLUSTER: host1..100.test.com
APPS:
- frontend
- backend
- mysql
.. code-block:: bash

    $ cat /etc/range/test.yaml
    CLUSTER: host1..100.test.com
    APPS:
      - frontend
      - backend
      - mysql


One might target host1 through host100 in the test.com domain with Salt as follows:

salt --range %test:CLUSTER test.ping
.. code-block:: bash

    salt --range %test:CLUSTER test.ping


The following salt command would target three hosts: 'frontend', 'backend' and 'mysql':
The following salt command would target three hosts: ``frontend``, ``backend`` and ``mysql``:

salt --range %test:APPS test.ping
.. code-block:: bash

    salt --range %test:APPS test.ping
@ -12,74 +12,88 @@ and all their dependencies - including shared objects / DLLs.
Getting Started
===============

To build frozen applications, you'll need a suitable build environment for each
of your platforms. You should probably set up a virtualenv in order to limit
the scope of Q/A.
To build frozen applications, a suitable build environment will be needed for
each platform. You should probably set up a virtualenv in order to limit the
scope of Q/A.

This process does work on Windows. Follow the directions at
This process does work on Windows. Directions are available at
`<https://github.com/saltstack/salt-windows-install>`_ for details on
installing Salt in Windows. Only the 32-bit Python and dependencies have been
tested, but they have been tested on 64-bit Windows.

You will need to install ``esky`` and ``bbfreeze`` from PyPI in order to enable
the ``bdist_esky`` command in ``setup.py``.
Install ``bbfreeze``, and then ``esky`` from PyPI in order to enable the
``bdist_esky`` command in ``setup.py``. Salt itself must also be installed, in
addition to its dependencies.

Building and Freezing
=====================

Once you have your tools installed and the environment configured, you can then
``python setup.py bdist`` to get the eggs prepared. After that is done, run
``python setup.py bdist_esky`` to have Esky traverse the module tree and pack
all the scripts up into a redistributable. There will be an appropriately
versioned ``salt-VERSION.zip`` in ``dist/`` if everything went smoothly.
Once you have your tools installed and the environment configured, use
``setup.py`` to prepare the distribution files.

.. code-block:: bash

    python setup.py sdist
    python setup.py bdist

Once the distribution files are in place, Esky can be used to traverse the
module tree and pack all the scripts up into a redistributable.

.. code-block:: bash

    python setup.py bdist_esky

There will be an appropriately versioned ``salt-VERSION.zip`` in ``dist/`` if
everything went smoothly.

Windows
-------
You will need to add ``C:\Python27\lib\site-packages\zmq`` to your PATH
``C:\Python27\lib\site-packages\zmq`` will need to be added to the PATH
variable. This helps bbfreeze find the zmq DLL so it can pack it up.

Using the Frozen Build
======================

Unpack the zip file in your desired install location. Scripts like
Unpack the zip file in the desired install location. Scripts like
``salt-minion`` and ``salt-call`` will be in the root of the zip file. The
associated libraries and bootstrapping will be in the directories at the same
level. (Check the `Esky <https://github.com/cloudmatrix/esky>`_ documentation
for more information)

To support updating your minions in the wild, put your builds on a web server
that your minions can reach. :py:func:`salt.modules.saltutil.update` will
To support updating your minions in the wild, put the builds on a web server
that the minions can reach. :py:func:`salt.modules.saltutil.update` will
trigger an update and (optionally) a restart of the minion service under the
new version.

Gotchas
=======
Troubleshooting
===============

My Windows minion isn't responding
----------------------------------
The process dispatch on Windows is slower than it is on \*nix. You may need to
add '-t 15' to your salt calls to give them plenty of time to return.
A Windows minion isn't responding
---------------------------------
The process dispatch on Windows is slower than it is on \*nix. It may be
necessary to add '-t 15' to salt commands to give minions plenty of time to
return.

Windows and the Visual Studio Redist
------------------------------------
You will need to install the Visual C++ 2008 32-bit redistributable on all
The Visual C++ 2008 32-bit redistributable will need to be installed on all
Windows minions. Esky has an option to pack the library into the zipfile,
but OpenSSL does not seem to acknowledge the new location. If you get a
``no OPENSSL_Applink`` error on the console when trying to start your
frozen minion, you have forgotten to install the redistributable.
but OpenSSL does not seem to acknowledge the new location. If a
``no OPENSSL_Applink`` error appears on the console when trying to start a
frozen minion, the redistributable is not installed.

Mixed Linux environments and Yum
--------------------------------
The Yum Python module doesn't appear to be available on any of the standard
Python package mirrors. If you need to support RHEL/CentOS systems, you
should build on that platform to support all your Linux nodes. Also remember
to build your virtualenv with ``--system-site-packages`` so that the
``yum`` module is included.
Python package mirrors. If RHEL/CentOS systems need to be supported, the frozen
build should be created on that platform to support all the Linux nodes. Remember
to build the virtualenv with ``--system-site-packages`` so that the ``yum``
module is included.

Automatic (Python) module discovery
-----------------------------------
Automatic (Python) module discovery does not work with the late-loaded scheme that
Salt uses for (Salt) modules. You will need to explicitly add any
misbehaving modules to the ``freezer_includes`` in Salt's ``setup.py``.
Always check the zipped application to make sure that the necessary modules
were included.
Automatic (Python) module discovery does not work with the late-loaded scheme
that Salt uses for (Salt) modules. Any misbehaving modules will need to be
explicitly added to the ``freezer_includes`` in Salt's ``setup.py``. Always
check the zipped application to make sure that the necessary modules were
included.
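As a hypothetical illustration of that last point, pinning misbehaving modules in ``setup.py`` could look like the following; the entries shown are examples, not the actual list shipped with Salt:

.. code-block:: python

    # Hypothetical excerpt: modules the freezer cannot discover on its
    # own are listed explicitly for bbfreeze/esky to bundle.
    freezer_includes = [
        'zmq.core.*',
        'zmq.utils.*',
        'yum',  # assumption: present when built with --system-site-packages
    ]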
@ -101,7 +101,7 @@ to them asking that they fetch their pillars from the master:

.. code-block:: bash

    salt '*' saltutil.pillar_refresh
    salt '*' saltutil.refresh_pillar

Now that the minions have the new pillar, it can be retrieved:
@ -272,22 +272,24 @@ class FSCache(multiprocessing.Process):
        log.debug('Shutting down')

if __name__ == '__main__':
def run_test():
    opts = salt.config.master_config('./master')

    opts = salt.config.master_config('./master')
    wlk = FSCache(opts)
    # add two jobs for jobs and cache-files
    wlk.add_job(**{
        'name': 'grains',
        'path': '/var/cache/salt/master/minions',
        'ival': [2, 12, 22],
        'patt': '^.*$'
    })

    wlk = FSCache(opts)
    # add two jobs for jobs and cache-files
    wlk.add_job(**{
        'name': 'grains',
        'path': '/var/cache/salt/master/minions',
        'ival': [2, 12, 22],
        'patt': '^.*$'
    })
    wlk.add_job(**{
        'name': 'mine',
        'path': '/var/cache/salt/master/jobs/',
        'ival': [4, 14, 24, 34, 44, 54],
        'patt': '^.*$'
    })
    wlk.start()

    wlk.add_job(**{
        'name': 'mine',
        'path': '/var/cache/salt/master/jobs/',
        'ival': [4, 14, 24, 34, 44, 54],
        'patt': '^.*$'
    })
    wlk.start()
    run_test()
@ -142,25 +142,28 @@ class FSWorker(multiprocessing.Process):

# test code for the FSWalker class
if __name__ == '__main__':
    context = zmq.Context()
    cupd_in = context.socket(zmq.REP)
    cupd_in.setsockopt(zmq.LINGER, 100)
    cupd_in.bind("ipc:///tmp/fsc_upd")
def run_test():
    context = zmq.Context()
    cupd_in = context.socket(zmq.REP)
    cupd_in.setsockopt(zmq.LINGER, 100)
    cupd_in.bind("ipc:///tmp/fsc_upd")

    poller = zmq.Poller()
    poller.register(cupd_in, zmq.POLLIN)
    serial = salt.payload.Serial('msgpack')
    fsw = FSWorker({'serial': 'msgpack'},
                   'test',
                   **{'path': '/tmp', 'patt': '.*'})
    fsw.start()
    poller = zmq.Poller()
    poller.register(cupd_in, zmq.POLLIN)
    serial = salt.payload.Serial('msgpack')
    fsw = FSWorker({'serial': 'msgpack'},
                   'test',
                   **{'path': '/tmp', 'patt': '.*'})
    fsw.start()

    while 1:
        socks = dict(poller.poll())
        if socks.get(cupd_in) == zmq.POLLIN:
            reply = serial.loads(cupd_in.recv())
            print reply
            cupd_in.send(serial.dumps('OK'))
            break
    fsw.join()
    sys.exit(0)
    while 1:
        socks = dict(poller.poll())
        if socks.get(cupd_in) == zmq.POLLIN:
            reply = serial.loads(cupd_in.recv())
            print reply
            cupd_in.send(serial.dumps('OK'))
            break
    fsw.join()
    sys.exit(0)

    run_test()
@ -24,16 +24,18 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
import copy
import pprint
import logging
from os.path import exists, expanduser

# Import libcloud
try:
    from libcloud.compute.base import NodeAuthPassword
    from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False

# Import salt cloud libs
import salt.config as config
from salt.cloud.exceptions import SaltCloudConfigError
from salt.cloud.libcloudfuncs import *   # pylint: disable=W0614,W0401
from salt.utils import namespaced_function

@ -126,6 +128,43 @@ def get_password(vm_):
    )


def get_pubkey(vm_):
    '''
    Return the SSH pubkey to use
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False)


def get_auth(vm_):
    '''
    Return either NodeAuthSSHKey or NodeAuthPassword, preferring
    NodeAuthSSHKey if both are provided.
    '''
    if get_pubkey(vm_) is not None:
        return NodeAuthSSHKey(get_pubkey(vm_))
    elif get_password(vm_) is not None:
        return NodeAuthPassword(get_password(vm_))
    else:
        raise SaltCloudConfigError(
            'The Linode driver requires either a password or ssh_pubkey with '
            'corresponding ssh_private_key.')


def get_ssh_key_filename(vm_):
    '''
    Return path to filename if get_auth() returns a NodeAuthSSHKey.
    '''
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__,
        default=config.get_cloud_config_value(
            'ssh_pubkey', vm_, __opts__, search_global=False
        ), search_global=False)
    if exists(expanduser(key_filename)):
        return expanduser(key_filename)
    return None


def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested

@ -167,7 +206,7 @@ def create(vm_):
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
        'auth': NodeAuthPassword(get_password(vm_)),
        'auth': get_auth(vm_),
        'ex_private': get_private_ip(vm_),
        'ex_rsize': get_disk_size(vm_, get_size(conn, vm_), get_swap(vm_)),
        'ex_swap': get_swap(vm_)

@ -252,6 +291,9 @@ def create(vm_):
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
    }

    if get_ssh_key_filename(vm_) is not None and get_pubkey(vm_) is not None:
        deploy_kwargs['key_filename'] = get_ssh_key_filename(vm_)

    # Deploy salt-master files, if necessary
    if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
        deploy_kwargs['make_master'] = True
@ -17,7 +17,7 @@
#          CREATED: 10/15/2012 09:49:37 PM WEST
#======================================================================================================================
set -o nounset                              # Treat unset variables as an error
__ScriptVersion="2014.08.23"
__ScriptVersion="2014.08.30"
__ScriptName="bootstrap-salt.sh"

#======================================================================================================================

@ -702,6 +702,9 @@ __gather_linux_system_info() {
            DISTRO_NAME="Oracle Linux"
        elif [ "${DISTRO_NAME}" = "AmazonAMI" ]; then
            DISTRO_NAME="Amazon Linux AMI"
        elif [ "${DISTRO_NAME}" = "Arch" ]; then
            DISTRO_NAME="Arch Linux"
            return
        fi
        rv=$(lsb_release -sr)
        [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv")

@ -1871,10 +1874,13 @@ install_ubuntu_restart_daemons() {

install_ubuntu_check_services() {
    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then
            __check_services_upstart salt-$fname || return 1

@ -2314,10 +2320,13 @@ install_debian_restart_daemons() {

install_debian_check_services() {
    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ ! -f "/etc/init.d/salt-$fname" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ ! -f "/etc/init.d/salt-$fname" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_debian salt-$fname || return 1
    done

@ -2441,10 +2450,13 @@ install_fedora_restart_daemons() {

install_fedora_check_services() {
    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_systemd salt-$fname || return 1
    done

@ -2483,7 +2495,7 @@ __install_epel_repository() {
    elif [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then
        rpm -Uvh --force "http://download.fedoraproject.org/pub/epel/6/${EPEL_ARCH}/epel-release-6-8.noarch.rpm" || return 1
    elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then
        rpm -Uvh --force "http://download.fedoraproject.org/pub/epel/beta/7/${EPEL_ARCH}/epel-release-7-0.2.noarch.rpm" || return 1
        rpm -Uvh --force "http://download.fedoraproject.org/pub/epel/7/${EPEL_ARCH}/epel-release-7-1.noarch.rpm" || return 1
    else
        echoerror "Failed add EPEL repository support."
        return 1

@ -2632,11 +2644,12 @@ install_centos_git() {
}

install_centos_git_post() {
    for fname in master minion syndic; do
    for fname in minion master minion api; do

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue

        # While the RPM's use init.d, so will we.

@ -2718,10 +2731,13 @@ install_centos_testing_post() {

install_centos_check_services() {
    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then
            __check_services_upstart salt-$fname || return 1

@ -2765,7 +2781,7 @@ __test_rhel_optionals_packages() {
            yum --config "${__YUM_CONF_FILE}" install -y ${package} --enablerepo=${_EPEL_REPO} >/dev/null 2>&1
        fi
        if [ $? -ne 0 ]; then
            echoerror "Failed to find an installable '${package}' package. The optional repository or it's subscription might be missing."
            echoerror "Failed to find an installable '${package}' package. The optional repository or its subscription might be missing."
            rm -rf "${__YUM_CONF_DIR}"
            return 1
        fi

@ -3409,10 +3425,13 @@ install_arch_check_services() {
    fi

    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_systemd salt-$fname || return 1
    done

@ -3822,7 +3841,7 @@ install_opensuse_stable_deps() {

    zypper --gpg-auto-import-keys --non-interactive refresh
    if [ $? -ne 0 ] && [ $? -ne 4 ]; then
        # If the exit code is not 0, and it's not 4(failed to update a
        # If the exit code is not 0, and it's not 4 (failed to update a
        # repository) return a failure. Otherwise continue.
        return 1
    fi

@ -3967,10 +3986,13 @@ install_opensuse_check_services() {
    fi

    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_systemd salt-$fname > /dev/null 2>&1 || __check_services_systemd salt-$fname.service > /dev/null 2>&1 || return 1
    done

@ -4152,10 +4174,13 @@ install_suse_check_services() {
    fi

    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_systemd salt-$fname || return 1
    done

@ -4294,10 +4319,13 @@ install_gentoo_check_services() {
    fi

    for fname in minion master syndic api; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        #[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
        __check_services_systemd salt-$fname || return 1
    done
@ -256,7 +256,7 @@ VALID_OPTS = {
    'username': str,
    'password': str,
    'zmq_filtering': bool,
    'fs_cache': bool,
    'con_cache': bool,
}

# default configurations

@ -543,7 +543,7 @@ DEFAULT_MASTER_OPTS = {
    'master_pubkey_signature': 'master_pubkey_signature',
    'master_use_pubkey_signature': False,
    'zmq_filtering': False,
    'fs_cache': False,
    'con_cache': False,
}

# ----- Salt Cloud Configuration Defaults ----------------------------------->
@ -1035,7 +1035,7 @@ def os_data():

    # Load additional OS family grains
    if grains['os_family'] == "RedHat":
        grains['osmajorrelease'] = grains['osrelease'].split('.', 1)
        grains['osmajorrelease'] = grains['osrelease'].split('.', 1)[0]

        grains['osfinger'] = '{os}-{ver}'.format(
            os=grains['osfullname'],

@ -1044,6 +1044,12 @@ def os_data():
        grains['osfinger'] = '{os}-{ver}'.format(
            os=grains['osfullname'],
            ver=grains['osrelease'])
    elif grains['osfullname'] == "Debian":
        grains['osmajorrelease'] = grains['osrelease'].split('.', 1)[0]

        grains['osfinger'] = '{os}-{ver}'.format(
            os=grains['osfullname'],
            ver=grains['osrelease'].partition('.')[0])

    if grains.get('osrelease', ''):
        osrelease_info = grains['osrelease'].split('.')
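The trailing ``[0]`` in the fix matters because ``str.split`` returns a list; a quick illustration:

.. code-block:: python

    # Without the index, the osmajorrelease grain would hold a list such
    # as ['6', '5'] instead of the major-version string '6'.
    osrelease = '6.5'
    assert osrelease.split('.', 1) == ['6', '5']
    assert osrelease.split('.', 1)[0] == '6'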
@ -47,7 +47,8 @@ from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, ins
from salt.exceptions import MasterExit
from salt.utils.event import tagify
import binascii
import salt.caches
from salt.utils.master import ConnectedCache
from salt.utils.cache import CacheCli

# Import halite libs
try:

@ -333,8 +334,8 @@ class Master(SMaster):
                clean_proc(reqserv.halite)
            if hasattr(reqserv, 'reactor'):
                clean_proc(reqserv.reactor)
            if hasattr(reqserv, 'fscache'):
                clean_proc(reqserv.fscache)
            if hasattr(reqserv, 'con_cache'):
                clean_proc(reqserv.con_cache)
            for proc in reqserv.work_procs:
                clean_proc(proc)
            raise MasterExit

@ -554,20 +555,12 @@ class ReqServer(object):
        '''
        start all available caches if configured
        '''

        if self.opts['fs_cache']:
            self.fscache = salt.caches.FSCache(self.opts)

            # add a job that caches grains and mine data every 30 seconds
            self.fscache.add_job(
                **{
                    'name': 'minions',
                    'path': '/var/cache/salt/master/minions',
                    'ival': [0, 30],
                    'patt': '^.*$'
                }
            )
            self.fscache.start()
        if self.opts['con_cache']:
            log.debug('Starting ConCache')
            self.con_cache = ConnectedCache(self.opts)
            self.con_cache.start()
        else:
            return False

    def start_halite(self):
        '''

@ -1323,6 +1316,7 @@ class ClearFuncs(object):
        self.wheel_ = salt.wheel.Wheel(opts)
        self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
        self.auto_key = salt.daemons.masterapi.AutoKey(opts)
        self.cache_cli = CacheCli(self.opts)

    def _auth(self, load):
        '''

@ -1350,7 +1344,16 @@ class ClearFuncs(object):

        # 0 is default which should be 'unlimited'
        if self.opts['max_minions'] > 0:
            minions = salt.utils.minions.CkMinions(self.opts).connected_ids()
            # use the ConCache if enabled, else use the minion utils
            if self.cache_cli:
                minions = self.cache_cli.get_cached()
            else:
                minions = self.ckminions.connected_ids()
                if len(minions) > 1000:
                    log.info('With large numbers of minions it is advised '
                             'to enable the ConCache with \'con_cache: True\' '
                             'in the masters configuration file.')

            if not len(minions) < self.opts['max_minions']:
                # we reject new minions, minions that are already
                # connected must be allowed for the mine, highstate, etc.

@ -1559,6 +1562,10 @@ class ClearFuncs(object):
                    fp_.write(load['pub'])
            pub = None

        # the con_cache is enabled, send the minion id to the cache
        if self.cache_cli:
            self.cache_cli.put_cache([load['id']])

        # The key payload may sometimes be corrupt when using auto-accept
        # and an empty request comes in
        try:
@ -109,3 +109,22 @@ def dump(device, args=None):
        return ret
    else:
        return False


def resize2fs(device):
    '''
    Resizes the filesystem.

    CLI Example:
    .. code-block:: bash

        salt '*' blockdev.resize2fs /dev/sda1
    '''
    ret = {}
    cmd = 'resize2fs {0}'.format(device)
    try:
        out = __salt__['cmd.run_all'](cmd)
    except subprocess.CalledProcessError as err:
        return False
    if out['retcode'] == 0:
        return True
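For context, ``cmd.run_all`` returns a result dictionary rather than raising on a non-zero exit, which is why the new function inspects ``out['retcode']``; a sketch of the shape, with illustrative values:

.. code-block:: python

    # Illustrative values only; 'retcode', 'stdout' and 'stderr' are the
    # keys the function relies on.
    out = {
        'retcode': 0,
        'stdout': 'The filesystem is already 262144 blocks long.',
        'stderr': '',
    }
    success = out['retcode'] == 0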
@ -63,12 +63,62 @@ def info(name):
    except ValueError:
        pass
    else:
        ret['change'] = change
        ret['expire'] = expire
        ret['change'] = int(change)
        ret['expire'] = int(expire)

    return ret


def set_change(name, change):
    '''
    Sets the time at which the password expires (in seconds since the EPOCH).
    See man usermod on NetBSD and OpenBSD or man pw on FreeBSD.
    "0" means the password never expires.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_change username 1419980400
    '''
    pre_info = info(name)
    if change == pre_info['change']:
        return True
    if __grains__['kernel'] == 'FreeBSD':
        cmd = 'pw user mod {0} -f {1}'.format(name, change)
    else:
        cmd = 'usermod -f {0} {1}'.format(change, name)
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['change'] != pre_info['change']:
        return post_info['change'] == change


def set_expire(name, expire):
    '''
    Sets the time at which the account expires (in seconds since the EPOCH).
    See man usermod on NetBSD and OpenBSD or man pw on FreeBSD.
    "0" means the account never expires.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_expire username 1419980400
    '''
    pre_info = info(name)
    if expire == pre_info['expire']:
        return True
    if __grains__['kernel'] == 'FreeBSD':
        cmd = 'pw user mod {0} -e {1}'.format(name, expire)
    else:
        cmd = 'usermod -e {0} {1}'.format(expire, name)
    __salt__['cmd.run'](cmd)
    post_info = info(name)
    if post_info['expire'] != pre_info['expire']:
        return post_info['expire'] == expire


def set_password(name, password):
    '''
    Set the password for a named user. The password must be a properly defined
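The ``int()`` casts added to ``info()`` matter for the early-exit checks in the new setters, since Python equality does not coerce across types:

.. code-block:: python

    # Without int(), the `change == pre_info['change']` shortcut in
    # set_change() could never fire for a numeric argument.
    assert '16000' != 16000
    assert int('16000') == 16000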
@ -1146,7 +1146,7 @@ def replace(path,
                found = True

                # Identity check each potential change until one change is made
                if has_changes is False and result is not line:
                if has_changes is False and result != line:
                    has_changes = True

        if show_changes:
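The one-character change is significant: ``is not`` compares object identity, while ``!=`` compares values, and two equal strings need not be the same object:

.. code-block:: python

    # In CPython, runtime concatenation builds a new str object, so an
    # identity test can report a difference even when the values match.
    prefix = 'same '
    a = 'same text'
    b = prefix + 'text'
    assert a == b        # equal contents: no real change happened
    assert a is not b    # distinct objects: 'is not' would still flag one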
@ -5,17 +5,23 @@ Salt interface to LDAP commands

:depends:   - ldap Python module
:configuration: In order to connect to LDAP, certain configuration is required
    in the minion config on the LDAP server. The minimum configuration items
    that must be set are::
    that must be set are:

    .. code-block:: yaml

        ldap.basedn: dc=acme,dc=com (example values, adjust to suit)

    If your LDAP server requires authentication then you must also set::
    If your LDAP server requires authentication then you must also set:

    .. code-block:: yaml

        ldap.anonymous: False
        ldap.binddn: admin
        ldap.bindpw: password

    In addition, the following optional values may be set::
    In addition, the following optional values may be set:

    .. code-block:: yaml

        ldap.server: localhost (default=localhost, see warning below)
        ldap.port: 389 (default=389, standard port)
@ -50,7 +50,10 @@ def available(name):

        salt '*' service.available sshd
    '''
    return name in get_all()
    cmd = '{0} status {1}'.format(_cmd(), name)
    if __salt__['cmd.retcode'](cmd) == 2:
        return False
    return True


def missing(name):

@ -64,7 +67,7 @@ def missing(name):

        salt '*' service.missing sshd
    '''
    return name not in get_all()
    return not available(name)


def get_all():
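The rewrite keys off the exit status of the service-control command instead of parsing ``get_all()`` output; the same pattern outside Salt might look like this sketch (the command path is a placeholder, the real one comes from Salt's ``_cmd()`` helper):

.. code-block:: python

    import subprocess

    # Sketch of the convention the hunk relies on: the status command
    # exits 2 when the service does not exist.
    def available(name):
        rc = subprocess.call('/usr/sbin/rcctl status {0}'.format(name),
                             shell=True)
        return rc != 2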
@ -56,9 +56,8 @@ def get_zone():
    elif 'Gentoo' in __grains__['os_family']:
        with salt.utils.fopen('/etc/timezone', 'r') as ofh:
            return ofh.read()
    elif 'FreeBSD' in __grains__['os_family']:
        return ('FreeBSD does not store a human-readable timezone. Please'
                'consider using timezone.get_zonecode or timezone.zonecompare')
    elif __grains__['os_family'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
        return os.readlink('/etc/localtime').lstrip('/usr/share/zoneinfo/')
    elif 'Solaris' in __grains__['os_family']:
        cmd = 'grep "TZ=" /etc/TIMEZONE'
        out = __salt__['cmd.run'](cmd).split('=')
@ -78,7 +78,8 @@ def add(name,
        roomnumber='',
        workphone='',
        homephone='',
        createhome=True):
        createhome=True,
        loginclass=None):
    '''
    Add a user to the minion

@ -146,6 +147,10 @@ def add(name,
            and __grains__['kernel'] != 'OpenBSD'):
        cmd.append('-r')

    if __grains__['kernel'] == 'OpenBSD':
        if loginclass is not None:
            cmd.extend(['-L', loginclass])

    cmd.append(name)

    ret = __salt__['cmd.run_all'](' '.join(cmd))

@ -473,6 +478,29 @@ def chhomephone(name, homephone):
    return False


def chloginclass(name, loginclass):
    '''
    Change the default login class of the user

    CLI Example:

    .. code-block:: bash

        salt '*' user.chloginclass foo staff
    '''
    if __grains__['kernel'] != 'OpenBSD':
        return False
    pre_info = get_loginclass(name)
    if loginclass == pre_info['loginclass']:
        return True
    cmd = 'usermod -L {0} {1}'.format(loginclass, name)
    __salt__['cmd.run'](cmd)
    post_info = get_loginclass(name)
    if post_info['loginclass'] != pre_info['loginclass']:
        return post_info['loginclass'] == loginclass
    return False


def info(name):
    '''
    Return user information

@ -491,6 +519,29 @@ def info(name):
    return _format_info(data)


def get_loginclass(name):
    '''
    Get the login class of the user

    CLI Example:

    .. code-block:: bash

        salt '*' user.get_loginclass foo
    '''
    if __grains__['kernel'] != 'OpenBSD':
        return False
    info = __salt__['cmd.run_stdout']('userinfo {0}'.format(name),
                                      output_loglevel='debug')
    for line in info.splitlines():
        if line.startswith("class"):
            loginclass = line.split()
            if len(loginclass) == 2:
                return {'loginclass': loginclass[1]}
            else:
                return {'loginclass': '""'}


def _format_info(data):
    '''
    Return user information in a pretty way
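For reference, ``userinfo`` on OpenBSD prints one attribute per line, which is what the ``line.split()`` parsing in ``get_loginclass`` leans on; a small illustration (the output line is an assumed example):

.. code-block:: python

    # The function keeps the second whitespace-separated field of the
    # line starting with 'class' as the login class.
    line = 'class      staff'
    parts = line.split()
    assert parts == ['class', 'staff']
    assert parts[1] == 'staff'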
@ -564,10 +564,19 @@ class Pillar(object):
        Render the pillar data and return
        '''
        top, terrors = self.get_top()
        matches = self.top_matches(top)
        pillar, errors = self.render_pillar(matches)
        if ext:
            pillar = self.ext_pillar(pillar, pillar_dirs)
            if self.opts.get('ext_pillar_first', False):
                self.opts['pillar'] = self.ext_pillar({}, pillar_dirs)
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches)
                pillar = self.merge_sources(pillar, self.opts['pillar'])
            else:
                matches = self.top_matches(top)
                pillar, errors = self.render_pillar(matches)
                pillar = self.ext_pillar(pillar, pillar_dirs)
        else:
            matches = self.top_matches(top)
            pillar, errors = self.render_pillar(matches)
        errors.extend(terrors)
        if self.opts.get('pillar_opts', True):
            mopts = dict(self.opts)
@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
'''
Manage Cloudwatch alarms
=================
========================

.. versionadded:: 2014.7.0
@ -342,9 +342,9 @@ def loaded(name, source=None, source_hash='', force=False):

    .. note::

        See first the documentation for salt file.managed
        <http://docs.saltstack.com/en/latest/ref/states/all/_
        salt.states.file.html#salt.states.file.managed>
        See first the documentation for salt file.managed
        <http://docs.saltstack.com/en/latest/ref/states/all/_
        salt.states.file.html#salt.states.file.managed>

    force
        Load even if the image exists

@ -560,26 +560,43 @@ def absent(name):
    '''
    ins_container = __salt__['docker.inspect_container']
    cinfos = ins_container(name)
    changes = {}

    if cinfos['status']:
        cid = cinfos['id']
        changes[cid] = {}
        is_running = __salt__['docker.is_running'](cid)
        # destroy if we found meat to do

        # Stop container gracefully, if running
        if is_running:
            changes[cid]['old'] = 'running'
            __salt__['docker.stop'](cid)
            is_running = __salt__['docker.is_running'](cid)
            if is_running:
                return _invalid(
                    comment=('Container {0!r}'
                             ' could not be stopped'.format(cid)))
                return _invalid(comment=("Container {0!r} could not be stopped"
                                         .format(cid)))
            else:
                return _valid(comment=('Container {0!r}'
                                       ' was stopped,'.format(cid)),
                              changes={name: True})
                changes[cid]['new'] = 'stopped'
        else:
            return _valid(comment=('Container {0!r}'
                                   ' is stopped,'.format(cid)))
            changes[cid]['old'] = 'stopped'

        # Remove the stopped container
        removal = __salt__['docker.remove_container'](cid)

        if removal['status'] is True:
            changes[cid]['new'] = 'removed'
            return _valid(comment=("Container {0!r} has been destroyed"
                                   .format(cid)),
                          changes=changes)
        else:
            if 'new' not in changes[cid]:
                changes = None
            return _invalid(comment=("Container {0!r} could not be destroyed"
                                     .format(cid)),
                            changes=changes)

    else:
        return _valid(comment='Container {0!r} not found'.format(name))
        return _valid(comment="Container {0!r} not found".format(name))


def present(name):
@ -61,6 +61,7 @@ def _changes(name,
             roomnumber='',
             workphone='',
             homephone='',
             loginclass=None,
             date=0,
             mindays=0,
             maxdays=999999,

@ -146,6 +147,12 @@ def _changes(name,
    if 'user.chhomephone' in __salt__:
        if homephone is not None and lusr['homephone'] != homephone:
            change['homephone'] = homephone
    # OpenBSD login class
    if __grains__['kernel'] == 'OpenBSD':
        if not loginclass:
            loginclass = '""'
        if __salt__['user.get_loginclass'](name)['loginclass'] != loginclass:
            change['loginclass'] = loginclass

    return change

@ -169,6 +176,7 @@ def present(name,
            roomnumber=None,
            workphone=None,
            homephone=None,
            loginclass=None,
            date=None,
            mindays=None,
            maxdays=None,

@ -244,8 +252,11 @@ def present(name,
        Choose UID in the range of FIRST_SYSTEM_UID and LAST_SYSTEM_UID, Default is
        ``False``.

    loginclass
        The login class, defaults to empty
        (BSD only)

    User comment field (GECOS) support (currently Linux, FreeBSD, and MacOS
    User comment field (GECOS) support (currently Linux, BSD, and MacOS
    only):

    The below values should be specified as strings to avoid ambiguities when

@ -368,6 +379,8 @@ def present(name,
        # The user is present
        if 'shadow.info' in __salt__:
            lshad = __salt__['shadow.info'](name)
        if __grains__['kernel'] == 'OpenBSD':
            lcpre = __salt__['user.get_loginclass'](name)
        pre = __salt__['user.info'](name)
        for key, val in changes.items():
            if key == 'passwd' and not empty_password:

@ -409,6 +422,8 @@ def present(name,
        if 'shadow.info' in __salt__:
            if lshad['passwd'] != password:
                spost = __salt__['shadow.info'](name)
        if __grains__['kernel'] == 'OpenBSD':
            lcpost = __salt__['user.get_loginclass'](name)
        # See if anything changed
        for key in post:
            if post[key] != pre[key]:

@ -417,6 +432,9 @@ def present(name,
            for key in spost:
                if lshad[key] != spost[key]:
                    ret['changes'][key] = spost[key]
        if __grains__['kernel'] == 'OpenBSD':
            if lcpost['loginclass'] != lcpre['loginclass']:
                ret['changes']['loginclass'] = lcpost['loginclass']
        if ret['changes']:
            ret['comment'] = 'Updated user {0}'.format(name)
        changes = _changes(name,

@ -435,6 +453,7 @@ def present(name,
                           roomnumber,
                           workphone,
                           homephone,
                           loginclass,
                           date,
                           mindays,
                           maxdays,

@ -471,6 +490,7 @@ def present(name,
                         roomnumber=roomnumber,
                         workphone=workphone,
                         homephone=homephone,
                         loginclass=loginclass,
                         createhome=createhome):
            ret['comment'] = 'New user {0} created'.format(name)
            ret['changes'] = __salt__['user.info'](name)
@ -1,5 +1,9 @@
# -*- coding: utf-8 -*-
import time
import salt.config
import zmq
import salt.payload
import os


class CacheDict(dict):

@ -38,3 +42,60 @@ class CacheDict(dict):
    def __contains__(self, key):
        self._enforce_ttl_key(key)
        return dict.__contains__(self, key)


class CacheCli(object):
    '''
    Connection client for the ConCache. Should be used by all
    components that need the list of currently connected minions
    '''

    def __init__(self, opts):
        '''
        Sets up the zmq-connection to the ConCache
        '''
        super(CacheCli, self).__init__()
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts.get('serial', ''))
        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.cache_upd_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')

        context = zmq.Context()

        # the socket for talking to the cache
        self.creq_out = context.socket(zmq.REQ)
        self.creq_out.setsockopt(zmq.LINGER, 100)
        self.creq_out.connect('ipc://' + self.cache_sock)

        # the socket for sending updates to the cache
        self.cupd_out = context.socket(zmq.PUB)
        self.cupd_out.setsockopt(zmq.LINGER, 1)
        self.cupd_out.connect('ipc://' + self.cache_upd_sock)

    def put_cache(self, minions):
        '''
        published the given minions to the ConCache
        '''
        self.cupd_out.send(self.serial.dumps(minions))

    def get_cached(self):
        '''
        queries the ConCache for a list of currently connected minions
        '''
        msg = self.serial.dumps('minions')
        self.creq_out.send(msg)
        min_list = self.serial.loads(self.creq_out.recv())
        return min_list

# test code for the CacheCli
if __name__ == '__main__':

    opts = salt.config.master_config('/etc/salt/master')

    ccli = CacheCli(opts)

    ccli.put_cache(['test1', 'test10', 'test34'])
    ccli.put_cache(['test12'])
    ccli.put_cache(['test18'])
    ccli.put_cache(['test21'])
    print "minions: ", ccli.get_cached()
@ -10,14 +10,19 @@

# Import python libs
import os
import logging

from threading import Thread, Event
import multiprocessing
import zmq
import signal
# Import salt libs
import salt.log
import salt.client
import salt.pillar
import salt.utils
import salt.utils.minions
import salt.payload
from salt.exceptions import SaltException
import salt.config

log = logging.getLogger(__name__)

@ -382,3 +387,241 @@ class MasterPillarUtil(object):
            except (OSError, IOError):
                return True
        return True


class CacheTimer(Thread):
    '''
    A basic timer class the fires timer-events every second.
    This is used for cleanup by the ConnectedCache()
    '''
    def __init__(self, opts, event):
        Thread.__init__(self)
        self.opts = opts
        self.stopped = event
        self.daemon = True
        self.serial = salt.payload.Serial(opts.get('serial', ''))
        self.timer_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')

    def run(self):
        '''
        main loop that fires the event every second
        '''
        context = zmq.Context()
        # the socket for outgoing timer events
        socket = context.socket(zmq.PUB)
        socket.setsockopt(zmq.LINGER, 100)
        socket.bind('ipc://' + self.timer_sock)

        count = 0
        log.debug('ConCache-Timer started')
        while not self.stopped.wait(1):
            socket.send(self.serial.dumps(count))

            count += 1
            if count >= 60:
                count = 0


class ConnectedCache(multiprocessing.Process):
    '''
    Provides access to all minions ids that the master has
    successfully authenticated. The cache is cleaned up regularly by
    comparing it to the IPs that have open connections to
    the master publisher port.
    '''

    def __init__(self, opts):
        '''
        starts the timer and inits the cache itself
        '''
        super(ConnectedCache, self).__init__()
        log.debug('ConCache initializing...')

        # the possible settings for the cache
        self.opts = opts

        # the actual cached minion ids
        self.minions = []

        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
        self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
        self.cleanup()

        # the timer provides 1-second intervals to the loop in run()
        # to make the cache system most responsive, we do not use a loop-
        # delay which makes it hard to get 1-second intervals without a timer
        self.timer_stop = Event()
        self.timer = CacheTimer(self.opts, self.timer_stop)
        self.timer.start()
        self.running = True

    def signal_handler(self, sig, frame):
        '''
        handle signals and shutdown
        '''
        self.stop()

    def renew(self):
        '''
        compares the current minion list against the ips
        connected on the master publisher port and updates
        the minion list accordingly
        '''
        log.debug('ConCache renewing minion cache')
        new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
        self.minions = new_mins
        log.debug('ConCache received {0} minion ids'.format(len(new_mins)))

    def cleanup(self):
        '''
        remove sockets on shutdown
        '''
        log.debug('ConCache cleaning up')
        if os.path.exists(self.cache_sock):
            os.remove(self.cache_sock)
        if os.path.exists(self.update_sock):
            os.remove(self.update_sock)
        if os.path.exists(self.upd_t_sock):
            os.remove(self.upd_t_sock)

    def secure(self):
        '''
        secure the sockets for root-only access
        '''
        log.debug('ConCache securing sockets')
        if os.path.exists(self.cache_sock):
            os.chmod(self.cache_sock, 0600)
        if os.path.exists(self.update_sock):
            os.chmod(self.update_sock, 0600)
        if os.path.exists(self.upd_t_sock):
            os.chmod(self.upd_t_sock, 0600)

    def stop(self):
        '''
        shutdown cache process
        '''
        # avoid getting called twice
        self.cleanup()
        if self.running:
            self.running = False
            self.timer_stop.set()
            self.timer.join()

    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)

        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, '')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)

        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, '')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)

        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)

        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))

        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

        # secure the sockets from the world
        self.secure()

        log.info('ConCache started')

        while self.running:

            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occured')
                log.exception(zmq_err)
                self.stop()

            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.trace('ConCache Received request: {0}'.format(msg))

                # requests to the minion list are send as str's
                if isinstance(msg, str):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)

                else:
                    reply = serial.dumps(False)
                    creq_in.send(reply)

            # check for next cache-update from workers
            elif socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # tell the worker to exit
                #cupd_in.send(serial.dumps('ACK'))

                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue

                # the cache will receive lists of minions
                # 1. if the list only has 1 item, its from an MWorker, we append it
                # 2. anything else is considered malformed

                if len(new_c_data) == 0:
                    log.debug('ConCache Got empty update from worker')
                elif len(new_c_data) == 1:
                    if new_c_data[0] not in self.minions:
                        log.trace('ConCache Adding minion {0} to cache'.format(new_c_data[0]))
                        self.minions.append(new_c_data[0])
                else:
                    log.debug('ConCache Got malformed result dict from worker')
                del new_c_data

                log.info('ConCache {0} entries in cache'.format(len(self.minions)))

            # check for next timer-event to start new jobs
            elif socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())

                # update the list every 30 seconds
                if (sec_event == 31) or (sec_event == 1):
                    self.renew()

        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')

# test code for the ConCache class
if __name__ == '__main__':

    opts = salt.config.master_config('/etc/salt/master')

    conc = ConnectedCache(opts)
    conc.start()
@ -624,7 +624,7 @@ class Schedule(object):
                if data['_when_run']:
                    data['_when_run'] = False
                    run = True
            if 'cron' in data:
            elif 'cron' in data:
                if seconds == 1:
                    run = True
            else:

@ -644,7 +644,7 @@ class Schedule(object):
                if data['_when_run']:
                    data['_when_run'] = False
                    run = True
            if 'cron' in data:
            elif 'cron' in data:
                if seconds == 1:
                    run = True
            else:
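The ``if`` to ``elif`` change makes the ``when`` and ``cron`` checks mutually exclusive, so a job carrying both keys can no longer have its ``when`` decision re-evaluated by the ``cron`` branch; a stripped-down sketch of the resulting control flow:

.. code-block:: python

    # Sketch only; Schedule.eval() tracks much more state than this.
    if 'when' in data:
        if data['_when_run']:
            data['_when_run'] = False
            run = True
    elif 'cron' in data:      # no longer reconsidered after a 'when' match
        if seconds == 1:
            run = True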