Merge pull request #37100 from rallytime/merge-carbon

[carbon] Merge forward from 2016.3 to carbon
This commit is contained in:
Mike Place 2016-10-20 14:40:00 +09:00 committed by GitHub
commit f26475753e
15 changed files with 343 additions and 102 deletions

View File

@ -963,6 +963,57 @@ talking to the intended master.
syndic_finger: 'ab:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:50:10'
.. conf_minion:: proxy_host
``proxy_host``
--------------
Default: ``''``
The hostname used for HTTP proxy access.
.. code-block:: yaml
proxy_host: proxy.my-domain
.. conf_minion:: proxy_port
``proxy_port``
--------------
Default: ``0``
The port number used for HTTP proxy access.
.. code-block:: yaml
proxy_port: 31337
.. conf_minion:: proxy_username
``proxy_username``
------------------
Default: ``''``
The username used for HTTP proxy access.
.. code-block:: yaml
proxy_username: charon
.. conf_minion:: proxy_password
``proxy_password``
------------------
Default: ``''``
The password used for HTTP proxy access.
.. code-block:: yaml
proxy_password: obolus
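Taken together, a minion that must reach the outside world through an
authenticated HTTP proxy would carry all four settings in its configuration
file. A sketch, reusing the illustrative values above:

.. code-block:: yaml

    proxy_host: proxy.my-domain
    proxy_port: 31337
    proxy_username: charon
    proxy_password: obolus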
Minion Module Management
========================

View File

@ -56,6 +56,9 @@ This function forms a basic query, but with some add-ons not present in the
currently available in these libraries has been added, but can be in future
iterations.
HTTPS Request Methods
`````````````````````
A basic query can be performed by calling this function with no more than a
single URL:
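For instance, a minimal sketch of such a call (the URL is a placeholder):

.. code-block:: python

    salt.utils.http.query('http://example.com')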
@ -82,7 +85,10 @@ required by the remote server (XML, JSON, plain text, etc).
data=json.loads(mydict)
)
Bear in mind that this data must be sent pre-formatted; this function will not
Data Formatting and Templating
``````````````````````````````
Bear in mind that the data must be sent pre-formatted; this function will not
format it for you. However, a templated file stored on the local system may be
passed through, along with variables to populate it with. To pass through only
the file (untemplated):
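A sketch of what that might look like, assuming an illustrative path on the
minion:

.. code-block:: python

    salt.utils.http.query(
        'http://example.com',
        data_file='/srv/salt/somefile.txt'
    )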
@ -147,6 +153,9 @@ However, this can be changed to ``master`` if necessary.
node='master'
)
Headers
```````
Headers may also be passed through, either as a ``header_list``, a
``header_dict``, or as a ``header_file``. As with the ``data_file``, the
``header_file`` may also be templated. Take note that because HTTP headers are
@ -168,6 +177,9 @@ Because much of the data that would be templated between headers and data may be
the same, the ``template_data`` is the same for both. Correcting possible
variable name collisions is up to the user.
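As a sketch, headers supplied via ``header_dict`` might look like this (the
header values are illustrative):

.. code-block:: python

    salt.utils.http.query(
        'http://example.com',
        header_dict={'Content-Type': 'application/json'}
    )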
Authentication
``````````````
The ``query()`` function supports basic HTTP authentication. A username and
password may be passed in as ``username`` and ``password``, respectively.
@ -179,6 +191,9 @@ password may be passed in as ``username`` and ``password``, respectively.
password='5700g3543v4r',
)
Cookies and Sessions
````````````````````
Cookies are also supported, using Python's built-in ``cookielib``. However, they
are turned off by default. To turn cookies on, set ``cookies`` to True.
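For example, to enable cookies on a single query (the URL is a placeholder):

.. code-block:: python

    salt.utils.http.query(
        'http://example.com',
        cookies=True
    )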
@ -232,6 +247,29 @@ The format of this file is msgpack, which is consistent with much of the rest
of Salt's internal structure. Historically, the extension for this file is
``.p``. There are no current plans to make this configurable.
Proxy
`````
If the ``tornado`` backend is used (``tornado`` is the default), proxy
information configured in ``proxy_host``, ``proxy_port``, ``proxy_username``,
and ``proxy_password`` from the ``__opts__`` dictionary will be used. Normally
these are set in the minion configuration file.
.. code-block:: yaml
proxy_host: proxy.my-domain
proxy_port: 31337
proxy_username: charon
proxy_password: obolus
.. code-block:: python
salt.utils.http.query(
'http://example.com',
opts=__opts__,
backend='tornado'
)
Return Data
~~~~~~~~~~~

View File

@ -522,6 +522,15 @@ class MinionBase(object):
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
opts['master_uri_list'] = list()
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
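# The resulting list holds one URI per configured master, e.g. (illustrative):
# ['tcp://10.0.0.1:4506', 'tcp://10.0.0.2:4506']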
while True:
attempts += 1

View File

@ -2824,14 +2824,28 @@ def powershell(cmd,
saltenv='base',
use_vt=False,
password=None,
depth=None,
encode_cmd=False,
**kwargs):
'''
Execute the passed PowerShell command and return the output as a string.
Execute the passed PowerShell command and return the output as a dictionary.
Other ``cmd.*`` functions return the raw text output of the command. This
function appends ``| ConvertTo-JSON`` to the command and then parses the
JSON into a Python dictionary. If you want the raw textual result of your
PowerShell command you should use ``cmd.run`` with the ``shell=powershell``
option.
For example:
.. code-block:: bash
salt '*' cmd.run '$PSVersionTable.CLRVersion' shell=powershell
salt '*' cmd.run 'Get-NetTCPConnection' shell=powershell
.. versionadded:: 2016.3.0
.. warning ::
.. warning::
This passes the cmd argument directly to PowerShell
without any further processing! Be absolutely sure that you
@ -2841,6 +2855,16 @@ def powershell(cmd,
Note that ``env`` represents the environment variables for the command, and
should be formatted as a dict, or a YAML string which resolves to a dict.
In addition to the normal ``cmd.run`` parameters, this command offers the
``depth`` parameter to change the depth passed to the ``ConvertTo-JSON``
PowerShell cmdlet. The Windows default is 2. If you need more depth, set it
here.
.. note::
For some commands, setting the depth to a value greater than 4 greatly
increases the time it takes for the command to return and in many cases
returns useless data.
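As a sketch, a deeper conversion could be requested from the CLI like so (the
command and depth value are illustrative):

.. code-block:: bash

    salt '*' cmd.powershell 'Get-NetTCPConnection' depth=4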
:param str cmd: The powershell command to run.
:param str cwd: The current working directory to execute the command in
@ -2933,10 +2957,19 @@ def powershell(cmd,
:param str saltenv: The salt environment to use. Default is 'base'
:param int depth: The number of levels of contained objects to be included.
Default is 2. Values greater than 4 seem to greatly increase the time
it takes for the command to complete for some commands, e.g. ``dir``.
.. versionadded:: 2016.3.4
:param bool encode_cmd: Encode the command before executing. Use in cases
where characters may be dropped or incorrectly converted when executed.
Default is False.
:returns:
:dict: A dictionary of data returned by the powershell command.
CLI Example:
.. code-block:: powershell
@ -2949,7 +2982,9 @@ def powershell(cmd,
python_shell = True
# Append PowerShell Object formatting
cmd = '{0} | ConvertTo-Json -Depth 32'.format(cmd)
cmd += ' | ConvertTo-JSON'
if depth is not None:
cmd += ' -Depth {0}'.format(depth)
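# For example, with cmd='Get-NetTCPConnection' and depth=4 the command sent to
# PowerShell becomes 'Get-NetTCPConnection | ConvertTo-JSON -Depth 4' (illustrative).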
if encode_cmd:
# Convert the cmd to UTF-16LE without a BOM and base64 encode.

View File

@ -63,6 +63,13 @@ def fire_master(data, tag, preload=None):
ip=salt.utils.ip_bracket(__opts__['interface']),
port=__opts__.get('ret_port', '4506') # TODO, no fallback
)
masters = list()
ret = True
if 'master_uri_list' in __opts__:
for master_uri in __opts__['master_uri_list']:
masters.append(master_uri)
else:
masters.append(__opts__['master_uri'])
auth = salt.crypt.SAuth(__opts__)
load = {'id': __opts__['id'],
'tag': tag,
@ -73,12 +80,13 @@ def fire_master(data, tag, preload=None):
if isinstance(preload, dict):
load.update(preload)
channel = salt.transport.Channel.factory(__opts__)
try:
channel.send(load)
except Exception:
pass
return True
for master in masters:
channel = salt.transport.Channel.factory(__opts__, master_uri=master)
try:
channel.send(load)
except Exception:
ret = False
return ret
else:
# Usually, we can send the event via the minion, which is faster
# because it is already authenticated

View File

@ -3510,6 +3510,7 @@ def get_managed(
'''
return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
source_hash_name = kwargs.pop('source_hash_name', None)
# If we have a source defined, let's figure out what the hash is
if source:
urlparsed_source = _urlparse(source)
@ -3558,7 +3559,8 @@ def get_managed(
if not hash_fn:
return '', {}, ('Source hash file {0} not found'
.format(source_hash))
source_sum = extract_hash(hash_fn, '', name)
source_sum = extract_hash(
hash_fn, '', source_hash_name or name)
if source_sum is None:
return _invalid_source_hash_format()

View File

@ -13,7 +13,7 @@ import salt.crypt
import salt.payload
import salt.transport
import salt.utils.args
from salt.exceptions import SaltReqTimeoutError
from salt.exceptions import SaltReqTimeoutError, SaltInvocationError
log = logging.getLogger(__name__)
@ -46,7 +46,8 @@ def _publish(
returner='',
timeout=5,
form='clean',
wait=False):
wait=False,
via_master=None):
'''
Publish a command from the minion out to other minions, publications need
to be enabled on the Salt master and the minion needs to have permission
@ -75,7 +76,35 @@ def _publish(
arg = _parse_args(arg)
log.info('Publishing \'{0}\' to {master_uri}'.format(fun, **__opts__))
if via_master:
if 'master_uri_list' not in __opts__:
raise SaltInvocationError('Could not find list of masters in '
                          'minion configuration but `via_master` '
                          'was specified.')
else:
# Find the master in the list of master_uris generated by the minion base class
matching_master_uris = [master for master
in __opts__['master_uri_list']
if '//{0}:'.format(via_master)
in master]
if not matching_master_uris:
raise SaltInvocationError(
    'Could not find match for {0} in list of configured '
    'masters {1} when using `via_master` option'.format(
        via_master, __opts__['master_uri_list']))
if len(matching_master_uris) > 1:
# If we have multiple matches, consider this a non-fatal error
# and continue with whatever we found first.
log.warning(
    'The `via_master` flag found more than one possible '
    'match for {0} when evaluating list {1}'.format(
        via_master, __opts__['master_uri_list']))
master_uri = matching_master_uris.pop()
else:
# If no preference is expressed by the user, just publish to the first master
# in the list.
master_uri = __opts__['master_uri']
log.info('Publishing \'{0}\' to {1}'.format(fun, master_uri))
auth = salt.crypt.SAuth(__opts__)
tok = auth.gen_token('salt')
load = {'cmd': 'minion_pub',
@ -89,7 +118,7 @@ def _publish(
'form': form,
'id': __opts__['id']}
channel = salt.transport.Channel.factory(__opts__)
channel = salt.transport.Channel.factory(__opts__, master_uri=master_uri)
try:
peer_data = channel.send(load)
except SaltReqTimeoutError:
@ -145,7 +174,7 @@ def _publish(
return ret
def publish(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5):
def publish(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5, via_master=None):
'''
Publish a command from the minion out to other minions.
@ -204,7 +233,9 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5):
salt '*' publish.publish test.kwarg arg="['cheese=spam','spam=cheese']"
When running via salt-call, the ``via_master`` flag may be set to specify which
master the publication should be sent to, as in the sketch below. Only one
master may be specified. If unset, the publication will be sent only to the
first master in the minion configuration.
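A sketch of such a call (the master hostname is illustrative):

.. code-block:: bash

    salt-call publish.publish '*' test.ping via_master=master02.example.com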
'''
return _publish(tgt,
fun,
@ -213,7 +244,8 @@ def publish(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5):
returner=returner,
timeout=timeout,
form='clean',
wait=True)
wait=True,
via_master=via_master)
def full_data(tgt, fun, arg=None, expr_form='glob', returner='', timeout=5):

View File

@ -993,17 +993,26 @@ def revoke_auth(preserve_minion_cache=False):
salt '*' saltutil.revoke_auth
'''
channel = salt.transport.Channel.factory(__opts__)
tok = channel.auth.gen_token('salt')
load = {'cmd': 'revoke_auth',
'id': __opts__['id'],
'tok': tok,
'preserve_minion_cache': preserve_minion_cache}
masters = list()
ret = True
if 'master_uri_list' in __opts__:
for master_uri in __opts__['master_uri_list']:
masters.append(master_uri)
else:
masters.append(__opts__['master_uri'])
try:
return channel.send(load)
except SaltReqTimeoutError:
return False
for master in masters:
channel = salt.transport.Channel.factory(__opts__, master_uri=master)
tok = channel.auth.gen_token('salt')
load = {'cmd': 'revoke_auth',
'id': __opts__['id'],
'tok': tok,
'preserve_minion_cache': preserve_minion_cache}
try:
channel.send(load)
except SaltReqTimeoutError:
ret = False
return ret
def _get_ssh_or_api_client(cfgfile, ssh=False):

View File

@ -371,7 +371,7 @@ def info(name):
def start(name):
'''
Start the specified service
Start the specified service.
.. warning::
You cannot start a disabled service in Windows. If the service is

View File

@ -194,6 +194,9 @@ def _get_csr_extensions(csr):
csrexts = \
csryaml['Certificate Request']['Data']['Requested Extensions']
if not csrexts:
return ret
for short_name, long_name in six.iteritems(EXT_NAME_MAPPINGS):
if csrexts and long_name in csrexts:
ret[short_name] = csrexts[long_name]
@ -817,7 +820,7 @@ def create_private_key(path=None, text=False, bits=2048, verbose=True):
def create_crl( # pylint: disable=too-many-arguments,too-many-locals
path=None, text=False, signing_private_key=None,
signing_cert=None, revoked=None, include_expired=False,
days_valid=100):
days_valid=100, digest=''):
'''
Create a CRL
@ -867,6 +870,10 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
The number of days that the CRL should be valid. This sets the Next
Update field in the CRL.
digest:
The digest to use for signing the CRL.
This has no effect on versions of pyOpenSSL less than 0.14
.. note::
At this time the pyOpenSSL library does not allow choosing a signing
@ -938,8 +945,11 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
OpenSSL.crypto.FILETYPE_PEM,
get_pem_entry(signing_private_key))
crltext = crl.export(
cert, key, OpenSSL.crypto.FILETYPE_PEM, days=days_valid)
try:
crltext = crl.export(cert, key, OpenSSL.crypto.FILETYPE_PEM, days=days_valid, digest=bytes(digest))
except TypeError:
log.warning('Error signing crl with specified digest. Are you using pyopenssl 0.15 or newer? The default md5 digest will be used.')
crltext = crl.export(cert, key, OpenSSL.crypto.FILETYPE_PEM, days=days_valid)
if text:
return crltext
@ -1300,8 +1310,7 @@ def create_certificate(
# Including listen_in and preqrequired because they are not included
# in STATE_INTERNAL_KEYWORDS
# for salt 2014.7.2
for ignore in list(_STATE_INTERNAL_KEYWORDS) + \
['listen_in', 'preqrequired']:
for ignore in list(_STATE_INTERNAL_KEYWORDS) + ['listen_in', 'preqrequired', '__prerequired__']:
kwargs.pop(ignore, None)
cert_txt = __salt__['publish.publish'](
@ -1471,6 +1480,9 @@ def create_csr(path=None, text=False, **kwargs):
If ``True``, return the PEM text without writing to a file.
Default ``False``.
algorithm:
The hashing algorithm to be used for signing this request. Defaults to sha256.
kwargs:
The subject, extension and version arguments from
:mod:`x509.create_certificate <salt.modules.x509.create_certificate>`
@ -1493,15 +1505,24 @@ def create_csr(path=None, text=False, **kwargs):
csr = M2Crypto.X509.Request()
subject = csr.get_subject()
_version = 3
if 'version' in kwargs:
_version = kwargs.get('version')
csr.set_version(_version - 1)
for prop, default in six.iteritems(CERT_DEFAULTS):
if prop not in kwargs:
kwargs[prop] = default
csr.set_version(kwargs['version'] - 1)
if 'private_key' not in kwargs and 'public_key' in kwargs:
kwargs['private_key'] = kwargs['public_key']
log.warning("OpenSSL no longer allows working with non-signed CSRs. A private_key must be specified. Attempting to use public_key as private_key")
if 'private_key' not in kwargs:
raise salt.exceptions.SaltInvocationError('private_key is required')
if 'public_key' not in kwargs:
raise salt.exceptions.SaltInvocationError('public_key is required')
_key = get_public_key(kwargs['public_key'], asObj=True)
csr.set_pubkey(_key)
kwargs['public_key'] = kwargs['private_key']
csr.set_pubkey(get_public_key(kwargs['public_key'], asObj=True))
# pylint: disable=unused-variable
for entry, num in six.iteritems(subject.nid):
@ -1539,7 +1560,7 @@ def create_csr(path=None, text=False, **kwargs):
csr.add_extensions(extstack)
csr.sign(pkey=_key, md=kwargs.get('algorithm', 'sha256'))
csr.sign(_get_private_key_obj(kwargs['private_key']), kwargs['algorithm'])
if path:
return write_pem(
@ -1637,7 +1658,7 @@ def verify_crl(crl, cert):
cmd = ('openssl crl -noout -in {0} -CAfile {1}'.format(
crltempfile.name, certtempfile.name))
output = __salt__['cmd.run_stdout'](cmd)
output = __salt__['cmd.run_stderr'](cmd)
crltempfile.close()
certtempfile.close()

View File

@ -16,6 +16,7 @@ from contextlib import closing
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import shlex_quote as _cmd_quote
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module
# Import salt libs
import salt.utils
@ -276,19 +277,47 @@ def extracted(name,
if not name.endswith('/'):
name += '/'
if __opts__['test']:
source_match = source
else:
try:
source_match = __salt__['file.source_list'](source,
source_hash,
__env__)[0]
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
urlparsed_source = _urlparse(source_match)
source_hash_name = urlparsed_source.path or urlparsed_source.netloc
if if_missing is None:
if_missing = name
if source_hash and source_hash_update:
hash = source_hash.split("=")
source_file = '{0}.{1}'.format(os.path.basename(source), hash[0])
hash_fname = os.path.join(__opts__['cachedir'],
'files',
__env__,
source_file)
if _compare_checksum(hash_fname, name, hash[1]):
ret['result'] = True
ret['comment'] = 'Hash {0} has not changed'.format(hash[1])
if urlparsed_source.scheme != '':
ret['result'] = False
ret['comment'] = (
'\'source_hash_update\' is not yet implemented for a remote '
'source_hash'
)
return ret
else:
try:
hash_type, hsum = source_hash.split('=')
except ValueError:
ret['result'] = False
ret['comment'] = 'Invalid source_hash format'
return ret
source_file = '{0}.{1}'.format(os.path.basename(source), hash_type)
hash_fname = os.path.join(__opts__['cachedir'],
'files',
__env__,
source_file)
if _compare_checksum(hash_fname, name, hsum):
ret['result'] = True
ret['comment'] = 'Hash {0} has not changed'.format(hsum)
return ret
elif (
__salt__['file.directory_exists'](if_missing)
or __salt__['file.file_exists'](if_missing)
@ -304,18 +333,6 @@ def extracted(name,
'{0}.{1}'.format(re.sub('[:/\\\\]', '_', if_missing),
archive_format))
if __opts__['test']:
source_match = source
else:
try:
source_match = __salt__['file.source_list'](source,
source_hash,
__env__)[0]
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
if not os.path.exists(filename):
if __opts__['test']:
ret['result'] = None
@ -328,13 +345,15 @@ def extracted(name,
return ret
log.debug('%s is not in cache, downloading it', source_match)
file_result = __salt__['state.single']('file.managed',
filename,
source=source,
source=source_match,
source_hash=source_hash,
makedirs=True,
skip_verify=skip_verify,
saltenv=__env__)
saltenv=__env__,
source_hash_name=source_hash_name)
log.debug('file.managed: {0}'.format(file_result))
# get value of first key
try:

View File

@ -303,7 +303,7 @@ def csr_managed(name,
/etc/pki/mycert.csr:
x509.csr_managed:
- public_key: /etc/pki/mycert.key
- private_key: /etc/pki/mycert.key
- CN: www.example.com
- C: US
- ST: Utah
@ -493,6 +493,7 @@ def crl_managed(name,
signing_cert=None,
revoked=None,
days_valid=100,
digest="",
days_remaining=30,
include_expired=False,
backup=False,):
@ -518,6 +519,10 @@ def crl_managed(name,
days_valid:
The number of days the CRL should be valid for. Default is 100.
digest:
The digest to use for signing the CRL.
This has no effect on versions of pyOpenSSL less than 0.14
days_remaining:
The crl should be automatically recreated if there are less than ``days_remaining``
days until the crl expires. Set to 0 to disable automatic renewal. Default is 30.
@ -574,7 +579,7 @@ def crl_managed(name,
current = '{0} does not exist.'.format(name)
new_crl = __salt__['x509.create_crl'](text=True, signing_private_key=signing_private_key,
signing_cert=signing_cert, revoked=revoked, days_valid=days_valid, include_expired=include_expired)
signing_cert=signing_cert, revoked=revoked, days_valid=days_valid, digest=digest, include_expired=include_expired)
new = __salt__['x509.read_crl'](crl=new_crl)
new_comp = new.copy()

View File

@ -3159,3 +3159,11 @@ def simple_types_filter(data):
return simpledict
return data
def substr_in_list(string_to_search_for, list_to_search):
'''
Return a boolean value that indicates whether or not a given
string is present in any of the strings which comprise a list
'''
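# Example (illustrative): substr_in_list('4506', ['tcp://10.0.0.1:4506']) -> True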
return any(string_to_search_for in s for s in list_to_search)

View File

@ -17,6 +17,7 @@ def running(opts):
'''
Return the running jobs on this minion
'''
ret = []
proc_dir = os.path.join(opts['cachedir'], 'proc')
if not os.path.isdir(proc_dir):
@ -110,13 +111,30 @@ def _read_proc_file(path, opts):
def _check_cmdline(data):
'''
Check the proc filesystem cmdline to see if this process is a salt process
In some cases, where an insane number of processes are being created on a
system, a PID can get recycled or assigned to a non-Salt process. This
function checks to make sure the PID we are checking on is actually a Salt
process.
On non-Linux systems, or on systems with no procfs-style /proc mounted, we
punt and just return True (assuming that the data has a PID in it).
'''
if salt.utils.is_windows():
return True
pid = data.get('pid')
if not pid:
return False
if not os.path.isdir('/proc') or salt.utils.is_windows():
return True
# Some BSDs have a /proc dir, but procfs is not mounted there. Since
# processes are represented by directories in /proc, if there are no
# dirs in /proc, this is an OS without procfs support. In this case,
# like the one above, we just return True.
dirs_in_proc = False
for dirpath, dirnames, files in os.walk('/proc'):
if dirnames:
dirs_in_proc = True
break
if not dirs_in_proc:
return True
path = os.path.join('/proc/{0}/cmdline'.format(pid))
if not os.path.isfile(path):
return False

View File

@ -339,6 +339,7 @@ import salt.utils
import salt.utils.jid
import salt.utils.process
import salt.utils.args
import salt.utils.minion
import salt.loader
import salt.minion
import salt.payload
@ -771,37 +772,22 @@ class Schedule(object):
# dict we treat it like it was there and is True
if 'jid_include' not in data or data['jid_include']:
jobcount = 0
for basefilename in os.listdir(salt.minion.get_proc_dir(self.opts['cachedir'])):
fn_ = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']), basefilename)
if not os.path.exists(fn_):
log.debug('schedule.handle_func: {0} was processed '
'in another thread, skipping.'.format(
basefilename))
continue
with salt.utils.fopen(fn_, 'rb') as fp_:
job = salt.payload.Serial(self.opts).load(fp_)
if job:
if 'schedule' in job:
log.debug('schedule.handle_func: Checking job against '
'fun {0}: {1}'.format(ret['fun'], job))
if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, now '
'{0}, maxrunning is {1}'.format(
jobcount, data['maxrunning']))
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job {0} '
'was not started, {1} already running'.format(
ret['schedule'], data['maxrunning']))
return False
else:
try:
log.info('Invalid job file found. Removing.')
os.remove(fn_)
except OSError:
log.info('Unable to remove file: {0}.'.format(fn_))
for job in salt.utils.minion.running(self.opts):
if 'schedule' in job:
log.debug('schedule.handle_func: Checking job against '
'fun {0}: {1}'.format(ret['fun'], job))
if ret['schedule'] == job['schedule'] and os_is_running(job['pid']):
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, now '
'{0}, maxrunning is {1}'.format(
jobcount, data['maxrunning']))
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job {0} '
'was not started, {1} already running'.format(
ret['schedule'], data['maxrunning']))
return False
if multiprocessing_enabled and not salt.utils.is_windows():
# Reconfigure multiprocessing logging after daemonizing