Mirror of https://github.com/valitydev/salt.git (synced 2024-11-06 08:35:21 +00:00)
Merge branch 'fluorine' into 'develop'
Conflicts:
 - LICENSE
 - README.rst
 - doc/ref/pillar/all/salt.pillar.saltclass.rst
 - doc/ref/tops/all/index.rst
 - doc/ref/tops/all/salt.tops.saltclass.rst
 - salt/modules/consul.py
 - salt/modules/lxc.py
 - salt/modules/win_lgpo.py
 - salt/states/pip_state.py
This commit is contained in commit d2aa886ce7.
.ci/docs: 3 changed lines

@@ -48,6 +48,9 @@ pipeline {
description: 'The docs job has failed',
status: 'FAILURE',
context: "jenkins/pr/docs"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}
}

@@ -72,6 +72,9 @@ timeout(time: 8, unit: 'HOURS') {
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}
}

@@ -72,6 +72,9 @@ timeout(time: 6, unit: 'HOURS') {
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}
}

@@ -62,7 +62,7 @@ timeout(time: 6, unit: 'HOURS') {
} finally {
cleanWs notFailBuild: true
def currentResult = currentBuild.result ?: 'SUCCESS'
if ( currentResult == 'SUCCESS') {
if (currentResult == 'SUCCESS') {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
status: 'SUCCESS',

@@ -72,6 +72,9 @@ timeout(time: 6, unit: 'HOURS') {
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}
}
.ci/lint: 3 changed lines

@@ -94,6 +94,9 @@ pipeline {
description: 'The lint job has failed',
status: 'FAILURE',
context: "jenkins/pr/lint"
slackSend channel: "#jenkins-prod-pr",
color: '#FF0000',
message: "FAILED: PR-Job: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})"
}
}
}
CONTRIBUTING.rst: new file, 13 lines

@@ -0,0 +1,13 @@
Developing Salt
===============

The Salt development team is welcoming, positive, and dedicated to
helping people get new code and fixes into SaltStack projects. Log into
GitHub and get started with one of the largest developer communities in
the world. The following links should get you started:

`<https://github.com/saltstack>`_

`<https://docs.saltstack.com/en/latest/topics/development/index.html>`_

`<https://docs.saltstack.com/en/develop/topics/development/pull_requests.html>`_
Gemfile: 9 changed lines

@@ -12,12 +12,15 @@ group :docker do
end

group :windows do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
gem 'winrm', '~>2.0'
gem 'winrm-fs', '~>1.2.1'
gem 'winrm-fs', :git => 'https://github.com/dwoz/winrm-fs.git', :branch => 'chunked_downloads'
end

group :ec2 do
gem 'kitchen-ec2'
end

group :vagrant do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
end
NOTICE: new file, 5 lines

@@ -0,0 +1,5 @@
Apache SaltStack
Copyright 2014-2019 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
README.rst: 47 changed lines

@@ -34,39 +34,6 @@ documentation.

`<https://docs.saltstack.com/en/latest/>`_

Get SaltStack Support and Help
==============================

**IRC Chat** - Join the vibrant, helpful and positive SaltStack chat room in
Freenode at #salt. There is no need to introduce yourself, or ask permission to
join in, just help and be helped! Make sure to wait for an answer, sometimes it
may take a few moments for someone to reply.

`<http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83>`_

**SaltStack Slack** - Alongside IRC is our SaltStack Community Slack for the
SaltStack Working groups. Use the following link to request an invitation.

`<https://saltstackcommunity.herokuapp.com/>`_

**Mailing List** - The SaltStack community users mailing list is hosted by
Google groups. Anyone can post to ask questions about SaltStack products and
anyone can help answer. Join the conversation!

`<https://groups.google.com/forum/#!forum/salt-users>`_

You may subscribe to the list without a Google account by emailing
salt-users+subscribe@googlegroups.com and you may post to the list by emailing
salt-users@googlegroups.com

**Reporting Issues** - To report an issue with Salt, please follow the
guidelines for filing bug reports:
`<https://docs.saltstack.com/en/develop/topics/development/reporting_bugs.html>`_

**SaltStack Support** - If you need dedicated, prioritized support, please
consider a SaltStack Support package that fits your needs:
`<http://www.saltstack.com/support>`_

Engage SaltStack
================

@@ -101,20 +68,6 @@ services`_ offerings.
.. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/
.. _SaltStack professional services: http://saltstack.com/services/

Developing Salt
===============

The Salt development team is welcoming, positive, and dedicated to
helping people get new code and fixes into SaltStack projects. Log into
GitHub and get started with one of the largest developer communities in
the world. The following links should get you started:

`<https://github.com/saltstack>`_

`<https://docs.saltstack.com/en/latest/topics/development/index.html>`_

`<https://docs.saltstack.com/en/develop/topics/development/pull_requests.html>`_

License
=======
SUPPORT.rst: new file, 33 lines

@@ -0,0 +1,33 @@
Get SaltStack Support and Help
==============================

**IRC Chat** - Join the vibrant, helpful and positive SaltStack chat room in
Freenode at #salt. There is no need to introduce yourself, or ask permission to
join in, just help and be helped! Make sure to wait for an answer, sometimes it
may take a few moments for someone to reply.

`<http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83>`_

**SaltStack Slack** - Alongside IRC is our SaltStack Community Slack for the
SaltStack Working groups. Use the following link to request an invitation.

`<https://saltstackcommunity.herokuapp.com/>`_

**Mailing List** - The SaltStack community users mailing list is hosted by
Google groups. Anyone can post to ask questions about SaltStack products and
anyone can help answer. Join the conversation!

`<https://groups.google.com/forum/#!forum/salt-users>`_

You may subscribe to the list without a Google account by emailing
salt-users+subscribe@googlegroups.com and you may post to the list by emailing
salt-users@googlegroups.com

**Reporting Issues** - To report an issue with Salt, please follow the
guidelines for filing bug reports:
`<https://docs.saltstack.com/en/develop/topics/development/reporting_bugs.html>`_

**SaltStack Support** - If you need dedicated, prioritized support, please
consider a SaltStack Support package that fits your needs:
`<http://www.saltstack.com/support>`_
conf/master: 18 changed lines

@@ -269,24 +269,6 @@
# The publisher interface ZeroMQPubServerChannel
#pub_hwm: 1000

# These two ZMQ HWM settings, salt_event_pub_hwm and event_publisher_pub_hwm
# are significant for masters with thousands of minions. When these are
# insufficiently high it will manifest in random responses missing in the CLI
# and even missing from the job cache. Masters that have fast CPUs and many
# cores with appropriate worker_threads will not need these set as high.

# On deployment with 8,000 minions, 2.4GHz CPUs, 24 cores, 32GiB memory has
# these settings:
#
# salt_event_pub_hwm: 128000
# event_publisher_pub_hwm: 64000

# ZMQ high-water-mark for SaltEvent pub socket
#salt_event_pub_hwm: 20000

# ZMQ high-water-mark for EventPublisher pub socket
#event_publisher_pub_hwm: 10000

# The master may allocate memory per-event and not
# reclaim it.
# To set a high-water mark for memory allocation, use

@@ -263,24 +263,6 @@ syndic_user: salt
# The publisher interface ZeroMQPubServerChannel
#pub_hwm: 1000

# These two ZMQ HWM settings, salt_event_pub_hwm and event_publisher_pub_hwm
# are significant for masters with thousands of minions. When these are
# insufficiently high it will manifest in random responses missing in the CLI
# and even missing from the job cache. Masters that have fast CPUs and many
# cores with appropriate worker_threads will not need these set as high.

# On deployment with 8,000 minions, 2.4GHz CPUs, 24 cores, 32GiB memory has
# these settings:
#
# salt_event_pub_hwm: 128000
# event_publisher_pub_hwm: 64000

# ZMQ high-water-mark for SaltEvent pub socket
#salt_event_pub_hwm: 20000

# ZMQ high-water-mark for EventPublisher pub socket
#event_publisher_pub_hwm: 10000

# The master may allocate memory per-event and not
# reclaim it.
# To set a high-water mark for memory allocation, use

@@ -1892,40 +1892,6 @@ The listen queue size of the ZeroMQ backlog.

zmq_backlog: 1000

.. conf_master:: salt_event_pub_hwm
.. conf_master:: event_publisher_pub_hwm

``salt_event_pub_hwm`` and ``event_publisher_pub_hwm``
------------------------------------------------------

These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and
``event_publisher_pub_hwm`` are significant for masters with thousands of
minions. When these are insufficiently high it will manifest in random
responses missing in the CLI and even missing from the job cache. Masters
that have fast CPUs and many cores with appropriate ``worker_threads``
will not need these set as high.

The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is:

.. code-block:: yaml

    salt_event_pub_hwm: 20000

The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is:

.. code-block:: yaml

    event_publisher_pub_hwm: 10000

As an example, on single master deployment with 8,000 minions, 2.4GHz CPUs,
24 cores, and 32GiB memory has these settings:

.. code-block:: yaml

    salt_event_pub_hwm: 128000
    event_publisher_pub_hwm: 64000

.. _master-module-management:

Master Module Management

@@ -2481,6 +2447,12 @@ Master will not be returned to the Minion.
------------------------------

.. versionadded:: 2014.1.0
.. deprecated:: 2018.3.4
    This option is now ignored. Firstly, it only traversed
    :conf_master:`file_roots`, which means it did not work for the other
    fileserver backends. Secondly, since this option was added we have added
    caching to the code that traverses the file_roots (and gitfs, etc.), which
    greatly reduces the amount of traversal that is done.

Default: ``False``
@@ -1,6 +1,6 @@
===========================
=====================
salt.pillar.saltclass
===========================
=====================

.. automodule:: salt.pillar.saltclass
    :members:

@@ -15,4 +15,4 @@ master tops modules
    mongo
    reclass_adapter
    saltclass
    varstack
    varstack_top

@@ -1,6 +1,6 @@
=========================
===================
salt.tops.saltclass
=========================
===================

.. automodule:: salt.tops.saltclass
    :members:

@@ -2,5 +2,5 @@
salt.tops.varstack
==================

.. automodule:: salt.tops.varstack
.. automodule:: salt.tops.varstack_top
    :members:

@@ -1,4 +1,3 @@

.. _salt-top:

===========
doc/topics/releases/2017.7.9.rst: new file, 16 lines

@@ -0,0 +1,16 @@
========================================
In Progress: Salt 2017.7.9 Release Notes
========================================

Version 2017.7.9 is an **unreleased** bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
This release is still in progress and has not been released yet.

Salt Cloud Features
===================

GCE Driver
----------
The GCE salt cloud driver can now be used with GCE instance credentials by
setting the configuration parameters ``service_account_private_key`` and
``service_account_private_email`` to an empty string.
doc/topics/releases/2018.3.4.rst: new file, 6 lines

@@ -0,0 +1,6 @@
========================================
In Progress: Salt 2018.3.4 Release Notes
========================================

Version 2018.3.4 is an **unreleased** bugfix release for :ref:`2018.3.0 <release-2018-3-0>`.
This release is still in progress and has not been released yet.
@@ -488,6 +488,18 @@ in. Because of the non-deterministic order that grains are rendered in, the
only grains that can be relied upon to be passed in are ``core.py`` grains,
since those are compiled first.

More Precise ``virtual`` Grain
==============================

This release improves the accuracy of the ``virtual`` grain when running Salt in
a nested virtualization environment (e.g. a ``systemd-nspawn`` container inside a
VM) and having ``virt-what`` installed.

Until now, the ``virtual`` grain was determined by matching against all output
lines of ``virt-what`` instead of individual items, which could lead to imprecise
results (e.g. reporting ``HyperV`` inside a ``systemd-nspawn`` container
running within a Hyper-V-based VM).
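A minimal sketch of the idea (not the exact grains code; the helper name and sample values are illustrative): prefer the last, innermost fact that ``virt-what`` prints instead of matching against its whole output blob.

.. code-block:: python

    def detect_virtual(virt_what_output):
        # virt-what prints one fact per line, with the innermost virtualization last
        lines = [line.strip() for line in virt_what_output.splitlines() if line.strip()]
        return lines[-1] if lines else 'physical'

    # Nested case: a systemd-nspawn container inside a Hyper-V based VM
    print(detect_virtual('hyperv\nsystemd-nspawn\n'))  # -> 'systemd-nspawn', not 'hyperv'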
Configurable Module Environment
===============================
@@ -244,14 +244,12 @@ Write-Output " - $script_name :: Copying DLLs . . ."
Write-Output " ----------------------------------------------------------------"
# Architecture Specific DLL's
ForEach($key in $ini[$bitDLLs].Keys) {
If ($arrInstalled -notcontains $key) {
Write-Output " - $key . . ."
$file = "$($ini[$bitDLLs][$key])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
Copy-Item $file -destination $($ini['Settings']['Python2Dir'])
}
}

#------------------------------------------------------------------------------

@@ -251,14 +251,12 @@ Write-Output " - $script_name :: Copying DLLs . . ."
Write-Output " ----------------------------------------------------------------"
# Architecture Specific DLL's
ForEach($key in $ini[$bitDLLs].Keys) {
If ($arrInstalled -notcontains $key) {
Write-Output " - $key . . ."
$file = "$($ini[$bitDLLs][$key])"
$url = "$($ini['Settings']['SaltRepo'])/$bitFolder/$file"
$file = "$($ini['Settings']['DownloadDir'])\$bitFolder\$file"
DownloadFileWithProgress $url $file
Copy-Item $file -destination $($ini['Settings']['Python3Dir'])
}
}

#------------------------------------------------------------------------------
@@ -11,7 +11,7 @@ Watch files and translate the changes into salt events
the beacon configuration.

:note: The `inotify` beacon only works on OSes that have `inotify`
kernel support. Currently this excludes FreeBSD, macOS, and Windows.
kernel support.

'''
# Import Python libs

@@ -1074,7 +1074,7 @@ class LocalClient(object):
# stop the iteration, since the jid is invalid
raise StopIteration()
except Exception as exc:
log.warning('Returner unavailable: %s', exc)
log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)
# Wait for the hosts to check in
last_time = False
# iterator for this job's return

@@ -596,7 +596,7 @@ class SlackClient(object):
Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target

'''
# Default to targetting all minions with a type of glob
# Default to targeting all minions with a type of glob
null_target = {'target': '*', 'tgt_type': 'glob'}

def check_cmd_against_group(cmd):

@@ -634,6 +634,8 @@ class SlackClient(object):
'''
Print out YAML using the block mode
'''
# emulate the yaml_out output formatter. It relies on a global __opts__ object which
# we can't obviously pass in
try:
try:
outputter = data[next(iter(data))].get('out')
@@ -60,7 +60,7 @@ def get_file_client(opts, pillar=False):
return {
'remote': RemoteClient,
'local': FSClient,
'pillar': LocalClient,
'pillar': PillarClient,
}.get(client, RemoteClient)(opts)

@@ -346,58 +346,17 @@ class Client(object):
Return a list of all available sls modules on the master for a given
environment
'''

limit_traversal = self.opts.get('fileserver_limit_traversal', False)
states = []

if limit_traversal:
if saltenv not in self.opts['file_roots']:
log.warning(
'During an attempt to list states for saltenv \'%s\', '
'the environment could not be found in the configured '
'file roots', saltenv
)
return states
for path in self.opts['file_roots'][saltenv]:
for root, dirs, files in os.walk(path, topdown=True):  # future lint: disable=blacklisted-function
root = salt.utils.data.decode(root)
files = salt.utils.data.decode(files)
log.debug(
'Searching for states in dirs %s and files %s',
salt.utils.data.decode(dirs), files
)
if not [filename.endswith('.sls') for filename in files]:
# Use shallow copy so we don't disturb the memory used
# by os.walk. Otherwise this breaks!
del dirs[:]
else:
for found_file in files:
stripped_root = os.path.relpath(root, path)
if salt.utils.platform.is_windows():
stripped_root = stripped_root.replace('\\', '/')
stripped_root = stripped_root.replace('/', '.')
if found_file.endswith(('.sls')):
if found_file.endswith('init.sls'):
if stripped_root.endswith('.'):
stripped_root = stripped_root.rstrip('.')
states.append(stripped_root)
else:
if not stripped_root.endswith('.'):
stripped_root += '.'
if stripped_root.startswith('.'):
stripped_root = stripped_root.lstrip('.')
states.append(stripped_root + found_file[:-4])
else:
states = set()
for path in self.file_list(saltenv):
if salt.utils.platform.is_windows():
path = path.replace('\\', '/')
if path.endswith('.sls'):
# is an sls module!
if path.endswith('/init.sls'):
states.append(path.replace('/', '.')[:-9])
states.add(path.replace('/', '.')[:-9])
else:
states.append(path.replace('/', '.')[:-4])
return states
states.add(path.replace('/', '.')[:-4])
return sorted(states)
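An illustration (not part of the diff) of why the rewritten list_states() collects names into a set and returns sorted(): both foo/init.sls and foo.sls map to the state name 'foo', so appending to a list could yield duplicates. The paths below are made up.

    paths = ['foo/init.sls', 'foo.sls', 'bar/baz.sls']
    states = set()
    for path in paths:
        if path.endswith('/init.sls'):
            states.add(path.replace('/', '.')[:-9])   # 'foo/init.sls' -> 'foo'
        elif path.endswith('.sls'):
            states.add(path.replace('/', '.')[:-4])   # 'bar/baz.sls' -> 'bar.baz'
    print(sorted(states))  # ['bar.baz', 'foo']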
def get_state(self, sls, saltenv, cachedir=None):
'''

@@ -844,13 +803,10 @@ class Client(object):
)

class LocalClient(Client):
class PillarClient(Client):
'''
Use the local_roots option to parse a local file root
Used by pillar to handle fileclient requests
'''
def __init__(self, opts):
Client.__init__(self, opts)

def _find_file(self, path, saltenv='base'):
'''
Locate the file path

@@ -861,7 +817,7 @@ class LocalClient(Client):
if salt.utils.url.is_escaped(path):
# The path arguments are escaped
path = salt.utils.url.unescape(path)
for root in self.opts['file_roots'].get(saltenv, []):
for root in self.opts['pillar_roots'].get(saltenv, []):
full = os.path.join(root, path)
if os.path.isfile(full):
fnd['path'] = full

@@ -895,7 +851,7 @@ class LocalClient(Client):
'''
ret = []
prefix = prefix.strip('/')
for path in self.opts['file_roots'].get(saltenv, []):
for path in self.opts['pillar_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):

@@ -908,12 +864,12 @@ class LocalClient(Client):

def file_list_emptydirs(self, saltenv='base', prefix=''):
'''
List the empty dirs in the file_roots
List the empty dirs in the pillar_roots
with optional relative prefix path to limit directory traversal
'''
ret = []
prefix = prefix.strip('/')
for path in self.opts['file_roots'].get(saltenv, []):
for path in self.opts['pillar_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):

@@ -925,12 +881,12 @@ class LocalClient(Client):

def dir_list(self, saltenv='base', prefix=''):
'''
List the dirs in the file_roots
List the dirs in the pillar_roots
with optional relative prefix path to limit directory traversal
'''
ret = []
prefix = prefix.strip('/')
for path in self.opts['file_roots'].get(saltenv, []):
for path in self.opts['pillar_roots'].get(saltenv, []):
for root, dirs, files in salt.utils.path.os_walk(
os.path.join(path, prefix), followlinks=True
):

@@ -957,7 +913,7 @@ class LocalClient(Client):

def hash_file(self, path, saltenv='base'):
'''
Return the hash of a file, to get the hash of a file in the file_roots
Return the hash of a file, to get the hash of a file in the pillar_roots
prepend the path with salt://<file on server> otherwise, prepend the
file with / for a local file.
'''

@@ -980,7 +936,7 @@ class LocalClient(Client):

def hash_and_stat_file(self, path, saltenv='base'):
'''
Return the hash of a file, to get the hash of a file in the file_roots
Return the hash of a file, to get the hash of a file in the pillar_roots
prepend the path with salt://<file on server> otherwise, prepend the
file with / for a local file.

@@ -1025,7 +981,10 @@ class LocalClient(Client):
'''
Return the available environments
'''
return list(self.opts['file_roots'])
ret = []
for saltenv in self.opts['pillar_roots']:
ret.append(saltenv)
return ret

def master_tops(self):
'''

@@ -1417,6 +1376,11 @@ class FSClient(RemoteClient):
self.auth = DumbAuth()

# Provide backward compatibility for anyone directly using LocalClient (but no
# one should be doing this).
LocalClient = FSClient

class DumbAuth(object):
'''
The dumbauth class is used to stub out auth calls fired from the FSClient
@@ -131,7 +131,10 @@ def check_file_list_cache(opts, form, list_cache, w_lock):
if os.path.exists(list_cache):
# calculate filelist age is possible
cache_stat = os.stat(list_cache)
age = time.time() - cache_stat.st_mtime
# st_time can have a greater precision than time, removing
# float precision makes sure age will never be a negative
# number.
age = int(time.time()) - int(cache_stat.st_mtime)
else:
# if filelist does not exists yet, mark it as expired
age = opts.get('fileserver_list_cache_time', 20) + 1
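A hedged illustration (not the Salt module itself; the path is illustrative) of the truncation used above: dropping the fractional seconds from both timestamps keeps a freshly written cache file from producing a slightly negative age.

    import os
    import time

    path = '/tmp/example_list_cache.p'   # illustrative path
    open(path, 'a').close()
    st = os.stat(path)
    age_float = time.time() - st.st_mtime          # can dip just below zero
    age_int = int(time.time()) - int(st.st_mtime)  # whole seconds only
    print(age_float, age_int)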
@@ -54,6 +54,7 @@ import salt.utils.dns
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions

@@ -621,6 +622,8 @@ def _windows_virtual(osdata):
if osdata['kernel'] != 'Windows':
return grains

grains['virtual'] = 'physical'

# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
manufacturer = osdata.get('manufacturer', '')

@@ -785,6 +788,7 @@ def _virtual(osdata):
grains['virtual'] = 'LXC'
break
elif command == 'virt-what':
output = output.splitlines()[-1]
if output in ('kvm', 'qemu', 'uml', 'xen', 'lxc'):
grains['virtual'] = output
break

@@ -1528,6 +1532,34 @@ def _parse_os_release(*os_release_files):
return ret

def _parse_cpe_name(cpe):
'''
Parse CPE_NAME data from the os-release

Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe

:param cpe:
:return:
'''
part = {
'o': 'operating system',
'h': 'hardware',
'a': 'application',
}
ret = {}
cpe = (cpe or '').split(':')
if len(cpe) > 4 and cpe[0] == 'cpe':
if cpe[1].startswith('/'):  # WFN to URI
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
ret['phase'] = cpe[5] if len(cpe) > 5 else None
ret['part'] = part.get(cpe[1][1:])
elif len(cpe) == 13 and cpe[1] == '2.3':  # WFN to a string
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
ret['part'] = part.get(cpe[2])

return ret
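Illustrative use of the _parse_cpe_name helper above; the CPE strings are made-up examples, not values captured from a real os-release file.

    print(_parse_cpe_name('cpe:/o:opensuse:leap:15.0'))
    # {'vendor': 'opensuse', 'product': 'leap', 'version': '15.0',
    #  'phase': None, 'part': 'operating system'}

    print(_parse_cpe_name('cpe:2.3:o:opensuse:leap:15.0:*:*:*:*:*:*:*'))
    # {'vendor': 'opensuse', 'product': 'leap', 'version': '15.0',
    #  'phase': None, 'part': 'operating system'}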
def os_data():
'''
Return grains pertaining to the operating system

@@ -1724,13 +1756,20 @@ def os_data():
codename = codename_match.group(1)
grains['lsb_distrib_codename'] = codename
if 'CPE_NAME' in os_release:
if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']:
cpe = _parse_cpe_name(os_release['CPE_NAME'])
if not cpe:
log.error('Broken CPE_NAME format in /etc/os-release!')
elif cpe.get('vendor', '').lower() in ['suse', 'opensuse']:
grains['os'] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
grains['osfullname'] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
grains['osfullname'] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
if cpe.get('version') and cpe.get('vendor') == 'opensuse':  # Keep VERSION_ID for SLES
grains['lsb_distrib_release'] = cpe['version']

elif os.path.isfile('/etc/SuSE-release'):
log.trace('Parsing distrib info from /etc/SuSE-release')
grains['lsb_distrib_id'] = 'SUSE'

@@ -1848,8 +1887,7 @@ def os_data():
# Commit introducing this comment should be reverted after the upstream bug is released.
if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
grains.pop('lsb_distrib_release', None)
grains['osrelease'] = \
grains.get('lsb_distrib_release', osrelease).strip()
grains['osrelease'] = grains.get('lsb_distrib_release', osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename', '').strip() or oscodename
if 'Red Hat' in grains['oscodename']:
grains['oscodename'] = oscodename

@@ -1887,8 +1925,7 @@ def os_data():
r'((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?'
r'\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?'
)
osname, development, osmajorrelease, osminorrelease = \
release_re.search(rel_data).groups()
osname, development, osmajorrelease, osminorrelease = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.

@@ -1975,8 +2012,8 @@ def os_data():
# architecture.
if grains.get('os_family') == 'Debian':
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') == 'RedHat':
osarch = __salt__['cmd.run']('rpm --eval %{_host_cpu}').strip()
elif grains.get('os_family') in ['RedHat', 'Suse']:
osarch = salt.utils.pkg.rpm.get_osarch()
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():

@@ -2630,7 +2667,7 @@ def _hw_data(osdata):
break
elif osdata['kernel'] == 'AIX':
cmd = salt.utils.path.which('prtconf')
if data:
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
@@ -331,7 +331,9 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Fire presence events if enabled
'''
if self.presence_events:
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)

@@ -341,9 +343,7 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
self.event.fire_event(data, tagify('present', 'presence'), timeout=3)
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)

@@ -1600,7 +1600,7 @@ class Minion(MinionBase):

sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}

@@ -400,9 +400,9 @@ def update_distribution(
keyid=keyid,
profile=profile
)
if 'error' in distribution_result:
return distribution_result
dist_with_tags = distribution_result['result']
if 'error' in distribution_ret:
return distribution_ret
dist_with_tags = distribution_ret['result']

current_distribution = dist_with_tags['distribution']
current_config = current_distribution['DistributionConfig']
@@ -420,7 +420,9 @@ def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None):
result = _client().get_url(
path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash)
if not result:
log.error('Unable to fetch file %s from saltenv %s.', path, saltenv)
log.error('Unable to fetch file %s from saltenv %s.',
salt.utils.url.redact_http_basic_auth(path),
saltenv)
return result

@@ -4163,7 +4163,7 @@ def get_managed(
msg = (
'Unable to verify upstream hash of source file {0}, '
'please set source_hash or set skip_verify to True'
.format(source)
.format(salt.utils.url.redact_http_basic_auth(source))
)
return '', {}, msg

@@ -4195,12 +4195,14 @@ def get_managed(
except Exception as exc:
# A 404 or other error code may raise an exception, catch it
# and return a comment that will fail the calling state.
return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)
_source = salt.utils.url.redact_http_basic_auth(source)
return '', {}, 'Failed to cache {0}: {1}'.format(_source, exc)

# If cache failed, sfn will be False, so do a truth check on sfn first
# as invoking os.path.exists() on a bool raises a TypeError.
if not sfn or not os.path.exists(sfn):
return sfn, {}, 'Source file \'{0}\' not found'.format(source)
_source = salt.utils.url.redact_http_basic_auth(source)
return sfn, {}, 'Source file \'{0}\' not found'.format(_source)
if sfn == name:
raise SaltInvocationError(
'Source file cannot be the same as destination'
@@ -1035,11 +1035,8 @@ def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
if args[-1].startswith('-'):
args.append('')
parsed_args = []
if sys.version.startswith('2.6'):
(opts, leftover_args) = parser.parse_args(args)
opts, _ = parser.parse_known_args(args)
parsed_args = vars(opts)
else:
parsed_args = vars(parser.parse_args(args))
ret_args = {}
chain = parsed_args['append']
for arg in parsed_args:

@@ -176,7 +176,7 @@ def db_remove(database_name, **kwargs):
salt minion mssql.db_remove database_name='DBNAME'
'''
try:
if db_exists(database_name) and database_name not in ['master', 'model', 'msdb', 'tempdb']:
if db_exists(database_name, **kwargs) and database_name not in ['master', 'model', 'msdb', 'tempdb']:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
@@ -35,7 +35,6 @@ Module to provide MySQL compatibility to salt.

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import time
import logging
import re

@@ -201,12 +200,6 @@ def __virtual__():
return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else ''

def __mysql_hash_password(password):
_password = hashlib.sha1(password).digest()
_password = '*{0}'.format(hashlib.sha1(_password).hexdigest().upper())
return _password

def __check_table(name, table, **connection_args):
dbc = _connect(**connection_args)
if dbc is None:

@@ -1208,6 +1201,7 @@ def user_exists(user,
salt '*' mysql.user_exists 'username' passwordless=True
salt '*' mysql.user_exists 'username' password_column='authentication_string'
'''
run_verify = False
server_version = version(**connection_args)
dbc = _connect(**connection_args)
# Did we fail to connect with the user we are checking

@@ -1240,10 +1234,8 @@ def user_exists(user,
else:
qry += ' AND ' + password_column + ' = \'\''
elif password:
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
# Hash the password before comparing
_password = __mysql_hash_password(password)
qry += ' AND ' + password_column + ' = %(password)s'
if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
run_verify = True
else:
_password = password
qry += ' AND ' + password_column + ' = PASSWORD(%(password)s)'

@@ -1252,6 +1244,9 @@ def user_exists(user,
qry += ' AND ' + password_column + ' = %(password)s'
args['password'] = password_hash

if run_verify:
if not verify_login(user, host, password):
return False
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:

@@ -1366,7 +1361,7 @@ def user_create(user,
qry += ' IDENTIFIED BY %(password)s'
args['password'] = six.text_type(password)
elif password_hash is not None:
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
qry += ' IDENTIFIED BY %(password)s'
else:
qry += ' IDENTIFIED BY PASSWORD %(password)s'

@@ -1452,7 +1447,7 @@ def user_chpass(user,
server_version = version(**connection_args)
args = {}
if password is not None:
if salt.utils.versions.version_cmp(server_version, '8.0.11') <= 0:
if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
password_sql = '%(password)s'
else:
password_sql = 'PASSWORD(%(password)s)'

@@ -1475,6 +1470,11 @@ def user_chpass(user,
password_column = __password_column(**connection_args)

cur = dbc.cursor()
if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
qry = ("ALTER USER '" + user + "'@'" + host + "'"
" IDENTIFIED BY '" + password + "';")
args = {}
else:
qry = ('UPDATE mysql.user SET ' + password_column + '='
+ password_sql +
' WHERE User=%(user)s AND Host = %(host)s;')

@@ -1483,6 +1483,11 @@ def user_chpass(user,
if salt.utils.data.is_true(allow_passwordless) and \
salt.utils.data.is_true(unix_socket):
if host == 'localhost':
if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
qry = ("ALTER USER '" + user + "'@'" + host + "'"
" IDENTIFIED BY '" + password + "';")
args = {}
else:
qry = ('UPDATE mysql.user SET ' + password_column + '='
+ password_sql + ', plugin=%(unix_socket)s' +
' WHERE User=%(user)s AND Host = %(host)s;')

@@ -1497,6 +1502,15 @@ def user_chpass(user,
log.error(err)
return False

if salt.utils.versions.version_cmp(server_version, '8.0.11') >= 0:
_execute(cur, 'FLUSH PRIVILEGES;')
log.info(
'Password for user \'%s\'@\'%s\' has been %s',
user, host,
'changed' if any((password, password_hash)) else 'cleared'
)
return True
else:
if result:
_execute(cur, 'FLUSH PRIVILEGES;')
log.info(

@@ -1973,6 +1987,9 @@ def processlist(**connection_args):
"SHOW FULL PROCESSLIST".

Returns: a list of dicts, with each dict representing a process:

.. code-block:: python

{'Command': 'Query',
'Host': 'localhost',
'Id': 39,

@@ -2214,3 +2231,29 @@ def showglobal(**connection_args):

log.debug('%s-->%s', mod, len(rtnv[0]))
return rtnv

def verify_login(user, host='localhost', password=None, **connection_args):
'''
Attempt to login using the provided credentials.
If successful, return true. Otherwise, return False.

CLI Example:

.. code-block:: bash

salt '*' mysql.verify_login root localhost password
'''
# Override the connection args
connection_args['connection_user'] = user
connection_args['connection_host'] = host
connection_args['connection_pass'] = password

dbc = _connect(**connection_args)
if dbc is None:
# Clear the mysql.error if unable to connect
# if the connection fails, we simply return False
if 'mysql.error' in __context__:
del __context__['mysql.error']
return False
return True
@@ -35,7 +35,7 @@ import fnmatch  # do not remove, used in imported file.py functions
import mmap  # do not remove, used in imported file.py functions
import glob  # do not remove, used in imported file.py functions
# do not remove, used in imported file.py functions
import salt.ext.six as six  # pylint: disable=import-error,no-name-in-module
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=import-error,no-name-in-module
import salt.utils.atomicfile  # do not remove, used in imported file.py functions
from salt.exceptions import CommandExecutionError, SaltInvocationError

@@ -5010,8 +5010,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
adml_policy_resources=None,
display_language='en-US',
registry_class='Machine'):
# pylint: disable=null-byte-unicode-literal
u'''
r'''
helper function to prep/write adm template data to the Registry.pol file

each file begins with REGFILE_SIGNATURE (u'\u5250\u6765') and

@@ -699,7 +699,6 @@ def refresh_db(**kwargs):
include_pat='*.sls',
exclude_pat=r'E@\/\..*?\/' # Exclude all hidden directories (.git)
)

return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard)
@@ -345,8 +345,6 @@ class Pillar(object):
if pillarenv is None:
if opts.get('pillarenv_from_saltenv', False):
opts['pillarenv'] = saltenv
# Store the file_roots path so we can restore later. Issue 5449
self.actual_file_roots = opts['file_roots']
# use the local file client
self.opts = self.__gen_opts(opts, grains, saltenv=saltenv, pillarenv=pillarenv)
self.saltenv = saltenv

@@ -369,9 +367,6 @@ class Pillar(object):
self.matchers = salt.loader.matchers(self.opts)
self.rend = salt.loader.render(self.opts, self.functions)
ext_pillar_opts = copy.deepcopy(self.opts)
# Fix self.opts['file_roots'] so that ext_pillars know the real
# location of file_roots. Issue 5951
ext_pillar_opts['file_roots'] = self.actual_file_roots
# Keep the incoming opts ID intact, ie, the master id
if 'id' in opts:
ext_pillar_opts['id'] = opts['id']

@@ -438,7 +433,6 @@ class Pillar(object):
The options need to be altered to conform to the file client
'''
opts = copy.deepcopy(opts_in)
opts['file_roots'] = opts['pillar_roots']
opts['file_client'] = 'local'
if not grains:
opts['grains'] = {}

@@ -463,22 +457,25 @@ class Pillar(object):
opts['ext_pillar'].append(self.ext)
else:
opts['ext_pillar'] = [self.ext]
if '__env__' in opts['file_roots']:
if '__env__' in opts['pillar_roots']:
env = opts.get('pillarenv') or opts.get('saltenv') or 'base'
if env not in opts['file_roots']:
if env not in opts['pillar_roots']:
log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env)
opts['file_roots'][env] = opts['file_roots'].pop('__env__')
opts['pillar_roots'][env] = opts['pillar_roots'].pop('__env__')
else:
log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)",
env)
opts['file_roots'].pop('__env__')
opts['pillar_roots'].pop('__env__')
return opts

def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
return set(['base']) | set(self.opts.get('file_roots', []))
envs = set(['base'])
if 'pillar_roots' in self.opts:
envs.update(list(self.opts['pillar_roots']))
return envs

def get_tops(self):
'''

@@ -494,11 +491,11 @@ class Pillar(object):
if self.opts['pillarenv']:
# If the specified pillarenv is not present in the available
# pillar environments, do not cache the pillar top file.
if self.opts['pillarenv'] not in self.opts['file_roots']:
if self.opts['pillarenv'] not in self.opts['pillar_roots']:
log.debug(
'pillarenv \'%s\' not found in the configured pillar '
'environments (%s)',
self.opts['pillarenv'], ', '.join(self.opts['file_roots'])
self.opts['pillarenv'], ', '.join(self.opts['pillar_roots'])
)
else:
saltenvs.add(self.opts['pillarenv'])

@@ -1002,8 +999,6 @@ class Pillar(object):
mopts = dict(self.opts)
if 'grains' in mopts:
mopts.pop('grains')
# Restore the actual file_roots path. Issue 5449
mopts['file_roots'] = self.actual_file_roots
mopts['saltversion'] = __version__
pillar['master'] = mopts
if 'pillar' in self.opts and self.opts.get('ssh_merge_pillar', False):

@@ -1030,10 +1025,6 @@ class Pillar(object):
if decrypt_errors:
pillar.setdefault('_errors', []).extend(decrypt_errors)

# Reset the file_roots for the renderers
for mod_name in sys.modules:
if mod_name.startswith('salt.loaded.int.render.'):
sys.modules[mod_name].__opts__['file_roots'] = self.actual_file_roots
return pillar

def decrypt_pillar(self, pillar):
@@ -95,6 +95,8 @@ class SPMClient(object):
self.files_prov = self.opts.get('spm_files_provider', 'local')
self._prep_pkgdb()
self._prep_pkgfiles()
self.db_conn = None
self.files_conn = None
self._init()

def _prep_pkgdb(self):

@@ -104,9 +106,15 @@ class SPMClient(object):
self.pkgfiles = salt.loader.pkgfiles(self.opts)

def _init(self):
if not self.db_conn:
self.db_conn = self._pkgdb_fun('init')
if not self.files_conn:
self.files_conn = self._pkgfiles_fun('init')

def _close(self):
if self.db_conn:
self.db_conn.close()

def run(self, args):
'''
Run the SPM command

@@ -133,6 +141,8 @@ class SPMClient(object):
self._info(args)
elif command == 'list':
self._list(args)
elif command == 'close':
self._close()
else:
raise SPMInvocationError('Invalid command \'{0}\''.format(command))
except SPMException as exc:

@@ -249,7 +259,7 @@ class SPMClient(object):
if pkg.endswith('.spm'):
if self._pkgfiles_fun('path_exists', pkg):
comps = pkg.split('-')
comps = '-'.join(comps[:-2]).split('/')
comps = os.path.split('-'.join(comps[:-2]))
pkg_name = comps[-1]

formula_tar = tarfile.open(pkg, 'r:bz2')

@@ -265,6 +275,7 @@ class SPMClient(object):
to_install.extend(to_)
optional.extend(op_)
recommended.extend(re_)
formula_tar.close()
else:
raise SPMInvocationError('Package file {0} not found'.format(pkg))
else:

@@ -901,6 +912,7 @@ class SPMClient(object):
formula_def = salt.utils.yaml.safe_load(formula_ref)

self.ui.status(self._get_info(formula_def))
formula_tar.close()

def _info(self, args):
'''
@@ -73,7 +73,9 @@ def info(package, conn=None):
'''
List info for a package
'''
close = False
if conn is None:
close = True
conn = init()

fields = (

@@ -94,6 +96,8 @@ def info(package, conn=None):
(package, )
)
row = data.fetchone()
if close:
conn.close()
if not row:
return None

@@ -107,7 +111,9 @@ def list_packages(conn=None):
'''
List files for an installed package
'''
close = False
if conn is None:
close = True
conn = init()

ret = []

@@ -115,6 +121,8 @@ def list_packages(conn=None):
for pkg in data.fetchall():
ret.append(pkg)

if close:
conn.close()
return ret

@@ -122,17 +130,23 @@ def list_files(package, conn=None):
'''
List files for an installed package
'''
close = False
if conn is None:
close = True
conn = init()

data = conn.execute('SELECT package FROM packages WHERE package=?', (package, ))
if not data.fetchone():
if close:
conn.close()
return None

ret = []
data = conn.execute('SELECT path, sum FROM files WHERE package=?', (package, ))
for file_ in data.fetchall():
ret.append(file_)
if close:
conn.close()

return ret

@@ -141,7 +155,9 @@ def register_pkg(name, formula_def, conn=None):
'''
Register a package in the package database
'''
close = False
if conn is None:
close = True
conn = init()

conn.execute('INSERT INTO packages VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (

@@ -157,13 +173,17 @@ def register_pkg(name, formula_def, conn=None):
formula_def['summary'],
formula_def['description'],
))
if close:
conn.close()

def register_file(name, member, path, digest='', conn=None):
'''
Register a file in the package database
'''
close = False
if conn is None:
close = True
conn = init()

conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (

@@ -180,6 +200,8 @@ def register_file(name, member, path, digest='', conn=None):
member.gname,
member.mtime
))
if close:
conn.close()

def unregister_pkg(name, conn=None):

@@ -196,10 +218,14 @@ def unregister_file(path, pkg=None, conn=None):  # pylint: disable=W0612
'''
Unregister a file from the package database
'''
close = False
if conn is None:
close = True
conn = init()

conn.execute('DELETE FROM files WHERE path=?', (path, ))
if close:
conn.close()

def db_exists(db_):
@ -392,23 +392,33 @@ def _present(name,
|
||||
{'old': zones,
|
||||
'new': name}})
|
||||
|
||||
if block_icmp or prune_block_icmp:
|
||||
block_icmp = block_icmp or []
|
||||
new_icmp_types = []
|
||||
old_icmp_types = []
|
||||
|
||||
try:
|
||||
_valid_icmp_types = __salt__['firewalld.get_icmp_types'](
|
||||
permanent=True)
_current_icmp_blocks = __salt__['firewalld.list_icmp_block'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret

new_icmp_types = set(block_icmp) - set(_current_icmp_blocks)
old_icmp_types = []
if block_icmp:
try:
_valid_icmp_types = __salt__['firewalld.get_icmp_types'](
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret

# log errors for invalid ICMP types in block_icmp input
for icmp_type in set(block_icmp) - set(_valid_icmp_types):
log.error('%s is an invalid ICMP type', icmp_type)
block_icmp.remove(icmp_type)

new_icmp_types = set(block_icmp) - set(_current_icmp_blocks)
for icmp_type in new_icmp_types:
if icmp_type in _valid_icmp_types:
if not __opts__['test']:
try:
__salt__['firewalld.block_icmp'](name, icmp_type,
@ -416,8 +426,6 @@ def _present(name,
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
else:
log.error('%s is an invalid ICMP type', icmp_type)

if prune_block_icmp:
old_icmp_types = set(_current_icmp_blocks) - set(block_icmp)
@ -461,14 +469,14 @@ def _present(name,
{'old': default_zone,
'new': name}})

if masquerade:
try:
masquerade_ret = __salt__['firewalld.get_masquerade'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if not masquerade_ret:

if masquerade and not masquerade_ret:
if not __opts__['test']:
try:
__salt__['firewalld.add_masquerade'](name, permanent=True)
@ -478,15 +486,7 @@ def _present(name,
ret['changes'].update({'masquerade':
{'old': '',
'new': 'Masquerading successfully set.'}})

if not masquerade:
try:
masquerade_ret = __salt__['firewalld.get_masquerade'](name,
permanent=True)
except CommandExecutionError as err:
ret['comment'] = 'Error: {0}'.format(err)
return ret
if masquerade_ret:
elif not masquerade and masquerade_ret:
if not __opts__['test']:
try:
__salt__['firewalld.remove_masquerade'](name,
@ -499,6 +499,7 @@ def _present(name,
'new': 'Masquerading successfully '
'disabled.'}})

if ports or prune_ports:
ports = ports or []
try:
_current_ports = __salt__['firewalld.list_ports'](name, permanent=True)
@ -537,6 +538,7 @@ def _present(name,
{'old': _current_ports,
'new': ports}})

if port_fwd or prune_port_fwd:
port_fwd = port_fwd or []
try:
_current_port_fwd = __salt__['firewalld.list_port_fwd'](name,
@ -589,6 +591,7 @@ def _present(name,
_current_port_fwd],
'new': [fwd.todict() for fwd in port_fwd]}})

if services or prune_services:
services = services or []
try:
_current_services = __salt__['firewalld.list_services'](name,
@ -629,6 +632,7 @@ def _present(name,
{'old': _current_services,
'new': services}})

if interfaces or prune_interfaces:
interfaces = interfaces or []
try:
_current_interfaces = __salt__['firewalld.get_interfaces'](name,
@ -669,6 +673,7 @@ def _present(name,
{'old': _current_interfaces,
'new': interfaces}})

if sources or prune_sources:
sources = sources or []
try:
_current_sources = __salt__['firewalld.get_sources'](name,
@ -708,6 +713,7 @@ def _present(name,
{'old': _current_sources,
'new': sources}})

if rich_rules or prune_rich_rules:
rich_rules = rich_rules or []
try:
_current_rich_rules = __salt__['firewalld.get_rich_rules'](name,

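The masquerade hunks above appear to collapse the separate get/add and get/remove passes into one query followed by an if/elif toggle. A minimal standalone sketch of that flow, with stub functions standing in for the firewalld execution-module calls (the stubs and the zone name are illustrative, not Salt's API):

    # Illustrative stubs; in Salt these are __salt__['firewalld.get_masquerade'],
    # __salt__['firewalld.add_masquerade'] and __salt__['firewalld.remove_masquerade'].
    _zone_masquerade = {'public': False}

    def get_masquerade(zone):
        return _zone_masquerade[zone]

    def add_masquerade(zone):
        _zone_masquerade[zone] = True

    def remove_masquerade(zone):
        _zone_masquerade[zone] = False

    def sync_masquerade(zone, want_masquerade):
        # Query the current setting once, then toggle only when it differs.
        current = get_masquerade(zone)
        changes = {}
        if want_masquerade and not current:
            add_masquerade(zone)
            changes = {'masquerade': {'old': '', 'new': 'Masquerading successfully set.'}}
        elif not want_masquerade and current:
            remove_masquerade(zone)
            changes = {'masquerade': {'old': '', 'new': 'Masquerading successfully disabled.'}}
        return changes

    print(sync_masquerade('public', True))   # enables masquerading, reports a change
    print(sync_masquerade('public', True))   # second run is a no-op
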
@ -48,7 +48,7 @@ def present(name,
The keyId or keyIds to add to the GPG keychain.

user
Add GPG keys to the user's keychain
Add GPG keys to the specified user's keychain

keyserver
The keyserver to retrieve the keys from.
@ -151,7 +151,7 @@ def absent(name,
The keyId or keyIds to add to the GPG keychain.

user
Add GPG keys to the user's keychain
Remove GPG keys from the specified user's keychain

gnupghome
Override GNUPG Home directory

@ -42,7 +42,7 @@ def __virtual__():

def pv_present(name, **kwargs):
'''
Set a physical device to be used as an LVM physical volume
Set a Physical Device to be used as an LVM Physical Volume

name
The device name to initialize.
@ -106,13 +106,13 @@ def pv_absent(name):

def vg_present(name, devices=None, **kwargs):
'''
Create an LVM volume group
Create an LVM Volume Group

name
The volume group name to create
The Volume Group name to create

devices
A list of devices that will be added to the volume group
A list of devices that will be added to the Volume Group

kwargs
Any supported options to vgcreate. See
@ -214,16 +214,16 @@ def lv_present(name,
force=False,
**kwargs):
'''
Create a new logical volume
Create a new Logical Volume

name
The name of the logical volume
The name of the Logical Volume

vgname
The volume group name for this logical volume
The name of the Volume Group on which the Logical Volume resides

size
The initial size of the logical volume
The initial size of the Logical Volume

extents
The number of logical extents to allocate
@ -232,7 +232,7 @@ def lv_present(name,
The name of the snapshot

pv
The physical volume to use
The Physical Volume to use

kwargs
Any supported options to lvcreate. See
@ -241,10 +241,10 @@ def lv_present(name,
.. versionadded:: to_complete

thinvolume
Logical volume is thinly provisioned
Logical Volume is thinly provisioned

thinpool
Logical volume is a thin pool
Logical Volume is a thin pool

.. versionadded:: 2018.3.0

@ -297,13 +297,13 @@ def lv_present(name,

def lv_absent(name, vgname=None):
'''
Remove a given existing logical volume from a named existing volume group
Remove a given existing Logical Volume from a named existing volume group

name
The logical volume to remove
The Logical Volume to remove

vgname
The volume group name
The name of the Volume Group on which the Logical Volume resides
'''
ret = {'changes': {},
'comment': '',

@ -25,8 +25,9 @@ import re
import logging
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
pkg_resources = None
HAS_PKG_RESOURCES = False

# Import salt libs
import salt.utils.versions
@ -74,7 +75,11 @@ def __virtual__():
'''
Only load if the pip module is available in __salt__
'''
return 'pip.list' in __salt__ and __virtualname__ or False
if HAS_PKG_RESOURCES is False:
return False, 'The pkg_resources python library is not installed'
if 'pip.list' in __salt__:
return __virtualname__
return False


def _find_key(prefix, pip_list):

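The __virtual__ change above swaps the one-line and/or idiom for an explicit guard on the optional pkg_resources import, returning a (False, reason) pair when the dependency is missing. A self-contained sketch of the same guarded-import pattern (the dict argument stands in for Salt's __salt__ and is an assumption made for the example):

    try:
        import pkg_resources  # optional dependency
        HAS_PKG_RESOURCES = True
    except ImportError:
        pkg_resources = None
        HAS_PKG_RESOURCES = False

    __virtualname__ = 'pip'

    def __virtual__(available_functions):
        # available_functions plays the role of __salt__ here.
        if HAS_PKG_RESOURCES is False:
            return False, 'The pkg_resources python library is not installed'
        if 'pip.list' in available_functions:
            return __virtualname__
        return False

    print(__virtual__({'pip.list': lambda: []}))   # 'pip' when everything is present
    print(__virtual__({}))                         # False (or the reason tuple) otherwise
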
@ -26,7 +26,7 @@ def absent(name, user=None, signal=None):
The pattern to match.

user
The user process belongs
The user to which the process belongs

signal
Signal to send to the process(es).

@ -55,7 +55,7 @@ def managed(name, port, services=None, user=None, password=None, bypass_domains=
The username to use for the proxy server if required

password
The password to use if required by the server
The password to use for the proxy server if required

bypass_domains
An array of the domains that should bypass the proxy

@ -289,10 +289,13 @@ def set_(name,
) == {}

if not policies_are_equal:
additional_policy_comments = []
if policy_data['policy_lookup'][policy_name]['rights_assignment'] and cumulative_rights_assignments:
for user in policy_data['requested_policy'][policy_name]:
if user not in current_policy[policy_data['output_section']][pol_id]:
changes = True
else:
additional_policy_comments.append('"{0}" is already granted the right'.format(user))
else:
changes = True
if changes:
@ -303,6 +306,11 @@ def set_(name,
requested_policy_json, current_policy_json
)
policy_changes.append(policy_name)
else:
if additional_policy_comments:
ret['comment'] = '"{0}" is already set ({1}).\n'.format(policy_name, ', '.join(additional_policy_comments))
else:
ret['comment'] = '"{0}" is already set.\n'.format(policy_name) + ret['comment']
else:
log.debug('%s current setting matches '
'the requested setting', policy_name)

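In the cumulative rights-assignment branch above, only users missing from the current policy trigger a change; users already present are collected into an "already granted" comment instead. A small sketch of that comparison, using plain lists rather than the LGPO module's real data layout:

    def rights_changes(requested_users, current_users):
        # Returns (needs_change, comments) in the same spirit as the state's report.
        comments = []
        needs_change = False
        for user in requested_users:
            if user not in current_users:
                needs_change = True
            else:
                comments.append('"{0}" is already granted the right'.format(user))
        return needs_change, comments

    print(rights_changes(['Administrators', 'Backup Operators'], ['Administrators']))
    # (True, ['"Administrators" is already granted the right'])
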
@ -166,36 +166,46 @@ def hostname(name):
return ret


def join_domain(name, username=None, password=None, account_ou=None,
account_exists=False, restart=False):

def join_domain(name,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
'''
Checks if a computer is joined to the Domain.
If the computer is not in the Domain, it will be joined.
Checks if a computer is joined to the Domain. If the computer is not in the
Domain, it will be joined.

name:
Args:

name (str):
The name of the Domain.

username:
username (str):
Username of an account which is authorized to join computers to the
specified domain. Need to be either fully qualified like user@domain.tld
or simply user.
specified domain. Need to be either fully qualified like
user@domain.tld or simply user.

password:
password (str):
Password of the account to add the computer to the Domain.

account_ou:
account_ou (str):
The DN of the OU below which the account for this computer should be
created when joining the domain,
e.g. ou=computers,ou=departm_432,dc=my-company,dc=com.

account_exists:
Needs to be set to True to allow re-using an existing computer account.
account_exists (bool):
Needs to be set to ``True`` to allow re-using an existing computer
account.

restart:
Needs to be set to True to restart the computer after a successful join.
restart (bool):
Needs to be set to ``True`` to restart the computer after a
successful join.

Example:

.. code-block:: yaml

.. code-block::yaml
join_to_domain:
system.join_domain:
- name: mydomain.local.com
@ -209,9 +219,6 @@ def join_domain(name, username=None, password=None, account_ou=None,
'result': True,
'comment': 'Computer already added to \'{0}\''.format(name)}

# Set name to domain, needed for the add to domain module.
domain = name

current_domain_dic = __salt__['system.get_domain_workgroup']()
if 'Domain' in current_domain_dic:
current_domain = current_domain_dic['Domain']
@ -220,7 +227,7 @@ def join_domain(name, username=None, password=None, account_ou=None,
else:
current_domain = None

if domain == current_domain:
if name.lower() == current_domain.lower():
ret['comment'] = 'Computer already added to \'{0}\''.format(name)
return ret

@ -229,11 +236,20 @@ def join_domain(name, username=None, password=None, account_ou=None,
ret['comment'] = 'Computer will be added to \'{0}\''.format(name)
return ret

result = __salt__['system.join_domain'](domain, username, password,
account_ou, account_exists,
restart)
result = __salt__['system.join_domain'](domain=name,
username=username,
password=password,
account_ou=account_ou,
account_exists=account_exists,
restart=restart)
if result is not False:
ret['comment'] = 'Computer added to \'{0}\''.format(name)
if restart:
ret['comment'] += '\nSystem will restart'
else:
ret['comment'] += '\nSystem needs to be restarted'
ret['changes'] = {'old': current_domain,
'new': name}
else:
ret['comment'] = 'Computer failed to join \'{0}\''.format(name)
ret['result'] = False

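Two details in the join_domain hunks are easy to miss: the domain comparison becomes case-insensitive, and the execution-module call switches to keyword arguments so parameters cannot drift out of order. A tiny sketch of the comparison (the sample domain names are made up, and the None guard is an assumption added for safety in the example):

    def already_joined(requested, current):
        # Windows treats domain names case-insensitively, so compare lowercased.
        return current is not None and requested.lower() == current.lower()

    print(already_joined('MYDOMAIN.LOCAL.COM', 'mydomain.local.com'))  # True, no re-join
    print(already_joined('mydomain.local.com', None))                  # False, join needed
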
@ -491,11 +491,7 @@ def query(url,
data = _urlencode(data)

if verify_ssl:
# tornado requires a str, cannot be unicode str in py2
if ca_bundle is None:
req_kwargs['ca_certs'] = ca_bundle
else:
req_kwargs['ca_certs'] = salt.utils.stringutils.to_str(ca_bundle)

max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body'])
connect_timeout = opts.get('http_connect_timeout', salt.config.DEFAULT_MINION_OPTS['http_connect_timeout'])
@ -541,31 +537,36 @@ def query(url,

supports_max_body_size = 'max_body_size' in client_argspec.args

req_kwargs.update({
'method': method,
'headers': header_dict,
'auth_username': username,
'auth_password': password,
'body': data,
'validate_cert': verify_ssl,
'allow_nonstandard_methods': True,
'streaming_callback': streaming_callback,
'header_callback': header_callback,
'connect_timeout': connect_timeout,
'request_timeout': timeout,
'proxy_host': proxy_host,
'proxy_port': proxy_port,
'proxy_username': proxy_username,
'proxy_password': proxy_password,
'raise_error': raise_error,
'decompress_response': False,
})

# Unicode types will cause a TypeError when Tornado's curl HTTPClient
# invokes setopt. Therefore, make sure all arguments we pass which
# contain strings are str types.
req_kwargs = salt.utils.data.decode(req_kwargs, to_str=True)

try:
download_client = HTTPClient(max_body_size=max_body) \
if supports_max_body_size \
else HTTPClient()
result = download_client.fetch(
url_full,
method=method,
headers=header_dict,
auth_username=username,
auth_password=password,
body=data,
validate_cert=verify_ssl,
allow_nonstandard_methods=True,
streaming_callback=streaming_callback,
header_callback=header_callback,
connect_timeout=connect_timeout,
request_timeout=timeout,
proxy_host=proxy_host,
proxy_port=proxy_port,
proxy_username=proxy_username,
proxy_password=proxy_password,
raise_error=raise_error,
decompress_response=False,
**req_kwargs
)
result = download_client.fetch(url_full, **req_kwargs)
except tornado.httpclient.HTTPError as exc:
ret['status'] = exc.code
ret['error'] = six.text_type(exc)

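The query() hunks gather every fetch option into one req_kwargs dict, decode it to native str once, and make a single fetch(url_full, **req_kwargs) call instead of repeating the long argument list. A stripped-down sketch of that pattern; the fetch() function below is a stand-in for Tornado's HTTPClient.fetch, not the real client:

    def fetch(url, method='GET', headers=None, connect_timeout=None, request_timeout=None):
        # Stand-in for tornado.httpclient.HTTPClient().fetch(); it only echoes its inputs.
        return {'url': url, 'method': method, 'headers': headers,
                'connect_timeout': connect_timeout, 'request_timeout': request_timeout}

    req_kwargs = {}
    req_kwargs.update({
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'connect_timeout': 20,
        'request_timeout': 120,
    })

    # One call site, driven entirely by the accumulated keyword arguments.
    result = fetch('https://example.com/api', **req_kwargs)
    print(result)
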
@ -59,16 +59,16 @@ class SaltCacheLoader(BaseLoader):
self.opts = opts
self.saltenv = saltenv
self.encoding = encoding
if self.opts['file_roots'] is self.opts['pillar_roots']:
if saltenv not in self.opts['file_roots']:
self.pillar_rend = pillar_rend
if self.pillar_rend:
if saltenv not in self.opts['pillar_roots']:
self.searchpath = []
else:
self.searchpath = opts['file_roots'][saltenv]
self.searchpath = opts['pillar_roots'][saltenv]
else:
self.searchpath = [os.path.join(opts['cachedir'], 'files', saltenv)]
log.debug('Jinja search path: %s', self.searchpath)
self.cached = []
self.pillar_rend = pillar_rend
self._file_client = None
# Instantiate the fileclient
self.file_client()

@ -9,6 +9,7 @@ import collections
import datetime
import logging
import subprocess
import salt.utils.stringutils

# Import 3rd-party libs
from salt.ext import six
@ -50,7 +51,7 @@ def get_osarch():
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0]
return ret or 'unknown'
return salt.utils.stringutils.to_str(ret).strip() or 'unknown'


def check_32(arch, osarch=None):

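The get_osarch() change matters on Python 3, where Popen.communicate() returns bytes: the value is converted to a native str and stripped of its trailing newline before the 'unknown' fallback applies. A minimal standalone equivalent; the rpm invocation shown is an assumption (the hunk only shows the Popen keyword arguments), and a plain decode stands in for salt.utils.stringutils.to_str:

    import subprocess

    def get_osarch():
        out = subprocess.Popen('rpm --eval "%{_host_cpu}"',
                               shell=True,
                               close_fds=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE).communicate()[0]
        # bytes -> str, then drop the trailing newline; fall back when rpm is absent.
        return out.decode('utf-8', 'replace').strip() or 'unknown'

    print(get_osarch())
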
@ -1924,7 +1924,7 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
# Check Perms for basic perms
if isinstance(new_perms[user]['perms'], six.string_types):
if not has_permission(obj_name=obj_name,
principal=user,
principal=user_name,
permission=new_perms[user]['perms'],
access_mode=access_mode,
obj_type=obj_type,
@ -1937,7 +1937,7 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
else:
for perm in new_perms[user]['perms']:
if not has_permission(obj_name=obj_name,
principal=user,
principal=user_name,
permission=perm,
access_mode=access_mode,
obj_type=obj_type,
@ -2013,7 +2013,7 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
try:
set_permissions(
obj_name=obj_name,
principal=user,
principal=user_name,
permissions=perms,
access_mode=access_mode,
applies_to=applies_to,
@ -2196,7 +2196,8 @@ def check_perms(obj_name,
cur_perms = get_permissions(obj_name=obj_name, obj_type=obj_type)
for user_name in cur_perms['Not Inherited']:
# case insensitive dictionary search
if user_name.lower() not in set(k.lower() for k in grant_perms):
if grant_perms is not None and \
user_name.lower() not in set(k.lower() for k in grant_perms):
if 'grant' in cur_perms['Not Inherited'][user_name]:
if __opts__['test'] is True:
if 'remove_perms' not in ret['changes']:
@ -2214,7 +2215,8 @@ def check_perms(obj_name,
ret['changes']['remove_perms'].update(
{user_name: cur_perms['Not Inherited'][user_name]})
# case insensitive dictionary search
if user_name.lower() not in set(k.lower() for k in deny_perms):
if deny_perms is not None and \
user_name.lower() not in set(k.lower() for k in deny_perms):
if 'deny' in cur_perms['Not Inherited'][user_name]:
if __opts__['test'] is True:
if 'remove_perms' not in ret['changes']:

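The check_perms() hunks add a None guard in front of the case-insensitive membership test, so passing grant_perms=None (or deny_perms=None) no longer trips over the set comprehension when existing ACEs are scanned. A compact sketch of the guarded test (the sample principals and perms dict are made up):

    def should_remove(user_name, requested_perms):
        # Only flag an ACE for removal when a perms dict was actually supplied
        # and the user is not in it; the comparison is case-insensitive.
        return requested_perms is not None and \
            user_name.lower() not in set(k.lower() for k in requested_perms)

    print(should_remove('Administrators', {'administrators': 'full_control'}))  # False
    print(should_remove('Guests', {'administrators': 'full_control'}))          # True
    print(should_remove('Guests', None))                                        # False
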
@ -123,6 +123,12 @@ def get_current_user(with_domain=True):
'''
Gets the user executing the process

Args:

with_domain (bool):
``True`` will prepend the user name with the machine name or domain
separated by a backslash

Returns:
str: The user name
'''

@ -6,7 +6,7 @@ from __future__ import absolute_import, print_function, unicode_literals

# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import TestsLoggingHandler
from tests.support.helpers import TestsLoggingHandler, flaky

import logging
import salt.ext.six as six
@ -25,6 +25,7 @@ class LoggingJIDsTest(ModuleCase):
self.handler = TestsLoggingHandler(format=log_format,
level=logging.DEBUG)

@flaky
def test_jid_in_logs(self):
'''
Test JID in log_format

@ -249,7 +249,11 @@ class GroupModuleTest(ModuleCase):
self.run_function('user.add', [self._user])
self.run_function('user.add', [self._user1])
m = '{0},{1}'.format(self._user, self._user1)
self.assertTrue(self.run_function('group.members', [self._group, m]))
ret = self.run_function('group.members', [self._group, m])
if salt.utils.platform.is_windows():
self.assertTrue(ret['result'])
else:
self.assertTrue(ret)
group_info = self.run_function('group.info', [self._group])
self.assertIn(self._user, str(group_info['members']))
self.assertIn(self._user1, str(group_info['members']))

@ -13,6 +13,7 @@ import textwrap
from tests.support.case import ModuleCase
from tests.support.helpers import flaky
from tests.support.paths import TMP_PILLAR_TREE
from tests.support.unit import skipIf

# Import Salt Libs
import salt.utils.files
@ -177,6 +178,7 @@ class SaltUtilSyncModuleTest(ModuleCase):
self.assertEqual(ret, expected_return)


@skipIf(True, 'Pillar refresh test is flaky. Skipping for now.')
class SaltUtilSyncPillarTest(ModuleCase):
'''
Testcase for the saltutil sync pillar module

@ -15,7 +15,8 @@ from tests.support.runtests import RUNTIME_VARS
import salt.utils.files
import salt.utils.platform

CURL = os.path.join(RUNTIME_VARS.FILES, 'file', 'base', 'win', 'repo-ng', 'curl.sls')
REPO_DIR = os.path.join(RUNTIME_VARS.FILES, 'file', 'base', 'win', 'repo-ng')
CURL = os.path.join(REPO_DIR, 'curl.sls')


@skipIf(not salt.utils.platform.is_windows(), 'windows test only')
@ -33,8 +34,10 @@ class WinPKGTest(ModuleCase):
Test add and removing a new pkg sls
in the windows software repository
'''
def _check_pkg(pkgs, exists=True):
self.run_function('pkg.refresh_db')
def _check_pkg(pkgs, check_refresh, exists=True):
refresh = self.run_function('pkg.refresh_db')
self.assertEqual(check_refresh, refresh['total'],
msg='total returned {0}. Expected return {1}'.format(refresh['total'], check_refresh))
repo_data = self.run_function('pkg.get_repo_data', timeout=300)
repo_cache = os.path.join(RUNTIME_VARS.TMP, 'rootdir', 'cache', 'files', 'base', 'win', 'repo-ng')
for pkg in pkgs:
@ -51,7 +54,7 @@ class WinPKGTest(ModuleCase):

pkgs = ['putty', '7zip']
# check putty and 7zip are in cache and repo query
_check_pkg(pkgs)
_check_pkg(pkgs, 2)

# now add new sls
with salt.utils.files.fopen(CURL, 'w') as fp_:
@ -74,11 +77,13 @@ class WinPKGTest(ModuleCase):
'''))
# now check if curl is also in cache and repo query
pkgs.append('curl')
_check_pkg(pkgs)
for pkg in pkgs:
self.assertIn(pkg + '.sls', os.listdir(REPO_DIR))
_check_pkg(pkgs, 3)

# remove curl sls and check its not in cache and repo query
os.remove(CURL)
_check_pkg(['curl'], exists=False)
_check_pkg(['curl'], 2, exists=False)

def tearDown(self):
if os.path.isfile(CURL):

@ -101,13 +101,13 @@ from salt.utils.gitfs import (
# Check for requisite components
try:
HAS_GITPYTHON = GITPYTHON_VERSION >= GITPYTHON_MINVER
except ImportError:
except Exception:
HAS_GITPYTHON = False

try:
HAS_PYGIT2 = PYGIT2_VERSION >= PYGIT2_MINVER \
and LIBGIT2_VERSION >= LIBGIT2_MINVER
except AttributeError:
except Exception:
HAS_PYGIT2 = False

HAS_SSHD = bool(salt.utils.path.which('sshd'))

@ -14,13 +14,16 @@ import os
import pipes
import shutil
import tempfile
import logging

# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.unit import skipIf

# Import salt libs
import salt.utils.platform
import salt.utils.files
import salt.utils.yaml

@ -28,6 +31,9 @@ import salt.utils.yaml
from salt.ext import six


log = logging.getLogger(__name__)


class CopyTest(ShellCase, ShellCaseCommonTestsMixin):

_call_binary_ = 'salt-cp'
@ -54,19 +60,24 @@ class CopyTest(ShellCase, ShellCaseCommonTestsMixin):
with salt.utils.files.fopen(testfile, 'r') as fh_:
testfile_contents = fh_.read()

def quote(arg):
if salt.utils.platform.is_windows():
return arg
return pipes.quote(arg)

for idx, minion in enumerate(minions):
if 'localhost' in minion:
continue
ret = self.run_salt(
'--out yaml {0} file.directory_exists {1}'.format(
pipes.quote(minion), TMP
quote(minion), TMP
)
)
data = salt.utils.yaml.safe_load('\n'.join(ret))
if data[minion] is False:
ret = self.run_salt(
'--out yaml {0} file.makedirs {1}'.format(
pipes.quote(minion),
quote(minion),
TMP
)
)
@ -79,19 +90,23 @@ class CopyTest(ShellCase, ShellCaseCommonTestsMixin):
)

ret = self.run_cp('--out pprint {0} {1} {2}'.format(
pipes.quote(minion),
pipes.quote(testfile),
pipes.quote(minion_testfile)
quote(minion),
quote(testfile),
quote(minion_testfile),
))

data = eval('\n'.join(ret), {}, {})  # pylint: disable=eval-used
for part in six.itervalues(data):
self.assertTrue(part[minion_testfile])
if salt.utils.platform.is_windows():
key = minion_testfile.replace('\\', '\\\\')
else:
key = minion_testfile
self.assertTrue(part[key])

ret = self.run_salt(
'--out yaml {0} file.file_exists {1}'.format(
pipes.quote(minion),
pipes.quote(minion_testfile)
quote(minion),
quote(minion_testfile)
)
)
data = salt.utils.yaml.safe_load('\n'.join(ret))
@ -99,22 +114,23 @@ class CopyTest(ShellCase, ShellCaseCommonTestsMixin):

ret = self.run_salt(
'--out yaml {0} file.contains {1} {2}'.format(
pipes.quote(minion),
pipes.quote(minion_testfile),
pipes.quote(testfile_contents)
quote(minion),
quote(minion_testfile),
quote(testfile_contents)
)
)
data = salt.utils.yaml.safe_load('\n'.join(ret))
self.assertTrue(data[minion])
ret = self.run_salt(
'--out yaml {0} file.remove {1}'.format(
pipes.quote(minion),
pipes.quote(minion_testfile)
quote(minion),
quote(minion_testfile)
)
)
data = salt.utils.yaml.safe_load('\n'.join(ret))
self.assertTrue(data[minion])

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_issue_7754(self):
config_dir = os.path.join(TMP, 'issue-7754')

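The salt-cp test replaces direct pipes.quote() calls with a local helper that skips quoting on Windows, where POSIX-style single quotes end up inside paths such as C:\temp. A minimal version of that helper; shlex.quote is used here in place of the deprecated pipes module, which is an assumption, not what the test imports:

    import platform
    from shlex import quote as shlex_quote  # the test itself uses pipes.quote

    def quote(arg):
        # cmd.exe keeps POSIX single quotes literally, so pass Windows args through.
        if platform.system() == 'Windows':
            return arg
        return shlex_quote(arg)

    print(quote('/tmp/some dir/testfile'))   # quoted on POSIX, untouched on Windows
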
@ -8,8 +8,10 @@ import textwrap
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import FILES
from tests.support.unit import skipIf

# Import Salt Libs
import salt.utils.platform
import salt.utils.files


@ -25,6 +27,7 @@ class EnabledTest(ModuleCase):
"export SALTY_VARIABLE='saltines' && echo $SALTY_VARIABLE ; "
"echo duh &> /dev/null")

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_shell_default_enabled(self):
'''
ensure that python_shell defaults to True for cmd.run
@ -33,6 +36,7 @@ class EnabledTest(ModuleCase):
ret = self.run_function('cmd.run', [self.cmd])
self.assertEqual(ret.strip(), enabled_ret)

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_shell_disabled(self):
'''
test shell disabled output for cmd.run
@ -42,6 +46,7 @@ class EnabledTest(ModuleCase):
ret = self.run_function('cmd.run', [self.cmd], python_shell=False)
self.assertEqual(ret, disabled_ret)

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_template_shell(self):
'''
Test cmd.shell works correctly when using a template.
@ -72,6 +77,7 @@ class EnabledTest(ModuleCase):
finally:
os.remove(state_file)

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_template_default_disabled(self):
'''
test shell disabled output for templates (python_shell=False is the default

@ -234,6 +234,9 @@ class KeyTest(ShellCase, ShellCaseCommonTestsMixin):
for fname in key_names:
self.assertTrue(os.path.isfile(os.path.join(tempdir, fname)))
finally:
for dirname, dirs, files in os.walk(tempdir):
for filename in files:
os.chmod(os.path.join(dirname, filename), 0o700)
shutil.rmtree(tempdir)

def test_keys_generation_keysize_minmax(self):

@ -63,7 +63,7 @@ class MatchTest(ShellCase, ShellCaseCommonTestsMixin):

def test_compound_pcre_grain_and_grain(self):
match = 'P@test_grain:^cheese$ and * and G@test_grain:cheese'
data = self.run_salt('-t 1 -C \'{0}\' test.ping'.format(match))
data = self.run_salt('-t 1 -C "{0}" test.ping'.format(match))
assert minion_in_returns('minion', data) is True
assert minion_in_returns('sub_minion', data) is False

@ -74,22 +74,22 @@ class MatchTest(ShellCase, ShellCaseCommonTestsMixin):
assert minion_in_returns('minion', data) is False

def test_compound_not_sub_minion(self):
data = self.run_salt("-C 'not sub_minion' test.ping")
data = self.run_salt('-C "not sub_minion" test.ping')
assert minion_in_returns('minion', data) is True
assert minion_in_returns('sub_minion', data) is False

def test_compound_all_and_not_grains(self):
data = self.run_salt("-C '* and ( not G@test_grain:cheese )' test.ping")
data = self.run_salt('-C "* and ( not G@test_grain:cheese )" test.ping')
assert minion_in_returns('minion', data) is False
assert minion_in_returns('sub_minion', data) is True

def test_compound_grain_regex(self):
data = self.run_salt("-C 'G%@planets%merc*' test.ping")
data = self.run_salt('-C "G%@planets%merc*" test.ping')
assert minion_in_returns('minion', data) is True
assert minion_in_returns('sub_minion', data) is False

def test_coumpound_pcre_grain_regex(self):
data = self.run_salt("-C 'P%@planets%^(mercury|saturn)$' test.ping")
data = self.run_salt('-C "P%@planets%^(mercury|saturn)$" test.ping')
assert minion_in_returns('minion', data) is True
assert minion_in_returns('sub_minion', data) is True

@ -313,7 +313,7 @@ class MatchTest(ShellCase, ShellCaseCommonTestsMixin):
self.assertIn('minion', data.replace('sub_minion', 'stub'))

def test_ipcidr(self):
subnets_data = self.run_salt('--out yaml \'*\' network.subnets')
subnets_data = self.run_salt('--out yaml "*" network.subnets')
yaml_data = salt.utils.yaml.safe_load('\n'.join(subnets_data))

# We're just after the first defined subnet from 'minion'
@ -370,7 +370,11 @@ class MatchTest(ShellCase, ShellCaseCommonTestsMixin):
data = self.run_salt('-d minion salt ldap.search "filter=ou=People"', catch_stderr=True)
self.assertIn('You can only get documentation for one method at one time', '\n'.join(data[1]))

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_issue_7754(self):
'''
Skip on Windows because Syslog is not installed
'''
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):

@ -31,6 +31,7 @@ from salt.ext import six
# Import salt libs
import salt.utils.files
import salt.utils.yaml
import salt.utils.platform

log = logging.getLogger(__name__)

@ -271,9 +272,12 @@ class MinionTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
for minion in minions:
minion.shutdown()

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_unknown_user(self):
'''
Ensure correct exit status when the minion is configured to run as an unknown user.

Skipped on windows because daemonization not supported
'''

minion = testprogram.TestDaemonSaltMinion(
@ -302,6 +306,7 @@ class MinionTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
minion.shutdown()

# pylint: disable=invalid-name
# @skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-minion.
@ -331,9 +336,12 @@ class MinionTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
# cause timeout exceptions and respective traceback
minion.shutdown()

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-minion starts correctly.

Skipped on windows because daemonization not supported
'''

minion = testprogram.TestDaemonSaltMinion(

@ -10,13 +10,18 @@
from __future__ import absolute_import, print_function, unicode_literals
import logging

from tests.support.unit import skipIf

# Import salt tests libs
import tests.integration.utils
from tests.integration.utils import testprogram

import salt.utils.platform

log = logging.getLogger(__name__)


@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
class ProxyTest(testprogram.TestProgramCase):
'''
Various integration tests for the salt-proxy executable.
@ -25,6 +30,8 @@ class ProxyTest(testprogram.TestProgramCase):
def test_exit_status_no_proxyid(self):
'''
Ensure correct exit status when --proxyid argument is missing.

Skip on Windows because daemonization not supported
'''

proxy = testprogram.TestDaemonSaltProxy(
@ -61,6 +68,8 @@ class ProxyTest(testprogram.TestProgramCase):
def test_exit_status_unknown_user(self):
'''
Ensure correct exit status when the proxy is configured to run as an unknown user.

Skip on Windows because daemonization not supported
'''

proxy = testprogram.TestDaemonSaltProxy(
@ -92,6 +101,8 @@ class ProxyTest(testprogram.TestProgramCase):
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-proxy.

Skip on Windows because daemonization not supported
'''

proxy = testprogram.TestDaemonSaltProxy(
@ -120,6 +131,8 @@ class ProxyTest(testprogram.TestProgramCase):
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-proxy starts correctly.

Skip on Windows because daemonization not supported
'''

proxy = testprogram.TestDaemonSaltProxy(

@ -15,12 +15,14 @@ from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.helpers import skip_if_not_root
from tests.support.unit import skipIf

# Import Salt libs
import salt.utils.files
import salt.utils.platform
import salt.utils.yaml


USERA = 'saltdev'
USERA_PWD = 'saltdev'
HASHED_USERA_PWD = '$6$SALTsalt$ZZFD90fKFWq8AGmmX0L3uBtS9fXL62SrTk5zcnQ6EkD6zoiM3kB88G1Zvs0xm/gZ7WXJRs5nsTBybUvGSqZkT.'
@ -90,7 +92,11 @@ class RunTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin)
data = self.run_run('-d virt.list foo', catch_stderr=True)
self.assertIn('You can only get documentation for one method at one time', '\n'.join(data[1]))

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_issue_7754(self):
'''
Skip on windows because syslog not available
'''
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):

@ -18,11 +18,13 @@ import logging
from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.unit import skipIf
from tests.integration.utils import testprogram

# Import salt libs
import salt.utils.files
import salt.utils.yaml
import salt.utils.platform

log = logging.getLogger(__name__)

@ -80,9 +82,12 @@ class SyndicTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_unknown_user(self):
'''
Ensure correct exit status when the syndic is configured to run as an unknown user.

Skipped on windows because daemonization not supported
'''

syndic = testprogram.TestDaemonSaltSyndic(
@ -110,9 +115,12 @@ class SyndicTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
syndic.shutdown()

# pylint: disable=invalid-name
@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-syndic.

Skipped on windows because daemonization not supported
'''

syndic = testprogram.TestDaemonSaltSyndic(
@ -138,9 +146,12 @@ class SyndicTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMix
# cause timeout exceptions and respective traceback
syndic.shutdown()

@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-syndic starts correctly.

Skipped on windows because daemonization not supported
'''

syndic = testprogram.TestDaemonSaltSyndic(

@ -5,10 +5,14 @@ Integration tests for the lxd states
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals

# Import Salt Testing libs
from tests.support.helpers import flaky

# Import Lxd Test Case
import tests.integration.states.test_lxd


@flaky
class LxdContainerTestCase(tests.integration.states.test_lxd.LxdTestCase):

def setUp(self):

@ -763,6 +763,7 @@ class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
def run_spm(self, cmd, config, arg=None):
client = self._spm_client(config)
spm_cmd = client.run([cmd, arg])
client._close()
return self.ui._status


@ -168,24 +168,3 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix
finally:
if self.test_symlink_list_file_roots:
self.opts['file_roots'] = orig_file_roots


class RootsLimitTraversalTest(TestCase, AdaptedConfigurationTestCaseMixin):

def test_limit_traversal(self):
'''
1) Set up a deep directory structure
2) Enable the configuration option 'fileserver_limit_traversal'
3) Ensure that we can find SLS files in a directory so long as there is
an SLS file in a directory above.
4) Ensure that we cannot find an SLS file in a directory that does not
have an SLS file in a directory above.

'''
file_client_opts = self.get_temp_config('master')
file_client_opts['fileserver_limit_traversal'] = True

ret = salt.fileclient.Client(file_client_opts).list_states('base')
self.assertIn('test_deep.test', ret)
self.assertIn('test_deep.a.test', ret)
self.assertNotIn('test_deep.b.2.test', ret)

@ -87,6 +87,50 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
"UBUNTU_CODENAME": "artful",
})

def test_parse_cpe_name_wfn(self):
'''
Parse correct CPE_NAME data WFN formatted
:return:
'''
for cpe, cpe_ret in [('cpe:/o:opensuse:leap:15.0',
{'phase': None, 'version': '15.0', 'product': 'leap',
'vendor': 'opensuse', 'part': 'operating system'}),
('cpe:/o:vendor:product:42:beta',
{'phase': 'beta', 'version': '42', 'product': 'product',
'vendor': 'vendor', 'part': 'operating system'})]:
ret = core._parse_cpe_name(cpe)
for key in cpe_ret:
assert key in ret
assert cpe_ret[key] == ret[key]

def test_parse_cpe_name_v23(self):
'''
Parse correct CPE_NAME data v2.3 formatted
:return:
'''
for cpe, cpe_ret in [('cpe:2.3:o:microsoft:windows_xp:5.1.601:beta:*:*:*:*:*:*',
{'phase': 'beta', 'version': '5.1.601', 'product': 'windows_xp',
'vendor': 'microsoft', 'part': 'operating system'}),
('cpe:2.3:h:corellian:millenium_falcon:1.0:*:*:*:*:*:*:*',
{'phase': None, 'version': '1.0', 'product': 'millenium_falcon',
'vendor': 'corellian', 'part': 'hardware'}),
('cpe:2.3:*:dark_empire:light_saber:3.0:beta:*:*:*:*:*:*',
{'phase': 'beta', 'version': '3.0', 'product': 'light_saber',
'vendor': 'dark_empire', 'part': None})]:
ret = core._parse_cpe_name(cpe)
for key in cpe_ret:
assert key in ret
assert cpe_ret[key] == ret[key]

def test_parse_cpe_name_broken(self):
'''
Parse broken CPE_NAME data
:return:
'''
for cpe in ['cpe:broken', 'cpe:broken:in:all:ways:*:*:*:*',
'cpe:x:still:broken:123', 'who:/knows:what:is:here']:
assert core._parse_cpe_name(cpe) == {}

def test_missing_os_release(self):
with patch('salt.utils.files.fopen', mock_open(read_data={})):
os_release = core._parse_os_release('/etc/os-release', '/usr/lib/os-release')

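The new grains tests exercise _parse_cpe_name() with both old WFN-style URIs (cpe:/o:vendor:product:version[:phase]) and CPE 2.3 strings. A rough illustrative parser that reproduces the fields those tests check; this is a sketch, not Salt's actual implementation:

    def parse_cpe_name(cpe):
        parts = {'o': 'operating system', 'h': 'hardware', 'a': 'application'}
        if cpe.startswith('cpe:2.3:'):
            ids = cpe.split(':')[2:]      # part, vendor, product, version, phase, ...
            if len(ids) < 5:
                return {}
            return {'part': parts.get(ids[0]), 'vendor': ids[1], 'product': ids[2],
                    'version': ids[3], 'phase': ids[4] if ids[4] != '*' else None}
        if cpe.startswith('cpe:/'):
            ids = cpe[5:].split(':')      # part, vendor, product, version[, phase]
            if len(ids) < 4:
                return {}
            return {'part': parts.get(ids[0]), 'vendor': ids[1], 'product': ids[2],
                    'version': ids[3], 'phase': ids[4] if len(ids) > 4 else None}
        return {}

    print(parse_cpe_name('cpe:/o:opensuse:leap:15.0'))
    print(parse_cpe_name('cpe:2.3:o:microsoft:windows_xp:5.1.601:beta:*:*:*:*:*:*'))
    print(parse_cpe_name('cpe:broken'))   # {}
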
@ -38,6 +38,7 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):

Do it before test_user_create_when_user_exists mocks the user_exists call
'''
with patch.object(mysql, 'version', return_value='8.0.10'):
self._test_call(mysql.user_exists,
{'sql': ('SELECT User,Host FROM mysql.user WHERE '
'User = %(user)s AND Host = %(host)s AND '
@ -52,15 +53,38 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):
password='BLUECOW'
)

with patch.object(mysql, 'version', return_value='8.0.11'):
self._test_call(mysql.user_exists,
{'sql': ('SELECT User,Host FROM mysql.user WHERE '
'User = %(user)s AND Host = %(host)s'),
'sql_args': {'host': 'localhost',
'user': 'mytestuser'
}
},
user='mytestuser',
host='localhost',
password='BLUECOW'
)

# test_user_create_when_user_exists(self):
# ensure we don't try to create a user when one already exists
# mock the version of MySQL
with patch.object(mysql, 'version', MagicMock(return_value='8.0.10')):
with patch.object(mysql, 'version', return_value='8.0.10'):
with patch.object(mysql, 'user_exists', MagicMock(return_value=True)):
with patch.dict(mysql.__salt__, {'config.option': MagicMock()}):
ret = mysql.user_create('testuser')
self.assertEqual(False, ret)

# test_user_create_when_user_exists(self):
# ensure we don't try to create a user when one already exists
# mock the version of MySQL
with patch.object(mysql, 'version', return_value='8.0.11'):
with patch.object(mysql, 'user_exists', MagicMock(return_value=True)):
with patch.object(mysql, 'verify_login', MagicMock(return_value=True)):
with patch.dict(mysql.__salt__, {'config.option': MagicMock()}):
ret = mysql.user_create('testuser')
self.assertEqual(False, ret)

def test_user_create(self):
'''
Test the creation of a MySQL user in mysql exec module
@ -82,6 +106,7 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):
'''
connect_mock = MagicMock()
with patch.object(mysql, '_connect', connect_mock):
with patch.object(mysql, 'version', return_value='8.0.10'):
with patch.dict(mysql.__salt__, {'config.option': MagicMock()}):
mysql.user_chpass('testuser', password='BLUECOW')
calls = (
@ -96,6 +121,19 @@ class MySQLTestCase(TestCase, LoaderModuleMockMixin):
)
connect_mock.assert_has_calls(calls, any_order=True)

connect_mock = MagicMock()
with patch.object(mysql, '_connect', connect_mock):
with patch.object(mysql, 'version', return_value='8.0.11'):
with patch.dict(mysql.__salt__, {'config.option': MagicMock()}):
mysql.user_chpass('testuser', password='BLUECOW')
calls = (
call().cursor().execute(
"ALTER USER 'testuser'@'localhost' IDENTIFIED BY 'BLUECOW';"
),
call().cursor().execute('FLUSH PRIVILEGES;'),
)
connect_mock.assert_has_calls(calls, any_order=True)

def test_user_remove(self):
'''
Test the removal of a MySQL user in mysql exec module

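These MySQL test additions pin the behaviour boundary at server version 8.0.11: newer servers get the ALTER USER ... IDENTIFIED BY statement and a user_exists query without the password clause, older ones keep the legacy PASSWORD()-style handling. A small sketch of that version gate; the strategy strings are illustrative labels, not the exact SQL Salt emits:

    def chpass_strategy(version):
        # MySQL 8.0.11 removed the PASSWORD() function, so pick the statement style by version.
        ver = tuple(int(p) for p in version.split('.'))
        return 'ALTER USER ... IDENTIFIED BY ...' if ver >= (8, 0, 11) else 'SET PASSWORD ... = PASSWORD(...)'

    print(chpass_strategy('8.0.10'))
    print(chpass_strategy('8.0.11'))
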
@ -8,16 +8,13 @@ import os

# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON

# Import Salt Libs
import salt.modules.win_file as win_file
from salt.exceptions import CommandExecutionError
import salt.utils.platform
import salt.utils.win_dacl


@skipIf(NO_MOCK, NO_MOCK_REASON)

@ -338,7 +338,7 @@ class PillarTestCase(TestCase):
'extension_modules': '',
}
pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'base', pillarenv='dev')
self.assertEqual(pillar.opts['file_roots'],
self.assertEqual(pillar.opts['pillar_roots'],
{'base': ['/srv/pillar/base'], 'dev': ['/srv/pillar/__env__']})

def test_ignored_dynamic_pillarenv(self):
@ -353,7 +353,7 @@ class PillarTestCase(TestCase):
'extension_modules': '',
}
pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'base', pillarenv='base')
self.assertEqual(pillar.opts['file_roots'], {'base': ['/srv/pillar/base']})
self.assertEqual(pillar.opts['pillar_roots'], {'base': ['/srv/pillar/base']})

@patch('salt.fileclient.Client.list_states')
def test_malformed_pillar_sls(self, mock_list_states):

@ -78,6 +78,24 @@ integration.shell.test_arguments
integration.shell.test_auth
integration.shell.test_call
integration.shell.test_cloud
integration.shell.test_cp
integration.shell.test_enabled
integration.shell.test_key
integration.shell.test_master
integration.shell.test_master_tops
integration.shell.test_matcher
integration.shell.test_minion
integration.shell.test_proxy
integration.shell.test_runner
integration.shell.test_saltcli
integration.shell.test_spm
integration.shell.test_syndic
integration.spm.test_build
integration.spm.test_files
integration.spm.test_info
integration.spm.test_install
integration.spm.test_remove
integration.spm.test_repo
integration.states.test_host
integration.states.test_pip_state
integration.states.test_pkg