Merge pull request #49501 from rallytime/merge-fluorine

[fluorine] Merge forward from 2018.3 to fluorine
This commit is contained in:
Nicole Thomas 2018-09-04 17:02:43 -04:00 committed by GitHub
commit d53cd16601
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 452 additions and 63 deletions

View File

@ -0,0 +1,73 @@
// Jenkins declarative pipeline: run the Salt Test Kitchen suite (py2) against
// a windows-2016 instance provisioned on EC2, reporting status back to GitHub.
pipeline {
agent { label 'kitchen-slave' }
// Timestamp every console line and honor ANSI color codes in the log.
options {
timestamps()
ansiColor('xterm')
}
environment {
// Kitchen platform/driver definitions pre-staged on the build slave.
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
// rbenv shims first so the pinned Ruby below is picked up.
// NOTE: fixed a duplicated trailing ":/root/bin:/root/bin" entry.
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
RBENV_VERSION = "2.4.2"
// Suite/platform pair selects the kitchen instance name below
// (e.g. "py2-windows-2016").
TEST_SUITE = "py2"
TEST_PLATFORM = "windows-2016"
// Force colored output from pytest/py tooling even without a TTY.
PY_COLORS = 1
}
stages {
// Mark the PR commit as PENDING on GitHub before any real work starts.
stage('github-pending') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
status: 'PENDING',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
// Install the Ruby gems Test Kitchen needs for the ec2/windows drivers.
stage('setup') {
steps {
sh 'bundle install --with ec2 windows --without opennebula docker'
}
}
stage('run kitchen') {
steps {
// AWS credentials + SSH agent are required both to provision the
// EC2 instance and to reach it for converge/verify.
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
// Converge is retried once — presumably to ride out transient
// EC2/provisioning failures (TODO confirm intent).
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
}
}}
}
post {
// Always tear the EC2 instance down and capture test artifacts,
// even when converge/verify failed.
always {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
}
}}
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
archiveArtifacts artifacts: 'artifacts/logs/minion'
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
}
}
}
}
post {
// Publish JUnit results and clean the workspace regardless of outcome.
always {
junit 'artifacts/xml-unittests-output/*.xml'
cleanWs()
}
// Flip the GitHub commit status to its final state.
success {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
status: 'SUCCESS',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
failure {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
}

View File

@ -0,0 +1,73 @@
// Jenkins declarative pipeline: run the Salt Test Kitchen suite (py3) against
// a windows-2016 instance provisioned on EC2, reporting status back to GitHub.
pipeline {
agent { label 'kitchen-slave' }
// Timestamp every console line and honor ANSI color codes in the log.
options {
timestamps()
ansiColor('xterm')
}
environment {
// Kitchen platform/driver definitions pre-staged on the build slave.
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
// rbenv shims first so the pinned Ruby below is picked up.
// NOTE: fixed a duplicated trailing ":/root/bin:/root/bin" entry.
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
RBENV_VERSION = "2.4.2"
// Suite/platform pair selects the kitchen instance name below
// (e.g. "py3-windows-2016").
TEST_SUITE = "py3"
TEST_PLATFORM = "windows-2016"
// Force colored output from pytest/py tooling even without a TTY.
PY_COLORS = 1
}
stages {
// Mark the PR commit as PENDING on GitHub before any real work starts.
stage('github-pending') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
status: 'PENDING',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
// Install the Ruby gems Test Kitchen needs for the ec2/windows drivers.
stage('setup') {
steps {
sh 'bundle install --with ec2 windows --without opennebula docker'
}
}
stage('run kitchen') {
steps {
// AWS credentials + SSH agent are required both to provision the
// EC2 instance and to reach it for converge/verify.
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
// Converge is retried once — presumably to ride out transient
// EC2/provisioning failures (TODO confirm intent).
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
}
}}
}
post {
// Always tear the EC2 instance down and capture test artifacts,
// even when converge/verify failed.
always {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
}
}}
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
archiveArtifacts artifacts: 'artifacts/logs/minion'
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
}
}
}
}
post {
// Publish JUnit results and clean the workspace regardless of outcome.
always {
junit 'artifacts/xml-unittests-output/*.xml'
cleanWs()
}
// Flip the GitHub commit status to its final state.
success {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
status: 'SUCCESS',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
failure {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
}

View File

@ -20,8 +20,15 @@ pipeline {
}
stage('setup') {
steps {
sh 'eval "$(pyenv init -)"; pyenv install 2.7.14 || echo "We already have this python."; pyenv local 2.7.14; pyenv shell 2.7.14'
sh 'eval "$(pyenv init -)"; pip install tox'
sh '''
eval "$(pyenv init -)"
pyenv --version
pyenv install --skip-existing 2.7.14
pyenv local 2.7.14
pyenv shell 2.7.14
python --version
pip install tox
'''
}
}
stage('linting') {
@ -29,13 +36,32 @@ pipeline {
parallel {
stage('salt linting') {
steps {
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-salt $(find salt/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py {} +) | tee pylint-report.xml'
sh '''
eval "$(pyenv init - --no-rehash)"
_FILES="$(find salt/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" {} +)"
_FILES="$_FILES $(git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py)"
if [[ -z ${_FILES} ]]; then
echo "No pylint run, no changes found in the files"
echo "empty" pylint-reports.xml
else
tox -e pylint-salt ${_FILES} | tee pylint-report.xml
fi
'''
archiveArtifacts artifacts: 'pylint-report.xml'
}
}
stage('test linting') {
steps {
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-tests $(find tests/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" {} +) | tee pylint-report-tests.xml'
sh '''
eval "$(pyenv init - --no-rehash)"
_FILES="$(find tests/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py {} +)"
if [[ -z ${_FILES} ]]; then
echo "No pylint run, no changes found in the files"
touch pylint-report-tests.xml
else
tox -e pylint-tests ${_FILES} | tee pylint-report-tests.xml
fi
'''
archiveArtifacts artifacts: 'pylint-report-tests.xml'
}
}

View File

@ -125,7 +125,7 @@
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False
# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).

View File

@ -152,6 +152,11 @@
# Set the directory used to hold unix sockets.
#sock_dir: /var/run/salt/minion
# The minion can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the minion. Set this to False if you do not need
# GPU hardware grains for your minion.
# enable_gpu_grains: True
# Set the default outputter used by the salt-call command. The default is
# "nested".
#output: nested

View File

@ -127,7 +127,7 @@ syndic_user: salt
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False
# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).

View File

@ -5256,7 +5256,7 @@ sock_dir: /var/run/salt/master
.UNINDENT
.SS \fBenable_gpu_grains\fP
.sp
Default: \fBFalse\fP
Default: \fBTrue\fP
.sp
Enable GPU hardware data for your master. Be aware that the master can
take a while to start up when lspci and/or dmidecode is used to populate the
@ -15225,7 +15225,7 @@ and \fBmine_functions\fP\&.
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False
# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).

View File

@ -478,6 +478,10 @@ Enable GPU hardware data for your master. Be aware that the master can
take a while to start up when lspci and/or dmidecode is used to populate the
grains for the master.
.. code-block:: yaml
enable_gpu_grains: True
.. conf_master:: job_cache
``job_cache``

View File

@ -936,6 +936,22 @@ The directory where Unix sockets will be kept.
sock_dir: /var/run/salt/minion
.. conf_minion:: enable_gpu_grains
``enable_gpu_grains``
---------------------
Default: ``True``
Enable GPU hardware data for your master. Be aware that the minion can
take a while to start up when lspci and/or dmidecode is used to populate the
grains for the minion, so this can be set to ``False`` if you do not need these
grains.
.. code-block:: yaml
enable_gpu_grains: False
.. conf_minion:: outputter_dirs
``outputter_dirs``

View File

@ -50,6 +50,7 @@ value to ``etcd``:
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import base64
try:
import etcd
HAS_ETCD = True
@ -112,7 +113,7 @@ def _init_client():
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
client = etcd.Client(**etcd_kwargs)
try:
client.get(path_prefix)
client.read(path_prefix)
except etcd.EtcdKeyNotFound:
log.info("etcd: Creating dir %r", path_prefix)
client.write(path_prefix, None, dir=True)
@ -126,7 +127,7 @@ def store(bank, key, data):
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
value = __context__['serial'].dumps(data)
client.set(etcd_key, value)
client.write(etcd_key, base64.b64encode(value))
except Exception as exc:
raise SaltCacheError(
'There was an error writing the key, {0}: {1}'.format(etcd_key, exc)
@ -140,10 +141,10 @@ def fetch(bank, key):
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
value = client.get(etcd_key).value
if value is None:
return {}
return __context__['serial'].loads(value)
value = client.read(etcd_key).value
return __context__['serial'].loads(base64.b64decode(value))
except etcd.EtcdKeyNotFound:
return {}
except Exception as exc:
raise SaltCacheError(
'There was an error reading the key, {0}: {1}'.format(
@ -162,7 +163,7 @@ def flush(bank, key=None):
else:
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
client.get(etcd_key)
client.read(etcd_key)
except etcd.EtcdKeyNotFound:
return # nothing to flush
try:
@ -184,7 +185,7 @@ def _walk(r):
return [r.key.split('/', 3)[3]]
keys = []
for c in client.get(r.key).children:
for c in client.read(r.key).children:
keys.extend(_walk(c))
return keys
@ -197,7 +198,7 @@ def ls(bank):
_init_client()
path = '{0}/{1}'.format(path_prefix, bank)
try:
return _walk(client.get(path))
return _walk(client.read(path))
except Exception as exc:
raise SaltCacheError(
'There was an error getting the key "{0}": {1}'.format(
@ -213,7 +214,7 @@ def contains(bank, key):
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
r = client.get(etcd_key)
r = client.read(etcd_key)
# return True for keys, not dirs
return r.dir is False
except etcd.EtcdKeyNotFound:

View File

@ -446,6 +446,9 @@ VALID_OPTS = {
# Tell the loader to attempt to import *.pyx cython files if cython is available
'cython_enable': bool,
# Whether or not to load grains for the GPU
'enable_gpu_grains': bool,
# Tell the loader to attempt to import *.zip archives
'enable_zip_modules': bool,
@ -1395,6 +1398,7 @@ DEFAULT_MINION_OPTS = {
'test': False,
'ext_job_cache': '',
'cython_enable': False,
'enable_gpu_grains': True,
'enable_zip_modules': False,
'state_verbose': True,
'state_output': 'full',

View File

@ -514,6 +514,15 @@ class Fileserver(object):
return ret
return list(ret)
def file_envs(self, load=None):
'''
Return environments for all backends for requests from fileclient
'''
if load is None:
load = {}
load.pop('cmd', None)
return self.envs(**load)
def init(self, back=None):
'''
Initialize the backend, only do so if the fs supports an init function

View File

@ -1184,7 +1184,7 @@ class AESFuncs(object):
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.envs
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''

View File

@ -2267,7 +2267,7 @@ def acl_info(consul_url=None, **kwargs):
function = 'acl/info/{0}'.format(kwargs['id'])
ret = _query(consul_url=consul_url,
data=data,
method='PUT',
method='GET',
function=function)
return ret

View File

@ -301,6 +301,19 @@ def get_file(path,
gzip)
def envs():
'''
List available environments for fileserver
CLI Example
.. code-block:: bash
salt '*' cp.envs
'''
return _client().envs()
def get_template(path,
dest,
template='jinja',

View File

@ -466,7 +466,8 @@ def list_users():
salt '*' user.list_users
'''
return sorted([user.pw_name for user in pwd.getpwall()])
users = _dscl(['/users'], 'list')['stdout']
return users.split()
def rename(name, new_name):

View File

@ -1555,7 +1555,7 @@ class _policy_info(object):
'AllocateDASD': {
'Policy': 'Devices: Allowed to format and eject '
'removable media',
'Settings': ["", "0", "1", "2"],
'Settings': ['9999', '0', '1', '2'],
'lgpo_section': self.security_options_gpedit_path,
'Registry': {
'Hive': 'HKEY_LOCAL_MACHINE',
@ -5610,7 +5610,7 @@ def _lookup_admin_template(policy_name,
def get_policy_info(policy_name,
policy_class,
adml_language='en-US'):
'''
r'''
Returns information about a specified policy
Args:
@ -5629,6 +5629,124 @@ def get_policy_info(policy_name,
.. code-block:: bash
salt '*' lgpo.get_policy_info 'Maximum password age' machine
You can use ``lgpo.get_policy_info`` to get all the possible names that
could be used in a state file or from the command line (along with elements
that need to be set/etc). The key is to match the text you see in the
``gpedit.msc`` gui exactly, including quotes around words or phrases. The
"full path" style is really only needed when there are multiple policies
that use the same base name. For example, ``Access data sources across
domains`` exists in ~10 different paths. If you put that through
``get_policy_info`` you'll get back a message that it is used for multiple
policies and you need to be more specific.
CLI Example:
.. code-block:: bash
salt-call --local lgpo.get_policy_info ShellRemoveOrderPrints_2 machine
local:
----------
message:
policy_aliases:
- Turn off the "Order Prints" picture task
- ShellRemoveOrderPrints_2
- System\Internet Communication Management\Internet Communication settings\Turn off the "Order Prints" picture task
policy_class:
machine
policy_elements:
policy_found:
True
policy_name:
ShellRemoveOrderPrints_2
rights_assignment:
False
Escaping can get tricky in cmd/Powershell. The following is an example of
escaping in Powershell using backquotes:
.. code-block:: bash
PS>salt-call --local lgpo.get_policy_info "Turn off the `\`"Order Prints`\`" picture task" machine
local:
----------
message:
policy_aliases:
- Turn off the "Order Prints" picture task
- ShellRemoveOrderPrints_2
- System\Internet Communication Management\Internet Communication settings\Turn off the "Order Prints" picture task
policy_class:
machine
policy_elements:
policy_found:
True
policy_name:
Turn off the "Order Prints" picture task
rights_assignment:
False
This function can then be used to get the options available for specifying
Group Policy Objects to be used in state files. Based on the above any of
these *should* be usable:
.. code-block:: bash
internet_communications_settings:
lgpo.set:
- computer_policy:
Turn off the "Order Prints" picture task: Enabled
.. code-block:: bash
internet_communications_settings:
lgpo.set:
- computer_policy:
ShellRemoveOrderPrints_2: Enabled
When using the full path, it might be a good idea to use single quotes
around the path:
.. code-block:: bash
internet_communications_settings:
lgpo.set:
- computer_policy:
'System\Internet Communication Management\Internet Communication settings\Turn off the "Order Prints" picture task': 'Enabled'
If you struggle to find the policy from ``get_policy_info`` using the name
as you see in ``gpedit.msc``, the names such as "ShellRemoveOrderPrints_2"
come from the ``.admx`` files. If you know nothing about ``.admx/.adml``
relationships (ADML holds what you see in the GUI, ADMX holds the more
technical details), then this may be a little bit too much info, but here is
an example with the above policy using Powershell:
.. code-block:: bash
PS>Get-ChildItem -Path C:\Windows\PolicyDefinitions -Recurse -Filter *.adml | Select-String "Order Prints"
C:\windows\PolicyDefinitions\en-US\ICM.adml:152: <string id="ShellRemoveOrderPrints">Turn off the "Order Prints" picture task</string>
C:\windows\PolicyDefinitions\en-US\ICM.adml:153: <string id="ShellRemoveOrderPrints_Help">This policy setting specifies whether the "Order Prints Online" task is available from Picture Tasks in Windows folders.
C:\windows\PolicyDefinitions\en-US\ICM.adml:155:The Order Prints Online Wizard is used to download a list of providers and allow users to order prints online.
C:\windows\PolicyDefinitions\en-US\ICM.adml:157:If you enable this policy setting, the task "Order Prints Online" is removed from Picture Tasks in File Explorer folders.
From this grep, we can see id "ShellRemoveOrderPrints" is the ID of the
string used to describe this policy, then we search for it in the ADMX:
.. code-block:: bash
PS>Get-ChildItem -Path C:\Windows\PolicyDefinitions -Recurse -Filter *.admx | Select-String "ShellRemoveOrderPrints"
C:\windows\PolicyDefinitions\ICM.admx:661: <policy name="ShellRemoveOrderPrints_1" class="User" displayName="$(string.ShellRemoveOrderPrints)" explainText="$(string.ShellRemoveOrderPrints_Help)" key="Software\Microsoft\Windows\CurrentVersion\Policies\Explorer" valueName="NoOnlinePrintsWizard">
C:\windows\PolicyDefinitions\ICM.admx:671: <policy name="ShellRemoveOrderPrints_2" class="Machine" displayName="$(string.ShellRemoveOrderPrints)" explainText="$(string.ShellRemoveOrderPrints_Help)" key="Software\Microsoft\Windows\CurrentVersion\Policies\Explorer" valueName="NoOnlinePrintsWizard">
Now we have two to pick from. And if you notice the ``class="Machine"`` and
``class="User"`` (which details if it is a computer policy or user policy
respectively) the ``ShellRemoveOrderPrints_2`` is the "short name" we could
use to pass through ``get_policy_info`` to see what the module itself is
expecting.
'''
# return the possible policy names and element names
ret = {'policy_name': policy_name,

View File

@ -28,7 +28,8 @@ from salt.runners.winrepo import (
genrepo as _genrepo,
update_git_repos as _update_git_repos,
PER_REMOTE_OVERRIDES,
PER_REMOTE_ONLY
PER_REMOTE_ONLY,
GLOBAL_ONLY
)
from salt.ext import six
try:

View File

@ -304,7 +304,7 @@ def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint:
force_close, reboot)
return True
except pywintypes.error as exc:
(number, context, message) = exc
(number, context, message) = exc.args
log.error('Failed to shutdown the system')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
@ -347,7 +347,7 @@ def shutdown_abort():
win32api.AbortSystemShutdown('127.0.0.1')
return True
except pywintypes.error as exc:
(number, context, message) = exc
(number, context, message) = exc.args
log.error('Failed to abort system shutdown')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
@ -486,7 +486,7 @@ def set_computer_desc(desc=None):
try:
win32net.NetServerSetInfo(None, 101, system_info)
except win32net.error as exc:
(number, context, message) = exc
(number, context, message) = exc.args
log.error('Failed to update system')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
@ -1019,7 +1019,7 @@ def set_system_date_time(years=None,
try:
date_time = win32api.GetLocalTime()
except win32api.error as exc:
(number, context, message) = exc
(number, context, message) = exc.args
log.error('Failed to get local time')
log.error('nbr: %s', number)
log.error('ctx: %s', context)

View File

@ -167,7 +167,6 @@ def add(name,
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to create user %s', name)
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
@ -271,7 +270,6 @@ def update(name,
try:
user_info = win32net.NetUserGetInfo(None, name, 4)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to update user %s', name)
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
@ -331,7 +329,6 @@ def update(name,
try:
win32net.NetUserSetInfo(None, name, 4, user_info)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to update user %s', name)
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)
@ -420,7 +417,7 @@ def delete(name,
sid = getUserSid(name)
win32profile.DeleteProfile(sid)
except pywintypes.error as exc:
(number, context, message) = exc
(number, context, message) = exc.args
if number == 2: # Profile Folder Not Found
pass
else:
@ -434,7 +431,6 @@ def delete(name,
try:
win32net.NetUserDel(None, name)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to delete user %s', name)
log.error('nbr: %s', exc.winerror)
log.error('ctx: %s', exc.funcname)

View File

@ -108,6 +108,7 @@ from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs
import salt.utils.data
import salt.utils.dictdiffer
import salt.utils.json
@ -268,7 +269,26 @@ def set_(name,
changes = False
requested_policy_json = salt.utils.json.dumps(policy_data['requested_policy'][policy_name], sort_keys=True).lower()
current_policy_json = salt.utils.json.dumps(current_policy[policy_data['output_section']][pol_id], sort_keys=True).lower()
if requested_policy_json != current_policy_json:
policies_are_equal = False
requested_policy_check = salt.utils.json.loads(requested_policy_json)
current_policy_check = salt.utils.json.loads(current_policy_json)
# Compared dicts, lists, and strings
if isinstance(requested_policy_check, six.string_types):
policies_are_equal = requested_policy_check == current_policy_check
elif isinstance(requested_policy_check, list):
policies_are_equal = salt.utils.data.compare_lists(
requested_policy_check,
current_policy_check
) == {}
elif isinstance(requested_policy_check, dict):
policies_are_equal = salt.utils.data.compare_dicts(
requested_policy_check,
current_policy_check
) == {}
if not policies_are_equal:
if policy_data['policy_lookup'][policy_name]['rights_assignment'] and cumulative_rights_assignments:
for user in policy_data['requested_policy'][policy_name]:
if user not in current_policy[policy_data['output_section']][pol_id]:
@ -286,7 +306,7 @@ def set_(name,
else:
log.debug('%s current setting matches '
'the requested setting', policy_name)
ret['comment'] = '"{0}" is already set.'.format(policy_name) + ret['comment']
ret['comment'] = '"{0}" is already set.\n'.format(policy_name) + ret['comment']
else:
policy_changes.append(policy_name)
log.debug('policy %s is not set, we will configure it',
@ -294,8 +314,8 @@ def set_(name,
if __opts__['test']:
if policy_changes:
ret['result'] = None
ret['comment'] = 'The following policies are set to change: {0}.'.format(
', '.join(policy_changes))
ret['comment'] = 'The following policies are set to change:\n{0}.'.format(
'\n'.join(policy_changes))
else:
ret['comment'] = 'All specified policies are properly configured'
else:
@ -306,6 +326,8 @@ def set_(name,
adml_language=adml_language)
if _ret:
ret['result'] = _ret
ret['comment'] = 'The following policies changed:\n{0}.'.format(
'\n'.join(policy_changes))
ret['changes'] = salt.utils.dictdiffer.deep_diff(
current_policy,
__salt__['lgpo.get'](policy_class=policy_class,

View File

@ -72,13 +72,7 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if six.PY2:
try:
line = line.decode(__salt_system_encoding__)
except UnicodeDecodeError:
# Fall back to UTF-8
line = line.decode('utf-8')
line = salt.utils.stringutils.to_unicode(fp.readline())
if not line:
break
lineno = lineno + 1

View File

@ -41,7 +41,7 @@ def _handle_sigusr1(sig, stack):
filename = 'salt-debug-{0}.log'.format(int(time.time()))
destfile = os.path.join(tempfile.gettempdir(), filename)
with salt.utils.files.fopen(destfile, 'w') as output:
_makepretty(output, salt.utils.stringutils.to_str(stack))
_makepretty(output, stack)
def _handle_sigusr2(sig, stack):

View File

@ -68,6 +68,10 @@ class ArchiveTest(ModuleCase):
else:
filename = 'file'
with salt.utils.files.fopen(os.path.join(self.src, filename), 'wb') as theorem:
if six.PY3 and salt.utils.platform.is_windows():
encoding = 'utf-8'
else:
encoding = None
theorem.write(salt.utils.stringutils.to_bytes(textwrap.dedent('''\
Compression theorem of computational complexity theory:
@ -85,7 +89,7 @@ class ArchiveTest(ModuleCase):
and
$\\mathrm C(φ_i) \\mathrm{C}(φ_{f(i)})$.
''')))
'''), encoding=encoding))
# Create destination
os.makedirs(self.dst)
@ -127,10 +131,10 @@ class ArchiveTest(ModuleCase):
dir_in_ret = None
file_in_ret = None
for line in ret:
if normdir(self.src) in line \
and not normdir(self.src_file) in line:
if normdir(self.src) in os.path.normcase(line) \
and not normdir(self.src_file) in os.path.normcase(line):
dir_in_ret = True
if normdir(self.src_file) in line:
if normdir(self.src_file) in os.path.normcase(line):
file_in_ret = True
# Assert number of lines, reporting of source directory and file
@ -297,7 +301,7 @@ class ArchiveTest(ModuleCase):
# Test create archive
ret = self.run_function('archive.unzip', [self.arch, self.dst])
self.assertTrue(isinstance(ret, list), six.text_type(ret))
self._assert_artifacts_in_ret(ret, unix_sep=True if six.PY2 else False)
self._assert_artifacts_in_ret(ret, unix_sep=False)
self._tear_down()

View File

@ -665,3 +665,6 @@ class CPModuleTest(ModuleCase):
self.assertTrue(os.path.isfile(tgt_cache_file), 'File was not cached on the master')
finally:
os.unlink(tgt_cache_file)
def test_envs(self):
self.assertEqual(self.run_function('cp.envs'), ['base', 'prod'])

View File

@ -268,8 +268,8 @@ class UseraddModuleTestWindows(ModuleCase):
self._add_group()
self.run_function('user.addgroup', [self.user_name, self.group_name])
self.assertIn(self.group_name, self.run_function('user.list_groups', [self.user_name]))
self.run_function('user.removegroup', [self.group_name])
self.assertIn(self.group_name, self.run_function('user.list_groups', [self.user_name]))
self.run_function('user.removegroup', [self.user_name, self.group_name])
self.assertNotIn(self.group_name, self.run_function('user.list_groups', [self.user_name]))
def test_user_rename(self):
'''

View File

@ -31,6 +31,7 @@ import salt.utils.files
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.yaml
import salt.ext.six
import salt.utils.gitfs
from salt.utils.gitfs import (
@ -144,6 +145,9 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
try:
shutil.rmtree(path, onerror=_rmtree_error)
except OSError as exc:
if exc.errno == errno.EACCES:
log.error("Access error removeing file %s", path)
continue
if exc.errno != errno.EEXIST:
raise
@ -391,7 +395,9 @@ class GitFSTestBase(object):
try:
shutil.rmtree(TMP_REPO_DIR)
except OSError as exc:
if exc.errno != errno.ENOENT:
if exc.errno == errno.EACCES:
log.error("Access error removeing file %s", TMP_REPO_DIR)
elif exc.errno != errno.ENOENT:
raise
shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/')
@ -443,7 +449,9 @@ class GitFSTestBase(object):
try:
salt.utils.files.rm_rf(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
if exc.errno == errno.EACCES:
log.error("Access error removeing file %s", path)
elif exc.errno != errno.EEXIST:
raise
def setUp(self):
@ -464,8 +472,14 @@ class GitFSTestBase(object):
try:
salt.utils.files.rm_rf(os.path.join(self.tmp_cachedir, subdir))
except OSError as exc:
if exc.errno == errno.EACCES:
log.warning("Access error removeing file %s", os.path.join(self.tmp_cachedir, subdir))
continue
if exc.errno != errno.ENOENT:
raise
if salt.ext.six.PY3 and salt.utils.platform.is_windows():
self.setUpClass()
self.setup_loader_modules()
@skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER))

View File

@ -312,6 +312,11 @@ class MacUserTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests the list of all users
'''
with patch('pwd.getpwall', MagicMock(return_value=self.mock_pwall)):
ret = ['_amavisd', '_appleevents', '_appowner']
self.assertEqual(mac_user.list_users(), ret)
expected = ['spongebob', 'patrick', 'squidward']
mock_run = MagicMock(return_value={'pid': 4948,
'retcode': 0,
'stderr': '',
'stdout': '\n'.join(expected)})
with patch.dict(mac_user.__grains__, {'osrelease_info': (10, 9, 1)}), \
patch.dict(mac_user.__salt__, {'cmd.run_all': mock_run}):
self.assertEqual(mac_user.list_users(), expected)

View File

@ -27,7 +27,7 @@ from salt.utils.odict import OrderedDict
# Import test support libs
from tests.support.helpers import flaky
SKIP_MESSAGE = '%s is unavailable, do prerequisites have been met?'
SKIP_MESSAGE = '%s is unavailable, have prerequisites been met?'
@flaky(condition=six.PY3)
@ -102,6 +102,7 @@ class TestSerializers(TestCase):
@skipIf(not yaml.available, SKIP_MESSAGE % 'yaml')
@skipIf(not yamlex.available, SKIP_MESSAGE % 'sls')
@flaky
def test_compare_sls_vs_yaml_with_jinja(self):
tpl = '{{ data }}'
env = jinja2.Environment()

View File

@ -22,6 +22,8 @@ from tests.support.paths import TMP
import salt.utils.files
import salt.utils.stringutils
import salt.utils.configparser
import salt.utils.platform
from salt.ext import six
# The user.name param here is intentionally indented with spaces instead of a
# tab to test that we properly load a file with mixed indentation.
@ -75,7 +77,8 @@ class TestGitConfigParser(TestCase):
)
)
self.conf = salt.utils.configparser.GitConfigParser()
self.conf.read(self.orig_config)
with salt.utils.files.fopen(self.orig_config, 'rb') as fp:
self.conf._read(fp, self.orig_config)
@classmethod
def tearDownClass(cls):
@ -99,11 +102,14 @@ class TestGitConfigParser(TestCase):
@staticmethod
def get_lines(path):
with salt.utils.files.fopen(path, 'r') as fp_:
with salt.utils.files.fopen(path, 'rb') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).splitlines()
def _test_write(self, mode):
with salt.utils.files.fopen(self.new_config, mode) as fp_:
kwargs = {'mode': mode}
if six.PY3 and salt.utils.platform.is_windows() and 'b' not in mode:
kwargs['encoding'] = 'utf-8'
with salt.utils.files.fopen(self.new_config, **kwargs) as fp_:
self.conf.write(fp_)
self.assertEqual(
self.get_lines(self.new_config),
@ -182,7 +188,7 @@ class TestGitConfigParser(TestCase):
[orig_refspec, new_refspec]
)
# Write the config object to a file
with salt.utils.files.fopen(self.new_config, 'w') as fp_:
with salt.utils.files.fopen(self.new_config, 'wb') as fp_:
self.conf.write(fp_)
# Confirm that the new file was written correctly
expected = self.fix_indent(ORIG_CONFIG)

View File

@ -39,7 +39,7 @@ integration.modules.test_sysmod
integration.modules.test_system
integration.modules.test_test
integration.modules.test_useradd
integration.modules.test_win_autoruns
integration.modules.test_autoruns
integration.modules.test_win_dns_client
integration.modules.test_win_firewall
integration.modules.test_win_pkg