Merge branch '2016.3' into 'develop'

Conflicts:
  - salt/cloud/clouds/joyent.py
  - salt/minion.py
This commit is contained in:
rallytime 2016-03-18 16:00:35 -06:00
commit 52111eb01a
77 changed files with 1324 additions and 414 deletions

View File

@ -80,11 +80,21 @@ disable=R,
I0011,
I0012,
I0013,
E0302,
E0401,
E1101,
E1103,
E1136,
E8114,
C0102,
C0103,
C0111,
C0112,
C0122,
C0123,
C0411,
C0412,
C0413,
C0203,
C0204,
C0301,
@ -130,11 +140,19 @@ disable=R,
# I0011 (locally-disabling)
# I0012 (locally-enabling)
# I0013 (file-ignored)
# E0302 (unexpected-special-method-signature)
# E0401 (import-error)
# E1101 (no-member) [pylint isn't smart enough]
# E1103 (maybe-no-member)
# E1136 (unsubscriptable-object)
# E8114 (indentation-is-not-a-multiple-of-four-comment)
# C0102 (blacklisted-name) [because it activates C0103 too]
# C0103 (invalid-name)
# C0111 (missing-docstring)
# C0113 (unneeded-not)
# C0122 (misplaced-comparison-constant)
# C0123 (unidiomatic-typecheck)
# C0412 (ungrouped-imports)
# C0203 (bad-mcs-method-argument)
# C0204 (bad-mcs-classmethod-argument)
# C0301 (line-too-long)
@ -180,6 +198,7 @@ output-format=text
files-output=no
# Tells whether to display a full report or only the messages
# This will be removed in pylint 1.6
reports=no
# Python expression which should return a note less than 10 (10 is the highest

View File

@ -71,15 +71,15 @@ defaults:
{{ salt['grains.get']('os') }}
env
====
saltenv
=======
The `env` variable is available only in sls files when gathering the sls
The `saltenv` variable is available only in sls files when gathering the sls
from an environment.
.. code-block:: jinja
{{ env }}
{{ saltenv }}
sls
====
@ -91,4 +91,4 @@ include option.
.. code-block:: jinja
{{ sls }}
{{ sls }}

View File

@ -101,6 +101,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``:
ssh_password: verybadpass
slot: production
media_link: 'http://portalvhdabcdefghijklmn.blob.core.windows.net/vhds'
virtual_network_name: azure-virtual-network
subnet_name: azure-subnet
These options are described in more detail below. Once configured, the profile
can be realized with a salt command:
@ -195,6 +197,16 @@ service_name
The name of the service in which to create the VM. If this is not specified,
then a service will be created with the same name as the VM.
virtual_network_name
--------------------
Optional. The name of the virtual network for the VM to join. If this is not
specified, then no virtual network will be joined.
subnet_name
------------
Optional. The name of the subnet in the virtual network for the VM to join.
Requires that a ``virtual_network_name`` is specified.
Show Instance
=============

View File

@ -9,7 +9,7 @@ the fully automated run of integration and/or unit tests from a single
interface.
To learn the basics of how Salt's test suite works, be sure to check
out the :ref:`Salt's Test Suite: An Introduction <tutorial-salt-testing>`_
out the :ref:`Salt's Test Suite: An Introduction <tutorial-salt-testing>`
tutorial.
@ -19,8 +19,8 @@ Test Directory Structure
Salt's test suite is located in the ``tests`` directory in the root of
Salt's codebase. The test suite is divided into two main groups:
* :ref:`Integration Tests <integration-tests>`_
* :ref:`Unit Tests <unit-tests>`_
* :ref:`Integration Tests <integration-tests>`
* :ref:`Unit Tests <unit-tests>`
Within each of these groups, the directory structure roughly mirrors the
structure of Salt's own codebase. Notice that there are directories for
@ -47,14 +47,14 @@ shell commands, among other segments of Salt's ecosystem. By utilizing
the integration test daemons, integration tests are easy to write. They
are also SaltStack's generally preferred method of adding new tests.
The discussion in the :ref:`Integration vs. Unit <integration-vs-unit>`_
section of the :ref:`testing tutorial <tutorial-salt-testing>`_ is
The discussion in the :ref:`Integration vs. Unit <integration-vs-unit>`
section of the :ref:`testing tutorial <tutorial-salt-testing>` is
beneficial in learning why you might want to write integration tests
vs. unit tests. Both testing arenas add value to Salt's test suite and
you should consider adding both types of tests if possible and appropriate
when contributing to Salt.
* :ref:`Integration Test Documentation <integration-tests>`_
* :ref:`Integration Test Documentation <integration-tests>`
Unit Tests
@ -66,12 +66,12 @@ testing against specific interactions, unit tests should be used to test
a function's logic as well as any ``return`` or ``raises`` statements.
Unit tests also rely heavily on mocking external resources.
The discussion in the :ref:`Integration vs. Unit <integration-vs-unit>`_
section of the :ref:`testing tutorial <tutorial-salt-testing>`_ is useful
The discussion in the :ref:`Integration vs. Unit <integration-vs-unit>`
section of the :ref:`testing tutorial <tutorial-salt-testing>` is useful
in determining when you should consider writing unit tests instead of,
or in addition to, integration tests when contributing to Salt.
* :ref:`Unit Test Documentation <unit-tests>`_
* :ref:`Unit Test Documentation <unit-tests>`
Running The Tests
@ -137,9 +137,9 @@ there are several ways to run only specific groups of tests or individual tests:
``./tests/runtests.py -n integration.module.virt.VirtTest.test_default_kvm_profile``
For more specific examples of how to run various test subsections or individual
tests, please see the :ref:`Test Selection Options <test-selection-options>`_
documentation or the :ref:`Running Specific Tests <running-specific-tests>`_
section of the :ref:`Salt's Test Suite: An Introduction <tutorial-salt-testing>`_
tests, please see the :ref:`Test Selection Options <test-selection-options>`
documentation or the :ref:`Running Specific Tests <running-specific-tests>`
section of the :ref:`Salt's Test Suite: An Introduction <tutorial-salt-testing>`
tutorial.
@ -330,8 +330,8 @@ other contexts, but for Salt they are defined this way:
Salt testing uses unittest2 from the python standard library and MagicMock.
* :ref:`Writing integration tests <integration-tests>`_
* :ref:`Writing unit tests <unit-tests>`_
* :ref:`Writing integration tests <integration-tests>`
* :ref:`Writing unit tests <unit-tests>`
Naming Conventions
@ -380,8 +380,8 @@ Tests to Accompany a Bugfix
If you are writing tests for code that fixes a bug in Salt, please write the test
in the same pull request as the bugfix. If you're unsure of where to submit your
bugfix and accompanying test, please review the
:ref:`Which Salt Branch? <which-salt-branch>`_ documentation in Salt's
:ref:`Contributing <contributing>`_ guide.
:ref:`Which Salt Branch? <which-salt-branch>` documentation in Salt's
:ref:`Contributing <contributing>` guide.
Tests for Entire Files or Functions

View File

@ -0,0 +1,90 @@
===========================
Salt 2014.7.8 Release Notes
===========================
Changes for v2014.7.7..v2014.7.8
--------------------------------
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2016-03-11T21:18:48Z*
Statistics:
- Total Merges: **7**
- Total Issue references: **3**
- Total PR references: **10**
Changes:
- **PR** `#28839`_: (*cachedout*) Revert `#28740`_
@ *2015-11-12T22:54:28Z*
- **PR** `#28740`_: (*MasterNayru*) Add missing S3 module import
| refs: `#28777`_
* 4b8bdd0 Merge pull request `#28839`_ from cachedout/revert_28740
* 215b26c Revert `#28740`_
- **PR** `#28777`_: (*rallytime*) Back-port `#28740`_ to 2014.7
@ *2015-11-11T18:00:00Z*
- **PR** `#28740`_: (*MasterNayru*) Add missing S3 module import
| refs: `#28777`_
* 76e69b4 Merge pull request `#28777`_ from rallytime/`bp-28740`_-2014.7
* da5fac2 Back-port `#28740`_ to 2014.7
- **PR** `#28716`_: (*rallytime*) Back-port `#28705`_ to 2014.7
@ *2015-11-10T16:15:03Z*
- **PR** `#28705`_: (*cachedout*) Account for new headers class in tornado 4.3
| refs: `#28716`_
* 45c73eb Merge pull request `#28716`_ from rallytime/`bp-28705`_
* 32e7bd3 Account for new headers class in tornado 4.3
- **PR** `#28717`_: (*cachedout*) Add note about recommended umask
@ *2015-11-09T23:26:20Z*
- **ISSUE** `#28199`_: (*felskrone*) Non-standard umasks might break the master
| refs: `#28717`_
* f4fe921 Merge pull request `#28717`_ from cachedout/umask_note
* 1874300 Add note about recommended umask
- **PR** `#28461`_: (*cachedout*) Wrap all cache calls in state.sls in correct umask
@ *2015-11-02T17:11:02Z*
- **ISSUE** `#28455`_: (*zmalone*) highstate.cache is world readable, and contains secrets
| refs: `#28461`_
* 4bf56ca Merge pull request `#28461`_ from cachedout/issue_28455
* 097838e Wrap all cache calls in state.sls in correct umask
- **PR** `#28407`_: (*DmitryKuzmenko*) Don't request creds if auth with key.
@ *2015-10-29T16:12:30Z*
- **ISSUE** `#24910`_: (*bocig*) -T, --make-token flag does NOT work- LDAP Groups
| refs: `#28407`_
* f3e61db Merge pull request `#28407`_ from DSRCompany/issues/24910_token_auth_fix_2014
* b7b5bec Don't request creds if auth with key.
- **PR** `#27390`_: (*JaseFace*) Ensure we pass on the enable setting if present, or use the default of True if not in build_schedule_item()
@ *2015-10-05T18:09:33Z*
* d284eb1 Merge pull request `#27390`_ from JaseFace/schedule-missing-enabled
* 563db71 Ensure we pass on the enable setting if present, or use the default of True if not in build_schedule_item() Prior to this, when schedule.present compares the existing schedule to the one crafted by this function, enabled will actually be removed at each run. schedule.present sees a modification needs to be made, and invokes schedule.modify, which does so with enabled: True, creating an endless loop of an 'enabled' removal and addition.
.. _`#24910`: https://github.com/saltstack/salt/issues/24910
.. _`#27390`: https://github.com/saltstack/salt/pull/27390
.. _`#28199`: https://github.com/saltstack/salt/issues/28199
.. _`#28407`: https://github.com/saltstack/salt/pull/28407
.. _`#28455`: https://github.com/saltstack/salt/issues/28455
.. _`#28461`: https://github.com/saltstack/salt/pull/28461
.. _`#28705`: https://github.com/saltstack/salt/pull/28705
.. _`#28716`: https://github.com/saltstack/salt/pull/28716
.. _`#28717`: https://github.com/saltstack/salt/pull/28717
.. _`#28740`: https://github.com/saltstack/salt/pull/28740
.. _`#28777`: https://github.com/saltstack/salt/pull/28777
.. _`#28839`: https://github.com/saltstack/salt/pull/28839
.. _`bp-28705`: https://github.com/saltstack/salt/pull/28705
.. _`bp-28740`: https://github.com/saltstack/salt/pull/28740

View File

@ -0,0 +1,45 @@
===========================
Salt 2014.7.9 Release Notes
===========================
Changes for v2014.7.8..v2014.7.9
--------------------------------
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2016-03-11T20:58:58Z*
Statistics:
- Total Merges: **3**
- Total Issue references: **1**
- Total PR references: **3**
Changes:
- **PR** `#31826`_: (*gtmanfred*) Remove ability of authenticating user to specify pam service
@ *2016-03-11T20:41:01Z*
* c5e7c03 Merge pull request `#31826`_ from gtmanfred/2014.7
* d73f70e Remove ability of authenticating user to specify pam service
- **PR** `#29392`_: (*jacobhammons*) updated version number to not reference a specific build from the lat…
@ *2015-12-03T15:54:31Z*
* 85aa70a Merge pull request `#29392`_ from jacobhammons/2014.7
* d7f0db1 updated version number to not reference a specific build from the latest branch
- **PR** `#29296`_: (*douardda*) Use process KillMode on Debian systems also
@ *2015-12-01T16:00:16Z*
- **ISSUE** `#29295`_: (*douardda*) systemd's service file should use the 'process' KillMode option on Debian also
| refs: `#29296`_
* d2fb210 Merge pull request `#29296`_ from douardda/patch-3
* d288539 Use process KillMode on Debian systems also
.. _`#29295`: https://github.com/saltstack/salt/issues/29295
.. _`#29296`: https://github.com/saltstack/salt/pull/29296
.. _`#29392`: https://github.com/saltstack/salt/pull/29392
.. _`#31826`: https://github.com/saltstack/salt/pull/31826

View File

@ -7,12 +7,64 @@ Changes for v2015.8.7..v2015.8.8
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
*Generated at: 2016-03-11T23:06:54Z*
*Generated at: 2016-03-17T21:03:44Z*
Total Merges: **277**
Total Merges: **312**
Changes:
- **PR** `#31947`_: (*cro*) Move proxymodule assignment earlier in proxy minion init
- **PR** `#31948`_: (*rallytime*) Revert "not not" deletion and add comment as to why that is there
- **PR** `#31952`_: (*rallytime*) Fix lint for 2015.8 branch
- **PR** `#31933`_: (*rallytime*) Fix linking syntax in testing docs
- **PR** `#31930`_: (*cro*) Backport changes from 2016.3
- **PR** `#31924`_: (*jfindlay*) update 2015.8.8 release notes
- **PR** `#31922`_: (*cachedout*) For 2015.8 head
- **PR** `#31904`_: (*rallytime*) [2015.8] Merge forward from 2015.5 to 2015.8
- **PR** `#31906`_: (*sbreidba*) Win_dacl module: fix FULLCONTROL / FILE_ALL_ACCESS definition
- **PR** `#31745`_: (*isbm*) Fix the always-false behavior on checking state
- **PR** `#31911`_: (*rallytime*) Merge `#31903`_ with pylint fix
- **PR** `#31883`_: (*paiou*) Fix scaleway cloud provider and manage x86 servers
- **PR** `#31903`_: (*terminalmage*) Use remote_ref instead of local_ref to see if checkout is necessary
- **PR** `#31845`_: (*sakateka*) Now a check_file_meta deletes temporary files when test=True
- **PR** `#31901`_: (*rallytime*) Back-port `#31846`_ to 2015.8
- **PR** `#31905`_: (*terminalmage*) Update versionadded directive
- **PR** `#31902`_: (*rallytime*) Update versionadded tag for new funcs
- **PR** `#31888`_: (*terminalmage*) Fix salt.utils.decorators.Depends
- **PR** `#31857`_: (*sjorge*) gen_password and del_password missing from solaris_shadow
- **PR** `#31879`_: (*cro*) Clarify some comments
- **PR** `#31815`_: (*dr4Ke*) Fix template on contents 2015.8
- **PR** `#31818`_: (*anlutro*) Prevent event logs from writing huge amounts of data
- **PR** `#31836`_: (*terminalmage*) Fix git_pillar race condition
- **PR** `#31824`_: (*rallytime*) Back-port `#31819`_ to 2015.8
- **PR** `#31856`_: (*szeestraten*) Adds missing docs for Virtual Network and Subnet options in salt-cloud Azure cloud profile
- **PR** `#31839`_: (*jfindlay*) add 2015.8.8 release notes
- **PR** `#31828`_: (*gtmanfred*) Remove ability of authenticating user to specify pam service
- **PR** `#31787`_: (*anlutro*) Fix user_create and db_create for new versions of influxdb
@ -748,6 +800,7 @@ Changes:
.. _`#31593`: https://github.com/saltstack/salt/pull/31593
.. _`#31594`: https://github.com/saltstack/salt/pull/31594
.. _`#31598`: https://github.com/saltstack/salt/pull/31598
.. _`#31601`: https://github.com/saltstack/salt/pull/31601
.. _`#31604`: https://github.com/saltstack/salt/pull/31604
.. _`#31622`: https://github.com/saltstack/salt/pull/31622
.. _`#31627`: https://github.com/saltstack/salt/pull/31627
@ -782,6 +835,7 @@ Changes:
.. _`#31740`: https://github.com/saltstack/salt/pull/31740
.. _`#31743`: https://github.com/saltstack/salt/pull/31743
.. _`#31744`: https://github.com/saltstack/salt/pull/31744
.. _`#31745`: https://github.com/saltstack/salt/pull/31745
.. _`#31747`: https://github.com/saltstack/salt/pull/31747
.. _`#31750`: https://github.com/saltstack/salt/pull/31750
.. _`#31752`: https://github.com/saltstack/salt/pull/31752
@ -795,4 +849,40 @@ Changes:
.. _`#31793`: https://github.com/saltstack/salt/pull/31793
.. _`#31797`: https://github.com/saltstack/salt/pull/31797
.. _`#31800`: https://github.com/saltstack/salt/pull/31800
.. _`#31810`: https://github.com/saltstack/salt/pull/31810
.. _`#31815`: https://github.com/saltstack/salt/pull/31815
.. _`#31818`: https://github.com/saltstack/salt/pull/31818
.. _`#31819`: https://github.com/saltstack/salt/pull/31819
.. _`#31824`: https://github.com/saltstack/salt/pull/31824
.. _`#31825`: https://github.com/saltstack/salt/pull/31825
.. _`#31826`: https://github.com/saltstack/salt/pull/31826
.. _`#31827`: https://github.com/saltstack/salt/pull/31827
.. _`#31828`: https://github.com/saltstack/salt/pull/31828
.. _`#31833`: https://github.com/saltstack/salt/pull/31833
.. _`#31834`: https://github.com/saltstack/salt/pull/31834
.. _`#31836`: https://github.com/saltstack/salt/pull/31836
.. _`#31839`: https://github.com/saltstack/salt/pull/31839
.. _`#31845`: https://github.com/saltstack/salt/pull/31845
.. _`#31846`: https://github.com/saltstack/salt/pull/31846
.. _`#31852`: https://github.com/saltstack/salt/pull/31852
.. _`#31856`: https://github.com/saltstack/salt/pull/31856
.. _`#31857`: https://github.com/saltstack/salt/pull/31857
.. _`#31878`: https://github.com/saltstack/salt/pull/31878
.. _`#31879`: https://github.com/saltstack/salt/pull/31879
.. _`#31883`: https://github.com/saltstack/salt/pull/31883
.. _`#31888`: https://github.com/saltstack/salt/pull/31888
.. _`#31900`: https://github.com/saltstack/salt/pull/31900
.. _`#31901`: https://github.com/saltstack/salt/pull/31901
.. _`#31902`: https://github.com/saltstack/salt/pull/31902
.. _`#31903`: https://github.com/saltstack/salt/pull/31903
.. _`#31904`: https://github.com/saltstack/salt/pull/31904
.. _`#31905`: https://github.com/saltstack/salt/pull/31905
.. _`#31906`: https://github.com/saltstack/salt/pull/31906
.. _`#31911`: https://github.com/saltstack/salt/pull/31911
.. _`#31922`: https://github.com/saltstack/salt/pull/31922
.. _`#31924`: https://github.com/saltstack/salt/pull/31924
.. _`#31930`: https://github.com/saltstack/salt/pull/31930
.. _`#31933`: https://github.com/saltstack/salt/pull/31933
.. _`#31947`: https://github.com/saltstack/salt/pull/31947
.. _`#31948`: https://github.com/saltstack/salt/pull/31948
.. _`#31952`: https://github.com/saltstack/salt/pull/31952

View File

@ -8,13 +8,13 @@ Salt's Test Suite: An Introduction
This tutorial makes a couple of assumptions. The first assumption is that
you have a basic knowledge of Salt. To get up to speed, check out the
:ref:`Salt Walkthrough </topics/tutorials/walkthrough>`_.
:ref:`Salt Walkthrough </topics/tutorials/walkthrough>`.
The second assumption is that your Salt development environment is already
configured and that you have a basic understanding of contributing to the
Salt codebase. If you're unfamiliar with either of these topics, please refer
to the :ref:`Installing Salt for Development<installing-for-development>`_
and the :ref:`Contributing<contributing>`_ pages, respectively.
to the :ref:`Installing Salt for Development<installing-for-development>`
and the :ref:`Contributing<contributing>` pages, respectively.
Salt comes with a powerful integration and unit test suite. The test suite
allows for the fully automated run of integration and/or unit tests from a
@ -335,7 +335,7 @@ use case that protects against potential regressions.
The examples above all use the ``run_function`` option to test execution module
functions in a traditional master/minion environment. To see examples of how to
test other common Salt components such as runners, salt-api, and more, please
refer to the :ref:`Integration Test Class Examples<integration-class-examples>`_
refer to the :ref:`Integration Test Class Examples<integration-class-examples>`
documentation.
@ -379,7 +379,7 @@ function calls, external data either globally available or passed in through
function arguments, file data, etc. This practice helps to isolate unit tests to
test Salt logic. One handy way to think about writing unit tests is to "block
all of the exits". More information about how to properly mock external resources
can be found in Salt's :ref:`Unit Test<unit-tests>`_ documentation.
can be found in Salt's :ref:`Unit Test<unit-tests>` documentation.
Salt's unit tests utilize Python's mock class as well as `MagicMock`_. The
``@patch`` decorator is also heavily used when "blocking all the exits".
@ -446,9 +446,9 @@ In addition to this tutorial, there are some other helpful resources and documen
that go into more depth on Salt's test runner, writing tests for Salt code, and general
Python testing documentation. Please see the follow references for more information:
* :ref:`Salt's Test Suite Documentation<salt-test-suite>`_
* :ref:`Integration Tests<integration-tests>`_
* :ref:`Unit Tests<unit-tests>`_
* :ref:`Salt's Test Suite Documentation<salt-test-suite>`
* :ref:`Integration Tests<integration-tests>`
* :ref:`Unit Tests<unit-tests>`
* `MagicMock`_
* `Python Unittest`_
* `Python's Assert Functions`_

View File

@ -60,7 +60,7 @@ def __define_global_system_encoding_variable__():
# than expected. See:
# https://github.com/saltstack/salt/issues/21036
if sys.version_info[0] < 3:
import __builtin__ as builtins # pylint: disable=incompatible-py3-code
import __builtin__ as builtins
else:
import builtins # pylint: disable=import-error

View File

@ -41,7 +41,7 @@ if PY3:
import builtins
exceptions = builtins
else:
import exceptions # pylint: disable=incompatible-py3-code
import exceptions
if not hasattr(ElementTree, 'ParseError'):

View File

@ -15,7 +15,6 @@ import struct
# Import Salt Libs
import salt.utils
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
__virtualname__ = 'btmp'
BTMP = '/var/log/btmp'
@ -87,9 +86,9 @@ def beacon(config):
__context__[LOC_KEY] = fp_.tell()
pack = struct.unpack(FMT, raw)
event = {}
for ind in range(len(FIELDS)):
event[FIELDS[ind]] = pack[ind]
if isinstance(event[FIELDS[ind]], str):
event[FIELDS[ind]] = event[FIELDS[ind]].strip('\x00')
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], str):
event[field] = event[field].strip('\x00')
ret.append(event)
return ret

View File

@ -10,7 +10,6 @@ import time
# Import salt libs
import salt.utils
import salt.utils.vt
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
__virtualname__ = 'sh'
@ -95,12 +94,12 @@ def beacon(config):
'tag': pid}
if 'execve' in line:
comps = line.split('execve')[1].split('"')
for ind in range(len(comps)):
for ind, field in enumerate(comps):
if ind == 1:
event['cmd'] = comps[ind]
event['cmd'] = field
continue
if ind % 2 != 0:
event['args'].append(comps[ind])
event['args'].append(field)
event['user'] = __context__[pkey][pid]['user']
ret.append(event)
if not __context__[pkey][pid]['vt'].isalive():

View File

@ -13,9 +13,6 @@ from __future__ import absolute_import
import os
import struct
# Import 3rd-party libs
from salt.ext.six.moves import range
# Import salt libs
import salt.utils
@ -91,9 +88,9 @@ def beacon(config):
__context__[LOC_KEY] = fp_.tell()
pack = struct.unpack(FMT, raw)
event = {}
for ind in range(len(FIELDS)):
event[FIELDS[ind]] = pack[ind]
if isinstance(event[FIELDS[ind]], str):
event[FIELDS[ind]] = event[FIELDS[ind]].strip('\x00')
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], str):
event[field] = event[field].strip('\x00')
ret.append(event)
return ret

View File

@ -840,7 +840,7 @@ class Single(object):
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target)
opts_pkg = pre_wrapper['test.opts_pkg']()
opts_pkg = pre_wrapper['test.opts_pkg']() # pylint: disable=E1102
opts_pkg['file_roots'] = self.opts['file_roots']
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']

View File

@ -140,7 +140,7 @@ def main(argv): # pylint: disable=W0613
# Salt thin now is available to use
else:
scpstat = subprocess.Popen(['/bin/sh', '-c', 'command -v scp']).wait()
if not scpstat == 0:
if scpstat != 0:
sys.exit(EX_SCP_NOT_FOUND)
if not os.path.exists(OPTIONS.saltdir):

View File

@ -4410,6 +4410,7 @@ def get_console_output(
ret = {}
data = aws.query(params,
return_root=True,
location=location,
provider=get_provider(),
opts=__opts__,

View File

@ -198,7 +198,7 @@ def query_instance(vm_=None, call=None):
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query {0}'.format(data['error']) # pylint: disable=E1126
'There was an error in the query {0}'.format(data.get('error'))
)
# Trigger a failure in the wait for IP function
return False

View File

@ -238,10 +238,15 @@ def create(server_):
'access_key', get_configured_provider(), __opts__, search_global=False
)
commercial_type = config.get_cloud_config_value(
'commercial_type', server_, __opts__, default='C1'
)
kwargs = {
'name': server_['name'],
'organization': access_key,
'image': get_image(server_),
'commercial_type': commercial_type,
}
salt.utils.cloud.fire_event(
@ -332,7 +337,7 @@ def query(method='servers', server_id=None, command=None, args=None,
get_configured_provider(),
__opts__,
search_global=False,
default='https://api.scaleway.com'
default='https://api.cloud.online.net'
))
path = '{0}/{1}/'.format(base_path, method)
@ -361,7 +366,7 @@ def query(method='servers', server_id=None, command=None, args=None,
raise SaltCloudSystemExit(
'An error occurred while querying Scaleway. HTTP Code: {0} '
'Error: \'{1}\''.format(
request.getcode(),
request.status_code,
request.text
)
)

View File

@ -980,7 +980,7 @@ def _format_instance_info_select(vm, selection):
if 'files' in selection:
file_full_info = {}
if "layoutEx.file" in file:
if "layoutEx.file" in file: # pylint: disable=E1135
for file in vm["layoutEx.file"]:
file_full_info[file.key] = {
'key': file.key,

View File

@ -91,6 +91,14 @@ VALID_OPTS = {
# is interrupted and try another master in the list.
'master_alive_interval': int,
# When in multi-master failover mode, fail back to the first master in the list if it's back
# online.
'master_failback': bool,
# When in multi-master mode, and master_failback is enabled ping the top master with this
# interval.
'master_failback_interval': int,
# The name of the signing key-pair
'master_sign_key_name': str,
@ -813,6 +821,8 @@ DEFAULT_MINION_OPTS = {
'master_finger': '',
'master_shuffle': False,
'master_alive_interval': 0,
'master_failback': False,
'master_failback_interval': 0,
'verify_master_pubkey_sign': False,
'always_verify_signature': False,
'master_sign_key_name': 'master_sign',

View File

@ -164,6 +164,22 @@ class FileLockError(SaltException):
self.time_start = time_start
class GitLockError(SaltException):
'''
Raised when an uncaught error occurs in the midst of obtaining an
update/checkout lock in salt.utils.gitfs.
NOTE: While this uses the errno param similar to an OSError, this exception
class is *not* as subclass of OSError. This is done intentionally, so that
this exception class can be caught in a try/except without being caught as
an OSError.
'''
def __init__(self, errno, strerror, *args, **kwargs):
super(GitLockError, self).__init__(strerror, *args, **kwargs)
self.errno = errno
self.strerror = strerror
class SaltInvocationError(SaltException, TypeError):
'''
Used when the wrong number of arguments are sent to modules or invalid

View File

@ -585,7 +585,7 @@ class Client(object):
**get_kwargs
)
if 'handle' not in query:
raise MinionError('Error: {0}'.format(query['error']))
raise MinionError('Error: {0} reading {1}'.format(query['error'], url))
if no_cache:
return ''.join(result)
else:

View File

@ -272,7 +272,7 @@ def is_file_ignored(opts, fname):
return False
def clear_lock(clear_func, lock_type, remote=None):
def clear_lock(clear_func, role, remote=None, lock_type='update'):
'''
Function to allow non-fileserver functions to clear update locks
@ -282,7 +282,7 @@ def clear_lock(clear_func, lock_type, remote=None):
lists, one containing messages describing successfully cleared locks,
and one containing messages describing errors encountered.
lock_type
role
What type of lock is being cleared (gitfs, git_pillar, etc.). Used
solely for logging purposes.
@ -290,14 +290,16 @@ def clear_lock(clear_func, lock_type, remote=None):
Optional string which should be used in ``func`` to pattern match so
that a subset of remotes can be targeted.
lock_type : update
Which type of lock to clear
Returns the return data from ``clear_func``.
'''
msg = 'Clearing update lock for {0} remotes'.format(lock_type)
msg = 'Clearing {0} lock for {1} remotes'.format(lock_type, role)
if remote:
msg += ' matching {0}'.format(remote)
log.debug(msg)
return clear_func(remote=remote)
return clear_func(remote=remote, lock_type=lock_type)
class Fileserver(object):

View File

@ -93,13 +93,13 @@ def clear_cache():
return gitfs.clear_cache()
def clear_lock(remote=None):
def clear_lock(remote=None, lock_type='update'):
'''
Clear update.lk
'''
gitfs = salt.utils.gitfs.GitFS(__opts__)
gitfs.init_remotes(__opts__['gitfs_remotes'], PER_REMOTE_OVERRIDES)
return gitfs.clear_lock(remote=remote)
return gitfs.clear_lock(remote=remote, lock_type=lock_type)
def lock(remote=None):

View File

@ -31,7 +31,7 @@ def disks():
if salt.utils.is_freebsd():
return _freebsd_disks()
elif salt.utils.is_linux():
return {'SSDs': _linux_ssds()}
return _linux_disks()
else:
log.trace('Disk grain does not support OS')
@ -118,23 +118,23 @@ def _freebsd_camcontrol(device):
return ret
def _linux_ssds():
def _linux_disks():
'''
Return list of disk devices that are SSD (non-rotational)
Return list of disk devices and work out if they are SSD or HDD.
'''
ssd_devices = []
ret = {'disks': [], 'SSDs': []}
for entry in glob.glob('/sys/block/*/queue/rotational'):
with salt.utils.fopen(entry) as entry_fp:
device = entry.split('/')[3]
flag = entry_fp.read(1)
if flag == '0':
ssd_devices.append(device)
ret['SSDs'].append(device)
log.trace('Device {0} reports itself as an SSD'.format(device))
elif flag == '1':
log.trace('Device {0} does not report itself as an SSD'
.format(device))
ret['disks'].append(device)
log.trace('Device {0} reports itself as an HDD'.format(device))
else:
log.trace('Unable to identify device {0} as an SSD or not.'
log.trace('Unable to identify device {0} as an SSD or HDD.'
' It does not report 0 or 1'.format(device))
return ssd_devices
return ret

View File

@ -177,5 +177,5 @@ if sys.version_info < (3, 2):
except Exception:
self.handleError(record)
else:
class QueueHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.QueueHandler): # pylint: disable=no-member
class QueueHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.QueueHandler): # pylint: disable=no-member,E0240
pass

View File

@ -132,8 +132,8 @@ class ExcInfoOnLogLevelFormatMixIn(object):
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
formatted_record += record.record.exc_info_on_loglevel_formatted.decode(sys.getfilesystemencoding(),
'replace')
formatted_record += record.exc_info_on_loglevel_formatted.decode(sys.getfilesystemencoding(),
'replace')
# Reset the record.exc_info_on_loglevel_instance because it might need
# to "travel" through a multiprocessing process and it might contain
# data which is not pickle'able

View File

@ -2354,6 +2354,12 @@ class ClearFuncs(object):
log.debug('Published command details {0}'.format(load))
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
class FloMWorker(MWorker):
'''

View File

@ -131,7 +131,7 @@ log = logging.getLogger(__name__)
# 6. Handle publications
def resolve_dns(opts):
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
@ -168,7 +168,10 @@ def resolve_dns(opts):
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
@ -434,8 +437,15 @@ class MinionBase(object):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
@ -806,6 +816,7 @@ class Minion(MinionBase):
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
if io_loop is None:
if HAS_ZMQ:
@ -964,8 +975,24 @@ class Minion(MinionBase):
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive', persist=True)
self.schedule.delete_job('__master_failback', persist=True)
self.grains_cache = self.opts['grains']
@ -1076,7 +1103,17 @@ class Minion(MinionBase):
return functions, returners, errors, executors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
Fire an event on the master, or drop message if unable to send.
'''
@ -1094,15 +1131,23 @@ class Minion(MinionBase):
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except salt.exceptions.SaltReqTimeoutError:
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
def _handle_decoded_payload(self, data):
'''
@ -1454,7 +1499,7 @@ class Minion(MinionBase):
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
@ -1469,7 +1514,6 @@ class Minion(MinionBase):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
@ -1517,15 +1561,24 @@ class Minion(MinionBase):
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return ''
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
@ -1774,10 +1827,20 @@ class Minion(MinionBase):
elif package.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
elif package.startswith('__master_disconnected') or package.startswith('__master_failback'):
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if package.startswith('__master_disconnected') and data['master'] != self.opts['master']:
raise SaltException('Bad master disconnected \'{0}\' when mine one is \'{1}\''.format(
data['master'], self.opts['master']))
if package.startswith('__master_failback'):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
@ -1787,7 +1850,7 @@ class Minion(MinionBase):
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
@ -1829,13 +1892,27 @@ class Minion(MinionBase):
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name='__master_failback',
schedule=schedule)
else:
self.schedule.delete_job(name='__master_failback', persist=True)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
@ -1848,13 +1925,15 @@ class Minion(MinionBase):
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__schedule_return'):
self._return_pub(data, ret_cmd='_return', sync=False)
elif package.startswith('_salt_error'):
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
@ -1897,7 +1976,7 @@ class Minion(MinionBase):
if start:
self.sync_connect_master()
if hasattr(self, 'connected') and self.connected:
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
@ -2822,6 +2901,10 @@ class ProxyMinion(Minion):
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# Check config 'add_proxymodule_to_opts' Remove this in Boron.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules(proxy=self.proxy)
self.functions.pack['__proxy__'] = self.proxy
@ -2845,10 +2928,6 @@ class ProxyMinion(Minion):
# functions here, and then force a grains sync in modules_refresh
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh=True)
# Check config 'add_proxymodule_to_opts' Remove this in Carbon.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
@ -2891,7 +2970,23 @@ class ProxyMinion(Minion):
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive', persist=True)
self.schedule.delete_job('__master_failback', persist=True)
self.grains_cache = self.opts['grains']

View File

@ -1199,7 +1199,7 @@ def session_create(consul_url=None, **kwargs):
if str(_ttl).endswith('s'):
_ttl = _ttl[:-1]
if not int(_ttl) >= 0 and not int(_ttl) <= 3600:
if int(_ttl) < 0 or int(_ttl) > 3600:
ret['message'] = ('TTL must be ',
'between 0 and 3600.')
ret['res'] = False

View File

@ -1226,7 +1226,7 @@ def _validate_input(kwargs,
kwargs['memory_swap'] = \
salt.utils.human_size_to_bytes(kwargs['memory_swap'])
except ValueError:
if kwargs['memory_swap'] in -1:
if kwargs['memory_swap'] == -1:
# memory_swap of -1 means swap is disabled
return
raise SaltInvocationError(

View File

@ -3375,6 +3375,61 @@ def source_list(source, source_hash, saltenv):
return source, source_hash
def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Render ``contents`` through the requested templating engine and
    return the rendered result.

    contents
        template string

    template
        template format

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    CLI Example:

    .. code-block:: bash

        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.' \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    '''
    # Guard clause: bail out early with an error dict for unknown engines.
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        return {
            'result': False,
            'comment': ('Specified template format {0} is not supported'
                        ).format(template),
        }

    # Defaults form the base context; explicit context entries win.
    merged_context = defaults if defaults else {}
    if context:
        merged_context.update(context)

    # Render via the registered engine; from_str/to_str keep everything
    # in-memory (no temp files), and the engine returns {'data': <text>}.
    renderer = salt.utils.templates.TEMPLATE_REGISTRY[template]
    rendered = renderer(
        contents,
        from_str=True,
        to_str=True,
        context=merged_context,
        saltenv=saltenv,
        grains=__grains__,
        pillar=__pillar__,
        salt=__salt__,
        opts=__opts__)
    return rendered['data'].encode('utf-8')
def get_managed(
name,
template,
@ -3956,6 +4011,7 @@ def check_file_meta(
salt.utils.fopen(name, 'r')) as (src, name_):
slines = src.readlines()
nlines = name_.readlines()
__clean_tmp(tmp)
if ''.join(nlines) != ''.join(slines):
if __salt__['config.option']('obfuscate_templates'):
changes['diff'] = '<Obfuscated Template>'

View File

@ -299,7 +299,7 @@ def __process_tokens_internal(tokens, start_at=0):
log.debug(" TYPE: ASSIGNMENT")
is_assignment = True
else:
raise CommandExecutionError('Unknown token!', 'Token:'+token)
raise CommandExecutionError('Unknown token! Token: {0}'.format(token))
token_no = token_no + 1
@ -343,7 +343,7 @@ def __is_long(token):
def __get_long(token):
if six.PY2:
return long(token[0:-1]) # pylint: disable=incompatible-py3-code
return long(token[0:-1])
else:
return int(token[0:-1])

View File

@ -3026,7 +3026,7 @@ def update_lxc_conf(name, lxc_conf, lxc_conf_unset, path=None):
({line[0]: line[1:]}, {key: item}))
break
if not matched:
if not (key, item) in lines:
if (key, item) not in lines:
lines.append((key, item))
changes['added'].append({key: item})
dest_lxc_conf = []

View File

@ -672,7 +672,10 @@ def bootstrap_container(name, dist=None, version=None):
'nspawn.bootstrap: no dist provided, defaulting to \'{0}\''
.format(dist)
)
return globals()['_bootstrap_{0}'.format(dist)](name, version=version)
try:
return globals()['_bootstrap_{0}'.format(dist)](name, version=version)
except KeyError:
raise CommandExecutionError('Unsupported distribution "{0}"'.format(dist))
def _needs_install(name):

View File

@ -30,7 +30,7 @@ except ImportError:
def __virtual__():
if not HAS_PSUTIL:
return (False, 'The ps execution module cannot be loaded: the psutil python module is not available.')
return False, 'The ps module cannot be loaded: python module psutil not installed.'
# Functions and attributes used in this execution module seem to have been
# added as of psutil 0.3.0, from an inspection of the source code. Only

View File

@ -142,7 +142,7 @@ def _deleted_files():
matched = mapline.match(line)
if matched:
path = matched.group(2)
if file:
if path:
if _valid_deleted_file(path):
val = (pinfo['name'], pinfo['pid'], path[0:-10])
if val not in deleted_files:

View File

@ -19,6 +19,13 @@ except ImportError:
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError
try:
import salt.utils.pycrypto
HAS_CRYPT = True
except ImportError:
HAS_CRYPT = False
# Define the module's virtual name
__virtualname__ = 'shadow'
@ -190,6 +197,66 @@ def set_mindays(name, mindays):
return False
def gen_password(password, crypt_salt=None, algorithm='sha512'):
    '''
    .. versionadded:: 2015.8.8

    Generate a hashed password suitable for a shadow entry.

    .. note::

        When this function is called directly via remote-execution,
        the password argument may be displayed in the system's process
        list. This may be a security risk on certain systems.

    password
        Plaintext password to be hashed.

    crypt_salt
        Cryptographic salt. If not given, a random 8-character salt will
        be generated.

    algorithm
        The following hash algorithms are supported:

        * md5
        * blowfish (not in mainline glibc, only available in distros that add it)
        * sha256
        * sha512 (default)

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.gen_password 'I_am_password'
        salt '*' shadow.gen_password 'I_am_password' crypt_salt='I_am_salt' algorithm=sha256
    '''
    # salt.utils.pycrypto wraps the stdlib crypt module, which is not
    # available on every platform (HAS_CRYPT is set at import time).
    if HAS_CRYPT:
        return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
    raise CommandExecutionError(
        'gen_password is not available on this operating system '
        'because the "crypt" python module is not available.'
    )
def del_password(name):
    '''
    .. versionadded:: 2015.8.8

    Delete the password from name user

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.del_password username
    '''
    # ``passwd -d`` blanks the password field for the account; loglevel
    # is quiet so the command never leaks account details into the log.
    __salt__['cmd.run']('passwd -d {0}'.format(name),
                        python_shell=False,
                        output_loglevel='quiet')
    # Success means the refreshed shadow entry carries no password hash.
    return not info(name)['passwd']
def set_password(name, password):
'''
Set the password for a named user. The password must be a properly defined

View File

@ -12,6 +12,7 @@ import os
import re
import fnmatch
import collections
import copy
# Import 3rd-party libs
import salt.ext.six as six
@ -19,6 +20,7 @@ from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-
# Import salt libs
import salt.config
import salt.minion
import salt.utils
import salt.utils.event
from salt.utils.network import host_to_ip as _host_to_ip
@ -831,6 +833,49 @@ def master(master=None, connected=True):
event.fire_event({'master': master}, '__master_connected')
def ping_master(master):
    '''
    .. versionadded:: 2016.3.0

    Sends ping request to the given master. Fires '__master_failback' event
    on success. Returns bool result.

    master
        Hostname or address of the master to ping. A falsy value (None,
        empty string) immediately returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' status.ping_master localhost
    '''
    if not master:
        return False

    # Work on a copy so the minion's live configuration is untouched.
    opts = copy.deepcopy(__opts__)
    opts['master'] = master
    # Drop any cached resolution to avoid a spurious 'master ip changed'
    # warning; pop() tolerates the key being absent (del would KeyError).
    opts.pop('master_ip', None)

    opts.update(salt.minion.prep_ip_port(opts))
    try:
        # fallback=False: an unresolvable master is a failed ping, not a
        # reason to fall back to 127.0.0.1.
        opts.update(salt.minion.resolve_dns(opts, fallback=False))
    except Exception:  # pylint: disable=broad-except
        # Best-effort probe: any resolution trouble simply means the
        # master is not reachable right now.
        return False

    timeout = opts.get('auth_timeout', 60)
    load = {'cmd': 'ping'}

    result = False
    channel = salt.transport.client.ReqChannel.factory(opts, crypt='clear')
    try:
        # The reply payload is irrelevant; reaching the master is the test.
        channel.send(load, tries=0, timeout=timeout)
        result = True
    except Exception:  # pylint: disable=broad-except
        # Any transport error counts as an unsuccessful ping.
        pass

    if result:
        # Notify the minion's event loop so failback logic can react.
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        event.fire_event({'master': master}, '__master_failback')

    return result
def time(format='%A, %d. %B %Y %I:%M%p'):
'''
.. versionadded:: 2016.3.0

View File

@ -81,9 +81,9 @@ def doc(*args):
else:
target_mod = ''
if _use_fnmatch:
for fun in fnmatch.filter(__salt__.keys(), target_mod): # pylint: disable=incompatible-py3-code
docs[fun] = __salt__[fun].__doc__ # There's no problem feeding fnmatch.filter()
else: # with a Py3's dict_keys() instance
for fun in fnmatch.filter(__salt__.keys(), target_mod):
docs[fun] = __salt__[fun].__doc__
else:
for fun in __salt__:
if fun == module or fun.startswith(target_mod):
@ -315,9 +315,8 @@ def renderer_doc(*args):
for module in args:
if '*' in module:
for fun in fnmatch.filter(renderers_.keys(), module): # pylint: disable=incompatible-py3-code
docs[fun] = renderers_[fun].__doc__ # There's no problem feeding fnmatch.filter()
# with a Py3's dict_keys() instance
for fun in fnmatch.filter(renderers_.keys(), module):
docs[fun] = renderers_[fun].__doc__
else:
for fun in six.iterkeys(renderers_):
docs[fun] = renderers_[fun].__doc__
@ -747,9 +746,9 @@ def list_returners(*args):
return sorted(returners)
for module in args:
for func in fnmatch.filter(returners_.keys(), module): # pylint: disable=incompatible-py3-code
comps = func.split('.') # There's no problem feeding fnmatch.filter()
if len(comps) < 2: # with a Py3's dict_keys() instance
for func in fnmatch.filter(returners_.keys(), module):
comps = func.split('.')
if len(comps) < 2:
continue
returners.add(comps[0])
return sorted(returners)

View File

@ -180,8 +180,9 @@ def create_event(message_type=None, routing_key='everybody', **kwargs):
timestamp = datetime.datetime.strptime(kwargs['timestamp'], timestamp_fmt)
data['timestamp'] = int(time.mktime(timestamp.timetuple()))
except (TypeError, ValueError):
raise SaltInvocationError('Date string could not be parsed: %s, %s',
kwargs['timestamp'], timestamp_fmt)
raise SaltInvocationError('Date string could not be parsed: {0}, {1}'.format(
kwargs['timestamp'], timestamp_fmt)
)
if 'state_start_time' in kwargs:
state_start_time_fmt = kwargs.get('state_start_time_fmt', '%Y-%m-%dT%H:%M:%S')
@ -190,8 +191,9 @@ def create_event(message_type=None, routing_key='everybody', **kwargs):
state_start_time = datetime.datetime.strptime(kwargs['state_start_time'], state_start_time_fmt)
data['state_start_time'] = int(time.mktime(state_start_time.timetuple()))
except (TypeError, ValueError):
raise SaltInvocationError('Date string could not be parsed: %s, %s',
kwargs['state_start_time'], state_start_time_fmt)
raise SaltInvocationError('Date string could not be parsed: {0}, {1}'.format(
kwargs['state_start_time'], state_start_time_fmt)
)
for kwarg in keyword_args:
if kwarg in kwargs:

View File

@ -40,6 +40,12 @@ class daclConstants(object):
'''
dacl constants used throughout the module
'''
# Definition in ntsecuritycon is incorrect (does not match winnt.h). The version
# in ntsecuritycon has the extra bits 0x200 enabled.
# Note that you when you set this permission what you'll generally get back is it
# ORed with 0x200 (SI_NO_ACL_PROTECT), which is what ntsecuritycon incorrectly defines.
FILE_ALL_ACCESS = (ntsecuritycon.STANDARD_RIGHTS_REQUIRED | ntsecuritycon.SYNCHRONIZE | 0x1ff)
def __init__(self):
self.hkeys_security = {
'HKEY_LOCAL_MACHINE': 'MACHINE',
@ -82,7 +88,7 @@ class daclConstants(object):
ntsecuritycon.DELETE,
'TEXT': 'modify'},
'FULLCONTROL': {
'BITS': ntsecuritycon.FILE_ALL_ACCESS,
'BITS': daclConstants.FILE_ALL_ACCESS,
'TEXT': 'full control'}
}
}
@ -293,18 +299,21 @@ class daclConstants(object):
return path
class User(object):
def _getUserSid(user):
'''
class object that returns a users SID
return a state error dictionary, with 'sid' as a field if it could be returned
if user is None, sid will also be None
'''
def __getattr__(self, u):
try:
sid = win32security.LookupAccountName('', u)[0]
return sid
except Exception as e:
raise CommandExecutionError((
'There was an error obtaining the SID of user "{0}". Error returned: {1}'
).format(u, e))
ret = {}
try:
sid = win32security.LookupAccountName('', user)[0] if user else None
ret['result'] = True
ret['sid'] = sid
except Exception as e:
ret['result'] = False
ret['comment'] = 'Unable to obtain the security identifier for {0}. The exception was {1}.'.format(
user, e)
return ret
def __virtual__():
@ -329,13 +338,16 @@ def _get_dacl(path, objectType):
return dacl
def get(path, objectType):
def get(path, objectType, user=None):
'''
get the acl of an object
Get the acl of an object. Will filter by user if one is provided.
'''
ret = {'Path': path,
'ACLs': []}
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
if path and objectType:
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
@ -344,7 +356,8 @@ def get(path, objectType):
if tdacl:
for counter in range(0, tdacl.GetAceCount()):
tAce = tdacl.GetAce(counter)
ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
if not sidRet['sid'] or (tAce[2] == sidRet['sid']):
ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
return ret
@ -377,13 +390,14 @@ def add_ace(path, objectType, user, permission, acetype, propagation):
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
u = User()
user = user.strip()
permission = permission.strip().upper()
acetype = acetype.strip().upper()
propagation = propagation.strip().upper()
thisSid = getattr(u, user)
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
permissionbit = dc.getPermissionBit(objectTypeBit, permission)
acetypebit = dc.getAceTypeBit(acetype)
propagationbit = dc.getPropagationBit(objectTypeBit, propagation)
@ -393,9 +407,9 @@ def add_ace(path, objectType, user, permission, acetype, propagation):
acesAdded = []
try:
if acetypebit == 0:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, thisSid)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, sidRet['sid'])
elif acetypebit == 1:
dacl.AddAccessDeniedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, thisSid)
dacl.AddAccessDeniedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, sidRet['sid'])
win32security.SetNamedSecurityInfo(
path, objectTypeBit, win32security.DACL_SECURITY_INFORMATION,
None, None, dacl, None)
@ -419,7 +433,7 @@ def add_ace(path, objectType, user, permission, acetype, propagation):
return ret
def rm_ace(path, objectType, user, permission, acetype, propagation):
def rm_ace(path, objectType, user, permission=None, acetype=None, propagation=None):
r'''
remove an ace from an object
@ -429,7 +443,7 @@ def rm_ace(path, objectType, user, permission, acetype, propagation):
acetypes: either allow/deny for each user/permission (ALLOW, DENY)
propagation: how the ACE applies to children for Registry Keys and Directories(KEY, KEY&SUBKEYS, SUBKEYS)
***The entire ACE must match to be removed***
If any of the optional parameters are omitted (or set to None) they act as wildcards.
CLI Example:
@ -442,50 +456,38 @@ def rm_ace(path, objectType, user, permission, acetype, propagation):
'changes': {},
'comment': ''}
if (path and user and
permission and acetype
and propagation):
if path and user:
dc = daclConstants()
if objectType.upper() == "FILE":
if propagation and objectType.upper() == "FILE":
propagation = "FILE"
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
u = User()
user = user.strip()
permission = permission.strip().upper()
acetype = acetype.strip().upper()
propagation = propagation.strip().upper()
permission = permission.strip().upper() if permission else None
acetype = acetype.strip().upper() if acetype else None
propagation = propagation.strip().upper() if propagation else None
if check_ace(path, objectType, user, permission, acetype, propagation, True)['Exists']:
thisSid = getattr(u, user)
permissionbit = dc.getPermissionBit(objectTypeBit, permission)
acetypebit = dc.getAceTypeBit(acetype)
propagationbit = dc.getPropagationBit(objectTypeBit, propagation)
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None
acetypebit = dc.getAceTypeBit(acetype) if acetype else None
propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None
dacl = _get_dacl(path, objectTypeBit)
counter = 0
acesRemoved = []
if objectTypeBit == win32security.SE_FILE_OBJECT:
if check_inheritance(path, objectType)['Inheritance']:
if permission == 'FULLCONTROL':
# if inhertiance is enabled on an SE_FILE_OBJECT, then the SI_NO_ACL_PROTECT
# gets unset on FullControl which greys out the include inheritable permission
# checkbox on the advanced security settings gui page
permissionbit = permissionbit ^ ntsecuritycon.SI_NO_ACL_PROTECT
while counter < dacl.GetAceCount():
tAce = dacl.GetAce(counter)
if (tAce[0][1] & win32security.INHERITED_ACE) != win32security.INHERITED_ACE:
if tAce[2] == thisSid:
if tAce[0][0] == acetypebit:
if (tAce[0][1] & propagationbit) == propagationbit:
if tAce[1] == permissionbit:
if tAce[2] == sidRet['sid']:
if not acetypebit or tAce[0][0] == acetypebit:
if not propagationbit or ((tAce[0][1] & propagationbit) == propagationbit):
if not permissionbit or tAce[1] == permissionbit:
dacl.DeleteAce(counter)
counter = counter - 1
acesRemoved.append((
'{0} {1} {2} on {3}'
).format(user, dc.getAceTypeText(acetype),
dc.getPermissionText(objectTypeBit, permission),
dc.getPropagationText(objectTypeBit, propagation)))
acesRemoved.append(_ace_to_text(tAce, objectTypeBit))
counter = counter + 1
if acesRemoved:
@ -530,11 +532,6 @@ def _ace_to_text(ace, objectType):
if dc.rights[objectType][x]['BITS'] == tPerm:
tPerm = dc.rights[objectType][x]['TEXT']
break
else:
if objectType == win32security.SE_FILE_OBJECT:
if (tPerm ^ ntsecuritycon.FILE_ALL_ACCESS) == ntsecuritycon.SI_NO_ACL_PROTECT:
tPerm = 'full control'
break
if (tProps & win32security.INHERITED_ACE) == win32security.INHERITED_ACE:
tInherited = '[Inherited]'
tProps = (tProps ^ win32security.INHERITED_ACE)
@ -633,18 +630,23 @@ def disable_inheritance(path, objectType, copy=True):
return _set_dacl_inheritance(path, objectType, False, copy, None)
def check_inheritance(path, objectType):
def check_inheritance(path, objectType, user=None):
'''
check a specified path to verify if inheritance is enabled
returns 'Inheritance' of True/False
hkey: HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, etc
path: path of the registry key to check
path: path of the registry key or file system object to check
user: if provided, will consider only the ACEs for that user
'''
ret = {'result': False,
'Inheritance': False,
'comment': ''}
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
dc = daclConstants()
objectType = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectType)
@ -660,12 +662,14 @@ def check_inheritance(path, objectType):
for counter in range(0, dacls.GetAceCount()):
ace = dacls.GetAce(counter)
if (ace[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE:
ret['Inheritance'] = True
if not sidRet['sid'] or ace[2] == sidRet['sid']:
ret['Inheritance'] = True
return ret
ret['result'] = True
return ret
def check_ace(path, objectType, user=None, permission=None, acetype=None, propagation=None, exactPermissionMatch=False):
def check_ace(path, objectType, user, permission=None, acetype=None, propagation=None, exactPermissionMatch=False):
'''
checks a path to verify the ACE (access control entry) specified exists
returns 'Exists' true if the ACE exists, false if it does not
@ -686,36 +690,29 @@ def check_ace(path, objectType, user=None, permission=None, acetype=None, propag
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
permission = permission.upper()
acetype = acetype.upper()
propagation = propagation.upper()
permission = permission.upper() if permission else None
acetype = acetype.upper() if permission else None
propagation = propagation.upper() if propagation else None
permissionbit = dc.getPermissionBit(objectTypeBit, permission)
acetypebit = dc.getAceTypeBit(acetype)
propagationbit = dc.getPropagationBit(objectTypeBit, propagation)
permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None
acetypebit = dc.getAceTypeBit(acetype) if acetype else None
propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None
try:
userSid = win32security.LookupAccountName('', user)[0]
except Exception as e:
ret['result'] = False
ret['comment'] = 'Unable to obtain the security identifier for {0}. The exception was {1}.'.format(user, e)
return ret
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
dacls = _get_dacl(path, objectTypeBit)
ret['result'] = True
if dacls:
if objectTypeBit == win32security.SE_FILE_OBJECT:
if check_inheritance(path, objectType)['Inheritance']:
if permission == 'FULLCONTROL':
# if inhertiance is enabled on an SE_FILE_OBJECT, then the SI_NO_ACL_PROTECT
# gets unset on FullControl which greys out the include inheritable permission
# checkbox on the advanced security settings gui page
permissionbit = permissionbit ^ ntsecuritycon.SI_NO_ACL_PROTECT
for counter in range(0, dacls.GetAceCount()):
ace = dacls.GetAce(counter)
if ace[2] == userSid:
if ace[0][0] == acetypebit:
if (ace[0][1] & propagationbit) == propagationbit:
if ace[2] == sidRet['sid']:
if not acetypebit or ace[0][0] == acetypebit:
if not propagationbit or (ace[0][1] & propagationbit) == propagationbit:
if not permissionbit:
ret['Exists'] = True
return ret
if exactPermissionMatch:
if ace[1] == permissionbit:
ret['Exists'] = True

View File

@ -63,7 +63,7 @@ from salt.modules.file import (check_hash, # pylint: disable=W0611
search, _get_flags, extract_hash, _error, _sed_esc, _psed,
RE_FLAG_TABLE, blockreplace, prepend, seek_read, seek_write, rename,
lstat, path_exists_glob, write, pardir, join, HASHES, comment,
uncomment, _add_flags, comment_line)
uncomment, _add_flags, comment_line, apply_template_on_contents)
from salt.utils import namespaced_function as _namespaced_function
@ -89,7 +89,7 @@ def __virtual__():
global access, copy, readdir, rmdir, truncate, replace, search
global _binary_replace, _get_bkroot, list_backups, restore_backup
global blockreplace, prepend, seek_read, seek_write, rename, lstat
global write, pardir, join, _add_flags
global write, pardir, join, _add_flags, apply_template_on_contents
global path_exists_glob, comment, uncomment, _mkstemp_copy
replace = _namespaced_function(replace, globals())
@ -147,6 +147,7 @@ def __virtual__():
comment_line = _namespaced_function(comment_line, globals())
_mkstemp_copy = _namespaced_function(_mkstemp_copy, globals())
_add_flags = _namespaced_function(_add_flags, globals())
apply_template_on_contents = _namespaced_function(apply_template_on_contents, globals())
return __virtualname__
return (False, "Module win_file: module only works on Windows systems")

View File

@ -909,7 +909,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
else:
if not version_num == old.get(target) \
and not old.get(target) == "Not Found" \
and not version_num == 'latest':
and version_num != 'latest':
log.error('{0} {1} not installed'.format(target, version))
ret[target] = {'current': '{0} not installed'.format(version_num)}
continue

View File

@ -287,7 +287,7 @@ def iostat(zpool=None, sample_time=0):
dev = None
if root_vdev:
if root_vdev not in config_data:
if not config_data.get(root_vdev):
config_data[root_vdev] = {}
if len(stat_data) > 0:
config_data[root_vdev] = stat_data

View File

@ -78,7 +78,7 @@ def _is_zypper_error(retcode):
Otherwise False
'''
# see man zypper for existing exit codes
return not int(retcode) in [0, 100, 101, 102, 103]
return int(retcode) not in [0, 100, 101, 102, 103]
def _zypper_check_result(result, xml=False):
@ -340,7 +340,8 @@ def upgrade_available(name):
salt '*' pkg.upgrade_available <package name>
'''
return not not latest_version(name)
# The "not not" tactic is intended here as it forces the return to be False.
return not not latest_version(name) # pylint: disable=C0113
def version(*names, **kwargs):

View File

@ -152,7 +152,7 @@ class Serial(object):
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry)
return obj
if six.PY2 and isinstance(obj, long) and long > pow(2, 64): # pylint: disable=incompatible-py3-code
if six.PY2 and isinstance(obj, long) and long > pow(2, 64):
return str(obj)
elif six.PY3 and isinstance(obj, int) and int > pow(2, 64):
return str(obj)
@ -211,7 +211,7 @@ class Serial(object):
return obj
return obj
return msgpack.dumps(odict_encoder(msg))
except (SystemError, TypeError) as exc:
except (SystemError, TypeError) as exc: # pylint: disable=W0705
log.critical('Unable to serialize message! Consider upgrading msgpack. '
'Message which failed was {failed_message} '
'with exception {exception_message}').format(msg, exc)

View File

@ -37,6 +37,7 @@ from distutils.version import StrictVersion # pylint: disable=no-name-in-module
try:
import boto.ec2
import boto.utils
import boto.exception
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -123,7 +124,7 @@ def ext_pillar(minion_id,
try:
conn = boto.ec2.connect_to_region(region)
except boto.exception as e:
except boto.exception as e: # pylint: disable=E0712
log.error("%s: invalid AWS credentials.", __name__)
return None

View File

@ -212,7 +212,7 @@ def clean_old_jobs():
load_key = ret_key.replace('ret:', 'load:', 1)
if load_key not in living_jids:
to_remove.append(ret_key)
serv.delete(**to_remove)
serv.delete(**to_remove) # pylint: disable=E1134
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument

View File

@ -171,7 +171,7 @@ def clear_all(tgt=None, expr_form='glob'):
clear_mine_flag=True)
def clear_git_lock(role, remote=None):
def clear_git_lock(role, remote=None, **kwargs):
'''
.. versionadded:: 2015.8.2
@ -194,12 +194,23 @@ def clear_git_lock(role, remote=None):
have their lock cleared. For example, a ``remote`` value of **github**
will remove the lock from all github.com remotes.
type : update,checkout
The types of lock to clear. Can be ``update``, ``checkout``, or both of
these (either comma-separated or as a Python list).
.. versionadded:: 2015.8.8
CLI Example:
.. code-block:: bash
salt-run cache.clear_git_lock git_pillar
'''
kwargs = salt.utils.clean_kwargs(**kwargs)
type_ = salt.utils.split_input(kwargs.pop('type', ['update', 'checkout']))
if kwargs:
salt.utils.invalid_kwargs(kwargs)
if role == 'gitfs':
git_objects = [salt.utils.gitfs.GitFS(__opts__)]
git_objects[0].init_remotes(__opts__['gitfs_remotes'],
@ -248,11 +259,15 @@ def clear_git_lock(role, remote=None):
ret = {}
for obj in git_objects:
cleared, errors = _clear_lock(obj.clear_lock, role, remote)
if cleared:
ret.setdefault('cleared', []).extend(cleared)
if errors:
ret.setdefault('errors', []).extend(errors)
for lock_type in type_:
cleared, errors = _clear_lock(obj.clear_lock,
role,
remote=remote,
lock_type=lock_type)
if cleared:
ret.setdefault('cleared', []).extend(cleared)
if errors:
ret.setdefault('errors', []).extend(errors)
if not ret:
ret = 'No locks were removed'
salt.output.display_output(ret, 'nested', opts=__opts__)
return 'No locks were removed'
return ret

View File

@ -72,7 +72,7 @@ def execution():
for v in six.itervalues(ret):
docs.update(v)
except SaltClientError as exc:
print(exc) # pylint: disable=W1698
print(exc)
return []
i = itertools.chain.from_iterable([six.iteritems(docs['ret'])])

View File

@ -293,8 +293,8 @@ def clear_cache(backend=None):
if errors:
ret['errors'] = errors
if not ret:
ret = 'No cache was cleared'
salt.output.display_output(ret, 'nested', opts=__opts__)
return 'No cache was cleared'
return ret
def clear_lock(backend=None, remote=None):
@ -334,8 +334,8 @@ def clear_lock(backend=None, remote=None):
if errors:
ret['errors'] = errors
if not ret:
ret = 'No locks were removed'
salt.output.display_output(ret, 'nested', opts=__opts__)
return 'No locks were removed'
return ret
def lock(backend=None, remote=None):
@ -376,5 +376,5 @@ def lock(backend=None, remote=None):
if errors:
ret['errors'] = errors
if not ret:
ret = 'No locks were set'
salt.output.display_output(ret, 'nested', opts=__opts__)
return 'No locks were set'
return ret

View File

@ -390,7 +390,7 @@ Dumper.add_multi_representer(type(None), Dumper.represent_none)
if six.PY2:
Dumper.add_multi_representer(six.binary_type, Dumper.represent_str)
Dumper.add_multi_representer(six.text_type, Dumper.represent_unicode)
Dumper.add_multi_representer(long, Dumper.represent_long) # pylint: disable=incompatible-py3-code
Dumper.add_multi_representer(long, Dumper.represent_long)
else:
Dumper.add_multi_representer(six.binary_type, Dumper.represent_binary)
Dumper.add_multi_representer(six.text_type, Dumper.represent_str)

View File

@ -49,7 +49,7 @@ data in pillar. Here's an example pillar structure:
- 'server-1': blade1
- 'server-2': blade2
blades:
servers:
server-1:
idrac_password: saltstack1
ipmi_over_lan: True
@ -114,8 +114,8 @@ pillar stated above:
- server-3: powercycle
- server-4: powercycle
# Set idrac_passwords for blades
{% for k, v in details['blades'].iteritems() %}
# Set idrac_passwords for blades. racadm needs them to be called 'server-x'
{% for k, v in details['servers'].iteritems() %}
{{ k }}:
dellchassis.blade_idrac:
- idrac_password: {{ v['idrac_password'] }}
@ -175,7 +175,10 @@ def blade_idrac(name, idrac_password=None, idrac_ipmi=None,
'''
Set parameters for iDRAC in a blade.
:param idrac_password: Password to establish for the iDRAC interface
:param idrac_password: Password to use to connect to the iDRACs directly
(idrac_ipmi and idrac_dnsname must be set directly on the iDRAC. They
can't be set through the CMC. If this password is present, use it
instead of the CMC password)
:param idrac_ipmi: Enable/Disable IPMI over LAN
:param idrac_ip: Set IP address for iDRAC
:param idrac_netmask: Set netmask for iDRAC

View File

@ -1512,6 +1512,23 @@ def managed(name,
contents = os.linesep.join(validated_contents)
if contents_newline and not contents.endswith(os.linesep):
contents += os.linesep
if template:
contents = __salt__['file.apply_template_on_contents'](
contents,
template=template,
context=context,
defaults=defaults,
saltenv=__env__)
if not isinstance(contents, six.string_types):
if 'result' in contents:
ret['result'] = contents['result']
else:
ret['result'] = False
if 'comment' in contents:
ret['comment'] = contents['comment']
else:
ret['comment'] = 'Error while applying template on contents'
return ret
# Make sure that leading zeros stripped by YAML loader are added back
mode = __salt__['config.manage_mode'](mode)

View File

@ -148,7 +148,7 @@ def datasource_exists(name, jboss_config, datasource_properties, recreate=False,
ret['result'] = False
ret['comment'] = 'Could not create datasource. Stdout: '+create_result['stdout']
else:
raise CommandExecutionError('Unable to handle error', ds_result['failure-description'])
raise CommandExecutionError('Unable to handle error: {0}'.format(ds_result['failure-description']))
if ret['result']:
log.debug("ds_new_properties=%s", str(ds_new_properties))

View File

@ -198,7 +198,7 @@ def module(name, module_state='Enabled', version='any'):
'{1} module.'.format(module_state, module)
ret['result'] = False
return ret
if not version == 'any':
if version != 'any':
installed_version = modules[name]['Version']
if not installed_version == version:
ret['comment'] = 'Module version is {0} and does not match ' \

View File

@ -812,7 +812,7 @@ def vm_absent(name, archive=False):
else:
ret['result'] = True
if not isinstance(ret['result'], (bool)) and 'Error' in ret['result']:
if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
ret['result'] = False
ret['comment'] = 'failed to delete vm {0}'.format(name)
else:
@ -847,7 +847,7 @@ def vm_running(name):
else:
# start the vm
ret['result'] = True if __opts__['test'] else __salt__['vmadm.start'](name, key='hostname')
if not isinstance(ret['result'], (bool)) and 'Error' in ret['result']:
if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
ret['result'] = False
ret['comment'] = 'failed to start {0}'.format(name)
else:
@ -882,7 +882,7 @@ def vm_stopped(name):
else:
# stop the vm
ret['result'] = True if __opts__['test'] else __salt__['vmadm.stop'](name, key='hostname')
if not isinstance(ret['result'], (bool)) and 'Error' in ret['result']:
if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
ret['result'] = False
ret['comment'] = 'failed to stop {0}'.format(name)
else:

View File

@ -249,7 +249,7 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
force=config['force'],
dir=config['import_dirs']
)
ret['result'] = name in ret['result'] and ret['result'][name] == 'imported'
ret['result'] = ret['result'].get(name) == 'imported'
if ret['result']:
ret['changes'][name] = 'imported'
ret['comment'] = 'storage pool {0} was imported'.format(name)
@ -280,10 +280,10 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
# execute zpool.create
ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties)
if name in ret['result'] and ret['result'][name] == 'created':
if ret['result'].get(name) == 'created':
ret['result'] = True
else:
if name in ret['result']:
if ret['result'].get(name):
ret['comment'] = ret['result'][name]
ret['result'] = False
@ -325,14 +325,14 @@ def absent(name, export=False, force=False):
ret['result'] = True
else:
ret['result'] = __salt__['zpool.export'](name, force=force)
ret['result'] = name in ret['result'] and ret['result'][name] == 'exported'
ret['result'] = ret['result'].get(name) == 'exported'
else: # try to destroy the zpool
if __opts__['test']:
ret['result'] = True
else:
ret['result'] = __salt__['zpool.destroy'](name, force=force)
ret['result'] = name in ret['result'] and ret['result'][name] == 'destroyed'
ret['result'] = ret['result'].get(name) == 'destroyed'
if ret['result']: # update the changes and comment
ret['changes'][name] = 'exported' if export else 'destroyed'

View File

@ -43,4 +43,7 @@ class LocalChannel(ReqChannel):
return ret
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
super(LocalChannel, self).crypted_transfer_decode_dictentry()
super(LocalChannel, self).crypted_transfer_decode_dictentry(load,
dictkey=dictkey,
tries=tries,
timeout=timeout)

View File

@ -1788,7 +1788,7 @@ def gen_state_tag(low):
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
def check_state_result(running):
def check_state_result(running, recurse=False):
'''
Check the total return value of the run and determine if the running
dict has any issues
@ -1801,20 +1801,15 @@ def check_state_result(running):
ret = True
for state_result in six.itervalues(running):
if not isinstance(state_result, dict):
# return false when hosts return a list instead of a dict
if not recurse and not isinstance(state_result, dict):
ret = False
if ret:
if ret and isinstance(state_result, dict):
result = state_result.get('result', _empty)
if result is False:
ret = False
# only override return value if we are not already failed
elif (
result is _empty
and isinstance(state_result, dict)
and ret
):
ret = check_state_result(state_result)
elif result is _empty and isinstance(state_result, dict) and ret:
ret = check_state_result(state_result, recurse=True)
# return as soon as we got a failure
if not ret:
break
@ -1901,7 +1896,7 @@ def rm_rf(path):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
raise # pylint: disable=E0704
shutil.rmtree(path, onerror=_onerror)
@ -2086,7 +2081,7 @@ def alias_function(fun, name, doc=None):
if six.PY3:
orig_name = fun.__name__
else:
orig_name = fun.func_name # pylint: disable=incompatible-py3-code
orig_name = fun.func_name
alias_msg = ('\nThis function is an alias of '
'``{0}``.\n'.format(orig_name))
@ -2865,7 +2860,7 @@ def to_str(s, encoding=None):
else:
if isinstance(s, bytearray):
return str(s)
if isinstance(s, unicode): # pylint: disable=incompatible-py3-code
if isinstance(s, unicode):
return s.encode(encoding or __salt_system_encoding__)
raise TypeError('expected str, bytearray, or unicode')
@ -2896,7 +2891,7 @@ def to_unicode(s, encoding=None):
else:
if isinstance(s, str):
return s.decode(encoding or __salt_system_encoding__)
return unicode(s) # pylint: disable=incompatible-py3-code
return unicode(s)
def is_list(value):
@ -2967,6 +2962,6 @@ def split_input(val):
if isinstance(val, list):
return val
try:
return val.split(',')
return [x.strip() for x in val.split(',')]
except AttributeError:
return str(val).split(',')
return [x.strip() for x in str(val).split(',')]

View File

@ -27,7 +27,7 @@ def condition_input(args, kwargs):
# XXX: We might need to revisit this code when we move to Py3
# since long's are int's in Py3
if (six.PY3 and isinstance(arg, six.integer_types)) or \
(six.PY2 and isinstance(arg, long)): # pylint: disable=incompatible-py3-code
(six.PY2 and isinstance(arg, long)):
ret.append(str(arg))
else:
ret.append(arg)

View File

@ -38,7 +38,7 @@ def deepcopy_bound(name):
'''
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class) # pylint: disable=W1699
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
try:
pre_dispatch = copy._deepcopy_dispatch
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method

View File

@ -101,7 +101,8 @@ class Depends(object):
)
continue
if dependency in frame.f_globals:
if dependency in frame.f_globals \
or dependency in frame.f_locals:
log.trace(
'Dependency ({0}) already loaded inside {1}, '
'skipping'.format(

View File

@ -1066,16 +1066,17 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess):
try:
self.minion.returners[event_return](self.event_queue)
except Exception as exc:
log.error('Could not store events {0}. '
'Returner raised exception: {1}'.format(
self.event_queue, exc))
log.error('Could not store events - returner \'{0}\' raised '
'exception: {1}'.format(self.opts['event_return'], exc))
# don't waste processing power unnecessarily on converting a
# potentially huge dataset to a string
if log.level <= logging.DEBUG:
log.debug('Event data that caused an exception: {0}'.format(
self.event_queue))
del self.event_queue[:]
else:
log.error(
'Could not store return for event(s) {0}. Returner '
'\'{1}\' not found.'
.format(self.event_queue, self.opts['event_return'])
)
log.error('Could not store return for event(s) - returner '
'\'{1}\' not found.'.format(self.opts['event_return']))
def run(self):
'''

View File

@ -3,6 +3,7 @@
# Import python libs
from __future__ import absolute_import
import copy
import contextlib
import distutils.version # pylint: disable=import-error,no-name-in-module
import errno
import fnmatch
@ -15,6 +16,7 @@ import shlex
import shutil
import stat
import subprocess
import time
from datetime import datetime
VALID_PROVIDERS = ('gitpython', 'pygit2', 'dulwich')
@ -57,7 +59,7 @@ import salt.utils
import salt.utils.itertools
import salt.utils.url
import salt.fileserver
from salt.exceptions import FileserverConfigError
from salt.exceptions import FileserverConfigError, GitLockError
from salt.utils.event import tagify
# Import third party libs
@ -298,29 +300,8 @@ class GitProvider(object):
_check_ref(ret, base_ref, rname)
return ret
def check_lock(self):
'''
Used by the provider-specific fetch() function to check the existence
of an update lock, and set the lock if not present. If the lock exists
already, or if there was a problem setting the lock, this function
returns False. If the lock was successfully set, return True.
'''
if os.path.exists(self.lockfile):
log.warning(
'Update lockfile is present for {0} remote \'{1}\', '
'skipping. If this warning persists, it is possible that the '
'update process was interrupted. Removing {2} or running '
'\'salt-run cache.clear_git_lock {0}\' will allow updates to '
'continue for this remote.'
.format(self.role, self.id, self.lockfile)
)
return False
errors = self.lock()[-1]
if errors:
log.error('Unable to set update lock for {0} remote \'{1}\', '
'skipping.'.format(self.role, self.id))
return False
return True
def _get_lock_file(self, lock_type='update'):
return os.path.join(self.gitdir, lock_type + '.lk')
def check_root(self):
'''
@ -344,65 +325,143 @@ class GitProvider(object):
'''
return []
def clear_lock(self):
def clear_lock(self, lock_type='update'):
'''
Clear update.lk
'''
lock_file = self._get_lock_file(lock_type=lock_type)
def _add_error(errlist, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(self.url, self.lockfile, exc))
.format(self.url, lock_file, exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(self.lockfile):
try:
os.remove(self.lockfile)
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(self.lockfile)
except OSError as exc:
_add_error(failed, exc)
else:
try:
os.remove(lock_file)
except OSError as exc:
if exc.errno == errno.ENOENT:
# No lock file present
pass
elif exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(lock_file)
except OSError as exc:
_add_error(failed, exc)
else:
msg = 'Removed lock for {0} remote \'{1}\''.format(
self.role,
self.id
)
log.debug(msg)
success.append(msg)
_add_error(failed, exc)
else:
msg = 'Removed {0} lock for {1} remote \'{2}\''.format(
lock_type,
self.role,
self.id
)
log.debug(msg)
success.append(msg)
return success, failed
def fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class.
'''
try:
with self.gen_lock(lock_type='update'):
log.debug('Fetching %s remote \'%s\'', self.role, self.id)
# Run provider-specific fetch code
return self._fetch()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
log.warning(
'Update lock file is present for %s remote \'%s\', '
'skipping. If this warning persists, it is possible that '
'the update process was interrupted, but the lock could '
'also have been manually set. Removing %s or running '
'\'salt-run cache.clear_git_lock %s type=update\' will '
'allow updates to continue for this remote.',
self.role,
self.id,
self._get_lock_file(lock_type='update'),
self.role,
)
return False
def _lock(self, lock_type='update', failhard=False):
'''
Place a lock file if (and only if) it does not already exist.
'''
try:
fh_ = os.open(self._get_lock_file(lock_type),
os.O_CREAT | os.O_EXCL | os.O_WRONLY)
with os.fdopen(fh_, 'w'):
# Write the lock file and close the filehandle
pass
except (OSError, IOError) as exc:
if exc.errno == errno.EEXIST:
if failhard:
raise
return None
else:
msg = 'Unable to set {0} lock for {1} ({2}): {3} '.format(
lock_type,
self.id,
self._get_lock_file(lock_type),
exc
)
log.error(msg)
raise GitLockError(exc.errno, msg)
msg = 'Set {0} lock for {1} remote \'{2}\''.format(
lock_type,
self.role,
self.id
)
log.debug(msg)
return msg
def lock(self):
'''
Place an update.lk
Place a lock file and report on the success/failure. This is an
interface to be used by the fileserver runner, so it is hard-coded to
perform an update lock. We aren't using the gen_lock()
contextmanager here because the lock is meant to stay and not be
automatically removed.
'''
success = []
failed = []
if not os.path.exists(self.lockfile):
try:
with salt.utils.fopen(self.lockfile, 'w+') as fp_:
fp_.write('')
except (IOError, OSError) as exc:
msg = ('Unable to set update lock for {0} ({1}): {2} '
.format(self.url, self.lockfile, exc))
log.error(msg)
failed.append(msg)
else:
msg = 'Set lock for {0} remote \'{1}\''.format(
self.role,
self.id
)
log.debug(msg)
success.append(msg)
try:
result = self._lock(lock_type='update')
except GitLockError as exc:
failed.append(exc.strerror)
else:
if result is not None:
success.append(result)
return success, failed
@contextlib.contextmanager
def gen_lock(self, lock_type='update'):
'''
Set and automatically clear a lock
'''
lock_set = False
try:
self._lock(lock_type=lock_type, failhard=True)
lock_set = True
yield
except (OSError, IOError, GitLockError) as exc:
raise GitLockError(exc.errno, exc.strerror)
finally:
if lock_set:
self.clear_lock(lock_type=lock_type)
def init_remote(self):
'''
This function must be overridden in a sub-class
@ -432,13 +491,14 @@ class GitProvider(object):
blacklist=self.env_blacklist
)
def envs(self):
def _fetch(self):
'''
This function must be overridden in a sub-class
Provider-specific code for fetching, must be implemented in a
sub-class.
'''
raise NotImplementedError()
def fetch(self):
def envs(self):
'''
This function must be overridden in a sub-class
'''
@ -504,17 +564,67 @@ class GitPython(GitProvider):
def checkout(self):
'''
Checkout the configured branch/tag
Checkout the configured branch/tag. We catch an "Exception" class here
instead of a specific exception class because the exceptions raised by
GitPython when running these functions vary in different versions of
GitPython.
'''
for ref in ('origin/' + self.branch, self.branch):
try:
head_sha = self.repo.rev_parse('HEAD').hexsha
except Exception:
# Should only happen the first time we are checking out, since
# we fetch first before ever checking anything out.
head_sha = None
# 'origin/' + self.branch ==> matches a branch head
# 'tags/' + self.branch + '@{commit}' ==> matches tag's commit
for rev_parse_target, checkout_ref in (
('origin/' + self.branch, 'origin/' + self.branch),
('tags/' + self.branch + '@{commit}', 'tags/' + self.branch)):
try:
self.repo.git.checkout(ref)
target_sha = self.repo.rev_parse(rev_parse_target).hexsha
except Exception:
# ref does not exist
continue
else:
if head_sha == target_sha:
# No need to checkout, we're already up-to-date
return self.check_root()
try:
with self.gen_lock(lock_type='checkout'):
self.repo.git.checkout(checkout_ref)
log.debug(
'%s remote \'%s\' has been checked out to %s',
self.role,
self.id,
checkout_ref
)
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return None
except Exception:
continue
return self.check_root()
log.error(
'Failed to checkout {0} from {1} remote \'{2}\': remote ref does '
'not exist'.format(self.branch, self.role, self.id)
'Failed to checkout %s from %s remote \'%s\': remote ref does '
'not exist', self.branch, self.role, self.id
)
return None
@ -555,7 +665,7 @@ class GitPython(GitProvider):
log.error(_INVALID_REPO.format(self.cachedir, self.url))
return new
self.lockfile = os.path.join(self.repo.working_dir, 'update.lk')
self.gitdir = os.path.join(self.repo.working_dir, '.git')
if not self.repo.remotes:
try:
@ -606,13 +716,11 @@ class GitPython(GitProvider):
ref_paths = [x.path for x in self.repo.refs]
return self._get_envs_from_ref_paths(ref_paths)
def fetch(self):
def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
if not self.check_lock():
return False
origin = self.repo.remotes[0]
try:
fetch_results = origin.fetch()
@ -774,7 +882,61 @@ class Pygit2(GitProvider):
remote_ref = 'refs/remotes/origin/' + self.branch
tag_ref = 'refs/tags/' + self.branch
try:
local_head = self.repo.lookup_reference('HEAD')
except KeyError:
log.warning(
'HEAD not present in %s remote \'%s\'', self.role, self.id
)
return None
try:
head_sha = local_head.get_object().hex
except AttributeError:
# Shouldn't happen, but just in case a future pygit2 API change
# breaks things, avoid a traceback and log an error.
log.error(
'Unable to get SHA of HEAD for %s remote \'%s\'',
self.role, self.id
)
return None
except KeyError:
head_sha = None
refs = self.repo.listall_references()
def _perform_checkout(checkout_ref, branch=True):
'''
DRY function for checking out either a branch or a tag
'''
try:
with self.gen_lock(lock_type='checkout'):
# Checkout the local branch corresponding to the
# remote ref.
self.repo.checkout(checkout_ref)
if branch:
self.repo.reset(oid, pygit2.GIT_RESET_HARD)
return True
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return False
try:
if remote_ref in refs:
# Get commit id for the remote ref
@ -784,41 +946,99 @@ class Pygit2(GitProvider):
# it at the commit id of the remote ref
self.repo.create_reference(local_ref, oid)
# Check HEAD ref existence (checking out local_ref when HEAD
# ref doesn't exist will raise an exception in pygit2 >= 0.21),
# and create the HEAD ref if it is missing.
head_ref = self.repo.lookup_reference('HEAD').target
if head_ref not in refs and head_ref != local_ref:
branch_name = head_ref.partition('refs/heads/')[-1]
if not branch_name:
# Shouldn't happen, but log an error if it does
log.error(
'pygit2 was unable to resolve branch name from '
'HEAD ref \'{0}\' in {1} remote \'{2}\''.format(
head_ref, self.role, self.id
)
)
return None
remote_head = 'refs/remotes/origin/' + branch_name
if remote_head not in refs:
log.error(
'Unable to find remote ref \'{0}\' in {1} remote '
'\'{2}\''.format(head_ref, self.role, self.id)
)
return None
self.repo.create_reference(
head_ref,
self.repo.lookup_reference(remote_head).target
try:
target_sha = \
self.repo.lookup_reference(remote_ref).get_object().hex
except KeyError:
log.error(
'pygit2 was unable to get SHA for %s in %s remote '
'\'%s\'', local_ref, self.role, self.id
)
return None
# Point HEAD at the local ref
self.repo.checkout(local_ref)
# Reset HEAD to the commit id of the remote ref
self.repo.reset(oid, pygit2.GIT_RESET_HARD)
# Only perform a checkout if HEAD and target are not pointing
# at the same SHA1.
if head_sha != target_sha:
# Check existence of the ref in refs/heads/ which
# corresponds to the local HEAD. Checking out local_ref
# below when the local ref for HEAD is missing will raise an
# exception in pygit2 >= 0.21. If this ref is not present,
# create it. The "head_ref != local_ref" check ensures we
# don't try to add this ref if it is not necessary, as it
# would have been added above already. head_ref would be
# the same as local_ref if the branch name was changed but
# the cachedir was not (for example if a "name" parameter
# was used in a git_pillar remote, or if we are using
# winrepo which takes the basename of the repo as the
# cachedir).
head_ref = local_head.target
# If head_ref is not a string, it will point to a
# pygit2.Oid object and we are in detached HEAD mode.
# Therefore, there is no need to add a local reference. If
# head_ref == local_ref, then the local reference for HEAD
# in refs/heads/ already exists and again, no need to add.
if isinstance(head_ref, six.string_types) \
and head_ref not in refs and head_ref != local_ref:
branch_name = head_ref.partition('refs/heads/')[-1]
if not branch_name:
# Shouldn't happen, but log an error if it does
log.error(
'pygit2 was unable to resolve branch name from '
'HEAD ref \'{0}\' in {1} remote \'{2}\''.format(
head_ref, self.role, self.id
)
)
return None
remote_head = 'refs/remotes/origin/' + branch_name
if remote_head not in refs:
log.error(
'Unable to find remote ref \'{0}\' in {1} remote '
'\'{2}\''.format(head_ref, self.role, self.id)
)
return None
self.repo.create_reference(
head_ref,
self.repo.lookup_reference(remote_head).target
)
if not _perform_checkout(local_ref, branch=True):
return None
# Return the relative root, if present
return self.check_root()
elif tag_ref in refs:
self.repo.checkout(tag_ref)
return self.check_root()
tag_obj = self.repo.revparse_single(tag_ref)
if not isinstance(tag_obj, pygit2.Tag):
log.error(
'%s does not correspond to pygit2.Tag object',
tag_ref
)
else:
try:
# If no AttributeError raised, this is an annotated tag
tag_sha = tag_obj.target.hex
except AttributeError:
try:
tag_sha = tag_obj.hex
except AttributeError:
# Shouldn't happen, but could if a future pygit2
# API change breaks things.
log.error(
'Unable to resolve %s from %s remote \'%s\' '
'to either an annotated or non-annotated tag',
tag_ref, self.role, self.id
)
return None
if head_sha != target_sha:
if not _perform_checkout(local_ref, branch=False):
return None
# Return the relative root, if present
return self.check_root()
except GitLockError:
raise
except Exception as exc:
log.error(
'Failed to checkout {0} from {1} remote \'{2}\': {3}'.format(
@ -923,7 +1143,7 @@ class Pygit2(GitProvider):
log.error(_INVALID_REPO.format(self.cachedir, self.url))
return new
self.lockfile = os.path.join(self.repo.workdir, 'update.lk')
self.gitdir = os.path.join(self.repo.workdir, '.git')
if not self.repo.remotes:
try:
@ -1001,13 +1221,11 @@ class Pygit2(GitProvider):
ref_paths = self.repo.listall_references()
return self._get_envs_from_ref_paths(ref_paths)
def fetch(self):
def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
if not self.check_lock():
return False
origin = self.repo.remotes[0]
refs_pre = self.repo.listall_references()
fetch_kwargs = {}
@ -1350,13 +1568,11 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
ref_paths = self.get_env_refs(self.repo.get_refs())
return self._get_envs_from_ref_paths(ref_paths)
def fetch(self):
def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
if not self.check_lock():
return False
# origin is just a url here, there is no origin object
origin = self.url
client, path = \
@ -1628,7 +1844,7 @@ class Dulwich(GitProvider): # pylint: disable=abstract-method
log.error(_INVALID_REPO.format(self.cachedir, self.url))
return new
self.lockfile = os.path.join(self.repo.path, 'update.lk')
self.gitdir = os.path.join(self.repo.path, '.git')
# Read in config file and look for the remote
try:
@ -1842,9 +2058,9 @@ class GitBase(object):
)
return errors
def clear_lock(self, remote=None):
def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk
Clear update.lk for all remotes
'''
cleared = []
errors = []
@ -1859,7 +2075,7 @@ class GitBase(object):
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock()
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors
@ -1885,8 +2101,6 @@ class GitBase(object):
'\'{2}\''.format(exc, self.role, repo.id),
exc_info_on_loglevel=logging.DEBUG
)
finally:
repo.clear_lock()
return changed
def lock(self, remote=None):
@ -1951,7 +2165,7 @@ class GitBase(object):
self.hash_cachedir,
self.find_file
)
except (IOError, OSError):
except (OSError, IOError):
# Hash file won't exist if no files have yet been served up
pass
@ -2188,6 +2402,38 @@ class GitBase(object):
)
)
def do_checkout(self, repo):
'''
Common code for git_pillar/winrepo to handle locking and checking out
of a repo.
'''
time_start = time.time()
while time.time() - time_start <= 5:
try:
return repo.checkout()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
time.sleep(0.1)
continue
else:
log.error(
'Error %d encountered while obtaining checkout '
'lock for %s remote \'%s\': %s',
exc.errno,
repo.role,
repo.id,
exc
)
break
else:
log.error(
'Timed out waiting for checkout lock to be released for '
'%s remote \'%s\'. If this error persists, run \'salt-run '
'cache.clear_git_lock %s type=checkout\' to clear it.',
self.role, repo.id, self.role
)
return None
class GitFS(GitBase):
'''
@ -2486,7 +2732,7 @@ class GitPillar(GitBase):
'''
self.pillar_dirs = {}
for repo in self.remotes:
cachedir = repo.checkout()
cachedir = self.do_checkout(repo)
if cachedir is not None:
# Figure out which environment this remote should be assigned
if repo.env:
@ -2528,6 +2774,6 @@ class WinRepo(GitBase):
'''
self.winrepo_dirs = {}
for repo in self.remotes:
cachedir = repo.checkout()
cachedir = self.do_checkout(repo)
if cachedir is not None:
self.winrepo_dirs[repo.url] = cachedir
self.winrepo_dirs[repo.id] = cachedir

View File

@ -287,7 +287,7 @@ except (ImportError, AttributeError):
'''
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items() # pylint: disable=incompatible-py3-code
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
@ -335,7 +335,7 @@ class DefaultOrderedDict(OrderedDict):
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items() # pylint: disable=incompatible-py3-code
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
@ -346,7 +346,7 @@ class DefaultOrderedDict(OrderedDict):
def __deepcopy__(self):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items())) # pylint: disable=incompatible-py3-code
copy.deepcopy(self.items()))
def __repr__(self, _repr_running={}): # pylint: disable=W0102
return 'DefaultOrderedDict({0}, {1})'.format(self.default_factory,

View File

@ -764,11 +764,11 @@ class Schedule(object):
# Send back to master so the job is included in the job list
mret = ret.copy()
mret['jid'] = 'req'
channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
event = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
channel.send(load)
event.fire_event(load, '__schedule_return')
log.debug('schedule.handle_func: Removing {0}'.format(proc_fn))
os.unlink(proc_fn)

View File

@ -565,10 +565,10 @@ class Schema(six.with_metaclass(SchemaMeta, object)):
serialized['type'] = 'object'
properties = OrderedDict()
cls.after_items_update = []
for name in cls._order:
for name in cls._order: # pylint: disable=E1133
skip_order = False
item_name = None
if name in cls._sections:
if name in cls._sections: # pylint: disable=E1135
section = cls._sections[name]
serialized_section = section.serialize(None if section.__flatten__ is True else name)
if section.__flatten__ is True:
@ -586,7 +586,7 @@ class Schema(six.with_metaclass(SchemaMeta, object)):
# Store it as a configuration section
properties[name] = serialized_section
if name in cls._items:
if name in cls._items: # pylint: disable=E1135
config = cls._items[name]
item_name = config.__item_name__ or name
# Handle the configuration items defined in the class instance

View File

@ -243,7 +243,7 @@ def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='',
'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))))); exit(0);\' '
'\'{0}\''.format(json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = cmd.communicate()
if cmd.returncode == 0:
try:
@ -405,7 +405,7 @@ def gen_min(cachedir, extra_mods='', overwrite=False, so_mods='',
'print(json.dumps(salt.utils.thin.get_tops(**(json.loads(sys.argv[1]))))); exit(0);\' '
'\'{0}\''.format(json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = cmd.communicate()
if cmd.returncode == 0:
try:

View File

@ -3,8 +3,6 @@
Set up the version of Salt
'''
# pylint: disable=incompatible-py3-code
# Import python libs
from __future__ import absolute_import, print_function
import re

View File

@ -31,6 +31,60 @@ from salt.config import minion_config
from salt.loader import LazyLoader, _module_dirs, grains
loader_template = '''
import os
from salt.utils.decorators import depends
@depends('os')
def loaded():
return True
@depends('non_existantmodulename')
def not_loaded():
return True
'''
class LazyLoaderTest(TestCase):
    '''
    Test the loader
    '''
    # Name (without .py) of the throwaway module written to disk in setUp()
    module_name = 'lazyloadertest'

    def setUp(self):
        # Build a minimal minion configuration for the loader to consume.
        self.opts = minion_config(None)
        self.opts['disable_modules'] = ['pillar']
        self.opts['grains'] = grains(self.opts)

        # Setup the module
        # Write loader_template out as a real module file so the
        # LazyLoader can discover and import it from module_dir.
        self.module_dir = tempfile.mkdtemp(dir=integration.TMP)
        self.module_file = os.path.join(self.module_dir,
                                        '{0}.py'.format(self.module_name))
        with open(self.module_file, 'w') as fh:
            fh.write(loader_template)
            # flush + fsync before loading so the loader sees the full
            # file contents on disk, not a partially buffered write.
            fh.flush()
            os.fsync(fh.fileno())

        # Invoke the loader
        self.loader = LazyLoader([self.module_dir], self.opts, tag='module')

    def tearDown(self):
        # Remove the temporary module directory created in setUp().
        shutil.rmtree(self.module_dir)

    def test_depends(self):
        '''
        Test that the depends decorator works properly
        '''
        # Make sure depends correctly allowed a function to load. If this
        # results in a KeyError, the decorator is broken.
        self.assertTrue(
            inspect.isfunction(
                self.loader[self.module_name + '.loaded']
            )
        )
        # Make sure depends correctly kept a function from loading
        self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
class LazyLoaderVirtualEnabledTest(TestCase):
'''

View File

@ -39,9 +39,7 @@ class CopyTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
if not line:
continue
data = yaml.load(line)
minions.extend(data.keys()) # pylint: disable=incompatible-py3-code
# since we're extending a list, the Py3 dict_keys view will behave
# as expected.
minions.extend(data.keys())
self.assertNotEqual(minions, [])

View File

@ -124,7 +124,7 @@ class PCAPParser(object):
'tcp': {}
}
(header, packet) = cap.next() # pylint: disable=W1699
(header, packet) = cap.next()
eth_length, eth_protocol = self.parse_ether(packet)

View File

@ -25,7 +25,12 @@ filemod.__salt__ = {
'cmd.run': cmdmod.run,
'cmd.run_all': cmdmod.run_all
}
filemod.__opts__ = {'test': False}
filemod.__opts__ = {
'test': False,
'file_roots': {'base': 'tmp'},
'pillar_roots': {'base': 'tmp'},
'cachedir': 'tmp',
}
filemod.__grains__ = {'kernel': 'Linux'}
SED_CONTENT = """test
@ -35,6 +40,8 @@ content
here
"""
filemod.__pillar__ = {}
class FileReplaceTestCase(TestCase):
MULTILINE_STRING = textwrap.dedent('''\
@ -550,6 +557,20 @@ class FileModuleTestCase(TestCase):
cmd_mock.assert_called_once_with(cmd, python_shell=False)
self.assertEqual('test_retval', ret)
def test_apply_template_on_contents(self):
    '''
    Tests that the templating engine works on string contents
    '''
    contents = 'This is a {{ template }}.'
    # 'defaults' supplies the rendering context, so {{ template }}
    # should expand to 'templated file' in the output.
    defaults = {'template': 'templated file'}
    ret = filemod.apply_template_on_contents(
        contents,
        template='jinja',
        context={'opts': filemod.__opts__},
        defaults=defaults,
        saltenv='base')
    self.assertEqual(ret, 'This is a templated file.')
if __name__ == '__main__':
from integration import run_tests

View File

@ -1837,7 +1837,7 @@ class ConfigTestCase(TestCase):
)
self.assertEqual(
item.serialize(), {
'anyOf': [i.serialize() for i in item.items]
'anyOf': [i.serialize() for i in item.items] # pylint: disable=E1133
}
)
@ -1889,7 +1889,7 @@ class ConfigTestCase(TestCase):
)
self.assertEqual(
item.serialize(), {
'allOf': [i.serialize() for i in item.items]
'allOf': [i.serialize() for i in item.items] # pylint: disable=E1133
}
)

View File

@ -395,7 +395,7 @@ class UtilsTestCase(TestCase):
('test_state0', {'result': True}),
('test_state', {'result': True}),
])),
('host2', [])
('host2', OrderedDict([]))
]))
])
}