Merge branch '2017.7' into 2017.7-local-client-hotfix

gwiyeong 2018-02-13 11:09:06 +09:00 committed by GitHub
commit 949aefc82b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 272 additions and 77 deletions

View File

@@ -2,7 +2,7 @@
Salt 2016.11.9 Release Notes
============================
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.]
Version 2016.11.9 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
Changes for v2016.11.8..v2016.11.9
----------------------------------------------------------------

View File

@@ -0,0 +1,5 @@
===========================
Salt 2017.7.4 Release Notes
===========================
Version 2017.7.4 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.

View File

@@ -32,7 +32,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
from libcloud.compute.base import NodeState
from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@@ -52,12 +52,6 @@ try:
except ImportError:
HAS_LIBCLOUD = False
# Import generic libcloud functions
# from salt.cloud.libcloudfuncs import *
# Import salt libs
import salt.utils
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils import namespaced_function
@@ -220,7 +214,6 @@ def create(vm_):
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
rootPw = NodeAuthPassword(vm_['auth'])
location = conn.ex_get_location_by_id(vm_['location'])
images = conn.list_images(location=location)
@@ -251,15 +244,13 @@ def create(vm_):
kwargs = {
'name': vm_['name'],
'image': image,
'auth': rootPw,
'ex_description': vm_['description'],
'ex_network_domain': network_domain,
'ex_vlan': vlan,
'ex_is_started': vm_['is_started']
}
event_data = kwargs.copy()
del event_data['auth']
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
@@ -270,6 +261,10 @@ def create(vm_):
transport=__opts__['transport']
)
# Initial password (excluded from event payload)
initial_password = NodeAuthPassword(vm_['auth'])
kwargs['auth'] = initial_password
try:
data = conn.create_node(**kwargs)
except Exception as exc:
@@ -283,7 +278,7 @@ def create(vm_):
return False
try:
data = salt.utils.cloud.wait_for_ip(
data = __utils__['cloud.wait_for_ip'](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
@@ -309,7 +304,7 @@ def create(vm_):
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address %s', ip_address)
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: %s', salt_ip_address)
else:
@@ -325,7 +320,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = vm_['auth']
ret = salt.utils.cloud.bootstrap(vm_, __opts__)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
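This hunk, like the wait_for_ip and get_salt_interface changes above, swaps direct salt.utils.cloud.* calls for lookups through the loader-injected __utils__ dunder, so the driver uses whatever utility modules the running daemon has loaded. A self-contained sketch of the dispatch idea, with a stub dict standing in for the real loader (all names here are illustrative):

# Stand-in for the __utils__ mapping that Salt's loader injects into
# cloud drivers at runtime; the driver itself never imports the utils.
def _bootstrap(vm_, opts):
    return {'deployed': vm_['name']}

__utils__ = {'cloud.bootstrap': _bootstrap}

vm_ = {'name': 'dd-node-1'}
__opts__ = {}

# Same call shape as the line this diff introduces.
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
print(ret)  # {'deployed': 'dd-node-1'}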
@@ -418,11 +413,13 @@ def create_lb(kwargs=None, call=None):
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@@ -431,11 +428,13 @@ def create_lb(kwargs=None, call=None):
name, port, protocol, algorithm, members
)
event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
args=kwargs,
args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@@ -577,3 +576,46 @@ def get_lb_conn(dd_driver=None):
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
def _to_event_data(obj):
'''
Convert the specified object into a form that can be serialised by msgpack as event data.
:param obj: The object to convert.
'''
if obj is None:
return None
if isinstance(obj, bool):
return obj
if isinstance(obj, int):
return obj
if isinstance(obj, float):
return obj
if isinstance(obj, str):
return obj
if isinstance(obj, bytes):
return obj
if isinstance(obj, dict):
return obj
if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
return obj.name
if isinstance(obj, list):
return [_to_event_data(item) for item in obj]
event_data = {}
for attribute_name in dir(obj):
if attribute_name.startswith('_'):
continue
attribute_value = getattr(obj, attribute_name)
if callable(attribute_value): # Strip out methods
continue
event_data[attribute_name] = _to_event_data(attribute_value)
return event_data
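The helper exists because create_node kwargs and load-balancer arguments can hold live libcloud objects, and a driver handle in particular contains cyclic references that would make the event payload unserialisable by msgpack. A minimal, self-contained sketch of the same flattening idea, using stand-in classes rather than libcloud's:

# Illustrative stand-ins: Driver/Node here are NOT libcloud classes.
class Driver(object):
    name = 'dimensiondata'

class Node(object):
    def __init__(self, driver):
        self.id = 'node-01'
        self.extra = {'vlan': 'vlan-ab12'}
        self.driver = driver      # back-reference that would recurse forever
        driver.node = self        # completes the cycle

def to_event_data(obj):
    if obj is None or isinstance(obj, (bool, int, float, str, bytes, dict)):
        return obj
    if isinstance(obj, Driver):   # cycle-breaker: keep only the name
        return obj.name
    if isinstance(obj, list):
        return [to_event_data(item) for item in obj]
    # Fall back to the public, non-callable attributes, flattened recursively
    return dict(
        (name, to_event_data(getattr(obj, name)))
        for name in dir(obj)
        if not name.startswith('_') and not callable(getattr(obj, name))
    )

print(to_event_data(Node(Driver())))
# {'driver': 'dimensiondata', 'extra': {'vlan': 'vlan-ab12'}, 'id': 'node-01'}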

View File

@@ -128,7 +128,7 @@ def _linux_disks():
for entry in glob.glob('/sys/block/*/queue/rotational'):
try:
with salt.utils.files.fopen(entry) as entry_fp:
with salt.utils.fopen(entry) as entry_fp:
device = entry.split('/')[3]
flag = entry_fp.read(1)
if flag == '0':

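For context, the hunk above only corrects the import path (salt.utils.files.fopen does not exist on the 2017.7 branch); the surrounding logic classifies block devices by the sysfs rotational flag. A standalone sketch of that check, using plain open() in place of Salt's fopen wrapper:

import glob

# On Linux, /sys/block/<dev>/queue/rotational holds '0' for SSDs
# and '1' for spinning disks.
ssds, disks = [], []
for entry in glob.glob('/sys/block/*/queue/rotational'):
    with open(entry) as entry_fp:
        device = entry.split('/')[3]
        flag = entry_fp.read(1)
        if flag == '0':
            ssds.append(device)
        elif flag == '1':
            disks.append(device)

print({'SSDs': ssds, 'disks': disks})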
View File

@@ -17,6 +17,7 @@ import logging.handlers
# Import salt libs
from salt.log.mixins import NewStyleClassMixIn, ExcInfoOnLogLevelFormatMixIn
from salt.ext.six.moves import queue
log = logging.getLogger(__name__)
@@ -174,7 +175,12 @@ if sys.version_info < (3, 2):
this method if you want to use blocking, timeouts or custom queue
implementations.
'''
self.queue.put_nowait(record)
try:
self.queue.put_nowait(record)
except queue.Full:
sys.stderr.write('[WARNING ] Message queue is full, '
'unable to write "{0}" to log'.format(record)
)
def prepare(self, record):
'''

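The hunk above guards put_nowait with queue.Full so a flooded multiprocessing log queue degrades to a stderr warning instead of raising into the caller. A runnable sketch of the same guard against a deliberately tiny bounded queue (stdlib queue here; the module imports it via salt.ext.six.moves for Py2/Py3 compatibility):

import sys
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2; what six.moves resolves to

log_queue = queue.Queue(maxsize=2)
for record in ('one', 'two', 'three'):
    try:
        log_queue.put_nowait(record)
    except queue.Full:      # 'three' overflows the two-slot queue
        sys.stderr.write('[WARNING ] Message queue is full, '
                         'unable to write "{0}" to log\n'.format(record))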
View File

@@ -426,7 +426,7 @@ def _run(cmd,
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
env_cmd = ('su', runas, '-c', sys.executable)
env_cmd = ('su', '-', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
env_encoded = subprocess.Popen(

View File

@@ -777,8 +777,8 @@ def highstate(test=None, queue=False, **kwargs):
.. code-block:: bash
salt '*' state.higstate exclude=bar,baz
salt '*' state.higstate exclude=foo*
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv

View File

@@ -508,7 +508,7 @@ def get_pem_entries(glob_path):
.. code-block:: bash
salt '*' x509.read_pem_entries "/etc/pki/*.crt"
salt '*' x509.get_pem_entries "/etc/pki/*.crt"
'''
ret = {}

View File

@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',

View File

@@ -975,7 +975,7 @@ def extracted(name,
if result['result']:
# Get the path of the file in the minion cache
cached = __salt__['cp.is_cached'](source_match)
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
else:
log.debug(
'failed to download %s',

View File

@@ -6535,37 +6535,7 @@ def cached(name,
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache.
This state will in most cases not be useful in SLS files, but it is useful
when writing a state or remote-execution module that needs to make sure
that a file at a given URL has been downloaded to the cachedir. One example
of this is in the :py:func:`archive.extracted <salt.states.file.extracted>`
state:
.. code-block:: python
result = __states__['file.cached'](source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
skip_verify=skip_verify,
saltenv=__env__)
This will return a dictionary containing the state's return data, including
a ``result`` key which will state whether or not the state was successful.
Note that this will not catch exceptions, so it is best used within a
try/except.
Once this state has been run from within another state or remote-execution
module, the actual location of the cached file can be obtained using
:py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match)
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache.
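Both this docstring change and the archive.extracted fix above thread saltenv through to cp.is_cached because the minion caches each environment under its own directory; a file cached from prod is invisible to a base-env lookup. A hypothetical sketch of the path logic (is_cached here illustrates the cachedir layout, it is not Salt's implementation):

import os

def is_cached(cachedir, source, saltenv='base'):
    # Cached copies live under <cachedir>/files/<saltenv>/<relative path>
    path = os.path.join(cachedir, 'files', saltenv,
                        source.replace('salt://', ''))
    return path if os.path.exists(path) else ''

# A file cached from saltenv=prod is invisible to a base-env lookup:
print(is_cached('/var/cache/salt/minion', 'salt://issue45893/custom.tar.gz'))
print(is_cached('/var/cache/salt/minion', 'salt://issue45893/custom.tar.gz',
                saltenv='prod'))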

View File

@@ -39,7 +39,7 @@ A more involved example involves pulling from a custom repository.
- keyserver: keyserver.ubuntu.com
logstash:
pkg.installed
pkg.installed:
- fromrepo: ppa:wolfnet/logstash
Multiple packages can also be installed with the use of the pkgs

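The one-character fix above is the classic SLS pitfall: without the trailing colon, YAML parses pkg.installed as a bare string and the state receives no arguments. A quick demonstration with PyYAML (a Salt dependency, so assumed available):

import yaml

broken = "logstash:\n  pkg.installed\n"
fixed = (
    "logstash:\n"
    "  pkg.installed:\n"
    "    - fromrepo: ppa:wolfnet/logstash\n"
)
print(yaml.safe_load(broken))
# {'logstash': 'pkg.installed'}  -- just a string, no arguments
print(yaml.safe_load(fixed))
# {'logstash': {'pkg.installed': [{'fromrepo': 'ppa:wolfnet/logstash'}]}}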
View File

@@ -532,7 +532,7 @@ def query(url,
not isinstance(result_text, six.text_type):
result_text = result_text.decode(res_params['charset'])
ret['body'] = result_text
if 'Set-Cookie' in result_headers.keys() and cookies is not None:
if 'Set-Cookie' in result_headers and cookies is not None:
result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
for item in result_cookies:
sess_cookies.set_cookie(item)
@@ -857,12 +857,10 @@ def parse_cookie_header(header):
for cookie in cookies:
name = None
value = None
for item in cookie:
for item in list(cookie):
if item in attribs:
continue
name = item
value = cookie[item]
del cookie[name]
value = cookie.pop(item)
# cookielib.Cookie() requires an epoch
if 'expires' in cookie:
@@ -870,7 +868,7 @@ def parse_cookie_header(header):
# Fill in missing required fields
for req in reqd:
if req not in cookie.keys():
if req not in cookie:
cookie[req] = ''
if cookie['version'] == '':
cookie['version'] = 0
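Two small fixes land in this file: membership tests drop the needless .keys() call, and the parse loop now iterates over a snapshot of the cookie's keys, since deleting entries from a dict while iterating it raises RuntimeError on Python 3. A self-contained sketch of the loop's new shape (sample cookie data is illustrative):

attribs = ('expires', 'path', 'domain', 'version',
           'httponly', 'secure', 'comment', 'max-age')
cookie = {'session_id': 'abc123', 'path': '/', 'secure': ''}

name = value = None
for item in list(cookie):      # snapshot the keys: safe to mutate below
    if item in attribs:
        continue
    name = item
    value = cookie.pop(item)   # replaces `value = cookie[item]` plus a later del

print(name, value)             # session_id abc123
print(cookie)                  # only the recognised attributes remain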

View File

@@ -98,7 +98,7 @@ def _get_vault_connection():
Get the connection details for calling Vault, from local configuration if
it exists, or from the master otherwise
'''
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
def _use_local_config():
log.debug('Using Vault connection details from local config')
try:
return {
@@ -108,6 +108,11 @@ def _get_vault_connection():
except KeyError as err:
errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message)
raise salt.exceptions.CommandExecutionError(errmsg)
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
return _use_local_config()
elif '_ssh_version' in __opts__:
return _use_local_config()
else:
log.debug('Contacting master for Vault connection details')
return _get_token_and_url_from_master()
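The refactor hoists the local-config branch into a nested _use_local_config helper so the new salt-ssh case, detected via the _ssh_version key that salt-ssh places in the opts dict, reuses it instead of trying to reach a master. A simplified runnable sketch of the control flow, with a plain dict standing in for __opts__ and the master round-trip stubbed out:

def get_vault_connection(opts):
    def _use_local_config():
        return {
            'url': opts['vault']['url'],
            'token': opts['vault']['auth']['token'],
        }
    if 'vault' in opts and opts.get('__role', 'minion') == 'master':
        return _use_local_config()
    elif '_ssh_version' in opts:   # running under salt-ssh: no master channel
        return _use_local_config()
    else:
        return {'via': 'master'}   # stand-in for _get_token_and_url_from_master()

print(get_vault_connection({
    'vault': {'url': 'https://vault.example.com:8200',
              'auth': {'token': 's.example'}},
    '_ssh_version': '2017.7.3',
}))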

View File

@@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
'''
Integration tests for the Dimension Data cloud provider
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import random
import string
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.paths import FILES
from tests.support.helpers import expensiveTest
# Import Salt Libs
from salt.config import cloud_providers_config
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
def _random_name(size=6):
'''
Generates a random cloud instance name
'''
return 'cloud-test-' + ''.join(
random.choice(string.ascii_lowercase + string.digits)
for x in range(size)
)
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = _random_name()
PROVIDER_NAME = 'dimensiondata'
class DimensionDataTest(ShellCase):
'''
Integration tests for the Dimension Data cloud provider in Salt-Cloud
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
'''
super(DimensionDataTest, self).setUp()
# check if appropriate cloud provider and profile files are present
profile_str = 'dimensiondata-config'
providers = self.run_cloud('--list-providers')
if profile_str + ':' not in providers:
self.skipTest(
'Configuration file for {0} was not found. Check {0}.conf files '
'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
.format(PROVIDER_NAME)
)
# check if user_id, key, and region are present
config = cloud_providers_config(
os.path.join(
FILES,
'conf',
'cloud.providers.d',
PROVIDER_NAME + '.conf'
)
)
user_id = config[profile_str][PROVIDER_NAME]['user_id']
key = config[profile_str][PROVIDER_NAME]['key']
region = config[profile_str][PROVIDER_NAME]['region']
if user_id == '' or key == '' or region == '':
self.skipTest(
'A user Id, password, and a region '
'must be provided to run these tests. Check '
'tests/integration/files/conf/cloud.providers.d/{0}.conf'
.format(PROVIDER_NAME)
)
def test_list_images(self):
'''
Tests the return of running the --list-images command for the dimensiondata cloud provider
'''
image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME))
self.assertIn(
'Ubuntu 14.04 2 CPU',
[i.strip() for i in image_list]
)
def test_list_locations(self):
'''
Tests the return of running the --list-locations command for the dimensiondata cloud provider
'''
_list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME))
self.assertIn(
'Australia - Melbourne MCP2',
[i.strip() for i in _list_locations]
)
def test_list_sizes(self):
'''
Tests the return of running the --list-sizes command for the dimensiondata cloud provider
'''
_list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME))
self.assertIn(
'default',
[i.strip() for i in _list_sizes]
)
def test_instance(self):
'''
Test creating an instance on Dimension Data's cloud
'''
# check if instance with salt installed returned
try:
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud('-p dimensiondata-test {0}'.format(INSTANCE_NAME), timeout=500)]
)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
raise
# delete the instance
try:
self.assertIn(
'True',
[i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)]
)
except AssertionError:
raise
# Final clean-up of created instance, in case something went wrong.
# This was originally in a tearDown function, but it didn't make sense
# to run it for every test when not all tests create instances.
if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)

View File

@@ -0,0 +1,11 @@
dimensiondata-test:
provider: dimensiondata-config
image: 42816eb2-9846-4483-95c3-7d7fbddebf2c
size: default
location: AU10
is_started: yes
description: 'Salt Ubuntu test'
network_domain: ''
vlan: ''
ssh_interface: private_ips
auth: ''

View File

@@ -0,0 +1,5 @@
dimensiondata-config:
driver: dimensiondata
user_id: ''
key: ''
region: 'dd-au'

View File

@@ -0,0 +1,5 @@
test_non_base_env:
archive.extracted:
- name: {{ pillar['issue45893.name'] }}
- source: salt://issue45893/custom.tar.gz
- keep: False

View File

@@ -68,6 +68,16 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
log.debug('Checking for extracted file: %s', path)
self.assertTrue(os.path.isfile(path))
def run_function(self, *args, **kwargs):
ret = super(ArchiveTest, self).run_function(*args, **kwargs)
log.debug('ret = %s', ret)
return ret
def run_state(self, *args, **kwargs):
ret = super(ArchiveTest, self).run_state(*args, **kwargs)
log.debug('ret = %s', ret)
return ret
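These two overrides centralise the log.debug('ret = %s', ret) call that the hunks below delete from every individual test. The pattern in isolation, as a runnable sketch (Base stands in for ModuleCase):

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

class Base(object):
    def run_state(self, *args, **kwargs):
        return {'result': True, 'args': args}

class ArchiveTest(Base):
    def run_state(self, *args, **kwargs):
        ret = super(ArchiveTest, self).run_state(*args, **kwargs)
        log.debug('ret = %s', ret)   # one choke point instead of per-test calls
        return ret

ArchiveTest().run_state('archive.extracted', name='/tmp/issue45893')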
def test_archive_extracted_skip_verify(self):
'''
test archive.extracted with skip_verify
@@ -75,7 +85,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=self.archive_tar_source, archive_format='tar',
skip_verify=True)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
self.assertSaltTrueReturn(ret)
@@ -91,7 +100,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=self.archive_tar_source, archive_format='tar',
source_hash=ARCHIVE_TAR_HASH)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
@@ -111,7 +119,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
source=self.archive_tar_source, archive_format='tar',
source_hash=ARCHIVE_TAR_HASH,
user='root', group=r_group)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
@@ -128,7 +135,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
source_hash=ARCHIVE_TAR_HASH,
options='--strip=1',
enforce_toplevel=False)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
@@ -145,7 +151,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
source_hash=ARCHIVE_TAR_HASH,
options='--strip-components=1',
enforce_toplevel=False)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
@@ -160,7 +165,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=self.archive_tar_source,
source_hash=ARCHIVE_TAR_HASH)
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
self.assertSaltTrueReturn(ret)
@@ -177,7 +181,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
source_hash=ARCHIVE_TAR_HASH,
use_cmd_unzip=False,
archive_format='tar')
log.debug('ret = %s', ret)
if 'Timeout' in ret:
self.skipTest('Timeout talking to local tornado server.')
self.assertSaltTrueReturn(ret)
@@ -190,7 +193,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
'''
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar')
log.debug('ret = %s', ret)
self.assertSaltTrueReturn(ret)
@@ -203,7 +205,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True)
log.debug('ret = %s', ret)
self.assertSaltTrueReturn(ret)
@@ -216,7 +217,6 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
source_hash=ARCHIVE_TAR_HASH)
log.debug('ret = %s', ret)
self.assertSaltTrueReturn(ret)
@@ -229,6 +229,17 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
source=ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
source_hash=ARCHIVE_TAR_BAD_HASH)
log.debug('ret = %s', ret)
self.assertSaltFalseReturn(ret)
def test_archive_extracted_with_non_base_saltenv(self):
'''
test archive.extracted with a saltenv other than `base`
'''
ret = self.run_function(
'state.sls',
['issue45893'],
pillar={'issue45893.name': ARCHIVE_DIR},
saltenv='prod')
self.assertSaltTrueReturn(ret)
self._check_extracted(os.path.join(ARCHIVE_DIR, UNTAR_FILE))