Merge pull request #45512 from terminalmage/py3-b

[PY3] Update modules/states for PY3/Unicode compatibility (B)
This commit is contained in:
Nicole Thomas 2018-01-18 10:53:58 -05:00 committed by GitHub
commit 8123025a3b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
125 changed files with 991 additions and 1027 deletions

View File

@ -14,7 +14,7 @@ Requires a ``subdomain`` and an ``apikey`` in ``/etc/salt/minion``:
'''
# Import python libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
@ -265,7 +265,7 @@ def _query(action=None,
if command:
path += '/{0}'.format(command)
log.debug('BambooHR URL: {0}'.format(path))
log.debug('BambooHR URL: %s', path)
if not isinstance(args, dict):
args = {}
@ -283,10 +283,6 @@ def _query(action=None,
status=True,
opts=__opts__,
)
log.debug(
'BambooHR Response Status Code: {0}'.format(
result['status']
)
)
log.debug('BambooHR Response Status Code: %s', result['status'])
return [result['status'], result['text']]

View File

@ -16,7 +16,7 @@ This module needs the bcache userspace tools to function.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import time
@ -96,7 +96,7 @@ def attach_(dev=None):
'''
cache = uuid()
if not cache:
log.error('No cache to attach {0} to'.format(dev))
log.error('No cache to attach %s to', dev)
return False
if dev is None:
@ -113,12 +113,12 @@ def attach_(dev=None):
bcache = uuid(dev)
if bcache:
if bcache == cache:
log.info('{0} is already attached to bcache {1}, doing nothing'.format(dev, cache))
log.info('%s is already attached to bcache %s, doing nothing', dev, cache)
return None
elif not detach(dev):
return False
log.debug('Attaching {0} to bcache {1}'.format(dev, cache))
log.debug('Attaching %s to bcache %s', dev, cache)
if not _bcsys(dev, 'attach', cache,
'error', 'Error attaching {0} to bcache {1}'.format(dev, cache)):
@ -155,7 +155,7 @@ def detach(dev=None):
else:
return None
log.debug('Detaching {0}'.format(dev))
log.debug('Detaching %s', dev)
if not _bcsys(dev, 'detach', 'goaway', 'error', 'Error detaching {0}'.format(dev)):
return False
return _wait(lambda: uuid(dev) is False, 'error', '{0} received detach, but did not comply'.format(dev), 300)
@ -172,7 +172,7 @@ def start():
salt '*' bcache.start
'''
if not _run_all('udevadm trigger', 'error', 'Error starting bcache: {{0}}'):
if not _run_all('udevadm trigger', 'error', 'Error starting bcache: %s'):
return False
elif not _wait(lambda: uuid() is not False, 'warn', 'Bcache system started, but no active cache set found.'):
return False
@ -196,7 +196,7 @@ def stop(dev=None):
'''
if dev is not None:
log.warning('Stopping {0}, device will only reappear after reregistering!'.format(dev))
log.warning('Stopping %s, device will only reappear after reregistering!', dev)
if not _bcsys(dev, 'stop', 'goaway', 'error', 'Error stopping {0}'.format(dev)):
return False
return _wait(lambda: _sysfs_attr(_bcpath(dev)) is False, 'error', 'Device {0} did not stop'.format(dev), 300)
@ -239,7 +239,7 @@ def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size
return False
elif _sysfs_attr(_bcpath(dev)):
if not force:
log.error('{0} already contains a bcache. Wipe it manually or use force'.format(dev))
log.error('%s already contains a bcache. Wipe it manually or use force', dev)
return False
elif uuid(dev) and not detach(dev):
return False
@ -256,10 +256,10 @@ def back_make(dev, cache_mode='writeback', force=False, attach=True, bucket_size
if force:
cmd += ' --wipe-bcache'
if not _run_all(cmd, 'error', 'Error creating backing device {0}: {{0}}'.format(dev)):
if not _run_all(cmd, 'error', 'Error creating backing device {0}: %s'.format(dev)):
return False
elif not _sysfs_attr('fs/bcache/register', _devpath(dev),
'error', 'Error registering backing device {0}: {{0}}'.format(dev)):
'error', 'Error registering backing device {0}'.format(dev)):
return False
elif not _wait(lambda: _sysfs_attr(_bcpath(dev)) is not False,
'error', 'Backing device {0} did not register'.format(dev)):
@ -298,7 +298,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
cache = uuid()
if cache:
if not force:
log.error('BCache cache {0} is already on the system'.format(cache))
log.error('BCache cache %s is already on the system', cache)
return False
cache = _bdev()
@ -307,7 +307,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
if ('ID_FS_TYPE' in udev or (udev.get('DEVTYPE', None) != 'partition' and 'ID_PART_TABLE_TYPE' in udev)) \
and not force:
log.error('{0} already contains data, wipe first or force'.format(dev))
log.error('%s already contains data, wipe first or force', dev)
return False
elif reserved is not None and udev.get('DEVTYPE', None) != 'disk':
log.error('Need a partitionable blockdev for reserved to work')
@ -340,7 +340,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
'/dev/{0} mklabel gpt mkpart bcache-reserved 1M {1} mkpart bcache {1} 100%'.format(dev, reserved)
# if wipe was incomplete & part layout remains the same,
# this is one condition set where udev would make it accidentally popup again
if not _run_all(cmd, 'error', 'Error creating bcache partitions on {0}: {{0}}'.format(dev)):
if not _run_all(cmd, 'error', 'Error creating bcache partitions on {0}: %s'.format(dev)):
return False
dev = '{0}2'.format(dev)
@ -351,7 +351,7 @@ def cache_make(dev, reserved=None, force=False, block_size=None, bucket_size=Non
if bucket_size:
cmd += ' --bucket {0}'.format(bucket_size)
if not _run_all(cmd, 'error', 'Error creating cache {0}: {{0}}'.format(dev)):
if not _run_all(cmd, 'error', 'Error creating cache {0}: %s'.format(dev)):
return False
elif not _wait(lambda: uuid() is not False,
'error', 'Cache {0} seemingly created OK, but FS did not activate'.format(dev)):
@ -582,7 +582,7 @@ def super_(dev):
dev = _devpath(dev)
ret = {}
res = _run_all('bcache-super-show {0}'.format(dev), 'error', 'Error reading superblock on {0}: {{0}}'.format(dev))
res = _run_all('bcache-super-show {0}'.format(dev), 'error', 'Error reading superblock on {0}: %s'.format(dev))
if not res:
return False
@ -714,7 +714,7 @@ def _sysfs_attr(name, value=None, log_lvl=None, log_msg=None):
if isinstance(name, six.string_types):
name = [name]
res = __salt__['sysfs.attr'](os.path.join(*name), value)
if not res and log_lvl is not None:
if not res and log_lvl is not None and log_msg is not None:
log.log(LOG[log_lvl], log_msg)
return res
@ -894,10 +894,10 @@ def _wipe(dev):
size, block, discard = _sizes(dev)
if discard is None:
log.error('Unable to read SysFS props for {0}'.format(dev))
log.error('Unable to read SysFS props for %s', dev)
return None
elif not discard:
log.warning('{0} seems unable to discard'.format(dev))
log.warning('%s seems unable to discard', dev)
wiper = 'dd'
elif not HAS_BLKDISCARD:
log.warning('blkdiscard binary not available, properly wipe the dev manually for optimal results')
@ -905,7 +905,7 @@ def _wipe(dev):
else:
wiper = 'blkdiscard'
wipe_failmsg = 'Error wiping {0}: {{0}}'.format(dev)
wipe_failmsg = 'Error wiping {0}: %s'.format(dev)
if wiper == 'dd':
blocks = 4
cmd = 'dd if=/dev/zero of=/dev/{0} bs=1M count={1}'.format(dev, blocks)
@ -918,7 +918,7 @@ def _wipe(dev):
elif wiper == 'blkdiscard':
cmd = 'blkdiscard /dev/{0}'.format(dev)
endres += _run_all(cmd, 'warn', wipe_failmsg)
# TODO: FUCKING annoying bug failing blkdiscard by trying to discard 1 sector past blkdev
# TODO: fix annoying bug failing blkdiscard by trying to discard 1 sector past blkdev
endres = 1
return endres > 0
@ -956,7 +956,7 @@ def _run_all(cmd, log_lvl=None, log_msg=None, exitcode=0):
return True
if log_lvl is not None:
log.log(LOG[log_lvl], log_msg.format(res['stderr']))
log.log(LOG[log_lvl], log_msg, res['stderr'])
return False

View File

@ -7,7 +7,7 @@ Module for managing the Salt beacons on a minion
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import difflib
import logging
import os
@ -62,7 +62,7 @@ def list_(return_yaml=True,
'manage_beacons')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30)
log.debug('event_ret {0}'.format(event_ret))
log.debug('event_ret %s', event_ret)
if event_ret and event_ret['complete']:
beacons = event_ret['beacons']
except KeyError:
@ -435,7 +435,7 @@ def disable(**kwargs):
res = __salt__['event.fire']({'func': 'disable'}, 'manage_beacons')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_disabled_complete', wait=30)
log.debug('event_ret {0}'.format(event_ret))
log.debug('event_ret %s', event_ret)
if event_ret and event_ret['complete']:
beacons = event_ret['beacons']
if 'enabled' in beacons and not beacons['enabled']:

View File

@ -6,9 +6,8 @@ An execution module which can manipulate an f5 bigip via iControl REST
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.json
import logging as logger
# Import third party libs
try:
@ -24,9 +23,6 @@ from salt.ext import six
# Import salt libs
import salt.exceptions
# Setup the logger
log = logger.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'bigip'
@ -168,8 +164,6 @@ def _set_value(value):
A function to detect if user is trying to pass a dictionary or list. parse it and return a
dictionary list or a string
'''
logger.error(value)
#don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value

View File

@ -9,10 +9,7 @@ The following packages are required packages for this module:
bluez-utils >= 5.7
pybluez >= 0.18
'''
from __future__ import absolute_import
# Import python libs
import logging
from __future__ import absolute_import, print_function, unicode_literals
# Import 3rd-party libs
# pylint: disable=import-error
@ -24,7 +21,6 @@ import salt.utils.validate.net
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
HAS_PYBLUEZ = False
try:
import bluetooth # pylint: disable=import-error

View File

@ -47,7 +47,7 @@ Execution module for Amazon Elasticache using boto3
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
@ -132,8 +132,10 @@ def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None
"int or boolean.".format(wait))
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'{1}: {2}'".format(name, name_param, args[name_param]))
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
name, name_param, args[name_param]
)
name = args[name_param]
else:
args[name_param] = name
@ -150,25 +152,25 @@ def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None
f(**args)
if not wait:
log.info('{0} {1} deletion requested.'.format(desc.title(), name))
log.info('%s %s deletion requested.', desc.title(), name)
return True
log.info('Waiting up to {0} seconds for {1} {2} to be deleted.'.format(wait, desc, name))
log.info('Waiting up to %s seconds for %s %s to be deleted.', wait, desc, name)
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if not r or not len(r) or r[0].get(status_param) == status_gone:
log.info('{0} {1} deleted.'.format(desc.title(), name))
log.info('%s %s deleted.', desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping {0} seconds for {1} {2} to be deleted.'.format(sleep, desc,
name))
log.info('Sleeping %s seconds for %s %s to be deleted.',
sleep, desc, name)
time.sleep(sleep)
wait -= sleep
log.error('{0} {1} not deleted after {2} seconds!'.format(desc.title(), name, orig_wait))
log.error('%s %s not deleted after %s seconds!', desc.title(), name, orig_wait)
return False
except botocore.exceptions.ClientError as e:
log.error('Failed to delete {0} {1}: {2}'.format(desc, name, e))
log.error('Failed to delete %s %s: %s', desc, name, e)
return False
@ -182,8 +184,10 @@ def _create_resource(name, name_param=None, desc=None, res_type=None, wait=0, st
"int or boolean.".format(wait))
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'{1}: {2}'".format(name, name_param, args[name_param]))
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
name, name_param, args[name_param]
)
name = args[name_param]
else:
args[name_param] = name
@ -199,22 +203,23 @@ def _create_resource(name, name_param=None, desc=None, res_type=None, wait=0, st
try:
f(**args)
if not wait:
log.info('{0} {1} created.'.format(desc.title(), name))
log.info('%s %s created.', desc.title(), name)
return True
log.info('Waiting up to {0} seconds for {1} {2} to be become available.'.format(wait, desc,
name))
log.info('Waiting up to %s seconds for %s %s to be become available.',
wait, desc, name)
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if r and r[0].get(status_param) == status_good:
log.info('{0} {1} created and available.'.format(desc.title(), name))
log.info('%s %s created and available.', desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping {0} seconds for {1} {2} to become available.'.format(sleep, desc,
name))
log.info('Sleeping %s seconds for %s %s to become available.',
sleep, desc, name)
time.sleep(sleep)
wait -= sleep
log.error('{0} {1} not available after {2} seconds!'.format(desc.title(), name, orig_wait))
log.error('%s %s not available after %s seconds!',
desc.title(), name, orig_wait)
return False
except botocore.exceptions.ClientError as e:
msg = 'Failed to create {0} {1}: {2}'.format(desc, name, e)
@ -232,8 +237,10 @@ def _modify_resource(name, name_param=None, desc=None, res_type=None, wait=0, st
"int or boolean.".format(wait))
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'{1}: {2}'".format(name, name_param, args[name_param]))
log.info(
"'name: %s' param being overridden by explicitly provided '%s: %s'",
name, name_param, args[name_param]
)
name = args[name_param]
else:
args[name_param] = name
@ -249,22 +256,23 @@ def _modify_resource(name, name_param=None, desc=None, res_type=None, wait=0, st
try:
f(**args)
if not wait:
log.info('{0} {1} modification requested.'.format(desc.title(), name))
log.info('%s %s modification requested.', desc.title(), name)
return True
log.info('Waiting up to {0} seconds for {1} {2} to be become available.'.format(wait, desc,
name))
log.info('Waiting up to %s seconds for %s %s to be become available.',
wait, desc, name)
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if r and r[0].get(status_param) == status_good:
log.info('{0} {1} modified and available.'.format(desc.title(), name))
log.info('%s %s modified and available.', desc.title(), name)
return True
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping {0} seconds for {1} {2} to become available.'.format(sleep, desc,
name))
log.info('Sleeping %s seconds for %s %s to become available.',
sleep, desc, name)
time.sleep(sleep)
wait -= sleep
log.error('{0} {1} not available after {2} seconds!'.format(desc.title(), name, orig_wait))
log.error('%s %s not available after %s seconds!',
desc.title(), name, orig_wait)
return False
except botocore.exceptions.ClientError as e:
msg = 'Failed to modify {0} {1}: {2}'.format(desc, name, e)
@ -552,13 +560,13 @@ def create_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=N
sn = __salt__['boto_vpc.describe_subnets'](subnet_names=subnet, region=region, key=key,
keyid=keyid, profile=profile).get('subnets')
if not sn:
raise SaltInvocationError('Could not resolve Subnet Name {0} to an '
'ID.'.format(subnet))
raise SaltInvocationError(
'Could not resolve Subnet Name {0} to an ID.'.format(subnet))
if len(sn) == 1:
args['SubnetIds'] += [sn[0]['id']]
elif len(sn) > 1:
raise CommandExecutionError('Subnet Name {0} returned more than one '
'ID.'.format(subnet))
raise CommandExecutionError(
'Subnet Name {0} returned more than one ID.'.format(subnet))
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
return _create_resource(name, name_param='CacheSubnetGroupName', desc='cache subnet group',
res_type='cache_subnet_group',
@ -589,14 +597,14 @@ def modify_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=N
if len(sn) == 1:
args['SubnetIds'] += [sn[0]['id']]
elif len(sn) > 1:
raise CommandExecutionError('Subnet Name {0} returned more than one '
'ID.'.format(subnet))
raise CommandExecutionError(
'Subnet Name {0} returned more than one ID.'.format(subnet))
elif subnet.startswith('subnet-'):
# Moderately safe assumption... :) Will be caught later if incorrect.
args['SubnetIds'] += [subnet]
else:
raise SaltInvocationError('Could not resolve Subnet Name {0} to an '
'ID.'.format(subnet))
raise SaltInvocationError(
'Could not resolve Subnet Name {0} to an ID.'.format(subnet))
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
return _modify_resource(name, name_param='CacheSubnetGroupName', desc='cache subnet group',
res_type='cache_subnet_group',
@ -693,18 +701,22 @@ def authorize_cache_security_group_ingress(name, region=None, key=None, keyid=No
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'CacheSecurityGroupName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'CacheSecurityGroupName: {1}'".format(name, args['CacheSecurityGroupName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'CacheSecurityGroupName: %s'",
name, args['CacheSecurityGroupName']
)
name = args['CacheSecurityGroupName']
else:
args['CacheSubnetGroupName'] = name
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
conn.authorize_cache_security_group_ingress(**args)
log.info('Authorized {0} to cache security group {1}.'.format(args['EC2SecurityGroupName'], name))
log.info('Authorized %s to cache security group %s.',
args['EC2SecurityGroupName'], name)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to update security group {0}: {1}'.format(name, e))
log.error('Failed to update security group %s: %s', name, e)
return False
@ -724,18 +736,22 @@ def revoke_cache_security_group_ingress(name, region=None, key=None, keyid=None,
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'CacheSecurityGroupName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'CacheSecurityGroupName: {1}'".format(name, args['CacheSecurityGroupName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'CacheSecurityGroupName: %s'",
name, args['CacheSecurityGroupName']
)
name = args['CacheSecurityGroupName']
else:
args['CacheSubnetGroupName'] = name
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
conn.revoke_cache_security_group_ingress(**args)
log.info('Revoked {0} from cache security group {1}.'.format(args['EC2SecurityGroupName'], name))
log.info('Revoked %s from cache security group %s.',
args['EC2SecurityGroupName'], name)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to update security group {0}: {1}'.format(name, e))
log.error('Failed to update security group %s: %s', name, e)
return False
@ -759,8 +775,10 @@ def list_tags_for_resource(name, region=None, key=None, keyid=None, profile=None
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'ResourceName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'ResourceName: {1}'".format(name, args['ResourceName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'ResourceName: %s'", name, args['ResourceName']
)
name = args['ResourceName']
else:
args['ResourceName'] = name
@ -771,7 +789,7 @@ def list_tags_for_resource(name, region=None, key=None, keyid=None, profile=None
return r['TagList']
return []
except botocore.exceptions.ClientError as e:
log.error('Failed to list tags for resource {0}: {1}'.format(name, e))
log.error('Failed to list tags for resource %s: %s', name, e)
return []
@ -796,18 +814,20 @@ def add_tags_to_resource(name, region=None, key=None, keyid=None, profile=None,
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'ResourceName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'ResourceName: {1}'".format(name, args['ResourceName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'ResourceName: %s'", name, args['ResourceName']
)
name = args['ResourceName']
else:
args['ResourceName'] = name
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
conn.add_tags_to_resource(**args)
log.info('Added tags {0} to {1}.'.format(args['Tags'], name))
log.info('Added tags %s to %s.', args['Tags'], name)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to add tags to {0}: {1}'.format(name, e))
log.error('Failed to add tags to %s: %s', name, e)
return False
@ -832,18 +852,20 @@ def remove_tags_from_resource(name, region=None, key=None, keyid=None, profile=N
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'ResourceName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'ResourceName: {1}'".format(name, args['ResourceName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'ResourceName: %s'", name, args['ResourceName']
)
name = args['ResourceName']
else:
args['ResourceName'] = name
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
conn.remove_tags_from_resource(**args)
log.info('Added tags {0} to {1}.'.format(args['Tags'], name))
log.info('Added tags %s to %s.', args['Tags'], name)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to add tags to {0}: {1}'.format(name, e))
log.error('Failed to add tags to %s: %s', name, e)
return False
@ -860,18 +882,20 @@ def copy_snapshot(name, region=None, key=None, keyid=None, profile=None, **args)
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if 'SourceSnapshotName' in args:
log.info("'name: {0}' param being overridden by explicitly provided "
"'SourceSnapshotName: {1}'".format(name, args['SourceSnapshotName']))
log.info(
"'name: %s' param being overridden by explicitly provided "
"'SourceSnapshotName: %s'", name, args['SourceSnapshotName']
)
name = args['SourceSnapshotName']
else:
args['SourceSnapshotName'] = name
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
conn.copy_snapshot(**args)
log.info('Snapshot {0} copied to {1}.'.format(name, args['TargetSnapshotName']))
log.info('Snapshot %s copied to %s.', name, args['TargetSnapshotName'])
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to copy snapshot {0}: {1}'.format(name, e))
log.error('Failed to copy snapshot %s: %s', name, e)
return False

View File

@ -49,7 +49,7 @@ Execution module for Amazon Route53 written against Boto 3
#pylint: disable=E0602,W0106
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
@ -101,7 +101,7 @@ def _collect_results(func, item, args, marker='Marker', nextmarker='NextMarker')
time.sleep(3)
tries -= 1
continue
log.error('Could not collect results from {0}(): {1}'.format(func, str(e)))
log.error('Could not collect results from %s(): %s', func, e)
return []
i = r.get(item, []) if item else r
i.pop('ResponseMetadata', None) if isinstance(i, dict) else None
@ -113,7 +113,7 @@ def _collect_results(func, item, args, marker='Marker', nextmarker='NextMarker')
def _wait_for_sync(change, conn, tries=10, sleep=20):
for retry in range(1, tries+1):
log.info('Getting route53 status (attempt {0})'.format(retry))
log.info('Getting route53 status (attempt %s)', retry)
status = 'wait'
try:
status = conn.get_change(Id=change)['ChangeInfo']['Status']
@ -176,8 +176,10 @@ def find_hosted_zone(Id=None, Name=None, PrivateZone=None,
if PrivateZone is not None:
ret = [m for m in ret if m['HostedZone']['Config']['PrivateZone'] is PrivateZone]
if len(ret) > 1:
log.error('Request matched more than one Hosted Zone ({0}). Refine your criteria and try '
'again.'.format([z['HostedZone']['Id'] for z in ret]))
log.error(
'Request matched more than one Hosted Zone (%s). Refine your '
'criteria and try again.', [z['HostedZone']['Id'] for z in ret]
)
ret = []
return ret
@ -348,8 +350,10 @@ def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerRef
deets = find_hosted_zone(Name=Name, PrivateZone=PrivateZone,
region=region, key=key, keyid=keyid, profile=profile)
if deets:
log.info('Route 53 hosted zone {0} already exists. You may want to pass e.g. '
"'PrivateZone=True' or similar...".format(Name))
log.info(
'Route 53 hosted zone %s already exists. You may want to pass '
'e.g. \'PrivateZone=True\' or similar...', Name
)
return None
args = {
'Name': Name,
@ -373,8 +377,10 @@ def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerRef
log.error('Private zone requested but no VPC matching given criteria found.')
return None
if len(vpcs) > 1:
log.error('Private zone requested but multiple VPCs matching given criteria found: '
'{0}.'.format([v['id'] for v in vpcs]))
log.error(
'Private zone requested but multiple VPCs matching given '
'criteria found: %s.', [v['id'] for v in vpcs]
)
return None
vpc = vpcs[0]
if VPCName:
@ -400,7 +406,7 @@ def create_hosted_zone(Name, VPCId=None, VPCName=None, VPCRegion=None, CallerRef
time.sleep(3)
tries -= 1
continue
log.error('Failed to create hosted zone {0}: {1}'.format(Name, str(e)))
log.error('Failed to create hosted zone %s: %s', Name, e)
return []
return []
@ -435,7 +441,7 @@ def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=Non
'key': key, 'keyid': keyid, 'profile': profile}
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a hosted zone ID.".format(Name))
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
Id = zone[0]['HostedZone']['Id']
tries = 10
@ -450,8 +456,8 @@ def update_hosted_zone_comment(Id=None, Name=None, Comment=None, PrivateZone=Non
time.sleep(3)
tries -= 1
continue
log.error('Failed to update comment on hosted zone {0}: {1}'.format(
Name or Id, str(e)))
log.error('Failed to update comment on hosted zone %s: %s',
Name or Id, e)
return []
@ -514,8 +520,10 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
'key': key, 'keyid': keyid, 'profile': profile}
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a private hosted zone"
'ID.'.format(Name))
log.error(
"Couldn't resolve domain name %s to a private hosted zone ID.",
Name
)
return False
HostedZoneId = zone[0]['HostedZone']['Id']
vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
@ -526,8 +534,8 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
log.error('No VPC matching the given criteria found.')
return False
if len(vpcs) > 1:
log.error('Multiple VPCs matching the given criteria found: {0}.'
''.format(', '.join([v['id'] for v in vpcs])))
log.error('Multiple VPCs matching the given criteria found: %s.',
', '.join([v['id'] for v in vpcs]))
return False
vpc = vpcs[0]
if VPCName:
@ -549,8 +557,8 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
time.sleep(3)
tries -= 1
continue
log.error('Failed to associate VPC {0} with hosted zone {1}: {2}'.format(
VPCName or VPCId, Name or HostedZoneId, str(e)))
log.error('Failed to associate VPC %s with hosted zone %s: %s',
VPCName or VPCId, Name or HostedZoneId, e)
return False
@ -606,8 +614,7 @@ def diassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
'key': key, 'keyid': keyid, 'profile': profile}
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a private hosted zone"
'ID.'.format(Name))
log.error("Couldn't resolve domain name %s to a private hosted zone ID.", Name)
return False
HostedZoneId = zone[0]['HostedZone']['Id']
vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
@ -618,8 +625,8 @@ def diassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
log.error('No VPC matching the given criteria found.')
return False
if len(vpcs) > 1:
log.error('Multiple VPCs matching the given criteria found: {0}.'
''.format(', '.join([v['id'] for v in vpcs])))
log.error('Multiple VPCs matching the given criteria found: %s.',
', '.join([v['id'] for v in vpcs]))
return False
vpc = vpcs[0]
if VPCName:
@ -641,8 +648,8 @@ def diassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
time.sleep(3)
tries -= 1
continue
log.error('Failed to associate VPC {0} with hosted zone {1}: {2}'.format(
VPCName or VPCId, Name or HostedZoneId, str(e)))
log.error('Failed to associate VPC %s with hosted zone %s: %s',
VPCName or VPCId, Name or HostedZoneId, e)
return False
@ -680,7 +687,7 @@ def delete_hosted_zone(Id, region=None, key=None, keyid=None, profile=None):
r = conn.delete_hosted_zone(Id=Id)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
log.error('Failed to delete hosted zone {0}: {1}'.format(Id, str(e)))
log.error('Failed to delete hosted zone %s: %s', Id, e)
return False
@ -699,7 +706,7 @@ def delete_hosted_zone_by_domain(Name, PrivateZone=None, region=None, key=None,
# name resolves to both, fail and require them to declare it explicitly.
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a hosted zone ID.".format(Name))
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return False
Id = zone[0]['HostedZone']['Id']
return delete_hosted_zone(Id=Id, region=region, key=key, keyid=keyid, profile=profile)
@ -736,7 +743,7 @@ def get_resource_records(HostedZoneId=None, Name=None, StartRecordName=None,
args.update({'PrivateZone': PrivateZone}) if PrivateZone is not None else None
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a hosted zone ID.".format(Name))
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
HostedZoneId = zone[0]['HostedZone']['Id']
@ -862,7 +869,7 @@ def change_resource_record_sets(HostedZoneId=None, Name=None,
args.update({'PrivateZone': PrivateZone}) if PrivateZone is not None else None
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name {0} to a hosted zone ID.".format(Name))
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return []
HostedZoneId = zone[0]['HostedZone']['Id']
@ -879,6 +886,6 @@ def change_resource_record_sets(HostedZoneId=None, Name=None,
time.sleep(3)
tries -= 1
continue
log.error('Failed to apply requested changes to the hosted zone {0}: {1}'.format(
Name or HostedZoneId, str(e)))
log.error('Failed to apply requested changes to the hosted zone %s: %s',
Name or HostedZoneId, e)
return False

View File

@ -42,7 +42,7 @@ Connection module for Amazon SNS
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
@ -135,13 +135,13 @@ def create_topic(Name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.create_topic(Name=Name)
log.info('SNS topic {0} created with ARN {1}'.format(Name, ret['TopicArn']))
log.info('SNS topic %s created with ARN %s', Name, ret['TopicArn'])
return ret['TopicArn']
except botocore.exceptions.ClientError as e:
log.error('Failed to create SNS topic {0}: {1}'.format(Name, e))
log.error('Failed to create SNS topic %s: %s', Name, e)
return None
except KeyError:
log.error('Failed to create SNS topic {0}'.format(Name))
log.error('Failed to create SNS topic %s', Name)
return None
@ -156,10 +156,10 @@ def delete_topic(TopicArn, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_topic(TopicArn=TopicArn)
log.info('SNS topic {0} deleted'.format(TopicArn))
log.info('SNS topic %s deleted', TopicArn)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to delete SNS topic {0}: {1}'.format(name, e))
log.error('Failed to delete SNS topic %s: %s', TopicArn, e)
return False
@ -176,7 +176,7 @@ def get_topic_attributes(TopicArn, region=None, key=None, keyid=None, profile=No
try:
return conn.get_topic_attributes(TopicArn=TopicArn).get('Attributes')
except botocore.exceptions.ClientError as e:
log.error('Failed to garner attributes for SNS topic {0}: {1}'.format(TopicArn, e))
log.error('Failed to garner attributes for SNS topic %s: %s', TopicArn, e)
return None
@ -193,13 +193,12 @@ def set_topic_attributes(TopicArn, AttributeName, AttributeValue, region=None, k
try:
conn.set_topic_attributes(TopicArn=TopicArn, AttributeName=AttributeName,
AttributeValue=AttributeValue)
log.debug('Set attribute {0}={1} on SNS topic {2}'.format(AttributeName, AttributeValue,
TopicArn))
log.debug('Set attribute %s=%s on SNS topic %s',
AttributeName, AttributeValue, TopicArn)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to set attribute {0}={1} for SNS topic {2}: {3}'.format(AttributeName,
AttributeValue,
TopicArn, e))
log.error('Failed to set attribute %s=%s for SNS topic %s: %s',
AttributeName, AttributeValue, TopicArn, e)
return False
@ -221,7 +220,7 @@ def list_subscriptions_by_topic(TopicArn, region=None, key=None, keyid=None, pro
subs = ret.get('Subscriptions', [])
res += subs
except botocore.exceptions.ClientError as e:
log.error('Failed to list subscriptions for SNS topic {0}: {1}'.format(TopicArn, e))
log.error('Failed to list subscriptions for SNS topic %s: %s', TopicArn, e)
return None
return res
@ -244,7 +243,7 @@ def list_subscriptions(region=None, key=None, keyid=None, profile=None):
subs = ret.get('Subscriptions', [])
res += subs
except botocore.exceptions.ClientError as e:
log.error('Failed to list SNS subscriptions: {0}'.format(e))
log.error('Failed to list SNS subscriptions: %s', e)
return None
return res
@ -262,11 +261,12 @@ def get_subscription_attributes(SubscriptionArn, region=None, key=None, keyid=No
ret = conn.get_subscription_attributes(SubscriptionArn=SubscriptionArn)
return ret['Attributes']
except botocore.exceptions.ClientError as e:
log.error('Failed to list attributes for SNS subscription {0}: {1}'.format(SubscriptionArn,
e))
log.error('Failed to list attributes for SNS subscription %s: %s',
SubscriptionArn, e)
return None
except KeyError:
log.error('Failed to list attributes for SNS subscription {0}'.format(SubscriptionArn))
log.error('Failed to list attributes for SNS subscription %s',
SubscriptionArn)
return None
@ -283,13 +283,12 @@ def set_subscription_attributes(SubscriptionArn, AttributeName, AttributeValue,
try:
conn.set_subscription_attributes(SubscriptionArn=SubscriptionArn,
AttributeName=AttributeName, AttributeValue=AttributeValue)
log.debug('Set attribute {0}={1} on SNS subscription {2}'.format(AttributeName,
AttributeValue,
SubscriptionArn))
log.debug('Set attribute %s=%s on SNS subscription %s',
AttributeName, AttributeValue, SubscriptionArn)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to set attribute {0}={1} for SNS subscription {2}: {3}'.format(
AttributeName, AttributeValue, SubscriptionArn, e))
log.error('Failed to set attribute %s=%s for SNS subscription %s: %s',
AttributeName, AttributeValue, SubscriptionArn, e)
return False
@ -304,14 +303,14 @@ def subscribe(TopicArn, Protocol, Endpoint, region=None, key=None, keyid=None, p
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ret = conn.subscribe(TopicArn=TopicArn, Protocol=Protocol, Endpoint=Endpoint)
log.info('Subscribed {0} {1} to topic {2} with SubscriptionArn {3}'.format(
Protocol, Endpoint, TopicArn, ret['SubscriptionArn']))
log.info('Subscribed %s %s to topic %s with SubscriptionArn %s',
Protocol, Endpoint, TopicArn, ret['SubscriptionArn'])
return ret['SubscriptionArn']
except botocore.exceptions.ClientError as e:
log.error('Failed to create subscription to SNS topic {0}: {1}'.format(TopicArn, e))
log.error('Failed to create subscription to SNS topic %s: %s', TopicArn, e)
return None
except KeyError:
log.error('Failed to create subscription to SNS topic {0}'.format(TopicArn))
log.error('Failed to create subscription to SNS topic %s', TopicArn)
return None
@ -328,14 +327,15 @@ def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None
subs = list_subscriptions(region=region, key=key, keyid=keyid, profile=profile)
sub = [s for s in subs if s.get('SubscriptionArn') == SubscriptionArn]
if not sub:
log.error('Subscription ARN {0} not found'.format(SubscriptionArn))
log.error('Subscription ARN %s not found', SubscriptionArn)
return False
TopicArn = sub[0]['TopicArn']
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.unsubscribe(SubscriptionArn=SubscriptionArn)
log.info('Deleted subscription {0} from SNS topic {1}'.format(SubscriptionArn, TopicArn))
log.info('Deleted subscription %s from SNS topic %s',
SubscriptionArn, TopicArn)
return True
except botocore.exceptions.ClientError as e:
log.error('Failed to delete subscription {0}: {1}'.format(SubscriptionArn, e))
log.error('Failed to delete subscription %s: %s', SubscriptionArn, e)
return False

View File

@ -76,7 +76,7 @@ Connection module for Amazon APIGateway
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import datetime
@ -952,7 +952,7 @@ def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
method = conn.put_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
authorizationType=str(authorizationType), apiKeyRequired=apiKeyRequired,
authorizationType=str(authorizationType), apiKeyRequired=apiKeyRequired, # future lint: disable=blacklisted-function
requestParameters=requestParameters, requestModels=requestModels)
return {'created': True, 'method': method}
return {'created': False, 'error': 'Failed to create method'}
@ -1029,7 +1029,7 @@ def create_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = conn.put_method_response(restApiId=restApiId, resourceId=resource['id'],
httpMethod=httpMethod, statusCode=str(statusCode),
httpMethod=httpMethod, statusCode=str(statusCode), # future lint: disable=blacklisted-function
responseParameters=responseParameters, responseModels=responseModels)
return {'created': True, 'response': response}
return {'created': False, 'error': 'no such resource'}
@ -1055,7 +1055,7 @@ def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode,
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_method_response(restApiId=restApiId, resourceId=resource['id'],
httpMethod=httpMethod, statusCode=str(statusCode))
httpMethod=httpMethod, statusCode=str(statusCode)) # future lint: disable=blacklisted-function
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
@ -1080,7 +1080,7 @@ def describe_api_method_response(restApiId, resourcePath, httpMethod, statusCode
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = conn.get_method_response(restApiId=restApiId, resourceId=resource['id'],
httpMethod=httpMethod, statusCode=str(statusCode))
httpMethod=httpMethod, statusCode=str(statusCode)) # future lint: disable=blacklisted-function
return {'response': _convert_datetime_str(response)}
return {'error': 'no such resource'}
except ClientError as e:
@ -1511,7 +1511,7 @@ def create_usage_plan(name, description=None, throttle=None, quota=None, region=
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
except (TypeError, ValueError) as e:
return {'error': '{0}'.format(e)}
return {'error': six.text_type(e)}
def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None):
@ -1563,17 +1563,17 @@ def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None,
patchOperations.append({'op': 'remove', 'path': '/throttle'})
else:
if 'rateLimit' in throttle:
patchOperations.append({'op': 'replace', 'path': '/throttle/rateLimit', 'value': str(throttle['rateLimit'])})
patchOperations.append({'op': 'replace', 'path': '/throttle/rateLimit', 'value': str(throttle['rateLimit'])}) # future lint: disable=blacklisted-function
if 'burstLimit' in throttle:
patchOperations.append({'op': 'replace', 'path': '/throttle/burstLimit', 'value': str(throttle['burstLimit'])})
patchOperations.append({'op': 'replace', 'path': '/throttle/burstLimit', 'value': str(throttle['burstLimit'])}) # future lint: disable=blacklisted-function
if quota is None:
patchOperations.append({'op': 'remove', 'path': '/quota'})
else:
patchOperations.append({'op': 'replace', 'path': '/quota/period', 'value': str(quota['period'])})
patchOperations.append({'op': 'replace', 'path': '/quota/limit', 'value': str(quota['limit'])})
patchOperations.append({'op': 'replace', 'path': '/quota/period', 'value': str(quota['period'])}) # future lint: disable=blacklisted-function
patchOperations.append({'op': 'replace', 'path': '/quota/limit', 'value': str(quota['limit'])}) # future lint: disable=blacklisted-function
if 'offset' in quota:
patchOperations.append({'op': 'replace', 'path': '/quota/offset', 'value': str(quota['offset'])})
patchOperations.append({'op': 'replace', 'path': '/quota/offset', 'value': str(quota['offset'])}) # future lint: disable=blacklisted-function
if patchOperations:
res = conn.update_usage_plan(usagePlanId=plan_id,
@ -1585,7 +1585,7 @@ def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None,
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
except (TypeError, ValueError) as e:
return {'error': '{0}'.format(e)}
return {'error': six.text_type(e)}
def delete_usage_plan(plan_id, region=None, key=None, keyid=None, profile=None):

View File

@ -46,7 +46,7 @@ Connection module for Amazon Autoscale Groups
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import time
import logging
@ -288,7 +288,7 @@ def create(name, launch_config_name, availability_zones, min_size, max_size,
# create notifications
if notification_arn and notification_types:
conn.put_notification_configuration(_asg, notification_arn, notification_types)
log.info('Created ASG {0}'.format(name))
log.info('Created ASG %s', name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
@ -297,7 +297,7 @@ def create(name, launch_config_name, availability_zones, min_size, max_size,
retries -= 1
continue
log.error(e)
msg = 'Failed to create ASG {0}'.format(name)
msg = 'Failed to create ASG {0}'.format(name)
log.error(msg)
return False
@ -393,10 +393,10 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
# Seems the update call doesn't handle tags, so we'll need to update
# that separately.
if add_tags:
log.debug('Adding/updating tags from ASG: {}'.format(add_tags))
log.debug('Adding/updating tags from ASG: %s', add_tags)
conn.create_or_update_tags([autoscale.Tag(**t) for t in add_tags])
if delete_tags:
log.debug('Deleting tags from ASG: {}'.format(delete_tags))
log.debug('Deleting tags from ASG: %s', delete_tags)
conn.delete_tags([autoscale.Tag(**t) for t in delete_tags])
# update doesn't handle suspended_processes either
# Resume all processes
@ -405,7 +405,7 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
# list suspends all; don't do that.
if suspended_processes is not None and len(suspended_processes) > 0:
_asg.suspend_processes(suspended_processes)
log.info('Updated ASG {0}'.format(name))
log.info('Updated ASG %s', name)
# ### scaling policies
# delete all policies, then recreate them
for policy in conn.get_all_policies(as_group=name):
@ -428,7 +428,7 @@ def update(name, launch_config_name, availability_zones, min_size, max_size,
log.error(e)
msg = 'Failed to update ASG {0}'.format(name)
log.error(msg)
return False, str(e)
return False, six.text_type(e)
def _create_scaling_policies(conn, as_name, scaling_policies):
@ -687,7 +687,7 @@ def create_launch_configuration(name, image_id, key_name=None,
while True:
try:
conn.create_launch_configuration(lc)
log.info('Created LC {0}'.format(name))
log.info('Created LC %s', name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
@ -715,7 +715,7 @@ def delete_launch_configuration(name, region=None, key=None, keyid=None,
while True:
try:
conn.delete_launch_configuration(name)
log.info('Deleted LC {0}'.format(name))
log.info('Deleted LC %s', name)
return True
except boto.exception.BotoServerError as e:
if retries and e.code == 'Throttling':
@ -749,7 +749,7 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
log.error('Could not convert: {0}'.format(as_group))
log.error('Could not convert: %s', as_group)
return None
except boto.exception.BotoServerError as e:
if e.error_code != 'Throttling':
@ -839,7 +839,7 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
log.error(e)
return False
if len(asgs) != 1:
log.debug("name '{0}' returns multiple ASGs: {1}".format(name, [asg.name for asg in asgs]))
log.debug("name '%s' returns multiple ASGs: %s", name, [asg.name for asg in asgs])
return False
asg = asgs[0]
instance_ids = []

View File

@ -32,7 +32,7 @@ Connection module for Amazon Cloud Formation
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -80,10 +80,10 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
try:
# Returns an object if stack exists else an exception
exists = conn.describe_stacks(name)
log.debug('Stack {0} exists.'.format(name))
log.debug('Stack %s exists.', name)
return True
except BotoServerError as e:
log.debug('Exists returned an exception.\n{0}'.format(str(e)))
log.debug('boto_cfn.exists raised an exception', exc_info=True)
return False
@ -104,7 +104,7 @@ def describe(name, region=None, key=None, keyid=None, profile=None):
r = conn.describe_stacks(name)
if r:
stack = r[0]
log.debug('Found VPC: {0}'.format(stack.stack_id))
log.debug('Found VPC: %s', stack.stack_id)
keys = ('stack_id', 'description', 'stack_status', 'stack_status_reason', 'tags')
ret = dict([(k, getattr(stack, k)) for k in keys if hasattr(stack, k)])
@ -121,10 +121,10 @@ def describe(name, region=None, key=None, keyid=None, profile=None):
return {'stack': ret}
log.debug('Stack {0} exists.'.format(name))
log.debug('Stack %s exists.', name)
return True
except BotoServerError as e:
log.warning('Could not describe stack {0}.\n{1}'.format(name, str(e)))
log.warning('Could not describe stack %s.\n%s', name, e)
return False
@ -145,7 +145,7 @@ def create(name, template_body=None, template_url=None, parameters=None, notific
return conn.create_stack(name, template_body, template_url, parameters, notification_arns, disable_rollback,
timeout_in_minutes, capabilities, tags, on_failure, stack_policy_body, stack_policy_url)
except BotoServerError as e:
msg = 'Failed to create stack {0}.\n{1}'.format(name, str(e))
msg = 'Failed to create stack {0}.\n{1}'.format(name, e)
log.error(msg)
log.debug(e)
return False
@ -172,13 +172,13 @@ def update_stack(name, template_body=None, template_url=None, parameters=None, n
disable_rollback, timeout_in_minutes, capabilities, tags, use_previous_template,
stack_policy_during_update_body, stack_policy_during_update_url,
stack_policy_body, stack_policy_url)
log.debug('Updated result is : {0}.'.format(update))
log.debug('Updated result is : %s.', update)
return update
except BotoServerError as e:
msg = 'Failed to update stack {0}.'.format(name)
log.debug(e)
log.error(msg)
return str(e)
return six.text_type(e)
def delete(name, region=None, key=None, keyid=None, profile=None):
@ -197,7 +197,7 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
msg = 'Failed to delete stack {0}.'.format(name)
log.error(msg)
log.debug(e)
return str(e)
return six.text_type(e)
def get_template(name, region=None, key=None, keyid=None, profile=None):
@ -212,13 +212,13 @@ def get_template(name, region=None, key=None, keyid=None, profile=None):
try:
template = conn.get_template(name)
log.info('Retrieved template for stack {0}'.format(name))
log.info('Retrieved template for stack %s', name)
return template
except BotoServerError as e:
log.debug(e)
msg = 'Template {0} does not exist'.format(name)
log.error(msg)
return str(e)
return six.text_type(e)
def validate_template(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None):
@ -240,4 +240,4 @@ def validate_template(template_body=None, template_url=None, region=None, key=No
log.debug(e)
msg = 'Error while trying to validate template {0}.'.format(template_body)
log.error(msg)
return str(e)
return six.text_type(e)

View File

@ -51,7 +51,7 @@ Connection module for Amazon CloudFront
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -111,9 +111,7 @@ def _list_distributions(
id_ = partial_dist['Id']
if 'Name' not in tags:
log.warning(
'CloudFront distribution {0} has no Name tag.'.format(id_),
)
log.warning('CloudFront distribution %s has no Name tag.', id_)
continue
distribution_name = tags.pop('Name', None)
if name is not None and distribution_name != name:

View File

@ -50,7 +50,7 @@ The dependencies listed above can be installed via package or pip.
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -165,7 +165,7 @@ def create(Name,
S3BucketName=S3BucketName,
**kwargs)
if trail:
log.info('The newly created trail name is {0}'.format(trail['Name']))
log.info('The newly created trail name is %s', trail['Name'])
return {'created': True, 'name': trail['Name']}
else:
@ -338,7 +338,7 @@ def update(Name,
S3BucketName=S3BucketName,
**kwargs)
if trail:
log.info('The updated trail name is {0}'.format(trail['Name']))
log.info('The updated trail name is %s', trail['Name'])
return {'updated': True, 'name': trail['Name']}
else:
@ -430,9 +430,9 @@ def add_tags(Name,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if str(k).startswith('__'):
if six.text_type(k).startswith('__'):
continue
tagslist.append({'Key': str(k), 'Value': str(v)})
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
conn.add_tags(ResourceId=_get_trail_arn(Name,
region=region, key=key, keyid=keyid,
profile=profile), TagsList=tagslist)
@ -461,9 +461,9 @@ def remove_tags(Name,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if str(k).startswith('__'):
if six.text_type(k).startswith('__'):
continue
tagslist.append({'Key': str(k), 'Value': str(v)})
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
conn.remove_tags(ResourceId=_get_trail_arn(Name,
region=region, key=key, keyid=keyid,
profile=profile), TagsList=tagslist)

View File

@ -44,7 +44,7 @@ Connection module for Amazon CloudWatch
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -95,7 +95,7 @@ def get_alarm(name, region=None, key=None, keyid=None, profile=None):
if len(alarms) == 0:
return None
if len(alarms) > 1:
log.error("multiple alarms matched name '{0}'".format(name))
log.error("multiple alarms matched name '%s'", name)
return _metric_alarm_to_dict(alarms[0])
@ -219,7 +219,7 @@ def create_or_update_alarm(
if isinstance(dimensions, six.string_types):
dimensions = salt.utils.json.loads(dimensions)
if not isinstance(dimensions, dict):
log.error("could not parse dimensions argument: must be json encoding of a dict: '{0}'".format(dimensions))
log.error("could not parse dimensions argument: must be json encoding of a dict: '%s'", dimensions)
return False
if isinstance(alarm_actions, six.string_types):
alarm_actions = alarm_actions.split(",")
@ -268,7 +268,7 @@ def create_or_update_alarm(
ok_actions=ok_actions
)
conn.create_alarm(alarm)
log.info('Created/updated alarm {0}'.format(name))
log.info('Created/updated alarm %s', name)
return True
@ -291,7 +291,7 @@ def convert_to_arn(arns, region=None, key=None, keyid=None, profile=None):
if policy_arn:
results.append(policy_arn)
else:
log.error('Could not convert: {0}'.format(arn))
log.error('Could not convert: %s', arn)
else:
results.append(arn)
return results
@ -308,7 +308,7 @@ def delete_alarm(name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_alarms([name])
log.info('Deleted alarm {0}'.format(name))
log.info('Deleted alarm %s', name)
return True

View File

@ -44,7 +44,7 @@ Connection module for Amazon CloudWatch Events
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -142,7 +142,7 @@ def create_or_update(Name,
rule = conn.put_rule(Name=Name,
**kwargs)
if rule:
log.info('The newly created event rule is {0}'.format(rule.get('RuleArn')))
log.info('The newly created event rule is %s', rule.get('RuleArn'))
return {'created': True, 'arn': rule.get('RuleArn')}
else:

View File

@ -77,7 +77,7 @@ Connection module for Amazon CognitoIdentity
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs

View File

@ -6,11 +6,11 @@ Connection module for Amazon Data Pipeline
:depends: boto3
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
from salt._compat import string_types
from salt.ext import six
log = logging.getLogger(__name__)
@ -50,7 +50,7 @@ def activate_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=No
client.activate_pipeline(pipelineId=pipeline_id)
r['result'] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -75,7 +75,7 @@ def create_pipeline(name, unique_id, description='', region=None, key=None, keyi
)
r['result'] = response['pipelineId']
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -95,7 +95,7 @@ def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None
client.delete_pipeline(pipelineId=pipeline_id)
r['result'] = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -114,7 +114,7 @@ def describe_pipelines(pipeline_ids, region=None, key=None, keyid=None, profile=
try:
r['result'] = client.describe_pipelines(pipelineIds=pipeline_ids)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -137,7 +137,7 @@ def get_pipeline_definition(pipeline_id, version='latest', region=None, key=None
version=version,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -160,7 +160,7 @@ def list_pipelines(region=None, key=None, keyid=None, profile=None):
pipelines += page['pipelineIdList']
r['result'] = pipelines
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -215,7 +215,7 @@ def put_pipeline_definition(pipeline_id, pipeline_objects, parameter_objects=Non
else:
r['result'] = response
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = str(e)
r['error'] = six.text_type(e)
return r
@ -236,7 +236,7 @@ def _get_session(region, key, keyid, profile):
Get a boto3 session
'''
if profile:
if isinstance(profile, string_types):
if isinstance(profile, six.string_types):
_profile = __salt__['config.option'](profile)
elif isinstance(profile, dict):
_profile = profile

View File

@ -45,7 +45,7 @@ Connection module for Amazon DynamoDB
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time

View File

@ -46,7 +46,7 @@ Connection module for Amazon EC2
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
@ -172,18 +172,20 @@ def get_unassociated_eip_address(domain='standard', region=None, key=None,
key=key, keyid=keyid,
profile=profile)[0]
if address_info['instance_id']:
log.debug('{0} is already associated with the instance {1}'.format(
address, address_info['instance_id']))
log.debug('%s is already associated with the instance %s',
address, address_info['instance_id'])
continue
if address_info['network_interface_id']:
log.debug('{0} is already associated with the network interface {1}'
.format(address, address_info['network_interface_id']))
log.debug('%s is already associated with the network interface %s',
address, address_info['network_interface_id'])
continue
if address_info['domain'] == domain:
log.debug("The first unassociated EIP address in the domain '{0}' "
"is {1}".format(domain, address))
log.debug(
"The first unassociated EIP address in the domain '%s' is %s",
domain, address
)
eip = address
break
@ -361,8 +363,10 @@ def associate_eip_address(instance_id=None, instance_name=None, public_ip=None,
log.error(e)
return False
if not instance_id:
log.error("Given instance_name '{0}' cannot be mapped to an "
"instance_id".format(instance_name))
log.error(
"Given instance_name '%s' cannot be mapped to an instance_id",
instance_name
)
return False
if network_interface_name:
@ -374,8 +378,8 @@ def associate_eip_address(instance_id=None, instance_name=None, public_ip=None,
log.error(e)
return False
if not network_interface_id:
log.error("Given network_interface_name '{0}' cannot be mapped to "
"an network_interface_id".format(network_interface_name))
log.error("Given network_interface_name '%s' cannot be mapped to "
"an network_interface_id", network_interface_name)
return False
try:
@ -468,8 +472,8 @@ def assign_private_ip_addresses(network_interface_name=None, network_interface_i
log.error(e)
return False
if not network_interface_id:
log.error("Given network_interface_name '{0}' cannot be mapped to "
"an network_interface_id".format(network_interface_name))
log.error("Given network_interface_name '%s' cannot be mapped to "
"an network_interface_id", network_interface_name)
return False
try:
@ -522,8 +526,8 @@ def unassign_private_ip_addresses(network_interface_name=None, network_interface
log.error(e)
return False
if not network_interface_id:
log.error("Given network_interface_name '{0}' cannot be mapped to "
"an network_interface_id".format(network_interface_name))
log.error("Given network_interface_name '%s' cannot be mapped to "
"an network_interface_id", network_interface_name)
return False
try:
@ -586,13 +590,15 @@ def find_instances(instance_id=None, name=None, tags=None, region=None,
reservations = conn.get_all_reservations(**filter_parameters)
instances = [i for r in reservations for i in r.instances]
log.debug('The filters criteria {0} matched the following '
'instances:{1}'.format(filter_parameters, instances))
log.debug('The filters criteria %s matched the following '
'instances:%s', filter_parameters, instances)
if in_states:
instances = [i for i in instances if i.state in in_states]
log.debug('Limiting instance matches to those in the requested '
'states: {0}'.format(instances))
log.debug(
'Limiting instance matches to those in the requested states: %s',
instances
)
if instances:
if return_objs:
return instances
@ -674,8 +680,8 @@ def find_images(ami_name=None, executable_by=None, owners=None, image_ids=None,
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
images = conn.get_all_images(**filter_parameters)
log.debug('The filters criteria {0} matched the following '
'images:{1}'.format(filter_parameters, images))
log.debug('The filters criteria %s matched the following '
'images:%s', filter_parameters, images)
if images:
if return_objs:
@ -732,7 +738,7 @@ def get_id(name=None, tags=None, region=None, key=None,
keyid=keyid, profile=profile, in_states=in_states,
filters=filters)
if instance_ids:
log.info("Instance ids: {0}".format(" ".join(instance_ids)))
log.info("Instance ids: %s", " ".join(instance_ids))
if len(instance_ids) == 1:
return instance_ids[0]
else:
@ -764,7 +770,7 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None,
for tag in result:
tags.append({tag.name: tag.value})
else:
log.info("No tags found for instance_id {}".format(instance_id))
log.info("No tags found for instance_id %s", instance_id)
return tags
@ -824,8 +830,8 @@ def _to_blockdev_map(thing):
if isinstance(thing, six.string_types):
thing = salt.utils.json.loads(thing)
if not isinstance(thing, dict):
log.error("Can't convert '{0}' of type {1} to a "
"boto.ec2.blockdevicemapping.BlockDeviceMapping".format(thing, type(thing)))
log.error("Can't convert '%s' of type %s to a "
"boto.ec2.blockdevicemapping.BlockDeviceMapping", thing, type(thing))
return None
bdm = BlockDeviceMapping()
@ -980,7 +986,7 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
region=region, key=key,
keyid=keyid, profile=profile)
if 'id' not in r:
log.warning('Couldn\'t resolve subnet name {0}.').format(subnet_name)
log.warning('Couldn\'t resolve subnet name %s.', subnet_name)
return False
subnet_id = r['id']
@ -994,7 +1000,7 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
region=region, key=key,
keyid=keyid, profile=profile)
if not r:
log.warning('Couldn\'t resolve security group name ' + str(sgn))
log.warning('Couldn\'t resolve security group name %s', sgn)
return False
security_group_ids += [r]
@ -1009,8 +1015,8 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
network_interface_id = result['result']
if not network_interface_id:
log.warning(
"Given network_interface_name '{0}' cannot be mapped to an "
"network_interface_id".format(network_interface_name)
"Given network_interface_name '%s' cannot be mapped to an "
"network_interface_id", network_interface_name
)
if network_interface_id:
@ -1058,8 +1064,10 @@ def run(image_id, name=None, tags=None, key_name=None, security_groups=None,
instance.add_tags(tags)
return {'instance_id': instance.id}
else:
log.warning('Instance could not be started -- '
'status is "{0}"'.format(status))
log.warning(
'Instance could not be started -- status is "%s"',
status
)
def get_key(key_name, region=None, key=None, keyid=None, profile=None):
@ -1076,7 +1084,7 @@ def get_key(key_name, region=None, key=None, keyid=None, profile=None):
try:
key = conn.get_key_pair(key_name)
log.debug("the key to return is : {0}".format(key))
log.debug("the key to return is : %s", key)
if key is None:
return False
return key.name, key.fingerprint
@ -1101,7 +1109,7 @@ def create_key(key_name, save_path, region=None, key=None, keyid=None,
try:
key = conn.create_key_pair(key_name)
log.debug("the key to return is : {0}".format(key))
log.debug("the key to return is : %s", key)
key.save(save_path)
return key.material
except boto.exception.BotoServerError as e:
@ -1130,7 +1138,7 @@ def import_key(key_name, public_key_material, region=None, key=None,
try:
key = conn.import_key_pair(key_name, public_key_material)
log.debug("the key to return is : {0}".format(key))
log.debug("the key to return is : %s", key)
return key.fingerprint
except boto.exception.BotoServerError as e:
log.debug(e)
@ -1151,7 +1159,7 @@ def delete_key(key_name, region=None, key=None, keyid=None, profile=None):
try:
key = conn.delete_key_pair(key_name)
log.debug("the key to return is : {0}".format(key))
log.debug("the key to return is : %s", key)
return key
except boto.exception.BotoServerError as e:
log.debug(e)
@ -1180,7 +1188,7 @@ def get_keys(keynames=None, filters=None, region=None, key=None,
try:
keys = conn.get_all_key_pairs(keynames, filters)
log.debug("the key to return is : {0}".format(keys))
log.debug("the key to return is : %s", keys)
key_values = []
if keys:
for key in keys:
@ -1435,8 +1443,7 @@ def create_network_interface(name, subnet_id=None, subnet_name=None,
keyid=keyid,
profile=profile)
if 'id' not in resource:
log.warning('Couldn\'t resolve subnet name {0}.').format(
subnet_name)
log.warning('Couldn\'t resolve subnet name %s.', subnet_name)
return False
subnet_id = resource['id']
@ -1791,7 +1798,7 @@ def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
tags = dict(tm.get('tags', {}))
args = {'return_objs': True, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
new_filters = {}
log.debug('got filters: {0}'.format(filters))
log.debug('got filters: %s', filters)
instance_id = None
in_states = tm.get('in_states', running_states)
try:
@ -1812,18 +1819,18 @@ def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
continue # Hmm, abort or do what we can...? Guess the latter for now.
args['filters'] = new_filters
volumes = get_all_volumes(**args)
log.debug('got volume list: {0}'.format(volumes))
log.debug('got volume list: %s', volumes)
for vol in volumes:
tag_sets.setdefault(vol.id.replace('-', '_'), {'vol': vol, 'tags': tags.copy()})['tags'].update(tags.copy())
log.debug('tag_sets after munging: {0}'.format(tag_sets))
log.debug('tag_sets after munging: %s', tag_sets)
### ...then loop through all those volume->tag pairs and apply them.
changes = {'old': {}, 'new': {}}
for volume in tag_sets.values():
vol, tags = volume['vol'], volume['tags']
log.debug('current tags on vol.id {0}: {1}'.format(vol.id, dict(getattr(vol, 'tags', {}))))
log.debug('current tags on vol.id %s: %s', vol.id, dict(getattr(vol, 'tags', {})))
curr = set(dict(getattr(vol, 'tags', {})).keys())
log.debug('requested tags on vol.id {0}: {1}'.format(vol.id, tags))
log.debug('requested tags on vol.id %s: %s', vol.id, tags)
req = set(tags.keys())
add = list(req - curr)
update = [r for r in (req & curr) if vol.tags[r] != tags[r]]
@ -1832,13 +1839,13 @@ def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
changes['old'][vol.id] = dict(getattr(vol, 'tags', {}))
changes['new'][vol.id] = tags
else:
log.debug('No changes needed for vol.id {0}'.format(vol.id))
log.debug('No changes needed for vol.id %s', vol.id)
if len(add):
d = dict((k, tags[k]) for k in add)
log.debug('New tags for vol.id {0}: {1}'.format(vol.id, d))
log.debug('New tags for vol.id %s: %s', vol.id, d)
if len(update):
d = dict((k, tags[k]) for k in update)
log.debug('Updated tags for vol.id {0}: {1}'.format(vol.id, d))
log.debug('Updated tags for vol.id %s: %s', vol.id, d)
if not dry_run:
if not create_tags(vol.id, tags, region=region, key=key, keyid=keyid, profile=profile):
ret['success'] = False
@ -1846,7 +1853,7 @@ def set_volumes_tags(tag_maps, authoritative=False, dry_run=False,
return ret
if authoritative:
if len(remove):
log.debug('Removed tags for vol.id {0}: {1}'.format(vol.id, remove))
log.debug('Removed tags for vol.id %s: %s', vol.id, remove)
if not delete_tags(vol.id, remove, region=region, key=key, keyid=keyid, profile=profile):
ret['success'] = False
ret['comment'] = "Failed to remove tags on vol.id {0}: {1}".format(vol.id, remove)

View File

@ -50,7 +50,7 @@ Connection module for Amazon EFS
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging

View File

@ -44,7 +44,7 @@ Connection module for Amazon Elasticache
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -134,7 +134,7 @@ def create_replication_group(name, primary_cluster_id, replication_group_descrip
cc = conn.create_replication_group(name, primary_cluster_id,
replication_group_description)
if not wait:
log.info('Created cache cluster {0}.'.format(name))
log.info('Created cache cluster %s.', name)
return True
while True:
time.sleep(3)
@ -446,7 +446,7 @@ def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, t
region=region, key=key,
keyid=keyid, profile=profile)
if 'id' not in r:
log.error('Couldn\'t resolve subnet name {0} to an ID.'.format(subnet_name))
log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name)
return False
subnet_ids += [r['id']]
try:
@ -455,7 +455,7 @@ def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, t
msg = 'Failed to create ElastiCache subnet group {0}'.format(name)
log.error(msg)
return False
log.info('Created ElastiCache subnet group {0}'.format(name))
log.info('Created ElastiCache subnet group %s', name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
@ -560,7 +560,7 @@ def create(name, num_cache_nodes=None, engine=None, cache_node_type=None,
preferred_maintenance_window, port, notification_topic_arn,
auto_minor_version_upgrade)
if not wait:
log.info('Created cache cluster {0}.'.format(name))
log.info('Created cache cluster %s.', name)
return True
while True:
time.sleep(3)
@ -569,7 +569,7 @@ def create(name, num_cache_nodes=None, engine=None, cache_node_type=None,
return True
if config['cache_cluster_status'] == 'available':
return True
log.info('Created cache cluster {0}.'.format(name))
log.info('Created cache cluster %s.', name)
except boto.exception.BotoServerError as e:
msg = 'Failed to create cache cluster {0}.'.format(name)
log.error(msg)
@ -590,7 +590,7 @@ def delete(name, wait=False, region=None, key=None, keyid=None, profile=None):
try:
conn.delete_cache_cluster(name)
if not wait:
log.info('Deleted cache cluster {0}.'.format(name))
log.info('Deleted cache cluster %s.', name)
return True
while True:
config = get_config(name, region, key, keyid, profile)
@ -599,7 +599,7 @@ def delete(name, wait=False, region=None, key=None, keyid=None, profile=None):
if config['cache_cluster_status'] == 'deleting':
return True
time.sleep(2)
log.info('Deleted cache cluster {0}.'.format(name))
log.info('Deleted cache cluster %s.', name)
return True
except boto.exception.BotoServerError as e:
msg = 'Failed to delete cache cluster {0}.'.format(name)
@ -621,7 +621,7 @@ def create_cache_security_group(name, description, region=None, key=None,
created = conn.create_cache_security_group(name, description)
if created:
log.info('Created cache security group {0}.'.format(name))
log.info('Created cache security group %s.', name)
return True
else:
msg = 'Failed to create cache security group {0}.'.format(name)
@ -642,7 +642,7 @@ def delete_cache_security_group(name, region=None, key=None, keyid=None,
deleted = conn.delete_cache_security_group(name)
if deleted:
log.info('Deleted cache security group {0}.'.format(name))
log.info('Deleted cache security group %s.', name)
return True
else:
msg = 'Failed to delete cache security group {0}.'.format(name)

View File

@ -75,7 +75,7 @@ Connection module for Amazon Elasticsearch Service
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -263,7 +263,7 @@ def create(DomainName, ElasticsearchClusterConfig=None, EBSOptions=None,
if 'AccessPolicies' in kwargs:
kwargs['AccessPolicies'] = salt.utils.json.dumps(kwargs['AccessPolicies'])
if 'ElasticsearchVersion' in kwargs:
kwargs['ElasticsearchVersion'] = str(kwargs['ElasticsearchVersion'])
kwargs['ElasticsearchVersion'] = six.text_type(kwargs['ElasticsearchVersion'])
domain = conn.create_elasticsearch_domain(DomainName=DomainName, **kwargs)
if domain and 'DomainStatus' in domain:
return {'created': True}
@ -367,9 +367,9 @@ def add_tags(DomainName=None, ARN=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if str(k).startswith('__'):
if six.text_type(k).startswith('__'):
continue
tagslist.append({'Key': str(k), 'Value': str(v)})
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
if ARN is None:
if DomainName is None:
raise SaltInvocationError('One (but not both) of ARN or '

View File

@ -44,7 +44,7 @@ Connection module for Amazon ELB
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -66,8 +66,7 @@ try:
required_boto_version = '2.33.0'
if (_LooseVersion(boto.__version__) <
_LooseVersion(required_boto_version)):
msg = 'boto_elb requires boto {0}.'.format(required_boto_version)
logging.debug(msg)
logging.debug('boto_elb requires boto %s.', required_boto_version)
raise ImportError()
from boto.ec2.elb import HealthCheck
from boto.ec2.elb.attributes import AccessLogAttribute
@ -107,8 +106,7 @@ def exists(name, region=None, key=None, keyid=None, profile=None):
if elb:
return True
else:
msg = 'The load balancer does not exist in region {0}'.format(region)
log.debug(msg)
log.debug('The load balancer does not exist in region %s', region)
return False
except boto.exception.BotoServerError as error:
log.warning(error)
@ -208,7 +206,7 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None):
time.sleep(5)
retries -= 1
continue
log.error('Error fetching config for ELB {0}: {1}'.format(name, error.message))
log.error('Error fetching config for ELB %s: %s', name, error.message)
log.error(error)
return {}
return {}
@ -268,16 +266,15 @@ def create(name, availability_zones, listeners, subnets=None,
security_groups=security_groups, scheme=scheme,
complex_listeners=_complex_listeners)
if lb:
log.info('Created ELB {0}'.format(name))
log.info('Created ELB %s', name)
return True
else:
msg = 'Failed to create ELB {0}'.format(name)
log.error(msg)
log.error('Failed to create ELB %s', name)
return False
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to create ELB {0}: {1}: {2}'.format(name, error.error_code, error.message)
log.error(msg)
log.error('Failed to create ELB %s: %s: %s',
name, error.error_code, error.message,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -297,13 +294,11 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
return True
try:
conn.delete_load_balancer(name)
msg = 'Deleted ELB {0}.'.format(name)
log.info(msg)
log.info('Deleted ELB %s.', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to delete ELB {0}'.format(name)
log.error(msg)
log.error('Failed to delete ELB %s', name,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -328,13 +323,11 @@ def create_listeners(name, listeners, region=None, key=None, keyid=None,
_complex_listeners.append(listener_dict_to_tuple(listener))
try:
conn.create_load_balancer_listeners(name, [], _complex_listeners)
msg = 'Created ELB listeners on {0}'.format(name)
log.info(msg)
log.info('Created ELB listeners on %s', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to create ELB listeners on {0}: {1}'.format(name, error)
log.error(msg)
log.error('Failed to create ELB listeners on %s: %s', name, error,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -355,13 +348,11 @@ def delete_listeners(name, ports, region=None, key=None, keyid=None,
ports = salt.utils.json.loads(ports)
try:
conn.delete_load_balancer_listeners(name, ports)
msg = 'Deleted ELB listeners on {0}'.format(name)
log.info(msg)
log.info('Deleted ELB listeners on %s', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to delete ELB listeners on {0}: {1}'.format(name, error)
log.error(msg)
log.error('Failed to delete ELB listeners on %s: %s', name, error,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -382,14 +373,12 @@ def apply_security_groups(name, security_groups, region=None, key=None,
security_groups = salt.utils.json.loads(security_groups)
try:
conn.apply_security_groups_to_lb(name, security_groups)
msg = 'Applied security_groups on ELB {0}'.format(name)
log.info(msg)
log.info('Applied security_groups on ELB %s', name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to appply security_groups on ELB {0}: {1}'
msg = msg.format(name, e.message)
log.error(msg)
log.error('Failed to apply security_groups on ELB %s: %s',
name, e.message)
return False
@ -410,13 +399,10 @@ def enable_availability_zones(name, availability_zones, region=None, key=None,
availability_zones = salt.utils.json.loads(availability_zones)
try:
conn.enable_availability_zones(name, availability_zones)
msg = 'Enabled availability_zones on ELB {0}'.format(name)
log.info(msg)
log.info('Enabled availability_zones on ELB %s', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to enable availability_zones on ELB {0}: {1}'.format(name, error)
log.error(msg)
log.error('Failed to enable availability_zones on ELB %s: %s', name, error)
return False
@ -437,13 +423,11 @@ def disable_availability_zones(name, availability_zones, region=None, key=None,
availability_zones = salt.utils.json.loads(availability_zones)
try:
conn.disable_availability_zones(name, availability_zones)
msg = 'Disabled availability_zones on ELB {0}'.format(name)
log.info(msg)
log.info('Disabled availability_zones on ELB %s', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to disable availability_zones on ELB {0}: {1}'.format(name, error)
log.error(msg)
log.error('Failed to disable availability_zones on ELB %s: %s',
name, error, exc_info_on_loglevel=logging.DEBUG)
return False
@ -464,13 +448,11 @@ def attach_subnets(name, subnets, region=None, key=None, keyid=None,
subnets = salt.utils.json.loads(subnets)
try:
conn.attach_lb_to_subnets(name, subnets)
msg = 'Attached ELB {0} on subnets.'.format(name)
log.info(msg)
log.info('Attached ELB %s on subnets.', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to attach ELB {0} on subnets: {1}'.format(name, error)
log.error(msg)
log.error('Failed to attach ELB %s on subnets: %s', name, error,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -491,13 +473,11 @@ def detach_subnets(name, subnets, region=None, key=None, keyid=None,
subnets = salt.utils.json.loads(subnets)
try:
conn.detach_lb_from_subnets(name, subnets)
msg = 'Detached ELB {0} from subnets.'.format(name)
log.info(msg)
log.info('Detached ELB %s from subnets.', name)
return True
except boto.exception.BotoServerError as error:
log.debug(error)
msg = 'Failed to detach ELB {0} from subnets: {1}'.format(name, error)
log.error(msg)
log.error('Failed to detach ELB %s from subnets: %s', name, error,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -541,7 +521,7 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None):
time.sleep(5)
retries -= 1
continue
log.error('ELB {0} does not exist: {1}'.format(name, e.message))
log.error('ELB %s does not exist: %s', name, e.message)
return {}
return {}
@ -609,10 +589,9 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
_al.emit_interval = al.get('emit_interval', None)
added_attr = conn.modify_lb_attribute(name, 'accessLog', _al)
if added_attr:
log.info('Added access_log attribute to {0} elb.'.format(name))
log.info('Added access_log attribute to %s elb.', name)
else:
msg = 'Failed to add access_log attribute to {0} elb.'
log.error(msg.format(name))
log.error('Failed to add access_log attribute to %s elb.', name)
return False
if czlb:
_czlb = CrossZoneLoadBalancingAttribute()
@ -620,8 +599,7 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
added_attr = conn.modify_lb_attribute(name, 'crossZoneLoadBalancing',
_czlb.enabled)
if added_attr:
msg = 'Added cross_zone_load_balancing attribute to {0} elb.'
log.info(msg.format(name))
log.info('Added cross_zone_load_balancing attribute to %s elb.', name)
else:
log.error('Failed to add cross_zone_load_balancing attribute.')
return False
@ -631,8 +609,7 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
_cd.timeout = cd.get('timeout', 300)
added_attr = conn.modify_lb_attribute(name, 'connectionDraining', _cd)
if added_attr:
msg = 'Added connection_draining attribute to {0} elb.'
log.info(msg.format(name))
log.info('Added connection_draining attribute to %s elb.', name)
else:
log.error('Failed to add connection_draining attribute.')
return False
@ -641,8 +618,7 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None,
_cs.idle_timeout = cs.get('idle_timeout', 60)
added_attr = conn.modify_lb_attribute(name, 'connectingSettings', _cs)
if added_attr:
msg = 'Added connecting_settings attribute to {0} elb.'
log.info(msg.format(name))
log.info('Added connecting_settings attribute to %s elb.', name)
else:
log.error('Failed to add connecting_settings attribute.')
return False
@ -680,8 +656,8 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
time.sleep(5)
retries -= 1
continue
log.error(error)
log.error('ELB {0} not found.'.format(name))
log.error('ELB %s not found.', name,
exc_info_on_loglevel=logging.DEBUG)
return {}
@ -703,7 +679,7 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
while True:
try:
conn.configure_health_check(name, hc)
log.info('Configured health check on ELB {0}'.format(name))
log.info('Configured health check on ELB %s', name)
return True
except boto.exception.BotoServerError as error:
if retries and error.code == 'Throttling':
@ -711,8 +687,7 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
time.sleep(5)
retries -= 1
continue
log.error(error)
log.error('Failed to configure health check on ELB {0}'.format(name))
log.exception('Failed to configure health check on ELB %s', name)
return False
@ -751,8 +726,8 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
# able to be registered with the given ELB
register_failures = set(instances).difference(set(registered_instance_ids))
if register_failures:
log.warning('Instance(s): {0} not registered with ELB {1}.'
.format(list(register_failures), name))
log.warning('Instance(s): %s not registered with ELB %s.',
list(register_failures), name)
register_result = False
else:
register_result = True
@ -792,9 +767,10 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
# deregister_instances returns "None" because the instances are
# effectively deregistered from ELB
if error.error_code == 'InvalidInstance':
log.warning('One or more of instance(s) {0} are not part of ELB {1}.'
' deregister_instances not performed.'
.format(instances, name))
log.warning(
'One or more of instance(s) %s are not part of ELB %s. '
'deregister_instances not performed.', instances, name
)
return None
else:
log.warning(error)
@ -805,8 +781,10 @@ def deregister_instances(name, instances, region=None, key=None, keyid=None,
# unable to be deregistered from the given ELB
deregister_failures = set(instances).intersection(set(registered_instance_ids))
if deregister_failures:
log.warning('Instance(s): {0} not deregistered from ELB {1}.'
.format(list(deregister_failures), name))
log.warning(
'Instance(s): %s not deregistered from ELB %s.',
list(deregister_failures), name
)
deregister_result = False
else:
deregister_result = True
@ -888,16 +866,15 @@ def create_policy(name, policy_name, policy_type, policy, region=None,
try:
success = conn.create_lb_policy(name, policy_name, policy_type, policy)
if success:
log.info('Created policy {0} on ELB {1}'.format(policy_name, name))
log.info('Created policy %s on ELB %s', policy_name, name)
return True
else:
msg = 'Failed to create policy {0} on ELB {1}'.format(policy_name, name)
log.error(msg)
log.error('Failed to create policy %s on ELB %s', policy_name, name)
return False
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to create policy {0} on ELB {1}: {2}'.format(policy_name, name, e.message)
log.error(msg)
log.error('Failed to create policy %s on ELB %s: %s',
policy_name, name, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -920,12 +897,12 @@ def delete_policy(name, policy_name, region=None, key=None, keyid=None,
return True
try:
conn.delete_lb_policy(name, policy_name)
log.info('Deleted policy {0} on ELB {1}'.format(policy_name, name))
log.info('Deleted policy %s on ELB %s', policy_name, name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to delete policy {0} on ELB {1}: {2}'.format(policy_name, name, e.message)
log.error(msg)
log.error('Failed to delete policy %s on ELB %s: %s',
policy_name, name, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -950,10 +927,11 @@ def set_listener_policy(name, port, policies=None, region=None, key=None,
policies = []
try:
conn.set_lb_policies_of_listener(name, port, policies)
log.info('Set policies {0} on ELB {1} listener {2}'.format(policies, name, port))
log.info('Set policies %s on ELB %s listener %s', policies, name, port)
except boto.exception.BotoServerError as e:
log.debug(e)
log.info('Failed to set policy {0} on ELB {1} listener {2}: {3}'.format(policies, name, port, e.message))
log.info('Failed to set policy %s on ELB %s listener %s: %s',
policies, name, port, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False
return True
@ -975,10 +953,12 @@ def set_backend_policy(name, port, policies=None, region=None, key=None,
policies = []
try:
conn.set_lb_policies_of_backend_server(name, port, policies)
log.info('Set policies {0} on ELB {1} backend server {2}'.format(policies, name, port))
log.info('Set policies %s on ELB %s backend server %s',
policies, name, port)
except boto.exception.BotoServerError as e:
log.debug(e)
log.info('Failed to set policy {0} on ELB {1} backend server {2}: {3}'.format(policies, name, port, e.message))
log.info('Failed to set policy %s on ELB %s backend server %s: %s',
policies, name, port, e.message,
exc_info_on_loglevel=logging.DEBUG)
return False
return True

View File

@ -40,7 +40,7 @@ Connection module for Amazon ALB
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -150,17 +150,18 @@ def create_target_group(name,
HealthyThresholdCount=healthy_threshold_count,
UnhealthyThresholdCount=unhealthy_threshold_count)
if alb:
log.info('Created ALB {0}: {1}'.format(name,
alb['TargetGroups'][0]['TargetGroupArn']))
log.info('Created ALB %s: %s', name, alb['TargetGroups'][0]['TargetGroupArn'])
return True
else:
log.error('Failed to create ALB {0}'.format(name))
log.error('Failed to create ALB %s', name)
return False
except ClientError as error:
log.debug(error)
log.error('Failed to create ALB {0}: {1}: {2}'.format(name,
error.response['Error']['Code'],
error.response['Error']['Message']))
log.error(
'Failed to create ALB %s: %s: %s',
name, error.response['Error']['Code'],
error.response['Error']['Message'],
exc_info_on_loglevel=logging.DEBUG
)
def delete_target_group(name,
@ -191,18 +192,18 @@ def delete_target_group(name,
try:
if name.startswith('arn:aws:elasticloadbalancing'):
conn.delete_target_group(TargetGroupArn=name)
log.info('Deleted target group {0}'.format(name))
log.info('Deleted target group %s', name)
else:
tg_info = conn.describe_target_groups(Names=[name])
if len(tg_info['TargetGroups']) != 1:
return False
arn = tg_info['TargetGroups'][0]['TargetGroupArn']
conn.delete_target_group(TargetGroupArn=arn)
log.info('Deleted target group {0} ARN {1}'.format(name, arn))
log.info('Deleted target group %s ARN %s', name, arn)
return True
except ClientError as error:
log.debug(error)
log.error('Failed to delete target group {0}'.format(name))
log.error('Failed to delete target group %s', name,
exc_info_on_loglevel=logging.DEBUG)
return False
@ -230,10 +231,10 @@ def target_group_exists(name,
if alb:
return True
else:
log.warning('The target group does not exist in region {0}'.format(region))
log.warning('The target group does not exist in region %s', region)
return False
except ClientError as error:
log.warning('target_group_exists check for {0} returned: {1}'.format(name, error))
log.warning('target_group_exists check for %s returned: %s', name, error)
return False

View File

@ -50,7 +50,7 @@ The dependencies listed above can be installed via package or pip.
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import datetime
@ -209,7 +209,7 @@ def create_thing_type(thingTypeName, thingTypeDescription,
)
if thingtype:
log.info('The newly created thing type ARN is {0}'.format(thingtype['thingTypeArn']))
log.info('The newly created thing type ARN is %s', thingtype['thingTypeArn'])
return {'created': True, 'thingTypeArn': thingtype['thingTypeArn']}
else:
@ -333,7 +333,7 @@ def create_policy(policyName, policyDocument,
policy = conn.create_policy(policyName=policyName,
policyDocument=policyDocument)
if policy:
log.info('The newly created policy version is {0}'.format(policy['policyVersionId']))
log.info('The newly created policy version is %s', policy['policyVersionId'])
return {'created': True, 'versionId': policy['policyVersionId']}
else:
@ -451,7 +451,7 @@ def create_policy_version(policyName, policyDocument, setAsDefault=False,
policyDocument=policyDocument,
setAsDefault=setAsDefault)
if policy:
log.info('The newly created policy version is {0}'.format(policy['policyVersionId']))
log.info('The newly created policy version is %s', policy['policyVersionId'])
return {'created': True, 'name': policy['policyVersionId']}
else:
@ -609,7 +609,7 @@ def set_default_policy_version(policyName, policyVersionId,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.set_default_policy_version(policyName=policyName,
policyVersionId=str(policyVersionId))
policyVersionId=str(policyVersionId)) # future lint: disable=blacklisted-function
return {'changed': True}
except ClientError as e:
return {'changed': False, 'error': salt.utils.boto3.get_error(e)}

View File

@ -46,7 +46,7 @@ Connection module for Amazon Kinesis
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
@ -300,7 +300,7 @@ def get_info_for_reshard(stream_details):
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# EndingSequenceNumber is null for open shards, so this shard must be closed
log.debug("skipping closed shard {0}".format(shard_id))
log.debug("skipping closed shard %s", shard_id)
continue
stream_details["OpenShards"].append(shard)
shard["HashKeyRange"]["StartingHashKey"] = long_int(
@ -359,8 +359,8 @@ def reshard(stream_name, desired_size, force=False,
stream_details = stream_response['result']["StreamDescription"]
min_hash_key, max_hash_key, stream_details = get_info_for_reshard(stream_details)
log.debug("found {0} open shards, min_hash_key {1} max_hash_key {2}".format(
len(stream_details["OpenShards"]), min_hash_key, max_hash_key))
log.debug("found %s open shards, min_hash_key %s max_hash_key %s",
len(stream_details["OpenShards"]), min_hash_key, max_hash_key)
# find the first open shard that doesn't match the desired pattern. When we find it,
# either split or merge (depending on if it's too big or too small), and then return.
@ -368,7 +368,7 @@ def reshard(stream_name, desired_size, force=False,
shard_id = shard["ShardId"]
if "EndingSequenceNumber" in shard["SequenceNumberRange"]:
# something went wrong, there's a closed shard in our open shard list
log.debug("this should never happen! closed shard {0}".format(shard_id))
log.debug("this should never happen! closed shard %s", shard_id)
continue
starting_hash_key = shard["HashKeyRange"]["StartingHashKey"]
@ -383,12 +383,16 @@ def reshard(stream_name, desired_size, force=False,
if expected_ending_hash_key > max_hash_key:
expected_ending_hash_key = max_hash_key
log.debug("Shard {0} ({1}) should start at {2}: {3}".format(shard_num, shard_id, expected_starting_hash_key,
starting_hash_key == expected_starting_hash_key
))
log.debug("Shard {0} ({1}) should end at {2}: {3}".format(shard_num, shard_id, expected_ending_hash_key,
ending_hash_key == expected_ending_hash_key
))
log.debug(
"Shard %s (%s) should start at %s: %s",
shard_num, shard_id, expected_starting_hash_key,
starting_hash_key == expected_starting_hash_key
)
log.debug(
"Shard %s (%s) should end at %s: %s",
shard_num, shard_id, expected_ending_hash_key,
ending_hash_key == expected_ending_hash_key
)
if starting_hash_key != expected_starting_hash_key:
r['error'] = "starting hash keys mismatch, don't know what to do!"
@ -400,16 +404,16 @@ def reshard(stream_name, desired_size, force=False,
if ending_hash_key > expected_ending_hash_key + 1:
# split at expected_ending_hash_key
if force:
log.debug("{0} should end at {1}, actual {2}, splitting".format(
shard_id, expected_ending_hash_key, ending_hash_key))
log.debug("%s should end at %s, actual %s, splitting",
shard_id, expected_ending_hash_key, ending_hash_key)
r = _execute_with_retries(conn,
"split_shard",
StreamName=stream_name,
ShardToSplit=shard_id,
NewStartingHashKey=str(expected_ending_hash_key + 1))
NewStartingHashKey=str(expected_ending_hash_key + 1)) # future lint: disable=blacklisted-function
else:
log.debug("{0} should end at {1}, actual {2} would split".format(
shard_id, expected_ending_hash_key, ending_hash_key))
log.debug("%s should end at %s, actual %s would split",
shard_id, expected_ending_hash_key, ending_hash_key)
if 'error' not in r:
r['result'] = True
@ -421,16 +425,16 @@ def reshard(stream_name, desired_size, force=False,
r['error'] = "failed to find next shard after {0}".format(shard_id)
return r
if force:
log.debug("{0} should continue past {1}, merging with {2}".format(
shard_id, ending_hash_key, next_shard_id))
log.debug("%s should continue past %s, merging with %s",
shard_id, ending_hash_key, next_shard_id)
r = _execute_with_retries(conn,
"merge_shards",
StreamName=stream_name,
ShardToMerge=shard_id,
AdjacentShardToMerge=next_shard_id)
else:
log.debug("{0} should continue past {1}, would merge with {2}".format(
shard_id, ending_hash_key, next_shard_id))
log.debug("%s should continue past %s, would merge with %s",
shard_id, ending_hash_key, next_shard_id)
if 'error' not in r:
r['result'] = True
@ -511,7 +515,7 @@ def _execute_with_retries(conn, function, **kwargs):
max_attempts = 18
max_retry_delay = 10
for attempt in range(max_attempts):
log.info("attempt: {0} function: {1}".format(attempt, function))
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
r['result'] = fn(**kwargs)
@ -521,7 +525,7 @@ def _execute_with_retries(conn, function, **kwargs):
if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception {0}".format(e))
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException

View File

@ -36,7 +36,7 @@ Connection module for Amazon KMS
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging

View File

@ -80,7 +80,7 @@ The dependencies listed above can be installed via package or pip.
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import random
@ -300,8 +300,7 @@ def create_function(FunctionName, Runtime, Role, Handler, ZipFile=None,
else:
break
if func:
log.info('The newly created function name is {0}'.format(
func['FunctionName']))
log.info('The newly created function name is %s', func['FunctionName'])
return {'created': True, 'name': func['FunctionName']}
else:
@ -519,9 +518,9 @@ def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
kwargs = {}
for key in ('SourceArn', 'SourceAccount', 'Qualifier'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
conn.add_permission(FunctionName=FunctionName, StatementId=StatementId,
Action=Action, Principal=str(Principal),
Action=Action, Principal=str(Principal), # future lint: disable=blacklisted-function
**kwargs)
return {'updated': True}
except ClientError as e:
@ -681,8 +680,7 @@ def create_alias(FunctionName, Name, FunctionVersion, Description="",
alias = conn.create_alias(FunctionName=FunctionName, Name=Name,
FunctionVersion=FunctionVersion, Description=Description)
if alias:
log.info(
'The newly created alias name is {0}'.format(alias['Name']))
log.info('The newly created alias name is %s', alias['Name'])
return {'created': True, 'name': alias['Name']}
else:
@ -846,8 +844,7 @@ def create_event_source_mapping(EventSourceArn, FunctionName, StartingPosition,
BatchSize=BatchSize,
StartingPosition=StartingPosition)
if obj:
log.info(
'The newly created event source mapping ID is {0}'.format(obj['UUID']))
log.info('The newly created event source mapping ID is %s', obj['UUID'])
return {'created': True, 'id': obj['UUID']}
else:

View File

@ -48,7 +48,7 @@ Connection module for Amazon RDS
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
@ -266,8 +266,8 @@ def create(name, allocated_storage, db_instance_class, engine,
if wait_status:
wait_stati = ['available', 'modifying', 'backing-up']
if wait_status not in wait_stati:
raise SaltInvocationError('wait_status can be one of: '
'{0}'.format(wait_stati))
raise SaltInvocationError(
'wait_status can be one of: {0}'.format(wait_stati))
if vpc_security_groups:
v_tmp = __salt__['boto_secgroup.convert_to_group_ids'](
groups=vpc_security_groups, region=region, key=key, keyid=keyid,
@ -320,7 +320,7 @@ def create(name, allocated_storage, db_instance_class, engine,
'message': 'RDS instance {0} created (current status '
'{1})'.format(name, stat)}
time.sleep(10)
log.info('Instance status after 10 seconds is: {0}'.format(stat))
log.info('Instance status after 10 seconds is: %s', stat)
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
@ -358,7 +358,7 @@ def create_read_replica(name, source_name, db_instance_class=None,
kwargs = {}
for key in ('OptionGroupName', 'MonitoringRoleArn'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
for key in ('MonitoringInterval', 'Iops', 'Port'):
if locals()[key] is not None:
@ -511,7 +511,7 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot",
if type(value) is bool:
item.update({'ParameterValue': 'on' if value else 'off'})
else:
item.update({'ParameterValue': str(value)})
item.update({'ParameterValue': str(value)}) # future lint: disable=blacklisted-function
param_list.append(item)
if not len(param_list):
@ -687,7 +687,7 @@ def delete(name, skip_final_snapshot=None, final_db_snapshot_identifier=None,
kwargs['SkipFinalSnapshot'] = bool(locals()['skip_final_snapshot'])
if locals()['final_db_snapshot_identifier'] is not None:
kwargs['FinalDBSnapshotIdentifier'] = str(locals()['final_db_snapshot_identifier'])
kwargs['FinalDBSnapshotIdentifier'] = str(locals()['final_db_snapshot_identifier']) # future lint: disable=blacklisted-function
res = conn.delete_db_instance(DBInstanceIdentifier=name, **kwargs)
@ -708,8 +708,8 @@ def delete(name, skip_final_snapshot=None, final_db_snapshot_identifier=None,
raise SaltInvocationError('RDS instance {0} has not been '
'deleted completely after {1} '
'seconds'.format(name, timeout))
log.info('Waiting up to {0} seconds for RDS instance {1} to be '
'deleted.'.format(timeout, name))
log.info('Waiting up to %s seconds for RDS instance %s to be '
'deleted.', timeout, name)
time.sleep(10)
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
@ -808,7 +808,7 @@ def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None,
kwargs = {}
for key in ('Marker', 'Filters'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords'])
@ -853,7 +853,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
kwargs.update({'DBParameterGroupName': name})
for key in ('Marker', 'Source'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function
if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords'])
@ -963,7 +963,7 @@ def _tag_doc(tags):
taglist = []
if tags is not None:
for k, v in six.iteritems(tags):
if str(k).startswith('__'):
if six.text_type(k).startswith('__'):
continue
taglist.append({'Key': str(k), 'Value': str(v)})
taglist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
return taglist

View File

@ -45,7 +45,7 @@ Connection module for Amazon Route53
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -179,7 +179,7 @@ def describe_hosted_zones(zone_id=None, domain_name=None, region=None,
time.sleep(3)
retries -= 1
continue
log.error('Could not list zones: {0}'.format(e.message))
log.error('Could not list zones: %s', e.message)
return []
@ -761,8 +761,8 @@ def _try_func(conn, func, **args):
return getattr(conn, func)(**args)
except AttributeError as e:
# Don't include **args in log messages - security concern.
log.error('Function `{0}()` not found for AWS connection object '
'{1}'.format(func, conn))
log.error('Function `%s()` not found for AWS connection object %s',
func, conn)
return None
except DNSServerError as e:
if tries and e.code == 'Throttling':
@ -770,7 +770,7 @@ def _try_func(conn, func, **args):
time.sleep(5)
tries -= 1
continue
log.error('Failed calling {0}(): {1}'.format(func, str(e)))
log.error('Failed calling %s(): %s', func, e)
return None
@ -781,19 +781,21 @@ def _wait_for_sync(status, conn, wait=True):
if not wait:
return True
orig_wait = wait
log.info('Waiting up to {0} seconds for Route53 changes to synchronize'.format(orig_wait))
log.info('Waiting up to %s seconds for Route53 changes to synchronize', orig_wait)
while wait > 0:
change = conn.get_change(status)
current = change.GetChangeResponse.ChangeInfo.Status
if current == 'INSYNC':
return True
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping {0} seconds waiting for changes to synch (current status {1})'.format(
sleep, current))
log.info(
'Sleeping %s seconds waiting for changes to synch (current status %s)',
sleep, current
)
time.sleep(sleep)
wait -= sleep
continue
log.error('Route53 changes not synced after {0} seconds.'.format(orig_wait))
log.error('Route53 changes not synced after %s seconds.', orig_wait)
return False
@ -867,7 +869,7 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=Fa
deets = conn.get_hosted_zone_by_name(domain_name)
if deets:
log.info('Route53 hosted zone {0} already exists'.format(domain_name))
log.info('Route53 hosted zone %s already exists', domain_name)
return None
args = {'domain_name': domain_name,
@ -890,7 +892,7 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=Fa
return None
if len(vpcs) > 1:
log.error('Private zone requested but multiple VPCs matching given '
'criteria found: {0}.'.format([v['id'] for v in vpcs]))
'criteria found: %s.', [v['id'] for v in vpcs])
return None
vpc = vpcs[0]
if vpc_name:
@ -905,13 +907,13 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=Fa
r = _try_func(conn, 'create_hosted_zone', **args)
if r is None:
log.error('Failed to create hosted zone {0}'.format(domain_name))
log.error('Failed to create hosted zone %s', domain_name)
return None
r = r.get('CreateHostedZoneResponse', {})
# Pop it since it'll be irrelevant by the time we return
status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '')
synced = _wait_for_sync(status, conn, wait=600)
if not synced:
log.error('Hosted zone {0} not synced after 600 seconds.'.format(domain_name))
log.error('Hosted zone %s not synced after 600 seconds.', domain_name)
return None
return r

View File

@ -51,7 +51,7 @@ Connection module for Amazon S3 using boto3
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -172,5 +172,5 @@ def upload_file(
except boto3.exceptions.S3UploadFailedError as e:
return {'error': __utils__['boto3.get_error'](e)}
log.info('S3 object uploaded to {0}'.format(name))
log.info('S3 object uploaded to %s', name)
return {'result': True}

View File

@ -52,7 +52,7 @@ The dependencies listed above can be installed via package or pip.
# pylint: disable=W0106
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -166,14 +166,14 @@ def create(Bucket,
'GrantRead', 'GrantReadACP',
'GrantWrite', 'GrantWriteACP'):
if locals()[arg] is not None:
kwargs[arg] = str(locals()[arg])
kwargs[arg] = str(locals()[arg]) # future lint: disable=blacklisted-function
if LocationConstraint:
kwargs['CreateBucketConfiguration'] = {'LocationConstraint': LocationConstraint}
location = conn.create_bucket(Bucket=Bucket,
**kwargs)
conn.get_waiter("bucket_exists").wait(Bucket=Bucket)
if location:
log.info('The newly created bucket name is located at {0}'.format(location['Location']))
log.info('The newly created bucket name is located at %s', location['Location'])
return {'created': True, 'name': Bucket, 'Location': location['Location']}
else:
@ -490,7 +490,7 @@ def put_acl(Bucket,
'GrantRead', 'GrantReadACP',
'GrantWrite', 'GrantWriteACP'):
if locals()[arg] is not None:
kwargs[arg] = str(locals()[arg])
kwargs[arg] = str(locals()[arg]) # future lint: disable=blacklisted-function
conn.put_bucket_acl(Bucket=Bucket, **kwargs)
return {'updated': True, 'name': Bucket}
except ClientError as e:
@ -770,9 +770,9 @@ def put_tagging(Bucket,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if str(k).startswith('__'):
if six.text_type(k).startswith('__'):
continue
tagslist.append({'Key': str(k), 'Value': str(v)})
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
conn.put_bucket_tagging(Bucket=Bucket, Tagging={
'TagSet': tagslist,
})

View File

@ -44,7 +44,7 @@ Connection module for Amazon Security Groups
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -161,7 +161,7 @@ def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
return None
if name:
if vpc_id is None:
log.debug('getting group for {0}'.format(name))
log.debug('getting group for %s', name)
group_filter = {'group-name': name}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
# security groups can have the same name if groups exist in both
@ -179,7 +179,7 @@ def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
return filtered_groups[0]
return None
elif vpc_id:
log.debug('getting group for {0} in vpc_id {1}'.format(name, vpc_id))
log.debug('getting group for %s in vpc_id %s', name, vpc_id)
group_filter = {'group-name': name, 'vpc_id': vpc_id}
filtered_groups = conn.get_all_security_groups(filters=group_filter)
if len(filtered_groups) == 1:
@ -205,7 +205,7 @@ def _get_group(conn=None, name=None, vpc_id=None, vpc_name=None, group_id=None,
def _parse_rules(sg, rules):
_rules = []
for rule in rules:
log.debug('examining rule {0} for group {1}'.format(rule, sg.id))
log.debug('examining rule %s for group %s', rule, sg.id)
attrs = ['ip_protocol', 'from_port', 'to_port', 'grants']
_rule = odict.OrderedDict()
for attr in attrs:
@ -215,7 +215,7 @@ def _parse_rules(sg, rules):
if attr == 'grants':
_grants = []
for grant in val:
log.debug('examining grant {0} for'.format(grant))
log.debug('examining grant %s for', grant)
g_attrs = {'name': 'source_group_name',
'owner_id': 'source_group_owner_id',
'group_id': 'source_group_group_id',
@ -311,7 +311,7 @@ def get_group_id(name, vpc_id=None, vpc_name=None, region=None, key=None,
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name.startswith('sg-'):
log.debug('group {0} is a group id. get_group_id not called.'.format(name))
log.debug('group %s is a group id. get_group_id not called.', name)
return name
group = _get_group(conn=conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
region=region, key=key, keyid=keyid, profile=profile)
@ -328,7 +328,7 @@ def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=No
salt myminion boto_secgroup.convert_to_group_ids mysecgroup vpc-89yhh7h
'''
log.debug('security group contents {0} pre-conversion'.format(groups))
log.debug('security group contents %s pre-conversion', groups)
group_ids = []
for group in groups:
group_id = get_group_id(name=group, vpc_id=vpc_id,
@ -339,8 +339,8 @@ def convert_to_group_ids(groups, vpc_id=None, vpc_name=None, region=None, key=No
raise CommandExecutionError('Could not resolve Security Group name '
'{0} to a Group ID'.format(group))
else:
group_ids.append(str(group_id))
log.debug('security group contents {0} post-conversion'.format(group_ids))
group_ids.append(six.text_type(group_id))
log.debug('security group contents %s post-conversion', group_ids)
return group_ids
@ -397,7 +397,7 @@ def create(name, description, vpc_id=None, vpc_name=None, region=None, key=None,
created = conn.create_security_group(name, description, vpc_id)
if created:
log.info('Created security group {0}.'.format(name))
log.info('Created security group %s.', name)
return True
else:
msg = 'Failed to create security group {0}.'.format(name)
@ -422,8 +422,7 @@ def delete(name=None, group_id=None, region=None, key=None, keyid=None,
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group {0} with id {1}.'.format(group.name,
group.id))
log.info('Deleted security group %s with id %s.', group.name, group.id)
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
@ -467,8 +466,8 @@ def authorize(name=None, source_group_name=None,
cidr_ip=cidr_ip, group_id=group.id,
src_group_id=source_group_group_id)
if added:
log.info('Added rule to security group {0} with id {1}'
.format(group.name, group.id))
log.info('Added rule to security group %s with id %s',
group.name, group.id)
return True
else:
msg = ('Failed to add rule to security group {0} with id {1}.'
@ -520,8 +519,8 @@ def revoke(name=None, source_group_name=None,
src_group_id=source_group_group_id)
if revoked:
log.info('Removed rule from security group {0} with id {1}.'
.format(group.name, group.id))
log.info('Removed rule from security group %s with id %s.',
group.name, group.id)
return True
else:
msg = ('Failed to remove rule from security group {0} with id {1}.'
@ -571,7 +570,8 @@ def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
vpcs = conn.get_all_vpcs(**filter_parameters)
log.debug('The filters criteria {0} matched the following VPCs:{1}'.format(filter_parameters, vpcs))
log.debug('The filters criteria %s matched the following VPCs:%s',
filter_parameters, vpcs)
if vpcs:
return [vpc.id for vpc in vpcs]

View File

@ -42,7 +42,7 @@ Connection module for Amazon SNS
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
@ -120,7 +120,7 @@ def create(name, region=None, key=None, keyid=None, profile=None):
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.create_topic(name)
log.info('Created SNS topic {0}'.format(name))
log.info('Created SNS topic %s', name)
_invalidate_cache()
return True
@ -135,7 +135,7 @@ def delete(name, region=None, key=None, keyid=None, profile=None):
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_topic(get_arn(name, region, key, keyid, profile))
log.info('Deleted SNS topic {0}'.format(name))
log.info('Deleted SNS topic %s', name)
_invalidate_cache()
return True
@ -170,7 +170,7 @@ def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, prof
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint)
log.info('Subscribe {0} {1} to {2} topic'.format(protocol, endpoint, topic))
log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic)
try:
del __context__[_subscriptions_cache_key(topic)]
except KeyError:
@ -197,9 +197,9 @@ def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, prof
try:
conn.unsubscribe(subscription_arn)
log.info('Unsubscribe {0} to {1} topic'.format(subscription_arn, topic))
log.info('Unsubscribe %s to %s topic', subscription_arn, topic)
except Exception as e:
log.error('Unsubscribe Error: {0}'.format(e))
log.error('Unsubscribe Error', exc_info=True)
return False
else:
__context__.pop(_subscriptions_cache_key(topic), None)

View File

@ -44,7 +44,7 @@ Connection module for Amazon SQS
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging

View File

@ -127,7 +127,7 @@ Deleting VPC peering connection via this module
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import time
@ -235,7 +235,7 @@ def check_vpc(vpc_id=None, vpc_name=None, region=None, key=None,
profile=profile)
elif not _find_vpcs(vpc_id=vpc_id, region=region, key=key, keyid=keyid,
profile=profile):
log.info('VPC {0} does not exist.'.format(vpc_id))
log.info('VPC %s does not exist.', vpc_id)
return None
return vpc_id
@ -267,7 +267,7 @@ def _create_resource(resource, name=None, tags=None, region=None, key=None,
if isinstance(r, bool):
return {'created': True}
else:
log.info('A {0} with id {1} was created'.format(resource, r.id))
log.info('A %s with id %s was created', resource, r.id)
_maybe_set_name_tag(name, r)
_maybe_set_tags(tags, r)
@ -521,7 +521,8 @@ def _find_vpcs(vpc_id=None, vpc_name=None, cidr=None, tags=None,
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
vpcs = conn.get_all_vpcs(**filter_parameters)
log.debug('The filters criteria {0} matched the following VPCs:{1}'.format(filter_parameters, vpcs))
log.debug('The filters criteria %s matched the following VPCs:%s',
filter_parameters, vpcs)
if vpcs:
return [vpc.id for vpc in vpcs]
@ -545,7 +546,7 @@ def _get_id(vpc_name=None, cidr=None, tags=None, region=None, key=None,
vpc_ids = _find_vpcs(vpc_name=vpc_name, cidr=cidr, tags=tags, region=region,
key=key, keyid=keyid, profile=profile)
if vpc_ids:
log.debug("Matching VPC: {0}".format(" ".join(vpc_ids)))
log.debug("Matching VPC: %s", " ".join(vpc_ids))
if len(vpc_ids) == 1:
vpc_id = vpc_ids[0]
if vpc_name:
@ -635,7 +636,7 @@ def create(cidr_block, instance_tenancy=None, vpc_name=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc = conn.create_vpc(cidr_block, instance_tenancy=instance_tenancy)
if vpc:
log.info('The newly created VPC id is {0}'.format(vpc.id))
log.info('The newly created VPC id is %s', vpc.id)
_maybe_set_name_tag(vpc_name, vpc)
_maybe_set_tags(tags, vpc)
@ -688,7 +689,7 @@ def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
'VPC {0} not found'.format(vpc_name)}}
if conn.delete_vpc(vpc_id):
log.info('VPC {0} was deleted.'.format(vpc_id))
log.info('VPC %s was deleted.', vpc_id)
if vpc_name:
_cache_id(vpc_name, resource_id=vpc_id,
invalidate=True,
@ -697,7 +698,7 @@ def delete(vpc_id=None, name=None, vpc_name=None, tags=None,
profile=profile)
return {'deleted': True}
else:
log.warning('VPC {0} was not deleted.'.format(vpc_id))
log.warning('VPC %s was not deleted.', vpc_id)
return {'deleted': False}
except BotoServerError as e:
return {'deleted': False, 'error': salt.utils.boto.get_error(e)}
@ -747,7 +748,7 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: {0}'.format(vpc.id))
log.debug('Found VPC: %s', vpc.id)
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
@ -841,7 +842,8 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
subnets = conn.get_all_subnets(**filter_parameters)
log.debug('The filters criteria {0} matched the following subnets: {1}'.format(filter_parameters, subnets))
log.debug('The filters criteria %s matched the following subnets: %s',
filter_parameters, subnets)
if subnets:
return [subnet.id for subnet in subnets]
@ -964,12 +966,13 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
return {'exists': False}
return {'error': boto_err}
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
log.debug('The filters criteria %s matched the following subnets:%s',
filter_parameters, subnets)
if subnets:
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
log.info('Subnet %s exists.', subnet_name or subnet_id)
return {'exists': True}
else:
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
log.info('Subnet %s does not exist.', subnet_name or subnet_id)
return {'exists': False}
@ -1006,10 +1009,10 @@ def get_subnet_association(subnets, region=None, key=None, keyid=None,
# vpc_id values
vpc_ids = set()
for subnet in subnets:
log.debug('examining subnet id: {0} for vpc_id'.format(subnet.id))
log.debug('examining subnet id: %s for vpc_id', subnet.id)
if subnet in subnets:
log.debug('subnet id: {0} is associated with vpc id: {1}'
.format(subnet.id, subnet.vpc_id))
log.debug('subnet id: %s is associated with vpc id: %s',
subnet.id, subnet.vpc_id)
vpc_ids.add(subnet.vpc_id)
if not vpc_ids:
return {'vpc_id': None}
@ -1044,7 +1047,7 @@ def describe_subnet(subnet_id=None, subnet_name=None, region=None,
if not subnet:
return {'subnet': None}
log.debug('Found subnet: {0}'.format(subnet.id))
log.debug('Found subnet: %s', subnet.id)
keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id')
ret = {'subnet': dict((k, getattr(subnet, k)) for k in keys)}
@ -1101,8 +1104,8 @@ def describe_subnets(subnet_ids=None, subnet_names=None, vpc_id=None, cidr=None,
filter_parameters['filters']['tag:Name'] = subnet_names
subnets = conn.get_all_subnets(subnet_ids=subnet_ids, **filter_parameters)
log.debug('The filters criteria {0} matched the following subnets: '
'{1}'.format(filter_parameters, subnets))
log.debug('The filters criteria %s matched the following subnets: %s',
filter_parameters, subnets)
if not subnets:
return {'subnets': None}
@ -1157,8 +1160,10 @@ def create_internet_gateway(internet_gateway_name=None, vpc_id=None,
if r.get('created') and vpc_id:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.attach_internet_gateway(r['id'], vpc_id)
log.info('Attached internet gateway {0} to '
'VPC {1}'.format(r['id'], (vpc_name or vpc_id)))
log.info(
'Attached internet gateway %s to VPC %s',
r['id'], vpc_name or vpc_id
)
return r
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
@ -1262,7 +1267,8 @@ def _find_nat_gateways(nat_gateway_id=None, subnet_id=None, subnet_name=None, vp
for gw in ret.get('NatGateways', []):
if gw.get('State') in states:
nat_gateways.append(gw)
log.debug('The filters criteria {0} matched the following nat gateways: {1}'.format(filter_parameters, nat_gateways))
log.debug('The filters criteria %s matched the following nat gateways: %s',
filter_parameters, nat_gateways)
if nat_gateways:
return nat_gateways
@ -1569,8 +1575,10 @@ def create_dhcp_options(domain_name=None, domain_name_servers=None, ntp_servers=
if r.get('created') and vpc_id:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.associate_dhcp_options(r['id'], vpc_id)
log.info('Associated options {0} to '
'VPC {1}'.format(r['id'], (vpc_name or vpc_id)))
log.info(
'Associated options %s to VPC %s',
r['id'], vpc_name or vpc_id
)
return r
except BotoServerError as e:
return {'created': False, 'error': salt.utils.boto.get_error(e)}
@ -1659,10 +1667,12 @@ def associate_dhcp_options_to_vpc(dhcp_options_id, vpc_id=None, vpc_name=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.associate_dhcp_options(dhcp_options_id, vpc_id):
log.info('DHCP options with id {0} were associated with VPC {1}'.format(dhcp_options_id, vpc_id))
log.info('DHCP options with id %s were associated with VPC %s',
dhcp_options_id, vpc_id)
return {'associated': True}
else:
log.warning('DHCP options with id {0} were not associated with VPC {1}'.format(dhcp_options_id, vpc_id))
log.warning('DHCP options with id %s were not associated with VPC %s',
dhcp_options_id, vpc_id)
return {'associated': False, 'error': {'message': 'DHCP options could not be associated.'}}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
@ -1855,11 +1865,13 @@ def associate_network_acl_to_subnet(network_acl_id=None, subnet_id=None,
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.associate_network_acl(network_acl_id, subnet_id)
if association_id:
log.info('Network ACL with id {0} was associated with subnet {1}'.format(network_acl_id, subnet_id))
log.info('Network ACL with id %s was associated with subnet %s',
network_acl_id, subnet_id)
return {'associated': True, 'id': association_id}
else:
log.warning('Network ACL with id {0} was not associated with subnet {1}'.format(network_acl_id, subnet_id))
log.warning('Network ACL with id %s was not associated with subnet %s',
network_acl_id, subnet_id)
return {'associated': False, 'error': {'message': 'ACL could not be assocaited.'}}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
@ -1949,9 +1961,9 @@ def _create_network_acl_entry(network_acl_id=None, rule_number=None, protocol=No
icmp_type=icmp_type, port_range_from=port_range_from,
port_range_to=port_range_to)
if created:
log.info('Network ACL entry was {0}'.format(rkey))
log.info('Network ACL entry was %s', rkey)
else:
log.warning('Network ACL entry was not {0}'.format(rkey))
log.warning('Network ACL entry was not %s', rkey)
return {rkey: created}
except BotoServerError as e:
return {rkey: False, 'error': salt.utils.boto.get_error(e)}
@ -2167,10 +2179,10 @@ def route_exists(destination_cidr_block, route_table_name=None, route_table_id=N
}
route_comp = set(route_dict.items()) ^ set(route_check.items())
if len(route_comp) == 0:
log.info('Route {0} exists.'.format(destination_cidr_block))
log.info('Route %s exists.', destination_cidr_block)
return {'exists': True}
log.warning('Route {0} does not exist.'.format(destination_cidr_block))
log.warning('Route %s does not exist.', destination_cidr_block)
return {'exists': False}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
@ -2221,7 +2233,8 @@ def associate_route_table(route_table_id=None, subnet_id=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.associate_route_table(route_table_id, subnet_id)
log.info('Route table {0} was associated with subnet {1}'.format(route_table_id, subnet_id))
log.info('Route table %s was associated with subnet %s',
route_table_id, subnet_id)
return {'association_id': association_id}
except BotoServerError as e:
return {'associated': False, 'error': salt.utils.boto.get_error(e)}
@ -2245,10 +2258,10 @@ def disassociate_route_table(association_id, region=None, key=None, keyid=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.disassociate_route_table(association_id):
log.info('Route table with association id {0} has been disassociated.'.format(association_id))
log.info('Route table with association id %s has been disassociated.', association_id)
return {'disassociated': True}
else:
log.warning('Route table with association id {0} has not been disassociated.'.format(association_id))
log.warning('Route table with association id %s has not been disassociated.', association_id)
return {'disassociated': False}
except BotoServerError as e:
return {'disassociated': False, 'error': salt.utils.boto.get_error(e)}
@ -2269,7 +2282,8 @@ def replace_route_table_association(association_id, route_table_id, region=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
association_id = conn.replace_route_table_association_with_assoc(association_id, route_table_id)
log.info('Route table {0} was reassociated with association id {1}'.format(route_table_id, association_id))
log.info('Route table %s was reassociated with association id %s',
route_table_id, association_id)
return {'replaced': True, 'association_id': association_id}
except BotoServerError as e:
return {'replaced': False, 'error': salt.utils.boto.get_error(e)}
@ -2446,12 +2460,16 @@ def replace_route(route_table_id=None, destination_cidr_block=None,
if conn.replace_route(route_table_id, destination_cidr_block,
gateway_id=gateway_id, instance_id=instance_id,
interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id):
log.info('Route with cidr block {0} on route table {1} was '
'replaced'.format(route_table_id, destination_cidr_block))
log.info(
'Route with cidr block %s on route table %s was replaced',
route_table_id, destination_cidr_block
)
return {'replaced': True}
else:
log.warning('Route with cidr block {0} on route table {1} was not replaced'.format(route_table_id,
destination_cidr_block))
log.warning(
'Route with cidr block %s on route table %s was not replaced',
route_table_id, destination_cidr_block
)
return {'replaced': False}
except BotoServerError as e:
return {'replaced': False, 'error': salt.utils.boto.get_error(e)}
@ -2613,8 +2631,7 @@ def _create_dhcp_options(conn, domain_name=None, domain_name_servers=None, ntp_s
def _maybe_set_name_tag(name, obj):
if name:
obj.add_tag("Name", name)
log.debug('{0} is now named as {1}'.format(obj, name))
log.debug('%s is now named as %s', obj, name)
def _maybe_set_tags(tags, obj):
@ -2626,17 +2643,16 @@ def _maybe_set_tags(tags, obj):
except AttributeError:
for tag, value in tags.items():
obj.add_tag(tag, value)
log.debug('The following tags: {0} were added to {1}'.format(', '.join(tags), obj))
log.debug('The following tags: %s were added to %s', ', '.join(tags), obj)
def _maybe_set_dns(conn, vpcid, dns_support, dns_hostnames):
if dns_support:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_support=dns_support)
log.debug('DNS spport was set to: {0} on vpc {1}'.format(dns_support, vpcid))
log.debug('DNS spport was set to: %s on vpc %s', dns_support, vpcid)
if dns_hostnames:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_hostnames=dns_hostnames)
log.debug('DNS hostnames was set to: {0} on vpc {1}'.format(dns_hostnames, vpcid))
log.debug('DNS hostnames was set to: %s on vpc %s', dns_hostnames, vpcid)
def _maybe_name_route_table(conn, vpcid, vpc_name):
@ -2656,7 +2672,7 @@ def _maybe_name_route_table(conn, vpcid, vpc_name):
name = '{0}-default-table'.format(vpc_name)
_maybe_set_name_tag(name, default_table)
log.debug('Default route table name was set to: {0} on vpc {1}'.format(name, vpcid))
log.debug('Default route table name was set to: %s on vpc %s', name, vpcid)
def _key_iter(key, keys, item):
@ -3018,7 +3034,7 @@ def delete_vpc_peering_connection(conn_id=None, conn_name=None, region=None,
return {'msg': 'VPC peering connection deleted.'}
except botocore.exceptions.ClientError as err:
e = salt.utils.boto.get_error(err)
log.error('Failed to delete VPC peering {0}: {1}'.format(conn_name or conn_id, e))
log.error('Failed to delete VPC peering %s: %s', conn_name or conn_id, e)
return {'error': e}
@ -3132,7 +3148,7 @@ def peering_connection_pending_from_vpc(conn_id=None, conn_name=None, vpc_id=Non
if vpc_name:
vpc_id = check_vpc(vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile)
if not vpc_id:
log.warning('Could not resolve VPC name {0} to an ID'.format(vpc_name))
log.warning('Could not resolve VPC name %s to an ID', vpc_name)
return False
conn = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)

View File

@ -8,7 +8,7 @@ Note that npm, git and bower must be installed for this module to be
available.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging

View File

@ -2,7 +2,7 @@
'''
Module for gathering and managing bridging information
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import sys
import re

View File

@ -10,7 +10,7 @@ Manage the password database on BSD systems
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
try:
import pwd
except ImportError:
@ -19,6 +19,7 @@ except ImportError:
# Import salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
# Define the module's virtual name
@ -66,7 +67,7 @@ def info(name):
'passwd': ''}
if not isinstance(name, six.string_types):
name = str(name)
name = six.text_type(name)
if ':' in name:
raise SaltInvocationError('Invalid username \'{0}\''.format(name))
@ -78,10 +79,11 @@ def info(name):
try:
with salt.utils.files.fopen('/etc/master.passwd', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('{0}:'.format(name)):
key = line.split(':')
change, expire = key[5:7]
ret['passwd'] = str(key[1])
ret['passwd'] = six.text_type(key[1])
break
except IOError:
change = expire = None

View File

@ -19,11 +19,10 @@ Module for managing BTRFS file systems.
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import uuid
import logging
# Import Salt libs
@ -34,8 +33,6 @@ from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''

View File

@ -32,9 +32,7 @@ Management of the Salt beacons
- 1.0
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
from __future__ import absolute_import, print_function, unicode_literals
def present(name,

View File

@ -6,7 +6,7 @@ A state module designed to enforce load-balancing configurations for F5 Big-IP e
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.json

View File

@ -20,7 +20,7 @@ A state module to manage blockdevices
.. versionadded:: 2014.7.0
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
@ -167,7 +167,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs):
# This retry maybe superfluous - switching to blkid
for i in range(10):
log.info('Check blk fstype attempt %s of 10', str(i+1))
log.info('Check blk fstype attempt %d of 10', i + 1)
current_fs = _checkblk(name)
if current_fs == fs_type:

View File

@ -86,10 +86,7 @@ XXX FIXME
'''
# Import Python Libs
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
from __future__ import absolute_import, print_function, unicode_literals
def __virtual__():

View File

@ -65,7 +65,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
#pylint: disable=E1320
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import uuid
# Import Salt Libs
@ -129,7 +129,7 @@ def _to_aws_encoding(instring):
raise SaltInvocationError("Invalid Route53 domain character seen (octal {0}) in string "
"{1}. Do you need to punycode it?".format(octal, instring))
ret = ''.join(outlist)
log.debug('Name after massaging is: {}'.format(ret))
log.debug('Name after massaging is: %s', ret)
return ret
@ -247,7 +247,7 @@ def hosted_zone_present(name, Name=None, PrivateZone=False,
if len(fixed_vpcs) > 1:
add_vpcs = fixed_vpcs[1:]
fixed_vpcs = fixed_vpcs[:1]
CallerReference = CallerReference if CallerReference else str(uuid.uuid4())
CallerReference = CallerReference if CallerReference else str(uuid.uuid4()) # future lint: disable=blacklisted-function
else:
# Currently the only modifiable traits about a zone are associated VPCs and the comment.
zone = zone[0]
@ -600,7 +600,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name
fields = rr.split(':')
if fields[1] == 'ec2_instance_tag':
if len(fields) != 5:
log.warning("Invalid magic RR value seen: '{}'. Passing as-is.".format(rr))
log.warning("Invalid magic RR value seen: '%s'. Passing as-is.", rr)
fixed_rrs += [rr]
continue
tag_name = fields[2]
@ -625,7 +625,7 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name
instance = r[0]
res = getattr(instance, instance_attr, None)
if res:
log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id))
log.debug('Found %s %s for instance %s', instance_attr, res, instance.id)
fixed_rrs += [_to_aws_encoding(res)]
else:
ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr,
@ -649,9 +649,9 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name
profile=profile)
if SetIdentifier and recordsets:
log.debug('Filter recordsets {} by SetIdentifier {}.'.format(recordsets, SetIdentifier))
log.debug('Filter recordsets %s by SetIdentifier %s.', recordsets, SetIdentifier)
recordsets = [r for r in recordsets if r.get('SetIdentifier') == SetIdentifier]
log.debug('Resulted in recordsets {}.'.format(recordsets))
log.debug('Resulted in recordsets %s.', recordsets)
create = False
update = False
@ -791,9 +791,9 @@ def rr_absent(name, HostedZoneId=None, DomainName=None, PrivateZone=False,
StartRecordName=Name, StartRecordType=Type, region=region, key=key, keyid=keyid,
profile=profile)
if SetIdentifier and recordsets:
log.debug('Filter recordsets {} by SetIdentifier {}.'.format(recordsets, SetIdentifier))
log.debug('Filter recordsets %s by SetIdentifier %s.', recordsets, SetIdentifier)
recordsets = [r for r in recordsets if r.get('SetIdentifier') == SetIdentifier]
log.debug('Resulted in recordsets {}.'.format(recordsets))
log.debug('Resulted in recordsets %s.', recordsets)
if not recordsets:
ret['comment'] = 'Route 53 resource record {} with type {} already absent.'.format(
Name, Type)

View File

@ -54,7 +54,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import re
import logging
@ -277,12 +277,14 @@ def topic_absent(name, unsubscribe=False, region=None, key=None, keyid=None, pro
for sub in current['Subscriptions']:
if sub['SubscriptionArn'] == 'PendingConfirmation':
# The API won't let you delete subscriptions in pending status...
log.warning('Ignoring PendingConfirmation subscription {0} {1} on topic '
'{2}'.format(sub['Protocol'], sub['Endpoint'], sub['TopicArn']))
log.warning(
'Ignoring PendingConfirmation subscription %s %s on '
'topic %s', sub['Protocol'], sub['Endpoint'], sub['TopicArn']
)
continue
if __salt__['boto3_sns.unsubscribe'](sub['SubscriptionArn'], region=region, key=key,
keyid=keyid, profile=profile):
log.debug('Deleted subscription {0} for SNS topic {1}'.format(sub, TopicArn))
log.debug('Deleted subscription %s for SNS topic %s', sub, TopicArn)
something_changed = True
else:
ret['comment'] = 'Failed to delete subscription {0} for SNS topic {1}'.format(

View File

@ -47,7 +47,7 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import logging
import os
@ -440,7 +440,7 @@ def _gen_md5_filehash(fname, *args):
_hash.update(chunk)
for extra_arg in args:
_hash.update(six.b(str(extra_arg)))
_hash.update(six.b(str(extra_arg))) # future lint: disable=blacklisted-function
return _hash.hexdigest()
@ -748,7 +748,7 @@ class _Swagger(object):
if 'responses' not in opobj:
raise ValueError('missing mandatory responses field in path item object')
for rescode, resobj in six.iteritems(opobj.get('responses')):
if not self._is_http_error_rescode(str(rescode)):
if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function
continue
# only check for response code from 400-599
@ -1598,7 +1598,7 @@ class _Swagger(object):
if 'responses' in method_data:
for response, response_data in six.iteritems(method_data['responses']):
httpStatus = str(response)
httpStatus = str(response) # future lint: disable=blacklisted-function
method_response = self._parse_method_response(method_name.lower(),
_Swagger.SwaggerMethodResponse(response_data), httpStatus)

View File

@ -193,13 +193,14 @@ Overriding the alarm values on the resource:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import logging
import copy
# Import Salt libs
import salt.utils.dictupdate as dictupdate
import salt.utils.stringutils
from salt.ext import six
from salt.exceptions import SaltInvocationError
@ -461,17 +462,17 @@ def present(
profile
)
vpc_id = vpc_id.get('vpc_id')
log.debug('Auto Scaling Group {0} is associated with VPC ID {1}'
.format(name, vpc_id))
log.debug('Auto Scaling Group %s is associated with VPC ID %s',
name, vpc_id)
else:
vpc_id = None
log.debug('Auto Scaling Group {0} has no VPC Association'
.format(name))
log.debug('Auto Scaling Group %s has no VPC Association', name)
# if launch_config is defined, manage the launch config first.
# hash the launch_config dict to create a unique name suffix and then
# ensure it is present
if launch_config:
launch_config_name = launch_config_name + '-' + hashlib.md5(str(launch_config)).hexdigest()
launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function
launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest()
args = {
'name': launch_config_name,
'region': region,
@ -628,8 +629,7 @@ def present(
if asg_property in asg:
_value = __utils__['boto3.ordered'](asg[asg_property])
if not value == _value:
log_msg = '{0} asg_property differs from {1}'
log.debug(log_msg.format(value, _value))
log.debug('%s asg_property differs from %s', value, _value)
proposed.setdefault('old', {}).update({asg_property: _value})
proposed.setdefault('new', {}).update({asg_property: value})
need_update = True

View File

@ -38,7 +38,7 @@ Connection module for Amazon Cloud Formation
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
@ -149,13 +149,13 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi
return i
_valid = _validate(template_body, template_url, region, key, keyid, profile)
log.debug('Validate is : {0}.'.format(_valid))
log.debug('Validate is : %s.', _valid)
if _valid is not True:
code, message = _valid
ret['result'] = False
ret['comment'] = 'Template could not be validated.\n{0} \n{1}'.format(code, message)
return ret
log.debug('Template {0} is valid.'.format(name))
log.debug('Template %s is valid.', name)
if __salt__['boto_cfn.exists'](name, region, key, keyid, profile):
template = __salt__['boto_cfn.get_template'](name, region, key, keyid, profile)
template = template['GetTemplateResponse']['GetTemplateResult']['TemplateBody'].encode('ascii', 'ignore')
@ -163,7 +163,7 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi
_template_body = salt.utils.json.loads(template_body)
compare = salt.utils.compat.cmp(template, _template_body)
if compare != 0:
log.debug('Templates are not the same. Compare value is {0}'.format(compare))
log.debug('Templates are not the same. Compare value is %s', compare)
# At this point we should be able to run update safely since we already validated the template
if __opts__['test']:
ret['comment'] = 'Stack {0} is set to be updated.'.format(name)
@ -178,7 +178,7 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi
region, key, keyid, profile)
if isinstance(updated, six.string_types):
code, message = _get_error(updated)
log.debug('Update error is {0} and message is {1}'.format(code, message))
log.debug('Update error is %s and message is %s', code, message)
ret['result'] = False
ret['comment'] = 'Stack {0} could not be updated.\n{1} \n{2}.'.format(name, code, message)
return ret
@ -257,10 +257,10 @@ def _get_template(template, name):
def _validate(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None):
# Validates template. returns true if template syntax is correct.
validate = __salt__['boto_cfn.validate_template'](template_body, template_url, region, key, keyid, profile)
log.debug('Validate result is {0}.'.format(str(validate)))
log.debug('Validate result is %s.', validate)
if isinstance(validate, six.string_types):
code, message = _get_error(validate)
log.debug('Validate error is {0} and message is {1}.'.format(code, message))
log.debug('Validate error is %s and message is %s.', code, message)
return code, message
return True

View File

@ -45,7 +45,7 @@ either passed in as a dict, or a string to pull from pillars or minion config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import difflib
import logging

View File

@ -53,7 +53,7 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import os.path

View File

@ -54,7 +54,10 @@ as a passed in dict, or as a string to pull from pillars or minion config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.data
# Import 3rd-party libs
from salt.ext import six
@ -116,10 +119,11 @@ def present(
if k not in alarm_details:
difference.append("{0}={1} (new)".format(k, v))
continue
v2 = alarm_details[k]
v = salt.utils.data.decode(v)
v2 = salt.utils.data.decode(alarm_details[k])
if v == v2:
continue
if isinstance(v, six.string_types) and str(v) == str(v2):
if isinstance(v, six.string_types) and v == v2:
continue
if isinstance(v, float) and v == float(v2):
continue

View File

@ -53,7 +53,7 @@ config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os

View File

@ -47,7 +47,7 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
from salt.ext.six import string_types

View File

@ -50,12 +50,13 @@ config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import copy
import datetime
import difflib
# Import Salt lobs
import salt.utils.data
import salt.utils.json
from salt.ext import six
from salt.ext.six.moves import zip
@ -398,11 +399,11 @@ def _diff(old_pipeline_definition, new_pipeline_definition):
old_pipeline_definition.pop('ResponseMetadata', None)
new_pipeline_definition.pop('ResponseMetadata', None)
diff = difflib.unified_diff(
diff = salt.utils.data.decode(difflib.unified_diff(
salt.utils.json.dumps(old_pipeline_definition, indent=4).splitlines(True),
salt.utils.json.dumps(new_pipeline_definition, indent=4).splitlines(True),
)
return str('').join(diff) # future lint: disable=blacklisted-function
))
return ''.join(diff) # future lint: disable=blacklisted-function
def _standardize(structure):

View File

@ -155,7 +155,7 @@ Setting the alarms in a pillar:
period: 900
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import math
import sys
@ -541,7 +541,7 @@ def _update_global_secondary_indexes(ret, changes_old, changes_new, comments, ex
existing_index_names, provisioned_gsi_config, gsi_config)
except GsiNotUpdatableError as e:
ret['result'] = False
ret['comment'] = str(e)
ret['comment'] = six.text_type(e)
return
if index_updates:
@ -563,14 +563,13 @@ def _update_global_secondary_indexes(ret, changes_old, changes_new, comments, ex
if success:
comments.append(
'Updated GSIs with new throughputs {0}'.format(str(index_updates)))
'Updated GSIs with new throughputs {0}'.format(index_updates))
for index_name in index_updates:
changes_old['global_indexes'][index_name] = provisioned_throughputs[index_name]
changes_new['global_indexes'][index_name] = index_updates[index_name]
else:
ret['result'] = False
ret['comment'] = 'Failed to update GSI throughputs {0}'.format(
str(index_updates))
ret['comment'] = 'Failed to update GSI throughputs {0}'.format(index_updates)
def _determine_gsi_updates(existing_index_names, provisioned_gsi_config, gsi_config):
@ -763,8 +762,7 @@ def absent(name,
return ret
if __opts__['test']:
ret['comment'] = 'DynamoDB table {0} is set to be deleted \
'.format(name)
ret['comment'] = 'DynamoDB table {0} is set to be deleted'.format(name)
ret['result'] = None
return ret
@ -774,7 +772,6 @@ def absent(name,
ret['changes'].setdefault('old', 'Table {0} exists'.format(name))
ret['changes'].setdefault('new', 'Table {0} deleted'.format(name))
else:
ret['comment'] = 'Failed to delete DynamoDB table {0} \
'.format(name)
ret['comment'] = 'Failed to delete DynamoDB table {0}'.format(name)
ret['result'] = False
return ret

View File

@ -52,7 +52,7 @@ The below code deletes a key pair:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
from time import time, sleep
@ -87,7 +87,7 @@ def key_present(name, save_private=None, upload_public=None, region=None,
'changes': {}
}
exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile)
log.debug('exists is {0}'.format(exists))
log.debug('exists is %s', exists)
if upload_public is not None and 'salt://' in upload_public:
try:
upload_public = __salt__['cp.get_file_str'](upload_public)
@ -151,7 +151,7 @@ def key_absent(name, region=None, key=None, keyid=None, profile=None):
deleted = __salt__['boto_ec2.delete_key'](name, region,
key, keyid,
profile)
log.debug('exists is {0}'.format(deleted))
log.debug('exists is %s', deleted)
if deleted:
ret['result'] = True
ret['comment'] = 'The key {0} is deleted.'.format(name)
@ -354,7 +354,7 @@ def eni_present(
if 'name' not in arecord:
msg = 'The arecord must contain a "name" property.'
raise SaltInvocationError(msg)
log.debug('processing arecord {0}'.format(arecord))
log.debug('processing arecord %s', arecord)
_ret = None
dns_provider = 'boto_route53'
arecord['record_type'] = 'A'
@ -383,7 +383,7 @@ def eni_present(
if 'region' not in arecord:
arecord['region'] = region
_ret = __states__['.'.join([dns_provider, 'present'])](**arecord)
log.debug('ret from dns_provider.present = {0}'.format(_ret))
log.debug('ret from dns_provider.present = %s', _ret)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
@ -851,7 +851,7 @@ def instance_present(name, instance_name=None, instance_id=None, image_id=None,
ret['comment'] = 'Failed to allocate new EIP.'
return ret
allocation_id = r['allocation_id']
log.info("New EIP with address {0} allocated.".format(r['public_ip']))
log.info("New EIP with address %s allocated.", r['public_ip'])
else:
log.info("EIP not requested.")
@ -866,8 +866,10 @@ def instance_present(name, instance_name=None, instance_id=None, image_id=None,
if r:
break
else:
log.info("Waiting up to {0} secs for new EIP {1} to become available".format(
tries * secs, public_ip or allocation_id))
log.info(
'Waiting up to %s secs for new EIP %s to become available',
tries * secs, public_ip or allocation_id
)
time.sleep(secs)
if not r:
ret['result'] = False
@ -1092,11 +1094,11 @@ def instance_absent(name, instance_name=None, instance_id=None,
# Race here - sometimes the terminate above will already have dropped this
if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
**base_args):
log.warning("Failed to disassociate EIP {0}.".format(ip))
log.warning("Failed to disassociate EIP %s.", ip)
if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip,
**base_args):
log.info("Released EIP address {0}".format(public_ip or r[0]['public_ip']))
log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
else:
ret['result'] = False
@ -1194,7 +1196,7 @@ def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
ret['result'] = False
return ret
vol = vols[0]
log.info('Matched Volume ID {0}'.format(vol))
log.info('Matched Volume ID %s', vol)
if __opts__['test']:
ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)

View File

@ -77,7 +77,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)

View File

@ -79,7 +79,7 @@ config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
@ -249,7 +249,7 @@ def present(name, DomainName,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion),
ElasticsearchVersion=str(ElasticsearchVersion), # future lint: disable=blacklisted-function
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
@ -269,9 +269,15 @@ def present(name, DomainName,
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion):
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False
ret['comment'] = 'Failed to update domain: version cannot be modified from {0} to {1}.'.format(_status.get('ElasticsearchVersion'), str(ElasticsearchVersion))
ret['comment'] = (
'Failed to update domain: version cannot be modified '
'from {0} to {1}.'.format(
_status.get('ElasticsearchVersion'),
str(ElasticsearchVersion) # future lint: disable=blacklisted-function
)
)
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,

View File

@ -236,14 +236,15 @@ Tags can also be set:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Libs
import hashlib
import re
import salt.utils.dictupdate as dictupdate
from salt.utils import exactly_one
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
from salt.ext import six
@ -376,7 +377,7 @@ def present(name, listeners, availability_zones=None, subnets=None,
# load data from attributes_from_pillar and merge with attributes
tmp = __salt__['config.option'](attributes_from_pillar, {})
attributes = dictupdate.update(tmp, attributes) if attributes else tmp
attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
@ -402,7 +403,7 @@ def present(name, listeners, availability_zones=None, subnets=None,
if attributes:
_ret = _attributes_present(name, attributes, region, key, keyid, profile)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
@ -410,7 +411,7 @@ def present(name, listeners, availability_zones=None, subnets=None,
_ret = _health_check_present(name, health_check, region, key, keyid,
profile)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
@ -430,14 +431,14 @@ def present(name, listeners, availability_zones=None, subnets=None,
for p in ('profile', 'key', 'keyid', 'region', 'wait_for_sync'):
cname[p] = locals().get(p) if p not in cname else cname[p]
_ret = __states__['boto_route53.present'](**cname)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
return ret
_ret = _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
@ -445,14 +446,14 @@ def present(name, listeners, availability_zones=None, subnets=None,
_ret = _policies_present(name, policies, policies_from_pillar, listeners,
backends, region, key, keyid, profile)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
return ret
_ret = _tags_present(name, tags, region, key, keyid, profile)
ret.update({'changes': dictupdate.update(ret['changes'], _ret['changes']),
ret.update({'changes': salt.utils.dictupdate.update(ret['changes'], _ret['changes']),
'comment': ' '.join([ret['comment'], _ret['comment']])})
ret['result'] = ret['result'] if _ret['result'] else _ret['result']
if ret['result'] is False:
@ -524,7 +525,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None,
if value['description'] != 'Instance deregistration currently in progress.']
new = [value for value in instances if value not in nodes]
if not len(new):
msg = 'Instance/s {0} already exist.'.format(str(instances).strip('[]'))
msg = 'Instance/s {0} already exist.'.format(six.text_type(instances).strip('[]'))
log.debug(msg)
ret.update({'comment': msg})
return ret
@ -555,7 +556,7 @@ DEFAULT_PILLAR_LISTENER_POLICY_KEY = 'boto_elb_listener_policies'
def _elb_present(name, availability_zones, listeners, subnets, subnet_names,
security_groups, scheme, region, key, keyid, profile):
ret = {'result': True, 'comment': '', 'changes': {}}
if not exactly_one((availability_zones, subnets, subnet_names)):
if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)):
raise SaltInvocationError('Exactly one of availability_zones, subnets, '
'subnet_names must be provided as arguments.')
if not listeners:
@ -619,8 +620,7 @@ def _elb_present(name, availability_zones, listeners, subnets, subnet_names,
keyid=keyid, profile=profile
)
if not _security_groups:
msg = 'Security groups {0} do not map to valid security group ids.'
ret['comment'] = msg.format(security_groups)
ret['comment'] = 'Security groups {0} do not map to valid security group ids.'.format(security_groups)
ret['result'] = False
return ret
exists = __salt__['boto_elb.exists'](name, region, key, keyid, profile)
@ -645,14 +645,14 @@ def _elb_present(name, availability_zones, listeners, subnets, subnet_names,
else:
ret['comment'] = 'ELB {0} present.'.format(name)
_ret = _security_groups_present(name, _security_groups, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
if ret['result'] is False:
return ret
_ret = _listeners_present(name, listeners, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
@ -660,7 +660,7 @@ def _elb_present(name, availability_zones, listeners, subnets, subnet_names,
return ret
if availability_zones:
_ret = _zones_present(name, availability_zones, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
@ -668,7 +668,7 @@ def _elb_present(name, availability_zones, listeners, subnets, subnet_names,
return ret
elif subnets:
_ret = _subnets_present(name, subnets, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
@ -679,8 +679,7 @@ def _listeners_present(name, listeners, region, key, keyid, profile):
ret = {'result': True, 'comment': '', 'changes': {}}
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
if not lb:
msg = '{0} ELB configuration could not be retrieved.'.format(name)
ret['comment'] = msg
ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
if not listeners:
@ -729,8 +728,7 @@ def _listeners_present(name, listeners, region, key, keyid, profile):
if deleted:
ret['comment'] = 'Deleted listeners on {0} ELB.'.format(name)
else:
msg = 'Failed to delete listeners on {0} ELB.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to delete listeners on {0} ELB.'.format(name)
ret['result'] = False
if to_create:
@ -739,10 +737,10 @@ def _listeners_present(name, listeners, region, key, keyid, profile):
profile)
if created:
msg = 'Created listeners on {0} ELB.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
else:
msg = 'Failed to create listeners on {0} ELB.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['result'] = False
if to_create or to_delete:
@ -761,8 +759,7 @@ def _security_groups_present(name, security_groups, region, key, keyid, profile)
ret = {'result': True, 'comment': '', 'changes': {}}
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
if not lb:
msg = '{0} ELB configuration could not be retrieved.'.format(name)
ret['comment'] = msg
ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
if not security_groups:
@ -772,19 +769,16 @@ def _security_groups_present(name, security_groups, region, key, keyid, profile)
change_needed = True
if change_needed:
if __opts__['test']:
msg = 'ELB {0} set to have security groups modified.'.format(name)
ret['comment'] = msg
ret['comment'] = 'ELB {0} set to have security groups modified.'.format(name)
ret['result'] = None
return ret
changed = __salt__['boto_elb.apply_security_groups'](
name, security_groups, region, key, keyid, profile
)
if changed:
msg = 'Modified security_groups on {0} ELB.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Modified security_groups on {0} ELB.'.format(name)
else:
msg = 'Failed to modify security_groups on {0} ELB.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to modify security_groups on {0} ELB.'.format(name)
ret['result'] = False
ret['changes']['old'] = {'security_groups': lb['security_groups']}
ret['changes']['new'] = {'security_groups': security_groups}
@ -799,8 +793,7 @@ def _attributes_present(name, attributes, region, key, keyid, profile):
profile)
if not _attributes:
ret['result'] = False
msg = 'Failed to retrieve attributes for ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to retrieve attributes for ELB {0}.'.format(name)
return ret
attrs_to_set = []
if 'cross_zone_load_balancing' in attributes:
@ -821,7 +814,7 @@ def _attributes_present(name, attributes, region, key, keyid, profile):
attrs_to_set.append('connecting_settings')
if 'access_log' in attributes:
for attr, val in six.iteritems(attributes['access_log']):
if str(_attributes['access_log'][attr]) != str(val):
if six.text_type(_attributes['access_log'][attr]) != six.text_type(val):
attrs_to_set.append('access_log')
if 's3_bucket_prefix' in attributes['access_log']:
sbp = attributes['access_log']['s3_bucket_prefix']
@ -842,8 +835,7 @@ def _attributes_present(name, attributes, region, key, keyid, profile):
ret['comment'] = 'Set attributes on ELB {0}.'.format(name)
else:
ret['result'] = False
msg = 'Failed to set attributes on ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to set attributes on ELB {0}.'.format(name)
else:
ret['comment'] = 'Attributes already set on ELB {0}.'.format(name)
return ret
@ -857,17 +849,15 @@ def _health_check_present(name, health_check, region, key, keyid, profile):
keyid, profile)
if not _health_check:
ret['result'] = False
msg = 'Failed to retrieve health_check for ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to retrieve health_check for ELB {0}.'.format(name)
return ret
need_to_set = False
for attr, val in six.iteritems(health_check):
if str(_health_check[attr]) != str(val):
if six.text_type(_health_check[attr]) != six.text_type(val):
need_to_set = True
if need_to_set:
if __opts__['test']:
msg = 'ELB {0} set to have health check set.'.format(name)
ret['comment'] = msg
ret['comment'] = 'ELB {0} set to have health check set.'.format(name)
ret['result'] = None
return ret
was_set = __salt__['boto_elb.set_health_check'](name, health_check,
@ -882,8 +872,7 @@ def _health_check_present(name, health_check, region, key, keyid, profile):
ret['comment'] = 'Set health check on ELB {0}.'.format(name)
else:
ret['result'] = False
msg = 'Failed to set health check on ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to set health check on ELB {0}.'.format(name)
else:
ret['comment'] = 'Health check already set on ELB {0}.'.format(name)
return ret
@ -894,8 +883,7 @@ def _zones_present(name, availability_zones, region, key, keyid, profile):
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
if not lb:
ret['result'] = False
msg = 'Failed to retrieve ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name)
return ret
to_enable = []
to_disable = []
@ -908,19 +896,16 @@ def _zones_present(name, availability_zones, region, key, keyid, profile):
to_disable.append(zone)
if to_enable or to_disable:
if __opts__['test']:
msg = 'ELB {0} to have availability zones set.'.format(name)
ret['comment'] = msg
ret['comment'] = 'ELB {0} to have availability zones set.'.format(name)
ret['result'] = None
return ret
if to_enable:
enabled = __salt__['boto_elb.enable_availability_zones'](
name, to_enable, region, key, keyid, profile)
if enabled:
msg = 'Enabled availability zones on {0} ELB.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Enabled availability zones on {0} ELB.'.format(name)
else:
msg = 'Failed to enable availability zones on {0} ELB.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Failed to enable availability zones on {0} ELB.'.format(name)
ret['result'] = False
if to_disable:
disabled = __salt__['boto_elb.disable_availability_zones'](
@ -939,8 +924,7 @@ def _zones_present(name, availability_zones, region, key, keyid, profile):
ret['changes']['new'] = {'availability_zones':
lb['availability_zones']}
else:
msg = 'Availability zones already set on ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Availability zones already set on ELB {0}.'.format(name)
return ret
@ -951,8 +935,7 @@ def _subnets_present(name, subnets, region, key, keyid, profile):
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
if not lb:
ret['result'] = False
msg = 'Failed to retrieve ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to retrieve ELB {0}.'.format(name)
return ret
to_enable = []
to_disable = []
@ -965,8 +948,7 @@ def _subnets_present(name, subnets, region, key, keyid, profile):
to_disable.append(subnet)
if to_enable or to_disable:
if __opts__['test']:
msg = 'ELB {0} to have subnets set.'.format(name)
ret['comment'] = msg
ret['comment'] = 'ELB {0} to have subnets set.'.format(name)
ret['result'] = None
return ret
if to_enable:
@ -974,30 +956,31 @@ def _subnets_present(name, subnets, region, key, keyid, profile):
region, key, keyid,
profile)
if attached:
msg = 'Attached subnets on {0} ELB.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Attached subnets on {0} ELB.'.format(name)
else:
msg = 'Failed to attach subnets on {0} ELB.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Failed to attach subnets on {0} ELB.'.format(name)
ret['result'] = False
if to_disable:
detached = __salt__['boto_elb.detach_subnets'](name, to_disable,
region, key, keyid,
profile)
if detached:
msg = 'Detached subnets on {0} ELB.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Detached subnets on {0} ELB.'.format(name)
])
else:
msg = 'Failed to detach subnets on {0} ELB.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Failed to detach subnets on {0} ELB.'.format(name)
])
ret['result'] = False
ret['changes']['old'] = {'subnets': lb['subnets']}
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid,
profile)
ret['changes']['new'] = {'subnets': lb['subnets']}
else:
msg = 'Subnets already set on ELB {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Subnets already set on ELB {0}.'.format(name)
return ret
@ -1005,7 +988,7 @@ def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profil
'''helper method for present. ensure that cloudwatch_alarms are set'''
current = __salt__['config.option'](alarms_from_pillar, {})
if alarms:
current = dictupdate.update(current, alarms)
current = salt.utils.dictupdate.update(current, alarms)
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
for _, info in six.iteritems(current):
info["name"] = name + " " + info["name"]
@ -1077,8 +1060,7 @@ def _policies_present(name, policies, policies_from_pillar, listeners, backends,
lb = __salt__['boto_elb.get_elb_config'](name, region, key, keyid, profile)
if not lb:
msg = '{0} ELB configuration could not be retrieved.'.format(name)
ret['comment'] = msg
ret['comment'] = '{0} ELB configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
@ -1269,8 +1251,9 @@ def _policy_cname(policy_dict):
policy_name = policy_dict['policy_name']
policy_type = policy_dict['policy_type']
policy = policy_dict['policy']
canonical_policy_repr = str(sorted(list(six.iteritems(policy)), key=lambda x: str(x[0])))
policy_hash = hashlib.md5(str(canonical_policy_repr)).hexdigest()
canonical_policy_repr = six.text_type(sorted(list(six.iteritems(policy)), key=lambda x: six.text_type(x[0])))
policy_hash = hashlib.md5(
salt.utils.stringutils.to_bytes(str(canonical_policy_repr))).hexdigest() # future lint: disable=blacklisted-function
if policy_type.endswith('Type'):
policy_type = policy_type[:-4]
return "{0}-{1}-{2}".format(policy_type, policy_name, policy_hash)
@ -1340,7 +1323,7 @@ def _tags_present(name, tags, region, key, keyid, profile):
ret['comment'] = ' '.join([ret['comment'], msg])
return ret
if 'old' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}})
for _tag in tags_to_remove:
ret['changes']['old']['tags'][_tag] = lb['tags'][_tag]
if tags_to_add or tags_to_update:
@ -1357,7 +1340,7 @@ def _tags_present(name, tags, region, key, keyid, profile):
', '.join(tags_to_update.keys()))
ret['comment'] = ' '.join([ret['comment'], msg])
else:
all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
all_tag_changes = salt.utils.dictupdate.update(tags_to_add, tags_to_update)
_ret = __salt__['boto_elb.set_tags'](
name, all_tag_changes, region, key, keyid, profile)
if not _ret:
@ -1366,9 +1349,9 @@ def _tags_present(name, tags, region, key, keyid, profile):
ret['comment'] = ' '.join([ret['comment'], msg])
return ret
if 'old' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'old': {'tags': {}}})
if 'new' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}})
ret['changes'] = salt.utils.dictupdate.update(ret['changes'], {'new': {'tags': {}}})
for tag in all_tag_changes:
ret['changes']['new']['tags'][tag] = tags[tag]
if 'tags' in lb:

View File

@ -34,7 +34,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
region: us-east-1
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import logging

View File

@ -132,7 +132,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
@ -140,6 +140,7 @@ import os
import salt.utils.data
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.odict as odict
import salt.utils.dictupdate as dictupdate
from salt.ext import six
@ -233,7 +234,7 @@ def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile=
if delete_keys:
keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
keyid=keyid, profile=profile)
log.debug('Keys for user {0} are {1}.'.format(name, keys))
log.debug('Keys for user %s are %s.', name, keys)
if isinstance(keys, dict):
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
for k in keys:
@ -361,13 +362,13 @@ def keys_present(name, number, save_dir, region=None, key=None, keyid=None, prof
keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
keyid=keyid, profile=profile)
if isinstance(keys, six.string_types):
log.debug('keys are : false {0}'.format(keys))
log.debug('keys are : false %s', keys)
error, message = _get_error(keys)
ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
log.debug('Keys are : {0}.'.format(keys))
log.debug('Keys are : %s.', keys)
if len(keys) >= number:
ret['comment'] = 'The number of keys exist for user {0}'.format(name)
ret['result'] = True
@ -384,18 +385,25 @@ def keys_present(name, number, save_dir, region=None, key=None, keyid=None, prof
ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
log.debug('Created is : {0}'.format(created))
log.debug('Created is : %s', created)
response = 'create_access_key_response'
result = 'create_access_key_result'
new_keys[str(i)] = {}
new_keys[str(i)]['key_id'] = created[response][result]['access_key']['access_key_id']
new_keys[str(i)]['secret_key'] = created[response][result]['access_key']['secret_access_key']
new_keys[six.text_type(i)] = {}
new_keys[six.text_type(i)]['key_id'] = created[response][result]['access_key']['access_key_id']
new_keys[six.text_type(i)]['secret_key'] = created[response][result]['access_key']['secret_access_key']
try:
with salt.utils.files.fopen('{0}/{1}'.format(save_dir, name), 'a') as _wrf:
for key_num, key in new_keys.items():
key_id = key['key_id']
secret_key = key['secret_key']
_wrf.write(save_format.format(key_id, secret_key, 'key_id-{0}'.format(key_num), 'key-{0}'.format(key_num)))
_wrf.write(salt.utils.stringutils.to_str(
save_format.format(
key_id,
secret_key,
'key_id-{0}'.format(key_num),
'key-{0}'.format(key_num)
)
))
ret['comment'] = 'Keys have been written to file {0}/{1}.'.format(save_dir, name)
ret['result'] = True
ret['changes'] = new_keys
@ -445,15 +453,15 @@ def keys_absent(access_keys, user_name, region=None, key=None, keyid=None, profi
def _delete_key(ret, access_key_id, user_name, region=None, key=None, keyid=None, profile=None):
keys = __salt__['boto_iam.get_all_access_keys'](user_name=user_name, region=region, key=key,
keyid=keyid, profile=profile)
log.debug('Keys for user {1} are : {0}.'.format(keys, user_name))
log.debug('Keys for user %s are : %s.', keys, user_name)
if isinstance(keys, six.string_types):
log.debug('Keys {0} are a string. Something went wrong.'.format(keys))
log.debug('Keys %s are a string. Something went wrong.', keys)
ret['comment'] = ' '.join([ret['comment'], 'Key {0} could not be deleted.'.format(access_key_id)])
return ret
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
for k in keys:
log.debug('Key is: {0} and is compared with: {1}'.format(k['access_key_id'], access_key_id))
if str(k['access_key_id']) == str(access_key_id):
log.debug('Key is: %s and is compared with: %s', k['access_key_id'], access_key_id)
if six.text_type(k['access_key_id']) == six.text_type(access_key_id):
if __opts__['test']:
ret['comment'] = 'Access key {0} is set to be deleted.'.format(access_key_id)
ret['result'] = None
@ -572,7 +580,7 @@ def _user_policies_present(name, policies=None, region=None, key=None, keyid=Non
dict_policy = _byteify(policy)
_policy = _byteify(__salt__['boto_iam.get_user_policy'](name, policy_name, region, key, keyid, profile))
if _policy != dict_policy:
log.debug("Policy mismatch:\n{0}\n{1}".format(_policy, dict_policy))
log.debug("Policy mismatch:\n%s\n%s", _policy, dict_policy)
policies_to_create[policy_name] = policy
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
@ -584,8 +592,7 @@ def _user_policies_present(name, policies=None, region=None, key=None, keyid=Non
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__['test']:
msg = '{0} policies to be modified on user {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on user {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
@ -599,8 +606,7 @@ def _user_policies_present(name, policies=None, region=None, key=None, keyid=Non
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} for user {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} for user {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__['boto_iam.delete_user_policy'](
@ -612,15 +618,13 @@ def _user_policies_present(name, policies=None, region=None, key=None, keyid=Non
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to user {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to user {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies modified on user {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
ret['comment'] = '{0} policies modified on user {1}.'.format(', '.join(_list), name)
return ret
@ -657,8 +661,7 @@ def _user_policies_attached(
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__['test']:
msg = '{0} policies to be modified on user {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on user {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -676,8 +679,7 @@ def _user_policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to add policy {0} to user {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to user {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__['boto_iam.detach_user_policy'](policy_name,
@ -693,8 +695,7 @@ def _user_policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to remove policy {0} from user {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to remove policy {0} from user {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region, key=key,
keyid=keyid,
@ -702,8 +703,7 @@ def _user_policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
log.debug(newpolicies)
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies modified on user {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
ret['comment'] = '{0} policies modified on user {1}.'.format(', '.join(newpolicies), name)
return ret
@ -718,12 +718,10 @@ def _user_policies_detached(
region=region, key=key, keyid=keyid, profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
if not _list:
msg = 'No attached policies in user {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No attached policies in user {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be detached from user {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies to be detached from user {1}.'.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -740,15 +738,13 @@ def _user_policies_detached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from user {1}'
ret['comment'] = msg.format(policy_arn, name)
ret['comment'] = 'Failed to detach {0} from user {1}'.format(policy_arn, name)
return ret
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies detached from user {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies detached from user {1}.'.format(', '.join(oldpolicies), name)
return ret
@ -762,12 +758,10 @@ def _user_policies_deleted(
oldpolicies = __salt__['boto_iam.get_all_user_policies'](user_name=name,
region=region, key=key, keyid=keyid, profile=profile)
if not oldpolicies:
msg = 'No inline policies in user {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No inline policies in user {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be deleted from user {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies to be deleted from user {1}.'.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'inline_policies': oldpolicies}
@ -783,14 +777,12 @@ def _user_policies_deleted(
profile=profile)
ret['changes']['new'] = {'inline_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from user {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to detach {0} from user {1}'.format(policy_name, name)
return ret
newpolicies = __salt__['boto_iam.get_all_user_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
ret['changes']['new'] = {'inline_policies': newpolicies}
msg = '{0} policies deleted from user {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies deleted from user {1}.'.format(', '.join(oldpolicies), name)
return ret
@ -800,7 +792,7 @@ def _case_password(ret, name, password, region=None, key=None, keyid=None, profi
ret['result'] = None
return ret
login = __salt__['boto_iam.create_login_profile'](name, password, region, key, keyid, profile)
log.debug('Login is : {0}.'.format(login))
log.debug('Login is : %s.', login)
if login:
if 'Conflict' in login:
ret['comment'] = ' '.join([ret['comment'], 'Login profile for user {0} exists.'.format(name)])
@ -978,7 +970,7 @@ def group_present(name, policies=None, policies_from_pillars=None, managed_polic
ret['result'] = _ret['result']
return ret
if users is not None:
log.debug('Users are : {0}.'.format(users))
log.debug('Users are : %s.', users)
existing_users = __salt__['boto_iam.get_group_members'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
ret = _case_group(ret, users, name, existing_users, region, key, keyid, profile)
return ret
@ -988,15 +980,15 @@ def _case_group(ret, users, group_name, existing_users, region, key, keyid, prof
_users = []
for user in existing_users:
_users.append(user['user_name'])
log.debug('upstream users are {0}'.format(_users))
log.debug('upstream users are %s', _users)
for user in users:
log.debug('users are {0}'.format(user))
log.debug('users are %s', user)
if user in _users:
log.debug('user exists')
ret['comment'] = ' '.join([ret['comment'], 'User {0} is already a member of group {1}.'.format(user, group_name)])
continue
else:
log.debug('user is set to be added {0}'.format(user))
log.debug('user is set to be added %s', user)
if __opts__['test']:
ret['comment'] = 'User {0} is set to be added to group {1}.'.format(user, group_name)
ret['result'] = None
@ -1035,7 +1027,7 @@ def _group_policies_present(
dict_policy = _byteify(policy)
_policy = _byteify(__salt__['boto_iam.get_group_policy'](name, policy_name, region, key, keyid, profile))
if _policy != dict_policy:
log.debug("Policy mismatch:\n{0}\n{1}".format(_policy, dict_policy))
log.debug("Policy mismatch:\n%s\n%s", _policy, dict_policy)
policies_to_create[policy_name] = policy
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
@ -1047,8 +1039,7 @@ def _group_policies_present(
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__['test']:
msg = '{0} policies to be modified on group {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on group {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
@ -1062,8 +1053,7 @@ def _group_policies_present(
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to group {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__['boto_iam.delete_group_policy'](
@ -1075,15 +1065,13 @@ def _group_policies_present(
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to group {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies modified on group {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
ret['comment'] = '{0} policies modified on group {1}.'.format(', '.join(_list), name)
return ret
@ -1122,8 +1110,7 @@ def _group_policies_attached(
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__['test']:
msg = '{0} policies to be modified on group {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on group {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -1140,8 +1127,7 @@ def _group_policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to group {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__['boto_iam.detach_group_policy'](policy_name,
@ -1156,16 +1142,14 @@ def _group_policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to remove policy {0} from group {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to remove policy {0} from group {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
log.debug(newpolicies)
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies modified on group {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
ret['comment'] = '{0} policies modified on group {1}.'.format(', '.join(newpolicies), name)
return ret
@ -1180,12 +1164,10 @@ def _group_policies_detached(
region=region, key=key, keyid=keyid, profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
if not _list:
msg = 'No attached policies in group {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No attached policies in group {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be detached from group {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies to be detached from group {1}.'.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -1202,15 +1184,13 @@ def _group_policies_detached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from group {1}'
ret['comment'] = msg.format(policy_arn, name)
ret['comment'] = 'Failed to detach {0} from group {1}'.format(policy_arn, name)
return ret
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies detached from group {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
ret['comment'] = '{0} policies detached from group {1}.'.format(', '.join(newpolicies), name)
return ret
@ -1224,12 +1204,10 @@ def _group_policies_deleted(
oldpolicies = __salt__['boto_iam.get_all_group_policies'](group_name=name,
region=region, key=key, keyid=keyid, profile=profile)
if not oldpolicies:
msg = 'No inline policies in group {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No inline policies in group {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be deleted from group {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies to be deleted from group {1}.'.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'inline_policies': oldpolicies}
@ -1245,14 +1223,12 @@ def _group_policies_deleted(
profile=profile)
ret['changes']['new'] = {'inline_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from group {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to detach {0} from group {1}'.format(policy_name, name)
return ret
newpolicies = __salt__['boto_iam.get_all_group_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
ret['changes']['new'] = {'inline_policies': newpolicies}
msg = '{0} policies deleted from group {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies deleted from group {1}.'.format(', '.join(oldpolicies), name)
return ret
@ -1326,9 +1302,9 @@ def account_policy(name=None, allow_users_to_change_password=None,
for key, value in config.items():
if key in ('region', 'key', 'keyid', 'profile', 'name'):
continue
if value is not None and str(info[key]) != str(value).lower():
if value is not None and six.text_type(info[key]) != six.text_type(value).lower():
ret['comment'] = ' '.join([ret['comment'], 'Policy value {0} has been set to {1}.'.format(value, info[key])])
ret['changes'][key] = str(value).lower()
ret['changes'][key] = six.text_type(value).lower()
if not ret['changes']:
ret['comment'] = 'Account policy is not changed.'
return ret
@ -1430,7 +1406,7 @@ def server_cert_present(name, public_key, private_key, cert_chain=None, path=Non
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
exists = __salt__['boto_iam.get_server_certificate'](name, region, key, keyid, profile)
log.debug('Variables are : {0}.'.format(locals()))
log.debug('Variables are : %s.', locals())
if exists:
ret['comment'] = 'Certificate {0} exists.'.format(name)
return ret
@ -1539,8 +1515,7 @@ def policy_present(name, policy_document, path=None, description=None,
if bool(r):
if __opts__['test']:
msg = 'Policy {0} set to be modified.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Policy {0} set to be modified.'.format(name)
ret['result'] = None
return ret

View File

@ -86,7 +86,7 @@ on the IAM role to be persistent. This functionality was added in 2015.8.0.
will be used as the default region.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.dictupdate as dictupdate
from salt.ext import six
@ -308,8 +308,7 @@ def _instance_profile_present(
keyid, profile)
if not exists:
if __opts__['test']:
msg = 'Instance profile {0} is set to be created.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Instance profile {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.create_instance_profile'](name, region,
@ -321,8 +320,7 @@ def _instance_profile_present(
ret['comment'] = 'Instance profile {0} created.'.format(name)
else:
ret['result'] = False
msg = 'Failed to create {0} instance profile.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to create {0} instance profile.'.format(name)
return ret
@ -338,8 +336,7 @@ def _instance_profile_associated(
profile)
if not is_associated:
if __opts__['test']:
msg = 'Instance profile {0} is set to be associated.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Instance profile {0} is set to be associated.'.format(name)
ret['result'] = None
return ret
associated = __salt__['boto_iam.associate_profile_to_role'](name, name,
@ -352,8 +349,7 @@ def _instance_profile_associated(
ret['comment'] = 'Instance profile {0} associated.'.format(name)
else:
ret['result'] = False
msg = 'Failed to associate {0} instance profile with {0} role.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Failed to associate {0} instance profile with {0} role.'.format(name)
return ret
@ -396,8 +392,7 @@ def _policies_present(
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__['test']:
msg = '{0} policies to be modified on role {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
@ -414,8 +409,7 @@ def _policies_present(
profile)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to role {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__['boto_iam.delete_role_policy'](name,
@ -429,14 +423,12 @@ def _policies_present(
profile)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to remove policy {0} from role {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_role_policies'](name, region, key,
keyid, profile)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies modified on role {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(_list), name)
return ret
@ -473,8 +465,7 @@ def _policies_attached(
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__['test']:
msg = '{0} policies to be modified on role {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -493,8 +484,7 @@ def _policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to add policy {0} to role {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name,
@ -511,8 +501,7 @@ def _policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to remove policy {0} from role {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_attached_role_policies'](name, region=region, key=key,
keyid=keyid,
@ -520,8 +509,7 @@ def _policies_attached(
newpolicies = [x.get('policy_arn') for x in _list]
log.debug(newpolicies)
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies modified on role {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(newpolicies), name)
return ret
@ -629,8 +617,7 @@ def _instance_profile_absent(
keyid, profile)
if exists:
if __opts__['test']:
msg = 'Instance profile {0} is set to be removed.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Instance profile {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_instance_profile'](name, region,
@ -642,8 +629,7 @@ def _instance_profile_absent(
ret['comment'] = 'Instance profile {0} removed.'.format(name)
else:
ret['result'] = False
msg = 'Failed to delete {0} instance profile.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to delete {0} instance profile.'.format(name)
else:
ret['comment'] = '{0} instance profile does not exist.'.format(name)
return ret
@ -659,12 +645,10 @@ def _policies_absent(
_list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid,
profile)
if not _list:
msg = 'No policies in role {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No policies in role {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be removed from role {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
ret['comment'] = '{0} policies to be removed from role {1}.'.format(', '.join(_list), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
@ -680,14 +664,12 @@ def _policies_absent(
profile)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to role {1}'
ret['comment'] = msg.format(policy_name, name)
ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_role_policies'](name, region, key,
keyid, profile)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies removed from role {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
ret['comment'] = '{0} policies removed from role {1}.'.format(', '.join(_list), name)
return ret
@ -702,12 +684,10 @@ def _policies_detached(
region=region, key=key, keyid=keyid, profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
if not _list:
msg = 'No attached policies in role {0}.'.format(name)
ret['comment'] = msg
ret['comment'] = 'No attached policies in role {0}.'.format(name)
return ret
if __opts__['test']:
msg = '{0} policies to be detached from role {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['comment'] = '{0} policies to be detached from role {1}.'.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
@ -725,15 +705,13 @@ def _policies_detached(
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from role {1}'
ret['comment'] = msg.format(policy_arn, name)
ret['comment'] = 'Failed to detach {0} from role {1}'.format(policy_arn, name)
return ret
_list = __salt__['boto_iam.list_attached_role_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies detached from role {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
ret['comment'] = '{0} policies detached from role {1}.'.format(', '.join(newpolicies), name)
return ret
@ -749,18 +727,15 @@ def _instance_profile_disassociated(
profile)
if is_associated:
if __opts__['test']:
msg = 'Instance profile {0} is set to be disassociated.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Instance profile {0} is set to be disassociated.'.format(name)
ret['result'] = None
return ret
associated = __salt__['boto_iam.disassociate_profile_from_role'](name, name, region, key, keyid, profile)
if associated:
ret['changes']['old'] = {'profile_associated': True}
ret['changes']['new'] = {'profile_associated': False}
msg = 'Instance profile {0} disassociated.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Instance profile {0} disassociated.'.format(name)
else:
ret['result'] = False
msg = 'Failed to disassociate {0} instance profile from {0} role.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Failed to disassociate {0} instance profile from {0} role.'.format(name)
return ret

View File

@ -71,7 +71,7 @@ config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import os
@ -275,8 +275,10 @@ def thing_type_absent(name, thingTypeName,
# wait required 5 minutes since deprecation time
if _delete_wait_timer:
log.warning('wait for {0} seconds per AWS (5 minutes after deprecation time) '
'before we can delete iot thing type'.format(_delete_wait_timer))
log.warning(
'wait for %s seconds per AWS (5 minutes after deprecation time) '
'before we can delete iot thing type', _delete_wait_timer
)
time.sleep(_delete_wait_timer)
# delete thing type

View File

@ -58,7 +58,7 @@ pillars or minion config:
# Keep pylint from chocking on ret
# pylint: disable=undefined-variable
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
@ -345,8 +345,10 @@ def present(name,
comments.append('Kinesis stream {0}: would be resharded from {1} to {2} shards'
.format(name, old_num_shards, num_shards))
else:
log.info("Resharding stream from {0} to {1} shards, this could take a while"
.format(old_num_shards, num_shards))
log.info(
'Resharding stream from %s to %s shards, this could take '
'a while', old_num_shards, num_shards
)
# reshard returns True when a split/merge action is taken,
# or False when no more actions are required
continue_reshard = True

View File

@ -56,7 +56,7 @@ config:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import salt.utils.dictupdate as dictupdate
from salt.exceptions import SaltInvocationError
@ -194,11 +194,12 @@ def _key_present(
# We can't recover from this. KMS only exposes enable/disable
# and disable is not necessarily a great action here. AWS sucks
# for not including alias in the create_key call.
msg = ('Failed to create key alias for key_id {0}.'
' This resource will be left dangling. Please clean'
' manually. Error: {1}')
ret['result'] = False
ret['comment'] = msg.format(kms_key_id, rn['error']['message'])
ret['comment'] = (
'Failed to create key alias for key_id {0}. This resource '
'will be left dangling. Please clean manually. '
'Error: {1}'.format(kms_key_id, rn['error']['message'])
)
return ret
ret['changes']['old'] = {'key': None}
ret['changes']['new'] = {'key': name}
@ -272,9 +273,8 @@ def _key_enabled(key_metadata, enabled, region, key, keyid, profile):
)
event = 'Disabled'
if 'error' in re:
msg = 'Failed to update key enabled status: {0}.'
ret['result'] = False
ret['comment'] = msg.format(re['error']['message'])
ret['comment'] = 'Failed to update key enabled status: {0}.'.format(re['error']['message'])
else:
ret['comment'] = '{0} key.'.format(event)
return ret
@ -292,9 +292,8 @@ def _key_description(key_metadata, description, region, key, keyid, profile):
key_metadata['KeyId'], description, region, key, keyid, profile
)
if 'error' in rdu:
msg = 'Failed to update key description: {0}.'
ret['result'] = False
ret['comment'] = msg.format(rdu['error']['message'])
ret['comment'] = 'Failed to update key description: {0}.'.format(rdu['error']['message'])
else:
ret['comment'] = 'Updated key description.'
return ret
@ -334,9 +333,8 @@ def _key_rotation(key_metadata, key_rotation, region, key, keyid, profile):
ret['result'] = None
ret['comment'] = msg
return ret
msg = 'Failed to set key rotation: {0}.'
ret['result'] = False
ret['comment'] = msg.format(rk['error']['message'])
ret['comment'] = 'Failed to set key rotation: {0}.'.format(rk['error']['message'])
else:
ret['changes'] = {'old': {'key_rotation': not key_rotation},
'new': {'key_rotation': key_rotation}}
@ -353,17 +351,15 @@ def _key_policy(key_metadata, policy, region, key, keyid, profile):
if rkp['key_policy'] == policy:
return ret
if __opts__['test']:
msg = '{0} Key set to have key policy updated.'
ret['comment'] = msg.format(ret['comment'])
ret['comment'] = '{0} Key set to have key policy updated.'.format(ret['comment'])
ret['result'] = None
return ret
rpkp = __salt__['boto_kms.put_key_policy'](
kms_key_id, 'default', policy, region, key, keyid, profile
)
if 'error' in rpkp:
msg = '{0} Failed to update key policy: {1}'
ret['result'] = False
ret['comment'] = msg.format(ret['comment'], rpkp['error']['message'])
ret['comment'] = '{0} Failed to update key policy: {1}'.format(ret['comment'], rpkp['error']['message'])
else:
ret['comment'] = 'Updated key policy.'
return ret

View File

@ -61,7 +61,7 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import hashlib
@ -379,8 +379,7 @@ def _function_config_present(FunctionName, Role, Handler, Description, Timeout,
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function config to be modified'])
if __opts__['test']:
msg = 'Function {0} set to be modified.'.format(FunctionName)
ret['comment'] = msg
ret['comment'] = 'Function {0} set to be modified.'.format(FunctionName)
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_function_config'](
@ -422,8 +421,7 @@ def _function_code_present(FunctionName, ZipFile, S3Bucket, S3Key,
update = True
if update:
if __opts__['test']:
msg = 'Function {0} set to be modified.'.format(FunctionName)
ret['comment'] = msg
ret['comment'] = 'Function {0} set to be modified.'.format(FunctionName)
ret['result'] = None
return ret
ret['changes']['old'] = {
@ -468,8 +466,7 @@ def _function_permissions_present(FunctionName, Permissions,
ret['comment'] = os.linesep.join(
[ret['comment'], 'Function permissions to be modified'])
if __opts__['test']:
msg = 'Function {0} set to be modified.'.format(FunctionName)
ret['comment'] = msg
ret['comment'] = 'Function {0} set to be modified.'.format(FunctionName)
ret['result'] = None
return ret
for sid, diff in six.iteritems(diffs):
@ -654,8 +651,7 @@ def alias_present(name, FunctionName, Name, FunctionVersion, Description='',
ret['comment'] = os.linesep.join(
[ret['comment'], 'Alias config to be modified'])
if __opts__['test']:
msg = 'Alias {0} set to be modified.'.format(Name)
ret['comment'] = msg
ret['comment'] = 'Alias {0} set to be modified.'.format(Name)
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_alias'](
@ -873,9 +869,11 @@ def event_source_mapping_present(name, EventSourceArn, FunctionName,
ret['comment'] = os.linesep.join(
[ret['comment'], 'Event source mapping to be modified'])
if __opts__['test']:
msg = ('Event source mapping {0} set to be '
'modified.'.format(_describe['UUID']))
ret['comment'] = msg
ret['comment'] = (
'Event source mapping {0} set to be modified.'.format(
_describe['UUID']
)
)
ret['result'] = None
return ret
_r = __salt__['boto_lambda.update_event_source_mapping'](

View File

@ -98,7 +98,7 @@ and autoscale groups are completely dependent on each other.
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from salt.exceptions import SaltInvocationError

View File

@ -71,11 +71,12 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
# Import Salt Libs
from salt.ext import six
from salt.exceptions import SaltInvocationError
import salt.utils.data
@ -700,16 +701,21 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N
if type(value) is bool:
params[k] = 'on' if value else 'off'
else:
params[k] = str(value)
logging.debug('Parameters from user are : {0}.'.format(params))
params[k] = six.text_type(value)
log.debug('Parameters from user are : %s.', params)
options = __salt__['boto_rds.describe_parameters'](name=name, region=region, key=key, keyid=keyid, profile=profile)
if not options.get('result'):
ret['result'] = False
ret['comment'] = os.linesep.join([ret['comment'], 'Faled to get parameters for group {0}.'.format(name)])
return ret
for parameter in options['parameters'].values():
if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != str(parameter['ParameterValue']):
logging.debug('Values that are being compared for {0} are {1}:{2} .'.format(parameter['ParameterName'], params.get(parameter['ParameterName']), parameter['ParameterValue']))
if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != six.text_type(parameter['ParameterValue']):
log.debug(
'Values that are being compared for %s are %s:%s.',
parameter['ParameterName'],
params.get(parameter['ParameterName']),
parameter['ParameterValue']
)
changed[parameter['ParameterName']] = params.get(parameter['ParameterName'])
if len(changed) > 0:
if __opts__['test']:

View File

@ -72,13 +72,14 @@ passed in as a dict, or as a string to pull from pillars or minion config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import uuid
# Import Salt Libs
import salt.utils.data
import salt.utils.json
from salt.ext import six
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
@ -156,13 +157,11 @@ def present(name, value, zone, record_type, ttl=None, identifier=None, region=No
in_states=in_states,
profile=profile)
if len(r) < 1:
msg = 'Error: instance with Name tag {0} not found'.format(name_tag)
ret['comment'] = msg
ret['comment'] = 'Error: instance with Name tag {0} not found'.format(name_tag)
ret['result'] = False
return ret
if len(r) > 1:
msg = 'Error: Name tag {0} matched more than one instance'
ret['comment'] = msg.format(name_tag)
ret['comment'] = 'Error: Name tag {0} matched more than one instance'.format(name_tag)
ret['result'] = False
return ret
instance = r[0]
@ -170,17 +169,14 @@ def present(name, value, zone, record_type, ttl=None, identifier=None, region=No
public_ip = getattr(instance, 'ip_address', None)
if value.startswith('private:'):
value = private_ip
log.info('Found private IP {0} for instance {1}'.format(private_ip,
name_tag))
log.info('Found private IP %s for instance %s', private_ip, name_tag)
else:
if public_ip is None:
msg = 'Error: No Public IP assigned to instance with Name {0}'
ret['comment'] = msg.format(name_tag)
ret['comment'] = 'Error: No Public IP assigned to instance with Name {0}'.format(name_tag)
ret['result'] = False
return ret
value = public_ip
log.info('Found public IP {0} for instance {1}'.format(public_ip,
name_tag))
log.info('Found public IP %s for instance %s', public_ip, name_tag)
try:
record = __salt__['boto_route53.get_record'](name, zone, record_type,
@ -229,12 +225,11 @@ def present(name, value, zone, record_type, ttl=None, identifier=None, region=No
need_to_update = True
if identifier and identifier != record['identifier']:
need_to_update = True
if ttl and str(ttl) != str(record['ttl']):
if ttl and six.text_type(ttl) != six.text_type(record['ttl']):
need_to_update = True
if need_to_update:
if __opts__['test']:
msg = 'Route53 record {0} set to be updated.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Route53 record {0} set to be updated.'.format(name)
ret['result'] = None
return ret
updated = __salt__['boto_route53.update_record'](name, value, zone,
@ -255,8 +250,7 @@ def present(name, value, zone, record_type, ttl=None, identifier=None, region=No
ret['comment'] = 'Updated {0} Route53 record.'.format(name)
else:
ret['result'] = False
msg = 'Failed to update {0} Route53 record.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to update {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} exists.'.format(name)
return ret
@ -324,8 +318,7 @@ def absent(
private_zone, identifier)
if record:
if __opts__['test']:
msg = 'Route53 record {0} set to be deleted.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Route53 record {0} set to be deleted.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_route53.delete_record'](name, zone,
@ -342,8 +335,7 @@ def absent(
ret['comment'] = 'Deleted {0} Route53 record.'.format(name)
else:
ret['result'] = False
msg = 'Failed to delete {0} Route53 record.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to delete {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} does not exist.'.format(name)
return ret
@ -416,15 +408,16 @@ def hosted_zone_present(name, domain_name=None, private_zone=False, caller_ref=N
if vpc_region and vpcs:
vpcs = [v for v in vpcs if v['region'] == vpc_region]
if not vpcs:
msg = ('Private zone requested but a VPC matching given criteria not found.')
msg = 'Private zone requested but a VPC matching given criteria not found.'
log.error(msg)
ret['result'] = False
ret['comment'] = msg
ret['result'] = False
return ret
if len(vpcs) > 1:
msg = ('Private zone requested but multiple VPCs matching given criteria found: '
'{0}.'.format([v['id'] for v in vpcs]))
log.error(msg)
log.error(
'Private zone requested but multiple VPCs matching given '
'criteria found: %s', [v['id'] for v in vpcs]
)
return None
vpc = vpcs[0]
if vpc_name:
@ -462,14 +455,14 @@ def hosted_zone_present(name, domain_name=None, private_zone=False, caller_ref=N
# toes. We can't just fail, because some scenarios (think split
# horizon DNS) require zones with identical names but different
# settings...
log.info('A Hosted Zone with name {0} already exists, but with '
log.info('A Hosted Zone with name %s already exists, but with '
'different settings. Will attempt to create the one '
'requested on the assumption this is what is desired. '
'This may fail...'.format(domain_name))
'This may fail...', domain_name)
if create:
if caller_ref is None:
caller_ref = str(uuid.uuid4())
caller_ref = six.text_type(uuid.uuid4())
if __opts__['test']:
ret['comment'] = 'Route53 Hosted Zone {0} set to be added.'.format(
domain_name)

View File

@ -50,7 +50,7 @@ config:
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import copy
import difflib
import logging

View File

@ -139,7 +139,7 @@ config:
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging

View File

@ -98,7 +98,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
will be used as the default region.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
@ -107,7 +107,7 @@ import pprint
# Import salt libs
import salt.utils.dictupdate as dictupdate
from salt.exceptions import SaltInvocationError
from salt.ext.six import string_types
from salt.ext import six
log = logging.getLogger(__name__)
@ -243,8 +243,7 @@ def _security_group_present(name, description, vpc_id=None, vpc_name=None,
profile, vpc_id, vpc_name)
if not exists:
if __opts__['test']:
msg = 'Security group {0} is set to be created.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Security group {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_secgroup.create'](name=name, description=description,
@ -260,8 +259,7 @@ def _security_group_present(name, description, vpc_id=None, vpc_name=None,
ret['comment'] = 'Security group {0} created.'.format(name)
else:
ret['result'] = False
msg = 'Failed to create {0} security group.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to create {0} security group.'.format(name)
else:
ret['comment'] = 'Security group {0} present.'.format(name)
return ret
@ -280,17 +278,17 @@ def _split_rules(rules):
cidr_ip = rule.get('cidr_ip')
group_name = rule.get('source_group_name')
group_id = rule.get('source_group_group_id')
if cidr_ip and not isinstance(cidr_ip, string_types):
if cidr_ip and not isinstance(cidr_ip, six.string_types):
for ip in cidr_ip:
_rule = rule.copy()
_rule['cidr_ip'] = ip
split.append(_rule)
elif group_name and not isinstance(group_name, string_types):
elif group_name and not isinstance(group_name, six.string_types):
for name in group_name:
_rule = rule.copy()
_rule['source_group_name'] = name
split.append(_rule)
elif group_id and not isinstance(group_id, string_types):
elif group_id and not isinstance(group_id, six.string_types):
for _id in group_id:
_rule = rule.copy()
_rule['source_group_group_id'] = _id
@ -318,8 +316,8 @@ def _check_rule(rule, _rule):
_rule['to_port'] = -1
if (rule['ip_protocol'] == _rule['ip_protocol'] and
str(rule['from_port']) == str(_rule['from_port']) and
str(rule['to_port']) == str(_rule['to_port'])):
six.text_type(rule['from_port']) == six.text_type(_rule['from_port']) and
six.text_type(rule['to_port']) == six.text_type(_rule['to_port'])):
_cidr_ip = _rule.get('cidr_ip')
if _cidr_ip and _cidr_ip == rule.get('cidr_ip'):
return True
@ -347,7 +345,7 @@ def _get_rule_changes(rules, _rules):
# 2. determine if rule exists in existing security group rules
for rule in rules:
try:
ip_protocol = str(rule.get('ip_protocol'))
ip_protocol = six.text_type(rule.get('ip_protocol'))
except KeyError:
raise SaltInvocationError('ip_protocol, to_port, and from_port are'
' required arguments for security group'
@ -356,8 +354,8 @@ def _get_rule_changes(rules, _rules):
'all', '-1', -1]
if ip_protocol not in supported_protocols and (not
'{0}'.format(ip_protocol).isdigit() or int(ip_protocol) > 255):
msg = ('Invalid ip_protocol {0} specified in security group rule.')
raise SaltInvocationError(msg.format(ip_protocol))
raise SaltInvocationError(
'Invalid ip_protocol {0} specified in security group rule.'.format(ip_protocol))
# For the 'all' case, we need to change the protocol name to '-1'.
if ip_protocol == 'all':
rule['ip_protocol'] = '-1'
@ -397,8 +395,8 @@ def _get_rule_changes(rules, _rules):
# entries, it doesn't matter which we pick.
_rule.pop('source_group_name', None)
to_delete.append(_rule)
log.debug('Rules to be deleted: {0}'.format(to_delete))
log.debug('Rules to be created: {0}'.format(to_create))
log.debug('Rules to be deleted: %s', to_delete)
log.debug('Rules to be created: %s', to_create)
return (to_delete, to_create)
@ -416,8 +414,7 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
keyid=keyid, profile=profile, vpc_id=vpc_id,
vpc_name=vpc_name)
if not sg:
msg = '{0} security group configuration could not be retrieved.'
ret['comment'] = msg.format(name)
ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
rules = _split_rules(rules)
@ -430,9 +427,10 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
region=region, key=key, keyid=keyid, profile=profile
)
if not _group_id:
msg = ('source_group_name {0} does not map to a valid'
' source group id.')
raise SaltInvocationError(msg.format(_source_group_name))
raise SaltInvocationError(
'source_group_name {0} does not map to a valid '
'source group id.'.format(_source_group_name)
)
rule['source_group_name'] = None
rule['source_group_group_id'] = _group_id
# rules = rules that exist in salt state
@ -457,11 +455,9 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
if not _deleted:
deleted = False
if deleted:
msg = 'Removed rules on {0} security group.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Removed rules on {0} security group.'.format(name)
else:
msg = 'Failed to remove rules on {0} security group.'
ret['comment'] = msg.format(name)
ret['comment'] = 'Failed to remove rules on {0} security group.'.format(name)
ret['result'] = False
if to_create:
created = True
@ -472,11 +468,15 @@ def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
if not _created:
created = False
if created:
msg = 'Created rules on {0} security group.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Created rules on {0} security group.'.format(name)
])
else:
msg = 'Failed to create rules on {0} security group.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Failed to create rules on {0} security group.'.format(name)
])
ret['result'] = False
ret['changes']['old'] = {'rules': sg['rules']}
sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key,
@ -500,8 +500,7 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
keyid=keyid, profile=profile, vpc_id=vpc_id,
vpc_name=vpc_name)
if not sg:
msg = '{0} security group configuration could not be retrieved.'
ret['comment'] = msg.format(name)
ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
rules_egress = _split_rules(rules_egress)
@ -514,9 +513,10 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
region=region, key=key, keyid=keyid, profile=profile
)
if not _group_id:
msg = ('source_group_name {0} does not map to a valid'
' source group id.')
raise SaltInvocationError(msg.format(_source_group_name))
raise SaltInvocationError(
'source_group_name {0} does not map to a valid '
'source group id.'.format(_source_group_name)
)
rule['source_group_name'] = None
rule['source_group_group_id'] = _group_id
# rules_egress = rules that exist in salt state
@ -541,11 +541,15 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
if not _deleted:
deleted = False
if deleted:
msg = 'Removed egress rule on {0} security group.'.format(name)
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Removed egress rule on {0} security group.'.format(name)
])
else:
msg = 'Failed to remove egress rule on {0} security group.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Failed to remove egress rule on {0} security group.'.format(name)
])
ret['result'] = False
if to_create:
created = True
@ -556,11 +560,15 @@ def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=N
if not _created:
created = False
if created:
msg = 'Created egress rules on {0} security group.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Created egress rules on {0} security group.'.format(name)
])
else:
msg = 'Failed to create egress rules on {0} security group.'
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
ret['comment'] = ' '.join([
ret['comment'],
'Failed to create egress rules on {0} security group.'.format(name)
])
ret['result'] = False
ret['changes']['old'] = {'rules_egress': sg['rules_egress']}
@ -616,8 +624,7 @@ def absent(
if sg:
if __opts__['test']:
msg = 'Security group {0} is set to be removed.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Security group {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_secgroup.delete'](name=name, group_id=None, region=region, key=key,
@ -629,8 +636,7 @@ def absent(
ret['comment'] = 'Security group {0} deleted.'.format(name)
else:
ret['result'] = False
msg = 'Failed to delete {0} security group.'.format(name)
ret['comment'] = msg
ret['comment'] = 'Failed to delete {0} security group.'.format(name)
else:
ret['comment'] = '{0} security group does not exist.'.format(name)
return ret
@ -647,8 +653,7 @@ def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None,
keyid=keyid, profile=profile, vpc_id=vpc_id,
vpc_name=vpc_name)
if not sg:
msg = '{0} security group configuration could not be retrieved.'
ret['comment'] = msg.format(name)
ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
tags_to_add = tags
@ -681,8 +686,10 @@ def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None,
profile=profile)
if not temp_ret:
ret['result'] = False
msg = 'Error attempting to delete tags {1}.'.format(tags_to_remove)
ret['comment'] = ' '.join([ret['comment'], msg])
ret['comment'] = ' '.join([
ret['comment'],
'Error attempting to delete tags {0}.'.format(tags_to_remove)
])
return ret
if 'old' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
@ -729,6 +736,5 @@ def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None,
if tag in sg['tags']:
ret['changes']['old']['tags'][tag] = sg['tags'][tag]
if not tags_to_update and not tags_to_remove and not tags_to_add:
msg = 'Tags are already set.'
ret['comment'] = ' '.join([ret['comment'], msg])
ret['comment'] = ' '.join([ret['comment'], 'Tags are already set.'])
return ret

View File

@ -54,7 +54,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Standard Libs
import re

View File

@ -57,7 +57,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import difflib
@ -188,10 +188,10 @@ def present(
if isinstance(val, six.string_types):
val = salt.utils.json.loads(val)
if _val != val:
log.debug('Policies differ:\n{0}\n{1}'.format(_val, val))
log.debug('Policies differ:\n%s\n%s', _val, val)
attrs_to_set[attr] = salt.utils.json.dumps(val, sort_keys=True)
elif str(_val) != str(val):
log.debug('Attributes differ:\n{0}\n{1}'.format(_val, val))
elif six.text_type(_val) != six.text_type(val):
log.debug('Attributes differ:\n%s\n%s', _val, val)
attrs_to_set[attr] = val
attr_names = ', '.join(attrs_to_set)
@ -287,7 +287,7 @@ def absent(
)
if 'error' in r:
ret['result'] = False
ret['comment'] = str(r['error'])
ret['comment'] = six.text_type(r['error'])
return ret
if not r['result']:
@ -312,7 +312,7 @@ def absent(
)
if 'error' in r:
ret['result'] = False
ret['comment'] = str(r['error'])
ret['comment'] = six.text_type(r['error'])
return ret
ret['comment'] = 'SQS queue {0} was deleted.'.format(name)

View File

@ -143,7 +143,7 @@ Delete also accepts a VPC peering connection id.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Libs
@ -1519,7 +1519,7 @@ def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
'Pending VPC peering connection found and can be accepted'})
return ret
fun = 'boto_vpc.accept_vpc_peering_connection'
log.debug('Calling `{0}()` to accept this VPC peering connection'.format(fun))
log.debug('Calling `%s()` to accept this VPC peering connection', fun)
result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)

View File

@ -30,7 +30,7 @@ Example:
- npm: bower
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
from salt.exceptions import CommandExecutionError, CommandNotFoundError

View File

@ -4,7 +4,7 @@
'''
# Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
# Salt Libs

View File

@ -4,7 +4,7 @@ Validate the boto_iam module
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase

View File

@ -4,7 +4,7 @@ Validate the boto_sns module
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import re
# Import Salt Testing libs

View File

@ -4,7 +4,7 @@ Tests for the boto_sns state
"""
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import re
# Import Salt Testing libs

View File

@ -3,7 +3,7 @@
:codeauthor: :email:`Alexander Pyatkin <asp@thexyz.net`
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase

View File

@ -4,7 +4,7 @@
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt Testing Libs

View File

@ -4,7 +4,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import random

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import random
import string

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import random

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
from copy import deepcopy
import pkg_resources

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging

View File

@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
# import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pkg_resources
import os.path

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import logging

View File

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
from copy import deepcopy

View File

@ -4,7 +4,7 @@
# module functions.
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import os.path

View File

@ -4,7 +4,7 @@
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -4,7 +4,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -3,7 +3,7 @@
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin

View File

@ -3,7 +3,7 @@
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt Testing Libs

Some files were not shown because too many files have changed in this diff Show More