Merge remote-tracking branch 'upstream/2014.7' into merge-forward-2015.2

Conflicts:
	salt/modules/win_ntp.py
	salt/templates/rh_ip/rh5_eth.jinja
	salt/templates/rh_ip/rh6_eth.jinja
	setup.py
Colton Myers 2015-03-13 17:02:19 -06:00
commit a983f1b611
16 changed files with 82 additions and 970 deletions

View File

@@ -6,7 +6,7 @@ Linode is a public cloud provider with a focus on Linux instances.
Dependencies
============
* linode-python >= 1.1
* linode-python >= 1.1.1
OR
@@ -21,12 +21,11 @@ Driver selection is automatic. If linode-python is present it will be used.
If it is absent, salt-cloud will fall back to Libcloud. If neither are present
salt-cloud will abort.
NOTE: linode-python 1.1 or later is recommended. As of this publication it is
not yet on PyPi. Earlier versions of linode-python should work but can leak
sensitive information into the debug logs.
NOTE: linode-python 1.1.1 or later is recommended. Earlier versions of linode-python
should work but leak sensitive information into the debug logs.
Linode-python can be downloaded from
https://github.com/tjfontaine/linode-python.
https://github.com/tjfontaine/linode-python or installed via pip.
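For illustration, a minimal sketch of the automatic driver selection described above (the libcloud import path and the function name are assumptions, not salt-cloud's actual loader):

.. code-block:: python

    # Prefer linode-python, fall back to apache-libcloud, abort if neither loads.
    try:
        import linode.api  # linode-python
        HAS_LINODEPY = True
    except ImportError:
        HAS_LINODEPY = False

    try:
        import libcloud.compute.providers  # apache-libcloud
        HAS_LIBCLOUD = True
    except ImportError:
        HAS_LIBCLOUD = False

    def pick_linode_driver():
        if HAS_LINODEPY:
            return 'linode-python'
        if HAS_LIBCLOUD:
            return 'libcloud'
        raise RuntimeError('Neither linode-python nor apache-libcloud is installed')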
Configuration
=============

View File

@@ -7,10 +7,16 @@ The Linode cloud module is used to control access to the Linode VPS system
Use of this module only requires the ``apikey`` parameter.
:depends: linode-python >= 1.0
:depends: linode-python >= 1.1.1
OR
:depends: apache-libcloud >= 0.13.2
.. note::
The linode-python driver will work with earlier versions of linode-python,
but it is highly recommended to use a minimum version of 1.1.1. Earlier
versions leak sensitive information into the debug logs.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/linode.conf``:
@@ -981,6 +987,9 @@ def create(vm_):
else:
vm_['ssh_host'] = node_data.public_ips[0]
# If a password wasn't supplied in the profile or provider config, set it now.
vm_['password'] = get_password(vm_)
# Bootstrap, either apache-libcloud or linode-python
ret = salt.utils.cloud.bootstrap(vm_, __opts__)

View File

@@ -1,940 +0,0 @@
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using linode-python bindings
=================================================
The Linode cloud module is used to control access to the Linode VPS system
Use of this module only requires the ``apikey`` parameter.
:depends: linode-python >= 1.0
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/linodepy.conf``:
.. code-block:: yaml
my-linode-config:
# Linode account api key
apikey: JVkbSJDGHSDKUKSDJfhsdklfjgsjdkflhjlsdfffhgdgjkenrtuinv
provider: linodepy
This provider supports cloning existing Linodes. To clone,
add a profile with a ``clonefrom`` key, and a ``script_args: -C``.
``Clonefrom`` should be the name of the Linode that is the source for the clone.
``script_args: -C`` passes a -C to the bootstrap script, which only configures
the minion and doesn't try to install a new copy of salt-minion. This way the
minion gets new keys and the keys get pre-seeded on the master, and the
/etc/salt/minion file has the right 'id:' declaration.
Cloning requires a post 2015-02-01 salt-bootstrap.
'''
from __future__ import absolute_import
# pylint: disable=E0102
# Import python libs
import copy
import pprint
import logging
import time
from os.path import exists, expanduser
# Import linode-python
try:
import linode
import linode.api
HAS_LINODEPY = True
except ImportError:
HAS_LINODEPY = False
# Import salt cloud libs
import salt.config as config
from salt.cloud.exceptions import SaltCloudConfigError
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.utils import namespaced_function
# Get logging started
log = logging.getLogger(__name__)
# Human-readable status fields
LINODE_STATUS = {
'-2': 'Boot Failed (not in use)',
'-1': 'Being Created',
'0': 'Brand New',
'1': 'Running',
'2': 'Powered Off',
'3': 'Shutting Down (not in use)',
'4': 'Saved to Disk (not in use)',
}
# Redirect linode functions to this module namespace
#get_size = namespaced_function(get_size, globals())
#get_image = namespaced_function(get_image, globals())
# avail_locations = namespaced_function(avail_locations, globals())
# avail_images = namespaced_function(avail_distributions, globals())
# avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
# destroy = namespaced_function(destroy, globals())
# list_nodes = namespaced_function(list_nodes, globals())
# list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# get_node = namespaced_function(get_node, globals())
# Borrowed from Apache Libcloud
class NodeAuthSSHKey(object):
"""
An SSH key to be installed for authentication to a node.
This is the actual contents of the user's ssh public key which will
normally be installed as root's public key on the node.
>>> pubkey = '...' # read from file
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> k = NodeAuthSSHKey(pubkey)
>>> k
<NodeAuthSSHKey>
"""
def __init__(self, pubkey):
"""
:param pubkey: Public key material.
:type pubkey: ``str``
"""
self.pubkey = pubkey
def __repr__(self):
return '<NodeAuthSSHKey>'
class NodeAuthPassword(object):
"""
A password to be used for authentication to a node.
"""
def __init__(self, password, generated=False):
"""
:param password: Password.
:type password: ``str``
:param generated: ``True`` if this password was automatically generated,
``False`` otherwise.
"""
self.password = password
self.generated = generated
def __repr__(self):
return '<NodeAuthPassword>'
# Only load in this module if the LINODE configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for Linode configurations.
'''
if not HAS_LINODEPY:
return False
if get_configured_provider() is False:
return False
return True
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or 'linodepy',
('apikey',)
)
def get_conn():
'''
Return a conn object for the passed VM data
'''
return linode.api.Api(key=config.get_cloud_config_value(
'apikey',
get_configured_provider(),
__opts__, search_global=False)
)
def get_image(conn, vm_):
'''
Return a single image from the Linode API
'''
images = avail_images(conn)
return images[vm_['image']]['id']
def get_size(conn, vm_):
'''
Return available size from Linode (Linode calls them "plans")
'''
sizes = avail_sizes(conn)
return sizes[vm_['size']]
def avail_sizes(conn=None):
'''
Return available sizes ("plans" in LinodeSpeak)
'''
if not conn:
conn = get_conn()
sizes = {}
for plan in conn.avail_linodeplans():
key = plan['LABEL']
sizes[key] = {}
sizes[key]['id'] = plan['PLANID']
sizes[key]['extra'] = plan
sizes[key]['bandwidth'] = plan['XFER']
sizes[key]['disk'] = plan['DISK']
sizes[key]['price'] = plan['HOURLY']*24*30
sizes[key]['ram'] = plan['RAM']
return sizes
def avail_locations(conn=None):
'''
return available datacenter locations
'''
if not conn:
conn = get_conn()
locations = {}
for dc in conn.avail_datacenters():
key = dc['LOCATION']
locations[key] = {}
locations[key]['id'] = dc['DATACENTERID']
locations[key]['abbreviation'] = dc['ABBR']
return locations
def avail_images(conn=None):
'''
Return available images
'''
if not conn:
conn = get_conn()
images = {}
for d in conn.avail_distributions():
images[d['LABEL']] = {}
images[d['LABEL']]['id'] = d['DISTRIBUTIONID']
images[d['LABEL']]['extra'] = d
return images
def get_ips(conn=None, LinodeID=None):
'''
Return IP addresses, both public and private
'''
if not conn:
conn = get_conn()
ips = conn.linode_ip_list(LinodeID=LinodeID)
all_ips = {'public_ips': [], 'private_ips': []}
for i in ips:
if i['ISPUBLIC']:
key = 'public_ips'
else:
key = 'private_ips'
all_ips[key].append(i['IPADDRESS'])
return all_ips
def linodes(full=False, include_ips=False, conn=None):
'''
Return data on all nodes
'''
if not conn:
conn = get_conn()
nodes = conn.linode_list()
results = {}
for n in nodes:
thisnode = {}
thisnode['id'] = n['LINODEID']
thisnode['image'] = None
thisnode['name'] = n['LABEL']
thisnode['size'] = n['TOTALRAM']
thisnode['state'] = n['STATUS']
thisnode['private_ips'] = []
thisnode['public_ips'] = []
thisnode['state'] = LINODE_STATUS[str(n['STATUS'])]
if include_ips:
thisnode = dict(thisnode.items() +
get_ips(conn, n['LINODEID']).items())
if full:
thisnode['extra'] = n
results[n['LABEL']] = thisnode
return results
def stop(*args, **kwargs):
'''
Execute a "stop" action on a VM in Linode.
'''
conn = get_conn()
node = get_node(name=args[0])
if not node:
node = get_node(LinodeID=args[0])
if node['state'] == 'Powered Off':
return {'success': True, 'state': 'Stopped',
'msg': 'Machine already stopped'}
result = conn.linode_shutdown(LinodeID=node['id'])
if waitfor_job(LinodeID=node['id'], JobID=result['JobID']):
return {'state': 'Stopped',
'action': 'stop',
'success': True}
else:
return {'action': 'stop',
'success': False}
def start(*args, **kwargs):
'''
Execute a "start" action on a VM in Linode.
'''
conn = get_conn()
node = get_node(name=args[0])
if not node:
node = get_node(LinodeID=args[0])
if not node:
return False
if node['state'] == 'Running':
return {'success': True,
'action': 'start',
'state': 'Running',
'msg': 'Machine already running'}
result = conn.linode_boot(LinodeID=node['id'])
if waitfor_job(LinodeID=node['id'], JobID=result['JobID']):
return {'state': 'Running',
'action': 'start',
'success': True}
else:
return {'action': 'start',
'success': False}
def clone(*args, **kwargs):
'''
Clone an existing Linode
'''
conn = get_conn()
node = get_node(name=args[0], full=True)
if not node:
node = get_node(LinodeID=args[0], full=True)
if len(args) > 1:
actionargs = args[1]
if 'target' not in actionargs:
log.debug('Tried to clone but target not specified.')
return False
result = conn.linode_clone(LinodeID=node['id'],
DatacenterID=node['extra']['DATACENTERID'],
PlanID=node['extra']['PLANID'])
conn.linode_update(LinodeID=result['LinodeID'],
Label=actionargs['target'])
# Boot!
if 'boot' not in actionargs:
bootit = True
else:
bootit = actionargs['boot']
if bootit:
bootjob_status = conn.linode_boot(LinodeID=result['LinodeID'])
waitfor_job(LinodeID=result['LinodeID'], JobID=bootjob_status['JobID'])
node_data = get_node(name=actionargs['target'], full=True)
log.info('Cloned Cloud VM {0} to {1}'.format(args[0], actionargs['target']))
log.debug(
'{0!r} VM creation details:\n{1}'.format(
args[0], pprint.pformat(node_data)
)
)
return node_data
def list_nodes():
'''
Return basic data on nodes
'''
return linodes(full=False, include_ips=True)
def list_nodes_full():
'''
Return all data on nodes
'''
return linodes(full=True, include_ips=True)
def get_node(LinodeID=None, name=None, full=False):
'''
Return information on a single node
'''
c = get_conn()
linode_list = linodes(full=full, conn=c)
for l, d in linode_list.iteritems():
if LinodeID:
if d['id'] == LinodeID:
d = dict(d.items() + get_ips(conn=c, LinodeID=d['id']).items())
return d
if name:
if d['name'] == name:
d = dict(d.items() + get_ips(conn=c, LinodeID=d['id']).items())
return d
return None
def get_disk_size(vm_, size, swap):
'''
Return the size of the root disk in MB
'''
conn = get_conn()
vmsize = get_size(conn, vm_)
disksize = int(vmsize['disk']) * 1024
return config.get_cloud_config_value(
'disk_size', vm_, __opts__, default=disksize - swap
)
def destroy(vm_):
conn = get_conn()
machines = linodes(full=False, include_ips=False)
return conn.linode_delete(LinodeID=machines[vm_]['id'], skipChecks=True)
def get_location(conn, vm_):
'''
Return the node location to use.
Linode wants a location id, which is an integer, when creating a new VM.
To be flexible, let the user specify any of location id, abbreviation, or
full name of the location ("Fremont, CA, USA") in the config file.
'''
locations = avail_locations(conn)
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
# Was this an id that matches something in locations?
if str(loc) not in [locations[k]['id'] for k in locations]:
# No, let's try to match it against the full name and the abbreviation and return the id
for key in locations:
if str(loc).lower() in (key,
str(locations[key]['id']).lower(),
str(locations[key]['abbreviation']).lower()):
return locations[key]['id']
else:
return loc
# No match. Return None, cloud provider will use a default or throw an exception
return None
def get_password(vm_):
'''
Return the password to use
'''
return config.get_cloud_config_value(
'password', vm_, __opts__, default=config.get_cloud_config_value(
'passwd', vm_, __opts__, search_global=False
), search_global=False
)
def get_pubkey(vm_):
'''
Return the SSH pubkey to use
'''
return config.get_cloud_config_value(
'ssh_pubkey', vm_, __opts__, search_global=False)
def get_auth(vm_):
'''
Return either NodeAuthSSHKey or NodeAuthPassword, preferring
NodeAuthSSHKey if both are provided.
'''
if get_pubkey(vm_) is not None:
return NodeAuthSSHKey(get_pubkey(vm_))
elif get_password(vm_) is not None:
return NodeAuthPassword(get_password(vm_))
else:
raise SaltCloudConfigError(
'The Linode driver requires either a password or ssh_pubkey with '
'corresponding ssh_private_key.')
def get_ssh_key_filename(vm_):
'''
Return path to filename if get_auth() returns a NodeAuthSSHKey.
'''
key_filename = config.get_cloud_config_value(
'ssh_key_file', vm_, __opts__,
default=config.get_cloud_config_value(
'ssh_pubkey', vm_, __opts__, search_global=False
), search_global=False)
if key_filename is not None and exists(expanduser(key_filename)):
return expanduser(key_filename)
return None
def get_private_ip(vm_):
'''
Return True if a private ip address is requested
'''
return config.get_cloud_config_value(
'private_ip', vm_, __opts__, default=False
)
def get_swap(vm_):
'''
Return the amount of swap space to use in MB
'''
return config.get_cloud_config_value(
'swap', vm_, __opts__, default=128
)
def get_kernels(conn=None):
'''
Get Linode's list of kernels available
'''
if not conn:
conn = get_conn()
kernel_response = conn.avail_kernels()
if len(kernel_response['ERRORARRAY']) == 0:
kernels = {}
for k in kernel_response['DATA']:
key = k['LABEL']
kernels[key] = {}
kernels[key]['id'] = k['KERNELID']
kernels[key]['name'] = k['LABEL']
kernels[key]['isvops'] = k['ISVOPS']
kernels[key]['isxen'] = k['ISXEN']
return kernels
else:
log.error("Linode avail_kernels returned {0}".format(kernel_response['ERRORARRAY']))
return None
def get_one_kernel(conn=None, name=None):
'''
Return data on one kernel
name=None returns latest kernel
'''
if not conn:
conn = get_conn()
kernels = get_kernels(conn)
if not name:
name = 'latest 64 bit'
else:
name = name.lower()
for k, v in kernels:
if name in k.lower():
return v
log.error('Did not find a kernel matching {0}'.format(name))
return None
def waitfor_status(conn=None, LinodeID=None, status=None, timeout=300, quiet=True):
'''
Wait for a certain status
'''
if not conn:
conn = get_conn()
if status is None:
status = 'Brand New'
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
result = get_node(LinodeID)
if result['state'] == status:
return True
time.sleep(interval)
if not quiet:
log.info('Status for {0} is {1}'.format(LinodeID, result['state']))
else:
log.debug('Status for {0} is {1}'.format(LinodeID, result))
return False
def waitfor_job(conn=None, LinodeID=None, JobID=None, timeout=300, quiet=True):
if not conn:
conn = get_conn()
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
try:
result = conn.linode_job_list(LinodeID=LinodeID, JobID=JobID)
except linode.ApiError as exc:
log.info('Waiting for job {0} on host {1} returned {2}'.format(JobID, LinodeID, exc))
return False
if result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
if not quiet:
log.info('Still waiting on Job {0} for {1}'.format(JobID, LinodeID))
else:
log.debug('Still waiting on Job {0} for {1}'.format(JobID, LinodeID))
return False
def boot(LinodeID=None, configid=None):
'''
Execute a boot sequence on a linode
'''
conn = get_conn()
return conn.linode_boot(LinodeID=LinodeID, ConfigID=configid)
def create_swap_disk(vm_=None, LinodeID=None, swapsize=None):
'''
Create the disk for the linode
'''
conn = get_conn()
if not swapsize:
swapsize = get_swap(vm_)
result = conn.linode_disk_create(LinodeID=LinodeID,
Label='swap',
Size=swapsize,
Type='swap')
return result
def create_disk_from_distro(vm_=None, LinodeID=None, swapsize=None):
'''
Create the disk for the linode
'''
conn = get_conn()
result = conn.linode_disk_createfromdistribution(
LinodeID=LinodeID,
DistributionID=get_image(conn, vm_),
Label='root',
Size=get_disk_size(vm_, get_size(conn, vm_)['disk'], get_swap(vm_)),
rootPass=get_password(vm_),
rootSSHKey=get_pubkey(vm_)
)
return result
def create_config(vm_, LinodeID=None, root_disk_id=None, swap_disk_id=None):
'''
Create a Linode Config
'''
conn = get_conn()
# 138 appears to always be the latest 64-bit kernel for Linux
kernelid = 138
result = conn.linode_config_create(LinodeID=LinodeID,
Label=vm_['name'],
Disklist='{0},{1}'.format(root_disk_id,
swap_disk_id),
KernelID=kernelid,
RootDeviceNum=1,
RootDeviceRO=True,
RunLevel='default',
helper_disableUpdateDB=True,
helper_xen=True,
helper_depmod=True)
return result
def create(vm_):
'''
Create a single VM from a data dict
'''
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
},
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
conn = get_conn()
if 'clonefrom' in vm_:
kwargs = {
'name': vm_['name'],
'clonefrom': vm_['clonefrom'],
'auth': get_auth(vm_),
'ex_private': get_private_ip(vm_),
}
node_data = clone(vm_['clonefrom'], {'target': vm_['name']})
else:
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_)['id'],
'location': get_location(conn, vm_),
'auth': get_auth(vm_),
'ex_private': get_private_ip(vm_),
'ex_rsize': get_disk_size(vm_, get_size(conn, vm_)['disk'], get_swap(vm_)),
'ex_swap': get_swap(vm_)
}
# if 'libcloud_args' in vm_:
# kwargs.update(vm_['libcloud_args'])
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': {'name': kwargs['name'],
'image': kwargs['image'],
'size': kwargs['size'],
'location': kwargs['location'],
'ex_private': kwargs['ex_private'],
'ex_rsize': kwargs['ex_rsize'],
'ex_swap': kwargs['ex_swap']}},
transport=__opts__['transport']
)
try:
node_data = conn.linode_create(DatacenterID=get_location(conn, vm_),
PlanID=kwargs['size'], PaymentTerm=1)
except Exception as exc:
log.error(
'Error creating {0} on LINODE\n\n'
'The following exception was thrown by linode-python when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
if not waitfor_status(conn=conn, LinodeID=node_data['LinodeID'], status='Brand New'):
log.error('Error creating {0} on LINODE\n\n'
'while waiting for initial ready status'.format(
vm_['name']
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Set linode name
set_name_result = conn.linode_update(LinodeID=node_data['LinodeID'],
Label=vm_['name'])
log.debug('Set name action for {0} was {1}'.format(vm_['name'],
set_name_result))
# Create disks
log.debug('Creating disks for {0}'.format(node_data['LinodeID']))
swap_result = create_swap_disk(LinodeID=node_data['LinodeID'], swapsize=get_swap(vm_))
root_result = create_disk_from_distro(vm_, LinodeID=node_data['LinodeID'],
swapsize=get_swap(vm_))
# Create config
config_result = create_config(vm_, LinodeID=node_data['LinodeID'],
root_disk_id=root_result['DiskID'],
swap_disk_id=swap_result['DiskID'])
# Boot!
boot_result = boot(LinodeID=node_data['LinodeID'],
configid=config_result['ConfigID'])
if not waitfor_job(conn, LinodeID=node_data['LinodeID'],
JobID=boot_result['JobID']):
log.error('Boot failed for {0}.'.format(node_data))
return False
node_data.update(get_node(node_data['LinodeID']))
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
ret = {}
if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
deploy_script = script(vm_)
deploy_kwargs = {
'opts': __opts__,
'host': node_data['public_ips'][0],
'username': ssh_username,
'password': get_password(vm_),
'script': deploy_script.script,
'name': vm_['name'],
'tmp_dir': config.get_cloud_config_value(
'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
),
'deploy_command': config.get_cloud_config_value(
'deploy_command', vm_, __opts__,
default='/tmp/.saltcloud/deploy.sh',
),
'start_action': __opts__['start_action'],
'parallel': __opts__['parallel'],
'sock_dir': __opts__['sock_dir'],
'conf_file': __opts__['conf_file'],
'minion_pem': vm_['priv_key'],
'minion_pub': vm_['pub_key'],
'keep_tmp': __opts__['keep_tmp'],
'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
'sudo': config.get_cloud_config_value(
'sudo', vm_, __opts__, default=(ssh_username != 'root')
),
'sudo_password': config.get_cloud_config_value(
'sudo_password', vm_, __opts__, default=None
),
'tty': config.get_cloud_config_value(
'tty', vm_, __opts__, default=False
),
'display_ssh_output': config.get_cloud_config_value(
'display_ssh_output', vm_, __opts__, default=True
),
'script_args': config.get_cloud_config_value(
'script_args', vm_, __opts__
),
'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
'has_ssh_agent': False
}
if get_ssh_key_filename(vm_) is not None and get_pubkey(vm_) is not None:
deploy_kwargs['key_filename'] = get_ssh_key_filename(vm_)
# Deploy salt-master files, if necessary
if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
deploy_kwargs['make_master'] = True
deploy_kwargs['master_pub'] = vm_['master_pub']
deploy_kwargs['master_pem'] = vm_['master_pem']
master_conf = salt.utils.cloud.master_config(__opts__, vm_)
deploy_kwargs['master_conf'] = master_conf
if master_conf.get('syndic_master', None):
deploy_kwargs['make_syndic'] = True
deploy_kwargs['make_minion'] = config.get_cloud_config_value(
'make_minion', vm_, __opts__, default=True
)
# Check for Windows install params
win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
if win_installer:
deploy_kwargs['win_installer'] = win_installer
minion = salt.utils.cloud.minion_config(__opts__, vm_)
deploy_kwargs['master'] = minion['master']
deploy_kwargs['username'] = config.get_cloud_config_value(
'win_username', vm_, __opts__, default='Administrator'
)
deploy_kwargs['password'] = config.get_cloud_config_value(
'win_password', vm_, __opts__, default=''
)
# Store what was used to deploy the VM
event_kwargs = copy.deepcopy(deploy_kwargs)
del event_kwargs['minion_pem']
del event_kwargs['minion_pub']
del event_kwargs['sudo_password']
if 'password' in event_kwargs:
del event_kwargs['password']
ret['deploy_kwargs'] = event_kwargs
salt.utils.cloud.fire_event(
'event',
'executing deploy script',
'salt/cloud/{0}/deploying'.format(vm_['name']),
{'kwargs': event_kwargs},
transport=__opts__['transport']
)
deployed = False
if win_installer:
deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
else:
deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
if deployed:
log.info('Salt installed on {0}'.format(vm_['name']))
else:
log.error(
'Failed to start Salt on Cloud VM {0}'.format(
vm_['name']
)
)
ret.update(node_data)
log.info('Created Cloud VM {0[name]!r}'.format(vm_))
log.debug(
'{0[name]!r} VM creation details:\n{1}'.format(
vm_, pprint.pformat(node_data)
)
)
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['provider'],
},
transport=__opts__['transport']
)
return ret

View File

@@ -53,7 +53,9 @@ def tune(device, **kwargs):
switch = kwarg_map[key]
if key != 'read-write':
args.append(switch.replace('set', 'get'))
if kwargs[key] == 'True':
else:
args.append('getro')
if kwargs[key] == 'True' or kwargs[key] is True:
opts += '--{0} '.format(key)
else:
opts += '--{0} {1} '.format(switch, kwargs[key])
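A hedged sketch of why the new ``else`` branch is needed (the kwarg map entries shown are an assumed subset of the module's real map):

.. code-block:: python

    # Every other "set" switch has a matching "get" switch, but 'setrw' does
    # not, so the read-write state has to be read back through 'getro'.
    kwarg_map = {'read-ahead': 'setra',
                 'read-only': 'setro',
                 'read-write': 'setrw'}

    def getter_for(key):
        switch = kwarg_map[key]
        if key != 'read-write':
            return switch.replace('set', 'get')
        return 'getro'

    assert getter_for('read-ahead') == 'getra'
    assert getter_for('read-write') == 'getro'   # 'getrw' does not exist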

View File

@@ -213,7 +213,7 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
def _wait_for_sync(status, conn):
retry = 10
retry = 30
i = 0
while i < retry:
log.info('Getting route53 status (attempt {0})'.format(i + 1))

View File

@@ -12,7 +12,13 @@ from salt.ext.six import string_types
# Import salt libs
import salt.utils
import salt.utils.cloud
try:
# Gated for salt-ssh (salt.utils.cloud imports msgpack)
import salt.utils.cloud
HAS_CLOUD = True
except ImportError:
HAS_CLOUD = False
import salt._compat
import salt.syspaths as syspaths
import salt.utils.sdb as sdb
@@ -390,6 +396,8 @@ def gather_bootstrap_script(bootstrap=None):
salt '*' config.gather_bootstrap_script
'''
if not HAS_CLOUD:
return False, 'config.gather_bootstrap_script is unavailable'
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
if 'Success' in ret and len(ret['Success']['Files updated']) > 0:
return ret['Success']['Files updated'][0]

View File

@@ -423,7 +423,7 @@ IPV4_ATTR_MAP = {
'server': __ipv4_quad,
'hwaddr': __mac,
# tunnel
'mode': __within(['gre', 'GRE', 'ipip', 'IPIP'], dtype=str),
'mode': __within(['gre', 'GRE', 'ipip', 'IPIP', '802.3ad'], dtype=str),
'endpoint': __ipv4_quad,
'dstaddr': __ipv4_quad,
'local': __ipv4_quad,

View File

@@ -2957,9 +2957,13 @@ def check_perms(name, ret, user, group, mode, follow_symlinks=False):
ret['changes']['mode'] = mode
# user/group changes if needed, then check if it worked
if user:
if isinstance(user, int):
user = uid_to_user(user)
if user != perms['luser']:
perms['cuser'] = user
if group:
if isinstance(group, int):
group = gid_to_group(group)
if group != perms['lgroup']:
perms['cgroup'] = group
if 'cuser' in perms or 'cgroup' in perms:
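The reason for the int-to-name conversion above, as a small sketch (``pwd`` stands in for the module's ``uid_to_user`` helper; assumes a typical Linux system where uid 0 is root):

.. code-block:: python

    import pwd

    luser = 'root'   # owner name check_perms reads back from the file
    user = 0         # numeric uid supplied in the state

    # Comparing an int against a name always looks like a pending change...
    assert user != luser
    # ...but after converting the uid to a name, nothing is flagged.
    assert pwd.getpwuid(user).pw_name == luser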

View File

@@ -644,7 +644,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):
result['enable_ipv6'] = opts['enable_ipv6']
valid = _CONFIG_TRUE + _CONFIG_FALSE
for opt in ['onparent', 'peerdns', 'slave', 'vlan', 'defroute']:
for opt in ['onparent', 'peerdns', 'slave', 'vlan', 'defroute', 'stp']:
if opt in opts:
if opts[opt] in _CONFIG_TRUE:
result[opt] = 'yes'

View File

@@ -70,7 +70,7 @@ def get_servers():
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
for line in lines:
try:
if 'NtpServer' in line:
if line.startswith('NtpServer:'):
_, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
return sorted(ntpsvrs.split())
except ValueError as e:
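A quick sketch of the parsing above with an assumed ``w32tm`` output line; the stricter ``startswith('NtpServer:')`` only matches the configuration line itself, not other lines that merely mention ``NtpServer``:

.. code-block:: python

    # Assumed sample line from 'w32tm /query /configuration'.
    line = 'NtpServer: pool.ntp.org,0x9 time.windows.com,0x9 (Local)'

    if line.startswith('NtpServer:'):
        _, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
        print(sorted(ntpsvrs.split()))
        # ['pool.ntp.org,0x9', 'time.windows.com,0x9']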

View File

@@ -189,18 +189,23 @@ def extracted(name,
tar_cmd = ['tar']
tar_shortopts = 'x'
tar_longopts = []
tar_afterfile = []
for opt in tar_opts:
if not opt.startswith('-'):
for shortopt in opt:
if shortopt not in ['x', 'f']:
tar_shortopts = tar_shortopts + shortopt
else:
for position, opt in enumerate(tar_opts):
if opt.startswith('-'):
tar_longopts.append(opt)
else:
if position > 0:
tar_afterfile.append(opt)
else:
append_opt = opt
append_opt = append_opt.replace('x', '').replace('f', '')
tar_shortopts = tar_shortopts + append_opt
tar_cmd.append(tar_shortopts)
tar_cmd.extend(tar_longopts)
tar_cmd.extend(['-f', filename])
tar_cmd.extend(tar_afterfile)
results = __salt__['cmd.run_all'](tar_cmd, cwd=name, python_shell=False)
if results['retcode'] != 0:
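For illustration, the same splitting pulled into a standalone helper with a sample input (the function name and example values are made up; the state does this inline):

.. code-block:: python

    def build_tar_cmd(tar_opts, filename):
        tar_cmd, tar_shortopts = ['tar'], 'x'
        tar_longopts, tar_afterfile = [], []
        for position, opt in enumerate(tar_opts):
            if opt.startswith('-'):
                tar_longopts.append(opt)      # e.g. '--strip-components=1'
            elif position > 0:
                tar_afterfile.append(opt)     # trailing args go after the file
            else:
                tar_shortopts += opt.replace('x', '').replace('f', '')
        tar_cmd.append(tar_shortopts)
        tar_cmd.extend(tar_longopts)
        tar_cmd.extend(['-f', filename])
        tar_cmd.extend(tar_afterfile)
        return tar_cmd

    # build_tar_cmd(['J', '--strip-components=1', 'etc/'], 'pkg.tar.xz')
    # -> ['tar', 'xJ', '--strip-components=1', '-f', 'pkg.tar.xz', 'etc/']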

View File

@@ -65,6 +65,11 @@ def tuned(name, **kwargs):
'name': name,
'result': True}
kwarg_map = {'read-ahead': 'getra',
'filesystem-read-ahead': 'getfra',
'read-only': 'getro',
'read-write': 'getro'}
if not __salt__['file.is_blkdev']:
ret['comment'] = ('Changes to {0} cannot be applied. '
'Not a block device. ').format(name)
@@ -73,11 +78,30 @@ def tuned(name, **kwargs):
ret['result'] = None
return ret
else:
current = __salt__['blockdev.dump'](name)
changes = __salt__['blockdev.tune'](name, **kwargs)
changeset = {}
for key in kwargs:
if key in kwarg_map:
switch = kwarg_map[key]
if current[switch] != changes[switch]:
if isinstance(kwargs[key], bool):
old = (current[switch] == '1')
new = (changes[switch] == '1')
else:
old = current[switch]
new = changes[switch]
if key == 'read-write':
old = not old
new = not new
changeset[key] = 'Changed from {0} to {1}'.format(old, new)
if changes:
ret['comment'] = ('Block device {0} '
'successfully modified ').format(name)
ret['changes'] = changes
if changeset:
ret['comment'] = ('Block device {0} '
'successfully modified ').format(name)
ret['changes'] = changeset
else:
ret['comment'] = 'Block device {0} already in correct state'.format(name)
else:
ret['comment'] = 'Failed to modify block device {0}'.format(name)
ret['result'] = False
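A simplified example of the changeset computation above, with assumed ``blockdev.dump`` output (the boolean conversion and read-write inversion are omitted because ``getro`` does not change here):

.. code-block:: python

    current = {'getra': '256', 'getro': '0'}     # dump before tuning (assumed)
    changes = {'getra': '1024', 'getro': '0'}    # dump after tuning (assumed)
    kwargs = {'read-ahead': 1024, 'read-write': True}
    kwarg_map = {'read-ahead': 'getra', 'read-write': 'getro'}

    changeset = {}
    for key, switch in kwarg_map.items():
        if key in kwargs and current[switch] != changes[switch]:
            changeset[key] = 'Changed from {0} to {1}'.format(
                current[switch], changes[switch])

    assert changeset == {'read-ahead': 'Changed from 256 to 1024'}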

View File

@@ -19,10 +19,11 @@ DEVICE={{name}}
{%endif%}{%endif%}{% if srcaddr %}SRCADDR={{srcaddr}}
{%endif%}{% if peerdns %}PEERDNS={{peerdns}}
{%endif%}{% if bridge %}BRIDGE={{bridge}}
{%endif%}{% if delay %}DELAY={{delay}}
{%endif%}{% if stp %}STP={{stp}}
{%endif%}{% if delay or delay == 0 %}DELAY={{delay}}
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}

View File

@@ -20,10 +20,11 @@ DEVICE="{{name}}"
{%endif%}{% if peerdns %}PEERDNS="{{peerdns}}"
{%endif%}{% if defroute %}DEFROUTE="{{defroute}}"
{%endif%}{% if bridge %}BRIDGE="{{bridge}}"
{%endif%}{% if delay %}DELAY="{{delay}}"
{%endif%}{% if stp %}STP="{{stp}}"
{%endif%}{% if delay or delay == 0 %}DELAY="{{delay}}"
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}

View File

@@ -20,11 +20,12 @@ DEVICE="{{name}}"
{%endif%}{% if peerdns %}PEERDNS="{{peerdns}}"
{%endif%}{% if defroute %}DEFROUTE="{{defroute}}"
{%endif%}{% if bridge %}BRIDGE="{{bridge}}"
{%endif%}{% if delay %}DELAY="{{delay}}"
{%endif%}{% if stp %}STP="{{stp}}"
{%endif%}{% if delay or delay == 0 %}DELAY="{{delay}}"
{%endif%}{% if mtu %}MTU="{{mtu}}"
{%endif%}{% if my_inner_ipaddr %}MY_INNER_IPADDR={{my_inner_ipaddr}}
{%endif%}{% if my_outer_ipaddr %}MY_OUTER_IPADDR={{my_outer_ipaddr}}
{%endif%}{%if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if bonding %}BONDING_OPTS="{%for item in bonding %}{{item}}={{bonding[item]}} {%endfor%}"
{%endif%}{% if ethtool %}ETHTOOL_OPTS="{%for item in ethtool %}{{item}} {{ethtool[item]}} {%endfor%}"
{%endif%}{% if domain %}DOMAIN="{{ domain|join(' ') }}"
{% endif %}{% for server in dns -%}

View File

@@ -595,7 +595,7 @@ class SaltDistribution(distutils.dist.Distribution):
self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt'
self.salt_version = __version__ # pylint: disable=undefined-variable
self.version = __version__ # pylint: disable=undefined-variable
self.description = 'Portable, distributed, remote execution and configuration management system'
self.author = 'Thomas S Hatch'
self.author_email = 'thatch45@gmail.com'
@@ -627,8 +627,6 @@ class SaltDistribution(distutils.dist.Distribution):
attrvalue = getattr(self, attrname, None)
if attrvalue == 0:
continue
if attrname == 'salt_version':
attrname = 'version'
if hasattr(self.metadata, 'set_{0}'.format(attrname)):
getattr(self.metadata, 'set_{0}'.format(attrname))(attrvalue)
elif hasattr(self.metadata, attrname):