Merge pull request #48873 from dmacvicar/terraform_roster_3

Terraform roster module to be used with terraform-provider-salt
commit 02823e5768 by Daniel Wallace, 2018-08-17 19:38:40 -05:00 (committed via GitHub)
10 changed files with 857 additions and 134 deletions


@@ -18,3 +18,4 @@ roster modules
     range
     scan
     sshconfig
+    terraform


@@ -0,0 +1,6 @@
=====================
salt.roster.terraform
=====================

.. automodule:: salt.roster.terraform
    :members:


@@ -51,7 +51,7 @@ def get_roster_file(options):
     template = os.path.join(salt.syspaths.CONFIG_DIR, 'roster')
     if not os.path.isfile(template):
-        raise IOError('No roster file found')
+        raise IOError('Roster file "{0}" not found'.format(template))
     if not os.access(template, os.R_OK):
         raise IOError('Access denied to roster "{0}"'.format(template))
@@ -106,7 +106,7 @@ class Roster(object):
             except salt.exceptions.SaltRenderError as exc:
                 log.error('Unable to render roster file: %s', exc)
             except IOError as exc:
-                pass
+                log.error("Can't access roster for backend %s: %s", back, exc)
         log.debug('Matched minions: %s', targets)
         return targets


@@ -4,20 +4,6 @@ Read in the roster from a flat file using the renderer system
 '''
 from __future__ import absolute_import, print_function, unicode_literals
 
-# Import python libs
-import fnmatch
-import re
-import copy
-
-# Try to import range from https://github.com/ytoolshed/range
-HAS_RANGE = False
-try:
-    import seco.range
-    HAS_RANGE = True
-except ImportError:
-    pass
-# pylint: enable=import-error
 # Import Salt libs
 import salt.loader
 import salt.config
@@ -47,121 +33,4 @@ def targets(tgt, tgt_type='glob', **kwargs):
     conditioned_raw = {}
     for minion in raw:
         conditioned_raw[six.text_type(minion)] = salt.config.apply_sdb(raw[minion])
-    rmatcher = RosterMatcher(conditioned_raw, tgt, tgt_type, 'ipv4')
-    return rmatcher.targets()
-
-
-class RosterMatcher(object):
-    '''
-    Matcher for the roster data structure
-    '''
-    def __init__(self, raw, tgt, tgt_type, ipv='ipv4'):
-        self.tgt = tgt
-        self.tgt_type = tgt_type
-        self.raw = raw
-        self.ipv = ipv
-
-    def targets(self):
-        '''
-        Execute the correct tgt_type routine and return
-        '''
-        try:
-            return getattr(self, 'ret_{0}_minions'.format(self.tgt_type))()
-        except AttributeError:
-            return {}
-
-    def ret_glob_minions(self):
-        '''
-        Return minions that match via glob
-        '''
-        minions = {}
-        for minion in self.raw:
-            if fnmatch.fnmatch(minion, self.tgt):
-                data = self.get_data(minion)
-                if data:
-                    minions[minion] = data.copy()
-        return minions
-
-    def ret_pcre_minions(self):
-        '''
-        Return minions that match via pcre
-        '''
-        minions = {}
-        for minion in self.raw:
-            if re.match(self.tgt, minion):
-                data = self.get_data(minion)
-                if data:
-                    minions[minion] = data.copy()
-        return minions
-
-    def ret_list_minions(self):
-        '''
-        Return minions that match via list
-        '''
-        minions = {}
-        if not isinstance(self.tgt, list):
-            self.tgt = self.tgt.split(',')
-        for minion in self.raw:
-            if minion in self.tgt:
-                data = self.get_data(minion)
-                if data:
-                    minions[minion] = data.copy()
-        return minions
-
-    def ret_nodegroup_minions(self):
-        '''
-        Return minions which match the special list-only groups defined by
-        ssh_list_nodegroups
-        '''
-        minions = {}
-        nodegroup = __opts__.get('ssh_list_nodegroups', {}).get(self.tgt, [])
-        if not isinstance(nodegroup, list):
-            nodegroup = nodegroup.split(',')
-        for minion in self.raw:
-            if minion in nodegroup:
-                data = self.get_data(minion)
-                if data:
-                    minions[minion] = data.copy()
-        return minions
-
-    def ret_range_minions(self):
-        '''
-        Return minions that are returned by a range query
-        '''
-        if HAS_RANGE is False:
-            raise RuntimeError("Python lib 'seco.range' is not available")
-        minions = {}
-        range_hosts = _convert_range_to_list(self.tgt, __opts__['range_server'])
-        for minion in self.raw:
-            if minion in range_hosts:
-                data = self.get_data(minion)
-                if data:
-                    minions[minion] = data.copy()
-        return minions
-
-    def get_data(self, minion):
-        '''
-        Return the configured ip
-        '''
-        ret = copy.deepcopy(__opts__.get('roster_defaults', {}))
-        if isinstance(self.raw[minion], six.string_types):
-            ret.update({'host': self.raw[minion]})
-            return ret
-        elif isinstance(self.raw[minion], dict):
-            ret.update(self.raw[minion])
-            return ret
-        return False
-
-
-def _convert_range_to_list(tgt, range_server):
-    '''
-    convert a seco.range range into a list target
-    '''
-    r = seco.range.Range(range_server)
-    try:
-        return r.expand(tgt)
-    except seco.range.RangeException as err:
-        log.error('Range server exception: %s', err)
-        return []
+    return __utils__['roster_matcher.targets'](conditioned_raw, tgt, tgt_type, 'ipv4')
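
With this refactor, any roster module can delegate matching to the shared
utility. A minimal hypothetical roster module built on the same call (the
signature is taken from the line added above):

.. code-block:: python

    def targets(tgt, tgt_type='glob', **kwargs):
        raw = {'minion1': {'host': '10.0.0.1'}}  # however the module builds it
        return __utils__['roster_matcher.targets'](raw, tgt, tgt_type, 'ipv4')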

salt/roster/terraform.py Normal file (172 lines)

@@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
'''
Dynamic roster from terraform current state
===========================================

This roster module allows you to dynamically generate the roster from the
terraform resources defined with the `terraform-provider-salt
<https://github.com/dmacvicar/terraform-provider-salt>`_ plugin.

It exposes every ``salt_host`` resource to the salt-ssh roster, mapping the
resource attributes one-to-one onto roster attributes. Because any other
terraform resource can be wired into a ``salt_host`` resource through
interpolation, the roster stays completely independent of the type of
terraform resources you provision.

Basic Example
-------------

Given a simple salt-ssh tree with a Saltfile:

.. code-block:: yaml

    salt-ssh:
      config_dir: etc/salt
      max_procs: 30
      wipe_ssh: True

and ``etc/salt/master``:

.. code-block:: yaml

    root_dir: .
    file_roots:
      base:
        - srv/salt
    pillar_roots:
      base:
        - srv/pillar
    roster: terraform

In the same folder as your ``Saltfile``, create a terraform file with
resources such as cloud instances or virtual machines. For each of those
that you want to manage with Salt, create a ``salt_host`` resource:

.. code-block:: hcl

    resource "salt_host" "dbminion" {
      salt_id = "dbserver"
      host    = "${libvirt_domain.vm-db.network_interface.0.addresses.0}"
      user    = "root"
      passwd  = "linux"
    }

You can use the ``count`` attribute to create multiple roster entries with a
single definition. Please refer to the `terraform salt provider
<https://github.com/dmacvicar/terraform-provider-salt>`_ for more detailed
examples.
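
For instance, a hypothetical sketch (``count`` and ``element()`` are standard
terraform 0.11 interpolation constructs; the attribute names follow the
provider's ``salt_host`` schema):

.. code-block:: hcl

    resource "salt_host" "webminion" {
      count   = 2
      salt_id = "web${count.index}"
      host    = "${element(libvirt_domain.vm-web.*.network_interface.0.addresses.0, count.index)}"
      user    = "root"
      passwd  = "linux"
    }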
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
import os.path

# Import Salt libs
import salt.ext.six
import salt.utils.files
import salt.utils.json
log = logging.getLogger(__name__)

TF_OUTPUT_PREFIX = 'salt.roster.'
# Attributes a salt_host resource can contribute to a roster entry, with the
# type used to cast the terraform output: 's' string, 'i' int, 'b' bool,
# 'm' map (passed through as-is).
TF_ROSTER_ATTRS = {'host': 's',
                   'user': 's',
                   'passwd': 's',
                   'port': 'i',
                   'sudo': 'b',
                   'sudo_user': 's',
                   'tty': 'b',
                   'priv': 's',
                   'timeout': 'i',
                   'minion_opts': 'm',
                   'thin_dir': 's',
                   'cmd_umask': 'i'}
MINION_ID = 'salt_id'
def _handle_salt_host_resource(resource):
    '''
    Handles salt_host resources.
    See https://github.com/dmacvicar/terraform-provider-salt

    Returns roster attributes for the resource or None
    '''
    ret = {}
    attrs = resource.get('primary', {}).get('attributes', {})
    ret[MINION_ID] = attrs.get(MINION_ID)
    valid_attrs = set(attrs.keys()).intersection(TF_ROSTER_ATTRS.keys())
    for attr in valid_attrs:
        ret[attr] = _cast_output_to_type(attrs.get(attr), TF_ROSTER_ATTRS.get(attr))
    return ret
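

# Illustrative example (hypothetical input, shaped like the salt_host entries
# in the tfstate fixture used by the unit tests below): only attributes
# listed in TF_ROSTER_ATTRS survive, cast to their declared types.
#
#   resource = {'type': 'salt_host',
#               'primary': {'attributes': {'salt_id': 'db0',
#                                          'host': '192.168.122.174',
#                                          'tty': 'true',
#                                          'user': 'root'}}}
#   _handle_salt_host_resource(resource)
#   # => {'salt_id': 'db0', 'host': '192.168.122.174',
#   #     'tty': True, 'user': 'root'}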
def _add_ssh_key(ret):
    '''
    Set up the roster entry to be accessed with the default salt-ssh key
    '''
    priv = None
    if __opts__.get('ssh_use_home_key') and os.path.isfile(os.path.expanduser('~/.ssh/id_rsa')):
        priv = os.path.expanduser('~/.ssh/id_rsa')
    else:
        priv = __opts__.get(
            'ssh_priv',
            os.path.abspath(os.path.join(
                __opts__['pki_dir'],
                'ssh',
                'salt-ssh.rsa'
            ))
        )
    if priv and os.path.isfile(priv):
        ret['priv'] = priv
def _cast_output_to_type(value, typ):
    '''cast the value depending on the terraform type'''
    # terraform 0.11 state files store scalar attributes as strings, so
    # booleans arrive as 'true'/'false' and integers as e.g. '22'
    if typ == 'b':
        return value == 'true'
    if typ == 'i':
        return int(value)
    return value
def _parse_state_file(state_file_path='terraform.tfstate'):
    '''
    Parses the terraform state file, passing each resource type to the right handler
    '''
    ret = {}
    with salt.utils.files.fopen(state_file_path, 'r') as fh_:
        tfstate = salt.utils.json.load(fh_)

    modules = tfstate.get('modules')
    if not modules:
        log.error('Malformed tfstate file. No modules found')
        return ret

    for module in modules:
        resources = module.get('resources', [])
        for resource_name, resource in salt.ext.six.iteritems(resources):
            roster_entry = None
            if resource['type'] == 'salt_host':
                roster_entry = _handle_salt_host_resource(resource)
            if not roster_entry:
                continue

            minion_id = roster_entry.get(MINION_ID, resource.get('id'))
            if not minion_id:
                continue

            if MINION_ID in roster_entry:
                del roster_entry[MINION_ID]
            _add_ssh_key(roster_entry)
            ret[minion_id] = roster_entry
    return ret
def targets(tgt, tgt_type='glob', **kwargs):  # pylint: disable=W0613
    '''
    Returns the roster from the terraform state file. The location is taken
    from the ``roster_file`` option and defaults to ``terraform.tfstate`` in
    the current directory.
    '''
    roster_file = os.path.abspath('terraform.tfstate')
    if __opts__.get('roster_file'):
        roster_file = os.path.abspath(__opts__['roster_file'])

    if not os.path.isfile(roster_file):
        log.error("Can't find terraform state file '%s'", roster_file)
        return {}

    log.debug('terraform roster: using %s state file', roster_file)

    if not roster_file.endswith('.tfstate'):
        log.error('Terraform roster can only be used with terraform state files')
        return {}

    raw = _parse_state_file(roster_file)
    log.debug('%s hosts in terraform state file', len(raw))
    return __utils__['roster_matcher.targets'](raw, tgt, tgt_type, 'ipv4')
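
The module expects Salt's loader to inject ``__opts__`` and ``__utils__``. As
a quick standalone sanity check (a hypothetical sketch, assuming a
``terraform.tfstate`` in the current directory; it stubs both dunders, with
the matcher stub simply returning the parsed roster unfiltered):

.. code-block:: python

    import salt.roster.terraform as terraform

    # Salt normally injects these via its loader; stub just enough of both.
    terraform.__opts__ = {'roster_file': 'terraform.tfstate',
                          'pki_dir': '/etc/salt/pki/master'}
    terraform.__utils__ = {
        'roster_matcher.targets': lambda raw, tgt, tgt_type, ipv: raw,
    }

    print(terraform.targets('*'))  # every salt_host entry in the state file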


@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
'''
Roster matching by various criteria (glob, pcre, etc)
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import fnmatch
import logging
import re
import copy

# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
try:
    import seco.range
    HAS_RANGE = True
except ImportError:
    pass
# pylint: enable=import-error

# Import Salt libs
from salt.ext import six

log = logging.getLogger(__name__)
def targets(conditioned_raw, tgt, tgt_type, ipv='ipv4'):
    '''
    Return the targets from the sanitized raw roster data, matching ``tgt``
    with the given ``tgt_type``
    '''
    rmatcher = RosterMatcher(conditioned_raw, tgt, tgt_type, ipv)
    return rmatcher.targets()
class RosterMatcher(object):
    '''
    Matcher for the roster data structure
    '''
    def __init__(self, raw, tgt, tgt_type, ipv='ipv4'):
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.raw = raw
        self.ipv = ipv

    def targets(self):
        '''
        Execute the correct tgt_type routine and return
        '''
        try:
            return getattr(self, 'ret_{0}_minions'.format(self.tgt_type))()
        except AttributeError:
            return {}

    def ret_glob_minions(self):
        '''
        Return minions that match via glob
        '''
        minions = {}
        for minion in self.raw:
            if fnmatch.fnmatch(minion, self.tgt):
                data = self.get_data(minion)
                if data:
                    minions[minion] = data.copy()
        return minions

    def ret_pcre_minions(self):
        '''
        Return minions that match via pcre
        '''
        minions = {}
        for minion in self.raw:
            if re.match(self.tgt, minion):
                data = self.get_data(minion)
                if data:
                    minions[minion] = data.copy()
        return minions

    def ret_list_minions(self):
        '''
        Return minions that match via list
        '''
        minions = {}
        if not isinstance(self.tgt, list):
            self.tgt = self.tgt.split(',')
        for minion in self.raw:
            if minion in self.tgt:
                data = self.get_data(minion)
                if data:
                    minions[minion] = data.copy()
        return minions

    def ret_nodegroup_minions(self):
        '''
        Return minions which match the special list-only groups defined by
        ssh_list_nodegroups
        '''
        minions = {}
        nodegroup = __opts__.get('ssh_list_nodegroups', {}).get(self.tgt, [])
        if not isinstance(nodegroup, list):
            nodegroup = nodegroup.split(',')
        for minion in self.raw:
            if minion in nodegroup:
                data = self.get_data(minion)
                if data:
                    minions[minion] = data.copy()
        return minions

    def ret_range_minions(self):
        '''
        Return minions that are returned by a range query
        '''
        if HAS_RANGE is False:
            raise RuntimeError("Python lib 'seco.range' is not available")
        minions = {}
        range_hosts = _convert_range_to_list(self.tgt, __opts__['range_server'])
        for minion in self.raw:
            if minion in range_hosts:
                data = self.get_data(minion)
                if data:
                    minions[minion] = data.copy()
        return minions

    def get_data(self, minion):
        '''
        Return the configured ip
        '''
        ret = copy.deepcopy(__opts__.get('roster_defaults', {}))
        if isinstance(self.raw[minion], six.string_types):
            ret.update({'host': self.raw[minion]})
            return ret
        elif isinstance(self.raw[minion], dict):
            ret.update(self.raw[minion])
            return ret
        return False


def _convert_range_to_list(tgt, range_server):
    '''
    convert a seco.range range into a list target
    '''
    r = seco.range.Range(range_server)
    try:
        return r.expand(tgt)
    except seco.range.RangeException as err:
        log.error('Range server exception: %s', err)
        return []
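
A hypothetical standalone sketch of using the matcher outside Salt's loader
(``__opts__`` must exist as a module global, because the nodegroup and range
routines, and ``roster_defaults`` in ``get_data``, read it):

.. code-block:: python

    import salt.utils.roster_matcher as roster_matcher

    roster_matcher.__opts__ = {}  # normally injected by Salt's loader
    raw = {'web0': {'host': '192.168.122.106'},
           'db0': '192.168.122.174'}
    print(roster_matcher.targets(raw, 'web*', 'glob'))
    # -> {'web0': {'host': '192.168.122.106'}}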


@@ -0,0 +1 @@
FAKE-SSH-KEY


@@ -0,0 +1 @@
FAKE-SSH-KEY


@@ -0,0 +1,422 @@
{
"version": 3,
"terraform_version": "0.11.7",
"serial": 2,
"lineage": "cc4e87c6-4b74-8630-72a8-edba6e6178ae",
"modules": [
{
"path": [
"root"
],
"outputs": {},
"resources": {
"libvirt_cloudinit.init": {
"type": "libvirt_cloudinit",
"depends_on": [],
"primary": {
"id": "/var/lib/libvirt/images/test-init.iso;5b5f1340-6d8d-6a70-2150-523ef71ac035",
"attributes": {
"id": "/var/lib/libvirt/images/test-init.iso;5b5f1340-6d8d-6a70-2150-523ef71ac035",
"local_hostname": "",
"name": "test-init.iso",
"pool": "default",
"ssh_authorized_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA1X1kYrfcZz62UxUdqentnWZwefNJ+tWvKxqBtuCAkP5NzBrh8keVRGpaHdRcdG6+mDPrkXPW3c62zDwZG0Mjdr73u7hP0vSsKu/x95qPkGEFyanQnUg2j0jKJgNdNzRNwTczfBo9q750rYxwQwgs9wxjPh8mSQJhw73SNcz/EtwIba5vIFgWYUVSGzM1Afx5R0JMzhXzEOvN/QphM5X5l/llOq5GhJPQD4ChYhrUWmIrUXPqZpzNrW06F/UpsdHQoxrxpL6yibUimSkfdJUhL2AmEu8aztYmV41IWaayf/57m4F5EPyQv27gpgH96tMKy4AYMjNhojrD4K0iE+BzVw== dmacvicar@suse.de\n",
"user_data": "#cloud-config\nssh_authorized_keys:\n- |\n ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA1X1kYrfcZz62UxUdqentnWZwefNJ+tWvKxqBtuCAkP5NzBrh8keVRGpaHdRcdG6+mDPrkXPW3c62zDwZG0Mjdr73u7hP0vSsKu/x95qPkGEFyanQnUg2j0jKJgNdNzRNwTczfBo9q750rYxwQwgs9wxjPh8mSQJhw73SNcz/EtwIba5vIFgWYUVSGzM1Afx5R0JMzhXzEOvN/QphM5X5l/llOq5GhJPQD4ChYhrUWmIrUXPqZpzNrW06F/UpsdHQoxrxpL6yibUimSkfdJUhL2AmEu8aztYmV41IWaayf/57m4F5EPyQv27gpgH96tMKy4AYMjNhojrD4K0iE+BzVw== dmacvicar@suse.de\n"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_domain.vm-db.0": {
"type": "libvirt_domain",
"depends_on": [
"libvirt_volume.volume-db.*"
],
"primary": {
"id": "ffc8e8ca-7923-484c-ac75-b2ac4a09e778",
"attributes": {
"arch": "x86_64",
"autostart": "false",
"cmdline.#": "0",
"disk.#": "1",
"disk.0.file": "",
"disk.0.scsi": "false",
"disk.0.url": "",
"disk.0.volume_id": "/var/lib/libvirt/images/volume-db-0",
"disk.0.wwn": "",
"emulator": "/usr/bin/qemu-kvm",
"firmware": "",
"id": "ffc8e8ca-7923-484c-ac75-b2ac4a09e778",
"initrd": "",
"kernel": "",
"machine": "pc",
"memory": "512",
"name": "db-0",
"network_interface.#": "1",
"network_interface.0.addresses.#": "1",
"network_interface.0.addresses.0": "192.168.122.174",
"network_interface.0.bridge": "",
"network_interface.0.hostname": "",
"network_interface.0.mac": "9E:71:C6:6F:ED:62",
"network_interface.0.macvtap": "",
"network_interface.0.network_id": "72e67f35-6795-4fa1-aa6a-6e69db1b3d6f",
"network_interface.0.network_name": "default",
"network_interface.0.passthrough": "",
"network_interface.0.vepa": "",
"network_interface.0.wait_for_lease": "true",
"nvram.#": "0",
"running": "true",
"vcpu": "1"
},
"meta": {
"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0": {
"create": 300000000000
}
},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_domain.vm-db.1": {
"type": "libvirt_domain",
"depends_on": [
"libvirt_volume.volume-db.*"
],
"primary": {
"id": "b4dd7f0c-cd83-4b76-8ad8-e7a0f22bc37c",
"attributes": {
"arch": "x86_64",
"autostart": "false",
"cmdline.#": "0",
"disk.#": "1",
"disk.0.file": "",
"disk.0.scsi": "false",
"disk.0.url": "",
"disk.0.volume_id": "/var/lib/libvirt/images/volume-db-1",
"disk.0.wwn": "",
"emulator": "/usr/bin/qemu-kvm",
"firmware": "",
"id": "b4dd7f0c-cd83-4b76-8ad8-e7a0f22bc37c",
"initrd": "",
"kernel": "",
"machine": "pc",
"memory": "512",
"name": "db-1",
"network_interface.#": "1",
"network_interface.0.addresses.#": "1",
"network_interface.0.addresses.0": "192.168.122.190",
"network_interface.0.bridge": "",
"network_interface.0.hostname": "",
"network_interface.0.mac": "E6:09:00:50:5B:4D",
"network_interface.0.macvtap": "",
"network_interface.0.network_id": "72e67f35-6795-4fa1-aa6a-6e69db1b3d6f",
"network_interface.0.network_name": "default",
"network_interface.0.passthrough": "",
"network_interface.0.vepa": "",
"network_interface.0.wait_for_lease": "true",
"nvram.#": "0",
"running": "true",
"vcpu": "1"
},
"meta": {
"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0": {
"create": 300000000000
}
},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_domain.vm-web.0": {
"type": "libvirt_domain",
"depends_on": [
"libvirt_volume.volume-web.*"
],
"primary": {
"id": "6c5d3840-8a06-4e5f-bdfc-87eacf474b81",
"attributes": {
"arch": "x86_64",
"autostart": "false",
"cmdline.#": "0",
"disk.#": "1",
"disk.0.file": "",
"disk.0.scsi": "false",
"disk.0.url": "",
"disk.0.volume_id": "/var/lib/libvirt/images/volume-web-0",
"disk.0.wwn": "",
"emulator": "/usr/bin/qemu-kvm",
"firmware": "",
"id": "6c5d3840-8a06-4e5f-bdfc-87eacf474b81",
"initrd": "",
"kernel": "",
"machine": "pc",
"memory": "512",
"name": "web-0",
"network_interface.#": "1",
"network_interface.0.addresses.#": "1",
"network_interface.0.addresses.0": "192.168.122.106",
"network_interface.0.bridge": "",
"network_interface.0.hostname": "",
"network_interface.0.mac": "02:E1:FC:12:03:A4",
"network_interface.0.macvtap": "",
"network_interface.0.network_id": "72e67f35-6795-4fa1-aa6a-6e69db1b3d6f",
"network_interface.0.network_name": "default",
"network_interface.0.passthrough": "",
"network_interface.0.vepa": "",
"network_interface.0.wait_for_lease": "true",
"nvram.#": "0",
"running": "true",
"vcpu": "1"
},
"meta": {
"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0": {
"create": 300000000000
}
},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_domain.vm-web.1": {
"type": "libvirt_domain",
"depends_on": [
"libvirt_volume.volume-web.*"
],
"primary": {
"id": "30acaaeb-ee9c-4f98-8ebc-d79a46c22f83",
"attributes": {
"arch": "x86_64",
"autostart": "false",
"cmdline.#": "0",
"disk.#": "1",
"disk.0.file": "",
"disk.0.scsi": "false",
"disk.0.url": "",
"disk.0.volume_id": "/var/lib/libvirt/images/volume-web-1",
"disk.0.wwn": "",
"emulator": "/usr/bin/qemu-kvm",
"firmware": "",
"id": "30acaaeb-ee9c-4f98-8ebc-d79a46c22f83",
"initrd": "",
"kernel": "",
"machine": "pc",
"memory": "512",
"name": "web-1",
"network_interface.#": "1",
"network_interface.0.addresses.#": "1",
"network_interface.0.addresses.0": "192.168.122.235",
"network_interface.0.bridge": "",
"network_interface.0.hostname": "",
"network_interface.0.mac": "22:69:D5:05:F8:C4",
"network_interface.0.macvtap": "",
"network_interface.0.network_id": "72e67f35-6795-4fa1-aa6a-6e69db1b3d6f",
"network_interface.0.network_name": "default",
"network_interface.0.passthrough": "",
"network_interface.0.vepa": "",
"network_interface.0.wait_for_lease": "true",
"nvram.#": "0",
"running": "true",
"vcpu": "1"
},
"meta": {
"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0": {
"create": 300000000000
}
},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_volume.opensuse_leap": {
"type": "libvirt_volume",
"depends_on": [],
"primary": {
"id": "/var/lib/libvirt/images/leap.iso",
"attributes": {
"id": "/var/lib/libvirt/images/leap.iso",
"name": "leap.iso",
"pool": "default",
"size": "10737418240",
"source": "openSUSE-Leap-15.0-OpenStack.x86_64-0.0.4-Buildlp150.11.4.qcow2"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_volume.volume-db.0": {
"type": "libvirt_volume",
"depends_on": [
"libvirt_volume.opensuse_leap"
],
"primary": {
"id": "/var/lib/libvirt/images/volume-db-0",
"attributes": {
"base_volume_id": "/var/lib/libvirt/images/leap.iso",
"id": "/var/lib/libvirt/images/volume-db-0",
"name": "volume-db-0",
"pool": "default",
"size": "10737418240"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_volume.volume-db.1": {
"type": "libvirt_volume",
"depends_on": [
"libvirt_volume.opensuse_leap"
],
"primary": {
"id": "/var/lib/libvirt/images/volume-db-1",
"attributes": {
"base_volume_id": "/var/lib/libvirt/images/leap.iso",
"id": "/var/lib/libvirt/images/volume-db-1",
"name": "volume-db-1",
"pool": "default",
"size": "10737418240"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_volume.volume-web.0": {
"type": "libvirt_volume",
"depends_on": [
"libvirt_volume.opensuse_leap"
],
"primary": {
"id": "/var/lib/libvirt/images/volume-web-0",
"attributes": {
"base_volume_id": "/var/lib/libvirt/images/leap.iso",
"id": "/var/lib/libvirt/images/volume-web-0",
"name": "volume-web-0",
"pool": "default",
"size": "10737418240"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"libvirt_volume.volume-web.1": {
"type": "libvirt_volume",
"depends_on": [
"libvirt_volume.opensuse_leap"
],
"primary": {
"id": "/var/lib/libvirt/images/volume-web-1",
"attributes": {
"base_volume_id": "/var/lib/libvirt/images/leap.iso",
"id": "/var/lib/libvirt/images/volume-web-1",
"name": "volume-web-1",
"pool": "default",
"size": "10737418240"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.libvirt"
},
"salt_host.dbminion.0": {
"type": "salt_host",
"depends_on": [
"libvirt_domain.vm-db.*"
],
"primary": {
"id": "db0",
"attributes": {
"host": "192.168.122.174",
"id": "db0",
"passwd": "dbpw",
"salt_id": "db0",
"tty": "true",
"user": "root"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.salt"
},
"salt_host.dbminion.1": {
"type": "salt_host",
"depends_on": [
"libvirt_domain.vm-db.*"
],
"primary": {
"id": "db1",
"attributes": {
"host": "192.168.122.190",
"id": "db1",
"passwd": "dbpw",
"salt_id": "db1",
"tty": "true",
"user": "root"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.salt"
},
"salt_host.webminion.0": {
"type": "salt_host",
"depends_on": [
"libvirt_domain.vm-web.*"
],
"primary": {
"id": "web0",
"attributes": {
"host": "192.168.122.106",
"id": "web0",
"passwd": "linux",
"salt_id": "web0",
"timeout": "22",
"user": "root"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.salt"
},
"salt_host.webminion.1": {
"type": "salt_host",
"depends_on": [
"libvirt_domain.vm-web.*"
],
"primary": {
"id": "web1",
"attributes": {
"host": "192.168.122.235",
"id": "web1",
"passwd": "linux",
"salt_id": "web1",
"timeout": "22",
"user": "root"
},
"meta": {},
"tainted": false
},
"deposed": [],
"provider": "provider.salt"
}
},
"depends_on": []
}
]
}


@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
'''
unittests for terraform roster
'''
# Import Python libs
from __future__ import absolute_import
import os.path

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
from tests.support.mock import (
    patch,
    NO_MOCK,
    NO_MOCK_REASON
)

# Import Salt Libs
import salt.config
import salt.loader
from salt.roster import terraform
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TerraformTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.roster.terraform
    '''
    def setup_loader_modules(self):
        opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
        utils = salt.loader.utils(opts, whitelist=['roster_matcher'])
        return {
            terraform: {
                '__utils__': utils,
                '__opts__': {},
            }
        }
    def test_default_output(self):
        '''
        Test the output of a fixture tfstate file which contains libvirt
        resources.
        '''
        tfstate = os.path.join(os.path.dirname(__file__), 'terraform.data', 'terraform.tfstate')
        pki_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'terraform.data'))

        with patch.dict(terraform.__opts__, {'roster_file': tfstate, 'pki_dir': pki_dir}):
            expected_result = {
                'db0': {
                    'host': '192.168.122.174',
                    'user': 'root',
                    'passwd': 'dbpw',
                    'tty': True,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')},
                'db1': {
                    'host': '192.168.122.190',
                    'user': 'root',
                    'passwd': 'dbpw',
                    'tty': True,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')},
                'web0': {
                    'host': '192.168.122.106',
                    'user': 'root',
                    'passwd': 'linux',
                    'timeout': 22,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')},
                'web1': {
                    'host': '192.168.122.235',
                    'user': 'root',
                    'passwd': 'linux',
                    'timeout': 22,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')}
            }

            ret = terraform.targets('*')
            self.assertDictEqual(expected_result, ret)
    def test_default_matching(self):
        '''
        Test the output of a fixture tfstate file which contains libvirt
        resources, using glob matching.
        '''
        tfstate = os.path.join(os.path.dirname(__file__), 'terraform.data', 'terraform.tfstate')
        pki_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'terraform.data'))

        with patch.dict(terraform.__opts__, {'roster_file': tfstate, 'pki_dir': pki_dir}):
            expected_result = {
                'web0': {
                    'host': '192.168.122.106',
                    'user': 'root',
                    'passwd': 'linux',
                    'timeout': 22,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')},
                'web1': {
                    'host': '192.168.122.235',
                    'user': 'root',
                    'passwd': 'linux',
                    'timeout': 22,
                    'priv': os.path.join(pki_dir, 'ssh/salt-ssh.rsa')}
            }

            ret = terraform.targets('*web*')
            self.assertDictEqual(expected_result, ret)