Merge branch 'develop' into v2016.11.5_protect_master_key

Anil Kabra 2017-07-27 17:04:51 -05:00 committed by GitHub
commit 3a9746f67e
25 changed files with 2132 additions and 766 deletions

View File

@ -165,6 +165,12 @@ class BaseCaller(object):
ret['jid']
)
if fun not in self.minion.functions:
docs = self.minion.functions['sys.doc']('{0}*'.format(fun))
if docs:
docs[fun] = self.minion.functions.missing_fun_string(fun)
ret['out'] = 'nested'
ret['return'] = docs
return ret
sys.stderr.write(self.minion.functions.missing_fun_string(fun))
mod_name = fun.split('.')[0]
if mod_name in self.minion.function_errors:

View File

@ -5,6 +5,7 @@ A collection of mixins useful for the various *Client interfaces
# Import Python libs
from __future__ import absolute_import, print_function, with_statement
import fnmatch
import signal
import logging
import weakref
@ -436,10 +437,18 @@ class SyncClientMixin(object):
Return a dictionary of functions and the inline documentation for each
'''
if arg:
target_mod = arg + '.' if not arg.endswith('.') else arg
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)
if fun == arg or fun.startswith(target_mod)]
if '*' in arg:
target_mod = arg
_use_fnmatch = True
else:
_use_fnmatch = False
target_mod = arg + '.' if not arg.endswith('.') else arg
if _use_fnmatch:
docs = [(fun, self.functions[fun].__doc__)
for fun in fnmatch.filter(self.functions, target_mod)]
else:
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)
if fun == arg or fun.startswith(target_mod)]
else:
docs = [(fun, self.functions[fun].__doc__)
for fun in sorted(self.functions)]
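A minimal sketch of the wildcard matching this hunk introduces, assuming a handful of hypothetical function names; only the fnmatch.filter call mirrors the code above:

import fnmatch

functions = {'test.ping': None, 'test.echo': None, 'state.apply': None}

# An argument containing '*' is treated as a glob over the function names,
# e.g. 'test.*' matches 'test.ping' and 'test.echo'.
print(fnmatch.filter(functions, 'test.*'))

# An argument without a wildcard still falls back to the old prefix match
# (everything starting with 'state.').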

View File

@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
salt.config.schemas.esxdatacenter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ESX Datacenter configuration schemas
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
from salt.utils.schema import (Schema,
ArrayItem,
IntegerItem,
StringItem)
class EsxdatacenterProxySchema(Schema):
'''
Schema of the esxdatacenter proxy input
'''
title = 'Esxdatacenter Proxy Schema'
description = 'Esxdatacenter proxy schema'
additional_properties = False
proxytype = StringItem(required=True,
enum=['esxdatacenter'])
vcenter = StringItem(required=True, pattern=r'[^\s]+')
datacenter = StringItem(required=True)
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
username = StringItem()
passwords = ArrayItem(min_items=1,
items=StringItem(),
unique_items=True)
# TODO Should be changed when anyOf is supported for schemas
domain = StringItem()
principal = StringItem()
protocol = StringItem()
port = IntegerItem(minimum=1)
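For illustration, a hedged sketch of how this schema might be exercised, mirroring the jsonschema.validate call in the new esxdatacenter proxy module further down; the sample configuration values are made up:

import jsonschema
from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema

# Serialize the Schema subclass into a plain JSON Schema dictionary.
schema = EsxdatacenterProxySchema.serialize()

# Hypothetical proxy pillar input.
proxy_config = {'proxytype': 'esxdatacenter',
                'vcenter': 'vcenter.example.com',
                'datacenter': 'dc1',
                'mechanism': 'userpass',
                'username': 'root',
                'passwords': ['first_password']}

# Raises jsonschema.exceptions.ValidationError if the input does not match.
jsonschema.validate(proxy_config, schema)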

View File

@ -389,6 +389,13 @@ class TemplateError(SaltException):
'''
# Validation related exceptions
class InvalidConfigError(CommandExecutionError):
'''
Used when the input is invalid
'''
# VMware related exceptions
class VMwareSaltError(CommandExecutionError):
'''

View File

@ -1522,12 +1522,17 @@ class Minion(MinionBase):
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
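As a rough, hypothetical illustration of the new behaviour: calling a mistyped function such as 'test.pin' now returns the documentation of the near matches found by sys.doc('test.pin*'), with the missing name mapped to the usual 'not available' message, instead of a bare error string:

# Hypothetical shape of ret['return'] for a missing 'test.pin'
{
    'test.ping': 'Used to make sure the minion is up and responding. ...',
    'test.pin': "'test.pin' is not available.",
}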

View File

@ -10,7 +10,7 @@ import base64
import logging
# Import Salt libs
import salt.utils.files
import salt.utils
import salt.ext.six.moves.http_client # pylint: disable=import-error,redefined-builtin,no-name-in-module
from salt.ext.six.moves import urllib # pylint: disable=no-name-in-module
from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=no-name-in-module
@ -38,7 +38,7 @@ def __virtual__():
return True
def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
'''
Gets latest snapshot of the given artifact
@ -69,15 +69,15 @@ def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, pack
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
artifact_metadata = _get_artifact_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers)
artifact_metadata = _get_artifact_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id)
version = artifact_metadata['latest_version']
snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, headers=headers)
snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
'''
Gets snapshot of the desired version of the artifact
@ -109,13 +109,13 @@ def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging,
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers)
snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
def get_latest_release(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
def get_latest_release(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
'''
Gets the latest release of the artifact
@ -146,13 +146,13 @@ def get_latest_release(artifactory_url, repository, group_id, artifact_id, packa
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
version = __find_latest_version(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers)
release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier)
release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, use_literal_group_id)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False):
'''
Gets the specified release of the artifact
@ -184,7 +184,7 @@ def get_release(artifactory_url, repository, group_id, artifact_id, packaging, v
headers = {}
if username and password:
headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', ''))
release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier)
release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, use_literal_group_id)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
@ -196,7 +196,7 @@ def __resolve_target_file(file_name, target_dir, target_file=None):
return target_file
def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, version, packaging, snapshot_version=None, classifier=None, headers=None):
def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, version, packaging, snapshot_version=None, classifier=None, headers=None, use_literal_group_id=False):
if headers is None:
headers = {}
has_classifier = classifier is not None and classifier != ""
@ -242,7 +242,7 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio
snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging]
group_url = __get_group_id_subpath(group_id)
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
file_name = '{artifact_id}-{snapshot_version}{classifier}.{packaging}'.format(
artifact_id=artifact_id,
@ -262,8 +262,8 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio
return snapshot_url, file_name
def _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier=None):
group_url = __get_group_id_subpath(group_id)
def _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier=None, use_literal_group_id=False):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
file_name = '{artifact_id}-{version}{classifier}.{packaging}'.format(
@ -283,8 +283,8 @@ def _get_release_url(repository, group_id, artifact_id, packaging, version, arti
return release_url, file_name
def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id):
group_url = __get_group_id_subpath(group_id)
def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
artifact_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml'.format(
artifactory_url=artifactory_url,
@ -295,13 +295,14 @@ def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_i
return artifact_metadata_url
def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers):
def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
artifact_metadata_url = _get_artifact_metadata_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id
artifact_id=artifact_id,
use_literal_group_id=use_literal_group_id
)
try:
@ -318,8 +319,8 @@ def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_i
return artifact_metadata_xml
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers):
metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers)
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id)
root = ET.fromstring(metadata_xml)
assert group_id == root.find('groupId').text
@ -331,8 +332,8 @@ def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, h
# functions for handling snapshots
def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, artifact_id, version):
group_url = __get_group_id_subpath(group_id)
def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, artifact_id, version, use_literal_group_id=False):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
snapshot_version_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml'.format(
artifactory_url=artifactory_url,
@ -344,14 +345,15 @@ def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, ar
return snapshot_version_metadata_url
def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers):
def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers, use_literal_group_id=False):
snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version
version=version,
use_literal_group_id=use_literal_group_id
)
try:
@ -388,8 +390,8 @@ def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifa
}
def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id):
group_url = __get_group_id_subpath(group_id)
def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
latest_version_url = '{artifactory_url}/api/search/latestVersion?g={group_url}&a={artifact_id}&repos={repository}'.format(
artifactory_url=artifactory_url,
@ -400,13 +402,14 @@ def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id)
return latest_version_url
def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers):
def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False):
latest_version_url = __get_latest_version_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id
artifact_id=artifact_id,
use_literal_group_id=use_literal_group_id
)
try:
@ -478,9 +481,11 @@ def __save_artifact(artifact_url, target_file, headers):
return result
def __get_group_id_subpath(group_id):
group_url = group_id.replace('.', '/')
return group_url
def __get_group_id_subpath(group_id, use_literal_group_id=False):
if not use_literal_group_id:
group_url = group_id.replace('.', '/')
return group_url
return group_id
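A quick sketch of what the new flag changes, using a made-up group id:

# Default behaviour: Maven-style group ids are expanded into a path.
__get_group_id_subpath('com.company.sampleapp')
# -> 'com/company/sampleapp'

# With use_literal_group_id=True the group id is used verbatim in the URL.
__get_group_id_subpath('com.company.sampleapp', use_literal_group_id=True)
# -> 'com.company.sampleapp'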
def __get_classifier_url(classifier):

View File

@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
'''
Module used to access the esxdatacenter proxy connection methods
'''
from __future__ import absolute_import
# Import python libs
import logging
import salt.utils
log = logging.getLogger(__name__)
__proxyenabled__ = ['esxdatacenter']
# Define the module's virtual name
__virtualname__ = 'esxdatacenter'
def __virtual__():
'''
Only work on proxy
'''
if salt.utils.is_proxy():
return __virtualname__
return (False, 'Must be run on a proxy minion')
def get_details():
return __proxy__['esxdatacenter.get_details']()

File diff suppressed because it is too large.

View File

@ -362,12 +362,127 @@ def environment(**kwargs): # pylint: disable=unused-argument
@proxy_napalm_wrap
def cli(*commands, **kwargs): # pylint: disable=unused-argument
'''
Returns a dictionary with the raw output of all commands passed as arguments.
:param commands: list of commands to be executed on the device
:return: a dictionary with the mapping between each command and its raw output
commands
List of commands to be executed on the device.
textfsm_parse: ``False``
Try parsing the outputs using the TextFSM templates.
.. versionadded:: Oxygen
.. note::
This option can be also specified in the minion configuration
file or pillar as ``napalm_cli_textfsm_parse``.
textfsm_path
The path where the TextFSM templates can be found. This option implies
the usage of the TextFSM index file.
``textfsm_path`` can be either an absolute path on the server,
or specified using one of the following URL schemes: ``file://``,
``salt://``, ``http://``, ``https://``, ``ftp://``,
``s3://``, ``swift://``.
.. versionadded:: Oxygen
.. note::
This needs to be a directory with a flat structure, having an
index file (whose name can be specified using the ``index_file`` option)
and a number of TextFSM templates.
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_path``.
textfsm_template
The path to a specific TextFSM template.
This can be specified using the absolute path
to the file, or using one of the following URL schemes:
- ``salt://``, to fetch the template from the Salt fileserver.
- ``http://`` or ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
.. versionadded:: Oxygen
textfsm_template_dict
A dictionary with the mapping between a command
and the corresponding TextFSM path to use to extract the data.
The TextFSM paths can be specified as in ``textfsm_template``.
.. versionadded:: Oxygen
.. note::
This option can be also specified in the minion configuration
file or pillar as ``napalm_cli_textfsm_template_dict``.
platform_grain_name: ``os``
The name of the grain used to identify the platform name
in the TextFSM index file. Default: ``os``.
.. versionadded:: Oxygen
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_platform_grain``.
platform_column_name: ``Platform``
The column name used to identify the platform,
exactly as specified in the TextFSM index file.
Default: ``Platform``.
.. versionadded:: Oxygen
.. note::
This field is case sensitive; make sure
to assign the correct value to this option,
exactly as defined in the index file.
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_platform_column_name``.
index_file: ``index``
The name of the TextFSM index file, under the ``textfsm_path``. Default: ``index``.
.. versionadded:: Oxygen
.. note::
This option can be also specified in the minion configuration
file or pillar as ``textfsm_index_file``.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``textfsm_path`` is not a ``salt://`` URL.
.. versionadded:: Oxygen
include_empty: ``False``
Include empty files under the ``textfsm_path``.
.. versionadded:: Oxygen
include_pat
Glob or regex to narrow down the files cached from the given path.
If matching with a regex, the regex must be prefixed with ``E@``,
otherwise the expression will be interpreted as a glob.
.. versionadded:: Oxygen
exclude_pat
Glob or regex to exclude certain files from being cached from the given path.
If matching with a regex, the regex must be prefixed with ``E@``,
otherwise the expression will be interpreted as a glob.
.. versionadded:: Oxygen
.. note::
If used with ``include_pat``, files matching this pattern will be
excluded from the subset of files defined by ``include_pat``.
CLI Example:
@ -375,6 +490,12 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument
salt '*' net.cli "show version" "show chassis fan"
CLI Example with TextFSM template:
.. code-block:: bash
salt '*' net.cli textfsm_parse=True textfsm_path=salt://textfsm/
Example output:
.. code-block:: python
@ -396,9 +517,31 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument
Bottom Front Fan OK 3840 Spinning at intermediate-speed
'
}
'''
return salt.utils.napalm.call(
Example output with TextFSM parsing:
.. code-block:: json
{
"comment": "",
"result": true,
"out": {
"sh ver": [
{
"kernel": "9.1S3.5",
"documentation": "9.1S3.5",
"boot": "9.1S3.5",
"crypto": "9.1S3.5",
"chassis": "",
"routing": "9.1S3.5",
"base": "9.1S3.5",
"model": "mx960"
}
]
}
}
'''
raw_cli_outputs = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
'cli',
**{
@ -407,6 +550,107 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument
)
# thus we can display the output as is
# in case of errors, they'll be caught in the proxy
if not raw_cli_outputs['result']:
# Error -> display the output as-is.
return raw_cli_outputs
textfsm_parse = kwargs.get('textfsm_parse') or __opts__.get('napalm_cli_textfsm_parse') or\
__pillar__.get('napalm_cli_textfsm_parse', False)
if not textfsm_parse:
# No TextFSM parsing required, return raw commands.
log.debug('No TextFSM parsing requested.')
return raw_cli_outputs
if 'textfsm.extract' not in __salt__ or 'textfsm.index' not in __salt__:
raw_cli_outputs['comment'] += 'Unable to process: is TextFSM installed?'
log.error(raw_cli_outputs['comment'])
return raw_cli_outputs
textfsm_template = kwargs.get('textfsm_template')
log.debug('textfsm_template: {}'.format(textfsm_template))
textfsm_path = kwargs.get('textfsm_path') or __opts__.get('textfsm_path') or\
__pillar__.get('textfsm_path')
log.debug('textfsm_path: {}'.format(textfsm_path))
textfsm_template_dict = kwargs.get('textfsm_template_dict') or __opts__.get('napalm_cli_textfsm_template_dict') or\
__pillar__.get('napalm_cli_textfsm_template_dict', {})
log.debug('TextFSM command-template mapping: {}'.format(textfsm_template_dict))
index_file = kwargs.get('index_file') or __opts__.get('textfsm_index_file') or\
__pillar__.get('textfsm_index_file')
log.debug('index_file: {}'.format(index_file))
platform_grain_name = kwargs.get('platform_grain_name') or __opts__.get('textfsm_platform_grain') or\
__pillar__.get('textfsm_platform_grain', 'os')
log.debug('platform_grain_name: {}'.format(platform_grain_name))
platform_column_name = kwargs.get('platform_column_name') or __opts__.get('textfsm_platform_column_name') or\
__pillar__.get('textfsm_platform_column_name', 'Platform')
log.debug('platform_column_name: {}'.format(platform_column_name))
saltenv = kwargs.get('saltenv', 'base')
include_empty = kwargs.get('include_empty', False)
include_pat = kwargs.get('include_pat')
exclude_pat = kwargs.get('exclude_pat')
processed_cli_outputs = {
'comment': raw_cli_outputs.get('comment', ''),
'result': raw_cli_outputs['result'],
'out': {}
}
log.debug('Starting to analyse the raw outputs')
for command in list(commands):
command_output = raw_cli_outputs['out'][command]
log.debug('Output from command: {}'.format(command))
log.debug(command_output)
processed_command_output = None
if textfsm_path:
log.debug('Using the templates under {}'.format(textfsm_path))
processed_cli_output = __salt__['textfsm.index'](command,
platform_grain_name=platform_grain_name,
platform_column_name=platform_column_name,
output=command_output.strip(),
textfsm_path=textfsm_path,
saltenv=saltenv,
include_empty=include_empty,
include_pat=include_pat,
exclude_pat=exclude_pat)
log.debug('Processed CLI output:')
log.debug(processed_cli_output)
if not processed_cli_output['result']:
log.debug('Apparently this did not work, returning the raw output')
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}.'.format(command,
processed_cli_output['comment'])
log.error(processed_cli_outputs['comment'])
elif processed_cli_output['out']:
log.debug('All good, {} has a nice output!'.format(command))
processed_command_output = processed_cli_output['out']
else:
comment = '''\nProcessing "{}" didn't fail, but didn't return anything either. Dumping raw.'''.format(
command)
processed_cli_outputs['comment'] += comment
log.error(comment)
processed_command_output = command_output
elif textfsm_template or command in textfsm_template_dict:
if command in textfsm_template_dict:
textfsm_template = textfsm_template_dict[command]
log.debug('Using {0} to process the command: {1}'.format(textfsm_template, command))
processed_cli_output = __salt__['textfsm.extract'](textfsm_template,
raw_text=command_output,
saltenv=saltenv)
log.debug('Processed CLI output:')
log.debug(processed_cli_output)
if not processed_cli_output['result']:
log.debug('Apparently this did not work, returning the raw output')
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}'.format(command,
processed_cli_output['comment'])
log.error(processed_cli_outputs['comment'])
elif processed_cli_output['out']:
log.debug('All good, {} has a nice output!'.format(command))
processed_command_output = processed_cli_output['out']
else:
log.debug('Processing {} did not fail, but did not return anything either. Dumping raw.'.format(command))
processed_command_output = command_output
else:
log.error('No TextFSM template specified, or no TextFSM path defined')
processed_command_output = command_output
processed_cli_outputs['comment'] += '\nUnable to process the output from {}.'.format(command)
processed_cli_outputs['out'][command] = processed_command_output
processed_cli_outputs['comment'] = processed_cli_outputs['comment'].strip()
return processed_cli_outputs
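For completeness, a hedged sketch of how the new keyword arguments might be driven from another execution module or runner via __salt__; the template path is hypothetical:

# Parse 'show version' through a specific TextFSM template instead of the index file.
out = __salt__['net.cli']('show version',
                          textfsm_parse=True,
                          textfsm_template='salt://textfsm/show_version.tpl')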
@proxy_napalm_wrap

View File

@ -194,7 +194,7 @@ else:
log = logging.getLogger(__name__)
__virtualname__ = 'vsphere'
__proxyenabled__ = ['esxi']
__proxyenabled__ = ['esxi', 'esxdatacenter']
def __virtual__():
@ -226,6 +226,8 @@ def _get_proxy_connection_details():
proxytype = get_proxy_type()
if proxytype == 'esxi':
details = __salt__['esxi.get_details']()
elif proxytype == 'esxdatacenter':
details = __salt__['esxdatacenter.get_details']()
else:
raise CommandExecutionError('\'{0}\' proxy is not supported'
''.format(proxytype))
@ -264,6 +266,8 @@ def gets_service_instance_via_proxy(fn):
proxy details and passes the connection (vim.ServiceInstance) to
the decorated function.
Supported proxies: esxi, esxdatacenter.
Notes:
1. The decorated function must have a ``service_instance`` parameter
or a ``**kwarg`` type argument (name of argument is not important);
@ -349,7 +353,7 @@ def gets_service_instance_via_proxy(fn):
@depends(HAS_PYVMOMI)
@supports_proxies('esxi')
@supports_proxies('esxi', 'esxdatacenter')
def get_service_instance_via_proxy(service_instance=None):
'''
Returns a service instance to the proxied endpoint (vCenter/ESXi host).
@ -368,7 +372,8 @@ def get_service_instance_via_proxy(service_instance=None):
return salt.utils.vmware.get_service_instance(*connection_details)
@supports_proxies('esxi')
@depends(HAS_PYVMOMI)
@supports_proxies('esxi', 'esxdatacenter')
def disconnect(service_instance):
'''
Disconnects from a vCenter or ESXi host
@ -1903,7 +1908,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None,
@depends(HAS_PYVMOMI)
@supports_proxies('esxi')
@supports_proxies('esxi', 'esxdatacenter')
@gets_service_instance_via_proxy
def test_vcenter_connection(service_instance=None):
'''
@ -4278,3 +4283,13 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name,
raise
return ret
def _get_esxdatacenter_proxy_details():
'''
Returns the running esxdatacenter's proxy details
'''
det = __salt__['esxdatacenter.get_details']()
return det.get('vcenter'), det.get('username'), det.get('password'), \
det.get('protocol'), det.get('port'), det.get('mechanism'), \
det.get('principal'), det.get('domain'), det.get('datacenter')

View File

@ -115,6 +115,7 @@ def start():
ssl_opts.update({'keyfile': mod_opts['ssl_key']})
kwargs['ssl_options'] = ssl_opts
import tornado.httpserver
http_server = tornado.httpserver.HTTPServer(get_application(__opts__), **kwargs)
try:
http_server.bind(mod_opts['port'],

salt/proxy/esxdatacenter.py (new file, 298 lines)
View File

@ -0,0 +1,298 @@
# -*- coding: utf-8 -*-
'''
Proxy Minion interface module for managing VMWare ESX datacenters.
Dependencies
============
- pyVmomi
- jsonschema
Configuration
=============
To use this integration proxy module, please configure the following:
Pillar
------
Proxy minions get their configuration from Salt's Pillar. This can now happen
from the proxy's configuration file.
Example pillars:
``userpass`` mechanism:
.. code-block:: yaml
proxy:
proxytype: esxdatacenter
datacenter: <datacenter name>
vcenter: <ip or dns name of parent vcenter>
mechanism: userpass
username: <vCenter username>
passwords: (required if userpass is used)
- first_password
- second_password
- third_password
``sspi`` mechanism:
.. code-block:: yaml
proxy:
proxytype: esxdatacenter
datacenter: <datacenter name>
vcenter: <ip or dns name of parent vcenter>
mechanism: sspi
domain: <user domain>
principal: <host kerberos principal>
proxytype
^^^^^^^^^
To use this Proxy Module, set this to ``esxdatacenter``.
datacenter
^^^^^^^^^^
Name of the managed datacenter. Required.
vcenter
^^^^^^^
The location of the VMware vCenter server (host or IP) where the datacenter
should be managed. Required.
mechanism
^^^^^^^^^
The mechanism used to connect to the vCenter server. Supported values are
``userpass`` and ``sspi``. Required.
Note:
Connections are attempted using all (``username``, ``password``)
combinations on proxy startup.
username
^^^^^^^^
The username used to login to the host, such as ``root``. Required if mechanism
is ``userpass``.
passwords
^^^^^^^^^
A list of passwords to be used to try and login to the vCenter server. At least
one password in this list is required if mechanism is ``userpass``. When the
proxy comes up, it will try the passwords listed in order.
domain
^^^^^^
User domain. Required if mechanism is ``sspi``.
principal
^^^^^^^^^
Kerberos principal. Required if mechanism is ``sspi``.
protocol
^^^^^^^^
If the vCenter server is not using the default protocol, set this value to an
alternate protocol. Default is ``https``.
port
^^^^
If the vCenter server is not using the default port, set this value to an
alternate port. Default is ``443``.
Salt Proxy
----------
After your pillar is in place, you can test the proxy. The proxy can run on
any machine that has network connectivity to your Salt Master and to the
vCenter server in the pillar. SaltStack recommends that the machine running the
salt-proxy process also run a regular minion, though it is not strictly
necessary.
To start a proxy minion one needs to establish its identity <id>:
.. code-block:: bash
salt-proxy --proxyid <proxy_id>
On the machine that will run the proxy, make sure there is a configuration file
present. By default this is ``/etc/salt/proxy``. If it is in a different location, the
``<configuration_folder>`` has to be specified when running the proxy:
.. code-block:: bash
salt-proxy --proxyid <proxy_id> -c <configuration_folder>
Commands
--------
Once the proxy is running it will connect back to the specified master and
individual commands can be run against it:
.. code-block:: bash
# Master - minion communication
salt <datacenter_name> test.ping
# Test vcenter connection
salt <datacenter_name> vsphere.test_vcenter_connection
States
------
Associated states are documented in
:mod:`salt.states.esxdatacenter </ref/states/all/salt.states.esxdatacenter>`.
Look there to find an example structure for Pillar as well as an example
``.sls`` file for configuring an ESX datacenter from scratch.
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
# Import Salt Libs
import salt.exceptions
from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema
# This must be present or the Salt loader won't load this module.
__proxyenabled__ = ['esxdatacenter']
# External libraries
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'esxdatacenter'
def __virtual__():
'''
Only load if the vsphere execution module is available.
'''
if HAS_JSONSCHEMA:
return __virtualname__
return False, 'The esxdatacenter proxy module did not load.'
def init(opts):
'''
This function gets called when the proxy starts up.
All login details are cached.
'''
log.debug('Initting esxdatacenter proxy module in process '
'{}'.format(os.getpid()))
log.trace('Validating esxdatacenter proxy input')
schema = EsxdatacenterProxySchema.serialize()
log.trace('schema = {}'.format(schema))
try:
jsonschema.validate(opts['proxy'], schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidConfigError(exc)
# Save mandatory fields in cache
for key in ('vcenter', 'datacenter', 'mechanism'):
DETAILS[key] = opts['proxy'][key]
# Additional validation
if DETAILS['mechanism'] == 'userpass':
if 'username' not in opts['proxy']:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\', but no '
'\'username\' key found in proxy config.')
if 'passwords' not in opts['proxy']:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\', but no '
'\'passwords\' key found in proxy config.')
for key in ('username', 'passwords'):
DETAILS[key] = opts['proxy'][key]
else:
if 'domain' not in opts['proxy']:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\', but no '
'\'domain\' key found in proxy config.')
if 'principal' not in opts['proxy']:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\', but no '
'\'principal\' key found in proxy config.')
for key in ('domain', 'principal'):
DETAILS[key] = opts['proxy'][key]
# Save optional
DETAILS['protocol'] = opts['proxy'].get('protocol')
DETAILS['port'] = opts['proxy'].get('port')
# Test connection
if DETAILS['mechanism'] == 'userpass':
# Get the correct login details
log.debug('Retrieving credentials and testing vCenter connection for '
'mechanism \'userpass\'')
try:
username, password = find_credentials()
DETAILS['password'] = password
except salt.exceptions.SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
return True
def ping():
'''
Returns True.
CLI Example:
.. code-block:: bash
salt dc_id test.ping
'''
return True
def shutdown():
'''
Shutdown the connection to the proxy device. For this proxy,
shutdown is a no-op.
'''
log.debug('esxdatacenter proxy shutdown() called...')
def find_credentials():
'''
Cycle through all the possible credentials and return the first one that
works.
'''
# if the username and password were already found, don't go through the
# connection process again
if 'username' in DETAILS and 'password' in DETAILS:
return DETAILS['username'], DETAILS['password']
passwords = DETAILS['passwords']
for password in passwords:
DETAILS['password'] = password
if not __salt__['vsphere.test_vcenter_connection']():
# We are unable to authenticate
continue
# If we have data returned from above, we've successfully authenticated.
return DETAILS['username'], password
# We've reached the end of the list without successfully authenticating.
raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
'incorrect credentials.')
def get_details():
'''
Function that returns the cached details
'''
return DETAILS

View File

@ -276,7 +276,16 @@ class Runner(RunnerClient):
'fun_args': fun_args,
'jid': self.jid},
tag='salt/run/{0}/ret'.format(self.jid))
ret = '{0}'.format(exc)
# Attempt to grab documentation
if 'fun' in low:
ret = self.get_docs('{0}*'.format(low['fun']))
else:
ret = None
# If we didn't get docs returned then
# return the `not available` message.
if not ret:
ret = '{0}'.format(exc)
if not self.opts.get('quiet', False):
display_output(ret, 'nested', self.opts)
else:

View File

@ -11,10 +11,10 @@ import logging
log = logging.getLogger(__name__)
def downloaded(name, artifact, target_dir='/tmp', target_file=None):
def downloaded(name, artifact, target_dir='/tmp', target_file=None, use_literal_group_id=False):
'''
Ensures that the artifact from artifactory exists at given location. If it doesn't exist, then
it will be downloaded. It it already exists then the checksum of existing file is checked against checksum
it will be downloaded. If it already exists then the checksum of existing file is checked against checksum
in artifactory. If it is different then the step will fail.
artifact
@ -84,7 +84,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None):
'comment': ''}
try:
fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file)
fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id)
except Exception as exc:
ret['result'] = False
ret['comment'] = str(exc)
@ -100,7 +100,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None):
return ret
def __fetch_from_artifactory(artifact, target_dir, target_file):
def __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id):
if ('latest_snapshot' in artifact and artifact['latest_snapshot']) or artifact['version'] == 'latest_snapshot':
fetch_result = __salt__['artifactory.get_latest_snapshot'](artifactory_url=artifact['artifactory_url'],
repository=artifact['repository'],
@ -111,7 +111,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file):
target_dir=target_dir,
target_file=target_file,
username=artifact['username'] if 'username' in artifact else None,
password=artifact['password'] if 'password' in artifact else None)
password=artifact['password'] if 'password' in artifact else None,
use_literal_group_id=use_literal_group_id)
elif artifact['version'].endswith('SNAPSHOT'):
fetch_result = __salt__['artifactory.get_snapshot'](artifactory_url=artifact['artifactory_url'],
repository=artifact['repository'],
@ -123,7 +124,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file):
target_dir=target_dir,
target_file=target_file,
username=artifact['username'] if 'username' in artifact else None,
password=artifact['password'] if 'password' in artifact else None)
password=artifact['password'] if 'password' in artifact else None,
use_literal_group_id=use_literal_group_id)
elif artifact['version'] == 'latest':
fetch_result = __salt__['artifactory.get_latest_release'](artifactory_url=artifact['artifactory_url'],
repository=artifact['repository'],
@ -134,7 +136,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file):
target_dir=target_dir,
target_file=target_file,
username=artifact['username'] if 'username' in artifact else None,
password=artifact['password'] if 'password' in artifact else None)
password=artifact['password'] if 'password' in artifact else None,
use_literal_group_id=use_literal_group_id)
else:
fetch_result = __salt__['artifactory.get_release'](artifactory_url=artifact['artifactory_url'],
repository=artifact['repository'],
@ -146,5 +149,6 @@ def __fetch_from_artifactory(artifact, target_dir, target_file):
target_dir=target_dir,
target_file=target_file,
username=artifact['username'] if 'username' in artifact else None,
password=artifact['password'] if 'password' in artifact else None)
password=artifact['password'] if 'password' in artifact else None,
use_literal_group_id=use_literal_group_id)
return fetch_result

View File

@ -1,239 +0,0 @@
# -*- coding: utf-8 -*-
'''
states for infoblox stuff
ensures a record is either present or absent in an Infoblox DNS system
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import
# Import Python libs
import logging
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
make sure the infoblox module is available
'''
return True if 'infoblox.get_record' in __salt__ else False
def present(name,
value,
record_type,
dns_view,
infoblox_server=None,
infoblox_user=None,
infoblox_password=None,
infoblox_api_version='v1.4.2',
sslVerify=True):
'''
Ensure a record exists
name
Name of the record
value
Value of the record
record_type
record type (host, a, cname, etc)
dns_view
DNS View
infoblox_server
infoblox server to connect to (will try pillar if not specified)
infoblox_user
username to use to connect to infoblox (will try pillar if not specified)
infoblox_password
password to use to connect to infoblox (will try pillar if not specified)
verify_ssl
verify SSL certificates
Example:
.. code-block:: yaml
some-state:
infoblox.present:
- name: some.dns.record
- value: 10.1.1.3
- record_type: host
- sslVerify: False
'''
record_type = record_type.lower()
value_utf8 = six.text_type(value, "utf-8")
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
records = __salt__['infoblox.get_record'](name,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
dns_view=dns_view,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify)
if records:
# check records for updates
for record in records:
update_record = False
if record_type == 'cname':
if record['Canonical Name'] != value_utf8:
update_record = True
elif record_type == 'a':
if record['IP Address'] != value_utf8:
update_record = True
elif record_type == 'host':
if record['IP Addresses'] != [value_utf8]:
update_record = True
if update_record:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ' '.join([ret['comment'],
'DNS {0} record {1} in view {2} will be update'.format(record_type,
name,
dns_view)])
else:
retval = __salt__['infoblox.update_record'](name,
value,
dns_view,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify)
if retval:
if 'old' not in ret['changes']:
ret['changes']['old'] = []
if 'new' not in ret['changes']:
ret['changes']['new'] = []
ret['changes']['old'].append(record)
ret['changes']['new'].append(__salt__['infoblox.get_record'](name,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
dns_view=dns_view,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify))
else:
ret['result'] = False
return ret
else:
# no records
if __opts__['test']:
ret['result'] = None
ret['comment'] = ' '.join([ret['comment'],
'DNS {0} record {1} set to be added to view {2}'.format(record_type,
name,
dns_view)])
return ret
retval = __salt__['infoblox.add_record'](name,
value,
record_type,
dns_view,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
infoblox_api_version='v1.4.2',
sslVerify=sslVerify)
if retval:
ret['result'] = True
ret['changes']['old'] = None
ret['changes']['new'] = __salt__['infoblox.get_record'](name,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
dns_view=dns_view,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify)
return ret
def absent(name,
record_type,
dns_view,
infoblox_server=None,
infoblox_user=None,
infoblox_password=None,
infoblox_api_version='v1.4.2',
sslVerify=True):
'''
Ensure a record does not exists
name
Name of the record
record_type
record type (host, a, cname, etc)
dns_view
DNS View
infoblox_server
infoblox server to connect to (will try pillar if not specified)
infoblox_user
username to use to connect to infoblox (will try pillar if not specified)
infoblox_password
password to use to connect to infoblox (will try pillar if not specified)
verify_ssl
verify SSL certificates
Example:
.. code-block:: yaml
some-state:
infoblox.absent:
- name: some.dns.record
- record_type: host
- dns_view: MyView
- sslVerify: False
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
record = __salt__['infoblox.get_record'](name,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
dns_view=dns_view,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify)
if record:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ' '.join([ret['comment'],
'DNS {0} record {1} in view {2} will be removed'.format(record_type,
name,
dns_view)])
else:
retval = __salt__['infoblox.delete_record'](name,
dns_view,
record_type,
infoblox_server=infoblox_server,
infoblox_user=infoblox_user,
infoblox_password=infoblox_password,
infoblox_api_version=infoblox_api_version,
sslVerify=sslVerify)
if retval:
if 'old' not in ret['changes']:
ret['changes']['old'] = []
ret['changes']['new'] = None
ret['changes']['old'].append(record)
else:
ret['result'] = False
return ret
else:
# record not found
ret['result'] = True
ret['changes']['old'] = None
ret['changes']['new'] = None
ret['comment'] = 'DNS record does not exist'
return ret

salt/states/infoblox_a.py (new file, 121 lines)
View File

@ -0,0 +1,121 @@
# -*- coding: utf-8 -*-
'''
Infoblox A record management.
functions accept api_opts:
api_verifyssl: verify SSL [default to True or pillar value]
api_url: server to connect to [default to pillar value]
api_username: [default to pillar value]
api_password: [default to pillar value]
'''
def present(name=None, ipv4addr=None, data=None, ensure_data=True, **api_opts):
'''
Ensure infoblox A record.
When you wish to update a hostname ensure `name` is set to the hostname
of the current record. You can give a new name in the `data.name`.
State example:
.. code-block:: yaml
infoblox_a.present:
- name: example-ha-0.domain.com
- data:
name: example-ha-0.domain.com
ipv4addr: 123.0.31.2
view: Internal
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
if 'ipv4addr' not in data:
data.update({'ipv4addr': ipv4addr})
obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts)
if obj is None:
# perhaps the user updated the name
obj = __salt__['infoblox.get_a'](name=data['name'], ipv4addr=data['ipv4addr'], allow_array=False, **api_opts)
if obj:
# warn user that the data was updated and does not match
ret['result'] = False
ret['comment'] = '** please update the name: {0} to equal the updated data name {1}'.format(name, data['name'])
return ret
if obj:
obj = obj[0]
if not ensure_data:
ret['result'] = True
ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)'
return ret
diff = __salt__['infoblox.diff_objects'](data, obj)
if not diff:
ret['result'] = True
ret['comment'] = 'supplied fields already updated (note: removing fields might not update)'
return ret
if diff:
ret['changes'] = {'diff': diff}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to update infoblox record'
return ret
## TODO: perhaps need to review the output of new_obj
new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to create infoblox record {0}'.format(data['name'])
return ret
new_obj_ref = __salt__['infoblox.create_a'](data=data, **api_opts)
new_obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record created'
ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}}
return ret
def absent(name=None, ipv4addr=None, **api_opts):
'''
Ensure infoblox A record is removed.
State example:
.. code-block:: yaml
infoblox_a.absent:
- name: example-ha-0.domain.com
infoblox_a.absent:
- name:
- ipv4addr: 127.0.23.23
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts)
if not obj:
ret['result'] = True
ret['comment'] = 'infoblox already removed'
return ret
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret
if __salt__['infoblox.delete_a'](name=name, ipv4addr=ipv4addr, **api_opts):
ret['result'] = True
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret

View File

@ -0,0 +1,121 @@
# -*- coding: utf-8 -*-
'''
Infoblox CNAME management.
functions accept api_opts:
api_verifyssl: verify SSL [default to True or pillar value]
api_url: server to connect to [default to pillar value]
api_username: [default to pillar value]
api_password: [default to pillar value]
'''
def present(name=None, data=None, ensure_data=True, **api_opts):
'''
Ensure the CNAME with the given data is present.
name
CNAME of record
data
raw CNAME api data see: https://INFOBLOX/wapidoc
State example:
.. code-block:: yaml
infoblox_cname.present:
- name: example-ha-0.domain.com
- data:
name: example-ha-0.domain.com
canonical: example.domain.com
zone: example.com
view: Internal
comment: Example comment
infoblox_cname.present:
- name: example-ha-0.domain.com
- data:
name: example-ha-0.domain.com
canonical: example.domain.com
zone: example.com
view: Internal
comment: Example comment
- api_url: https://INFOBLOX/wapi/v1.2.1
- api_username: username
- api_password: passwd
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
obj = __salt__['infoblox.get_cname'](name=name, **api_opts)
if obj is None:
# perhaps the user updated the name
obj = __salt__['infoblox.get_cname'](name=data['name'], **api_opts)
if obj:
# warn user that the data was updated and does not match
ret['result'] = False
ret['comment'] = '** please update the name: {0} to equal the updated data name {1}'.format(name, data['name'])
return ret
if obj:
if not ensure_data:
ret['result'] = True
ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)'
return ret
diff = __salt__['infoblox.diff_objects'](data, obj)
if not diff:
ret['result'] = True
ret['comment'] = 'supplied fields already updated (note: removing fields might not update)'
return ret
if diff:
ret['changes'] = {'diff': diff}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to update infoblox record'
return ret
new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to create infoblox record {0}'.format(data['name'])
return ret
new_obj_ref = __salt__['infoblox.create_cname'](data=data, **api_opts)
new_obj = __salt__['infoblox.get_cname'](name=name, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record created'
ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}}
return ret
def absent(name=None, canonical=None, **api_opts):
'''
Ensure the CNAME with the given name or canonical name is removed
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
obj = __salt__['infoblox.get_cname'](name=name, canonical=canonical, **api_opts)
if not obj:
ret['result'] = True
ret['comment'] = 'infoblox already removed'
return ret
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret
if __salt__['infoblox.delete_cname'](name=name, canonical=canonical, **api_opts):
ret['result'] = True
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret

View File

@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
'''
Infoblox host record management.
functions accept api_opts:
api_verifyssl: verify SSL [default to True or pillar value]
api_url: server to connect to [default to pillar value]
api_username: [default to pillar value]
api_password: [default to pillar value]
'''
def present(name=None, data=None, ensure_data=True, **api_opts):
'''
This will ensure that a host with the provided name exists.
This will try to ensure that the state of the host matches the given data.
If the host is not found, then one will be created.
When trying to update a hostname ensure `name` is set to the hostname
of the current record. You can give a new name in the `data.name`.
Avoid race conditions, use func:nextavailableip:
- func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default
- func:nextavailableip:10.0.0.0/8
- func:nextavailableip:10.0.0.0/8,externalconfigure_for_dns
- func:nextavailableip:10.0.0.3-10.0.0.10
State Example:
.. code-block:: yaml
# this would update `original_hostname.example.ca` to changed `data`.
infoblox_host_record.present:
- name: original_hostname.example.ca
- data: {'name': 'hostname.example.ca',
'aliases': ['hostname.math.example.ca'],
'extattrs': [{'Business Contact': {'value': 'EXAMPLE@example.ca'}}],
'ipv4addrs': [{'configure_for_dhcp': True,
'ipv4addr': 'func:nextavailableip:129.97.139.0/24',
'mac': '00:50:56:84:6e:ae'}],
'ipv6addrs': [], }
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if data is None:
data = {}
if 'name' not in data:
data.update({'name': name})
obj = __salt__['infoblox.get_host'](name=name, **api_opts)
if obj is None:
# perhaps the user updated the name
obj = __salt__['infoblox.get_host'](name=data['name'], **api_opts)
if obj:
# warn user that the host name was updated and does not match
ret['result'] = False
ret['comment'] = 'please update the name: {0} to equal the updated data name {1}'.format(name, data['name'])
return ret
if obj:
if not ensure_data:
ret['result'] = True
ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)'
return ret
obj = __salt__['infoblox.get_host_advanced'](name=name, **api_opts)
diff = __salt__['infoblox.diff_objects'](data, obj)
if not diff:
ret['result'] = True
ret['comment'] = 'supplied fields already updated (note: removing fields might not update)'
return ret
if diff:
ret['changes'] = {'diff': diff}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to update infoblox record'
return ret
# replace func:nextavailableip with current ip address if in range
# get list of ipaddresses that are defined.
obj_addrs = []
if 'ipv4addrs' in obj:
for addr in obj['ipv4addrs']:
if 'ipv4addr' in addr:
obj_addrs.append(addr['ipv4addr'])
if 'ipv6addrs' in obj:
for addr in obj['ipv6addrs']:
if 'ipv6addr' in addr:
obj_addrs.append(addr['ipv6addr'])
# replace func:nextavailableip: if an ip address is already found in that range.
if 'ipv4addrs' in data:
for addr in data['ipv4addrs']:
if 'ipv4addr' in addr:
addrobj = addr['ipv4addr']
if addrobj.startswith('func:nextavailableip:'):
found_matches = 0
for ip in obj_addrs:
if __salt__['infoblox.is_ipaddr_in_ipfunc_range'](ip, addrobj):
addr['ipv4addr'] = ip
found_matches += 1
if found_matches > 1:
ret['comment'] = 'infoblox record cannot be updated because ipaddress {0} matches multiple func:nextavailableip'.format(ip)
ret['result'] = False
return ret
new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)'
#ret['changes'] = {'diff': diff }
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to create infoblox record {0}'.format(name)
return ret
new_obj_ref = __salt__['infoblox.create_host'](data=data, **api_opts)
new_obj = __salt__['infoblox.get_host'](name=name, **api_opts)
ret['result'] = True
ret['comment'] = 'infoblox record created'
ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}}
return ret
def absent(name=None, ipv4addr=None, mac=None, **api_opts):
'''
Ensure the host with the given Name ipv4addr or mac is removed.
State example:
.. code-block:: yaml
infoblox_host_record.absent:
- name: hostname.of.record.to.remove
infoblox_host_record.absent:
- name:
- ipv4addr: 192.168.0.1
infoblox_host_record.absent:
- name:
- mac: 12:02:12:31:23:43
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
obj = __salt__['infoblox.get_host'](name=name, ipv4addr=ipv4addr, mac=mac, **api_opts)
if not obj:
ret['result'] = True
ret['comment'] = 'infoblox already removed'
return ret
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret
if __salt__['infoblox.delete_host'](name=name, mac=mac, **api_opts):
ret['result'] = True
ret['changes'] = {'old': obj, 'new': 'absent'}
return ret

View File

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
'''
Infoblox range management.
Functions accept api_opts:
api_verifyssl: verify SSL [defaults to True or pillar value]
api_url: server to connect to [defaults to pillar value]
api_username: [defaults to pillar value]
api_password: [defaults to pillar value]
'''
def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
'''
Ensure the IPv4 range record is present.
.. code-block:: yaml
infoblox_range.present:
- start_addr: '129.97.150.160'
- end_addr: '129.97.150.170'
Verbose state example:
.. code-block:: yaml
infoblox_range.present:
data: {
'always_update_dns': False,
'authority': False,
'comment': 'range of IP addresses used for salt.. was used for ghost images deployment',
'ddns_generate_hostname': True,
'deny_all_clients': False,
'deny_bootp': False,
'disable': False,
'email_list': [],
'enable_ddns': False,
'enable_dhcp_thresholds': False,
'enable_email_warnings': False,
'enable_ifmap_publishing': False,
'enable_snmp_warnings': False,
'end_addr': '129.97.150.169',
'exclude': [],
'extattrs': {},
'fingerprint_filter_rules': [],
'high_water_mark': 95,
'high_water_mark_reset': 85,
'ignore_dhcp_option_list_request': False,
'lease_scavenge_time': -1,
'logic_filter_rules': [],
'low_water_mark': 0,
'low_water_mark_reset': 10,
'mac_filter_rules': [],
'member': {'_struct': 'dhcpmember',
'ipv4addr': '129.97.128.9',
'name': 'cn-dhcp-mc.example.ca'},
'ms_options': [],
'nac_filter_rules': [],
'name': 'ghost-range',
'network': '129.97.150.0/24',
'network_view': 'default',
'option_filter_rules': [],
'options': [{'name': 'dhcp-lease-time',
'num': 51,
'use_option': False,
'value': '43200',
'vendor_class': 'DHCP'}],
'recycle_leases': True,
'relay_agent_filter_rules': [],
'server_association_type': 'MEMBER',
'start_addr': '129.97.150.160',
'update_dns_on_lease_renewal': False,
'use_authority': False,
'use_bootfile': False,
'use_bootserver': False,
'use_ddns_domainname': False,
'use_ddns_generate_hostname': True,
'use_deny_bootp': False,
'use_email_list': False,
'use_enable_ddns': False,
'use_enable_dhcp_thresholds': False,
'use_enable_ifmap_publishing': False,
'use_ignore_dhcp_option_list_request': False,
'use_known_clients': False,
'use_lease_scavenge_time': False,
'use_nextserver': False,
'use_options': False,
'use_recycle_leases': False,
'use_unknown_clients': False,
'use_update_dns_on_lease_renewal': False
}
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
if 'start_addr' not in data:
data.update({'start_addr': start_addr})
if 'end_addr' not in data:
data.update({'end_addr': end_addr})
obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts)
if obj:
diff = __salt__['infoblox.diff_objects'](data, obj)
if not diff:
ret['result'] = True
ret['comment'] = 'supplied fields in correct state'
return ret
if diff:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to update record'
return ret
new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts)
ret['result'] = True
ret['comment'] = 'record fields updated'
ret['changes'] = {'diff': diff}
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to create record {0}'.format(name)
return ret
new_obj_ref = __salt__['infoblox.create_ipv4_range'](data, **api_opts)
new_obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
ret['result'] = True
ret['comment'] = 'record created'
ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}}
return ret
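# Both present() functions in these infoblox state modules follow the same
# fetch -> diff -> update-or-create skeleton. A condensed, hypothetical sketch
# of that contract; _ensure_present_sketch and the get/diff/update/create
# callables stand in for the __salt__['infoblox.*'] functions used above and
# are not part of this module.
def _ensure_present_sketch(name, data, get, diff, update, create, test=False):
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    obj = get(data)
    if obj:
        changes = diff(data, obj)
        if not changes:
            ret['result'] = True
            ret['comment'] = 'already in the desired state'
            return ret
        ret['changes'] = {'diff': changes}
        if test:
            ret['result'] = None
            ret['comment'] = 'would update'
            return ret
        update(obj['_ref'], data)
        ret['result'] = True
        ret['comment'] = 'updated'
        return ret
    if test:
        ret['result'] = None
        ret['comment'] = 'would create'
        return ret
    ret['changes'] = {'old': None, 'new': create(data)}
    ret['result'] = True
    ret['comment'] = 'created'
    return ret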
def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
'''
Ensure the range is removed
Supplying the end of the range is optional.
State example:
.. code-block:: yaml
infoblox_range.absent:
- name: 'vlan10'
infoblox_range.absent:
- name:
- start_addr: 127.0.1.20
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
if 'start_addr' not in data:
data.update({'start_addr': start_addr})
if 'end_addr' not in data:
data.update({'end_addr': end_addr})
obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts)
if not obj:
ret['result'] = True
ret['comment'] = 'already deleted'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to delete range'
return ret
if __salt__['infoblox.delete_object'](objref=obj['_ref']):
ret['result'] = True
ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr),
'new': 'Removed'}
return ret
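# present() and absent() above try three lookups before concluding the range
# does not exist: both endpoints, start address only, then end address only.
# A condensed sketch of that fallback; _find_range_sketch and lookup are
# hypothetical stand-ins for __salt__['infoblox.get_ipv4_range'].
def _find_range_sketch(lookup, start_addr, end_addr):
    for start, end in ((start_addr, end_addr),
                       (start_addr, None),
                       (None, end_addr)):
        obj = lookup(start_addr=start, end_addr=end)
        if obj is not None:
            return obj
    return None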

View File

@@ -9,6 +9,11 @@ import salt.utils.decorators
def _fallbackfunc():
'''
CLI Example:
.. code-block:: bash
'''
return False, 'fallback'
@@ -33,11 +38,21 @@ def booldependsTrue():
@salt.utils.decorators.depends(False)
def booldependsFalse():
'''
CLI Example:
.. code-block:: bash
'''
return True
@salt.utils.decorators.depends('time')
def depends():
'''
CLI Example:
.. code-block:: bash
'''
ret = {'ret': True,
'time': time.time()}
return ret
@@ -45,6 +60,11 @@ def depends():
@salt.utils.decorators.depends('time123')
def missing_depends():
'''
CLI Example:
.. code-block:: bash
'''
return True
@@ -62,6 +82,11 @@ def depends_will_not_fallback():
@salt.utils.decorators.depends('time123', fallback_function=_fallbackfunc)
def missing_depends_will_fallback():
'''
CLI Example:
.. code-block:: bash
'''
ret = {'ret': True,
'time': time.time()}
return ret

View File

@@ -22,8 +22,9 @@ class DecoratorTest(ModuleCase):
self.assertTrue(isinstance(ret['time'], float))
def test_missing_depends(self):
self.assertIn(
'is not available',
self.assertEqual(
{'runtests_decorators.missing_depends_will_fallback': '\n CLI Example:\n\n ',
'runtests_decorators.missing_depends': "'runtests_decorators.missing_depends' is not available."},
self.run_function('runtests_decorators.missing_depends'
)
)

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstanley.com>`
Tests for functions in salt.modules.esxdatacenter
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Libs
import salt.modules.esxdatacenter as esxdatacenter
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GetDetailsTestCase(TestCase, LoaderModuleMockMixin):
'''Tests for salt.modules.esxdatacenter.get_details'''
def setup_loader_modules(self):
return {esxdatacenter: {'__virtual__':
MagicMock(return_value='esxdatacenter'),
'__proxy__': {}}}
def test_get_details(self):
mock_get_details = MagicMock()
with patch.dict(esxdatacenter.__proxy__,
{'esxdatacenter.get_details': mock_get_details}):
esxdatacenter.get_details()
mock_get_details.assert_called_once_with()
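# The test above relies on patch.dict temporarily inserting a key into the
# otherwise empty __proxy__ dunder and restoring the dict when the context
# exits. A standalone illustration of that behaviour using unittest.mock
# directly (the dict and key names are only illustrative):
from unittest.mock import MagicMock, patch

def _patch_dict_demo():
    proxy = {}
    fake_get_details = MagicMock(return_value={'vcenter': 'vc01'})
    with patch.dict(proxy, {'esxdatacenter.get_details': fake_get_details}):
        # inside the block the injected callable is reachable through the dict
        assert proxy['esxdatacenter.get_details']() == {'vcenter': 'vc01'}
    # patch.dict removed the injected key again on exit
    assert 'esxdatacenter.get_details' not in proxy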

View File

@@ -619,6 +619,14 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin):
'mechanism': 'fake_mechanism',
'principal': 'fake_principal',
'domain': 'fake_domain'}
self.esxdatacenter_details = {'vcenter': 'fake_vcenter',
'username': 'fake_username',
'password': 'fake_password',
'protocol': 'fake_protocol',
'port': 'fake_port',
'mechanism': 'fake_mechanism',
'principal': 'fake_principal',
'domain': 'fake_domain'}
def tearDown(self):
for attrname in ('esxi_host_details', 'esxi_vcenter_details'):
@@ -638,6 +646,14 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin):
'fake_protocol', 'fake_port', 'fake_mechanism',
'fake_principal', 'fake_domain'), ret)
def test_esxdatacenter_proxy_details(self):
with patch('salt.modules.vsphere.get_proxy_type',
MagicMock(return_value='esxdatacenter')):
with patch.dict(vsphere.__salt__,
{'esxdatacenter.get_details': MagicMock(
return_value=self.esxdatacenter_details)}):
ret = vsphere._get_proxy_connection_details()
def test_esxi_proxy_vcenter_details(self):
with patch('salt.modules.vsphere.get_proxy_type',
MagicMock(return_value='esxi')):
@@ -847,7 +863,7 @@ class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin):
}
def test_supported_proxes(self):
supported_proxies = ['esxi']
supported_proxies = ['esxi', 'esxdatacenter']
for proxy_type in supported_proxies:
with patch('salt.modules.vsphere.get_proxy_type',
MagicMock(return_value=proxy_type)):
@@ -890,7 +906,7 @@ class DisconnectTestCase(TestCase, LoaderModuleMockMixin):
}
def test_supported_proxes(self):
supported_proxies = ['esxi']
supported_proxies = ['esxi', 'esxdatacenter']
for proxy_type in supported_proxies:
with patch('salt.modules.vsphere.get_proxy_type',
MagicMock(return_value=proxy_type)):
@@ -931,7 +947,7 @@ class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin):
}
def test_supported_proxes(self):
supported_proxies = ['esxi']
supported_proxies = ['esxi', 'esxdatacenter']
for proxy_type in supported_proxies:
with patch('salt.modules.vsphere.get_proxy_type',
MagicMock(return_value=proxy_type)):

View File

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-

View File

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstanley.com>`
Tests for esxdatacenter proxy
'''
# Import Python Libs
from __future__ import absolute_import
# Import external libs
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
# Import Salt Libs
import salt.proxy.esxdatacenter as esxdatacenter
import salt.exceptions
from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_JSONSCHEMA, 'jsonschema is required')
class InitTestCase(TestCase, LoaderModuleMockMixin):
'''Tests for salt.proxy.esxdatacenter.init'''
def setup_loader_modules(self):
return {esxdatacenter: {'__virtual__':
MagicMock(return_value='esxdatacenter'),
'DETAILS': {}}}
def setUp(self):
self.opts_userpass = {'proxy': {'proxytype': 'esxdatacenter',
'vcenter': 'fake_vcenter',
'datacenter': 'fake_dc',
'mechanism': 'userpass',
'username': 'fake_username',
'passwords': ['fake_password'],
'protocol': 'fake_protocol',
'port': 100}}
self.opts_sspi = {'proxy': {'proxytype': 'esxdatacenter',
'vcenter': 'fake_vcenter',
'datacenter': 'fake_dc',
'mechanism': 'sspi',
'domain': 'fake_domain',
'principal': 'fake_principal',
'protocol': 'fake_protocol',
'port': 100}}
def test_esxdatacenter_schema(self):
mock_json_validate = MagicMock()
serialized_schema = EsxdatacenterProxySchema().serialize()
with patch('salt.proxy.esxdatacenter.jsonschema.validate',
mock_json_validate):
esxdatacenter.init(self.opts_sspi)
mock_json_validate.assert_called_once_with(
self.opts_sspi['proxy'], serialized_schema)
def test_invalid_proxy_input_error(self):
with patch('salt.proxy.esxdatacenter.jsonschema.validate',
MagicMock(side_effect=jsonschema.exceptions.ValidationError(
'Validation Error'))):
with self.assertRaises(salt.exceptions.InvalidConfigError) as \
excinfo:
esxdatacenter.init(self.opts_userpass)
self.assertEqual(excinfo.exception.strerror.message,
'Validation Error')
def test_no_username(self):
opts = self.opts_userpass.copy()
del opts['proxy']['username']
with self.assertRaises(salt.exceptions.InvalidConfigError) as \
excinfo:
esxdatacenter.init(opts)
self.assertEqual(excinfo.exception.strerror,
'Mechanism is set to \'userpass\', but no '
'\'username\' key found in proxy config.')
def test_no_passwords(self):
opts = self.opts_userpass.copy()
del opts['proxy']['passwords']
with self.assertRaises(salt.exceptions.InvalidConfigError) as \
excinfo:
esxdatacenter.init(opts)
self.assertEqual(excinfo.exception.strerror,
'Mechanism is set to \'userpass\', but no '
'\'passwords\' key found in proxy config.')
def test_no_domain(self):
opts = self.opts_sspi.copy()
del opts['proxy']['domain']
with self.assertRaises(salt.exceptions.InvalidConfigError) as \
excinfo:
esxdatacenter.init(opts)
self.assertEqual(excinfo.exception.strerror,
'Mechanism is set to \'sspi\', but no '
'\'domain\' key found in proxy config.')
def test_no_principal(self):
opts = self.opts_sspi.copy()
del opts['proxy']['principal']
with self.assertRaises(salt.exceptions.InvalidConfigError) as \
excinfo:
esxdatacenter.init(opts)
self.assertEqual(excinfo.exception.strerror,
'Mechanism is set to \'sspi\', but no '
'\'principal\' key found in proxy config.')
def test_find_credentials(self):
mock_find_credentials = MagicMock(return_value=('fake_username',
'fake_password'))
with patch('salt.proxy.esxdatacenter.find_credentials',
mock_find_credentials):
esxdatacenter.init(self.opts_userpass)
mock_find_credentials.assert_called_once_with()
def test_details_userpass(self):
mock_find_credentials = MagicMock(return_value=('fake_username',
'fake_password'))
with patch('salt.proxy.esxdatacenter.find_credentials',
mock_find_credentials):
esxdatacenter.init(self.opts_userpass)
self.assertDictEqual(esxdatacenter.DETAILS,
{'vcenter': 'fake_vcenter',
'datacenter': 'fake_dc',
'mechanism': 'userpass',
'username': 'fake_username',
'password': 'fake_password',
'passwords': ['fake_password'],
'protocol': 'fake_protocol',
'port': 100})
def test_details_sspi(self):
esxdatacenter.init(self.opts_sspi)
self.assertDictEqual(esxdatacenter.DETAILS,
{'vcenter': 'fake_vcenter',
'datacenter': 'fake_dc',
'mechanism': 'sspi',
'domain': 'fake_domain',
'principal': 'fake_principal',
'protocol': 'fake_protocol',
'port': 100})
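# A minimal, hypothetical sketch of the init() flow these tests assume:
# validate the proxy config against the serialized schema, wrap schema
# failures in InvalidConfigError, and enforce the mechanism-specific keys.
# It is reconstructed from the assertions above, not the actual
# salt.proxy.esxdatacenter implementation, and it omits the credential
# lookup and DETAILS population covered by the last two tests.
import jsonschema

import salt.exceptions
from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema

def _init_sketch(opts):
    conf = opts['proxy']
    try:
        jsonschema.validate(conf, EsxdatacenterProxySchema().serialize())
    except jsonschema.exceptions.ValidationError as exc:
        raise salt.exceptions.InvalidConfigError(exc)
    required = {'userpass': ('username', 'passwords'),
                'sspi': ('domain', 'principal')}[conf['mechanism']]
    for key in required:
        if key not in conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'{0}\', but no \'{1}\' key found in '
                'proxy config.'.format(conf['mechanism'], key))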