Mirror of https://github.com/valitydev/salt.git, synced 2024-11-07 00:55:19 +00:00
Merge remote-tracking branch 'upstream/2015.8' into merge-forward-develop
Conflicts:
    doc/topics/cloud/vmware.rst
    salt/cloud/clouds/vmware.py
    salt/grains/chronos.py
    salt/grains/marathon.py
    salt/loader.py
    salt/modules/chronos.py
    salt/modules/marathon.py
    salt/proxy/chronos.py
    salt/proxy/marathon.py
    salt/state.py
    salt/states/chronos_job.py
    salt/states/marathon_app.py
    salt/states/user.py
    salt/utils/schedule.py
Commit efb479a1bd
@@ -409,6 +409,11 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
    Specify the virtual hardware version for the vm/template that is supported by the
    host.

``customization``
    Specify whether the new virtual machine should be customized or not. If
    ``customization: False`` is set, the new virtual machine will not be customized.
    Default is ``customization: True``.

``image``
    Specify the guest id of the VM. For a full list of supported values see the
    VMware vSphere documentation:
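For orientation, a minimal profile sketch combining the two options described above; the provider name, ``clonefrom`` template, and guest id are placeholders for illustration, not values taken from this commit:

.. code-block:: yaml

    vmware-centos:
      provider: my-vmware-config      # placeholder provider configuration name
      clonefrom: centos7-template     # placeholder VM/template to clone from
      customization: False            # skip guest customization after cloning
      image: centos64Guest            # guest id passed through to vSphere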
doc/topics/releases/2015.8.2.rst (new file, 1010 lines)
File diff suppressed because it is too large.
@@ -24,7 +24,6 @@ describes the package. An example of this file is:
    name: apache
    os: RedHat, Debian, Ubuntu, Suse, FreeBSD
    os_family: RedHat, Debian, Suse, FreeBSD
    dependencies: None
    version: 201506
    release: 2
    summary: Formula for installing Apache
@@ -97,12 +96,6 @@ Files outside the ``top_level_dir``, such as ``README.rst``, ``FORMULA``, and
``LICENSE`` will not be installed. The exceptions to this rule are files that
are already treated specially, such as ``pillar.example`` and ``_modules/``.

dependencies
~~~~~~~~~~~~
A list of packages which must be installed before this package can function. If
a matching package is found in an SPM repository, the dependency is installed
automatically.

recommended
~~~~~~~~~~~
A list of optional packages that are recommended to be installed with the
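As a rough illustration of the two fields described above, a FORMULA file might carry them alongside the required fields; the package names are invented, and the comma-separated value style is only assumed to match the ``os`` line in the earlier example:

.. code-block:: yaml

    name: apache
    os: RedHat, Debian
    os_family: RedHat, Debian
    version: 201506
    release: 2
    summary: Formula for installing Apache
    dependencies: mysql-formula        # hypothetical package that must be installed first
    recommended: logrotate-formula     # hypothetical optional companion package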
@@ -274,3 +274,12 @@ The default configuration for the ``file_roots`` is:
    - /srv/salt

So the top file is defaulted to the location ``/srv/salt/top.sls``


Salt Master Umask
=================

The salt master uses a cache to track jobs as they are published and returns come back.
The recommended umask for a salt-master is `022`, which is the default for most users
on a system. Incorrect umasks can result in permission-denied errors when the master
tries to access files in its cache.
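One way to guarantee that umask when the master runs under systemd is a drop-in override; a minimal sketch, assuming the standard ``salt-master`` unit name and drop-in path:

.. code-block:: ini

    # /etc/systemd/system/salt-master.service.d/umask.conf (assumed path)
    [Service]
    UMask=0022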
@@ -341,7 +341,7 @@ class Resolver(object):
        self.auth = salt.loader.auth(opts)

    def _send_token_request(self, load):
        if self.opts['transport'] == 'zeromq':
        if self.opts['transport'] in ('zeromq', 'tcp'):
            master_uri = 'tcp://' + salt.utils.ip_bracket(self.opts['interface']) + \
                         ':' + str(self.opts['ret_port'])
            channel = salt.transport.client.ReqChannel.factory(self.opts,
@@ -1151,15 +1151,7 @@ def list_datacenters(kwargs=None, call=None):
            '-f or --function.'
        )

    datacenters = []
    datacenter_properties = ["name"]

    datacenter_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Datacenter, datacenter_properties)

    for datacenter in datacenter_list:
        datacenters.append(datacenter["name"])

    return {'Datacenters': datacenters}
    return {'Datacenters': salt.utils.vmware.list_datacenters(_get_si())}


def list_clusters(kwargs=None, call=None):
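Each of the ``list_*`` refactors in this file follows the same pattern: the repeated ``get_mors_with_properties`` boilerplate moves into ``salt.utils.vmware`` helpers, whose definitions appear at the end of this diff. A condensed sketch of that shared helper; the ``list_datacenters`` body is assumed to delegate this way, since only ``list_objects`` is shown in full later:

.. code-block:: python

    # Condensed from the salt/utils/vmware.py hunk at the end of this diff.
    # get_mors_with_properties and the pyVmomi ``vim`` types come from that module's context.
    def list_objects(service_instance, vim_object, properties=None):
        if properties is None:
            properties = ['name']
        return [item['name'] for item in
                get_mors_with_properties(service_instance, vim_object, properties)]

    def list_datacenters(service_instance):
        # Assumed delegation, mirroring the one-line replacements in the cloud driver above
        return list_objects(service_instance, vim.Datacenter)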
@ -1178,17 +1170,7 @@ def list_clusters(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
clusters = []
|
||||
cluster_properties = ["name"]
|
||||
|
||||
cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
|
||||
vim.ClusterComputeResource,
|
||||
cluster_properties)
|
||||
|
||||
for cluster in cluster_list:
|
||||
clusters.append(cluster["name"])
|
||||
|
||||
return {'Clusters': clusters}
|
||||
return {'Clusters': salt.utils.vmware.list_clusters(_get_si())}
|
||||
|
||||
|
||||
def list_datastore_clusters(kwargs=None, call=None):
|
||||
@ -1207,17 +1189,7 @@ def list_datastore_clusters(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
datastore_clusters = []
|
||||
datastore_cluster_properties = ["name"]
|
||||
|
||||
datastore_cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
|
||||
vim.StoragePod,
|
||||
datastore_cluster_properties)
|
||||
|
||||
for datastore_cluster in datastore_cluster_list:
|
||||
datastore_clusters.append(datastore_cluster["name"])
|
||||
|
||||
return {'Datastore Clusters': datastore_clusters}
|
||||
return {'Datastore Clusters': salt.utils.vmware.list_datastore_clusters(_get_si())}
|
||||
|
||||
|
||||
def list_datastores(kwargs=None, call=None):
|
||||
@ -1236,17 +1208,7 @@ def list_datastores(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
datastores = []
|
||||
datastore_properties = ["name"]
|
||||
|
||||
datastore_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
|
||||
vim.Datastore,
|
||||
datastore_properties)
|
||||
|
||||
for datastore in datastore_list:
|
||||
datastores.append(datastore["name"])
|
||||
|
||||
return {'Datastores': datastores}
|
||||
return {'Datastores': salt.utils.vmware.list_datastores(_get_si())}
|
||||
|
||||
|
||||
def list_hosts(kwargs=None, call=None):
|
||||
@ -1265,15 +1227,7 @@ def list_hosts(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
hosts = []
|
||||
host_properties = ["name"]
|
||||
host_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
|
||||
vim.HostSystem,
|
||||
host_properties)
|
||||
for host in host_list:
|
||||
hosts.append(host["name"])
|
||||
|
||||
return {'Hosts': hosts}
|
||||
return {'Hosts': salt.utils.vmware.list_hosts(_get_si())}
|
||||
|
||||
|
||||
def list_resourcepools(kwargs=None, call=None):
|
||||
@ -1292,17 +1246,7 @@ def list_resourcepools(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
resource_pools = []
|
||||
resource_pool_properties = ["name"]
|
||||
|
||||
resource_pool_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
|
||||
vim.ResourcePool,
|
||||
resource_pool_properties)
|
||||
|
||||
for resource_pool in resource_pool_list:
|
||||
resource_pools.append(resource_pool["name"])
|
||||
|
||||
return {'Resource Pools': resource_pools}
|
||||
return {'Resource Pools': salt.utils.vmware.list_resourcepools(_get_si())}
|
||||
|
||||
|
||||
def list_networks(kwargs=None, call=None):
|
||||
@ -1321,15 +1265,7 @@ def list_networks(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
networks = []
|
||||
network_properties = ["name"]
|
||||
|
||||
network_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Network, network_properties)
|
||||
|
||||
for network in network_list:
|
||||
networks.append(network["name"])
|
||||
|
||||
return {'Networks': networks}
|
||||
return {'Networks': salt.utils.vmware.list_networks(_get_si())}
|
||||
|
||||
|
||||
def list_nodes_min(kwargs=None, call=None):
|
||||
@ -1715,15 +1651,7 @@ def list_folders(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
folders = []
|
||||
folder_properties = ["name"]
|
||||
|
||||
folder_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.Folder, folder_properties)
|
||||
|
||||
for folder in folder_list:
|
||||
folders.append(folder["name"])
|
||||
|
||||
return {'Folders': folders}
|
||||
return {'Folders': salt.utils.vmware.list_folders(_get_si())}
|
||||
|
||||
|
||||
def list_snapshots(kwargs=None, call=None):
|
||||
@ -2186,6 +2114,9 @@ def create(vm_):
|
||||
guest_id = config.get_cloud_config_value(
|
||||
'image', vm_, __opts__, search_global=False, default=None
|
||||
)
|
||||
customization = config.get_cloud_config_value(
|
||||
'customization', vm_, __opts__, search_global=False, default=True
|
||||
)
|
||||
|
||||
if 'clonefrom' in vm_:
|
||||
# Clone VM/template from specified VM/template
|
||||
@ -2342,7 +2273,7 @@ def create(vm_):
|
||||
config=config_spec
|
||||
)
|
||||
|
||||
if devices and 'network' in list(devices.keys()) and 'Windows' not in object_ref.config.guestFullName:
|
||||
if customization and (devices and 'network' in list(devices.keys())) and 'Windows' not in object_ref.config.guestFullName:
|
||||
global_ip = vim.vm.customization.GlobalIPSettings()
|
||||
|
||||
if 'dns_servers' in list(vm_.keys()):
|
||||
@ -2941,15 +2872,7 @@ def list_dvs(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
distributed_vswitches = []
|
||||
dvs_properties = ["name"]
|
||||
|
||||
dvs_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.DistributedVirtualSwitch, dvs_properties)
|
||||
|
||||
for dvs in dvs_list:
|
||||
distributed_vswitches.append(dvs["name"])
|
||||
|
||||
return {'Distributed Virtual Switches': distributed_vswitches}
|
||||
return {'Distributed Virtual Switches': salt.utils.vmware.list_dvs(_get_si())}
|
||||
|
||||
|
||||
def list_vapps(kwargs=None, call=None):
|
||||
@ -2968,15 +2891,7 @@ def list_vapps(kwargs=None, call=None):
|
||||
'-f or --function.'
|
||||
)
|
||||
|
||||
vapps = []
|
||||
vapp_properties = ["name"]
|
||||
|
||||
vapp_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualApp, vapp_properties)
|
||||
|
||||
for vapp in vapp_list:
|
||||
vapps.append(vapp["name"])
|
||||
|
||||
return {'vApps': vapps}
|
||||
return {'vApps': salt.utils.vmware.list_vapps(_get_si())}
|
||||
|
||||
|
||||
def enter_maintenance_mode(kwargs=None, call=None):
|
||||
|
@@ -1171,7 +1171,7 @@ DEFAULT_MASTER_OPTS = {
    'keysize': 2048,
    'transport': 'zeromq',
    'enumerate_proxy_minions': False,
    'gather_job_timeout': 5,
    'gather_job_timeout': 10,
    'syndic_event_forward_timeout': 0.5,
    'syndic_max_event_process_time': 0.5,
    'syndic_jid_forward_cache_hwm': 100,
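The bumped default can also be pinned explicitly in the master configuration; a one-line sketch, assuming the standard config path:

.. code-block:: yaml

    # /etc/salt/master
    gather_job_timeout: 10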
@@ -2,7 +2,7 @@
'''
Generate chronos proxy minion grains.

.. versionadded:: Boron
.. versionadded:: 2015.8.2

'''
from __future__ import absolute_import
@@ -983,8 +983,8 @@ _OS_FAMILY_MAP = {
    'SLED': 'Suse',
    'openSUSE': 'Suse',
    'SUSE': 'Suse',
    'SUSE Leap': 'Suse',
    'openSUSE Leap': 'Suse',
    'openSUSE Tumbleweed': 'Suse',
    'Solaris': 'Solaris',
    'SmartOS': 'Solaris',
    'OpenIndiana Development': 'Solaris',
@ -1,13 +1,26 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Generate baseline proxy minion grains
|
||||
Generate baseline proxy minion grains for Dell FX2 chassis.
|
||||
The challenge is that most of Salt isn't bootstrapped yet,
|
||||
so we need to repeat a bunch of things that would normally happen
|
||||
in proxy/fx2.py--just enough to get data from the chassis to include
|
||||
in grains.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import salt.utils
|
||||
import logging
|
||||
import salt.proxy.fx2
|
||||
import salt.modules.cmdmod
|
||||
import salt.modules.dracr
|
||||
|
||||
__proxyenabled__ = ['rest_sample']
|
||||
__proxyenabled__ = ['fx2']
|
||||
|
||||
__virtualname__ = 'rest_sample'
|
||||
__virtualname__ = 'fx2'
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
|
||||
GRAINS_CACHE = {}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
@ -17,16 +30,52 @@ def __virtual__():
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def _grains():
|
||||
'''
|
||||
Get the grains from the proxied device
|
||||
'''
|
||||
r = salt.modules.dracr.system_info(host=__pillar__['proxy']['host'],
|
||||
admin_username=__pillar__['proxy']['admin_username'],
|
||||
admin_password=__pillar__['proxy']['admin_password'])
|
||||
|
||||
if r.get('retcode', 0) == 0:
|
||||
GRAINS_CACHE = r
|
||||
username = __pillar__['proxy']['admin_username']
|
||||
password = __pillar__['proxy']['admin_password']
|
||||
else:
|
||||
r = salt.modules.dracr.system_info(host=__pillar__['proxy']['host'],
|
||||
admin_username=__pillar__['proxy']['fallback_admin_username'],
|
||||
admin_password=__pillar__['proxy']['fallback_admin_password'])
|
||||
if r.get('retcode', 0) == 0:
|
||||
GRAINS_CACHE = r
|
||||
username = __pillar__['proxy']['fallback_admin_username']
|
||||
password = __pillar__['proxy']['fallback_admin_password']
|
||||
else:
|
||||
GRAINS_CACHE = {}
|
||||
|
||||
GRAINS_CACHE.update(salt.modules.dracr.inventory(host=__pillar__['proxy']['host'],
|
||||
admin_username=username,
|
||||
admin_password=password))
|
||||
|
||||
return GRAINS_CACHE
|
||||
|
||||
|
||||
def fx2():
|
||||
return _grains()
|
||||
|
||||
|
||||
def kernel():
|
||||
return {'kernel': 'proxy'}
|
||||
|
||||
|
||||
def os():
|
||||
return {'os': 'RestExampleOS'}
|
||||
|
||||
|
||||
def location():
|
||||
return {'location': 'In this darn virtual machine. Let me out!'}
|
||||
if not GRAINS_CACHE:
|
||||
GRAINS_CACHE.update(_grains())
|
||||
|
||||
try:
|
||||
return {'location': GRAINS_CACHE.get('Chassis Information').get('Chassis Location')}
|
||||
except AttributeError:
|
||||
return {'location': 'Unknown'}
|
||||
|
||||
|
||||
def os_family():
|
||||
@ -34,4 +83,4 @@ def os_family():
|
||||
|
||||
|
||||
def os_data():
|
||||
return {'os_data': 'funkyHttp release 1.0.a.4.g'}
|
||||
return {'os_data': 'Unknown'}
|
||||
|
@ -2,7 +2,7 @@
|
||||
'''
|
||||
Generate marathon proxy minion grains.
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
@ -1443,28 +1443,23 @@ class LazyLoader(salt.utils.lazy.LazyDict):
|
||||
try:
|
||||
error_reason = None
|
||||
if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__):
|
||||
if self.opts.get('virtual_timer', False):
|
||||
try:
|
||||
start = time.time()
|
||||
virtual = mod.__virtual__()
|
||||
if isinstance(virtual, tuple):
|
||||
error_reason = virtual[1]
|
||||
virtual = virtual[0]
|
||||
end = time.time() - start
|
||||
msg = 'Virtual function took {0} seconds for {1}'.format(
|
||||
end, module_name)
|
||||
log.warning(msg)
|
||||
else:
|
||||
try:
|
||||
virtual = mod.__virtual__()
|
||||
if isinstance(virtual, tuple):
|
||||
error_reason = virtual[1]
|
||||
virtual = virtual[0]
|
||||
except Exception as exc:
|
||||
log.error('Exception raised when processing __virtual__ function'
|
||||
' for {0}. Module will not be loaded: {1}'.format(
|
||||
module_name, exc),
|
||||
exc_info_on_loglevel=logging.DEBUG)
|
||||
virtual = None
|
||||
if self.opts.get('virtual_timer', False):
|
||||
end = time.time() - start
|
||||
msg = 'Virtual function took {0} seconds for {1}'.format(
|
||||
end, module_name)
|
||||
log.warning(msg)
|
||||
except Exception as exc:
|
||||
error_reason = ('Exception raised when processing __virtual__ function'
|
||||
' for {0}. Module will not be loaded {1}'.format(
|
||||
module_name, exc))
|
||||
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
|
||||
virtual = None
|
||||
# Get the module's virtual name
|
||||
virtualname = getattr(mod, '__virtualname__', virtual)
|
||||
if not virtual:
|
||||
|
@ -473,7 +473,7 @@ def install(name=None,
|
||||
|
||||
.. versionadded:: 2015.5.0
|
||||
|
||||
force_conf_new
|
||||
force_conf_new
|
||||
Always install the new version of any configuration files.
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
@ -839,7 +839,7 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
|
||||
'''
|
||||
Upgrades all packages via ``apt-get dist-upgrade``
|
||||
|
||||
Returns a dict containing the changes.
|
||||
Returns a dict containing the changes::
|
||||
|
||||
{'<package>': {'old': '<old-version>',
|
||||
'new': '<new-version>'}}
|
||||
@ -848,9 +848,9 @@ def upgrade(refresh=True, dist_upgrade=False, **kwargs):
|
||||
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
|
||||
is to use upgrade.
|
||||
|
||||
.. versionadded:: 2014.7.0
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
force_conf_new
|
||||
force_conf_new
|
||||
Always install the new version of any configuration files.
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
@@ -1622,19 +1622,30 @@ def mod_repo(repo, saltenv='base', **kwargs):
    '''
    Modify one or more values for a repo. If the repo does not exist, it will
    be created, so long as the definition is well formed. For Ubuntu the
    "ppa:<project>/repo" format is acceptable. "ppa:" format can only be
    ``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be
    used to create a new repository.

    The following options are available to modify a repo definition::
    The following options are available to modify a repo definition:

        comps (a comma separated list of components for the repo, e.g. "main")
        file (a file name to be used)
        keyserver (keyserver to get gpg key from)
        keyid (key id to load with the keyserver argument)
        key_url (URL to a gpg key to add to the apt gpg keyring)
        consolidate (if true, will attempt to de-dup and consolidate sources)
    comps
        a comma separated list of components for the repo, e.g. ``main``

    * Note: Due to the way keys are stored for apt, there is a known issue
    file
        a file name to be used

    keyserver
        keyserver to get gpg key from

    keyid
        key id to load with the keyserver argument

    key_url
        URL to a GPG key to add to the APT GPG keyring

    consolidate
        if ``True``, will attempt to de-dup and consolidate sources

    .. note:: Due to the way keys are stored for APT, there is a known issue
        where the key wont be updated unless another change is made
        at the same time. Keys should be properly added on initial
        configuration.
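A hedged CLI illustration of these keyword arguments; the repo string, key id, and keyserver are placeholders, not values from this commit:

.. code-block:: bash

    salt '*' pkg.mod_repo 'deb http://archive.ubuntu.com/ubuntu trusty main' \
        comps=main,universe keyid=0123ABCD keyserver=keyserver.ubuntu.com consolidate=True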
@ -2043,10 +2054,10 @@ def set_selections(path=None, selection=None, clear=False, saltenv='base'):
|
||||
|
||||
The state can be any one of, documented in ``dpkg(1)``:
|
||||
|
||||
- install
|
||||
- hold
|
||||
- deinstall
|
||||
- purge
|
||||
- install
|
||||
- hold
|
||||
- deinstall
|
||||
- purge
|
||||
|
||||
This command is commonly used to mark specific packages to be held from
|
||||
being upgraded, that is, to be kept at a certain version. When a state is
|
||||
|
@ -7,7 +7,6 @@ Module for fetching artifacts from Artifactory
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import base64
|
||||
import xml.etree.ElementTree as ET
|
||||
import logging
|
||||
|
||||
# Import Salt libs
|
||||
@ -16,8 +15,27 @@ import salt.ext.six.moves.http_client # pylint: disable=import-error,redefined-
|
||||
from salt.ext.six.moves import urllib # pylint: disable=no-name-in-module
|
||||
from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=no-name-in-module
|
||||
|
||||
# Import 3rd party libs
|
||||
try:
|
||||
from salt._compat import ElementTree as ET
|
||||
HAS_ELEMENT_TREE = True
|
||||
except ImportError:
|
||||
HAS_ELEMENT_TREE = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'artifactory'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if elementtree xml library is available.
|
||||
'''
|
||||
if not HAS_ELEMENT_TREE:
|
||||
return (False, 'Cannot load {0} module: ElementTree library unavailable'.format(__virtualname__))
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None):
|
||||
'''
|
||||
|
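With the new ``__virtual__`` guard, this module only loads when ElementTree is importable; when it is, the signature shown above can be exercised from the CLI roughly as follows (the URL and artifact coordinates are placeholders):

.. code-block:: bash

    salt '*' artifactory.get_latest_snapshot \
        artifactory_url='http://artifactory.example.com/artifactory' \
        repository='libs-snapshot-local' group_id='com.example' \
        artifact_id='myapp' packaging='war' target_dir='/tmp'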
@ -36,8 +36,9 @@ def __virtual__():
|
||||
|
||||
|
||||
def cmd(cmd, *args, **kwargs):
|
||||
proxyprefix = __opts__['proxy']['proxytype']
|
||||
kwargs['admin_username'] = __proxy__[proxyprefix+'.admin_username']()
|
||||
kwargs['admin_password'] = __proxy__[proxyprefix+'.admin_password']()
|
||||
kwargs['host'] = __proxy__[proxyprefix+'.host']()
|
||||
proxycmd = __opts__['proxy']['proxytype'] + '.chconfig'
|
||||
kwargs['admin_username'] = __pillar__['proxy']['admin_username']
|
||||
kwargs['admin_password'] = __pillar__['proxy']['admin_password']
|
||||
kwargs['host'] = __pillar__['proxy']['host']
|
||||
return __proxy__[proxycmd](cmd, *args, **kwargs)
|
||||
|
@ -4,7 +4,7 @@ Module providing a simple management interface to a chronos cluster.
|
||||
|
||||
Currently this only works when run through a proxy minion.
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -26,6 +26,11 @@ log = logging.getLogger(__name__)
|
||||
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
# Don't shadow built-ins.
|
||||
__func_alias__ = {
|
||||
'list_': 'list'
|
||||
}
|
||||
|
||||
__virtualname__ = 'consul'
|
||||
|
||||
|
||||
@ -92,7 +97,7 @@ def _query(function,
|
||||
return ret
|
||||
|
||||
|
||||
def list(consul_url=None, key=None, **kwargs):
|
||||
def list_(consul_url=None, key=None, **kwargs):
|
||||
'''
|
||||
List keys in Consul
|
||||
|
||||
|
@ -21,6 +21,14 @@ from salt.ext.six.moves import map
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
run_all = __salt__['cmd.run_all']
|
||||
except NameError:
|
||||
import salt.modules.cmdmod
|
||||
__salt__ = {
|
||||
'cmd.run_all': salt.modules.cmdmod._run_all_quiet
|
||||
}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if salt.utils.which('racadm'):
|
||||
@ -37,13 +45,16 @@ def __parse_drac(output):
|
||||
section = ''
|
||||
|
||||
for i in output.splitlines():
|
||||
if i.strip().endswith(':') and '=' not in i:
|
||||
section = i[0:-1]
|
||||
drac[section] = {}
|
||||
if len(i.rstrip()) > 0 and '=' in i:
|
||||
if section in drac:
|
||||
drac[section].update(dict(
|
||||
[[prop.strip() for prop in i.split('=')]]
|
||||
))
|
||||
else:
|
||||
section = i.strip()[1:-1]
|
||||
section = i.strip()
|
||||
if section not in drac and section:
|
||||
drac[section] = {}
|
||||
|
||||
@ -127,7 +138,6 @@ def __execute_ret(command, host=None,
|
||||
fmtlines.append(l)
|
||||
if '=' in l:
|
||||
continue
|
||||
break
|
||||
cmd['stdout'] = '\n'.join(fmtlines)
|
||||
|
||||
return cmd
|
||||
@ -135,8 +145,6 @@ def __execute_ret(command, host=None,
|
||||
|
||||
def get_dns_dracname(host=None,
|
||||
admin_username=None, admin_password=None):
|
||||
import pydevd
|
||||
pydevd.settrace('172.16.207.1', port=65500, stdoutToServer=True, stderrToServer=True)
|
||||
|
||||
ret = __execute_ret('get iDRAC.NIC.DNSRacName', host=host,
|
||||
admin_username=admin_username,
|
||||
@ -976,7 +984,10 @@ def get_slotname(slot, host=None, admin_username=None, admin_password=None):
|
||||
'''
|
||||
slots = list_slotnames(host=host, admin_username=admin_username,
|
||||
admin_password=admin_password)
|
||||
return slots[slot]
|
||||
# The keys for this dictionary are strings, not integers, so convert the
|
||||
# argument to a string
|
||||
slot = str(slot)
|
||||
return slots[slot]['slotname']
|
||||
|
||||
|
||||
def set_slotname(slot, name, host=None,
|
||||
|
@ -646,7 +646,9 @@ def install(name=None,
|
||||
if not changes:
|
||||
inst_v = version(param)
|
||||
|
||||
if latest_version(param) == inst_v:
|
||||
# Prevent latest_version from calling refresh_db. Either we
|
||||
# just called it or we were asked not to.
|
||||
if latest_version(param, refresh=False) == inst_v:
|
||||
all_uses = __salt__['portage_config.get_cleared_flags'](param)
|
||||
if _flags_changed(*all_uses):
|
||||
changes[param] = {'version': inst_v,
|
||||
|
@ -480,7 +480,7 @@ def get_saved_rules(conf_file=None, family='ipv4'):
|
||||
IPv6:
|
||||
salt '*' iptables.get_saved_rules family=ipv6
|
||||
'''
|
||||
return _parse_conf(conf_file, family)
|
||||
return _parse_conf(conf_file=conf_file, family=family)
|
||||
|
||||
|
||||
def get_rules(family='ipv4'):
|
||||
|
@ -106,7 +106,7 @@ def version():
|
||||
'''
|
||||
Return the actual lxc client version
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -297,61 +297,61 @@ def cloud_init_interface(name, vm_=None, **kwargs):
|
||||
|
||||
Legacy but still supported options:
|
||||
|
||||
from_container
|
||||
which container we use as a template
|
||||
when running lxc.clone
|
||||
image
|
||||
which template do we use when we
|
||||
are using lxc.create. This is the default
|
||||
mode unless you specify something in from_container
|
||||
backing
|
||||
which backing store to use.
|
||||
Values can be: overlayfs, dir(default), lvm, zfs, brtfs
|
||||
fstype
|
||||
When using a blockdevice level backing store,
|
||||
which filesystem to use on
|
||||
size
|
||||
When using a blockdevice level backing store,
|
||||
which size for the filesystem to use on
|
||||
snapshot
|
||||
Use snapshot when cloning the container source
|
||||
vgname
|
||||
if using LVM: vgname
|
||||
lvname
|
||||
if using LVM: lvname
|
||||
ip
|
||||
ip for the primary nic
|
||||
mac
|
||||
mac address for the primary nic
|
||||
netmask
|
||||
netmask for the primary nic (24)
|
||||
= ``vm_.get('netmask', '24')``
|
||||
bridge
|
||||
bridge for the primary nic (lxcbr0)
|
||||
gateway
|
||||
network gateway for the container
|
||||
additional_ips
|
||||
additional ips which will be wired on the main bridge (br0)
|
||||
which is connected to internet.
|
||||
Be aware that you may use manual virtual mac addresses
|
||||
providen by you provider (online, ovh, etc).
|
||||
This is a list of mappings {ip: '', mac: '', netmask:''}
|
||||
Set gateway to None and an interface with a gateway
|
||||
to escape from another interface that eth0.
|
||||
eg::
|
||||
from_container
|
||||
which container we use as a template
|
||||
when running lxc.clone
|
||||
image
|
||||
which template do we use when we
|
||||
are using lxc.create. This is the default
|
||||
mode unless you specify something in from_container
|
||||
backing
|
||||
which backing store to use.
|
||||
Values can be: overlayfs, dir(default), lvm, zfs, brtfs
|
||||
fstype
|
||||
When using a blockdevice level backing store,
|
||||
which filesystem to use on
|
||||
size
|
||||
When using a blockdevice level backing store,
|
||||
which size for the filesystem to use on
|
||||
snapshot
|
||||
Use snapshot when cloning the container source
|
||||
vgname
|
||||
if using LVM: vgname
|
||||
lvname
|
||||
if using LVM: lvname
|
||||
ip
|
||||
ip for the primary nic
|
||||
mac
|
||||
mac address for the primary nic
|
||||
netmask
|
||||
netmask for the primary nic (24)
|
||||
= ``vm_.get('netmask', '24')``
|
||||
bridge
|
||||
bridge for the primary nic (lxcbr0)
|
||||
gateway
|
||||
network gateway for the container
|
||||
additional_ips
|
||||
additional ips which will be wired on the main bridge (br0)
|
||||
which is connected to internet.
|
||||
Be aware that you may use manual virtual mac addresses
|
||||
providen by you provider (online, ovh, etc).
|
||||
This is a list of mappings {ip: '', mac: '', netmask:''}
|
||||
Set gateway to None and an interface with a gateway
|
||||
to escape from another interface that eth0.
|
||||
eg::
|
||||
|
||||
- {'mac': '00:16:3e:01:29:40',
|
||||
'gateway': None, (default)
|
||||
'link': 'br0', (default)
|
||||
'netmask': '', (default)
|
||||
'ip': '22.1.4.25'}
|
||||
- {'mac': '00:16:3e:01:29:40',
|
||||
'gateway': None, (default)
|
||||
'link': 'br0', (default)
|
||||
'netmask': '', (default)
|
||||
'ip': '22.1.4.25'}
|
||||
|
||||
users
|
||||
administrative users for the container
|
||||
default: [root] and [root, ubuntu] on ubuntu
|
||||
default_nic
|
||||
name of the first interface, you should
|
||||
really not override this
|
||||
users
|
||||
administrative users for the container
|
||||
default: [root] and [root, ubuntu] on ubuntu
|
||||
default_nic
|
||||
name of the first interface, you should
|
||||
really not override this
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -609,7 +609,7 @@ def get_container_profile(name=None, **kwargs):
|
||||
of variable names and values. See the :ref:`LXC Tutorial
|
||||
<tutorial-lxc-profiles>` for more information on how to use LXC profiles.
|
||||
|
||||
CLI Example::
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@ -669,7 +669,7 @@ def get_network_profile(name=None, **kwargs):
|
||||
``/etc/sysconfig/network-scripts/ifcfg-<interface_name>`` on
|
||||
RHEL/CentOS, or ``/etc/network/interfaces`` on Debian/Ubuntu/etc.)
|
||||
|
||||
CLI Example::
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@ -2363,7 +2363,6 @@ def restart(name, path=None, lxc_config=None, force=False):
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
lxc_config
|
||||
|
||||
path to a lxc config file
|
||||
config file will be guessed from container name otherwise
|
||||
|
||||
@ -2407,7 +2406,6 @@ def start(name, **kwargs):
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
lxc_config
|
||||
|
||||
path to a lxc config file
|
||||
config file will be guessed from container name otherwise
|
||||
|
||||
@ -2972,7 +2970,6 @@ def update_lxc_conf(name, lxc_conf, lxc_conf_unset, path=None):
|
||||
Edit LXC configuration options
|
||||
|
||||
path
|
||||
|
||||
path to the container parent
|
||||
default: /var/lib/lxc (system default)
|
||||
|
||||
@ -3426,7 +3423,7 @@ def bootstrap(name,
|
||||
that the salt-master be configured to either auto-accept all keys or
|
||||
expect a signing request from the target host. Default: ``True``
|
||||
|
||||
path
|
||||
path
|
||||
path to the container parent
|
||||
default: /var/lib/lxc (system default)
|
||||
|
||||
@ -4291,9 +4288,9 @@ def read_conf(conf_file, out_format='simple'):
|
||||
dict, but can also return a more detailed structure including blank lines
|
||||
and comments.
|
||||
|
||||
out_format:
|
||||
set to 'simple' if you need the old and unsupported behavior.
|
||||
This wont support the multiple lxc values (eg: multiple network nics)
|
||||
out_format:
|
||||
set to 'simple' if you need the old and unsupported behavior.
|
||||
This wont support the multiple lxc values (eg: multiple network nics)
|
||||
|
||||
CLI Examples:
|
||||
|
||||
|
@ -4,7 +4,7 @@ Module providing a simple management interface to a marathon cluster.
|
||||
|
||||
Currently this only works when run through a proxy minion.
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -67,7 +67,7 @@ def _mine_send(load, opts):
|
||||
|
||||
|
||||
def _mine_get(load, opts):
|
||||
if opts.get('transport', '') == 'zeromq':
|
||||
if opts.get('transport', '') in ('zeromq', 'tcp'):
|
||||
try:
|
||||
load['tok'] = _auth().gen_token('salt')
|
||||
except AttributeError:
|
||||
|
@ -646,7 +646,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
|
||||
cmd.append('--pre')
|
||||
|
||||
if cert:
|
||||
cmd.append(['--cert', cert])
|
||||
cmd.extend(['--cert', cert])
|
||||
|
||||
if global_options:
|
||||
if isinstance(global_options, string_types):
|
||||
@ -697,7 +697,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
|
||||
allow_external = [p.strip() for p in allow_external.split(',')]
|
||||
|
||||
for pkg in allow_external:
|
||||
cmd.append('--allow-external {0}'.format(pkg))
|
||||
cmd.extend(['--allow-external', pkg])
|
||||
|
||||
if allow_unverified:
|
||||
if isinstance(allow_unverified, string_types):
|
||||
@ -705,7 +705,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
|
||||
[p.strip() for p in allow_unverified.split(',')]
|
||||
|
||||
for pkg in allow_unverified:
|
||||
cmd.append('--allow-unverified {0}'.format(pkg))
|
||||
cmd.extend(['--allow-unverified', pkg])
|
||||
|
||||
if process_dependency_links:
|
||||
cmd.append('--process-dependency-links')
|
||||
@ -717,7 +717,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
|
||||
raise CommandExecutionError('env_vars {0} is not a dictionary'.format(env_vars))
|
||||
|
||||
if trusted_host:
|
||||
cmd.append('--trusted-host {0}'.format(trusted_host))
|
||||
cmd.extend(['--trusted-host', trusted_host])
|
||||
|
||||
try:
|
||||
cmd_kwargs = dict(cwd=cwd, saltenv=saltenv, use_vt=use_vt, runas=user)
|
||||
|
@ -21,7 +21,7 @@ __virtualname__ = 'publish'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
return __virtualname__ if __opts__.get('transport', '') == 'zeromq' else False
|
||||
return __virtualname__ if __opts__.get('transport', '') in ('zeromq', 'tcp') else False
|
||||
|
||||
|
||||
def _parse_args(arg):
|
||||
|
@ -16,7 +16,7 @@ from salt.exceptions import CommandExecutionError
|
||||
# Import 3rd-party libs
|
||||
import yaml
|
||||
import salt.ext.six as six
|
||||
|
||||
from salt.ext.six.moves import range
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -143,16 +143,18 @@ def run(*args, **kwargs):
|
||||
'''
|
||||
puppet = _Puppet()
|
||||
|
||||
if args:
|
||||
# new args tuple to filter out agent/apply for _Puppet.arguments()
|
||||
buildargs = ()
|
||||
for arg in range(len(args)):
|
||||
# based on puppet documentation action must come first. making the same
|
||||
# assertion. need to ensure the list of supported cmds here matches
|
||||
# those defined in _Puppet.arguments()
|
||||
if args[0] in ['agent', 'apply']:
|
||||
puppet.subcmd = args[0]
|
||||
puppet.arguments(args[1:])
|
||||
else:
|
||||
# args will exist as an empty list even if none have been provided
|
||||
puppet.arguments(args)
|
||||
if args[arg] in ['agent', 'apply']:
|
||||
puppet.subcmd = args[arg]
|
||||
else:
|
||||
buildargs += (args[arg],)
|
||||
# args will exist as an empty list even if none have been provided
|
||||
puppet.arguments(buildargs)
|
||||
|
||||
puppet.kwargs.update(salt.utils.clean_kwargs(**kwargs))
|
||||
|
||||
|
@ -152,12 +152,11 @@ def add(name,
|
||||
# /etc/usermgmt.conf not present: defaults will be used
|
||||
pass
|
||||
|
||||
if salt.utils.is_true(createhome):
|
||||
if createhome:
|
||||
cmd.append('-m')
|
||||
elif (__grains__['kernel'] != 'NetBSD'
|
||||
and __grains__['kernel'] != 'OpenBSD'):
|
||||
cmd.append('-M')
|
||||
if createhome:
|
||||
cmd.append('-m')
|
||||
elif (__grains__['kernel'] != 'NetBSD'
|
||||
and __grains__['kernel'] != 'OpenBSD'):
|
||||
cmd.append('-M')
|
||||
|
||||
if home is not None:
|
||||
cmd.extend(['-d', home])
|
||||
|
@ -38,9 +38,13 @@ class NetapiClient(object):
|
||||
Note, this will return an invalid success if the master crashed or was
|
||||
not shut down cleanly.
|
||||
'''
|
||||
if self.opts['transport'] == 'tcp':
|
||||
ipc_file = 'publish_pull.ipc'
|
||||
else:
|
||||
ipc_file = 'workers.ipc'
|
||||
return os.path.exists(os.path.join(
|
||||
self.opts['sock_dir'],
|
||||
'workers.ipc'))
|
||||
ipc_file))
|
||||
|
||||
def run(self, low):
|
||||
'''
|
||||
|
@ -1193,6 +1193,7 @@ class Keys(LowDataAdapter):
|
||||
module <salt.wheel.key>` functions.
|
||||
'''
|
||||
|
||||
@cherrypy.config(**{'tools.salt_token.on': True})
|
||||
def GET(self, mid=None):
|
||||
'''
|
||||
Show the list of minion keys or detail on a specific key
|
||||
@ -1260,8 +1261,6 @@ class Keys(LowDataAdapter):
|
||||
minions:
|
||||
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
|
||||
'''
|
||||
self._cp_config['tools.salt_token.on'] = True
|
||||
|
||||
if mid:
|
||||
lowstate = [{
|
||||
'client': 'wheel',
|
||||
@ -1279,6 +1278,7 @@ class Keys(LowDataAdapter):
|
||||
|
||||
return {'return': next(result, {}).get('data', {}).get('return', {})}
|
||||
|
||||
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
|
||||
def POST(self, mid, keysize=None, force=None, **kwargs):
|
||||
r'''
|
||||
Easily generate keys for a minion and auto-accept the new key
|
||||
@ -1339,9 +1339,6 @@ class Keys(LowDataAdapter):
|
||||
|
||||
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
|
||||
'''
|
||||
self._cp_config['tools.hypermedia_out.on'] = False
|
||||
self._cp_config['tools.sessions.on'] = False
|
||||
|
||||
lowstate = [{
|
||||
'client': 'wheel',
|
||||
'fun': 'key.gen_accept',
|
||||
|
@ -69,7 +69,6 @@ import textwrap
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.locales
|
||||
import salt.output
|
||||
from salt.utils.locales import sdecode
|
||||
|
||||
@ -87,6 +86,8 @@ def output(data):
|
||||
|
||||
|
||||
def _format_host(host, data):
|
||||
host = sdecode(host)
|
||||
|
||||
colors = salt.utils.get_colors(
|
||||
__opts__.get('color'),
|
||||
__opts__.get('color_theme'))
|
||||
@ -161,7 +162,7 @@ def _format_host(host, data):
|
||||
if ret['result'] is None:
|
||||
hcolor = colors['LIGHT_YELLOW']
|
||||
tcolor = colors['LIGHT_YELLOW']
|
||||
comps = tname.split('_|-')
|
||||
comps = [sdecode(comp) for comp in tname.split('_|-')]
|
||||
if __opts__.get('state_output', 'full').lower() == 'filter':
|
||||
# By default, full data is shown for all types. However, return
|
||||
# data may be excluded by setting state_output_exclude to a
|
||||
@ -238,7 +239,7 @@ def _format_host(host, data):
|
||||
# but try to continue on errors
|
||||
pass
|
||||
try:
|
||||
comment = salt.utils.locales.sdecode(ret['comment'])
|
||||
comment = sdecode(ret['comment'])
|
||||
comment = comment.strip().replace(
|
||||
u'\n',
|
||||
u'\n' + u' ' * 14)
|
||||
@ -271,7 +272,7 @@ def _format_host(host, data):
|
||||
'tcolor': tcolor,
|
||||
'comps': comps,
|
||||
'ret': ret,
|
||||
'comment': comment,
|
||||
'comment': sdecode(comment),
|
||||
# This nukes any trailing \n and indents the others.
|
||||
'colors': colors
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ the chronos endpoint:
|
||||
proxytype: chronos
|
||||
base_url: http://my-chronos-master.mydomain.com:4400
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -171,24 +171,86 @@ def __virtual__():
|
||||
|
||||
def init(opts):
|
||||
'''
|
||||
This function gets called when the proxy starts up. For
|
||||
FX2 devices we just cache the credentials and hostname.
|
||||
This function gets called when the proxy starts up.
|
||||
We check opts to see if a fallback user and password are supplied.
|
||||
If they are present, and the primary credentials don't work, then
|
||||
we try the backup before failing.
|
||||
|
||||
Whichever set of credentials works is placed in the persistent
|
||||
DETAILS dictionary and will be used for further communication with the
|
||||
chassis.
|
||||
'''
|
||||
# Save the login details
|
||||
DETAILS['admin_username'] = opts['proxy']['admin_username']
|
||||
DETAILS['admin_password'] = opts['proxy']['admin_password']
|
||||
if 'host' not in opts['proxy']:
|
||||
log.critical('No "host" key found in pillar for this proxy')
|
||||
return False
|
||||
|
||||
DETAILS['host'] = opts['proxy']['host']
|
||||
|
||||
first_user = opts['proxy']['admin_username']
|
||||
first_password = opts['proxy']['admin_password']
|
||||
if 'fallback_admin_username' in opts['proxy'] \
|
||||
and 'fallback_admin_password' in opts['proxy']:
|
||||
fallback_available = True
|
||||
fallback_user = opts['proxy']['fallback_admin_username']
|
||||
fallback_password = opts['proxy']['fallback_admin_password']
|
||||
else:
|
||||
fallback_available = False
|
||||
|
||||
check_grains = _grains(DETAILS['host'], first_user, first_password)
|
||||
if check_grains:
|
||||
DETAILS['admin_username'] = opts['proxy']['admin_username']
|
||||
DETAILS['admin_password'] = opts['proxy']['admin_password']
|
||||
return True
|
||||
elif fallback_available and _grains(DETAILS['host'],
|
||||
fallback_user,
|
||||
fallback_password):
|
||||
log.info('Fallback credentials used'
|
||||
' to access chassis {0}'.format(opts['proxy']['host']))
|
||||
DETAILS['admin_username'] = opts['proxy']['fallback_admin_username']
|
||||
DETAILS['admin_password'] = opts['proxy']['fallback_admin_password']
|
||||
return True
|
||||
else:
|
||||
log.critical('Neither the primary nor the fallback credentials'
|
||||
' were able to access the chassis '
|
||||
' at {0}'.format(opts['proxy']['host']))
|
||||
return False
|
||||
|
||||
|
||||
def admin_username():
|
||||
return DETAILS['admin_username']
|
||||
|
||||
|
||||
def admin_password():
|
||||
return DETAILS['admin_password']
|
||||
|
||||
|
||||
def host():
|
||||
return DETAILS['host']
|
||||
|
||||
|
||||
def _grains(host, user, password):
|
||||
'''
|
||||
Get the grains from the proxied device
|
||||
'''
|
||||
r = __salt__['dracr.system_info'](host=host,
|
||||
admin_username=user,
|
||||
admin_password=password)
|
||||
if r.get('retcode', 0) == 0:
|
||||
GRAINS_CACHE = r
|
||||
else:
|
||||
GRAINS_CACHE = {}
|
||||
return GRAINS_CACHE
|
||||
|
||||
|
||||
def grains():
|
||||
'''
|
||||
Get the grains from the proxied device
|
||||
'''
|
||||
if not GRAINS_CACHE:
|
||||
r = __salt__['dracr.system_info'](host=DETAILS['host'],
|
||||
admin_username=DETAILS['admin_username'],
|
||||
admin_password=DETAILS['admin_password'])
|
||||
GRAINS_CACHE = r
|
||||
return _grains(DETAILS['host'],
|
||||
DETAILS['admin_username'],
|
||||
DETAILS['admin_password'])
|
||||
|
||||
return GRAINS_CACHE
|
||||
|
||||
|
||||
|
@ -22,7 +22,7 @@ the marathon endpoint:
|
||||
proxytype: marathon
|
||||
base_url: http://my-marathon-master.mydomain.com:8080
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -42,6 +42,7 @@ from salt.exceptions import (
|
||||
SaltReqTimeoutError
|
||||
)
|
||||
from salt.utils.odict import OrderedDict, DefaultOrderedDict
|
||||
from salt.utils.locales import sdecode
|
||||
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
|
||||
import salt.utils.yamlloader as yamlloader
|
||||
|
||||
@ -507,6 +508,7 @@ class Compiler(object):
|
||||
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
|
||||
if chunk['order'] < 0:
|
||||
chunk['order'] = cap + 1000000 + chunk['order']
|
||||
chunk['name'] = sdecode(chunk['name'])
|
||||
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
|
||||
return chunks
|
||||
|
||||
|
@ -66,16 +66,11 @@ def present(name, timespec, tag=None, user=None, job=None):
|
||||
timespec)
|
||||
return ret
|
||||
|
||||
if __grains__['os_family'] == 'RedHat':
|
||||
echo_cmd = 'echo -e'
|
||||
else:
|
||||
echo_cmd = 'echo'
|
||||
|
||||
if tag:
|
||||
cmd = '{0} "### SALT: {4}\n{1}" | {2} {3}'.format(echo_cmd,
|
||||
job, binary, timespec, tag)
|
||||
stdin = '### SALT: {0}\n{1}'.format(tag, job)
|
||||
else:
|
||||
cmd = '{0} "{1}" | {2} {3}'.format(echo_cmd, name, binary, timespec)
|
||||
stdin = name
|
||||
cmd = '{0} {1}'.format(binary, timespec)
|
||||
|
||||
if user:
|
||||
luser = __salt__['user.info'](user)
|
||||
@ -83,9 +78,9 @@ def present(name, timespec, tag=None, user=None, job=None):
|
||||
ret['comment'] = 'User: {0} is not exists'.format(user)
|
||||
ret['result'] = False
|
||||
return ret
|
||||
ret['comment'] = __salt__['cmd.run']('{0}'.format(cmd), runas=user, python_shell=True)
|
||||
ret['comment'] = __salt__['cmd.run'](cmd, stdin=stdin, runas=user)
|
||||
else:
|
||||
ret['comment'] = __salt__['cmd.run']('{0}'.format(cmd), python_shell=True)
|
||||
ret['comment'] = __salt__['cmd.run'](cmd, stdin=stdin)
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -25,12 +25,18 @@ from __future__ import absolute_import
|
||||
# Import python libs
|
||||
import os
|
||||
import os.path
|
||||
import time
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
from salt.ext.six.moves import range
|
||||
|
||||
__virtualname__ = 'blockdev'
|
||||
|
||||
# Init logger
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
@ -130,12 +136,7 @@ def formatted(name, fs_type='ext4', **kwargs):
|
||||
ret['comment'] = '{0} does not exist'.format(name)
|
||||
return ret
|
||||
|
||||
blk = __salt__['cmd.run']('lsblk -o fstype {0}'.format(name)).splitlines()
|
||||
|
||||
if len(blk) == 1:
|
||||
current_fs = ''
|
||||
else:
|
||||
current_fs = blk[1]
|
||||
current_fs = _checkblk(name)
|
||||
|
||||
if current_fs == fs_type:
|
||||
ret['result'] = True
|
||||
@ -152,12 +153,36 @@ def formatted(name, fs_type='ext4', **kwargs):
|
||||
__salt__['blockdev.format'](name, fs_type, **kwargs)
|
||||
current_fs = __salt__['blockdev.fstype'](name)
|
||||
|
||||
if current_fs == fs_type:
|
||||
ret['comment'] = ('{0} has been formatted '
|
||||
'with {1}').format(name, fs_type)
|
||||
ret['changes'] = {'new': fs_type, 'old': current_fs}
|
||||
ret['result'] = True
|
||||
else:
|
||||
ret['comment'] = 'Failed to format {0}'.format(name)
|
||||
ret['result'] = False
|
||||
# Repeat lsblk check up to 10 times with 3s sleeping between each
|
||||
# to avoid lsblk failing although mkfs has succeeded
|
||||
# see https://github.com/saltstack/salt/issues/25775
|
||||
for i in range(10):
|
||||
|
||||
log.info('Check blk fstype attempt %s of 10', str(i+1))
|
||||
current_fs = _checkblk(name)
|
||||
|
||||
if current_fs == fs_type:
|
||||
ret['comment'] = ('{0} has been formatted '
|
||||
'with {1}').format(name, fs_type)
|
||||
ret['changes'] = {'new': fs_type, 'old': current_fs}
|
||||
ret['result'] = True
|
||||
return ret
|
||||
|
||||
if current_fs == '':
|
||||
log.info('Waiting 3s before next check')
|
||||
time.sleep(3)
|
||||
else:
|
||||
break
|
||||
|
||||
ret['comment'] = 'Failed to format {0}'.format(name)
|
||||
ret['result'] = False
|
||||
return ret
|
||||
|
||||
|
||||
def _checkblk(name):
|
||||
'''
|
||||
Check if the blk exists and return its fstype if ok
|
||||
'''
|
||||
|
||||
blk = __salt__['cmd.run']('lsblk -o fstype {0}'.format(name)).splitlines()
|
||||
return '' if len(blk) == 1 else blk[1]
|
||||
|
@ -42,17 +42,31 @@ from __future__ import absolute_import
|
||||
# Import Python libs
|
||||
import logging
|
||||
import json
|
||||
import xml.etree.cElementTree as xml
|
||||
|
||||
# Import 3rd party libs
|
||||
try:
|
||||
from salt._compat import ElementTree as ET
|
||||
HAS_ELEMENT_TREE = True
|
||||
except ImportError:
|
||||
HAS_ELEMENT_TREE = False
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'boto_cfn'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if boto is available.
|
||||
Only load if elementtree xml library and boto are available.
|
||||
'''
|
||||
return 'boto_cfn.exists' in __salt__
|
||||
if not HAS_ELEMENT_TREE:
|
||||
return (False, 'Cannot load {0} state: ElementTree library unavailable'.format(__virtualname__))
|
||||
|
||||
if 'boto_cfn.exists' in __salt__:
|
||||
return True
|
||||
else:
|
||||
return (False, 'Cannot load {0} state: boto_cfn module unavailable'.format(__virtualname__))
|
||||
|
||||
|
||||
def present(name, template_body=None, template_url=None, parameters=None, notification_arns=None, disable_rollback=None,
|
||||
@ -246,7 +260,7 @@ def _validate(template_body=None, template_url=None, region=None, key=None, keyi
|
||||
def _get_error(error):
|
||||
# Converts boto exception to string that can be used to output error.
|
||||
error = '\n'.join(error.split('\n')[1:])
|
||||
error = xml.fromstring(error)
|
||||
error = ET.fromstring(error)
|
||||
code = error[0][1].text
|
||||
message = error[0][2].text
|
||||
return code, message
|
||||
|
@ -1,7 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage IAM roles.
|
||||
=================
|
||||
Manage IAM objects
|
||||
==================
|
||||
|
||||
.. versionadded:: 2015.8.0
|
||||
|
||||
@ -100,7 +100,7 @@ passed in as a dict, or as a string to pull from pillars or minion config:
|
||||
boto_iam.server_cert_absent:
|
||||
- name: mycert
|
||||
|
||||
. code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
create keys for user:
|
||||
boto_iam.keys_present:
|
||||
- name: myusername
|
||||
@ -116,7 +116,6 @@ from __future__ import absolute_import
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import xml.etree.cElementTree as xml
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils
|
||||
@ -126,14 +125,29 @@ import salt.ext.six as six
|
||||
from salt.ext.six import string_types
|
||||
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
|
||||
|
||||
# Import 3rd party libs
|
||||
try:
|
||||
from salt._compat import ElementTree as ET
|
||||
HAS_ELEMENT_TREE = True
|
||||
except ImportError:
|
||||
HAS_ELEMENT_TREE = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'boto_cfn'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if boto is available.
|
||||
Only load if elementtree xml library and boto are available.
|
||||
'''
|
||||
return 'boto_iam.get_user' in __salt__
|
||||
if not HAS_ELEMENT_TREE:
|
||||
return (False, 'Cannot load {0} state: ElementTree library unavailable'.format(__virtualname__))
|
||||
|
||||
if 'boto_iam.get_user' in __salt__:
|
||||
return True
|
||||
else:
|
||||
return (False, 'Cannot load {0} state: boto_iam module unavailable'.format(__virtualname__))
|
||||
|
||||
|
||||
def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile=True, region=None, key=None, keyid=None, profile=None):
|
||||
@ -951,7 +965,7 @@ def server_cert_present(name, public_key, private_key, cert_chain=None, path=Non
|
||||
def _get_error(error):
|
||||
# Converts boto exception to string that can be used to output error.
|
||||
error = '\n'.join(error.split('\n')[1:])
|
||||
error = xml.fromstring(error)
|
||||
error = ET.fromstring(error)
|
||||
code = error[0][1].text
|
||||
message = error[0][2].text
|
||||
return code, message
|
||||
|
@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage IAM roles
|
||||
================
|
||||
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -10,7 +10,7 @@ Configure Chronos jobs via a salt proxy.
|
||||
command: "echo 'hi'"
|
||||
owner: "me@mycompany.com"
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import copy
|
||||
|
@ -87,11 +87,37 @@ structure:
|
||||
snmp: nonpublic
|
||||
password: saltstack1
|
||||
|
||||
And to go with it, here's an example state that pulls the data from pillar
|
||||
And to go with it, here's an example state that pulls the data from pillar.
|
||||
This example assumes that the pillar data would be structured like
|
||||
|
||||
Pillar:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
{% set details = pillar['chassis'] with context %}
|
||||
proxy:
|
||||
host: 192.168.1.1
|
||||
admin_username: root
|
||||
admin_password: sekrit
|
||||
fallback_admin_username: root
|
||||
fallback_admin_password: old_sekrit
|
||||
proxytype: fx2
|
||||
|
||||
chassis:
|
||||
name: fx2-1
|
||||
username: root
|
||||
datacenter: UT1
|
||||
location: UT1
|
||||
management_mode: 2
|
||||
idrac_launch: 0
|
||||
slot_names:
|
||||
1: blade1
|
||||
2: blade2
|
||||
|
||||
State:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
{% set details = pillar.get('proxy:chassis', {}) %}
|
||||
standup-step1:
|
||||
dellchassis.chassis:
|
||||
- name: {{ details['name'] }}
|
||||
|
@ -75,7 +75,7 @@ def __virtual__():
|
||||
_validate_input, globals()
|
||||
)
|
||||
return __virtualname__
|
||||
return (False, __modules__.missing_fun_string('dockerng.version')) # pylint: disable=E0602
|
||||
return (False, __salt__.missing_fun_string('dockerng.version'))
|
||||
|
||||
|
||||
def _format_comments(comments):
|
||||
|
@ -252,6 +252,7 @@ import salt.payload
|
||||
import salt.utils
|
||||
import salt.utils.templates
|
||||
import salt.utils.url
|
||||
from salt.utils.locales import sdecode
|
||||
from salt.exceptions import CommandExecutionError
|
||||
from salt.serializers import yaml as yaml_serializer
|
||||
from salt.serializers import json as json_serializer
|
||||
@ -2109,7 +2110,7 @@ def recurse(name,
|
||||
recursively removed so that symlink creation can proceed. This
|
||||
option is usually not needed except in special circumstances.
|
||||
'''
|
||||
name = os.path.expanduser(name)
|
||||
name = os.path.expanduser(sdecode(name))
|
||||
|
||||
user = _test_owner(kwargs, user=user)
|
||||
if salt.utils.is_windows():
|
||||
@ -2347,7 +2348,7 @@ def recurse(name,
|
||||
# the file to copy from; it is either a normal file or an
|
||||
# empty dir(if include_empty==true).
|
||||
|
||||
relname = os.path.relpath(fn_, srcpath)
|
||||
relname = sdecode(os.path.relpath(fn_, srcpath))
|
||||
if relname.startswith('..'):
|
||||
continue
|
||||
|
||||
|
@ -11,7 +11,7 @@ Configure Marathon apps via a salt proxy.
|
||||
mem: 10
|
||||
instances: 3
|
||||
|
||||
.. versionadded:: Boron
|
||||
.. versionadded:: 2015.8.2
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import copy
|
||||
|
@ -171,7 +171,7 @@ def present(name,
|
||||
{'old': tags,
|
||||
'new': new_tags}})
|
||||
try:
|
||||
existing_perms = __salt__['rabbitmq.list_user_permissions'](name, runas=runas)[0]
|
||||
existing_perms = __salt__['rabbitmq.list_user_permissions'](name, runas=runas)
|
||||
except CommandExecutionError as err:
|
||||
ret['comment'] = 'Error: {0}'.format(err)
|
||||
return ret
|
||||
|
@ -115,7 +115,7 @@ def _changes(name,
|
||||
wanted_groups.remove(default_grp)
|
||||
if _group_changes(lusr['groups'], wanted_groups, remove_groups):
|
||||
change['groups'] = wanted_groups
|
||||
if home and lusr['home'] != home:
|
||||
if home and lusr['home'] != home and createhome:
|
||||
change['home'] = home
|
||||
if createhome:
|
||||
newhome = home if home else lusr['home']
|
||||
@ -340,20 +340,24 @@ def present(name,
|
||||
mapped to the specified drive. Must be a letter followed by a colon.
|
||||
Because of the colon, the value must be surrounded by single quotes. ie:
|
||||
- win_homedrive: 'U:
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
win_profile (Windows Only)
|
||||
The custom profile directory of the user. Uses default value of
|
||||
underlying system if not set.
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
win_logonscript (Windows Only)
|
||||
The full path to the logon script to run when the user logs in.
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
win_description (Windows Only)
|
||||
A brief description of the purpose of the users account.
|
||||
.. versionchanged:: 2015.8.0
|
||||
|
||||
.. versionchanged:: 2015.8.0
|
||||
'''
|
||||
if fullname is not None:
|
||||
fullname = salt.utils.locales.sdecode(fullname)
|
||||
@ -589,6 +593,9 @@ def present(name,
|
||||
if __salt__['user.add'](**params):
|
||||
ret['comment'] = 'New user {0} created'.format(name)
|
||||
ret['changes'] = __salt__['user.info'](name)
|
||||
if not createhome:
|
||||
# pwd incorrectly reports presence of home
|
||||
ret['changes']['home'] = ''
|
||||
if 'shadow.info' in __salt__ and not salt.utils.is_windows():
|
||||
if password and not empty_password:
|
||||
__salt__['shadow.set_password'](name, password)
|
||||
|
@ -86,7 +86,7 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
|
||||
opts['master_uri'] = kwargs['master_uri']
|
||||
return (opts['pki_dir'], # where the keys are stored
|
||||
opts['id'], # minion ID
|
||||
opts['master_uri'], # master ID
|
||||
opts['master_uri'],
|
||||
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
|
||||
)
|
||||
|
||||
|
@ -281,11 +281,11 @@ class SerializerExtension(Extension, object):
|
||||
|
||||
**Load tags**
|
||||
|
||||
Salt implements **import_yaml** and **import_json** tags. They work like
|
||||
Salt implements ``import_yaml`` and ``import_json`` tags. They work like
|
||||
the `import tag`_, except that the document is also deserialized.
|
||||
|
||||
Syntaxes are {% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}
|
||||
and {% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}
|
||||
Syntaxes are ``{% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}``
|
||||
and ``{% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}``
|
||||
|
||||
For example:
|
||||
|
||||
|
@ -733,20 +733,23 @@ class Schedule(object):
)
)

if 'return_job' in data and not data['return_job']:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
mret['jid'] = 'req'
channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
try:
channel.send(load)
except salt.exceptions.SaltReqTimeoutError:
log.error('Timeout error during scheduled job: {0}. Salt master could not be reached.'.format(ret['fun']))
# Only attempt to return data to the master
# if the scheduled job is running on a minion.
if '__role' in self.opts and self.opts['__role'] == 'minion':
if 'return_job' in data and not data['return_job']:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
mret['jid'] = 'req'
channel = salt.transport.Channel.factory(self.opts, usage='salt_schedule')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
try:
channel.send(load)
except salt.exceptions.SaltReqTimeoutError:
log.error('Timeout error during scheduled job: {0}. Salt master could not be reached.'.format(ret['fun']))

except Exception:
log.exception("Unhandled exception running {0}".format(ret['fun']))

@ -29,6 +29,24 @@ try:
except ImportError:
HAS_CERTIFI = False

try:
import singledispatch
HAS_SINGLEDISPATCH = True
except ImportError:
HAS_SINGLEDISPATCH = False

try:
import singledispatch_helpers
HAS_SINGLEDISPATCH_HELPERS = True
except ImportError:
HAS_SINGLEDISPATCH_HELPERS = False

try:
import backports_abc
HAS_BACKPORTS_ABC = True
except ImportError:
HAS_BACKPORTS_ABC = False

try:
import markupsafe
HAS_MARKUPSAFE = True
@ -99,6 +117,15 @@ def get_tops(extra_mods='', so_mods=''):
if HAS_CERTIFI:
tops.append(os.path.dirname(certifi.__file__))

if HAS_SINGLEDISPATCH:
tops.append(singledispatch.__file__.replace('.pyc', '.py'))

if HAS_SINGLEDISPATCH_HELPERS:
tops.append(singledispatch_helpers.__file__.replace('.pyc', '.py'))

if HAS_BACKPORTS_ABC:
tops.append(backports_abc.__file__.replace('.pyc', '.py'))

if HAS_SSL_MATCH_HOSTNAME:
tops.append(os.path.dirname(os.path.dirname(ssl_match_hostname.__file__)))
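
For context, ``get_tops`` can be called directly to inspect which module roots would be bundled into the salt-ssh thin tarball; the optional modules guarded above are included only when they import cleanly. A small hedged sketch follows (the resulting paths vary by environment, and the ``extra_mods`` value is just an example argument):

    # Hedged sketch: print the module roots the thin tarball would include.
    import pprint

    import salt.utils.thin

    tops = salt.utils.thin.get_tops(extra_mods='yaml')
    pprint.pprint(tops)  # singledispatch, backports_abc, etc. appear only if importable
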
@ -243,3 +243,137 @@ def get_network_adapter_type(adapter_type):
return vim.vm.device.VirtualE1000()
elif adapter_type == "e1000e":
return vim.vm.device.VirtualE1000e()


def list_objects(service_instance, vim_object, properties=None):
'''
Returns a simple list of objects from a given service instance.

service_instance
The Service Instance for which to obtain a list of objects.

vim_object
The type of content for which to obtain information.

properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
'''
if properties is None:
properties = ['name']

items = []
item_list = get_mors_with_properties(service_instance, vim_object, properties)
for item in item_list:
items.append(item['name'])
return items


def list_datacenters(service_instance):
'''
Returns a list of datacenters associated with a given service instance.

service_instance
The Service Instance Object from which to obtain datacenters.
'''
return list_objects(service_instance, vim.Datacenter)


def list_clusters(service_instance):
'''
Returns a list of clusters associated with a given service instance.

service_instance
The Service Instance Object from which to obtain clusters.
'''
return list_objects(service_instance, vim.ClusterComputeResource)


def list_datastore_clusters(service_instance):
'''
Returns a list of datastore clusters associated with a given service instance.

service_instance
The Service Instance Object from which to obtain datastore clusters.
'''
return list_objects(service_instance, vim.StoragePod)


def list_datastores(service_instance):
'''
Returns a list of datastores associated with a given service instance.

service_instance
The Service Instance Object from which to obtain datastores.
'''
return list_objects(service_instance, vim.Datastore)


def list_hosts(service_instance):
'''
Returns a list of hosts associated with a given service instance.

service_instance
The Service Instance Object from which to obtain hosts.
'''
return list_objects(service_instance, vim.HostSystem)


def list_resourcepools(service_instance):
'''
Returns a list of resource pools associated with a given service instance.

service_instance
The Service Instance Object from which to obtain resource pools.
'''
return list_objects(service_instance, vim.ResourcePool)


def list_networks(service_instance):
'''
Returns a list of networks associated with a given service instance.

service_instance
The Service Instance Object from which to obtain networks.
'''
return list_objects(service_instance, vim.Network)


def list_vms(service_instance):
'''
Returns a list of VMs associated with a given service instance.

service_instance
The Service Instance Object from which to obtain VMs.
'''
return list_objects(service_instance, vim.VirtualMachine)


def list_folders(service_instance):
'''
Returns a list of folders associated with a given service instance.

service_instance
The Service Instance Object from which to obtain folders.
'''
return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
'''
Returns a list of distributed virtual switches associated with a given service instance.

service_instance
The Service Instance Object from which to obtain distributed virtual switches.
'''
return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
'''
Returns a list of vApps associated with a given service instance.

service_instance
The Service Instance Object from which to obtain vApps.
'''
return list_objects(service_instance, vim.VirtualApp)
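
A hedged sketch of how these helpers might be exercised from an interactive session follows; it assumes a recent pyVmomi that accepts ``sslContext``, and the vCenter hostname and credentials are placeholders.

    # Minimal sketch only: connect with pyVmomi and list inventory names through
    # the helpers above. Host, user, and password are placeholders.
    import ssl

    from pyVim.connect import SmartConnect, Disconnect
    from pyVmomi import vim

    import salt.utils.vmware

    context = ssl._create_unverified_context()  # lab use only; verify certificates in production
    si = SmartConnect(host='vcenter.example.com', user='administrator@vsphere.local',
                      pwd='hunter2', sslContext=context)
    try:
        print(salt.utils.vmware.list_datacenters(si))             # e.g. ['dc01']
        print(salt.utils.vmware.list_hosts(si))                   # e.g. ['esxi01.example.com']
        # list_objects underlies every wrapper above; an explicit call:
        print(salt.utils.vmware.list_objects(si, vim.Datastore))  # same result as list_datastores(si)
    finally:
        Disconnect(si)
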
@ -156,7 +156,8 @@ class CallTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
'open_mode': True,
'log_file': logfile,
'log_level': 'quiet',
'log_level_logfile': 'info'
'log_level_logfile': 'info',
'transport': self.master_opts['transport'],
}

# Remove existing logfile

@ -170,7 +170,7 @@ class MatchTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
self.assertNotIn('minion', data.replace('sub_minion', 'stub'))
data = self.run_salt('-G "planets:pluto" test.ping')
expect = None
if self.master_opts['transport'] == 'zeromq':
if self.master_opts['transport'] in ('zeromq', 'tcp'):
expect = (
'No minions matched the target. '
'No command was sent, no jid was '

@ -21,7 +21,7 @@ class WheelModuleTest(integration.TestCase, integration.AdaptedConfigurationTest
'''
Configure an eauth user to test with
'''
self.wheel = salt.wheel.Wheel(self.get_config('client_config'))
self.wheel = salt.wheel.Wheel(dict(self.get_config('client_config')))

def test_master_call(self):
'''
@ -45,7 +45,7 @@ class WheelModuleTest(integration.TestCase, integration.AdaptedConfigurationTest
The choice of using key.list_all for this is arbitrary and should be
changed to some mocked function that is more testing friendly.
'''
auth = salt.auth.LoadAuth(self.get_config('client_config'))
auth = salt.auth.LoadAuth(dict(self.get_config('client_config')))
token = auth.mk_token(self.eauth_creds)

token = auth.mk_token({

@ -39,7 +39,8 @@ class RabbitmqUserTestCase(TestCase):
name = 'foo'
passwd = 'password'
tag = 'user'
perms = [{'/': ['.*', '.*']}]
existing_perms = {'/': ['.*', '.*']}
perms = [existing_perms]

ret = {'name': name,
'changes': {},
@ -49,7 +50,7 @@ class RabbitmqUserTestCase(TestCase):
mock = MagicMock(side_effect=[True, False, True, True,
True, True, True])
mock_dct = MagicMock(return_value={name: set(tag)})
mock_pr = MagicMock(return_value=perms)
mock_pr = MagicMock(return_value=existing_perms)
mock_add = MagicMock(return_value={'Added': name})
with patch.dict(rabbitmq_user.__salt__,
{'rabbitmq.user_exists': mock,