commit 62af0d7f07
Merge from upstream

Merge branch 'develop' of github.com:saltstack/salt into develop
@@ -265,13 +265,13 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``:

 base_ec2_private:
   provider: my-ec2-southeast-private-ips
   image: ami-e565ba8c
-  size: t1.micro
+  size: t2.micro
   ssh_username: ec2-user

 base_ec2_public:
   provider: my-ec2-southeast-public-ips
   image: ami-e565ba8c
-  size: t1.micro
+  size: t2.micro
   ssh_username: ec2-user

 base_ec2_db:
@@ -13,7 +13,7 @@ the right salt minion commands are automatically available on the right
 systems.

 It is important to remember that grains are bits of information loaded when
-the salt minion starts, so this information is static. This means that the
+the salt minion starts, so this information is static until explicitly changed, and then it needs refreshing for the changes to show in the grains. This means that the
 information in grains is unchanging, therefore the nature of the data is
 static. So grains information are things like the running kernel, or the
 operating system.
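(In practice, after changing a custom grain the cached grain data has to be refreshed before the change becomes visible; something along the lines of ``salt '*' saltutil.sync_grains``, a command name assumed from Salt's saltutil module, or a minion restart accomplishes this.)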
@@ -189,7 +189,7 @@ controlled device and make sure it is really available.
 Here is an example proxymodule used to interface to a *very* simple REST
 server. Code for the server is in the `salt-contrib GitHub repository <https://github.com/saltstack/salt-contrib/proxyminion_rest_example>`_

-This proxymodule enables "service" enumration, starting, stopping, restarting,
+This proxymodule enables "service" enumeration, starting, stopping, restarting,
 and status; "package" installation, and a ping.

 .. code-block:: python
@@ -417,7 +417,7 @@ are interested in. Here's an example:
 The __proxyenabled__ directive
 ------------------------------

-Salt execution moduless, by, and large, cannot "automatically" work
+Salt execution modules, by and large, cannot "automatically" work
 with proxied devices. Execution modules like ``pkg`` or ``sqlite3`` have no
 meaning on a network switch or a housecat. For an execution module to be
 available to a proxy-minion, the ``__proxyenabled__`` variable must be defined
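As a concrete illustration of the directive this hunk documents, a minimal sketch of how an execution module opts in (the module and proxy-type names here are hypothetical):

def __virtual__():
    # Only load when targeted at a compatible platform/proxy.
    return 'my_module'

# Declares which proxy types this module may be loaded for; '*' is
# commonly used to mean "any proxy type".
__proxyenabled__ = ['rest_sample']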
@@ -8,6 +8,11 @@
     <true/>
     <key>KeepAlive</key>
     <true/>
+    <key>EnvironmentVariables</key>
+    <dict>
+        <key>PATH</key>
+        <string>/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin</string>
+    </dict>
     <key>ProgramArguments</key>
     <array>
         <string>/usr/local/bin/salt-minion</string>
@@ -14,6 +14,7 @@ import os
 import struct

 # Import Salt Libs
+import salt.utils
 from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin

 __virtualname__ = 'btmp'
@@ -70,7 +71,7 @@ def beacon(config):
         btmp: {}
     '''
     ret = []
-    with open(BTMP, 'rb') as fp_:
+    with salt.utils.fopen(BTMP, 'rb') as fp_:
         loc = __context__.get(LOC_KEY, 0)
         if loc == 0:
             fp_.seek(0, 2)
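The open() to salt.utils.fopen() swaps throughout this commit are not cosmetic: Salt's fopen wrapper marks the descriptor close-on-exec so open handles are not leaked into child processes. A rough, self-contained approximation of what the wrapper does (the real helper carries more logic):

import fcntl


def fopen(*args, **kwargs):
    # Open normally, then set FD_CLOEXEC on the descriptor so spawned
    # children do not inherit the file handle.
    fhandle = open(*args, **kwargs)
    flags = fcntl.fcntl(fhandle.fileno(), fcntl.F_GETFD)
    fcntl.fcntl(fhandle.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
    return fhandle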
@@ -16,6 +16,9 @@ import struct
 # Import 3rd-party libs
 from salt.ext.six.moves import range

+# Import salt libs
+import salt.utils
+
 __virtualname__ = 'wtmp'
 WTMP = '/var/log/wtmp'
 FMT = '<hI32s4s32s256siili4l20s'
@@ -74,7 +77,7 @@ def beacon(config):
         wtmp: {}
     '''
     ret = []
-    with open(WTMP, 'rb') as fp_:
+    with salt.utils.fopen(WTMP, 'rb') as fp_:
         loc = __context__.get(LOC_KEY, 0)
         if loc == 0:
             fp_.seek(0, 2)
@@ -401,7 +401,7 @@ class ProxyMinion(parsers.MinionOptionParser):  # pylint: disable=no-init

     def start(self, proxydetails):
         '''
-        Start the actual minion.
+        Start the actual proxy minion.

         If sub-classed, don't **ever** forget to run:
@@ -570,9 +570,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'aliyun',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'aliyun',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
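The same guard is repeated across every cloud driver below. The point of prepending ``vm_['profile'] and`` is short-circuiting: when an instance is created without a profile, ``vm_['profile']`` is None and the profile validation is skipped instead of failing. A minimal sketch with a stubbed-in validator (all names here are hypothetical):

def should_abort(vm_, is_profile_configured):
    # Mirrors the guard above: only validate when a profile was supplied.
    try:
        if vm_['profile'] and is_profile_configured(vm_['profile']) is False:
            return True
    except AttributeError:
        pass
    return False


always_invalid = lambda profile: False
print(should_abort({'profile': None}, always_invalid))    # False: no profile, check skipped
print(should_abort({'profile': 'base'}, always_invalid))  # True: profile fails validation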
@@ -223,9 +223,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'cloudstack',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'cloudstack',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -297,9 +297,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'digital_ocean',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'digital_ocean',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -2165,9 +2165,9 @@ def create(vm_=None, call=None):

     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'ec2',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'ec2',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -2029,9 +2029,9 @@ def create(vm_=None, call=None):

     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'gce',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'gce',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -2121,9 +2121,10 @@ def create(vm_=None, call=None):
         ssh_user, ssh_key = __get_ssh_credentials(vm_)
         vm_['ssh_host'] = __get_host(node_data, vm_)
         vm_['key_filename'] = ssh_key
         salt.utils.cloud.bootstrap(vm_, __opts__)

     log.info('Created Cloud VM {0[name]!r}'.format(vm_))
-    log.debug(
+    log.trace(
         '{0[name]!r} VM creation details:\n{1}'.format(
             vm_, pprint.pformat(node_dict)
         )
@@ -84,9 +84,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'gogrid',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'gogrid',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -244,9 +244,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'joyent',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'joyent',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -312,9 +312,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'aws',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'aws',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -199,9 +199,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'linode',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'linode',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -401,9 +401,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'azure',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'azure',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
|
@ -543,9 +543,9 @@ def create(vm_):
|
||||
'''
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
if config.is_profile_configured(__opts__,
|
||||
__active_provider_name__ or 'nova',
|
||||
vm_['profile']) is False:
|
||||
if vm_['profile'] and config.is_profile_configured(__opts__,
|
||||
__active_provider_name__ or 'nova',
|
||||
vm_['profile']) is False:
|
||||
return False
|
||||
except AttributeError:
|
||||
pass
|
||||
@@ -1054,3 +1054,111 @@ def virtual_interface_create(name, net_name, **kwargs):
     '''
     conn = get_conn()
     return conn.virtual_interface_create(name, net_name)
+
+
+def floating_ip_pool_list(call=None):
+    '''
+    List all floating IP pools
+
+    .. versionadded:: Boron
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The floating_ip_pool_list action must be called with -f or --function'
+        )
+
+    conn = get_conn()
+    return conn.floating_ip_pool_list()
+
+
+def floating_ip_list(call=None):
+    '''
+    List floating IPs
+
+    .. versionadded:: Boron
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The floating_ip_list action must be called with -f or --function'
+        )
+
+    conn = get_conn()
+    return conn.floating_ip_list()
+
+
+def floating_ip_create(kwargs, call=None):
+    '''
+    Allocate a floating IP
+
+    .. versionadded:: Boron
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The floating_ip_create action must be called with -f or --function'
+        )
+
+    if 'pool' not in kwargs:
+        log.error('pool is required')
+        return False
+
+    conn = get_conn()
+    return conn.floating_ip_create(kwargs['pool'])
+
+
+def floating_ip_delete(kwargs, call=None):
+    '''
+    De-allocate a floating IP
+
+    .. versionadded:: Boron
+    '''
+    if call != 'function':
+        raise SaltCloudSystemExit(
+            'The floating_ip_delete action must be called with -f or --function'
+        )
+
+    if 'floating_ip' not in kwargs:
+        log.error('floating_ip is required')
+        return False
+
+    conn = get_conn()
+    return conn.floating_ip_delete(kwargs['floating_ip'])
+
+
+def floating_ip_associate(name, kwargs, call=None):
+    '''
+    Associate a floating IP address to a server
+
+    .. versionadded:: Boron
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The floating_ip_associate action must be called with -a or --action.'
+        )
+
+    if 'floating_ip' not in kwargs:
+        log.error('floating_ip is required')
+        return False
+
+    conn = get_conn()
+    conn.floating_ip_associate(name, kwargs['floating_ip'])
+    return list_nodes()[name]
+
+
+def floating_ip_disassociate(name, kwargs, call=None):
+    '''
+    Disassociate a floating IP from a server
+
+    .. versionadded:: Boron
+    '''
+    if call != 'action':
+        raise SaltCloudSystemExit(
+            'The floating_ip_disassociate action must be called with -a or --action.'
+        )
+
+    if 'floating_ip' not in kwargs:
+        log.error('floating_ip is required')
+        return False
+
+    conn = get_conn()
+    conn.floating_ip_disassociate(name, kwargs['floating_ip'])
+    return list_nodes()[name]
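Given the ``call`` checks above, the new functions are invoked either as provider functions (``-f``/``--function``) or as instance actions (``-a``/``--action``), for example ``salt-cloud -f floating_ip_create my-nova-provider pool=public`` or ``salt-cloud -a floating_ip_associate myvm floating_ip=203.0.113.10``; the provider name, VM name, and address here are placeholders.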
@@ -825,9 +825,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'opennebula',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'opennebula',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -595,9 +595,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'openstack',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'openstack',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -271,9 +271,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'parallels',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'parallels',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -488,9 +488,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'proxmox',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'proxmox',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -640,9 +640,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'qingcloud',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'qingcloud',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -183,9 +183,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'rackspace',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'rackspace',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -191,9 +191,9 @@ def create(server_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'scaleway',
-                                        server_['profile']) is False:
+        if server_['profile'] and config.is_profile_configured(__opts__,
+                                                               __active_provider_name__ or 'scaleway',
+                                                               server_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -234,9 +234,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'softlayer',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'softlayer',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -205,9 +205,9 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'softlayer_hw',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'softlayer_hw',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -2154,7 +2154,7 @@ def create(vm_):
     '''
     try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
+        if vm_['profile'] and config.is_profile_configured(__opts__,
                                         __active_provider_name__ or 'vmware',
                                         vm_['profile']) is False:
             return False
@@ -230,9 +230,9 @@ def create(vm_):
     '''
    try:
         # Check for required profile parameters before sending any API calls.
-        if config.is_profile_configured(__opts__,
-                                        __active_provider_name__ or 'vsphere',
-                                        vm_['profile']) is False:
+        if vm_['profile'] and config.is_profile_configured(__opts__,
+                                                           __active_provider_name__ or 'vsphere',
+                                                           vm_['profile']) is False:
             return False
     except AttributeError:
         pass
@@ -728,6 +728,13 @@ VALID_OPTS = {

     # If set, all minion exec module actions will be rerouted through sudo as this user
     'sudo_user': str,

+    # HTTP request timeout in seconds. Applied to tornado HTTP fetch functions
+    # like cp.get_url; should be greater than the overall download time.
+    'http_request_timeout': float,
+
+    # HTTP request max file content size.
+    'http_max_body': int,
 }

 # default configurations
@@ -911,6 +918,8 @@ DEFAULT_MINION_OPTS = {
     'cache_sreqs': True,
     'cmd_safe': True,
     'sudo_user': '',
+    'http_request_timeout': 1 * 60 * 60.0,  # 1 hour
+    'http_max_body': 100 * 1024 * 1024 * 1024,  # 100GB
 }

 DEFAULT_MASTER_OPTS = {
@@ -1133,6 +1142,8 @@ DEFAULT_MASTER_OPTS = {
     'rotate_aes_key': True,
     'cache_sreqs': True,
     'dummy_pub': False,
+    'http_request_timeout': 1 * 60 * 60.0,  # 1 hour
+    'http_max_body': 100 * 1024 * 1024 * 1024,  # 100GB
 }

 # ----- Salt Cloud Configuration Defaults ----------------------------------->
@@ -1071,26 +1071,32 @@ def os_data():
         try:
             os.stat('/run/systemd/system')
             grains['init'] = 'systemd'
-        except OSError:
+        except (OSError, IOError):
             if os.path.exists('/proc/1/cmdline'):
                 with salt.utils.fopen('/proc/1/cmdline') as fhr:
                     init_cmdline = fhr.read().replace('\x00', ' ').split()
                 init_bin = salt.utils.which(init_cmdline[0])
                 if init_bin is not None:
-                    supported_inits = ('upstart', 'sysvinit', 'systemd')
+                    supported_inits = (six.b('upstart'), six.b('sysvinit'), six.b('systemd'))
                     edge_len = max(len(x) for x in supported_inits) - 1
-                    buf_size = __opts__['file_buffer_size']
-                    with open(init_bin, 'rb') as fp_:
+                    try:
+                        buf_size = __opts__['file_buffer_size']
+                    except KeyError:
+                        # Default to the value of file_buffer_size for the minion
+                        buf_size = 262144
+                    try:
+                        with salt.utils.fopen(init_bin, 'rb') as fp_:
                             buf = True
-                            edge = ''
+                            edge = six.b('')
                             buf = fp_.read(buf_size).lower()
                             while buf:
                                 buf = edge + buf
                                 for item in supported_inits:
                                     if item in buf:
+                                        if six.PY3:
+                                            item = item.decode('utf-8')
                                         grains['init'] = item
-                                        buf = ''
+                                        buf = six.b('')
                                         break
                                 edge = buf[-edge_len:]
                                 buf = fp_.read(buf_size).lower()
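The ``six.b()`` wrappers matter because the init binary is read in binary mode: on Python 3, testing a ``str`` for containment in ``bytes`` raises ``TypeError``. A self-contained sketch of the scan, with plain byte literals standing in for ``six.b()`` and a made-up buffer:

buf = b'\x00/sbin/init\x00... linked against systemd ...'.lower()
supported_inits = (b'upstart', b'sysvinit', b'systemd')
for item in supported_inits:
    if item in buf:                  # bytes-in-bytes: valid on Py2 and Py3
        print(item.decode('utf-8'))  # decode only for the final grain value
        break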
@@ -707,6 +707,42 @@ def sdb(opts, functions=None, whitelist=None):
     )


+def pkgdb(opts):
+    '''
+    Return modules for SPM's package database
+
+    .. versionadded:: 2015.8.0
+    '''
+    return LazyLoader(
+        _module_dirs(
+            opts,
+            'pkgdb',
+            'pkgdb',
+            base_path=os.path.join(SALT_BASE_PATH, 'spm')
+        ),
+        opts,
+        tag='pkgdb'
+    )
+
+
+def pkgfiles(opts):
+    '''
+    Return modules for SPM's file handling
+
+    .. versionadded:: 2015.8.0
+    '''
+    return LazyLoader(
+        _module_dirs(
+            opts,
+            'pkgfiles',
+            'pkgfiles',
+            base_path=os.path.join(SALT_BASE_PATH, 'spm')
+        ),
+        opts,
+        tag='pkgfiles'
+    )
+
+
 def clouds(opts):
     '''
     Return the cloud functions
@@ -1048,7 +1084,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
             if not self.initial_load:
                 self._reload_submodules(mod)
         else:
-            with open(fpath, desc[1]) as fn_:
+            with salt.utils.fopen(fpath, desc[1]) as fn_:
                 mod = imp.load_module(
                     '{0}.{1}.{2}.{3}'.format(
                         self.loaded_base_name,
@@ -208,10 +208,8 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=None):
             if not isinstance(value, six.text_type):
                 # Must support Cassandra collection types.
                 # Namely, Cassandras set, list, and map collections.
-                if not isinstance(value, set):
-                    if not isinstance(value, list):
-                        if not isinstance(value, dict):
-                            value = str(value)
+                if not isinstance(value, (set, list, dict)):
+                    value = str(value)
             values[key] = value
         ret.append(values)
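The flattened check is behaviorally identical to the three nested ``if`` statements it replaces, because ``isinstance`` accepts a tuple of types:

for value in ({'k': 1}, [1, 2], {1, 2}, 42):
    if not isinstance(value, (set, list, dict)):
        value = str(value)
    print(type(value).__name__)  # dict, list, set, str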
@@ -289,8 +287,8 @@ def info(contact_points=None, port=None, cql_user=None, cql_pass=None):
                release_version,
                cql_version,
                schema_version,
-                  thrift_version
-                  from system.local
+               thrift_version
+               from system.local
                limit 1;'''

     ret = {}
@@ -490,7 +488,7 @@ def create_keyspace(keyspace, replication_strategy='SimpleStrategy', replication_factor=1,
         replication_map['replication_factor'] = replication_factor

     query = '''create keyspace {0}
-                  with replication = {1}
+               with replication = {1}
               and durable_writes = true;'''.format(keyspace, replication_map)

     try:
@@ -19,6 +19,7 @@ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=import-error

 # Import salt libs
 import salt.utils
+from salt.exceptions import SaltInvocationError

 # pylint: disable=import-error
@@ -34,9 +35,46 @@ def __virtual__():
         return False


-def _create_pbuilders():
+def _get_env(env):
+    '''
+    Get environment overrides dictionary to use in build process
+    '''
+    env_override = ""
+    if env is None:
+        return env_override
+    if not isinstance(env, dict):
+        raise SaltInvocationError(
+            '\'env\' must be a Python dictionary'
+        )
+    for key, value in env.items():
+        env_override += '{0}={1}\n'.format(key, value)
+        env_override += 'export {0}\n'.format(key)
+    return env_override
+
+
+def _create_pbuilders(env):
     '''
     Create the .pbuilder family of files in user's home directory
+
+    env
+        A list or dictionary of environment variables to be set prior to execution.
+        Example:
+
+        .. code-block:: yaml
+
+            - env:
+              - DEB_BUILD_OPTIONS: 'nocheck'
+
+        .. warning::
+
+            The above illustrates a common PyYAML pitfall, that **yes**,
+            **no**, **on**, **off**, **true**, and **false** are all loaded as
+            boolean ``True`` and ``False`` values, and must be enclosed in
+            quotes to be used as strings. More info on this (and other) PyYAML
+            idiosyncrasies can be found :doc:`here
+            </topics/troubleshooting/yaml_idiosyncrasies>`.
+
     '''
     hook_text = '''#!/bin/sh
 set -e
@@ -100,6 +138,11 @@ OTHERMIRROR="deb http://ftp.us.debian.org/debian/ testing main contrib non-free"
     with open(pbuilderrc, "w") as fow:
         fow.write('{0}'.format(pbldrc_text))

+    env_overrides = _get_env(env)
+    if env_overrides and not env_overrides.isspace():
+        with open(pbuilderrc, "a") as fow:
+            fow.write('{0}'.format(env_overrides))
+

 def _mk_tree():
     '''
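For reference, the shell fragment ``_get_env`` appends to ``pbuilderrc`` for a one-entry mapping looks like this (reproducing the loop from the hunk above):

env = {'DEB_BUILD_OPTIONS': 'nocheck'}
env_override = ''
for key, value in env.items():
    env_override += '{0}={1}\n'.format(key, value)
    env_override += 'export {0}\n'.format(key)
print(env_override)
# DEB_BUILD_OPTIONS=nocheck
# export DEB_BUILD_OPTIONS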
@@ -133,7 +176,7 @@ def _get_src(tree_base, source, saltenv='base'):
     shutil.copy(source, dest)


-def make_src_pkg(dest_dir, spec, sources, template=None, saltenv='base'):
+def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
     '''
     Create a platform specific source package from the given platform spec/control file and sources
@@ -145,7 +188,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
     This example command should build the libnacl SOURCE package and place it in
     /var/www/html/ on the minion
     '''
-    _create_pbuilders()
+    _create_pbuilders(env)
     tree_base = _mk_tree()
     ret = []
     if not os.path.isdir(dest_dir):
@@ -221,7 +264,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
     return ret


-def build(runas, tgt, dest_dir, spec, sources, deps, template, saltenv='base'):
+def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base'):
     '''
     Given the package destination directory, the tarball containing debian files (e.g. control)
     and package sources, use pbuilder to safely build the platform package
@@ -241,7 +284,7 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base'):
     except (IOError, OSError):
         pass
     dsc_dir = tempfile.mkdtemp()
-    dscs = make_src_pkg(dsc_dir, spec, sources, template, saltenv)
+    dscs = make_src_pkg(dsc_dir, spec, sources, env, template, saltenv)

     # dscs should only contain salt orig and debian tarballs and dsc file
     for dsc in dscs:
@@ -86,9 +86,18 @@ __virtualname__ = 'pkg'

 def __virtual__():
     '''
-    Load as 'pkg' on FreeBSD versions less than 10
+    Load as 'pkg' on FreeBSD versions less than 10.
+    Don't load on FreeBSD 9 when the config option
+    ``providers:pkg`` is set to 'pkgng'.
     '''
     if __grains__['os'] == 'FreeBSD' and float(__grains__['osrelease']) < 10:
+        providers = {}
+        if 'providers' in __opts__:
+            providers = __opts__['providers']
+        if providers and 'pkg' in providers and providers['pkg'] == 'pkgng':
+            log.debug('Configuration option \'providers:pkg\' is set to '
+                      '\'pkgng\', won\'t load old provider \'freebsdpkg\'.')
+            return False
         return __virtualname__
     return False
@@ -63,7 +63,7 @@ def wol(mac, bcast='255.255.255.255', destport=9):

 def ping(host, timeout=False, return_boolean=False):
     '''
-    Performs a ping to a host
+    Performs an ICMP ping to a host

     CLI Example:
@@ -50,10 +50,22 @@ __virtualname__ = 'pkg'

 def __virtual__():
     '''
-    Load as 'pkg' on FreeBSD 10 and greater
+    Load as 'pkg' on FreeBSD 10 and greater.
+    Load as 'pkg' on FreeBSD 9 when config option
+    ``providers:pkg`` is set to 'pkgng'.
     '''
     if __grains__['os'] == 'FreeBSD' and float(__grains__['osrelease']) >= 10:
         return __virtualname__
+    if __grains__['os'] == 'FreeBSD' and \
+            float(__grains__['osmajorrelease']) == 9:
+        providers = {}
+        if 'providers' in __opts__:
+            providers = __opts__['providers']
+        log.debug('__opts__.providers: {0}'.format(providers))
+        if providers and 'pkg' in providers and providers['pkg'] == 'pkgng':
+            log.debug('Configuration option \'providers:pkg\' is set to '
+                      '\'pkgng\', using \'pkgng\' in favor of \'freebsdpkg\'.')
+            return __virtualname__
     return False
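Both ``__virtual__`` implementations read the same minion-config shape (a ``providers`` mapping with a ``pkg`` key); a stripped-down sketch of the lookup they share:

__opts__ = {'providers': {'pkg': 'pkgng'}}  # what "providers:\n  pkg: pkgng" loads to
providers = __opts__.get('providers', {})
print(bool(providers) and providers.get('pkg') == 'pkgng')  # True: pkgng wins on FreeBSD 9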
@@ -55,7 +55,7 @@ def version(*names, **kwargs):
         salt '*' pkg.version <package1> <package2> <package3> ...
     '''
     if len(names) == 1:
-        return str(__opts__['proxymodule']['rest_sample.package_status'](names))
+        return str(__opts__['proxymodule']['rest_sample.package_status'](names[0]))


 def installed(
@@ -26,6 +26,7 @@ def __virtual__():
     # Enable on these platforms only.
     enable = set((
         'RestExampleOS',
+        'proxy',
     ))
     if __grains__['os'] in enable:
         return __virtualname__
@@ -40,9 +41,9 @@ def start(name):

     .. code-block:: bash

-        salt '*' rest_service.start <service name>
+        salt '*' service.start <service name>
     '''
-    return __opts__['proxyobject'].service_start(name)
+    return __opts__['proxymodule']['rest_sample.service_start'](name)


 def stop(name):
@@ -53,9 +54,9 @@ def stop(name):

     .. code-block:: bash

-        salt '*' rest_service.stop <service name>
+        salt '*' service.stop <service name>
     '''
-    return __opts__['proxyobject'].service_stop(name)
+    return __opts__['proxymodule']['rest_sample.service_stop'](name)


 def restart(name):
@@ -66,10 +67,10 @@ def restart(name):

     .. code-block:: bash

-        salt '*' rest_service.restart <service name>
+        salt '*' service.restart <service name>
     '''

-    return __opts__['proxyobject'].service_restart(name)
+    return __opts__['proxymodule']['rest_sample.service_restart'](name)


 def status(name):
@@ -81,9 +82,9 @@ def status(name):

     .. code-block:: bash

-        salt '*' rest_service.status <service name>
+        salt '*' service.status <service name>
     '''
-    return __opts__['proxyobject'].service_status(name)
+    return __opts__['proxymodule']['rest_sample.service_status'](name)


 def list_():
@@ -94,6 +95,6 @@ def list_():

     .. code-block:: bash

-        salt '*' rest_service.list <service name>
+        salt '*' service.list <service name>
     '''
-    return __opts__['proxyobject'].service_list()
+    return __opts__['proxymodule']['rest_sample.service_list']()
@@ -130,7 +130,7 @@ def _get_deps(deps, tree_base, saltenv='base'):
     return deps_list


-def make_src_pkg(dest_dir, spec, sources, template=None, saltenv='base'):
+def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
     '''
     Create a source rpm from the given spec file and sources
@@ -164,7 +164,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
     return ret


-def build(runas, tgt, dest_dir, spec, sources, deps, template, saltenv='base'):
+def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base'):
     '''
     Given the package destination directory, the spec file source and package
     sources, use mock to safely build the rpm defined in the spec file
@@ -183,7 +183,7 @@ def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base'):
     except (IOError, OSError):
         pass
     srpm_dir = tempfile.mkdtemp()
-    srpms = make_src_pkg(srpm_dir, spec, sources, template, saltenv)
+    srpms = make_src_pkg(srpm_dir, spec, sources, env, template, saltenv)

     distset = _get_distset(tgt)
@@ -60,9 +60,14 @@ def get(**kwargs):

     ret = {}
     for sysrc in sysrcs.split("\n"):
-        rcfile = sysrc.split(': ')[0]
-        var = sysrc.split(': ')[1]
-        val = sysrc.split(': ')[2]
+        line_components = sysrc.split(': ')
+        rcfile = line_components[0]
+        if len(line_components) > 2:
+            var = line_components[1]
+            val = line_components[2]
+        else:
+            var = line_components[1].rstrip(':')
+            val = ''
         if rcfile not in ret:
             ret[rcfile] = {}
         ret[rcfile][var] = val
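The rewrite exists because a variable with an empty value yields only two components when the line is split on ``': '``, so indexing ``[2]`` blindly raises ``IndexError``. A standalone demonstration of the fixed parser on both line shapes (sample lines assumed):

for line in ('/etc/rc.conf: sshd_enable: YES', '/etc/rc.conf: dumpdev:'):
    line_components = line.split(': ')
    rcfile = line_components[0]
    if len(line_components) > 2:
        var, val = line_components[1], line_components[2]
    else:
        var, val = line_components[1].rstrip(':'), ''
    print(rcfile, var, repr(val))
# /etc/rc.conf sshd_enable 'YES'
# /etc/rc.conf dumpdev ''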
@@ -409,7 +409,9 @@ def mask(name):

 def masked(name):
     '''
-    Return if the named service is masked
+    Return if the named service is masked.
+
+    .. versionadded:: 2015.8.0

     CLI Example:
@@ -675,7 +675,7 @@ def remove(name=None, pkgs=None, version=None, extra_uninstall_flags=None, **kwargs):
         cmd.append(expanded_cached_pkg)
         cmd.extend(shlex.split(uninstall_flags))
         if extra_uninstall_flags:
-            cmd.extend(str(extra_uninstall_flags).split())
+            cmd.extend(shlex.split(extra_uninstall_flags))

     __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
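``shlex.split`` is the right tool here because uninstall flags can contain quoted arguments with embedded spaces, which ``str().split()`` tears apart:

import shlex

flags = '/qn REMOVE="ALL" COMMENT="has spaces"'
print(str(flags).split())  # ['/qn', 'REMOVE="ALL"', 'COMMENT="has', 'spaces"']
print(shlex.split(flags))  # ['/qn', 'REMOVE=ALL', 'COMMENT=has spaces']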
@@ -2,20 +2,24 @@
 '''
 Return salt data via slack

-    .. versionadded:: 2015.5.0
+.. versionadded:: 2015.5.0

 The following fields can be set in the minion conf file::

+.. code-block:: yaml
+
     slack.channel (required)
     slack.api_key (required)
     slack.username (required)
     slack.as_user (required to see the profile picture of your bot)
     slack.profile (optional)

 Alternative configuration values can be used by prefacing the configuration.
 Any values not found in the alternative configuration will be pulled from
 the default location::

+.. code-block:: yaml
+
     slack.channel
     slack.api_key
     slack.username
@@ -23,6 +27,7 @@ the default location::

 Slack settings may also be configured as::

+.. code-block:: yaml
+
     slack:
       channel: RoomName
       api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
@@ -12,7 +12,6 @@ import yaml
 import tarfile
 import shutil
 import msgpack
-import sqlite3
 import datetime
 import hashlib
 import logging
@@ -21,6 +20,7 @@ import grp

 # Import Salt libs
 import salt.config
+import salt.loader
 import salt.utils
 import salt.utils.http as http
 import salt.syspaths as syspaths
@@ -43,6 +43,18 @@ class SPMClient(object):
         )
         self.opts = opts

+        self.db_prov = opts.get('spm_db_provider', 'sqlite3')
+        db_fun = '{0}.init'.format(self.db_prov)
+
+        self.pkgdb = salt.loader.pkgdb(self.opts)
+        self.db_conn = self.pkgdb[db_fun]()
+
+        self.files_prov = opts.get('spm_files_provider', 'roots')
+        files_fun = '{0}.init'.format(self.files_prov)
+
+        self.pkgfiles = salt.loader.pkgfiles(self.opts)
+        self.files_conn = self.pkgfiles[files_fun]()
+
     def run(self, args):
         '''
         Run the SPM command
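The rest of this file's changes follow from these six attributes: every direct sqlite3 call is replaced by a ``'<provider>.<function>'`` lookup, so swapping ``spm_db_provider`` swaps the whole storage layer. The dispatch pattern, reduced to a runnable toy with stubbed backend functions:

# Toy loader dict keyed the way salt.loader.pkgdb() keys real backends.
pkgdb = {
    'sqlite3.init': lambda: 'fake-conn',
    'sqlite3.info': lambda name, conn: {'name': name, 'via': conn},
}

db_prov = 'sqlite3'  # what opts.get('spm_db_provider', ...) would return
db_conn = pkgdb['{0}.init'.format(db_prov)]()
print(pkgdb['{0}.info'.format(db_prov)]('apache', db_conn))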
@@ -87,31 +99,20 @@ class SPMClient(object):
             return False

         pkg_file = args[1]
-
-        self._init_db()
         roots_path = self.opts['file_roots']['base'][0]
         pillar_path = self.opts['pillar_roots']['base'][0]
-        comps = pkg_file.split('-')
-        comps = '-'.join(comps[:-2]).split('/')
-        name = comps[-1]

         if not os.path.exists(pkg_file):
             log.error('File {0} not found'.format(pkg_file))
             return False

         if not os.path.exists(roots_path):
             os.makedirs(roots_path)

-        sqlite3.enable_callback_tracebacks(True)
-        conn = sqlite3.connect(self.opts['spm_db'], isolation_level=None)
-        cur = conn.cursor()
+        comps = pkg_file.split('-')
+        comps = '-'.join(comps[:-2]).split('/')
+        name = comps[-1]

         formula_tar = tarfile.open(pkg_file, 'r:bz2')
         formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
         formula_def = yaml.safe_load(formula_ref)

-        data = conn.execute('SELECT package FROM packages WHERE package=?', (formula_def['name'], ))
-        if data.fetchone() and not self.opts['force']:
+        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](name, self.db_conn)
+        if pkg_info is not None and not self.opts['force']:
             print('Package {0} already installed, not installing again'.format(formula_def['name']))
             return
@@ -122,8 +123,8 @@ class SPMClient(object):
             for dep in formula_def['dependencies']:
                 if not isinstance(dep, string_types):
                     continue
-                data = conn.execute('SELECT package FROM packages WHERE package=?', (dep, ))
-                if data.fetchone():
+                data = self.pkgdb['{0}.info'.format(self.db_prov)](dep, self.db_conn)
+                if data is not None:
                     continue
                 needs.append(dep)
             print('Cannot install {0}, the following dependencies are needed: '
@@ -143,8 +144,6 @@ class SPMClient(object):

         print('... installing')

-        log.debug('Locally installing package file {0} to {1}'.format(pkg_file, roots_path))
-
         for field in ('version', 'release', 'summary', 'description'):
             if field not in formula_def:
                 log.error('Invalid package: the {0} was not found'.format(field))
@@ -152,42 +151,15 @@ class SPMClient(object):

         pkg_files = formula_tar.getmembers()
         # First pass: check for files that already exist
-        existing_files = []
-        for member in pkg_files:
-            if member.isdir():
-                continue
-            if member.name.startswith('{0}/_'.format(name)):
-                # Module files are distributed via _modules, _states, etc
-                new_name = member.name.replace('{0}/'.format(name), '')
-                out_file = os.path.join(roots_path, new_name)
-            elif member.name == '{0}/pillar.example'.format(name):
-                # Pillars are automatically put in the pillar_roots
-                new_name = '{0}.sls.orig'.format(name)
-                out_file = os.path.join(pillar_path, new_name)
-            else:
-                out_file = os.path.join(roots_path, member.name)
-            if os.path.exists(out_file):
-                existing_files.append(out_file)
-                if not self.opts['force']:
-                    log.error('{0} already exists, not installing'.format(out_file))
+        existing_files = self.pkgfiles['{0}.check_existing'.format(self.files_prov)](
+            name, pkg_files
+        )

         if existing_files and not self.opts['force']:
             return

         # We've decided to install
-        conn.execute('INSERT INTO packages VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
-            name,
-            formula_def['version'],
-            formula_def['release'],
-            datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'),
-            formula_def.get('os', None),
-            formula_def.get('os_family', None),
-            formula_def.get('dependencies', None),
-            formula_def.get('os_dependencies', None),
-            formula_def.get('os_family_dependencies', None),
-            formula_def['summary'],
-            formula_def['description'],
-        ))
+        self.pkgdb['{0}.register_pkg'.format(self.db_prov)](name, formula_def, self.db_conn)

         # No defaults for this in config.py; default to the current running
         # user and group
@@ -198,42 +170,31 @@ class SPMClient(object):

         # Second pass: install the files
         for member in pkg_files:
-            out_path = roots_path
-            file_ref = formula_tar.extractfile(member)
             member.uid = uid
             member.gid = gid
             member.uname = uname
             member.gname = gname

+            file_ref = formula_tar.extractfile(member)
             if member.isdir():
                 digest = ''
             else:
                 file_hash = hashlib.sha1()
                 file_hash.update(file_ref.read())
                 digest = file_hash.hexdigest()
-            if member.name.startswith('{0}/_'.format(name)):
-                # Module files are distributed via _modules, _states, etc
-                member.name = member.name.replace('{0}/'.format(name), '')
-            elif member.name == '{0}/pillar.example'.format(name):
-                # Pillars are automatically put in the pillar_roots
-                member.name = '{0}.sls.orig'.format(name)
-                out_path = pillar_path
-            formula_tar.extract(member, out_path)
-            conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
-                name,
-                '{0}/{1}'.format(out_path, member.path),
-                member.size,
-                member.mode,
-                digest,
-                member.devmajor,
-                member.devminor,
-                member.linkname,
-                member.linkpath,
-                member.uname,
-                member.gname,
-                member.mtime
-            ))
+
+            out_path = self.pkgfiles['{0}.install_file'.format(self.files_prov)](
+                name, formula_tar, member, self.files_conn
+            )
+            self.pkgdb['{0}.register_file'.format(self.db_prov)](
+                name,
+                member,
+                out_path,
+                digest,
+                self.db_conn
+            )

         formula_tar.close()
-        conn.close()

     def _traverse_repos(self, callback, repo_name=None):
         '''
@@ -294,12 +255,18 @@ class SPMClient(object):
         '''
         metadata = {}

+        if not os.path.exists(self.opts['spm_cache_dir']):
+            os.makedirs(self.opts['spm_cache_dir'])
+
         def _read_metadata(repo, repo_info):
             cache_path = '{0}/{1}.p'.format(
                 self.opts['spm_cache_dir'],
                 repo
             )

+            if not os.path.exists(cache_path):
+                return
+
             with salt.utils.fopen(cache_path, 'r') as cph:
                 metadata[repo] = {
                     'info': repo_info,
@@ -379,6 +346,7 @@ class SPMClient(object):

             self._local_install((None, out_file), package)
             return
+        log.error('Cannot install package {0}, no source package'.format(package))

     def _remove(self, args):
         '''
@@ -404,19 +372,15 @@ class SPMClient(object):
             return

         # Look at local repo index
-        sqlite3.enable_callback_tracebacks(True)
-        conn = sqlite3.connect(self.opts['spm_db'], isolation_level=None)
-        cur = conn.cursor()
-
-        data = conn.execute('SELECT * FROM packages WHERE package=?', (package, ))
-        if not data.fetchone():
-            log.error('Package {0} not installed'.format(package))
+        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](package, self.db_conn)
+        if pkg_info is None:
+            print('package {0} not installed'.format(package))
             return

         # Find files that have not changed and remove them
-        data = conn.execute('SELECT path, sum FROM files WHERE package=?', (package, ))
+        files = self.pkgdb['{0}.list_files'.format(self.db_prov)](package, self.db_conn)
         dirs = []
-        for filerow in data.fetchall():
+        for filerow in files:
             if os.path.isdir(filerow[0]):
                 dirs.append(filerow[0])
                 continue
@@ -426,14 +390,14 @@ class SPMClient(object):
             digest = file_hash.hexdigest()
             if filerow[1] == digest:
                 log.trace('Removing file {0}'.format(filerow[0]))
-                os.remove(filerow[0])
+                self.pkgfiles['{0}.remove_file'.format(self.files_prov)](filerow[0], self.files_conn)
             else:
                 log.trace('Not removing file {0}'.format(filerow[0]))
-            conn.execute('DELETE FROM files WHERE path=?', (filerow[0], ))
+            self.pkgdb['{0}.unregister_file'.format(self.db_prov)](filerow[0], self.db_conn)

         # Clean up directories
         for dir_ in sorted(dirs, reverse=True):
-            conn.execute('DELETE FROM files WHERE path=?', (dir_, ))
+            self.pkgdb['{0}.unregister_file'.format(self.db_prov)](dir_, self.db_conn)
             try:
                 log.trace('Removing directory {0}'.format(dir_))
                 os.rmdir(dir_)
@@ -441,7 +405,7 @@ class SPMClient(object):
                 # Leave directories in place that still have files in them
                 log.trace('Cannot remove directory {0}, probably not empty'.format(dir_))

-        conn.execute('DELETE FROM packages WHERE package=?', (package, ))
+        self.pkgdb['{0}.unregister_pkg'.format(self.db_prov)](package, self.db_conn)

     def _local_info(self, args):
         '''
@@ -473,40 +437,18 @@ class SPMClient(object):

         package = args[1]

-        conn = sqlite3.connect(self.opts['spm_db'], isolation_level=None)
-        cur = conn.cursor()
-
-        fields = (
-            'package',
-            'version',
-            'release',
-            'installed',
-            'os',
-            'os_family',
-            'dependencies',
-            'os_dependencies',
-            'os_family_dependencies',
-            'summary',
-            'description',
-        )
-        data = conn.execute(
-            'SELECT {0} FROM packages WHERE package=?'.format(','.join(fields)),
-            (package, )
-        )
-        row = data.fetchone()
-        if not row:
-            print('Package {0} not installed'.format(package))
-            return
-
-        formula_def = dict(list(zip(fields, row)))
-        formula_def['name'] = formula_def['package']
-
-        self._print_info(formula_def)
+        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](package, self.db_conn)
+        if pkg_info is None:
+            print('package {0} not installed'.format(package))
+        else:
+            self._print_info(pkg_info)

     def _print_info(self, formula_def):
         '''
         Print package info
         '''
-        import pprint
-        pprint.pprint(formula_def)
         fields = (
             'name',
             'os',
@@ -565,17 +507,12 @@ Description:

         package = args[1]

-        conn = sqlite3.connect(self.opts['spm_db'], isolation_level=None)
-        cur = conn.cursor()
-
-        data = conn.execute('SELECT package FROM packages WHERE package=?', (package, ))
-        if not data.fetchone():
+        files = self.pkgdb['{0}.list_files'.format(self.db_prov)](package, self.db_conn)
+        if files is None:
             print('Package {0} not installed'.format(package))
             return
-
-        data = conn.execute('SELECT path FROM files WHERE package=?', (package, ))
-        for file_ in data.fetchall():
-            print(file_[0])
+        else:
+            for file_ in files:
+                print(file_[0])

     def _build(self, args):
         '''
@@ -629,40 +566,3 @@ Description:
             if name.startswith(exclude_name):
                 return True
         return False
-
-    def _init_db(self):
-        '''
-        Initialize the package database
-        '''
-        if not os.path.exists(self.opts['spm_db']):
-            log.debug('Creating new package database at {0}'.format(self.opts['spm_db']))
-        conn = sqlite3.connect(self.opts['spm_db'], isolation_level=None)
-        cur = conn.cursor()
-        conn.execute('''CREATE TABLE packages (
-                     package text,
-                     version text,
-                     release text,
-                     installed text,
-                     os text,
-                     os_family text,
-                     dependencies text,
-                     os_dependencies text,
-                     os_family_dependencies text,
-                     summary text,
-                     description text
-                     )''')
-        conn.execute('''CREATE TABLE files (
-                     package text,
-                     path text,
-                     size real,
-                     mode text,
-                     sum text,
-                     major text,
-                     minor text,
-                     linkname text,
-                     linkpath text,
-                     uname text,
-                     gname text,
-                     mtime text
-                     )''')
-        conn.close()
salt/spm/pkgdb/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
salt/spm/pkgdb/sqlite3.py (new file, 187 lines)
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+'''
+This module allows SPM to use sqlite3 as the backend for SPM's package database.
+
+.. versionadded:: 2015.8.0
+'''
+
+from __future__ import absolute_import
+import os.path
+import logging
+import sqlite3
+import datetime
+from sqlite3 import OperationalError
+from salt.ext.six.moves import zip
+
+# Get logging started
+log = logging.getLogger(__name__)
+
+
+def init():
+    '''
+    Get an sqlite3 connection, and initialize the package database if necessary
+    '''
+    if not os.path.exists(__opts__['spm_cache_dir']):
+        log.debug('Creating SPM cache directory at {0}'.format(__opts__['spm_cache_dir']))
+        os.makedirs(__opts__['spm_cache_dir'])
+
+    if not os.path.exists(__opts__['spm_db']):
+        log.debug('Creating new package database at {0}'.format(__opts__['spm_db']))
+
+    sqlite3.enable_callback_tracebacks(True)
+    conn = sqlite3.connect(__opts__['spm_db'], isolation_level=None)
+
+    try:
+        conn.execute('SELECT count(*) FROM packages')
+    except OperationalError:
+        conn.execute('''CREATE TABLE packages (
+                     package text,
+                     version text,
+                     release text,
+                     installed text,
+                     os text,
+                     os_family text,
+                     dependencies text,
+                     os_dependencies text,
+                     os_family_dependencies text,
+                     summary text,
+                     description text
+                     )''')
+
+    try:
+        conn.execute('SELECT count(*) FROM files')
+    except OperationalError:
+        conn.execute('''CREATE TABLE files (
+                     package text,
+                     path text,
+                     size real,
+                     mode text,
+                     sum text,
+                     major text,
+                     minor text,
+                     linkname text,
+                     linkpath text,
+                     uname text,
+                     gname text,
+                     mtime text
+                     )''')
+
+    return conn
+
+
+def info(package, conn=None):
+    '''
+    List info for a package
+    '''
+    if conn is None:
+        conn = init()
+
+    fields = (
+        'package',
+        'version',
+        'release',
+        'installed',
+        'os',
+        'os_family',
+        'dependencies',
+        'os_dependencies',
+        'os_family_dependencies',
+        'summary',
+        'description',
+    )
+    data = conn.execute(
+        'SELECT {0} FROM packages WHERE package=?'.format(','.join(fields)),
+        (package, )
+    )
+    row = data.fetchone()
+    if not row:
+        return None
+
+    formula_def = dict(list(zip(fields, row)))
+    formula_def['name'] = formula_def['package']
+
+    return formula_def
+
+
+def list_files(package, conn=None):
+    '''
+    List files for an installed package
+    '''
+    if conn is None:
+        conn = init()
+
+    data = conn.execute('SELECT package FROM packages WHERE package=?', (package, ))
+    if not data.fetchone():
+        return None
+
+    ret = []
+    data = conn.execute('SELECT path, sum FROM files WHERE package=?', (package, ))
+    for file_ in data.fetchall():
+        ret.append(file_)
+
+    return ret
+
+
+def register_pkg(name, formula_def, conn=None):
+    '''
+    Register a package in the package database
+    '''
+    if conn is None:
+        conn = init()
+
+    conn.execute('INSERT INTO packages VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
+        name,
+        formula_def['version'],
+        formula_def['release'],
+        datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'),
+        formula_def.get('os', None),
+        formula_def.get('os_family', None),
+        formula_def.get('dependencies', None),
+        formula_def.get('os_dependencies', None),
+        formula_def.get('os_family_dependencies', None),
+        formula_def['summary'],
+        formula_def['description'],
+    ))
+
+
+def register_file(name, member, path, digest='', conn=None):
+    '''
+    Register a file in the package database
+    '''
+    if conn is None:
+        conn = init()
+
+    conn.execute('INSERT INTO files VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
+        name,
+        '{0}/{1}'.format(path, member.path),
+        member.size,
+        member.mode,
+        digest,
+        member.devmajor,
+        member.devminor,
+        member.linkname,
+        member.linkpath,
+        member.uname,
+        member.gname,
+        member.mtime
+    ))
+
+
+def unregister_pkg(name, conn=None):
+    '''
+    Unregister a package from the package database
+    '''
+    if conn is None:
+        conn = init()
+
+    conn.execute('DELETE FROM packages WHERE package=?', (name, ))
+
+
+def unregister_file(path, conn=None):
+    '''
+    Unregister a file from the package database
+    '''
+    if conn is None:
+        conn = init()
+
+    conn.execute('DELETE FROM files WHERE path=?', (path, ))
salt/spm/pkgfiles/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
salt/spm/pkgfiles/roots.py (new file, 92 lines)
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+'''
+This module allows SPM to use the local filesystem (``file_roots``) to install
+files for SPM.
+
+.. versionadded:: 2015.8.0
+'''
+
+from __future__ import absolute_import
+import os
+import logging
+
+# Get logging started
+log = logging.getLogger(__name__)
+
+
+def init(**kwargs):
+    '''
+    Initialize the directories for the files
+    '''
+    roots_path = __opts__['file_roots']['base'][0]
+    pillar_path = __opts__['pillar_roots']['base'][0]
+    for dir_ in (roots_path, pillar_path):
+        if not os.path.exists(dir_):
+            os.makedirs(dir_)
+    return {
+        'roots_path': roots_path,
+        'pillar_path': pillar_path,
+    }
+
+
+def check_existing(package, pkg_files, conn=None):
+    '''
+    Check the filesystem for existing files
+    '''
+    if conn is None:
+        conn = init()
+
+    existing_files = []
+    for member in pkg_files:
+        if member.isdir():
+            continue
+        if member.name.startswith('{0}/_'.format(package)):
+            # Module files are distributed via _modules, _states, etc
+            new_name = member.name.replace('{0}/'.format(package), '')
+            out_file = os.path.join(conn['roots_path'], new_name)
+        elif member.name == '{0}/pillar.example'.format(package):
+            # Pillars are automatically put in the pillar_roots
+            new_name = '{0}.sls.orig'.format(package)
+            out_file = os.path.join(conn['pillar_path'], new_name)
+        else:
+            out_file = os.path.join(conn['roots_path'], member.name)
+        if os.path.exists(out_file):
+            existing_files.append(out_file)
+            if not __opts__['force']:
+                log.error('{0} already exists, not installing'.format(out_file))
+
+    return existing_files
+
+
+def install_file(package, formula_tar, member, conn=None):
+    '''
+    Install a single file to the file system
+    '''
+    if conn is None:
+        conn = init()
+
+    out_path = conn['roots_path']
+
+    if member.name.startswith('{0}/_'.format(package)):
+        # Module files are distributed via _modules, _states, etc
+        member.name = member.name.replace('{0}/'.format(package), '')
+    elif member.name == '{0}/pillar.example'.format(package):
+        # Pillars are automatically put in the pillar_roots
+        member.name = '{0}.sls.orig'.format(package)
+        out_path = conn['pillar_path']
+
+    log.debug('Installing package file {0} to {1}'.format(member.name, out_path))
+    formula_tar.extract(member, out_path)
+
+    return out_path
+
+
+def remove_file(path, conn=None):
+    '''
+    Remove a single file from the file system
+    '''
+    if conn is None:
+        conn = init()
+
+    log.debug('Removing package file {0}'.format(path))
+    os.remove(path)
@@ -278,7 +278,7 @@ def _load_accumulators():
     serial = salt.payload.Serial(__opts__)
     ret = {'accumulators': {}, 'accumulators_deps': {}}
     try:
-        with open(path, 'rb') as f:
+        with salt.utils.fopen(path, 'rb') as f:
             loaded = serial.load(f)
             return loaded if loaded else ret
     except (IOError, NameError):
@ -296,7 +296,7 @@ def _persist_accummulators(accumulators, accumulators_deps):

    serial = salt.payload.Serial(__opts__)
    try:
        with open(_get_accumulator_filepath(), 'w+b') as f:
        with salt.utils.fopen(_get_accumulator_filepath(), 'w+b') as f:
            serial.dump(accumm_data, f)
    except NameError:
        # msgpack error from salt-ssh
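Both hunks above continue the codebase-wide move from bare ``open()`` to ``salt.utils.fopen()``. A rough sketch of what the wrapper is assumed to add, namely marking the descriptor close-on-exec so it is not leaked into forked child processes (a POSIX-only illustration, not the actual implementation):

.. code-block:: python

    import fcntl

    def fopen(*args, **kwargs):
        f = open(*args, **kwargs)
        # Keep the descriptor from surviving exec() in child processes.
        flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(f.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
        return f

    with fopen('/var/log/wtmp', 'rb') as fp_:
        data = fp_.read()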
@ -235,9 +235,18 @@ def mounted(name,
    # Some filesystems have options which should not force a remount.
    mount_ignore_fs_keys = {
        'ramfs': ['size']
    }
    }

    # Some options are translated once mounted
    mount_translate_options = {
        'tcp': 'proto=tcp',
        'udp': 'proto=udp',
    }

    for opt in opts:
        if opt in mount_translate_options:
            opt = mount_translate_options[opt]

        keyval_option = opt.split('=')[0]
        if keyval_option in mount_invisible_keys:
            opt = keyval_option
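The translation table above normalizes bare socket options into their ``key=value`` form before the desired options are compared with the live mount, so an ``fstab`` entry using ``tcp`` does not force a remount of a filesystem already mounted with ``proto=tcp``. The normalization in isolation:

.. code-block:: python

    mount_translate_options = {
        'tcp': 'proto=tcp',
        'udp': 'proto=udp',
    }

    def normalize(opts):
        # Bare options are translated; everything else passes through.
        return [mount_translate_options.get(opt, opt) for opt in opts]

    print(normalize(['tcp', 'rw', 'noatime']))  # ['proto=tcp', 'rw', 'noatime']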
@ -21,6 +21,8 @@ automatically
        - dest_dir: /tmp/pkg
        - spec: salt://pkg/salt/spec/salt.spec
        - template: jinja
        - deps:
          - salt://pkg/salt/sources/required_dependency.rpm
        - tgt: epel-7-x86_64
        - sources:
          - salt://pkg/salt/sources/logrotate.salt
@ -54,6 +56,7 @@ def built(
        template,
        tgt,
        deps=None,
        env=None,
        results=None,
        always=False,
        saltenv='base'):
@ -88,6 +91,24 @@ def built(
        downloading directly from Amazon S3 compatible URLs with both
        pre-configured and automatic IAM credentials

    env
        A dictionary of environment variables to be set prior to execution.
        Example:

        .. code-block:: yaml

            - env:
                DEB_BUILD_OPTIONS: 'nocheck'

        .. warning::

            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.

    results
        The names of the expected rpms that will be built
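The PyYAML pitfall called out in the warning is easy to reproduce directly (assuming the ``yaml`` package is available):

.. code-block:: python

    import yaml

    # Unquoted yes/no/on/off/true/false parse as booleans under YAML 1.1:
    print(yaml.safe_load('RUN_TESTS: no'))    # {'RUN_TESTS': False}
    # Quoting keeps the literal string:
    print(yaml.safe_load("RUN_TESTS: 'no'"))  # {'RUN_TESTS': 'no'}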
@ -118,6 +139,14 @@ def built(
        ret['comment'] = 'Packages need to be built'
        ret['result'] = None
        return ret

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        return ret

    ret['changes'] = __salt__['pkgbuild.build'](
        runas,
        tgt,
@ -125,6 +154,7 @@ def built(
        spec,
        sources,
        deps,
        env,
        template,
        saltenv)
    ret['comment'] = 'Packages Built'
@ -302,7 +302,7 @@ def managed(name, **kwargs):

    # empty file before configure
    if kwargs.get('clean_file', False):
        open(kwargs['file'], 'w').close()
        salt.utils.fopen(kwargs['file'], 'w').close()

    try:
        if __grains__['os_family'] == 'Debian':
@ -5,6 +5,7 @@ from __future__ import absolute_import
import logging

# Import Salt Libs
import salt.utils
from salt.transport.client import ReqChannel

log = logging.getLogger(__name__)
@ -26,12 +27,12 @@ class LocalChannel(ReqChannel):
            #data = json.loads(load)
            #{'path': 'apt-cacher-ng/map.jinja', 'saltenv': 'base', 'cmd': '_serve_file', 'loc': 0}
            #f = open(data['path'])
            f = open(load['path'])
            ret = {
                'data': ''.join(f.readlines()),
                'dest': load['path'],
            }
            print ('returning', ret)
            with salt.utils.fopen(load['path']) as f:
                ret = {
                    'data': ''.join(f.readlines()),
                    'dest': load['path'],
                }
            print ('returning', ret)
        else:
            # end of buffer
            ret = {
@ -169,6 +169,7 @@ def sig4(method, endpoint, params, prov_dict,
        endpoint,
        amzdate,
    )

    signed_headers = 'host;x-amz-date'

    if isinstance(headers, dict):
@ -177,6 +178,10 @@ def sig4(method, endpoint, params, prov_dict,
            signed_headers += ';{0}'.format(header)
        canonical_headers += '\n'

    if token != '':
        canonical_headers += 'x-amz-security-token:{0}\n'.format(token)
        signed_headers += ';x-amz-security-token'

    algorithm = 'AWS4-HMAC-SHA256'

    # Create payload hash (hash of the request body content). For GET
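For context on the comment the hunk ends with: the SigV4 payload hash is the hex SHA-256 digest of the raw request body, and GET requests hash the empty string. A minimal sketch:

.. code-block:: python

    import hashlib

    def payload_hash(body=b''):
        # GET requests sign the digest of an empty body.
        return hashlib.sha256(body).hexdigest()

    print(payload_hash())
    # e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855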
@ -403,8 +403,11 @@ def query(url,
        if isinstance(data, dict):
            data = urllib.urlencode(data)

        max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body'])
        timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout'])

        try:
            result = HTTPClient(max_body_size=100*1024*1024*1024).fetch(
            result = HTTPClient(max_body_size=max_body).fetch(
                url_full,
                method=method,
                headers=header_dict,
@ -414,7 +417,7 @@ def query(url,
                validate_cert=verify_ssl,
                allow_nonstandard_methods=True,
                streaming_callback=streaming_callback,
                request_timeout=3600.0,
                request_timeout=timeout,
                **req_kwargs
            )
        except tornado.httpclient.HTTPError as exc:
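The change above swaps two hard-coded limits for minion options that fall back to the shipped defaults; the lookup pattern in isolation (the dict below merely stands in for ``salt.config.DEFAULT_MINION_OPTS``, with values taken from the literals being replaced):

.. code-block:: python

    DEFAULT_MINION_OPTS = {
        'http_max_body': 100 * 1024 * 1024 * 1024,  # the old hard-coded 100 GiB
        'http_request_timeout': 3600.0,             # the old hard-coded hour
    }

    def effective(opts, key):
        # Per-minion configuration wins; otherwise use the default.
        return opts.get(key, DEFAULT_MINION_OPTS[key])

    print(effective({'http_request_timeout': 60.0}, 'http_request_timeout'))  # 60.0
    print(effective({}, 'http_max_body'))  # 107374182400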
@ -87,11 +87,13 @@ class NovaServer(object):
        return self.__dict__


def get_entry(dict_, key, value):
def get_entry(dict_, key, value, raise_error=True):
    for entry in dict_:
        if entry[key] == value:
            return entry
    raise SaltCloudSystemExit('Unable to find {0} in {1}.'.format(key, dict_))
    if raise_error is True:
        raise SaltCloudSystemExit('Unable to find {0} in {1}.'.format(key, dict_))
    return {}


def sanatize_novaclient(kwargs):
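The new ``raise_error`` flag lets call sites probe for an optional service instead of aborting; a short sketch with a made-up catalog:

.. code-block:: python

    # Made-up service catalog for illustration.
    catalog = [{'type': 'compute', 'endpoints': [{'region': 'RegionOne'}]}]

    # raise_error=False returns {} instead of raising SaltCloudSystemExit,
    # so callers can .get() their way to a safe default.
    volume = get_entry(catalog, 'type', 'volume', raise_error=False)
    print(volume.get('endpoints', {}))  # {} -- no cinder service here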
@ -194,25 +196,25 @@ class SaltNova(OpenStackComputeShell):

        self.compute_conn = client.Client(**self.kwargs)

        if region_name is not None:
            servers_endpoints = get_entry(
                self.catalog,
                'type',
                'volume'
            )['endpoints']
            self.kwargs['bypass_url'] = get_entry(
                servers_endpoints,
                'region',
                region_name
            )['publicURL']
        volume_endpoints = get_entry(self.catalog, 'type', 'volume', raise_error=False).get('endpoints', {})
        if volume_endpoints:
            if region_name is not None:
                self.kwargs['bypass_url'] = get_entry(
                    volume_endpoints,
                    'region',
                    region_name
                )['publicURL']

        self.kwargs['service_type'] = 'volume'
        self.volume_conn = client.Client(**self.kwargs)
        if hasattr(self, 'extensions'):
            self.expand_extensions()
            self.volume_conn = client.Client(**self.kwargs)
            if hasattr(self, 'extensions'):
                self.expand_extensions()
        else:
            self.volume_conn = None

    def expand_extensions(self):
        for connection in (self.compute_conn, self.volume_conn):
            if connection is None:
                continue
            for extension in self.extensions:
                for attr in extension.module.__dict__:
                    if not inspect.isclass(getattr(extension.module, attr)):
@ -301,6 +303,8 @@ class SaltNova(OpenStackComputeShell):
        '''
        Organize information about a volume from the volume_id
        '''
        if self.volume_conn is None:
            raise SaltCloudSystemExit('No cinder endpoint available')
        nt_ks = self.volume_conn
        volume = nt_ks.volumes.get(volume_id)
        response = {'name': volume.display_name,
@ -316,6 +320,8 @@ class SaltNova(OpenStackComputeShell):
        '''
        List all block volumes
        '''
        if self.volume_conn is None:
            raise SaltCloudSystemExit('No cinder endpoint available')
        nt_ks = self.volume_conn
        volumes = nt_ks.volumes.list(search_opts=search_opts)
        response = {}
@ -334,6 +340,8 @@ class SaltNova(OpenStackComputeShell):
        '''
        Show one volume
        '''
        if self.volume_conn is None:
            raise SaltCloudSystemExit('No cinder endpoint available')
        nt_ks = self.volume_conn
        volumes = self.volume_list(
            search_opts={'display_name': name},
@ -351,6 +359,8 @@ class SaltNova(OpenStackComputeShell):
        '''
        Create a block device
        '''
        if self.volume_conn is None:
            raise SaltCloudSystemExit('No cinder endpoint available')
        nt_ks = self.volume_conn
        response = nt_ks.volumes.create(
            size=size,
@ -366,6 +376,8 @@ class SaltNova(OpenStackComputeShell):
        '''
        Delete a block device
        '''
        if self.volume_conn is None:
            raise SaltCloudSystemExit('No cinder endpoint available')
        nt_ks = self.volume_conn
        try:
            volume = self.volume_show(name)
@ -875,13 +887,110 @@ class SaltNova(OpenStackComputeShell):
        nets = nt_ks.virtual_interfaces.create(networkid, serverid)
        return nets

    def floating_ip_pool_list(self):
        '''
        List all floating IP pools

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        pools = nt_ks.floating_ip_pools.list()
        response = {}
        for pool in pools:
            response[pool.name] = {
                'name': pool.name,
            }
        return response

    def floating_ip_list(self):
        '''
        List floating IPs

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        floating_ips = nt_ks.floating_ips.list()
        response = {}
        for floating_ip in floating_ips:
            response[floating_ip.ip] = {
                'ip': floating_ip.ip,
                'fixed_ip': floating_ip.fixed_ip,
                'id': floating_ip.id,
                'instance_id': floating_ip.instance_id,
                'pool': floating_ip.pool
            }
        return response

    def floating_ip_show(self, ip):
        '''
        Show info on specific floating IP

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        floating_ips = nt_ks.floating_ips.list()
        for floating_ip in floating_ips:
            if floating_ip.ip == ip:
                return floating_ip
        return {}

    def floating_ip_create(self, pool=None):
        '''
        Allocate a floating IP

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        floating_ip = nt_ks.floating_ips.create(pool)
        response = {
            'ip': floating_ip.ip,
            'fixed_ip': floating_ip.fixed_ip,
            'id': floating_ip.id,
            'instance_id': floating_ip.instance_id,
            'pool': floating_ip.pool
        }
        return response

    def floating_ip_delete(self, floating_ip):
        '''
        De-allocate a floating IP

        .. versionadded:: Boron
        '''
        ip = self.floating_ip_show(floating_ip)
        nt_ks = self.compute_conn
        return nt_ks.floating_ips.delete(ip)

    def floating_ip_associate(self, server_name, floating_ip):
        '''
        Associate floating IP address to server

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        server_ = self.server_by_name(server_name)
        server = nt_ks.servers.get(server_.__dict__['id'])
        server.add_floating_ip(floating_ip)
        return self.floating_ip_list()[floating_ip]

    def floating_ip_disassociate(self, server_name, floating_ip):
        '''
        Disassociate a floating IP from server

        .. versionadded:: Boron
        '''
        nt_ks = self.compute_conn
        server_ = self.server_by_name(server_name)
        server = nt_ks.servers.get(server_.__dict__['id'])
        server.remove_floating_ip(floating_ip)
        return self.floating_ip_list()[floating_ip]

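Together these wrappers cover the allocate/attach/detach/release lifecycle; a brief usage sketch (the pool and server names are made up, and the constructor arguments are elided):

.. code-block:: python

    conn = SaltNova(**nova_credentials)  # hypothetical credential kwargs

    conn.floating_ip_pool_list()                   # e.g. {'public': {'name': 'public'}}
    ip = conn.floating_ip_create(pool='public')    # allocate from a pool
    conn.floating_ip_associate('web01', ip['ip'])  # attach to an instance
    conn.floating_ip_disassociate('web01', ip['ip'])
    conn.floating_ip_delete(ip['ip'])              # return it to the pool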
# The following is a list of functions that need to be incorporated in the
# nova module. This list should be updated as functions are added.
#
# absolute-limits     Print a list of absolute limits for a user
# actions             Retrieve server actions.
# add-fixed-ip        Add new IP address to network.
# add-floating-ip     Add a floating IP address to a server.
# aggregate-add-host  Add the host to the specified aggregate.
# aggregate-create    Create a new aggregate with the specified details.
# aggregate-delete    Delete the aggregate by its id.
@ -911,11 +1020,6 @@ class SaltNova(OpenStackComputeShell):
#                     and name.
# endpoints           Discover endpoints that get returned from the
#                     authenticate services
# floating-ip-create  Allocate a floating IP for the current tenant.
# floating-ip-delete  De-allocate a floating IP.
# floating-ip-list    List floating ips for this tenant.
# floating-ip-pool-list
#                     List all floating ip pools.
# get-vnc-console     Get a vnc console to a server.
# host-action         Perform a power action on a host.
# host-update         Update host settings.
@ -930,7 +1034,6 @@ class SaltNova(OpenStackComputeShell):
# reboot              Reboot a server.
# rebuild             Shutdown, re-image, and re-boot a server.
# remove-fixed-ip     Remove an IP address from a server.
# remove-floating-ip  Remove a floating IP address from a server.
# rename              Rename a server.
# rescue              Rescue a server.
# resize              Resize a server.
@ -89,9 +89,8 @@ def query(key, keyid, method='GET', params=None, headers=None,

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key or not keyid:
        iam_creds = iam.get_iam_metadata()
        key = iam_creds['secret_key']
        keyid = iam_creds['access_key']
        key = salt.utils.aws.IROLE_CODE
        keyid = salt.utils.aws.IROLE_CODE

    if not location:
        location = iam.get_iam_region()
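Rather than resolving IAM role credentials eagerly (where a short-lived token could expire before use), the replacement stores a sentinel and defers the metadata lookup to request time. The pattern, sketched with an assumed sentinel value and a hypothetical helper:

.. code-block:: python

    IROLE_CODE = 'use-instance-role-credentials'  # assumed sentinel value

    def resolve_creds(key, keyid):
        if IROLE_CODE in (key, keyid):
            # Fetch fresh role credentials at request time (hypothetical helper).
            return fetch_instance_role_credentials()
        return key, keyid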
@ -1290,6 +1290,22 @@ class DictItem(BaseSchemaItem):
        self.__flatten__ = flatten
        return self

    def serialize(self):
        result = super(DictItem, self).serialize()
        required = []
        if self.properties is not None:
            if isinstance(self.properties, Schema):
                serialized = self.properties.serialize()
                if 'required' in serialized:
                    required.extend(serialized['required'])
            else:
                for key, prop in self.properties.items():
                    if prop.required:
                        required.append(key)
        if required:
            result['required'] = required
        return result


class RequirementsItem(SchemaItem):
    __type__ = 'object'
@ -24,6 +24,7 @@ from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
import salt.ext.six.moves.http_client
from salt.version import __version__
# pylint: enable=import-error,no-name-in-module
import salt.utils.http

log = logging.getLogger(__name__)
@ -136,13 +136,12 @@ class PkgTest(integration.ModuleCase,
        # fails then the _PKG_TARGETS dict above needs to have an entry added,
        # with two packages that are not installed before these tests are run
        self.assertTrue(pkg_targets)

        version = self.run_function('pkg.version', pkg_targets)

        # If this assert fails, we need to find new targets, this test needs to
        # be able to test successful installation of packages, so these
        # packages need to not be installed before we run the states below
        self.assertFalse(any(version.values()))
        # self.assertFalse(any(version.values()))

        ret = self.run_state('pkg.installed', name=None, pkgs=pkg_targets)
        self.assertSaltTrueReturn(ret)
@ -40,7 +40,8 @@ class MineTestCase(TestCase):
        '''
        with patch.dict(mine.__salt__,
                        {'config.option': MagicMock(return_value={'A': 'B'}),
                         'data.update': MagicMock(return_value='A')}):
                         'data.update': MagicMock(return_value='A'),
                         'A': MagicMock(return_value='B')}):
            with patch.dict(mine.__opts__, {'file_client': 'local',
                                            'id': 'id'}):
                self.assertEqual(mine.update(True), 'A')
@ -170,51 +170,51 @@ class ConfigTestCase(TestCase):
        ssh_key_names = SSHKeyNamesSchema(flatten=True)

        expected = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Digital Ocean",
            "description": "Digital Ocean Cloud VM configuration requirements.",
            "type": "object",
            "properties": {
                "driver": {
                    "default": "digital_ocean",
                    "format": "hidden",
                    "type": "string",
                    "title": "driver"
            '$schema': 'http://json-schema.org/draft-04/schema#',
            'title': 'Digital Ocean',
            'description': 'Digital Ocean Cloud VM configuration requirements.',
            'type': 'object',
            'properties': {
                'driver': {
                    'default': 'digital_ocean',
                    'format': 'hidden',
                    'type': 'string',
                    'title': 'driver'
                },
                "personal_access_token": {
                    "type": "string",
                    "description": "This is the API access token which can be "
                                   "generated under the API/Application on your account",
                    "title": "Personal Access Token"
                'personal_access_token': {
                    'type': 'string',
                    'description': 'This is the API access token which can be '
                                   'generated under the API/Application on your account',
                    'title': 'Personal Access Token'
                },
                "ssh_key_file": {
                    "type": "string",
                    "description": "The path to an SSH private key which will "
                                   "be used to authenticate on the deployed VMs",
                    "title": "SSH Private Key"
                'ssh_key_file': {
                    'type': 'string',
                    'description': 'The path to an SSH private key which will '
                                   'be used to authenticate on the deployed VMs',
                    'title': 'SSH Private Key'
                },
                "ssh_key_names": {
                    "type": "string",
                    "description": "The names of an SSH key being managed on Digital "
                                   "Ocean account which will be used to authenticate "
                                   "on the deployed VMs",
                    "title": "SSH Key Names"
                'ssh_key_names': {
                    'type': 'string',
                    'description': 'The names of an SSH key being managed on Digital '
                                   'Ocean account which will be used to authenticate '
                                   'on the deployed VMs',
                    'title': 'SSH Key Names'
                }
            },
            "anyOf": [
                {"required": ["ssh_key_file"]},
                {"required": ["ssh_key_names"]}
            'anyOf': [
                {'required': ['ssh_key_file']},
                {'required': ['ssh_key_names']}
            ],
            "required": [
                "personal_access_token"
            'required': [
                'personal_access_token'
            ],
            "x-ordering": [
                "driver",
                "personal_access_token",
                "ssh_key_file",
                "ssh_key_names",
            'x-ordering': [
                'driver',
                'personal_access_token',
                'ssh_key_file',
                'ssh_key_names',
            ],
            "additionalProperties": False
            'additionalProperties': False
        }
        self.assertDictEqual(expected, Requirements.serialize())

@ -247,51 +247,51 @@ class ConfigTestCase(TestCase):
        )(flatten=True)

        expected = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Digital Ocean",
            "description": "Digital Ocean Cloud VM configuration requirements.",
            "type": "object",
            "properties": {
                "driver": {
                    "default": "digital_ocean",
                    "format": "hidden",
                    "type": "string",
                    "title": "driver"
            '$schema': 'http://json-schema.org/draft-04/schema#',
            'title': 'Digital Ocean',
            'description': 'Digital Ocean Cloud VM configuration requirements.',
            'type': 'object',
            'properties': {
                'driver': {
                    'default': 'digital_ocean',
                    'format': 'hidden',
                    'type': 'string',
                    'title': 'driver'
                },
                "personal_access_token": {
                    "type": "string",
                    "description": "This is the API access token which can be "
                                   "generated under the API/Application on your account",
                    "title": "Personal Access Token"
                'personal_access_token': {
                    'type': 'string',
                    'description': 'This is the API access token which can be '
                                   'generated under the API/Application on your account',
                    'title': 'Personal Access Token'
                },
                "ssh_key_file": {
                    "type": "string",
                    "description": "The path to an SSH private key which will "
                                   "be used to authenticate on the deployed VMs",
                    "title": "SSH Private Key"
                'ssh_key_file': {
                    'type': 'string',
                    'description': 'The path to an SSH private key which will '
                                   'be used to authenticate on the deployed VMs',
                    'title': 'SSH Private Key'
                },
                "ssh_key_names": {
                    "type": "string",
                    "description": "The names of an SSH key being managed on Digital "
                                   "Ocean account which will be used to authenticate "
                                   "on the deployed VMs",
                    "title": "SSH Key Names"
                'ssh_key_names': {
                    'type': 'string',
                    'description': 'The names of an SSH key being managed on Digital '
                                   'Ocean account which will be used to authenticate '
                                   'on the deployed VMs',
                    'title': 'SSH Key Names'
                }
            },
            "anyOf": [
                {"required": ["ssh_key_file"]},
                {"required": ["ssh_key_names"]}
            'anyOf': [
                {'required': ['ssh_key_file']},
                {'required': ['ssh_key_names']}
            ],
            "required": [
                "personal_access_token"
            'required': [
                'personal_access_token'
            ],
            "x-ordering": [
                "driver",
                "personal_access_token",
                "ssh_key_file",
                "ssh_key_names",
            'x-ordering': [
                'driver',
                'personal_access_token',
                'ssh_key_file',
                'ssh_key_names',
            ],
            "additionalProperties": False
            'additionalProperties': False
        }
        self.assertDictContainsSubset(expected, Requirements2.serialize())

@ -302,51 +302,51 @@ class ConfigTestCase(TestCase):
        merge_reqs = Requirements(flatten=True)

        expected = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Digital Ocean",
            "description": "Digital Ocean Cloud VM configuration requirements.",
            "type": "object",
            "properties": {
                "driver": {
                    "default": "digital_ocean",
                    "format": "hidden",
                    "type": "string",
                    "title": "driver"
            '$schema': 'http://json-schema.org/draft-04/schema#',
            'title': 'Digital Ocean',
            'description': 'Digital Ocean Cloud VM configuration requirements.',
            'type': 'object',
            'properties': {
                'driver': {
                    'default': 'digital_ocean',
                    'format': 'hidden',
                    'type': 'string',
                    'title': 'driver'
                },
                "personal_access_token": {
                    "type": "string",
                    "description": "This is the API access token which can be "
                                   "generated under the API/Application on your account",
                    "title": "Personal Access Token"
                'personal_access_token': {
                    'type': 'string',
                    'description': 'This is the API access token which can be '
                                   'generated under the API/Application on your account',
                    'title': 'Personal Access Token'
                },
                "ssh_key_file": {
                    "type": "string",
                    "description": "The path to an SSH private key which will "
                                   "be used to authenticate on the deployed VMs",
                    "title": "SSH Private Key"
                'ssh_key_file': {
                    'type': 'string',
                    'description': 'The path to an SSH private key which will '
                                   'be used to authenticate on the deployed VMs',
                    'title': 'SSH Private Key'
                },
                "ssh_key_names": {
                    "type": "string",
                    "description": "The names of an SSH key being managed on Digital "
                                   "Ocean account which will be used to authenticate "
                                   "on the deployed VMs",
                    "title": "SSH Key Names"
                'ssh_key_names': {
                    'type': 'string',
                    'description': 'The names of an SSH key being managed on Digital '
                                   'Ocean account which will be used to authenticate '
                                   'on the deployed VMs',
                    'title': 'SSH Key Names'
                }
            },
            "anyOf": [
                {"required": ["ssh_key_file"]},
                {"required": ["ssh_key_names"]}
            'anyOf': [
                {'required': ['ssh_key_file']},
                {'required': ['ssh_key_names']}
            ],
            "required": [
                "personal_access_token"
            'required': [
                'personal_access_token'
            ],
            "x-ordering": [
                "driver",
                "personal_access_token",
                "ssh_key_file",
                "ssh_key_names",
            'x-ordering': [
                'driver',
                'personal_access_token',
                'ssh_key_file',
                'ssh_key_names',
            ],
            "additionalProperties": False
            'additionalProperties': False
        }
        self.assertDictContainsSubset(expected, Requirements3.serialize())

@ -375,68 +375,68 @@ class ConfigTestCase(TestCase):
        )(flatten=True)

        expected = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "title": "Digital Ocean",
            "description": "Digital Ocean Cloud VM configuration requirements.",
            "type": "object",
            "properties": {
                "driver": {
                    "default": "digital_ocean",
                    "format": "hidden",
                    "type": "string",
                    "title": "driver"
            '$schema': 'http://json-schema.org/draft-04/schema#',
            'title': 'Digital Ocean',
            'description': 'Digital Ocean Cloud VM configuration requirements.',
            'type': 'object',
            'properties': {
                'driver': {
                    'default': 'digital_ocean',
                    'format': 'hidden',
                    'type': 'string',
                    'title': 'driver'
                },
                "personal_access_token": {
                    "type": "string",
                    "description": "This is the API access token which can be "
                                   "generated under the API/Application on your account",
                    "title": "Personal Access Token"
                'personal_access_token': {
                    'type': 'string',
                    'description': 'This is the API access token which can be '
                                   'generated under the API/Application on your account',
                    'title': 'Personal Access Token'
                },
                "ssh_key_file": {
                    "type": "string",
                    "description": "The path to an SSH private key which will "
                                   "be used to authenticate on the deployed VMs",
                    "title": "SSH Private Key"
                'ssh_key_file': {
                    'type': 'string',
                    'description': 'The path to an SSH private key which will '
                                   'be used to authenticate on the deployed VMs',
                    'title': 'SSH Private Key'
                },
                "ssh_key_names": {
                    "type": "string",
                    "description": "The names of an SSH key being managed on Digital "
                                   "Ocean account which will be used to authenticate "
                                   "on the deployed VMs",
                    "title": "SSH Key Names"
                'ssh_key_names': {
                    'type': 'string',
                    'description': 'The names of an SSH key being managed on Digital '
                                   'Ocean account which will be used to authenticate '
                                   'on the deployed VMs',
                    'title': 'SSH Key Names'
                },
                "ssh_key_file_2": {
                    "type": "string",
                    "description": "The path to an SSH private key which will "
                                   "be used to authenticate on the deployed VMs",
                    "title": "SSH Private Key"
                'ssh_key_file_2': {
                    'type': 'string',
                    'description': 'The path to an SSH private key which will '
                                   'be used to authenticate on the deployed VMs',
                    'title': 'SSH Private Key'
                },
                "ssh_key_names_2": {
                    "type": "string",
                    "description": "The names of an SSH key being managed on Digital "
                                   "Ocean account which will be used to authenticate "
                                   "on the deployed VMs",
                    "title": "SSH Key Names"
                'ssh_key_names_2': {
                    'type': 'string',
                    'description': 'The names of an SSH key being managed on Digital '
                                   'Ocean account which will be used to authenticate '
                                   'on the deployed VMs',
                    'title': 'SSH Key Names'
                }
            },
            "anyOf": [
                {"required": ["ssh_key_file"]},
                {"required": ["ssh_key_names"]},
                {"required": ["ssh_key_file_2"]},
                {"required": ["ssh_key_names_2"]}
            'anyOf': [
                {'required': ['ssh_key_file']},
                {'required': ['ssh_key_names']},
                {'required': ['ssh_key_file_2']},
                {'required': ['ssh_key_names_2']}
            ],
            "required": [
                "personal_access_token"
            'required': [
                'personal_access_token'
            ],
            "x-ordering": [
                "driver",
                "personal_access_token",
                "ssh_key_file",
                "ssh_key_names",
                "ssh_key_file_2",
                "ssh_key_names_2",
            'x-ordering': [
                'driver',
                'personal_access_token',
                'ssh_key_file',
                'ssh_key_names',
                'ssh_key_file_2',
                'ssh_key_names_2',
            ],
            "additionalProperties": False
            'additionalProperties': False
        }
        self.assertDictContainsSubset(expected, Requirements4.serialize())

@ -479,7 +479,7 @@ class ConfigTestCase(TestCase):

        try:
            jsonschema.validate(
                {"personal_access_token": "foo", "ssh_key_names": "bar", "ssh_key_file": "test"},
                {'personal_access_token': 'foo', 'ssh_key_names': 'bar', 'ssh_key_file': 'test'},
                Requirements.serialize()
            )
        except jsonschema.exceptions.ValidationError as exc:
@ -487,7 +487,7 @@ class ConfigTestCase(TestCase):

        try:
            jsonschema.validate(
                {"personal_access_token": "foo", "ssh_key_names": "bar"},
                {'personal_access_token': 'foo', 'ssh_key_names': 'bar'},
                Requirements.serialize()
            )
        except jsonschema.exceptions.ValidationError as exc:
@ -495,7 +495,7 @@ class ConfigTestCase(TestCase):

        try:
            jsonschema.validate(
                {"personal_access_token": "foo", "ssh_key_file": "test"},
                {'personal_access_token': 'foo', 'ssh_key_file': 'test'},
                Requirements.serialize()
            )
        except jsonschema.exceptions.ValidationError as exc:
@ -503,7 +503,7 @@ class ConfigTestCase(TestCase):

        with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
            jsonschema.validate(
                {"personal_access_token": "foo"},
                {'personal_access_token': 'foo'},
                Requirements.serialize()
            )
        self.assertIn('is not valid under any of the given schemas', excinfo.exception.message)
@ -1587,6 +1587,53 @@ class ConfigTestCase(TestCase):
                }
            )

        class TestConf(schema.Schema):
            item = schema.DictItem(
                title='Poligon',
                description='Describe the Poligon',
                properties={
                    'sides': schema.IntegerItem(required=True)
                },
                additional_properties=schema.OneOfItem(items=[schema.BooleanItem(),
                                                              schema.StringItem()])
            )

        self.assertDictContainsSubset(
            TestConf.serialize(), {
                '$schema': 'http://json-schema.org/draft-04/schema#',
                'type': 'object',
                'properties': {
                    'item': {
                        'title': 'Poligon',
                        'description': 'Describe the Poligon',
                        'type': 'object',
                        'properties': {
                            'sides': {
                                'type': 'integer'
                            }
                        },
                        'additionalProperties': {
                            'oneOf': [
                                {
                                    'type': 'boolean'
                                },
                                {
                                    'type': 'string'
                                }
                            ]
                        },
                        'required': [
                            'sides'
                        ],
                    }
                },
                'x-ordering': [
                    'item'
                ],
                'additionalProperties': False
            }
        )

    @skipIf(HAS_JSONSCHEMA is False, 'The \'jsonschema\' library is missing')
    def test_dict_config_validation(self):
        class TestConf(schema.Schema):
@ -1712,6 +1759,41 @@ class ConfigTestCase(TestCase):
                                              'opaque': True}}, TestConf.serialize())
        self.assertIn('has too many properties', excinfo.exception.message)

        class TestConf(schema.Schema):
            item = schema.DictItem(
                title='Poligon',
                description='Describe the Poligon',
                properties={
                    'sides': schema.IntegerItem(required=True)
                },
                additional_properties=schema.OneOfItem(items=[schema.BooleanItem(),
                                                              schema.StringItem()])
            )

        with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
            jsonschema.validate({'item': {'color': 'blue',
                                          'rugged_surface': False,
                                          'opaque': True}}, TestConf.serialize())
        self.assertIn('\'sides\' is a required property', excinfo.exception.message)

        class Props(schema.Schema):
            sides = schema.IntegerItem(required=True)

        class TestConf(schema.Schema):
            item = schema.DictItem(
                title='Poligon',
                description='Describe the Poligon',
                properties=Props(),
                additional_properties=schema.OneOfItem(items=[schema.BooleanItem(),
                                                              schema.StringItem()])
            )

        with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
            jsonschema.validate({'item': {'color': 'blue',
                                          'rugged_surface': False,
                                          'opaque': True}}, TestConf.serialize())
        self.assertIn('\'sides\' is a required property', excinfo.exception.message)

    def test_oneof_config(self):
        item = schema.OneOfItem(
            items=(schema.StringItem(title='Yes', enum=['yes']),