Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 08:58:59 +00:00)
Merge pull request #44638 from terminalmage/new-docker-hotness
Many improvements to docker network and container states
Commit: 53eee476ac

@ -1292,8 +1292,42 @@ The password used for HTTP proxy access.

    proxy_password: obolus

.. conf_minion:: docker.compare_container_networks

``docker.compare_container_networks``
-------------------------------------

.. versionadded:: Oxygen

Default: ``{'static': ['Aliases', 'Links', 'IPAMConfig'], 'automatic': ['IPAddress', 'Gateway', 'GlobalIPv6Address', 'IPv6Gateway']}``

Specifies which keys are examined by
:py:func:`docker.compare_container_networks
<salt.modules.dockermod.compare_container_networks>`.

.. note::
    This should not need to be modified unless new features added to Docker
    result in new keys added to the network configuration which must be
    compared to determine if two containers have different network configs.
    This config option exists solely as a way to allow users to continue using
    Salt to manage their containers after an API change, without waiting for a
    new Salt release to catch up to the changes in the Docker API.

.. code-block:: yaml

    docker.compare_container_networks:
      static:
        - Aliases
        - Links
        - IPAMConfig
      automatic:
        - IPAddress
        - Gateway
        - GlobalIPv6Address
        - IPv6Gateway

Minion Execution Module Management
-========================
+==================================

.. conf_minion:: disable_modules

@ -1303,7 +1337,7 @@ Minion Execution Module Management

Default: ``[]`` (all execution modules are enabled by default)

The event may occur in which the administrator desires that a minion should not
be able to execute a certain module.

However, the ``sys`` module is built into the minion and cannot be disabled.
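
For example, specific execution modules can be disabled by listing them under
``disable_modules`` in the minion config (a minimal sketch; the module names
shown are placeholders):

.. code-block:: yaml

    disable_modules:
      - test
      - solr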

@ -4,6 +4,109 @@

Salt Release Notes - Codename Oxygen
====================================

Lots of Docker Improvements
---------------------------

Much Improved Support for Docker Networking
===========================================

The :py:func:`docker_network.present <salt.states.docker_network.present>`
state has undergone a full rewrite, which includes the following improvements:

Full API Support for Network Management
---------------------------------------

The improvements made to input handling in the
:py:func:`docker_container.running <salt.states.docker_container.running>`
state for 2017.7.0 have now been expanded to :py:func:`docker_network.present
<salt.states.docker_network.present>`. This brings with it full support for all
tunable configuration arguments.
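
As a rough sketch of what this expanded input handling allows, a network state
can now pass driver-level options directly. The ``internal`` argument below is
an assumption based on the Docker ``create_network`` API; check the state's
documentation for the exact set of supported arguments:

.. code-block:: yaml

    backend_net:
      docker_network.present:
        - driver: bridge
        - internal: True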

Custom Subnets
--------------

Custom subnets can now be configured. Both IPv4 and mixed IPv4/IPv6 networks
are supported. See :ref:`here <salt-states-docker-network-present-ipam>` for
more information.
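
A minimal sketch of a network with a custom IPv4 subnet, assuming the
``ipam_pools`` argument with ``subnet`` and ``gateway`` keys described in the
IPAM documentation referenced above:

.. code-block:: yaml

    app_net:
      docker_network.present:
        - ipam_pools:
          - subnet: 10.0.20.0/24
            gateway: 10.0.20.1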

Network Configuration in :py:func:`docker_container.running` States
--------------------------------------------------------------------

A long-requested feature has finally been added! It is now possible to
configure static IPv4/IPv6 addresses, as well as links and labels. See
:ref:`here <salt-states-docker-container-network-management>` for more
information.
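
For example, a container could be attached to such a network with a static
address and an alias along these lines (a sketch only; the nested ``networks``
syntax and the ``ipv4_address`` and ``aliases`` names should be confirmed
against the network management documentation linked above):

.. code-block:: yaml

    web01:
      docker_container.running:
        - image: myuser/myimage:mytag
        - networks:
          - app_net:
            - ipv4_address: 10.0.20.50
            - aliases:
              - web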

.. note::
    While the ``containers`` argument to :py:func:`docker_network.present`
    will continue to be supported, it will no longer be the recommended way of
    ensuring that a container is attached to a network.

Improved Handling of Images from Custom Registries
==================================================

Rather than attempting to parse the tag from the passed image name, Salt will
now resolve that tag down to an image ID and use that ID instead.

.. important::
    Due to this change, there are some backward-incompatible changes to image
    management. See below for a full list of these changes.

Backward-incompatible Changes to Docker Image Management
********************************************************

Passing image names to the following functions must now be done using separate
``repository`` and ``tag`` arguments:

- :py:func:`docker.build <salt.modules.dockermod.build>`
- :py:func:`docker.commit <salt.modules.dockermod.commit>`
- :py:func:`docker.import <salt.modules.dockermod.import_>`
- :py:func:`docker.load <salt.modules.dockermod.load>`
- :py:func:`docker.tag <salt.modules.dockermod.tag_>`
- :py:func:`docker.sls_build <salt.modules.dockermod.sls_build>`

Additionally, the ``tag`` argument must now be explicitly passed to the
:py:func:`docker_image.present <salt.states.docker_image.present>` state,
unless the image is being pulled from a docker registry.
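
For instance, borrowing the example used later in the ``docker_image.present``
documentation in this same changeset, a locally built image now has its tag
passed separately:

.. code-block:: yaml

    myuser/myimage:
      docker_image.present:
        - tag: mytag
        - build: /home/myuser/docker/myimage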

State and Execution Module Support for ``docker run`` Functionality
====================================================================

The :py:func:`docker_container.running <salt.states.docker_container.running>`
state is good for containers which run services, but it is not as useful for
cases in which the container only needs to run once. The ``start`` argument to
:py:func:`docker_container.running <salt.states.docker_container.running>` can
be set to ``False`` to prevent the container from being started again on a
subsequent run, but for many use cases this is not sufficient. Therefore, the
:py:func:`docker.run_container <salt.modules.dockermod.run_container>`
remote-execution function was added. When used on the Salt CLI, it will return
information about the container, such as its name, ID, exit code, and any
output it produces.

State support has also been added via the :py:func:`docker_container.run
<salt.states.docker_container.run>` state. This state is modeled after the
:py:func:`cmd.run <salt.states.cmd.run>` state, and includes arguments like
``onlyif``, ``unless``, and ``creates`` to control whether or not the container
is run.
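
A minimal sketch of a one-shot container using the new state (the image name
is a placeholder, and ``creates`` behaves as it does in ``cmd.run``, skipping
the run when the given path already exists on the minion):

.. code-block:: yaml

    run_migrations:
      docker_container.run:
        - image: myuser/myimage:mytag
        - creates: /srv/app/.migrations-complete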

Full API Support for :py:func:`docker.logs <salt.modules.dockermod.logs>`
=========================================================================

This function now supports all of the options that its Docker API counterpart
does, allowing you to do things like include timestamps, suppress stdout or
stderr, etc. in the return.

``start`` Argument Added to :py:func:`docker.create <salt.modules.dockermod.create>` Function
==============================================================================================

This removes the need to run :py:func:`docker.start
<salt.modules.dockermod.start_>` separately when creating containers on the
Salt CLI.

.. code-block:: bash

    salt myminion docker.create image=foo/bar:baz command=/path/to/command start=True

Comparison Operators in Package Installation
---------------------------------------------

@ -1153,6 +1153,9 @@ VALID_OPTS = {
    # part of the extra_minion_data param
    # Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
    'pass_to_ext_pillars': (six.string_types, list),

    # Used by salt.modules.dockermod.compare_container_networks to specify which keys are compared
    'docker.compare_container_networks': dict,
}

# default configurations

@ -1432,6 +1435,11 @@ DEFAULT_MINION_OPTS = {
    'extmod_whitelist': {},
    'extmod_blacklist': {},
    'minion_sign_messages': False,
    'docker.compare_container_networks': {
        'static': ['Aliases', 'Links', 'IPAMConfig'],
        'automatic': ['IPAddress', 'Gateway',
                      'GlobalIPv6Address', 'IPv6Gateway'],
    },
}

DEFAULT_MASTER_OPTS = {
File diff suppressed because it is too large.

@ -657,11 +657,11 @@ def run(name,

    onlyif
        A command to run as a check, run the named command only if the command
-       passed to the ``onlyif`` option returns true
+       passed to the ``onlyif`` option returns a zero exit status

    unless
        A command to run as a check, only run the named command if the command
-       passed to the ``unless`` option returns false
+       passed to the ``unless`` option returns a non-zero exit status

    cwd
        The current working directory to execute the command in, defaults to

@ -752,13 +752,13 @@ def run(name,

        .. versionadded:: 2014.7.0

-   use_vt
+   use_vt : False
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs.
        This is experimental.

-   bg
-       If ``True``, run command in background and do not await or deliver it's
+   bg : False
+       If ``True``, run command in background and do not await or deliver its
        results.

        .. versionadded:: 2016.3.6
File diff suppressed because it is too large.
@ -42,6 +42,8 @@ import logging
|
||||
import salt.utils.docker
|
||||
import salt.utils.args
|
||||
from salt.ext.six.moves import zip
|
||||
from salt.ext import six
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
# Enable proper logging
|
||||
log = logging.getLogger(__name__) # pylint: disable=invalid-name
|
||||
@ -61,6 +63,7 @@ def __virtual__():
|
||||
|
||||
|
||||
def present(name,
|
||||
tag=None,
|
||||
build=None,
|
||||
load=None,
|
||||
force=False,
|
||||
@ -72,45 +75,56 @@ def present(name,
|
||||
saltenv='base',
|
||||
**kwargs):
|
||||
'''
|
||||
Ensure that an image is present. The image can either be pulled from a
|
||||
Docker registry, built from a Dockerfile, or loaded from a saved image.
|
||||
Image names can be specified either using ``repo:tag`` notation, or just
|
||||
the repo name (in which case a tag of ``latest`` is assumed).
|
||||
Repo identifier is mandatory, we don't assume the default repository
|
||||
is docker hub.
|
||||
.. versionchanged:: Oxygen
|
||||
The ``tag`` argument has been added. It is now required unless pulling
|
||||
from a registry.
|
||||
|
||||
If neither of the ``build`` or ``load`` arguments are used, then Salt will
|
||||
pull from the :ref:`configured registries <docker-authentication>`. If the
|
||||
specified image already exists, it will not be pulled unless ``force`` is
|
||||
set to ``True``. Here is an example of a state that will pull an image from
|
||||
the Docker Hub:
|
||||
Ensure that an image is present. The image can either be pulled from a
|
||||
Docker registry, built from a Dockerfile, loaded from a saved image, or
|
||||
built by running SLS files against a base image.
|
||||
|
||||
If none of the ``build``, ``load``, or ``sls`` arguments are used, then Salt
|
||||
will pull from the :ref:`configured registries <docker-authentication>`. If
|
||||
the specified image already exists, it will not be pulled unless ``force``
|
||||
is set to ``True``. Here is an example of a state that will pull an image
|
||||
from the Docker Hub:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
docker_image.present
|
||||
myuser/myimage:
|
||||
docker_image.present:
|
||||
- tag: mytag
|
||||
|
||||
tag
|
||||
Tag name for the image. Required when using ``build``, ``load``, or
|
||||
``sls`` to create the image, but optional if pulling from a repository.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
build
|
||||
Path to directory on the Minion containing a Dockerfile
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
myuser/myimage:
|
||||
docker_image.present:
|
||||
- build: /home/myuser/docker/myimage
|
||||
- tag: mytag
|
||||
|
||||
|
||||
myuser/myimage:mytag:
|
||||
myuser/myimage:
|
||||
docker_image.present:
|
||||
- build: /home/myuser/docker/myimage
|
||||
- tag: mytag
|
||||
- dockerfile: Dockerfile.alternative
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
The image will be built using :py:func:`docker.build
|
||||
<salt.modules.dockermod.build>` and the specified image name and tag
|
||||
will be applied to it.
|
||||
|
||||
.. versionadded:: 2016.11.0
|
||||
.. versionchanged: Oxygen
|
||||
The ``tag`` must be manually specified using the ``tag`` argument.
|
||||
|
||||
load
|
||||
Loads a tar archive created with :py:func:`docker.load
|
||||
<salt.modules.dockermod.load>` (or the ``docker load`` Docker CLI
|
||||
@ -118,9 +132,13 @@ def present(name,
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
myuser/myimage:
|
||||
docker_image.present:
|
||||
- load: salt://path/to/image.tar
|
||||
- tag: mytag
|
||||
|
||||
.. versionchanged: Oxygen
|
||||
The ``tag`` must be manually specified using the ``tag`` argument.
|
||||
|
||||
force : False
|
||||
Set this parameter to ``True`` to force Salt to pull/build/load the
|
||||
@ -143,8 +161,9 @@ def present(name,
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myuser/myimage:mytag:
|
||||
myuser/myimage:
|
||||
docker_image.present:
|
||||
- tag: latest
|
||||
- sls:
|
||||
- webapp1
|
||||
- webapp2
|
||||
@ -152,6 +171,8 @@ def present(name,
|
||||
- saltenv: base
|
||||
|
||||
.. versionadded: 2017.7.0
|
||||
.. versionchanged: Oxygen
|
||||
The ``tag`` must be manually specified using the ``tag`` argument.
|
||||
|
||||
base
|
||||
Base image with which to start :py:func:`docker.sls_build
|
||||
@ -170,29 +191,48 @@ def present(name,
|
||||
'result': False,
|
||||
'comment': ''}
|
||||
|
||||
if build is not None and load is not None:
|
||||
ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.'
|
||||
if not isinstance(name, six.string_types):
|
||||
name = str(name)
|
||||
|
||||
# At most one of the args that result in an image being built can be used
|
||||
num_build_args = len([x for x in (build, load, sls) if x is not None])
|
||||
if num_build_args > 1:
|
||||
ret['comment'] = \
|
||||
'Only one of \'build\', \'load\', or \'sls\' is permitted.'
|
||||
return ret
|
||||
|
||||
image = ':'.join(salt.utils.docker.get_repo_tag(name))
|
||||
resolved_tag = __salt__['docker.resolve_tag'](image)
|
||||
|
||||
if resolved_tag is False:
|
||||
# Specified image is not present
|
||||
image_info = None
|
||||
elif num_build_args == 1:
|
||||
# If building, we need the tag to be specified
|
||||
if not tag:
|
||||
ret['comment'] = (
|
||||
'The \'tag\' argument is required if any one of \'build\', '
|
||||
'\'load\', or \'sls\' is used.'
|
||||
)
|
||||
return ret
|
||||
if not isinstance(tag, six.string_types):
|
||||
tag = str(tag)
|
||||
full_image = ':'.join((name, tag))
|
||||
else:
|
||||
if tag:
|
||||
name = '{0}:{1}'.format(name, tag)
|
||||
full_image = name
|
||||
|
||||
try:
|
||||
image_info = __salt__['docker.inspect_image'](full_image)
|
||||
except CommandExecutionError as exc:
|
||||
msg = exc.__str__()
|
||||
if '404' in msg:
|
||||
# Image not present
|
||||
image_info = None
|
||||
else:
|
||||
ret['comment'] = msg
|
||||
return ret
|
||||
|
||||
if image_info is not None:
|
||||
# Specified image is present
|
||||
if not force:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'Image \'{0}\' already present'.format(name)
|
||||
ret['comment'] = 'Image {0} already present'.format(full_image)
|
||||
return ret
|
||||
else:
|
||||
try:
|
||||
image_info = __salt__['docker.inspect_image'](name)
|
||||
except Exception as exc:
|
||||
ret['comment'] = \
|
||||
'Unable to get info for image \'{0}\': {1}'.format(name, exc)
|
||||
return ret
|
||||
|
||||
if build or sls:
|
||||
action = 'built'
|
||||
@ -203,12 +243,12 @@ def present(name,
|
||||
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
if (resolved_tag is not False and force) or resolved_tag is False:
|
||||
ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action)
|
||||
if (image_info is not None and force) or image_info is None:
|
||||
ret['comment'] = 'Image {0} will be {1}'.format(full_image, action)
|
||||
return ret
|
||||
|
||||
if build:
|
||||
# get the functions default value and args
|
||||
# Get the functions default value and args
|
||||
argspec = salt.utils.args.get_function_argspec(__salt__['docker.build'])
|
||||
# Map any if existing args from kwargs into the build_args dictionary
|
||||
build_args = dict(list(zip(argspec.args, argspec.defaults)))
|
||||
@ -218,30 +258,30 @@ def present(name,
|
||||
try:
|
||||
# map values passed from the state to the build args
|
||||
build_args['path'] = build
|
||||
build_args['image'] = image
|
||||
build_args['image'] = full_image
|
||||
build_args['dockerfile'] = dockerfile
|
||||
image_update = __salt__['docker.build'](**build_args)
|
||||
except Exception as exc:
|
||||
ret['comment'] = (
|
||||
'Encountered error building {0} as {1}: {2}'
|
||||
.format(build, image, exc)
|
||||
'Encountered error building {0} as {1}: {2}'.format(
|
||||
build, full_image, exc
|
||||
)
|
||||
)
|
||||
return ret
|
||||
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
|
||||
ret['changes'] = image_update
|
||||
|
||||
elif sls:
|
||||
if isinstance(sls, list):
|
||||
sls = ','.join(sls)
|
||||
try:
|
||||
image_update = __salt__['docker.sls_build'](name=image,
|
||||
image_update = __salt__['docker.sls_build'](repository=name,
|
||||
tag=tag,
|
||||
base=base,
|
||||
mods=sls,
|
||||
saltenv=saltenv)
|
||||
except Exception as exc:
|
||||
ret['comment'] = (
|
||||
'Encountered error using sls {0} for building {1}: {2}'
|
||||
.format(sls, image, exc)
|
||||
'Encountered error using SLS {0} for building {1}: {2}'
|
||||
.format(sls, full_image, exc)
|
||||
)
|
||||
return ret
|
||||
if image_info is None or image_update['Id'] != image_info['Id'][:12]:
|
||||
@ -249,11 +289,13 @@ def present(name,
|
||||
|
||||
elif load:
|
||||
try:
|
||||
image_update = __salt__['docker.load'](path=load, image=image)
|
||||
image_update = __salt__['docker.load'](path=load,
|
||||
repository=name,
|
||||
tag=tag)
|
||||
except Exception as exc:
|
||||
ret['comment'] = (
|
||||
'Encountered error loading {0} as {1}: {2}'
|
||||
.format(load, image, exc)
|
||||
.format(load, full_image, exc)
|
||||
)
|
||||
return ret
|
||||
if image_info is None or image_update.get('Layers', []):
|
||||
@ -262,13 +304,13 @@ def present(name,
|
||||
else:
|
||||
try:
|
||||
image_update = __salt__['docker.pull'](
|
||||
image,
|
||||
name,
|
||||
insecure_registry=insecure_registry,
|
||||
client_timeout=client_timeout
|
||||
)
|
||||
except Exception as exc:
|
||||
ret['comment'] = \
|
||||
'Encountered error pulling {0}: {1}'.format(image, exc)
|
||||
'Encountered error pulling {0}: {1}'.format(full_image, exc)
|
||||
return ret
|
||||
if (image_info is not None and image_info['Id'][:12] == image_update
|
||||
.get('Layers', {})
|
||||
@ -280,18 +322,28 @@ def present(name,
|
||||
# Only add to the changes dict if layers were pulled
|
||||
ret['changes'] = image_update
|
||||
|
||||
ret['result'] = bool(__salt__['docker.resolve_tag'](image))
|
||||
try:
|
||||
__salt__['docker.inspect_image'](full_image)
|
||||
error = False
|
||||
except CommandExecutionError:
|
||||
msg = exc.__str__()
|
||||
if '404' not in msg:
|
||||
error = 'Failed to inspect image \'{0}\' after it was {1}: {2}'.format(
|
||||
full_image, action, msg
|
||||
)
|
||||
|
||||
if not ret['result']:
|
||||
# This shouldn't happen, failure to pull should be caught above
|
||||
ret['comment'] = 'Image \'{0}\' could not be {1}'.format(name, action)
|
||||
elif not ret['changes']:
|
||||
ret['comment'] = (
|
||||
'Image \'{0}\' was {1}, but there were no changes'
|
||||
.format(name, action)
|
||||
)
|
||||
if error:
|
||||
ret['comment'] = error
|
||||
else:
|
||||
ret['comment'] = 'Image \'{0}\' was {1}'.format(name, action)
|
||||
ret['result'] = True
|
||||
if not ret['changes']:
|
||||
ret['comment'] = (
|
||||
'Image \'{0}\' was {1}, but there were no changes'.format(
|
||||
name, action
|
||||
)
|
||||
)
|
||||
else:
|
||||
ret['comment'] = 'Image \'{0}\' was {1}'.format(full_image, action)
|
||||
return ret
|
||||
|
||||
|
||||
@ -362,19 +414,16 @@ def absent(name=None, images=None, force=False):
|
||||
elif name:
|
||||
targets = [name]
|
||||
|
||||
pre_tags = __salt__['docker.list_tags']()
|
||||
to_delete = []
|
||||
for target in targets:
|
||||
resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags)
|
||||
resolved_tag = __salt__['docker.resolve_tag'](target)
|
||||
if resolved_tag is not False:
|
||||
to_delete.append(resolved_tag)
|
||||
log.debug('targets = {0}'.format(targets))
|
||||
log.debug('to_delete = {0}'.format(to_delete))
|
||||
|
||||
if not to_delete:
|
||||
ret['result'] = True
|
||||
if len(targets) == 1:
|
||||
ret['comment'] = 'Image \'{0}\' is not present'.format(name)
|
||||
ret['comment'] = 'Image {0} is not present'.format(name)
|
||||
else:
|
||||
ret['comment'] = 'All specified images are not present'
|
||||
return ret
|
||||
@ -382,11 +431,13 @@ def absent(name=None, images=None, force=False):
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
if len(to_delete) == 1:
|
||||
ret['comment'] = ('Image \'{0}\' will be removed'
|
||||
.format(to_delete[0]))
|
||||
ret['comment'] = 'Image {0} will be removed'.format(to_delete[0])
|
||||
else:
|
||||
ret['comment'] = ('The following images will be removed: {0}'
|
||||
.format(', '.join(to_delete)))
|
||||
ret['comment'] = (
|
||||
'The following images will be removed: {0}'.format(
|
||||
', '.join(to_delete)
|
||||
)
|
||||
)
|
||||
return ret
|
||||
|
||||
result = __salt__['docker.rmi'](*to_delete, force=force)
|
||||
@ -397,8 +448,9 @@ def absent(name=None, images=None, force=False):
|
||||
if [x for x in to_delete if x not in post_tags]:
|
||||
ret['changes'] = result
|
||||
ret['comment'] = (
|
||||
'The following image(s) failed to be removed: {0}'
|
||||
.format(', '.join(failed))
|
||||
'The following image(s) failed to be removed: {0}'.format(
|
||||
', '.join(failed)
|
||||
)
|
||||
)
|
||||
else:
|
||||
ret['comment'] = 'None of the specified images were removed'
|
||||
@ -410,11 +462,12 @@ def absent(name=None, images=None, force=False):
|
||||
else:
|
||||
ret['changes'] = result
|
||||
if len(to_delete) == 1:
|
||||
ret['comment'] = 'Image \'{0}\' was removed'.format(to_delete[0])
|
||||
ret['comment'] = 'Image {0} was removed'.format(to_delete[0])
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'The following images were removed: {0}'
|
||||
.format(', '.join(to_delete))
|
||||
'The following images were removed: {0}'.format(
|
||||
', '.join(to_delete)
|
||||
)
|
||||
)
|
||||
ret['result'] = True
|
||||
|
||||
@ -430,5 +483,5 @@ def mod_watch(name, sfun=None, **kwargs):
|
||||
return {'name': name,
|
||||
'changes': {},
|
||||
'result': False,
|
||||
'comment': ('watch requisite is not'
|
||||
' implemented for {0}'.format(sfun))}
|
||||
'comment': 'watch requisite is not implemented for '
|
||||
'{0}'.format(sfun)}
|
||||
|
File diff suppressed because it is too large.
@ -15,6 +15,7 @@ import os
|
||||
import salt.utils.args
|
||||
import salt.utils.data
|
||||
import salt.utils.docker.translate
|
||||
from salt.utils.docker.translate.helpers import split as _split
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
from salt.utils.args import get_function_argspec as _argspec
|
||||
|
||||
@ -40,29 +41,6 @@ except ImportError:
|
||||
pass
|
||||
|
||||
NOTSET = object()
|
||||
ALIASES = {
|
||||
'cmd': 'command',
|
||||
'cpuset': 'cpuset_cpus',
|
||||
'dns_option': 'dns_opt',
|
||||
'env': 'environment',
|
||||
'expose': 'ports',
|
||||
'interactive': 'stdin_open',
|
||||
'ipc': 'ipc_mode',
|
||||
'label': 'labels',
|
||||
'memory': 'mem_limit',
|
||||
'memory_swap': 'memswap_limit',
|
||||
'publish': 'port_bindings',
|
||||
'publish_all': 'publish_all_ports',
|
||||
'restart': 'restart_policy',
|
||||
'rm': 'auto_remove',
|
||||
'sysctl': 'sysctls',
|
||||
'security_opts': 'security_opt',
|
||||
'ulimit': 'ulimits',
|
||||
'user_ns_mode': 'userns_mode',
|
||||
'volume': 'volumes',
|
||||
'workdir': 'working_dir',
|
||||
}
|
||||
ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)])
|
||||
|
||||
# Default timeout as of docker-py 1.0.0
|
||||
CLIENT_TIMEOUT = 60
|
||||
@ -72,117 +50,146 @@ SHUTDOWN_TIMEOUT = 10
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _split(item, sep=',', maxsplit=-1):
|
||||
return [x.strip() for x in item.split(sep, maxsplit)]
|
||||
|
||||
|
||||
def get_client_args():
|
||||
def get_client_args(limit=None):
|
||||
if not HAS_DOCKER_PY:
|
||||
raise CommandExecutionError('docker Python module not imported')
|
||||
try:
|
||||
create_args = _argspec(docker.APIClient.create_container).args
|
||||
except AttributeError:
|
||||
try:
|
||||
create_args = _argspec(docker.Client.create_container).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Could not get create_container argspec'
|
||||
)
|
||||
|
||||
try:
|
||||
host_config_args = \
|
||||
_argspec(docker.types.HostConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
host_config_args = _argspec(docker.utils.create_host_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Could not get create_host_config argspec'
|
||||
)
|
||||
limit = salt.utils.args.split_input(limit or [])
|
||||
ret = {}
|
||||
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.types.EndpointConfig.__init__).args
|
||||
except AttributeError:
|
||||
if not limit or any(x in limit for x in
|
||||
('create_container', 'host_config', 'connect_container_to_network')):
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.utils.utils.create_endpoint_config).args
|
||||
ret['create_container'] = \
|
||||
_argspec(docker.APIClient.create_container).args
|
||||
except AttributeError:
|
||||
try:
|
||||
endpoint_config_args = \
|
||||
_argspec(docker.utils.create_endpoint_config).args
|
||||
ret['create_container'] = \
|
||||
_argspec(docker.Client.create_container).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Could not get create_endpoint_config argspec'
|
||||
'Could not get create_container argspec'
|
||||
)
|
||||
|
||||
for arglist in (create_args, host_config_args, endpoint_config_args):
|
||||
try:
|
||||
# The API version is passed automagically by the API code that
|
||||
# imports these classes/functions and is not an arg that we will be
|
||||
# passing, so remove it if present.
|
||||
arglist.remove('version')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# Remove any args in host or networking config from the main config dict.
|
||||
# This keeps us from accidentally allowing args that docker-py has moved
|
||||
# from the container config to the host config.
|
||||
for arglist in (host_config_args, endpoint_config_args):
|
||||
for item in arglist:
|
||||
ret['host_config'] = \
|
||||
_argspec(docker.types.HostConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
create_args.remove(item)
|
||||
ret['host_config'] = \
|
||||
_argspec(docker.utils.create_host_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Could not get create_host_config argspec'
|
||||
)
|
||||
|
||||
try:
|
||||
ret['connect_container_to_network'] = \
|
||||
_argspec(docker.types.EndpointConfig.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
ret['connect_container_to_network'] = \
|
||||
_argspec(docker.utils.utils.create_endpoint_config).args
|
||||
except AttributeError:
|
||||
try:
|
||||
ret['connect_container_to_network'] = \
|
||||
_argspec(docker.utils.create_endpoint_config).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError(
|
||||
'Could not get connect_container_to_network argspec'
|
||||
)
|
||||
|
||||
for key, wrapped_func in (
|
||||
('logs', docker.api.container.ContainerApiMixin.logs),
|
||||
('create_network', docker.api.network.NetworkApiMixin.create_network)):
|
||||
if not limit or key in limit:
|
||||
try:
|
||||
func_ref = wrapped_func
|
||||
if six.PY2:
|
||||
try:
|
||||
# create_network is decorated, so we have to dig into the
|
||||
# closure created by functools.wraps
|
||||
ret[key] = \
|
||||
_argspec(func_ref.__func__.__closure__[0].cell_contents).args
|
||||
except (AttributeError, IndexError):
|
||||
# functools.wraps changed (unlikely), bail out
|
||||
ret[key] = []
|
||||
else:
|
||||
try:
|
||||
# functools.wraps makes things a little easier in Python 3
|
||||
ret[key] = _argspec(func_ref.__wrapped__).args
|
||||
except AttributeError:
|
||||
# functools.wraps changed (unlikely), bail out
|
||||
ret[key] = []
|
||||
except AttributeError:
|
||||
# Function moved, bail out
|
||||
ret[key] = []
|
||||
|
||||
if not limit or 'ipam_config' in limit:
|
||||
try:
|
||||
ret['ipam_config'] = _argspec(docker.types.IPAMPool.__init__).args
|
||||
except AttributeError:
|
||||
try:
|
||||
ret['ipam_config'] = _argspec(docker.utils.create_ipam_pool).args
|
||||
except AttributeError:
|
||||
raise CommandExecutionError('Could not get ipam args')
|
||||
|
||||
for item in ret:
|
||||
# The API version is passed automagically by the API code that imports
|
||||
# these classes/functions and is not an arg that we will be passing, so
|
||||
# remove it if present. Similarly, don't include "self" if it shows up
|
||||
# in the arglist.
|
||||
for argname in ('version', 'self'):
|
||||
try:
|
||||
ret[item].remove(argname)
|
||||
except ValueError:
|
||||
# Arg is not in create_args
|
||||
pass
|
||||
|
||||
return {'create_container': create_args,
|
||||
'host_config': host_config_args,
|
||||
'networking_config': endpoint_config_args}
|
||||
# Remove any args in host or endpoint config from the create_container
|
||||
# arglist. This keeps us from accidentally allowing args that docker-py has
|
||||
# moved from the create_container function to the either the host or
|
||||
# endpoint config.
|
||||
for item in ('host_config', 'connect_container_to_network'):
|
||||
for val in ret.get(item, []):
|
||||
try:
|
||||
ret['create_container'].remove(val)
|
||||
except ValueError:
|
||||
# Arg is not in create_container arglist
|
||||
pass
|
||||
|
||||
for item in ('create_container', 'host_config', 'connect_container_to_network'):
|
||||
if limit and item not in limit:
|
||||
ret.pop(item, None)
|
||||
|
||||
def get_repo_tag(image, default_tag='latest'):
|
||||
'''
|
||||
Resolves the docker repo:tag notation and returns repo name and tag
|
||||
'''
|
||||
if not isinstance(image, six.string_types):
|
||||
image = str(image)
|
||||
try:
|
||||
r_name, r_tag = image.rsplit(':', 1)
|
||||
except ValueError:
|
||||
r_name = image
|
||||
r_tag = default_tag
|
||||
if not r_tag:
|
||||
# Would happen if some wiseguy requests a tag ending in a colon
|
||||
# (e.g. 'somerepo:')
|
||||
log.warning(
|
||||
'Assuming tag \'%s\' for repo \'%s\'', default_tag, image
|
||||
)
|
||||
r_tag = default_tag
|
||||
elif '/' in r_tag:
|
||||
# Public registry notation with no tag specified
|
||||
# (e.g. foo.bar.com:5000/imagename)
|
||||
return image, default_tag
|
||||
return r_name, r_tag
|
||||
ret['logs'].remove('container')
|
||||
except (KeyError, ValueError, TypeError):
|
||||
pass
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def translate_input(**kwargs):
|
||||
def translate_input(translator,
|
||||
skip_translate=None,
|
||||
ignore_collisions=False,
|
||||
validate_ip_addrs=True,
|
||||
**kwargs):
|
||||
'''
|
||||
Translate CLI/SLS input into the format the API expects. A
|
||||
``skip_translate`` kwarg can be passed to control which arguments are
|
||||
translated. It can be either a comma-separated list or an iterable
|
||||
containing strings (e.g. a list or tuple), and members of that tuple will
|
||||
have their translation skipped. Optionally, skip_translate can be set to
|
||||
True to skip *all* translation.
|
||||
Translate CLI/SLS input into the format the API expects. The ``translator``
|
||||
argument must be a module containing translation functions, within
|
||||
salt.utils.docker.translate. A ``skip_translate`` kwarg can be passed to
|
||||
control which arguments are translated. It can be either a comma-separated
|
||||
list or an iterable containing strings (e.g. a list or tuple), and members
|
||||
of that tuple will have their translation skipped. Optionally,
|
||||
skip_translate can be set to True to skip *all* translation.
|
||||
'''
|
||||
kwargs = salt.utils.args.clean_kwargs(**kwargs)
|
||||
invalid = {}
|
||||
collisions = []
|
||||
|
||||
skip_translate = kwargs.pop('skip_translate', None)
|
||||
if skip_translate is True:
|
||||
# Skip all translation
|
||||
return kwargs, invalid, collisions
|
||||
return kwargs
|
||||
else:
|
||||
if not skip_translate:
|
||||
skip_translate = ()
|
||||
@ -195,120 +202,148 @@ def translate_input(**kwargs):
|
||||
log.error('skip_translate is not an iterable, ignoring')
|
||||
skip_translate = ()
|
||||
|
||||
validate_ip_addrs = kwargs.pop('validate_ip_addrs', True)
|
||||
try:
|
||||
# Using list(kwargs) here because if there are any invalid arguments we
|
||||
# will be popping them from the kwargs.
|
||||
for key in list(kwargs):
|
||||
real_key = translator.ALIASES.get(key, key)
|
||||
if real_key in skip_translate:
|
||||
continue
|
||||
|
||||
# Using list(kwargs) here because if there are any invalid arguments we
|
||||
# will be popping them from the kwargs.
|
||||
for key in list(kwargs):
|
||||
real_key = ALIASES.get(key, key)
|
||||
if real_key in skip_translate:
|
||||
continue
|
||||
# ipam_pools is designed to be passed as a list of actual
|
||||
# dictionaries, but if each of the dictionaries passed has a single
|
||||
# element, it will be incorrectly repacked.
|
||||
if key != 'ipam_pools' and salt.utils.data.is_dictlist(kwargs[key]):
|
||||
kwargs[key] = salt.utils.data.repack_dictlist(kwargs[key])
|
||||
|
||||
if salt.utils.data.is_dictlist(kwargs[key]):
|
||||
kwargs[key] = salt.utils.data.repack_dictlist(kwargs[key])
|
||||
try:
|
||||
kwargs[key] = getattr(translator, real_key)(
|
||||
kwargs[key],
|
||||
validate_ip_addrs=validate_ip_addrs,
|
||||
skip_translate=skip_translate)
|
||||
except AttributeError:
|
||||
log.debug('No translation function for argument \'%s\'', key)
|
||||
continue
|
||||
except SaltInvocationError as exc:
|
||||
kwargs.pop(key)
|
||||
invalid[key] = exc.strerror
|
||||
|
||||
try:
|
||||
func = getattr(salt.utils.docker.translate, real_key)
|
||||
kwargs[key] = func(kwargs[key], validate_ip_addrs=validate_ip_addrs)
|
||||
translator._merge_keys(kwargs)
|
||||
except AttributeError:
|
||||
log.debug('No translation function for argument \'%s\'', key)
|
||||
continue
|
||||
except SaltInvocationError as exc:
|
||||
kwargs.pop(key)
|
||||
invalid[key] = exc.strerror
|
||||
pass
|
||||
|
||||
log_driver = kwargs.pop('log_driver', NOTSET)
|
||||
log_opt = kwargs.pop('log_opt', NOTSET)
|
||||
if 'log_config' not in kwargs:
|
||||
# The log_config is a mixture of the CLI options --log-driver and
|
||||
# --log-opt (which we support in Salt as log_driver and log_opt,
|
||||
# respectively), but it must be submitted to the host config in the
|
||||
# format {'Type': log_driver, 'Config': log_opt}. So, we need to
|
||||
# construct this argument to be passed to the API from those two
|
||||
# arguments.
|
||||
if log_driver is not NOTSET or log_opt is not NOTSET:
|
||||
kwargs['log_config'] = {
|
||||
'Type': log_driver if log_driver is not NOTSET else 'none',
|
||||
'Config': log_opt if log_opt is not NOTSET else {}
|
||||
}
|
||||
|
||||
# Convert CLI versions of commands to their API counterparts
|
||||
for key in ALIASES:
|
||||
if key in kwargs:
|
||||
new_key = ALIASES[key]
|
||||
value = kwargs.pop(key)
|
||||
if new_key in kwargs:
|
||||
collisions.append(new_key)
|
||||
else:
|
||||
kwargs[new_key] = value
|
||||
|
||||
# Don't allow conflicting options to be set
|
||||
if kwargs.get('port_bindings') is not None \
|
||||
and kwargs.get('publish_all_ports'):
|
||||
kwargs.pop('port_bindings')
|
||||
invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True'
|
||||
if kwargs.get('hostname') is not None \
|
||||
and kwargs.get('network_mode') == 'host':
|
||||
kwargs.pop('hostname')
|
||||
invalid['hostname'] = 'Cannot be used when network_mode=True'
|
||||
|
||||
# Make sure volumes and ports are defined to match the binds and port_bindings
|
||||
if kwargs.get('binds') is not None \
|
||||
and (skip_translate is True or
|
||||
all(x not in skip_translate
|
||||
for x in ('binds', 'volume', 'volumes'))):
|
||||
# Make sure that all volumes defined in "binds" are included in the
|
||||
# "volumes" param.
|
||||
auto_volumes = []
|
||||
if isinstance(kwargs['binds'], dict):
|
||||
for val in six.itervalues(kwargs['binds']):
|
||||
try:
|
||||
if 'bind' in val:
|
||||
auto_volumes.append(val['bind'])
|
||||
except TypeError:
|
||||
continue
|
||||
else:
|
||||
if isinstance(kwargs['binds'], list):
|
||||
auto_volume_defs = kwargs['binds']
|
||||
else:
|
||||
try:
|
||||
auto_volume_defs = _split(kwargs['binds'])
|
||||
except AttributeError:
|
||||
auto_volume_defs = []
|
||||
for val in auto_volume_defs:
|
||||
try:
|
||||
auto_volumes.append(_split(val, ':')[1])
|
||||
except IndexError:
|
||||
continue
|
||||
if auto_volumes:
|
||||
actual_volumes = kwargs.setdefault('volumes', [])
|
||||
actual_volumes.extend([x for x in auto_volumes
|
||||
if x not in actual_volumes])
|
||||
# Sort list to make unit tests more reliable
|
||||
actual_volumes.sort()
|
||||
|
||||
if kwargs.get('port_bindings') is not None \
|
||||
and (skip_translate is True or
|
||||
all(x not in skip_translate
|
||||
for x in ('port_bindings', 'expose', 'ports'))):
|
||||
# Make sure that all ports defined in "port_bindings" are included in
|
||||
# the "ports" param.
|
||||
auto_ports = list(kwargs['port_bindings'])
|
||||
if auto_ports:
|
||||
actual_ports = []
|
||||
# Sort list to make unit tests more reliable
|
||||
for port in auto_ports:
|
||||
if port in actual_ports:
|
||||
continue
|
||||
if isinstance(port, six.integer_types):
|
||||
actual_ports.append((port, 'tcp'))
|
||||
# Convert CLI versions of commands to their docker-py counterparts
|
||||
for key in translator.ALIASES:
|
||||
if key in kwargs:
|
||||
new_key = translator.ALIASES[key]
|
||||
value = kwargs.pop(key)
|
||||
if new_key in kwargs:
|
||||
collisions.append(new_key)
|
||||
else:
|
||||
port, proto = port.split('/')
|
||||
actual_ports.append((int(port), proto))
|
||||
actual_ports.sort()
|
||||
actual_ports = [
|
||||
port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports
|
||||
]
|
||||
kwargs.setdefault('ports', actual_ports)
|
||||
kwargs[new_key] = value
|
||||
|
||||
return kwargs, invalid, sorted(collisions)
|
||||
try:
|
||||
translator._post_processing(kwargs, skip_translate, invalid)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
except Exception as exc:
|
||||
error_message = exc.__str__()
|
||||
log.error(
|
||||
'Error translating input: \'%s\'', error_message, exc_info=True)
|
||||
else:
|
||||
error_message = None
|
||||
|
||||
error_data = {}
|
||||
if error_message is not None:
|
||||
error_data['error_message'] = error_message
|
||||
if invalid:
|
||||
error_data['invalid'] = invalid
|
||||
if collisions and not ignore_collisions:
|
||||
for item in collisions:
|
||||
error_data.setdefault('collisions', []).append(
|
||||
'\'{0}\' is an alias for \'{1}\', they cannot both be used'
|
||||
.format(translator.ALIASES_REVMAP[item], item)
|
||||
)
|
||||
if error_data:
|
||||
raise CommandExecutionError(
|
||||
'Failed to translate input', info=error_data)
|
||||
|
||||
return kwargs
|
||||
|
||||
|
||||
def create_ipam_config(*pools, **kwargs):
|
||||
'''
|
||||
Builds an IP address management (IPAM) config dictionary
|
||||
'''
|
||||
kwargs = salt.utils.args.clean_kwargs(**kwargs)
|
||||
|
||||
try:
|
||||
# docker-py 2.0 and newer
|
||||
pool_args = salt.utils.args.get_function_argspec(
|
||||
docker.types.IPAMPool.__init__).args
|
||||
create_pool = docker.types.IPAMPool
|
||||
create_config = docker.types.IPAMConfig
|
||||
except AttributeError:
|
||||
# docker-py < 2.0
|
||||
pool_args = salt.utils.args.get_function_argspec(
|
||||
docker.utils.create_ipam_pool).args
|
||||
create_pool = docker.utils.create_ipam_pool
|
||||
create_config = docker.utils.create_ipam_config
|
||||
|
||||
for primary_key, alias_key in (('driver', 'ipam_driver'),
|
||||
('options', 'ipam_opts')):
|
||||
|
||||
if alias_key in kwargs:
|
||||
alias_val = kwargs.pop(alias_key)
|
||||
if primary_key in kwargs:
|
||||
log.warning(
|
||||
'docker.create_ipam_config: Both \'%s\' and \'%s\' '
|
||||
'passed. Ignoring \'%s\'',
|
||||
alias_key, primary_key, alias_key
|
||||
)
|
||||
else:
|
||||
kwargs[primary_key] = alias_val
|
||||
|
||||
if salt.utils.data.is_dictlist(kwargs.get('options')):
|
||||
kwargs['options'] = salt.utils.data.repack_dictlist(kwargs['options'])
|
||||
|
||||
# Get all of the IPAM pool args that were passed as individual kwargs
|
||||
# instead of in the *pools tuple
|
||||
pool_kwargs = {}
|
||||
for key in list(kwargs):
|
||||
if key in pool_args:
|
||||
pool_kwargs[key] = kwargs.pop(key)
|
||||
|
||||
pool_configs = []
|
||||
if pool_kwargs:
|
||||
pool_configs.append(create_pool(**pool_kwargs))
|
||||
pool_configs.extend([create_pool(**pool) for pool in pools])
|
||||
|
||||
if pool_configs:
|
||||
# Sanity check the IPAM pools. docker-py's type/function for creating
|
||||
# an IPAM pool will allow you to create a pool with a gateway, IP
|
||||
# range, or map of aux addresses, even when no subnet is passed.
|
||||
# However, attempting to use this IPAM pool when creating the network
|
||||
# will cause the Docker Engine to throw an error.
|
||||
if any('Subnet' not in pool for pool in pool_configs):
|
||||
raise SaltInvocationError('A subnet is required in each IPAM pool')
|
||||
else:
|
||||
kwargs['pool_configs'] = pool_configs
|
||||
|
||||
ret = create_config(**kwargs)
|
||||
pool_dicts = ret.get('Config')
|
||||
if pool_dicts:
|
||||
# When you inspect a network with custom IPAM configuration, only
|
||||
# arguments which were explicitly passed are reflected. By contrast,
|
||||
# docker-py will include keys for arguments which were not passed in
|
||||
# but set the value to None. Thus, for ease of comparison, the below
|
||||
# loop will remove all keys with a value of None from the generated
|
||||
# pool configs.
|
||||
for idx, _ in enumerate(pool_dicts):
|
||||
for key in list(pool_dicts[idx]):
|
||||
if pool_dicts[idx][key] is None:
|
||||
del pool_dicts[idx][key]
|
||||
|
||||
return ret
|
||||
|
salt/utils/docker/translate/__init__.py (new file)
@ -0,0 +1 @@
|
||||
# -*- coding: utf-8 -*-
|
@ -1,278 +1,146 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Functions to translate input in the docker CLI format to the format desired
|
||||
by the API.
|
||||
Functions to translate input for container creation
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils.network
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves import range, zip # pylint: disable=import-error,redefined-builtin
|
||||
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
NOTSET = object()
|
||||
# Import helpers
|
||||
from . import helpers
|
||||
|
||||
ALIASES = {
|
||||
'cmd': 'command',
|
||||
'cpuset': 'cpuset_cpus',
|
||||
'dns_option': 'dns_opt',
|
||||
'env': 'environment',
|
||||
'expose': 'ports',
|
||||
'interactive': 'stdin_open',
|
||||
'ipc': 'ipc_mode',
|
||||
'label': 'labels',
|
||||
'memory': 'mem_limit',
|
||||
'memory_swap': 'memswap_limit',
|
||||
'publish': 'port_bindings',
|
||||
'publish_all': 'publish_all_ports',
|
||||
'restart': 'restart_policy',
|
||||
'rm': 'auto_remove',
|
||||
'sysctl': 'sysctls',
|
||||
'security_opts': 'security_opt',
|
||||
'ulimit': 'ulimits',
|
||||
'user_ns_mode': 'userns_mode',
|
||||
'volume': 'volumes',
|
||||
'workdir': 'working_dir',
|
||||
}
|
||||
ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)])
|
||||
|
||||
|
||||
def _split(item, sep=',', maxsplit=-1):
|
||||
return [x.strip() for x in item.split(sep, maxsplit)]
|
||||
|
||||
|
||||
def _get_port_def(port_num, proto='tcp'):
|
||||
def _merge_keys(kwargs):
|
||||
'''
|
||||
Given a port number and protocol, returns the port definition expected by
|
||||
docker-py. For TCP ports this is simply an integer, for UDP ports this is
|
||||
(port_num, 'udp').
|
||||
|
||||
port_num can also be a string in the format 'port_num/udp'. If so, the
|
||||
"proto" argument will be ignored. The reason we need to be able to pass in
|
||||
the protocol separately is because this function is sometimes invoked on
|
||||
data derived from a port range (e.g. '2222-2223/udp'). In these cases the
|
||||
protocol has already been stripped off and the port range resolved into the
|
||||
start and end of the range, and get_port_def() is invoked once for each
|
||||
port number in that range. So, rather than munge udp ports back into
|
||||
strings before passing them to this function, the function will see if it
|
||||
has a string and use the protocol from it if present.
|
||||
|
||||
This function does not catch the TypeError or ValueError which would be
|
||||
raised if the port number is non-numeric. This function either needs to be
|
||||
run on known good input, or should be run within a try/except that catches
|
||||
these two exceptions.
|
||||
The log_config is a mixture of the CLI options --log-driver and --log-opt
|
||||
(which we support in Salt as log_driver and log_opt, respectively), but it
|
||||
must be submitted to the host config in the format {'Type': log_driver,
|
||||
'Config': log_opt}. So, we need to construct this argument to be passed to
|
||||
the API from those two arguments.
|
||||
'''
|
||||
try:
|
||||
port_num, _, port_num_proto = port_num.partition('/')
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
if port_num_proto:
|
||||
proto = port_num_proto
|
||||
try:
|
||||
if proto.lower() == 'udp':
|
||||
return int(port_num), 'udp'
|
||||
except AttributeError:
|
||||
pass
|
||||
return int(port_num)
|
||||
log_driver = kwargs.pop('log_driver', helpers.NOTSET)
|
||||
log_opt = kwargs.pop('log_opt', helpers.NOTSET)
|
||||
if 'log_config' not in kwargs:
|
||||
if log_driver is not helpers.NOTSET \
|
||||
or log_opt is not helpers.NOTSET:
|
||||
kwargs['log_config'] = {
|
||||
'Type': log_driver
|
||||
if log_driver is not helpers.NOTSET
|
||||
else 'none',
|
||||
'Config': log_opt
|
||||
if log_opt is not helpers.NOTSET
|
||||
else {}
|
||||
}
|
||||
|
||||
|
||||
def _get_port_range(port_def):
|
||||
def _post_processing(kwargs, skip_translate, invalid):
|
||||
'''
|
||||
Given a port number or range, return a start and end to that range. Port
|
||||
ranges are defined as a string containing two numbers separated by a dash
|
||||
(e.g. '4505-4506').
|
||||
|
||||
A ValueError will be raised if bad input is provided.
|
||||
Additional container-specific post-translation processing
|
||||
'''
|
||||
if isinstance(port_def, six.integer_types):
|
||||
# Single integer, start/end of range is the same
|
||||
return port_def, port_def
|
||||
try:
|
||||
comps = [int(x) for x in _split(port_def, '-')]
|
||||
if len(comps) == 1:
|
||||
range_start = range_end = comps[0]
|
||||
# Don't allow conflicting options to be set
|
||||
if kwargs.get('port_bindings') is not None \
|
||||
and kwargs.get('publish_all_ports'):
|
||||
kwargs.pop('port_bindings')
|
||||
invalid['port_bindings'] = 'Cannot be used when publish_all_ports=True'
|
||||
if kwargs.get('hostname') is not None \
|
||||
and kwargs.get('network_mode') == 'host':
|
||||
kwargs.pop('hostname')
|
||||
invalid['hostname'] = 'Cannot be used when network_mode=True'
|
||||
|
||||
# Make sure volumes and ports are defined to match the binds and port_bindings
|
||||
if kwargs.get('binds') is not None \
|
||||
and (skip_translate is True or
|
||||
all(x not in skip_translate
|
||||
for x in ('binds', 'volume', 'volumes'))):
|
||||
# Make sure that all volumes defined in "binds" are included in the
|
||||
# "volumes" param.
|
||||
auto_volumes = []
|
||||
if isinstance(kwargs['binds'], dict):
|
||||
for val in six.itervalues(kwargs['binds']):
|
||||
try:
|
||||
if 'bind' in val:
|
||||
auto_volumes.append(val['bind'])
|
||||
except TypeError:
|
||||
continue
|
||||
else:
|
||||
range_start, range_end = comps
|
||||
if range_start > range_end:
|
||||
raise ValueError('start > end')
|
||||
except (TypeError, ValueError) as exc:
|
||||
if exc.__str__() == 'start > end':
|
||||
msg = (
|
||||
'Start of port range ({0}) cannot be greater than end of '
|
||||
'port range ({1})'.format(range_start, range_end)
|
||||
)
|
||||
else:
|
||||
msg = '\'{0}\' is non-numeric or an invalid port range'.format(
|
||||
port_def
|
||||
)
|
||||
raise ValueError(msg)
|
||||
else:
|
||||
return range_start, range_end
|
||||
if isinstance(kwargs['binds'], list):
|
||||
auto_volume_defs = kwargs['binds']
|
||||
else:
|
||||
try:
|
||||
auto_volume_defs = helpers.split(kwargs['binds'])
|
||||
except AttributeError:
|
||||
auto_volume_defs = []
|
||||
for val in auto_volume_defs:
|
||||
try:
|
||||
auto_volumes.append(helpers.split(val, ':')[1])
|
||||
except IndexError:
|
||||
continue
|
||||
if auto_volumes:
|
||||
actual_volumes = kwargs.setdefault('volumes', [])
|
||||
actual_volumes.extend([x for x in auto_volumes
|
||||
if x not in actual_volumes])
|
||||
# Sort list to make unit tests more reliable
|
||||
actual_volumes.sort()
|
||||
|
||||
if kwargs.get('port_bindings') is not None \
|
||||
and (skip_translate is True or
|
||||
all(x not in skip_translate
|
||||
for x in ('port_bindings', 'expose', 'ports'))):
|
||||
# Make sure that all ports defined in "port_bindings" are included in
|
||||
# the "ports" param.
|
||||
auto_ports = list(kwargs['port_bindings'])
|
||||
if auto_ports:
|
||||
actual_ports = []
|
||||
# Sort list to make unit tests more reliable
|
||||
for port in auto_ports:
|
||||
if port in actual_ports:
|
||||
continue
|
||||
if isinstance(port, six.integer_types):
|
||||
actual_ports.append((port, 'tcp'))
|
||||
else:
|
||||
port, proto = port.split('/')
|
||||
actual_ports.append((int(port), proto))
|
||||
actual_ports.sort()
|
||||
actual_ports = [
|
||||
port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports
|
||||
]
|
||||
kwargs.setdefault('ports', actual_ports)
|
||||
|
||||
|
||||
def _map_vals(val, *names, **extra_opts):
|
||||
'''
|
||||
Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list
|
||||
of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function
|
||||
provides common code to handle these instances.
|
||||
'''
|
||||
fill = extra_opts.pop('fill', NOTSET)
|
||||
expected_num_elements = len(names)
|
||||
val = _translate_stringlist(val)
|
||||
for idx, item in enumerate(val):
|
||||
if not isinstance(item, dict):
|
||||
elements = [x.strip() for x in item.split(':')]
|
||||
num_elements = len(elements)
|
||||
if num_elements < expected_num_elements:
|
||||
if fill is NOTSET:
|
||||
raise SaltInvocationError(
|
||||
'\'{0}\' contains {1} value(s) (expected {2})'.format(
|
||||
item, num_elements, expected_num_elements
|
||||
)
|
||||
)
|
||||
elements.extend([fill] * (expected_num_elements - num_elements))
|
||||
elif num_elements > expected_num_elements:
|
||||
raise SaltInvocationError(
|
||||
'\'{0}\' contains {1} value(s) (expected {2})'.format(
|
||||
item,
|
||||
num_elements,
|
||||
expected_num_elements if fill is NOTSET
|
||||
else 'up to {0}'.format(expected_num_elements)
|
||||
)
|
||||
)
|
||||
val[idx] = dict(zip(names, elements))
|
||||
return val
|
||||
|
||||
|
||||
def _validate_ip(val):
|
||||
try:
|
||||
if not salt.utils.network.is_ip(val):
|
||||
raise SaltInvocationError(
|
||||
'\'{0}\' is not a valid IP address'.format(val)
|
||||
)
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
|
||||
# Helpers to perform common translation actions
|
||||
def _translate_str(val):
|
||||
return str(val) if not isinstance(val, six.string_types) else val
|
||||
|
||||
|
||||
def _translate_int(val):
|
||||
if not isinstance(val, six.integer_types):
|
||||
try:
|
||||
val = int(val)
|
||||
except (TypeError, ValueError):
|
||||
raise SaltInvocationError('\'{0}\' is not an integer'.format(val))
|
||||
return val
|
||||
|
||||
|
||||
def _translate_bool(val):
|
||||
return bool(val) if not isinstance(val, bool) else val
|
||||
|
||||
|
||||
def _translate_dict(val):
|
||||
'''
|
||||
Not really translating, just raising an exception if it's not a dict
|
||||
'''
|
||||
if not isinstance(val, dict):
|
||||
raise SaltInvocationError('\'{0}\' is not a dictionary'.format(val))
|
||||
return val
|
||||
|
||||
|
||||
def _translate_command(val):
|
||||
'''
|
||||
Input should either be a single string, or a list of strings. This is used
|
||||
for the two args that deal with commands ("command" and "entrypoint").
|
||||
'''
|
||||
if isinstance(val, six.string_types):
|
||||
return val
|
||||
elif isinstance(val, list):
|
||||
for idx in range(len(val)):
|
||||
if not isinstance(val[idx], six.string_types):
|
||||
val[idx] = str(val[idx])
|
||||
else:
|
||||
# Make sure we have a string
|
||||
val = str(val)
|
||||
return val
|
||||
|
||||
|
||||
def _translate_bytes(val):
|
||||
'''
|
||||
These values can be expressed as an integer number of bytes, or a string
|
||||
expression (e.g. 100mb, 1gb, etc.).
|
||||
'''
|
||||
try:
|
||||
val = int(val)
|
||||
except (TypeError, ValueError):
|
||||
if not isinstance(val, six.string_types):
|
||||
val = str(val)
|
||||
return val
|
||||
|
||||
|
||||
def _translate_stringlist(val):
|
||||
'''
|
||||
On the CLI, these are passed as multiple instances of a given CLI option.
|
||||
In Salt, we accept these as a comma-delimited list but the API expects a
|
||||
Python list. This function accepts input and returns it back as a Python
|
||||
list of strings. If the input is a string which is a comma-separated list
|
||||
of items, split that string and return it.
|
||||
'''
|
||||
if not isinstance(val, list):
|
||||
try:
|
||||
val = _split(val)
|
||||
except AttributeError:
|
||||
val = _split(str(val))
|
||||
for idx in range(len(val)):
|
||||
if not isinstance(val[idx], six.string_types):
|
||||
val[idx] = str(val[idx])
|
||||
return val
|
||||
|
||||
|
||||
def _translate_device_rates(val, numeric_rate=True):
|
||||
'''
|
||||
CLI input is a list of PATH:RATE pairs, but the API expects a list of
|
||||
dictionaries in the format [{'Path': path, 'Rate': rate}]
|
||||
'''
|
||||
val = _map_vals(val, 'Path', 'Rate')
|
||||
for idx in range(len(val)):
|
||||
try:
|
||||
is_abs = os.path.isabs(val[idx]['Path'])
|
||||
except AttributeError:
|
||||
is_abs = False
|
||||
if not is_abs:
|
||||
raise SaltInvocationError(
|
||||
'Path \'{Path}\' is not absolute'.format(**val[idx])
|
||||
)
|
||||
|
||||
# Attempt to convert to an integer. Will fail if rate was specified as
|
||||
# a shorthand (e.g. 1mb), this is OK as we will check to make sure the
|
||||
# value is an integer below if that is what is required.
|
||||
try:
|
||||
val[idx]['Rate'] = int(val[idx]['Rate'])
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
if numeric_rate:
|
||||
try:
|
||||
val[idx]['Rate'] = int(val[idx]['Rate'])
|
||||
except ValueError:
|
||||
raise SaltInvocationError(
|
||||
'Rate \'{Rate}\' for path \'{Path}\' is '
|
||||
'non-numeric'.format(**val[idx])
|
||||
)
|
||||
return val
|
||||
|
||||
|
||||
def _translate_key_val(val, delimiter='='):
|
||||
'''
|
||||
CLI input is a list of key/val pairs, but the API expects a dictionary in
|
||||
the format {key: val}
|
||||
'''
|
||||
if isinstance(val, dict):
|
||||
return val
|
||||
val = _translate_stringlist(val)
|
||||
new_val = {}
|
||||
for item in val:
|
||||
try:
|
||||
lvalue, rvalue = _split(item, delimiter, 1)
|
||||
except (AttributeError, TypeError, ValueError):
|
||||
raise SaltInvocationError(
|
||||
'\'{0}\' is not a key{1}value pair'.format(item, delimiter)
|
||||
)
|
||||
new_val[lvalue] = rvalue
|
||||
return new_val
|
||||
|
||||
|
||||
# Functions below must match names of API arguments
|
||||
# Functions below must match names of docker-py arguments
|
||||
def auto_remove(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def binds(val, **kwargs): # pylint: disable=unused-argument
|
||||
@ -284,7 +152,7 @@ def binds(val, **kwargs): # pylint: disable=unused-argument
|
||||
if not isinstance(val, dict):
|
||||
if not isinstance(val, list):
|
||||
try:
|
||||
val = _split(val)
|
||||
val = helpers.split(val)
|
||||
except AttributeError:
|
||||
raise SaltInvocationError(
|
||||
'\'{0}\' is not a dictionary or list of bind '
|
||||
@ -294,7 +162,7 @@ def binds(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def blkio_weight(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument
|
||||
@ -302,7 +170,7 @@ def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument
|
||||
CLI input is a list of PATH:WEIGHT pairs, but the API expects a list of
|
||||
dictionaries in the format [{'Path': path, 'Weight': weight}]
|
||||
'''
|
||||
val = _map_vals(val, 'Path', 'Weight')
|
||||
val = helpers.map_vals(val, 'Path', 'Weight')
|
||||
for idx in range(len(val)):
|
||||
try:
|
||||
val[idx]['Weight'] = int(val[idx]['Weight'])
|
||||
@ -315,205 +183,179 @@ def blkio_weight_device(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def cap_add(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def cap_drop(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def command(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_command(val)
|
||||
return helpers.translate_command(val)
|
||||
|
||||
|
||||
def cpuset_cpus(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def cpuset_mems(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def cpu_group(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def cpu_period(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def cpu_shares(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def detach(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def device_read_bps(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_device_rates(val, numeric_rate=False)
|
||||
return helpers.translate_device_rates(val, numeric_rate=False)
|
||||
|
||||
|
||||
def device_read_iops(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_device_rates(val, numeric_rate=True)
|
||||
return helpers.translate_device_rates(val, numeric_rate=True)
|
||||
|
||||
|
||||
def device_write_bps(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_device_rates(val, numeric_rate=False)
|
||||
return helpers.translate_device_rates(val, numeric_rate=False)
|
||||
|
||||
|
||||
def device_write_iops(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_device_rates(val, numeric_rate=True)
|
||||
return helpers.translate_device_rates(val, numeric_rate=True)
|
||||
|
||||
|
||||
def devices(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def dns_opt(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def dns_search(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def dns(val, **kwargs):
|
||||
val = _translate_stringlist(val)
|
||||
val = helpers.translate_stringlist(val)
|
||||
if kwargs.get('validate_ip_addrs', True):
|
||||
for item in val:
|
||||
_validate_ip(item)
|
||||
helpers.validate_ip(item)
|
||||
return val
|
||||
|
||||
|
||||
def domainname(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def entrypoint(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_command(val)
|
||||
return helpers.translate_command(val)
|
||||
|
||||
|
||||
def environment(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter='=')
|
||||
return helpers.translate_key_val(val, delimiter='=')
|
||||
|
||||
|
||||
def extra_hosts(val, **kwargs):
|
||||
val = _translate_key_val(val, delimiter=':')
|
||||
val = helpers.translate_key_val(val, delimiter=':')
|
||||
if kwargs.get('validate_ip_addrs', True):
|
||||
for key in val:
|
||||
_validate_ip(val[key])
|
||||
helpers.validate_ip(val[key])
|
||||
return val
|
||||
|
||||
|
||||
def group_add(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def host_config(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_dict(val)
|
||||
return helpers.translate_dict(val)
|
||||
|
||||
|
||||
def hostname(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def ipc_mode(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def isolation(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def labels(val, **kwargs): # pylint: disable=unused-argument
|
||||
'''
|
||||
Can either be a list of label names, or a list of name=value pairs. The API
|
||||
can accept either a list of label names or a dictionary mapping names to
|
||||
values, so the value we translate will be different depending on the input.
|
||||
'''
|
||||
if not isinstance(val, dict):
|
||||
val = _translate_stringlist(val)
|
||||
try:
|
||||
has_mappings = all('=' in x for x in val)
|
||||
except TypeError:
|
||||
has_mappings = False
|
||||
|
||||
if has_mappings:
|
||||
# The try/except above where has_mappings was defined has
|
||||
# already confirmed that all elements are strings, and that
|
||||
# all contain an equal sign. So we do not need to enclose
|
||||
# the split in another try/except.
|
||||
val = dict([_split(x, '=', 1) for x in val])
|
||||
else:
|
||||
# Stringify any non-string values
|
||||
for idx in range(len(val)):
|
||||
if '=' in val[idx]:
|
||||
raise SaltInvocationError(
|
||||
'Mix of labels with and without values'
|
||||
)
|
||||
return val
|
||||
return val
|
||||
return helpers.translate_labels(val)
|
||||
|
||||
|
||||
def links(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter=':')
|
||||
return helpers.translate_key_val(val, delimiter=':')
|
||||
|
||||
|
||||
def log_driver(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def log_opt(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter='=')
|
||||
return helpers.translate_key_val(val, delimiter='=')
|
||||
|
||||
|
||||
def lxc_conf(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter='=')
|
||||
return helpers.translate_key_val(val, delimiter='=')
|
||||
|
||||
|
||||
def mac_address(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def mem_limit(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bytes(val)
|
||||
return helpers.translate_bytes(val)
|
||||
|
||||
|
||||
def mem_swappiness(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def memswap_limit(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bytes(val)
|
||||
return helpers.translate_bytes(val)
|
||||
|
||||
|
||||
def name(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def network_disabled(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def network_mode(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def oom_kill_disable(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def oom_score_adj(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def pid_mode(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def pids_limit(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def port_bindings(val, **kwargs):
|
||||
@ -531,9 +373,9 @@ def port_bindings(val, **kwargs):
|
||||
if not isinstance(val, dict):
|
||||
if not isinstance(val, list):
|
||||
try:
|
||||
val = _split(val)
|
||||
val = helpers.split(val)
|
||||
except AttributeError:
|
||||
val = _split(str(val))
|
||||
val = helpers.split(str(val))
|
||||
|
||||
for idx in range(len(val)):
|
||||
if not isinstance(val[idx], six.string_types):
|
||||
@ -544,7 +386,7 @@ def port_bindings(val, **kwargs):
|
||||
|
||||
bindings = {}
|
||||
for binding in val:
|
||||
bind_parts = _split(binding, ':')
|
||||
bind_parts = helpers.split(binding, ':')
|
||||
num_bind_parts = len(bind_parts)
|
||||
if num_bind_parts == 1:
|
||||
# Single port or port range being passed through (no
|
||||
@ -556,7 +398,7 @@ def port_bindings(val, **kwargs):
|
||||
)
|
||||
container_port, _, proto = container_port.partition('/')
|
||||
try:
|
||||
start, end = _get_port_range(container_port)
|
||||
start, end = helpers.get_port_range(container_port)
|
||||
except ValueError as exc:
|
||||
# Using __str__() to avoid deprecation warning for using
|
||||
# the message attribute of the ValueError.
|
||||
@ -578,8 +420,10 @@ def port_bindings(val, **kwargs):
|
||||
)
|
||||
container_port, _, proto = bind_parts[1].partition('/')
|
||||
try:
|
||||
cport_start, cport_end = _get_port_range(container_port)
|
||||
hport_start, hport_end = _get_port_range(bind_parts[0])
|
||||
cport_start, cport_end = \
|
||||
helpers.get_port_range(container_port)
|
||||
hport_start, hport_end = \
|
||||
helpers.get_port_range(bind_parts[0])
|
||||
except ValueError as exc:
|
||||
# Using __str__() to avoid deprecation warning for
|
||||
# using the message attribute of the ValueError.
|
||||
@ -600,10 +444,11 @@ def port_bindings(val, **kwargs):
|
||||
elif num_bind_parts == 3:
|
||||
host_ip, host_port = bind_parts[0:2]
|
||||
if validate_ip_addrs:
|
||||
_validate_ip(host_ip)
|
||||
helpers.validate_ip(host_ip)
|
||||
container_port, _, proto = bind_parts[2].partition('/')
|
||||
try:
|
||||
cport_start, cport_end = _get_port_range(container_port)
|
||||
cport_start, cport_end = \
|
||||
helpers.get_port_range(container_port)
|
||||
except ValueError as exc:
|
||||
# Using __str__() to avoid deprecation warning for
|
||||
# using the message attribute of the ValueError.
|
||||
@ -613,7 +458,8 @@ def port_bindings(val, **kwargs):
|
||||
hport_list = [None] * len(cport_list)
|
||||
else:
|
||||
try:
|
||||
hport_start, hport_end = _get_port_range(host_port)
|
||||
hport_start, hport_end = \
|
||||
helpers.get_port_range(host_port)
|
||||
except ValueError as exc:
|
||||
# Using __str__() to avoid deprecation warning for
|
||||
# using the message attribute of the ValueError.
|
||||
@ -678,7 +524,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument
|
||||
'''
|
||||
if not isinstance(val, list):
|
||||
try:
|
||||
val = _split(val)
|
||||
val = helpers.split(val)
|
||||
except AttributeError:
|
||||
if isinstance(val, six.integer_types):
|
||||
val = [val]
|
||||
@ -698,12 +544,13 @@ def ports(val, **kwargs): # pylint: disable=unused-argument
|
||||
'\'{0}\' is not a valid port definition'.format(item)
|
||||
)
|
||||
try:
|
||||
range_start, range_end = _get_port_range(item)
|
||||
range_start, range_end = \
|
||||
helpers.get_port_range(item)
|
||||
except ValueError as exc:
|
||||
# Using __str__() to avoid deprecation warning for using
|
||||
# the "message" attribute of the ValueError.
|
||||
raise SaltInvocationError(exc.__str__())
|
||||
new_ports.update([_get_port_def(x, proto)
|
||||
new_ports.update([helpers.get_port_def(x, proto)
|
||||
for x in range(range_start, range_end + 1)])
|
||||
ordered_new_ports = [
|
||||
port if proto == 'tcp' else (port, proto) for (port, proto) in sorted(
|
||||
@ -715,15 +562,15 @@ def ports(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def privileged(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def publish_all_ports(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def read_only(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def restart_policy(val, **kwargs): # pylint: disable=unused-argument
|
||||
@ -733,8 +580,12 @@ def restart_policy(val, **kwargs): # pylint: disable=unused-argument
|
||||
to make sure the mapped result uses '0' for the count if this optional
|
||||
value was omitted.
|
||||
'''
|
||||
val = _map_vals(val, 'Name', 'MaximumRetryCount', fill='0')
|
||||
# _map_vals() converts the input into a list of dicts, but the API
|
||||
val = helpers.map_vals(
|
||||
val,
|
||||
'Name',
|
||||
'MaximumRetryCount',
|
||||
fill='0')
|
||||
# map_vals() converts the input into a list of dicts, but the API
|
||||
# wants just a dict, so extract the value from the single-element
|
||||
# list. If there was more than one element in the list, then
|
||||
# invalid input was passed (i.e. a comma-separated list, when what
|
||||
@ -754,48 +605,49 @@ def restart_policy(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def security_opt(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def shm_size(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bytes(val)
|
||||
return helpers.translate_bytes(val)
|
||||
|
||||
|
||||
def stdin_open(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def stop_signal(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def stop_timeout(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_int(val)
|
||||
return helpers.translate_int(val)
|
||||
|
||||
|
||||
def storage_opt(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter='=')
|
||||
return helpers.translate_key_val(val, delimiter='=')
|
||||
|
||||
|
||||
def sysctls(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_key_val(val, delimiter='=')
|
||||
return helpers.translate_key_val(val, delimiter='=')
|
||||
|
||||
|
||||
def tmpfs(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_dict(val)
|
||||
return helpers.translate_dict(val)
|
||||
|
||||
|
||||
def tty(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_bool(val)
|
||||
return helpers.translate_bool(val)
|
||||
|
||||
|
||||
def ulimits(val, **kwargs): # pylint: disable=unused-argument
|
||||
val = _translate_stringlist(val)
|
||||
val = helpers.translate_stringlist(val)
|
||||
for idx in range(len(val)):
|
||||
if not isinstance(val[idx], dict):
|
||||
try:
|
||||
ulimit_name, limits = _split(val[idx], '=', 1)
|
||||
comps = _split(limits, ':', 1)
|
||||
ulimit_name, limits = \
|
||||
helpers.split(val[idx], '=', 1)
|
||||
comps = helpers.split(limits, ':', 1)
|
||||
except (AttributeError, ValueError):
|
||||
raise SaltInvocationError(
|
||||
'Ulimit definition \'{0}\' is not in the format '
|
||||
@ -839,18 +691,18 @@ def user(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def userns_mode(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def volume_driver(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_str(val)
|
||||
return helpers.translate_str(val)
|
||||
|
||||
|
||||
def volumes(val, **kwargs): # pylint: disable=unused-argument
|
||||
'''
|
||||
Should be a list of absolute paths
|
||||
'''
|
||||
val = _translate_stringlist(val)
|
||||
val = helpers.translate_stringlist(val)
|
||||
for item in val:
|
||||
if not os.path.isabs(item):
|
||||
raise SaltInvocationError(
|
||||
@ -860,7 +712,7 @@ def volumes(val, **kwargs): # pylint: disable=unused-argument
|
||||
|
||||
|
||||
def volumes_from(val, **kwargs): # pylint: disable=unused-argument
|
||||
return _translate_stringlist(val)
|
||||
return helpers.translate_stringlist(val)
|
||||
|
||||
|
||||
def working_dir(val, **kwargs): # pylint: disable=unused-argument
|

salt/utils/docker/translate/helpers.py (new file, 308 lines)
@@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-
'''
Functions to translate input in the docker CLI format to the format desired
by the API.
'''
# Import Python libs
from __future__ import absolute_import
import os

# Import Salt libs
import salt.utils.data
import salt.utils.network
from salt.exceptions import SaltInvocationError

# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range, zip  # pylint: disable=import-error,redefined-builtin

NOTSET = object()


def split(item, sep=',', maxsplit=-1):
    return [x.strip() for x in item.split(sep, maxsplit)]


def get_port_def(port_num, proto='tcp'):
    '''
    Given a port number and protocol, returns the port definition expected by
    docker-py. For TCP ports this is simply an integer, for UDP ports this is
    (port_num, 'udp').

    port_num can also be a string in the format 'port_num/udp'. If so, the
    "proto" argument will be ignored. The reason we need to be able to pass in
    the protocol separately is because this function is sometimes invoked on
    data derived from a port range (e.g. '2222-2223/udp'). In these cases the
    protocol has already been stripped off and the port range resolved into
    the start and end of the range, and get_port_def() is invoked once for
    each port number in that range. So, rather than munge udp ports back into
    strings before passing them to this function, the function will see if it
    has a string and use the protocol from it if present.

    This function does not catch the TypeError or ValueError which would be
    raised if the port number is non-numeric. This function either needs to be
    run on known good input, or should be run within a try/except that catches
    these two exceptions.
    '''
    try:
        port_num, _, port_num_proto = port_num.partition('/')
    except AttributeError:
        pass
    else:
        if port_num_proto:
            proto = port_num_proto
    try:
        if proto.lower() == 'udp':
            return int(port_num), 'udp'
    except AttributeError:
        pass
    return int(port_num)
|
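A quick usage sketch of get_port_def() as defined above (the function name and import path come from this diff; the port values are illustrative):

.. code-block:: python

    from salt.utils.docker.translate.helpers import get_port_def

    get_port_def(80)            # 80 (TCP ports stay plain integers)
    get_port_def('443', 'udp')  # (443, 'udp')
    get_port_def('2222/udp')    # (2222, 'udp'), protocol taken from the string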

def get_port_range(port_def):
    '''
    Given a port number or range, return a start and end to that range. Port
    ranges are defined as a string containing two numbers separated by a dash
    (e.g. '4505-4506').

    A ValueError will be raised if bad input is provided.
    '''
    if isinstance(port_def, six.integer_types):
        # Single integer, start/end of range is the same
        return port_def, port_def
    try:
        comps = [int(x) for x in split(port_def, '-')]
        if len(comps) == 1:
            range_start = range_end = comps[0]
        else:
            range_start, range_end = comps
        if range_start > range_end:
            raise ValueError('start > end')
    except (TypeError, ValueError) as exc:
        if exc.__str__() == 'start > end':
            msg = (
                'Start of port range ({0}) cannot be greater than end of '
                'port range ({1})'.format(range_start, range_end)
            )
        else:
            msg = '\'{0}\' is non-numeric or an invalid port range'.format(
                port_def
            )
        raise ValueError(msg)
    else:
        return range_start, range_end
|
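A short usage sketch of get_port_range() (illustrative values only):

.. code-block:: python

    from salt.utils.docker.translate.helpers import get_port_range

    get_port_range(8080)          # (8080, 8080)
    get_port_range('4505-4506')   # (4505, 4506)
    get_port_range('4506-4505')   # raises ValueError (start > end)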

def map_vals(val, *names, **extra_opts):
    '''
    Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list
    of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function
    provides common code to handle these instances.
    '''
    fill = extra_opts.pop('fill', NOTSET)
    expected_num_elements = len(names)
    val = translate_stringlist(val)
    for idx, item in enumerate(val):
        if not isinstance(item, dict):
            elements = [x.strip() for x in item.split(':')]
            num_elements = len(elements)
            if num_elements < expected_num_elements:
                if fill is NOTSET:
                    raise SaltInvocationError(
                        '\'{0}\' contains {1} value(s) (expected {2})'.format(
                            item, num_elements, expected_num_elements
                        )
                    )
                elements.extend([fill] * (expected_num_elements - num_elements))
            elif num_elements > expected_num_elements:
                raise SaltInvocationError(
                    '\'{0}\' contains {1} value(s) (expected {2})'.format(
                        item,
                        num_elements,
                        expected_num_elements if fill is NOTSET
                            else 'up to {0}'.format(expected_num_elements)
                    )
                )
            val[idx] = dict(zip(names, elements))
    return val
|
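A sketch of how map_vals() behaves for the two common cases (input values are illustrative):

.. code-block:: python

    from salt.utils.docker.translate.helpers import map_vals

    map_vals(['/dev/sda:1048576'], 'Path', 'Rate')
    # [{'Path': '/dev/sda', 'Rate': '1048576'}]

    # 'fill' pads missing trailing values instead of raising an error
    map_vals('on-failure', 'Name', 'MaximumRetryCount', fill='0')
    # [{'Name': 'on-failure', 'MaximumRetryCount': '0'}]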

def validate_ip(val):
    try:
        if not salt.utils.network.is_ip(val):
            raise SaltInvocationError(
                '\'{0}\' is not a valid IP address'.format(val)
            )
    except RuntimeError:
        pass


def validate_subnet(val):
    try:
        if not salt.utils.network.is_subnet(val):
            raise SaltInvocationError(
                '\'{0}\' is not a valid subnet'.format(val)
            )
    except RuntimeError:
        pass


def translate_str(val):
    return str(val) if not isinstance(val, six.string_types) else val


def translate_int(val):
    if not isinstance(val, six.integer_types):
        try:
            val = int(val)
        except (TypeError, ValueError):
            raise SaltInvocationError('\'{0}\' is not an integer'.format(val))
    return val


def translate_bool(val):
    return bool(val) if not isinstance(val, bool) else val


def translate_dict(val):
    '''
    Not really translating, just raising an exception if it's not a dict
    '''
    if not isinstance(val, dict):
        raise SaltInvocationError('\'{0}\' is not a dictionary'.format(val))
    return val


def translate_command(val):
    '''
    Input should either be a single string, or a list of strings. This is used
    for the two args that deal with commands ("command" and "entrypoint").
    '''
    if isinstance(val, six.string_types):
        return val
    elif isinstance(val, list):
        for idx in range(len(val)):
            if not isinstance(val[idx], six.string_types):
                val[idx] = str(val[idx])
    else:
        # Make sure we have a string
        val = str(val)
    return val
|
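For example (illustrative input):

.. code-block:: python

    from salt.utils.docker.translate.helpers import translate_command

    translate_command('sleep 600')                # 'sleep 600'
    translate_command(['/bin/sh', '-c', 'true'])  # list returned as-is
    translate_command(12345)                      # '12345' (coerced to a string)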

def translate_bytes(val):
    '''
    These values can be expressed as an integer number of bytes, or a string
    expression (i.e. 100mb, 1gb, etc.).
    '''
    try:
        val = int(val)
    except (TypeError, ValueError):
        if not isinstance(val, six.string_types):
            val = str(val)
    return val


def translate_stringlist(val):
    '''
    On the CLI, these are passed as multiple instances of a given CLI option.
    In Salt, we accept these as a comma-delimited list but the API expects a
    Python list. This function accepts input and returns it back as a Python
    list of strings. If the input is a string which is a comma-separated list
    of items, split that string and return it.
    '''
    if not isinstance(val, list):
        try:
            val = split(val)
        except AttributeError:
            val = split(str(val))
    for idx in range(len(val)):
        if not isinstance(val[idx], six.string_types):
            val[idx] = str(val[idx])
    return val
|
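For example (illustrative input):

.. code-block:: python

    from salt.utils.docker.translate.helpers import translate_stringlist

    translate_stringlist('foo,bar, baz')  # ['foo', 'bar', 'baz']
    translate_stringlist(['foo', 2])      # ['foo', '2'] (non-strings stringified)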

def translate_device_rates(val, numeric_rate=True):
    '''
    CLI input is a list of PATH:RATE pairs, but the API expects a list of
    dictionaries in the format [{'Path': path, 'Rate': rate}]
    '''
    val = map_vals(val, 'Path', 'Rate')
    for idx in range(len(val)):
        try:
            is_abs = os.path.isabs(val[idx]['Path'])
        except AttributeError:
            is_abs = False
        if not is_abs:
            raise SaltInvocationError(
                'Path \'{Path}\' is not absolute'.format(**val[idx])
            )

        # Attempt to convert to an integer. Will fail if rate was specified as
        # a shorthand (e.g. 1mb), this is OK as we will check to make sure the
        # value is an integer below if that is what is required.
        try:
            val[idx]['Rate'] = int(val[idx]['Rate'])
        except (TypeError, ValueError):
            pass

        if numeric_rate:
            try:
                val[idx]['Rate'] = int(val[idx]['Rate'])
            except ValueError:
                raise SaltInvocationError(
                    'Rate \'{Rate}\' for path \'{Path}\' is '
                    'non-numeric'.format(**val[idx])
                )
    return val
|
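A sketch of the two modes (device paths and rates here are made up):

.. code-block:: python

    from salt.utils.docker.translate.helpers import translate_device_rates

    # bps-style arguments accept shorthand rates, so numeric_rate=False
    translate_device_rates(['/dev/sda:1mb'], numeric_rate=False)
    # [{'Path': '/dev/sda', 'Rate': '1mb'}]

    # IOPS-style arguments require integer rates
    translate_device_rates(['/dev/sda:1000'], numeric_rate=True)
    # [{'Path': '/dev/sda', 'Rate': 1000}]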

def translate_key_val(val, delimiter='='):
    '''
    CLI input is a list of key/val pairs, but the API expects a dictionary in
    the format {key: val}
    '''
    if isinstance(val, dict):
        return val
    val = translate_stringlist(val)
    new_val = {}
    for item in val:
        try:
            lvalue, rvalue = split(item, delimiter, 1)
        except (AttributeError, TypeError, ValueError):
            raise SaltInvocationError(
                '\'{0}\' is not a key{1}value pair'.format(item, delimiter)
            )
        new_val[lvalue] = rvalue
    return new_val
|
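For example (illustrative input; extra_hosts passes delimiter=':' as shown earlier in this diff):

.. code-block:: python

    from salt.utils.docker.translate.helpers import translate_key_val

    translate_key_val('VAR1=value1,VAR2=value2')
    # {'VAR1': 'value1', 'VAR2': 'value2'}

    translate_key_val(['web1:10.9.8.7'], delimiter=':')
    # {'web1': '10.9.8.7'}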

def translate_labels(val):
    '''
    Can either be a list of label names, or a list of name=value pairs. The API
    can accept either a list of label names or a dictionary mapping names to
    values, so the value we translate will be different depending on the input.
    '''
    if not isinstance(val, dict):
        if not isinstance(val, list):
            val = split(val)
        new_val = {}
        for item in val:
            if isinstance(item, dict):
                if len(item) != 1:
                    raise SaltInvocationError('Invalid label(s)')
                key = next(iter(item))
                val = item[key]
            else:
                try:
                    key, val = split(item, '=', 1)
                except ValueError:
                    key = item
                    val = ''
            if not isinstance(key, six.string_types):
                key = str(key)
            if not isinstance(val, six.string_types):
                val = str(val)
            new_val[key] = val
        val = new_val
    return val
|
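For example (illustrative labels):

.. code-block:: python

    from salt.utils.docker.translate.helpers import translate_labels

    translate_labels(['foo', 'bar=baz'])   # {'foo': '', 'bar': 'baz'}
    translate_labels({'app': 'web'})       # dicts pass through unchanged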

salt/utils/docker/translate/network.py (new file, 142 lines)
@@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-
'''
Functions to translate input for network creation
'''
# Import Python libs
from __future__ import absolute_import

# Import Salt libs
from salt.exceptions import SaltInvocationError

# Import 3rd-party libs
from salt.ext import six

# Import helpers
from . import helpers

ALIASES = {
    'driver_opt': 'options',
    'driver_opts': 'options',
    'ipv6': 'enable_ipv6',
}
IPAM_ALIASES = {
    'ip_range': 'iprange',
    'aux_address': 'aux_addresses',
}
# ALIASES is a superset of IPAM_ALIASES
ALIASES.update(IPAM_ALIASES)
ALIASES_REVMAP = dict([(y, x) for x, y in six.iteritems(ALIASES)])

DEFAULTS = {'check_duplicate': True}


def _post_processing(kwargs, skip_translate, invalid):  # pylint: disable=unused-argument
    '''
    Additional network-specific post-translation processing
    '''
    # If any defaults were not explicitly passed, add them
    for item in DEFAULTS:
        if item not in kwargs:
            kwargs[item] = DEFAULTS[item]


# Functions below must match names of docker-py arguments
def driver(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_str(val)


def options(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_key_val(val, delimiter='=')


def ipam(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_dict(val)


def check_duplicate(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def internal(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def labels(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_labels(val)


def enable_ipv6(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def attachable(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def ingress(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


# IPAM args
def ipam_driver(val, **kwargs):  # pylint: disable=unused-argument
    return driver(val, **kwargs)


def ipam_opts(val, **kwargs):  # pylint: disable=unused-argument
    return options(val, **kwargs)


def ipam_pools(val, **kwargs):  # pylint: disable=unused-argument
    if not hasattr(val, '__iter__') \
            or not all(isinstance(x, dict) for x in val):
        # Can't do a simple dictlist check because each dict may have more
        # than one element.
        raise SaltInvocationError('ipam_pools must be a list of dictionaries')
    skip_translate = kwargs.get('skip_translate', ())
    if not (skip_translate is True or 'ipam_pools' in skip_translate):
        _globals = globals()
        for ipam_dict in val:
            for key in list(ipam_dict):
                if skip_translate is not True and key in skip_translate:
                    continue
                if key in IPAM_ALIASES:
                    # Make sure we resolve aliases, since this wouldn't have
                    # been done within the individual IPAM dicts
                    ipam_dict[IPAM_ALIASES[key]] = ipam_dict.pop(key)
                    key = IPAM_ALIASES[key]
                if key in _globals:
                    ipam_dict[key] = _globals[key](ipam_dict[key])
    return val


def subnet(val, **kwargs):  # pylint: disable=unused-argument
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    val = helpers.translate_str(val)
    if validate_ip_addrs:
        helpers.validate_subnet(val)
    return val


def iprange(val, **kwargs):  # pylint: disable=unused-argument
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    val = helpers.translate_str(val)
    if validate_ip_addrs:
        helpers.validate_subnet(val)
    return val


def gateway(val, **kwargs):  # pylint: disable=unused-argument
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    val = helpers.translate_str(val)
    if validate_ip_addrs:
        helpers.validate_ip(val)
    return val


def aux_addresses(val, **kwargs):  # pylint: disable=unused-argument
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    val = helpers.translate_key_val(val, delimiter='=')
    if validate_ip_addrs:
        for address in six.itervalues(val):
            helpers.validate_ip(address)
    return val
|
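An illustrative sketch of how the network translators above compose (the addresses are made up; function names and the module path come from this diff):

.. code-block:: python

    from salt.utils.docker.translate import network as translate_network

    translate_network.subnet('10.0.20.0/24')   # validated and returned as a string
    translate_network.gateway('10.0.20.1')     # validated as an IP address

    # ipam_pools resolves aliases (ip_range -> iprange) and validates each value
    translate_network.ipam_pools([{'subnet': '10.0.20.0/24',
                                   'ip_range': '10.0.20.128/25'}])
    # [{'subnet': '10.0.20.0/24', 'iprange': '10.0.20.128/25'}]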
@@ -4,100 +4,102 @@ Functions for working with Mako templates
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import urlparse
|
||||
try:
|
||||
from mako.lookup import TemplateCollection, TemplateLookup # pylint: disable=import-error,3rd-party-module-not-gated
|
||||
HAS_MAKO = True
|
||||
except ImportError:
|
||||
HAS_MAKO = False
|
||||
|
||||
# Import third party libs
|
||||
# pylint: disable=import-error,no-name-in-module
|
||||
from salt.ext.six.moves.urllib.parse import urlparse
|
||||
# pylint: enable=import-error,no-name-in-module
|
||||
from mako.lookup import TemplateCollection, TemplateLookup # pylint: disable=import-error,3rd-party-module-not-gated
|
||||
if HAS_MAKO:
|
||||
# Import Python libs
|
||||
import os
|
||||
|
||||
# Import salt libs
|
||||
import salt.fileclient
|
||||
import salt.utils.url
|
||||
# Import third-party libs
|
||||
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=import-error,no-name-in-module
|
||||
|
||||
# Import salt libs
|
||||
import salt.fileclient
|
||||
import salt.utils.url
|
||||
|
||||
class SaltMakoTemplateLookup(TemplateCollection):
|
||||
"""
|
||||
Look up Mako template files using file:// or salt:// URLs with <%include/>
|
||||
or <%namespace/>.
|
||||
class SaltMakoTemplateLookup(TemplateCollection):
|
||||
"""
|
||||
Look up Mako template files using file:// or salt:// URLs with <%include/>
|
||||
or <%namespace/>.
|
||||
|
||||
(1) Look up mako template files on local file system via files://... URL.
|
||||
Make sure mako template file is present locally on minion beforehand.
|
||||
(1) Look up mako template files on local file system via files://... URL.
|
||||
Make sure mako template file is present locally on minion beforehand.
|
||||
|
||||
Examples:
|
||||
<%include file="file:///etc/salt/lib/templates/sls-parts.mako"/>
|
||||
<%namespace file="file:///etc/salt/lib/templates/utils.mako" import="helper"/>
|
||||
Examples:
|
||||
<%include file="file:///etc/salt/lib/templates/sls-parts.mako"/>
|
||||
<%namespace file="file:///etc/salt/lib/templates/utils.mako" import="helper"/>
|
||||
|
||||
(2) Look up mako template files on Salt master via salt://... URL.
|
||||
If URL is a relative path (without an URL scheme) then assume it's relative
|
||||
to the directory of the salt file that's doing the lookup. If URL is an absolute
|
||||
path then it's treated as if it has been prefixed with salt://.
|
||||
(2) Look up mako template files on Salt master via salt://... URL.
|
||||
If URL is a relative path (without an URL scheme) then assume it's relative
|
||||
to the directory of the salt file that's doing the lookup. If URL is an absolute
|
||||
path then it's treated as if it has been prefixed with salt://.
|
||||
|
||||
Examples::
|
||||
<%include file="templates/sls-parts.mako"/>
|
||||
<%include file="salt://lib/templates/sls-parts.mako"/>
|
||||
<%include file="/lib/templates/sls-parts.mako"/> ##-- treated as salt://
|
||||
Examples::
|
||||
<%include file="templates/sls-parts.mako"/>
|
||||
<%include file="salt://lib/templates/sls-parts.mako"/>
|
||||
<%include file="/lib/templates/sls-parts.mako"/> ##-- treated as salt://
|
||||
|
||||
<%namespace file="templates/utils.mako"/>
|
||||
<%namespace file="salt://lib/templates/utils.mako" import="helper"/>
|
||||
<%namespace file="/lib/templates/utils.mako" import="helper"/> ##-- treated as salt://
|
||||
<%namespace file="templates/utils.mako"/>
|
||||
<%namespace file="salt://lib/templates/utils.mako" import="helper"/>
|
||||
<%namespace file="/lib/templates/utils.mako" import="helper"/> ##-- treated as salt://
|
||||
|
||||
"""
|
||||
"""
|
||||
|
||||
def __init__(self, opts, saltenv='base', pillar_rend=False):
|
||||
self.opts = opts
|
||||
self.saltenv = saltenv
|
||||
self._file_client = None
|
||||
self.pillar_rend = pillar_rend
|
||||
self.lookup = TemplateLookup(directories='/')
|
||||
self.cache = {}
|
||||
def __init__(self, opts, saltenv='base', pillar_rend=False):
|
||||
self.opts = opts
|
||||
self.saltenv = saltenv
|
||||
self._file_client = None
|
||||
self.pillar_rend = pillar_rend
|
||||
self.lookup = TemplateLookup(directories='/')
|
||||
self.cache = {}
|
||||
|
||||
def file_client(self):
|
||||
'''
|
||||
Setup and return file_client
|
||||
'''
|
||||
if not self._file_client:
|
||||
self._file_client = salt.fileclient.get_file_client(
|
||||
self.opts, self.pillar_rend)
|
||||
return self._file_client
|
||||
def file_client(self):
|
||||
'''
|
||||
Setup and return file_client
|
||||
'''
|
||||
if not self._file_client:
|
||||
self._file_client = salt.fileclient.get_file_client(
|
||||
self.opts, self.pillar_rend)
|
||||
return self._file_client
|
||||
|
||||
def adjust_uri(self, uri, filename):
|
||||
scheme = urlparse(uri).scheme
|
||||
if scheme in ('salt', 'file'):
|
||||
return uri
|
||||
elif scheme:
|
||||
raise ValueError(
|
||||
'Unsupported URL scheme({0}) in {1}'.format(
|
||||
scheme, uri
|
||||
def adjust_uri(self, uri, filename):
|
||||
scheme = urlparse(uri).scheme
|
||||
if scheme in ('salt', 'file'):
|
||||
return uri
|
||||
elif scheme:
|
||||
raise ValueError(
|
||||
'Unsupported URL scheme({0}) in {1}'.format(
|
||||
scheme, uri
|
||||
)
|
||||
)
|
||||
)
|
||||
return self.lookup.adjust_uri(uri, filename)
|
||||
return self.lookup.adjust_uri(uri, filename)
|
||||
|
||||
def get_template(self, uri, relativeto=None):
|
||||
if uri.startswith("file://"):
|
||||
proto = "file://"
|
||||
searchpath = "/"
|
||||
salt_uri = uri
|
||||
else:
|
||||
proto = "salt://"
|
||||
if self.opts['file_client'] == 'local':
|
||||
searchpath = self.opts['file_roots'][self.saltenv]
|
||||
def get_template(self, uri, relativeto=None):
|
||||
if uri.startswith("file://"):
|
||||
proto = "file://"
|
||||
searchpath = "/"
|
||||
salt_uri = uri
|
||||
else:
|
||||
searchpath = [os.path.join(self.opts['cachedir'],
|
||||
'files',
|
||||
self.saltenv)]
|
||||
salt_uri = uri if uri.startswith(proto) else salt.utils.url.create(uri)
|
||||
self.cache_file(salt_uri)
|
||||
proto = "salt://"
|
||||
if self.opts['file_client'] == 'local':
|
||||
searchpath = self.opts['file_roots'][self.saltenv]
|
||||
else:
|
||||
searchpath = [os.path.join(self.opts['cachedir'],
|
||||
'files',
|
||||
self.saltenv)]
|
||||
salt_uri = uri if uri.startswith(proto) else salt.utils.url.create(uri)
|
||||
self.cache_file(salt_uri)
|
||||
|
||||
self.lookup = TemplateLookup(directories=searchpath)
|
||||
return self.lookup.get_template(salt_uri[len(proto):])
|
||||
self.lookup = TemplateLookup(directories=searchpath)
|
||||
return self.lookup.get_template(salt_uri[len(proto):])
|
||||
|
||||
def cache_file(self, fpath):
|
||||
if fpath not in self.cache:
|
||||
self.cache[fpath] = self.file_client().get_file(fpath,
|
||||
'',
|
||||
True,
|
||||
self.saltenv)
|
||||
def cache_file(self, fpath):
|
||||
if fpath not in self.cache:
|
||||
self.cache[fpath] = self.file_client().get_file(fpath,
|
||||
'',
|
||||
True,
|
||||
self.saltenv)
|
||||
|
@@ -263,6 +263,33 @@ def is_ipv6(ip):
        return False


def is_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 or IPv6 subnet
    '''
    return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr)


def is_ipv4_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 subnet
    '''
    try:
        return '/' in cidr and bool(ipaddress.IPv4Network(cidr))
    except Exception:
        return False


def is_ipv6_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv6 subnet
    '''
    try:
        return '/' in cidr and bool(ipaddress.IPv6Network(cidr))
    except Exception:
        return False
|
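For example (illustrative CIDRs):

.. code-block:: python

    import salt.utils.network

    salt.utils.network.is_subnet('10.0.20.0/24')  # True
    salt.utils.network.is_subnet('fe80::/64')     # True
    salt.utils.network.is_subnet('10.0.20.1')     # False (no prefix length)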
||||
|
||||
|
||||
@jinja_filter('is_ip')
|
||||
def is_ip_filter(ip, options=None):
|
||||
'''
|
||||
|
@@ -48,130 +48,130 @@ msg = raetlane.wait(share=track, timeout=5.0)
|
||||
if not msg:
|
||||
raise ValueError("Timed out out waiting for response")
|
||||
'''
|
||||
# pylint: disable=3rd-party-module-not-gated
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import time
|
||||
|
||||
# Import Salt Libs
|
||||
try:
|
||||
from raet import raeting, nacling
|
||||
from raet.lane.stacking import LaneStack
|
||||
from raet.lane.yarding import RemoteYard
|
||||
HAS_RAET = True
|
||||
except ImportError:
|
||||
HAS_RAET = False
|
||||
|
||||
import logging
|
||||
import salt.utils.kinds as kinds
|
||||
if HAS_RAET:
|
||||
# pylint: disable=3rd-party-module-not-gated
|
||||
import time
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
# Import Salt Libs
|
||||
|
||||
from raet import raeting, nacling
|
||||
from raet.lane.stacking import LaneStack
|
||||
from raet.lane.yarding import RemoteYard
|
||||
import logging
|
||||
import salt.utils.kinds as kinds
|
||||
|
||||
# Module globals for default shared LaneStack for a process.
|
||||
rx_msgs = {} # module global dict of deques one for each recipient of msgs
|
||||
lane_stack = None # module global that holds raet LaneStack
|
||||
remote_yard = None # module global that holds raet remote Yard
|
||||
master_estate_name = None # module global of motivating master estate name
|
||||
master_yard_name = None # module global of motivating master yard name
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Module globals for default shared LaneStack for a process.
|
||||
rx_msgs = {} # module global dict of deques one for each recipient of msgs
|
||||
lane_stack = None # module global that holds raet LaneStack
|
||||
remote_yard = None # module global that holds raet remote Yard
|
||||
master_estate_name = None # module global of motivating master estate name
|
||||
master_yard_name = None # module global of motivating master yard name
|
||||
|
||||
def prep(opts, ryn='manor'):
|
||||
'''
|
||||
required items in opts are keys
|
||||
'id'
|
||||
'__role'
|
||||
'sock_dir'
|
||||
def prep(opts, ryn='manor'):
|
||||
'''
|
||||
required items in opts are keys
|
||||
'id'
|
||||
'__role'
|
||||
'sock_dir'
|
||||
|
||||
ryn is the remote yard name to communicate with
|
||||
each use must call raetlane.prep() to ensure lanestack is setup
|
||||
'''
|
||||
if not lane_stack:
|
||||
_setup(opts=opts, ryn=ryn)
|
||||
ryn is the remote yard name to communicate with
|
||||
each use must call raetlane.prep() to ensure lanestack is setup
|
||||
'''
|
||||
if not lane_stack:
|
||||
_setup(opts=opts, ryn=ryn)
|
||||
|
||||
def _setup(opts, ryn='manor'):
|
||||
'''
|
||||
Setup the LaneStack lane_stack and RemoteYard lane_remote_yard global
|
||||
'''
|
||||
global lane_stack, remote_yard # pylint: disable=W0602
|
||||
|
||||
def _setup(opts, ryn='manor'):
|
||||
'''
|
||||
Setup the LaneStack lane_stack and RemoteYard lane_remote_yard global
|
||||
'''
|
||||
global lane_stack, remote_yard # pylint: disable=W0602
|
||||
role = opts.get('id')
|
||||
if not role:
|
||||
emsg = ("Missing role required to setup LaneStack.")
|
||||
log.error(emsg + "\n")
|
||||
raise ValueError(emsg)
|
||||
|
||||
role = opts.get('id')
|
||||
if not role:
|
||||
emsg = ("Missing role required to setup LaneStack.")
|
||||
log.error(emsg + "\n")
|
||||
raise ValueError(emsg)
|
||||
kind = opts.get('__role') # application kind 'master', 'minion', etc
|
||||
if kind not in kinds.APPL_KINDS:
|
||||
emsg = ("Invalid application kind = '{0}' for LaneStack.".format(kind))
|
||||
log.error(emsg + "\n")
|
||||
raise ValueError(emsg)
|
||||
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
|
||||
kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
|
||||
lanename = 'master'
|
||||
elif kind == [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
|
||||
kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]:
|
||||
lanename = "{0}_{1}".format(role, kind)
|
||||
else:
|
||||
emsg = ("Unsupported application kind '{0}' for LaneStack.".format(kind))
|
||||
log.error(emsg + '\n')
|
||||
raise ValueError(emsg)
|
||||
|
||||
kind = opts.get('__role') # application kind 'master', 'minion', etc
|
||||
if kind not in kinds.APPL_KINDS:
|
||||
emsg = ("Invalid application kind = '{0}' for LaneStack.".format(kind))
|
||||
log.error(emsg + "\n")
|
||||
raise ValueError(emsg)
|
||||
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
|
||||
kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
|
||||
lanename = 'master'
|
||||
elif kind == [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
|
||||
kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]:
|
||||
lanename = "{0}_{1}".format(role, kind)
|
||||
else:
|
||||
emsg = ("Unsupported application kind '{0}' for LaneStack.".format(kind))
|
||||
log.error(emsg + '\n')
|
||||
raise ValueError(emsg)
|
||||
name = 'lanestack' + nacling.uuid(size=18)
|
||||
lane_stack = LaneStack(name=name,
|
||||
lanename=lanename,
|
||||
sockdirpath=opts['sock_dir'])
|
||||
|
||||
name = 'lanestack' + nacling.uuid(size=18)
|
||||
lane_stack = LaneStack(name=name,
|
||||
lanename=lanename,
|
||||
sockdirpath=opts['sock_dir'])
|
||||
lane_stack.Pk = raeting.PackKind.pack.value
|
||||
log.debug("Created new LaneStack and local Yard named {0} at {1}\n"
|
||||
"".format(lane_stack.name, lane_stack.ha))
|
||||
remote_yard = RemoteYard(stack=lane_stack,
|
||||
name=ryn,
|
||||
lanename=lanename,
|
||||
dirpath=opts['sock_dir'])
|
||||
lane_stack.addRemote(remote_yard)
|
||||
log.debug("Added to LaneStack {0} remote Yard named {1} at {2}\n"
|
||||
"".format(lane_stack.name, remote_yard.name, remote_yard.ha))
|
||||
|
||||
lane_stack.Pk = raeting.PackKind.pack.value
|
||||
log.debug("Created new LaneStack and local Yard named {0} at {1}\n"
|
||||
"".format(lane_stack.name, lane_stack.ha))
|
||||
remote_yard = RemoteYard(stack=lane_stack,
|
||||
name=ryn,
|
||||
lanename=lanename,
|
||||
dirpath=opts['sock_dir'])
|
||||
lane_stack.addRemote(remote_yard)
|
||||
log.debug("Added to LaneStack {0} remote Yard named {1} at {2}\n"
|
||||
"".format(lane_stack.name, remote_yard.name, remote_yard.ha))
|
||||
def transmit(msg):
|
||||
'''
|
||||
Sends msg to remote_yard
|
||||
'''
|
||||
lane_stack.transmit(msg, remote_yard.uid)
|
||||
|
||||
def service():
|
||||
'''
|
||||
Service the lane_stack and move any received messages into their associated
|
||||
deques in rx_msgs keyed by the destination share in the msg route dict
|
||||
'''
|
||||
lane_stack.serviceAll()
|
||||
while lane_stack.rxMsgs:
|
||||
msg, sender = lane_stack.rxMsgs.popleft()
|
||||
rx_msgs[msg['route']['dst'][2]] = msg
|
||||
|
||||
def transmit(msg):
|
||||
'''
|
||||
Sends msg to remote_yard
|
||||
'''
|
||||
lane_stack.transmit(msg, remote_yard.uid)
|
||||
def receive(share):
|
||||
'''
|
||||
Returns first message from deque at key given by share in rx_msgs if any
|
||||
otherwise returns None
|
||||
'''
|
||||
service()
|
||||
if share in rx_msgs:
|
||||
if rx_msgs[share]:
|
||||
return rx_msgs[share].popleft()
|
||||
return None
|
||||
|
||||
|
||||
def service():
|
||||
'''
|
||||
Service the lane_stack and move any received messages into their associated
|
||||
deques in rx_msgs keyed by the destination share in the msg route dict
|
||||
'''
|
||||
lane_stack.serviceAll()
|
||||
while lane_stack.rxMsgs:
|
||||
msg, sender = lane_stack.rxMsgs.popleft()
|
||||
rx_msgs[msg['route']['dst'][2]] = msg
|
||||
|
||||
|
||||
def receive(share):
|
||||
'''
|
||||
Returns first message from deque at key given by share in rx_msgs if any
|
||||
otherwise returns None
|
||||
'''
|
||||
service()
|
||||
if share in rx_msgs:
|
||||
if rx_msgs[share]:
|
||||
return rx_msgs[share].popleft()
|
||||
return None
|
||||
|
||||
|
||||
def wait(share, timeout=0.0, delay=0.01):
|
||||
'''
|
||||
Blocks until receives a msg addressed to share or timeout
|
||||
Return msg or None if timed out
|
||||
Delay is sleep time between services
|
||||
'''
|
||||
start = time.time()
|
||||
while True:
|
||||
msg = receive(share)
|
||||
if msg:
|
||||
return msg
|
||||
time.sleep(delay)
|
||||
if timeout > 0.0 and (time.time() - start) >= timeout:
|
||||
return None
|
||||
def wait(share, timeout=0.0, delay=0.01):
|
||||
'''
|
||||
Blocks until receives a msg addressed to share or timeout
|
||||
Return msg or None if timed out
|
||||
Delay is sleep time between services
|
||||
'''
|
||||
start = time.time()
|
||||
while True:
|
||||
msg = receive(share)
|
||||
if msg:
|
||||
return msg
|
||||
time.sleep(delay)
|
||||
if timeout > 0.0 and (time.time() - start) >= timeout:
|
||||
return None
|
||||
|
@@ -20,6 +20,56 @@ if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
etc_passwd="\
|
||||
root:x:0:0:root:/root:/bin/sh
|
||||
daemon:x:1:1:daemon:/usr/sbin:/bin/false
|
||||
bin:x:2:2:bin:/bin:/bin/false
|
||||
sys:x:3:3:sys:/dev:/bin/false
|
||||
sync:x:4:100:sync:/bin:/bin/sync
|
||||
mail:x:8:8:mail:/var/spool/mail:/bin/false
|
||||
www-data:x:33:33:www-data:/var/www:/bin/false
|
||||
operator:x:37:37:Operator:/var:/bin/false
|
||||
nobody:x:65534:65534:nobody:/home:/bin/false"
|
||||
|
||||
etc_group="\
|
||||
root:x:0:
|
||||
daemon:x:1:
|
||||
bin:x:2:
|
||||
sys:x:3:
|
||||
adm:x:4:
|
||||
tty:x:5:
|
||||
disk:x:6:
|
||||
lp:x:7:
|
||||
mail:x:8:
|
||||
kmem:x:9:
|
||||
wheel:x:10:root
|
||||
cdrom:x:11:
|
||||
dialout:x:18:
|
||||
floppy:x:19:
|
||||
video:x:28:
|
||||
audio:x:29:
|
||||
tape:x:32:
|
||||
www-data:x:33:
|
||||
operator:x:37:
|
||||
utmp:x:43:
|
||||
plugdev:x:46:
|
||||
staff:x:50:
|
||||
lock:x:54:
|
||||
netdev:x:82:
|
||||
users:x:100:
|
||||
nogroup:x:65534:"
|
||||
|
||||
etc_shadow="\
|
||||
root::10933:0:99999:7:::
|
||||
daemon:*:10933:0:99999:7:::
|
||||
bin:*:10933:0:99999:7:::
|
||||
sys:*:10933:0:99999:7:::
|
||||
sync:*:10933:0:99999:7:::
|
||||
mail:*:10933:0:99999:7:::
|
||||
www-data:*:10933:0:99999:7:::
|
||||
operator:*:10933:0:99999:7:::
|
||||
nobody:*:10933:0:99999:7:::"
|
||||
|
||||
mkdir -p "$rootfsDir/bin"
|
||||
rm -f "$rootfsDir/bin/busybox" # just in case
|
||||
cp "$busybox" "$rootfsDir/bin/busybox"
|
||||
@@ -35,6 +85,12 @@ cp "$busybox" "$rootfsDir/bin/busybox"
|
||||
mkdir -p "$(dirname "$module")"
|
||||
ln -sf /bin/busybox "$module"
|
||||
done
|
||||
# Make sure the image has the needed files to make users work
|
||||
mkdir etc
|
||||
echo "$etc_passwd" >etc/passwd
|
||||
echo "$etc_group" >etc/group
|
||||
echo "$etc_shadow" >etc/shadow
|
||||
# Import the image
|
||||
tar --numeric-owner -cf- . | docker import --change "CMD sleep 300" - "$imageName"
|
||||
docker run --rm -i "$imageName" /bin/true
|
||||
exit $?
|
||||
|

(File diff suppressed because it is too large)

tests/integration/states/test_docker_network.py (new file, 505 lines)
@@ -0,0 +1,505 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Integration tests for the docker_network states
|
||||
'''
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import errno
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from tests.support.unit import skipIf
|
||||
from tests.support.case import ModuleCase
|
||||
from tests.support.docker import with_network, random_name
|
||||
from tests.support.paths import FILES, TMP
|
||||
from tests.support.helpers import destructiveTest, requires_system_grains
|
||||
from tests.support.mixins import SaltReturnAssertsMixin
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.files
|
||||
import salt.utils.network
|
||||
import salt.utils.path
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
IMAGE_NAME = random_name(prefix='salt_busybox_')
|
||||
IPV6_ENABLED = bool(salt.utils.network.ip_addrs6(include_loopback=True))
|
||||
|
||||
|
||||
def network_name(func):
|
||||
'''
|
||||
Generate a randomized name for a network and clean it up afterward
|
||||
'''
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
name = random_name(prefix='salt_net_')
|
||||
try:
|
||||
return func(self, name, *args, **kwargs)
|
||||
finally:
|
||||
self.run_function(
|
||||
'docker.disconnect_all_containers_from_network', [name])
|
||||
try:
|
||||
self.run_function('docker.remove_network', [name])
|
||||
except CommandExecutionError as exc:
|
||||
if 'No such network' not in exc.__str__():
|
||||
raise
|
||||
return wrapper
|
||||
|
||||
|
||||
def container_name(func):
|
||||
'''
|
||||
Generate a randomized name for a container and clean it up afterward
|
||||
'''
|
||||
def build_image():
|
||||
# Create temp dir
|
||||
image_build_rootdir = tempfile.mkdtemp(dir=TMP)
|
||||
script_path = \
|
||||
os.path.join(FILES, 'file/base/mkimage-busybox-static')
|
||||
cmd = [script_path, image_build_rootdir, IMAGE_NAME]
|
||||
log.debug('Running \'%s\' to build busybox image', ' '.join(cmd))
|
||||
process = subprocess.Popen(
|
||||
cmd,
|
||||
close_fds=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
output = process.communicate()[0]
|
||||
log.debug('Output from mkimage-busybox-static:\n%s', output)
|
||||
|
||||
if process.returncode != 0:
|
||||
raise Exception('Failed to build image')
|
||||
|
||||
try:
|
||||
salt.utils.files.rm_rf(image_build_rootdir)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
        try:
            self.run_function('docker.inspect_image', [IMAGE_NAME])
        except CommandExecutionError:
            # Image is not present yet, build it before creating the container
            build_image()
|
||||
|
||||
name = random_name(prefix='salt_test_')
|
||||
self.run_function(
|
||||
'docker.create',
|
||||
name=name,
|
||||
image=IMAGE_NAME,
|
||||
command='sleep 600',
|
||||
start=True)
|
||||
try:
|
||||
return func(self, name, *args, **kwargs)
|
||||
finally:
|
||||
try:
|
||||
self.run_function('docker.rm', [name], force=True)
|
||||
except CommandExecutionError as exc:
|
||||
if 'No such container' not in exc.__str__():
|
||||
raise
|
||||
return wrapper
|
||||
|
||||
|
||||
@destructiveTest
|
||||
@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed')
|
||||
class DockerNetworkTestCase(ModuleCase, SaltReturnAssertsMixin):
|
||||
'''
|
||||
Test docker_network states
|
||||
'''
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
'''
|
||||
Remove test image if present. Note that this will run a docker rmi even
|
||||
if no test which required the image was run.
|
||||
'''
|
||||
cmd = ['docker', 'rmi', '--force', IMAGE_NAME]
|
||||
log.debug('Running \'%s\' to destroy busybox image', ' '.join(cmd))
|
||||
process = subprocess.Popen(
|
||||
cmd,
|
||||
close_fds=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
output = process.communicate()[0]
|
||||
log.debug('Output from %s:\n%s', ' '.join(cmd), output)
|
||||
|
||||
if process.returncode != 0 and 'No such image' not in output:
|
||||
raise Exception('Failed to destroy image')
|
||||
|
||||
def run_state(self, function, **kwargs):
|
||||
ret = super(DockerNetworkTestCase, self).run_state(function, **kwargs)
|
||||
log.debug('ret = %s', ret)
|
||||
return ret
|
||||
|
||||
@with_network(create=False)
|
||||
def test_absent(self, net):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state('docker_network.present', name=net.name))
|
||||
ret = self.run_state('docker_network.absent', name=net.name)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
self.assertEqual(ret['changes'], {'removed': True})
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Removed network \'{0}\''.format(net.name)
|
||||
)
|
||||
|
||||
@container_name
|
||||
@with_network(create=False)
|
||||
def test_absent_with_disconnected_container(self, net, container_name):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state('docker_network.present',
|
||||
name=net.name,
|
||||
containers=[container_name])
|
||||
)
|
||||
ret = self.run_state('docker_network.absent', name=net.name)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
self.assertEqual(
|
||||
ret['changes'],
|
||||
{
|
||||
'removed': True,
|
||||
'disconnected': [container_name],
|
||||
}
|
||||
)
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Removed network \'{0}\''.format(net.name)
|
||||
)
|
||||
|
||||
@with_network(create=False)
|
||||
def test_absent_when_not_present(self, net):
|
||||
ret = self.run_state('docker_network.absent', name=net.name)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
self.assertEqual(ret['changes'], {})
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' already absent'.format(net.name)
|
||||
)
|
||||
|
||||
@with_network(create=False)
|
||||
def test_present(self, net):
|
||||
ret = self.run_state('docker_network.present', name=net.name)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
# Make sure the state return is what we expect
|
||||
self.assertEqual(ret['changes'], {'created': True})
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' created'.format(net.name)
|
||||
)
|
||||
|
||||
# Now check to see that the network actually exists. If it doesn't,
|
||||
# this next function call will raise an exception.
|
||||
self.run_function('docker.inspect_network', [net.name])
|
||||
|
||||
@container_name
|
||||
@with_network(create=False)
|
||||
def test_present_with_containers(self, net, container_name):
|
||||
ret = self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
containers=[container_name])
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
self.assertEqual(
|
||||
ret['changes'],
|
||||
{
|
||||
'created': True,
|
||||
'connected': [container_name],
|
||||
}
|
||||
)
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' created'.format(net.name)
|
||||
)
|
||||
|
||||
# Now check to see that the network actually exists. If it doesn't,
|
||||
# this next function call will raise an exception.
|
||||
self.run_function('docker.inspect_network', [net.name])
|
||||
|
||||
def _test_present_reconnect(self, net, container_name, reconnect=True):
|
||||
ret = self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
driver='bridge')
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
self.assertEqual(ret['changes'], {'created': True})
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' created'.format(net.name)
|
||||
)
|
||||
|
||||
# Connect the container
|
||||
self.run_function(
|
||||
'docker.connect_container_to_network',
|
||||
[container_name, net.name]
|
||||
)
|
||||
|
||||
# Change the driver to force the network to be replaced
|
||||
ret = self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
driver='macvlan',
|
||||
reconnect=reconnect)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
self.assertEqual(
|
||||
ret['changes'],
|
||||
{
|
||||
'recreated': True,
|
||||
'reconnected' if reconnect else 'disconnected': [container_name],
|
||||
net.name: {
|
||||
'Driver': {
|
||||
'old': 'bridge',
|
||||
'new': 'macvlan',
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' was replaced with updated config'.format(net.name)
|
||||
)
|
||||
|
||||
@container_name
|
||||
@with_network(create=False)
|
||||
def test_present_with_reconnect(self, net, container_name):
|
||||
'''
|
||||
Test reconnecting with containers not passed to state
|
||||
'''
|
||||
self._test_present_reconnect(net, container_name, reconnect=True)
|
||||
|
||||
@container_name
|
||||
@with_network(create=False)
|
||||
def test_present_with_no_reconnect(self, net, container_name):
|
||||
'''
|
||||
Test reconnecting with containers not passed to state
|
||||
'''
|
||||
self._test_present_reconnect(net, container_name, reconnect=False)
|
||||
|
||||
@with_network()
|
||||
def test_present_internal(self, net):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
internal=True,
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net.name])
|
||||
self.assertIs(net_info['Internal'], True)
|
||||
|
||||
@with_network()
|
||||
def test_present_labels(self, net):
|
||||
# Test a mix of different ways of specifying labels
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
labels=[
|
||||
'foo',
|
||||
'bar=baz',
|
||||
{'hello': 'world'},
|
||||
],
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net.name])
|
||||
self.assertEqual(
|
||||
net_info['Labels'],
|
||||
{'foo': '',
|
||||
'bar': 'baz',
|
||||
'hello': 'world'},
|
||||
)
|
||||
|
||||
@with_network(subnet='fe3f:2180:26:1::/123')
|
||||
@with_network(subnet='10.247.197.96/27')
|
||||
@skipIf(not IPV6_ENABLED, 'IPv6 not enabled')
|
||||
def test_present_enable_ipv6(self, net1, net2):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net1.name,
|
||||
enable_ipv6=True,
|
||||
ipam_pools=[
|
||||
{'subnet': net1.subnet},
|
||||
{'subnet': net2.subnet},
|
||||
],
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net1.name])
|
||||
self.assertIs(net_info['EnableIPv6'], True)
|
||||
|
||||
@requires_system_grains
|
||||
@with_network()
|
||||
def test_present_attachable(self, net, grains):
|
||||
if grains['os_family'] == 'RedHat' \
|
||||
and grains.get('osmajorrelease', 0) <= 7:
|
||||
self.skipTest('Cannot reliably manage attachable on RHEL <= 7')
|
||||
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
attachable=True,
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net.name])
|
||||
self.assertIs(net_info['Attachable'], True)
|
||||
|
||||
@skipIf(True, 'Skip until we can set up docker swarm testing')
|
||||
@with_network()
|
||||
def test_present_scope(self, net):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
scope='global',
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net.name])
|
||||
self.assertIs(net_info['Scope'], 'global')
|
||||
|
||||
@skipIf(True, 'Skip until we can set up docker swarm testing')
|
||||
@with_network()
|
||||
def test_present_ingress(self, net):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net.name,
|
||||
ingress=True,
|
||||
)
|
||||
)
|
||||
net_info = self.run_function('docker.inspect_network', [net.name])
|
||||
self.assertIs(net_info['Ingress'], True)
|
||||
|
||||
@with_network(subnet='10.247.197.128/27')
|
||||
@with_network(subnet='10.247.197.96/27')
|
||||
def test_present_with_custom_ipv4(self, net1, net2):
|
||||
# First run will test passing the IPAM arguments individually
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=net1.name,
|
||||
subnet=net1.subnet,
|
||||
gateway=net1.gateway,
|
||||
)
|
||||
)
|
||||
# Second run will pass them in the ipam_pools argument
|
||||
ret = self.run_state(
|
||||
'docker_network.present',
|
||||
name=net1.name, # We want to keep the same network name
|
||||
ipam_pools=[
|
||||
{'subnet': net2.subnet,
|
||||
'gateway': net2.gateway},
|
||||
],
|
||||
)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
# Docker requires there to be IPv4, even when only an IPv6 subnet was
|
||||
# provided. So, there will be both an IPv4 and IPv6 pool in the
|
||||
# configuration.
|
||||
expected = {
|
||||
'recreated': True,
|
||||
net1.name: {
|
||||
'IPAM': {
|
||||
'Config': {
|
||||
'old': [
|
||||
{'Subnet': net1.subnet,
|
||||
'Gateway': net1.gateway},
|
||||
],
|
||||
'new': [
|
||||
{'Subnet': net2.subnet,
|
||||
'Gateway': net2.gateway},
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
self.assertEqual(ret['changes'], expected)
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' was replaced with updated config'.format(
|
||||
net1.name
|
||||
)
|
||||
)
|
||||
|
||||
@with_network(subnet='fe3f:2180:26:1::20/123')
|
||||
@with_network(subnet='fe3f:2180:26:1::/123')
|
||||
@with_network(subnet='10.247.197.96/27')
|
||||
@skipIf(not IPV6_ENABLED, 'IPv6 not enabled')
|
||||
def test_present_with_custom_ipv6(self, ipv4_net, ipv6_net1, ipv6_net2):
|
||||
self.assertSaltTrueReturn(
|
||||
self.run_state(
|
||||
'docker_network.present',
|
||||
name=ipv4_net.name,
|
||||
enable_ipv6=True,
|
||||
ipam_pools=[
|
||||
{'subnet': ipv4_net.subnet,
|
||||
'gateway': ipv4_net.gateway},
|
||||
{'subnet': ipv6_net1.subnet,
|
||||
'gateway': ipv6_net1.gateway}
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
ret = self.run_state(
|
||||
'docker_network.present',
|
||||
name=ipv4_net.name, # We want to keep the same network name
|
||||
enable_ipv6=True,
|
||||
ipam_pools=[
|
||||
{'subnet': ipv4_net.subnet,
|
||||
'gateway': ipv4_net.gateway},
|
||||
{'subnet': ipv6_net2.subnet,
|
||||
'gateway': ipv6_net2.gateway}
|
||||
],
|
||||
)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
ret = ret[next(iter(ret))]
|
||||
|
||||
# Docker requires there to be IPv4, even when only an IPv6 subnet was
|
||||
# provided. So, there will be both an IPv4 and IPv6 pool in the
|
||||
# configuration.
|
||||
expected = {
|
||||
'recreated': True,
|
||||
ipv4_net.name: {
|
||||
'IPAM': {
|
||||
'Config': {
|
||||
'old': [
|
||||
{'Subnet': ipv4_net.subnet,
|
||||
'Gateway': ipv4_net.gateway},
|
||||
{'Subnet': ipv6_net1.subnet,
|
||||
'Gateway': ipv6_net1.gateway}
|
||||
],
|
||||
'new': [
|
||||
{'Subnet': ipv4_net.subnet,
|
||||
'Gateway': ipv4_net.gateway},
|
||||
{'Subnet': ipv6_net2.subnet,
|
||||
'Gateway': ipv6_net2.gateway}
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
self.assertEqual(ret['changes'], expected)
|
||||
self.assertEqual(
|
||||
ret['comment'],
|
||||
'Network \'{0}\' was replaced with updated config'.format(
|
||||
ipv4_net.name
|
||||
)
|
||||
)
|
109	tests/support/docker.py	Normal file
@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
'''
Common code used in Docker integration tests
'''
# Import Python libs
from __future__ import absolute_import
import functools
import random
import string

# Import Salt libs
from salt.exceptions import CommandExecutionError

# Import 3rd-party libs
from salt._compat import ipaddress
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin


def random_name(prefix=''):
    ret = prefix
    for _ in range(8):
        ret += random.choice(string.ascii_lowercase)
    return ret


class Network(object):
    def __init__(self, name, **kwargs):
        self.kwargs = kwargs
        self.name = name
        try:
            self.net = ipaddress.ip_network(self.kwargs['subnet'])
            self._rand_indexes = random.sample(
                range(2, self.net.num_addresses - 1),
                self.net.num_addresses - 3)
            self.ip_arg = 'ipv{0}_address'.format(self.net.version)
        except KeyError:
            # No explicit subnet passed
            self.net = self.ip_arg = None

    def __getitem__(self, index):
        try:
            return self.net[self._rand_indexes[index]].compressed
        except (TypeError, AttributeError):
            raise ValueError(
                'Indexing not supported for networks without a custom subnet')

    def arg_map(self, arg_name):
        return {'ipv4_address': 'IPv4Address',
                'ipv6_address': 'IPv6Address',
                'links': 'Links',
                'aliases': 'Aliases'}[arg_name]

    @property
    def subnet(self):
        try:
            return self.net.compressed
        except AttributeError:
            return None

    @property
    def gateway(self):
        try:
            return self.kwargs['gateway']
        except KeyError:
            try:
                return self.net[1].compressed
            except (AttributeError, IndexError):
                return None


class with_network(object):
    '''
    Generate a network for the test. Information about the network will be
    passed to the wrapped function.
    '''
    def __init__(self, **kwargs):
        self.create = kwargs.pop('create', False)
        self.network = Network(random_name(prefix='salt_net_'), **kwargs)
        if self.network.net is not None:
            if 'enable_ipv6' not in kwargs:
                kwargs['enable_ipv6'] = self.network.net.version == 6
        self.kwargs = kwargs

    def __call__(self, func):
        self.func = func
        return functools.wraps(func)(
            lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)  # pylint: disable=W0108
        )

    def wrap(self, testcase, *args, **kwargs):
        if self.create:
            testcase.run_function(
                'docker.create_network',
                [self.network.name],
                **self.kwargs)
        try:
            return self.func(testcase, self.network, *args, **kwargs)
        finally:
            try:
                testcase.run_function(
                    'docker.disconnect_all_containers_from_network',
                    [self.network.name])
            except CommandExecutionError as exc:
                if '404' not in exc.__str__():
                    raise
            else:
                testcase.run_function(
                    'docker.remove_network',
                    [self.network.name])
@@ -16,6 +16,8 @@ from tests.support.mock import (
    NO_MOCK_REASON,
    patch
)
import logging
log = logging.getLogger(__name__)

# Import Salt Libs
import salt.config
@@ -45,6 +47,9 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
            salt.config.DEFAULT_MINION_OPTS,
            whitelist=['state']
        )
        # Force the LazyDict to populate its references. Otherwise the lookup
        # will fail inside the unit tests.
        utils.keys()
        return {docker_mod: {'__context__': {'docker.docker_version': ''},
                             '__utils__': utils}}

@@ -130,7 +135,7 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
            ('kill', ()),
            ('pause', ()),
            ('signal_', ('KILL',)),
            ('start', ()),
            ('start_', ()),
            ('stop', ()),
            ('unpause', ()),
            ('_run', ('command',)),
@@ -164,8 +169,12 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
        host_config = {}
        client = Mock()
        client.api_version = '1.21'
        client.networks = Mock(return_value=[
            {'Name': 'foo',
             'Id': '01234',
             'Containers': {}}
        ])
        get_client_mock = MagicMock(return_value=client)

        with patch.dict(docker_mod.__dict__,
                        {'__salt__': __salt__}):
            with patch.object(docker_mod, '_get_client', get_client_mock):
@@ -207,7 +216,6 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
                    'Subnet': '192.168.0.0/24'
                }],
                'Driver': 'default',
                'Options': {}
            },
            check_duplicate=True)

@@ -276,7 +284,7 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
            with patch.object(docker_mod, '_get_client', get_client_mock):
                docker_mod.connect_container_to_network('container', 'foo')
        client.connect_container_to_network.assert_called_once_with(
            'container', 'foo', None)
            'container', 'foo')

    @skipIf(docker_version < (1, 5, 0),
            'docker module must be installed to run this test or is too old. >=1.5.0')
@@ -546,16 +554,13 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
            })

        ret = None
        with patch.object(docker_mod, 'start', docker_start_mock):
            with patch.object(docker_mod, 'create', docker_create_mock):
                with patch.object(docker_mod, 'stop', docker_stop_mock):
                    with patch.object(docker_mod, 'commit', docker_commit_mock):
                        with patch.object(docker_mod, 'sls', docker_sls_mock):
                            with patch.object(docker_mod, 'rm_', docker_rm_mock):
                                ret = docker_mod.sls_build(
                                    'foo',
                                    mods='foo',
                                )
        with patch.object(docker_mod, 'start_', docker_start_mock), \
                patch.object(docker_mod, 'create', docker_create_mock), \
                patch.object(docker_mod, 'stop', docker_stop_mock), \
                patch.object(docker_mod, 'commit', docker_commit_mock), \
                patch.object(docker_mod, 'sls', docker_sls_mock), \
                patch.object(docker_mod, 'rm_', docker_rm_mock):
            ret = docker_mod.sls_build('foo', mods='foo')
        docker_create_mock.assert_called_once_with(
            cmd='sleep infinity',
            image='opensuse/python', interactive=True, tty=True)
@@ -563,7 +568,7 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
        docker_sls_mock.assert_called_once_with('ID', 'foo', 'base')
        docker_stop_mock.assert_called_once_with('ID')
        docker_rm_mock.assert_called_once_with('ID')
        docker_commit_mock.assert_called_once_with('ID', 'foo')
        docker_commit_mock.assert_called_once_with('ID', 'foo', tag='latest')
        self.assertEqual(
            {'Id': 'ID2', 'Image': 'foo', 'Time_Elapsed': 42}, ret)

@@ -604,16 +609,12 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
            })

        ret = None
        with patch.object(docker_mod, 'start', docker_start_mock):
            with patch.object(docker_mod, 'create', docker_create_mock):
                with patch.object(docker_mod, 'stop', docker_stop_mock):
                    with patch.object(docker_mod, 'rm_', docker_rm_mock):
                        with patch.object(docker_mod, 'sls', docker_sls_mock):
                            ret = docker_mod.sls_build(
                                'foo',
                                mods='foo',
                                dryrun=True
                            )
        with patch.object(docker_mod, 'start_', docker_start_mock), \
                patch.object(docker_mod, 'create', docker_create_mock), \
                patch.object(docker_mod, 'stop', docker_stop_mock), \
                patch.object(docker_mod, 'rm_', docker_rm_mock), \
                patch.object(docker_mod, 'sls', docker_sls_mock):
            ret = docker_mod.sls_build('foo', mods='foo', dryrun=True)
        docker_create_mock.assert_called_once_with(
            cmd='sleep infinity',
            image='opensuse/python', interactive=True, tty=True)
@@ -669,19 +670,15 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
        context = {'docker.exec_driver': 'docker-exec'}
        salt_dunder = {'config.option': docker_config_mock}

        with patch.object(docker_mod, 'run_all', docker_run_all_mock):
            with patch.object(docker_mod, 'copy_to', docker_copy_to_mock):
                with patch.object(docker_mod, '_get_client', get_client_mock):
                    with patch.dict(docker_mod.__opts__, {'cachedir': '/tmp'}):
                        with patch.dict(docker_mod.__salt__, salt_dunder):
                            with patch.dict(docker_mod.__context__, context):
                                # call twice to verify tmp path later
                                for i in range(2):
                                    ret = docker_mod.call(
                                        'ID',
                                        'test.arg',
                                        1, 2,
                                        arg1='val1')
        with patch.object(docker_mod, 'run_all', docker_run_all_mock), \
                patch.object(docker_mod, 'copy_to', docker_copy_to_mock), \
                patch.object(docker_mod, '_get_client', get_client_mock), \
                patch.dict(docker_mod.__opts__, {'cachedir': '/tmp'}), \
                patch.dict(docker_mod.__salt__, salt_dunder), \
                patch.dict(docker_mod.__context__, context):
            # call twice to verify tmp path later
            for i in range(2):
                ret = docker_mod.call('ID', 'test.arg', 1, 2, arg1='val1')

        # Check that the directory is different each time
        # [ call(name, [args]), ...
@@ -759,21 +756,28 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):

    def test_resolve_tag(self):
        '''
        Test the resolve_tag function
        Test the resolve_tag function. It runs docker.insect_image on the image
        name passed and then looks for the RepoTags key in the result
        '''
        with_prefix = 'docker.io/foo:latest'
        no_prefix = 'bar:latest'
        with patch.object(docker_mod,
                          'list_tags',
                          MagicMock(return_value=[with_prefix])):
            self.assertEqual(docker_mod.resolve_tag('foo'), with_prefix)
            self.assertEqual(docker_mod.resolve_tag('foo:latest'), with_prefix)
            self.assertEqual(docker_mod.resolve_tag(with_prefix), with_prefix)
            self.assertEqual(docker_mod.resolve_tag('foo:bar'), False)
        id_ = 'sha256:6ad733544a6317992a6fac4eb19fe1df577d4dec7529efec28a5bd0edad0fd30'
        tags = ['foo:latest', 'foo:bar']
        mock_tagged = MagicMock(return_value={'Id': id_, 'RepoTags': tags})
        mock_untagged = MagicMock(return_value={'Id': id_, 'RepoTags': []})
        mock_unexpected = MagicMock(return_value={'Id': id_})
        mock_not_found = MagicMock(side_effect=CommandExecutionError())

        with patch.object(docker_mod,
                          'list_tags',
                          MagicMock(return_value=[no_prefix])):
            self.assertEqual(docker_mod.resolve_tag('bar'), no_prefix)
            self.assertEqual(docker_mod.resolve_tag(no_prefix), no_prefix)
            self.assertEqual(docker_mod.resolve_tag('bar:baz'), False)
        with patch.object(docker_mod, 'inspect_image', mock_tagged):
            self.assertEqual(docker_mod.resolve_tag('foo'), tags[0])
            self.assertEqual(docker_mod.resolve_tag('foo', all=True), tags)

        with patch.object(docker_mod, 'inspect_image', mock_untagged):
            self.assertEqual(docker_mod.resolve_tag('foo'), id_)
            self.assertEqual(docker_mod.resolve_tag('foo', all=True), [id_])

        with patch.object(docker_mod, 'inspect_image', mock_unexpected):
            self.assertEqual(docker_mod.resolve_tag('foo'), id_)
            self.assertEqual(docker_mod.resolve_tag('foo', all=True), [id_])

        with patch.object(docker_mod, 'inspect_image', mock_not_found):
            self.assertIs(docker_mod.resolve_tag('foo'), False)
            self.assertIs(docker_mod.resolve_tag('foo', all=True), False)
@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-
'''
Unit tests for the docker state
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
    Mock,
    NO_MOCK,
    NO_MOCK_REASON,
    patch
)

# Import Salt Libs
import salt.modules.dockermod as docker_mod
import salt.states.docker_network as docker_state


@skipIf(NO_MOCK, NO_MOCK_REASON)
class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test docker_network states
    '''
    def setup_loader_modules(self):
        return {
            docker_mod: {
                '__context__': {'docker.docker_version': ''}
            },
            docker_state: {
                '__opts__': {'test': False}
            }
        }

    def test_present(self):
        '''
        Test docker_network.present
        '''
        docker_create_network = Mock(return_value='created')
        docker_connect_container_to_network = Mock(return_value='connected')
        docker_inspect_container = Mock(return_value={'Id': 'abcd', 'Name': 'container_bar'})
        # Get docker.networks to return a network with a name which is a superset of the name of
        # the network which is to be created, despite this network existing we should still expect
        # that the new network will be created.
        # Regression test for #41982.
        docker_networks = Mock(return_value=[{
            'Name': 'network_foobar',
            'Containers': {'container': {}}
        }])
        __salt__ = {'docker.create_network': docker_create_network,
                    'docker.inspect_container': docker_inspect_container,
                    'docker.connect_container_to_network': docker_connect_container_to_network,
                    'docker.networks': docker_networks,
                    }
        with patch.dict(docker_state.__dict__,
                        {'__salt__': __salt__}):
            ret = docker_state.present(
                'network_foo',
                containers=['container'],
                gateway='192.168.0.1',
                ip_range='192.168.0.128/25',
                subnet='192.168.0.0/24'
            )
        docker_create_network.assert_called_with('network_foo',
                                                 driver=None,
                                                 driver_opts=None,
                                                 gateway='192.168.0.1',
                                                 ip_range='192.168.0.128/25',
                                                 subnet='192.168.0.0/24')
        docker_connect_container_to_network.assert_called_with('abcd',
                                                               'network_foo')
        self.assertEqual(ret, {'name': 'network_foo',
                               'comment': '',
                               'changes': {'connected': ['container_bar'],
                                           'created': 'created'},
                               'result': True})

    def test_present_with_change(self):
        '''
        Test docker_network.present when the specified network has properties differing from the already present network
        '''
        network_details = {
            'Id': 'abcd',
            'Name': 'network_foo',
            'Driver': 'macvlan',
            'Containers': {
                'abcd': {}
            },
            'Options': {
                'parent': 'eth0'
            },
            'IPAM': {
                'Config': [
                    {
                        'Subnet': '192.168.0.0/24',
                        'Gateway': '192.168.0.1'
                    }
                ]
            }
        }
        docker_networks = Mock(return_value=[network_details])
        network_details['Containers'] = {'abcd': {'Id': 'abcd', 'Name': 'container_bar'}}
        docker_inspect_network = Mock(return_value=network_details)
        docker_inspect_container = Mock(return_value={'Id': 'abcd', 'Name': 'container_bar'})
        docker_disconnect_container_from_network = Mock(return_value='disconnected')
        docker_remove_network = Mock(return_value='removed')
        docker_create_network = Mock(return_value='created')
        docker_connect_container_to_network = Mock(return_value='connected')

        __salt__ = {'docker.networks': docker_networks,
                    'docker.inspect_network': docker_inspect_network,
                    'docker.inspect_container': docker_inspect_container,
                    'docker.disconnect_container_from_network': docker_disconnect_container_from_network,
                    'docker.remove_network': docker_remove_network,
                    'docker.create_network': docker_create_network,
                    'docker.connect_container_to_network': docker_connect_container_to_network,
                    }
        with patch.dict(docker_state.__dict__,
                        {'__salt__': __salt__}):
            ret = docker_state.present(
                'network_foo',
                driver='macvlan',
                gateway='192.168.1.1',
                subnet='192.168.1.0/24',
                driver_opts={'parent': 'eth1'},
                containers=['abcd']
            )

        docker_disconnect_container_from_network.assert_called_with('abcd', 'network_foo')
        docker_remove_network.assert_called_with('network_foo')
        docker_create_network.assert_called_with('network_foo',
                                                 driver='macvlan',
                                                 driver_opts={'parent': 'eth1'},
                                                 gateway='192.168.1.1',
                                                 ip_range=None,
                                                 subnet='192.168.1.0/24')
        docker_connect_container_to_network.assert_called_with('abcd', 'network_foo')

        self.assertEqual(ret, {'name': 'network_foo',
                               'comment': 'Network \'network_foo\' was replaced with updated config',
                               'changes': {
                                   'updated': {'network_foo': {
                                       'old': {
                                           'driver_opts': {'parent': 'eth0'},
                                           'gateway': '192.168.0.1',
                                           'subnet': '192.168.0.0/24'
                                       },
                                       'new': {
                                           'driver_opts': {'parent': 'eth1'},
                                           'gateway': '192.168.1.1',
                                           'subnet': '192.168.1.0/24'
                                       }
                                   }},
                                   'reconnected': ['container_bar']
                               },
                               'result': True})

    def test_absent(self):
        '''
        Test docker_network.absent
        '''
        docker_remove_network = Mock(return_value='removed')
        docker_disconnect_container_from_network = Mock(return_value='disconnected')
        docker_networks = Mock(return_value=[{
            'Name': 'network_foo',
            'Containers': {'container': {}}
        }])
        __salt__ = {
            'docker.remove_network': docker_remove_network,
            'docker.disconnect_container_from_network': docker_disconnect_container_from_network,
            'docker.networks': docker_networks,
        }
        with patch.dict(docker_state.__dict__,
                        {'__salt__': __salt__}):
            ret = docker_state.absent('network_foo')
        docker_disconnect_container_from_network.assert_called_with('container',
                                                                    'network_foo')
        docker_remove_network.assert_called_with('network_foo')
        self.assertEqual(ret, {'name': 'network_foo',
                               'comment': '',
                               'changes': {'disconnected': 'disconnected',
                                           'removed': 'removed'},
                               'result': True})

    def test_absent_with_matching_network(self):
        '''
        Test docker_network.absent when the specified network does not exist,
        but another network with a name which is a superset of the specified
        name does exist. In this case we expect there to be no attempt to remove
        any network.
        Regression test for #41982.
        '''
        docker_remove_network = Mock(return_value='removed')
        docker_disconnect_container_from_network = Mock(return_value='disconnected')
        docker_networks = Mock(return_value=[{
            'Name': 'network_foobar',
            'Containers': {'container': {}}
        }])
        __salt__ = {
            'docker.remove_network': docker_remove_network,
            'docker.disconnect_container_from_network': docker_disconnect_container_from_network,
            'docker.networks': docker_networks,
        }
        with patch.dict(docker_state.__dict__,
                        {'__salt__': __salt__}):
            ret = docker_state.absent('network_foo')
        docker_disconnect_container_from_network.assert_not_called()
        docker_remove_network.assert_not_called()
        self.assertEqual(ret, {'name': 'network_foo',
                               'comment': 'Network \'network_foo\' already absent',
                               'changes': {},
                               'result': True})
File diff suppressed because it is too large
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import logging
import socket

# Import Salt Testing libs
@@ -11,6 +12,8 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
# Import salt libs
import salt.utils.network as network

log = logging.getLogger(__name__)

LINUX = '''\
eth0      Link encap:Ethernet  HWaddr e0:3f:49:85:6a:af
          inet addr:10.10.10.56  Bcast:10.10.10.255  Mask:255.255.252.0
@@ -96,6 +99,11 @@ USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS
salt-master python2.781106 35 tcp4 127.0.0.1:61115 127.0.0.1:4506
'''

IPV4_SUBNETS = {True: ('10.10.0.0/24',),
                False: ('10.10.0.0', '10.10.0.0/33', 'FOO', 9, '0.9.800.1000/24')}
IPV6_SUBNETS = {True: ('::1/128',),
                False: ('::1', '::1/129', 'FOO', 9, 'aj01::feac/64')}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class NetworkTestCase(TestCase):
@@ -160,6 +168,31 @@ class NetworkTestCase(TestCase):
        self.assertFalse(network.is_ipv6('10.0.1.2'))
        self.assertFalse(network.is_ipv6('2001.0db8.85a3.0000.0000.8a2e.0370.7334'))

    def test_is_subnet(self):
        for subnet_data in (IPV4_SUBNETS, IPV6_SUBNETS):
            for item in subnet_data[True]:
                log.debug('Testing that %s is a valid subnet', item)
                self.assertTrue(network.is_subnet(item))
            for item in subnet_data[False]:
                log.debug('Testing that %s is not a valid subnet', item)
                self.assertFalse(network.is_subnet(item))

    def test_is_ipv4_subnet(self):
        for item in IPV4_SUBNETS[True]:
            log.debug('Testing that %s is a valid subnet', item)
            self.assertTrue(network.is_ipv4_subnet(item))
        for item in IPV4_SUBNETS[False]:
            log.debug('Testing that %s is not a valid subnet', item)
            self.assertFalse(network.is_ipv4_subnet(item))

    def test_is_ipv6_subnet(self):
        for item in IPV6_SUBNETS[True]:
            log.debug('Testing that %s is a valid subnet', item)
            self.assertTrue(network.is_ipv6_subnet(item))
        for item in IPV6_SUBNETS[False]:
            log.debug('Testing that %s is not a valid subnet', item)
            self.assertFalse(network.is_ipv6_subnet(item))

    def test_cidr_to_ipv4_netmask(self):
        self.assertEqual(network.cidr_to_ipv4_netmask(24), '255.255.255.0')
        self.assertEqual(network.cidr_to_ipv4_netmask(21), '255.255.248.0')